├── .deepsource.toml
├── .githooks
│   └── pre-push
├── .github
│   └── workflows
│       └── test.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── docs
│   ├── index.md
│   ├── keras.ipynb
│   ├── keras.md
│   ├── microbenchmarks.ipynb
│   ├── microbenchmarks.md
│   ├── pydoc.md
│   ├── pytorch.ipynb
│   ├── pytorch.md
│   ├── pyzmq-performance.ipynb
│   ├── pyzmq-performance.md
│   ├── simple-server.ipynb
│   └── simple-server.md
├── helpers
│   ├── dockertest
│   └── gendocs
├── mkdocs.yml
├── requirements.dev.txt
├── requirements.docs.txt
├── requirements.txt
├── serve-imagenet-dir
├── serve-imagenet-shards
├── setup-venv
├── setup.py
├── tensorcom
│   ├── __init__.py
│   ├── tenbin.py
│   └── zcom.py
├── tensormon
├── tensorshow
├── tensorstat
└── test
    ├── test_tenbin.py
    └── test_zcom.py

/.deepsource.toml:
--------------------------------------------------------------------------------
1 | version = 1
2 | 
3 | test_patterns = [
4 |   "test/**"
5 | ]
6 | 
7 | exclude_patterns = [
8 |   "docs/**"
9 | ]
10 | 
11 | [[analyzers]]
12 | name = "python"
13 | enabled = true
14 | 
15 | [analyzers.meta]
16 | runtime_version = "3.x.x"
17 | 
--------------------------------------------------------------------------------
/.githooks/pre-push:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source venv/bin/activate
3 | python3 -m pytest
4 | 
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 | on: [push, pull_request]
3 | jobs:
4 |   test:
5 |     runs-on: ubuntu-latest
6 |     steps:
7 |       - uses: actions/checkout@v2
8 |       - uses: actions/setup-python@v1
9 |         with:
10 |           python-version: 3.7
11 |       - run: make venv
12 |       - run: make tests
13 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | _*
3 | !__init__.py
4 | .*
5 | !.gitignore
6 | __pycache__
7 | .ipynb_checkpoints
8 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2017 NVIDIA CORPORATION. All rights reserved.
2 | 
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions
5 | are met:
6 | 
7 | 1. Redistributions of source code must retain the above copyright notice,
8 |    this list of conditions and the following disclaimer.
9 | 
10 | 2. Redistributions in binary form must reproduce the above copyright
11 |    notice, this list of conditions and the following disclaimer in the
12 |    documentation and/or other materials provided with the distribution.
13 | 
14 | 3. Neither the name of the copyright holder nor the names of its
15 |    contributors may be used to endorse or promote products derived from
16 |    this software without specific prior written permission.
17 | 
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
24 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | VENV=venv
4 | PYTHON3=$(VENV)/bin/python3
5 | PIPOPT=--no-cache-dir
6 | PIP=$(VENV)/bin/pip $(PIPOPT)
7 | BUCKET=gs://tmb-testreleases
8 | 
9 | # run the unit tests in a virtual environment
10 | 
11 | tests: venv FORCE
12 | 	rm -f tensorcom.yaml tensorcom.yml  # config files that interfere with tests
13 | 	. ./venv/bin/activate; python3 -m pytest -v -x
14 | 
15 | format: venv FORCE
16 | 	black --target-version py37 tensorcom
17 | 	black --target-version py37 $$(egrep -l '#!.*python' [st]* 2>/dev/null)
18 | 
19 | # build the virtual environment for development and testing
20 | 
21 | venv: $(VENV)/bin/activate
22 | 
23 | $(VENV)/bin/activate: requirements.txt requirements.dev.txt
24 | 	test -d $(VENV) || python3 -m venv $(VENV)
25 | 	$(PIP) install -r requirements.dev.txt
26 | 	$(PIP) install -r requirements.txt
27 | 	touch $(VENV)/bin/activate
28 | 
29 | # push a new version to pypi; commit all changes first or this will fail
30 | # after a successful push, it will try to clone the repo into a docker container
31 | # and execute the tests
32 | 
33 | dist: wheel FORCE
34 | 	twine check dist/*
35 | 	twine upload dist/*
36 | 
37 | wheel: FORCE
38 | 	rm -f dist/*
39 | 	$(PYTHON3) setup.py sdist bdist_wheel
40 | 
41 | wheeltest: wheel FORCE
42 | 	#gsutil cp dist/*.whl $(BUCKET)/$$(ls dist/*.whl | xargs basename | sed 's/-[0-9.]*-/-latest-/')
43 | 	#gsutil cp dist/*.tar.gz $(BUCKET)/$$(ls dist/*.tar.gz | xargs basename | sed 's/-[0-9.]*.tar.gz/-latest.tar.gz/')
44 | 	./helpers/dockertest package
45 | 
46 | githubtests: FORCE
47 | 	./helpers/dockertest git
48 | 
49 | pypitests: FORCE
50 | 	./helpers/dockertest pip
51 | 
52 | # build the documentation
53 | 
54 | docs: FORCE
55 | 	./helpers/gendocs
56 | 	git status | awk '/modified:/{if(index($$0, ".md")<=0)exit(1)}'
57 | 	git add docs/*.md
58 | 	git add README.md
59 | 	git status
60 | 	git commit -a -m "documentation update"
61 | 	git push
62 | 
63 | # remove temporary build constructs
64 | 
65 | clean: FORCE
66 | 	rm -rf build dist
67 | 	rm -f tensorcom.yaml tensorcom.yml  # config files that interfere with tests
68 | 	rm -rf __pycache__ */__pycache__ *.log *.egg-info .pytest_cache .tox
69 | 
70 | allclean: clean FORCE
71 | 	rm -rf venv
72 | 
73 | # set the keyring password for pypi uploads
74 | 
75 | passwd: FORCE
76 | 	$(PYTHON3) -m keyring set https://upload.pypi.org/legacy/ tmbdev
77 | 
78 | FORCE:
79 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Test](https://github.com/NVLabs/tensorcom/workflows/Test/badge.svg)](https://github.com/NVLabs/tensorcom/actions?query=workflow%3ATest)
2 | [![TestPip](https://github.com/NVLabs/tensorcom/workflows/TestPip/badge.svg)](https://github.com/NVLabs/tensorcom/actions?query=workflow%3ATestPip)
3 | [![DeepSource](https://static.deepsource.io/deepsource-badge-light-mini.svg)](https://deepsource.io/gh/NVLabs/tensorcom/?ref=repository-badge)
4 | 
5 | 
6 | # Tensorcom
7 | 
8 | Status: alpha software release, APIs may still change
9 | 
10 | Tensorcom is a way of loading training data into deep learning frameworks
11 | quickly and portably. You can write a single data loading/augmentation
12 | pipeline and train one or more jobs in the same or different frameworks
13 | with it.
14 | 
15 | Both Keras and PyTorch can use the Python `Connection` object for input,
16 | but MessagePack and ZMQ libraries exist in all major languages, making
17 | it easy to write servers and input operators for any framework.
18 | 
19 | Tensorcom replaces the use of `multiprocessing` in Python for that purpose.
20 | Both use separate processes for loading and augmentation, but by making
21 | the processes and communications explicit, you gain some significant advantages:
22 | 
23 | - the same augmentation pipeline can be used with different DL frameworks
24 | - augmentation processes can easily be run on multiple machines
25 | - output from a single augmentation pipeline can be shared by many training jobs
26 | - you can start up and test the augmentation pipeline before you start the DL jobs
27 | - DL frameworks wanting to use `tensorcom` only need a small library to handle input
28 | 
29 | Using `tensorcom` for training is very simple. First, start up a data server;
30 | for Imagenet, there are two example jobs. The `serve-imagenet-dir` program
31 | illustrates how to use the standard PyTorch Imagenet `DataLoader` to serve
32 | training data:
33 | 
34 | ```
35 | $ serve-imagenet-dir -d /data/imagenet -b 64 zpub://127.0.0.1:7880
36 | ```
37 | 
38 | The server will give you information about the rate at which it serves image batches.
39 | Your training loop then becomes very simple:
40 | 
41 | ```
42 | training = tensorcom.Connection("zsub://127.0.0.1:7880", epoch=1000000)
43 | for xs, ys in training:
44 |     train_batch(xs, ys)
45 | ```
46 | 
47 | If you want multiple jobs for augmentation, just use more publishers using
48 | Bash-style brace notation: `zpub://127.0.0.1:788{0..3}` and `zsub://127.0.0.1:788{0..3}`.
49 | 
50 | Note that you can start up multiple training jobs connecting to the same server.
51 | 
52 | # Command Line Tools
53 | 
54 | There are some command line programs to help with developing and debugging these
55 | jobs:
56 | 
57 | - tensormon -- connect to a data server and monitor throughput
58 | - tensorshow -- show images from input batches
59 | - tensorstat -- compute statistics over input data samples
60 | 
61 | # Examples
62 | 
63 | - serve-imagenet-dir -- serve Imagenet data from a file system using PyTorch
64 | - serve-imagenet-shards -- serve Imagenet from shards using `webloader`
65 | - keras.ipynb -- simple example of using Keras with tensorcom
66 | - pytorch.ipynb -- simple example of using PyTorch with tensorcom
67 | 
68 | # ZMQ URLs
69 | 
70 | There is no official standard for ZMQ URLs. This library uses the following notation:
71 | 
72 | Socket types:
73 | 
74 | - zpush / zpull -- standard PUSH/PULL sockets
75 | - zrpush / zrpull -- reverse PUSH/PULL connections (PUSH socket is server / PULL socket connects)
76 | - zpub / zsub -- standard PUB/SUB sockets
77 | - zrpub / zrsub -- reverse PUB/SUB connections
78 | 
79 | The pub/sub servers allow the same augmentation pipeline to be shared by multiple
80 | learning jobs.
81 | 
82 | Default transport is TCP/IP, but you can choose IPC as in `zpush+ipc://mypath`.
83 | 
84 | # Connection Objects
85 | 
86 | The major way of interacting with the library is through the `Connection` object.
87 | It simply gives you an iterator over training samples.
88 | 
89 | # Encodings
90 | 
91 | Data is encoded in a simple binary tensor format; see `tenbin.py` for details.
92 | The same format can also be used for saving and loading lists of
93 | tensors from disk (extension: `.ten`).
94 | Data is encoded on 64 byte aligned boundaries to allow easy memory
95 | mapping and direct use by CPUs and GPUs.
96 | 
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | [![Test](https://github.com/NVLabs/tensorcom/workflows/Test/badge.svg)](https://github.com/NVLabs/tensorcom/actions?query=workflow%3ATest)
2 | [![TestPip](https://github.com/NVLabs/tensorcom/workflows/TestPip/badge.svg)](https://github.com/NVLabs/tensorcom/actions?query=workflow%3ATestPip)
3 | [![DeepSource](https://static.deepsource.io/deepsource-badge-light-mini.svg)](https://deepsource.io/gh/NVLabs/tensorcom/?ref=repository-badge)
4 | 
5 | 
6 | # Tensorcom
7 | 
8 | Tensorcom is a way of loading training data into deep learning frameworks
9 | quickly and portably. You can write a single data loading/augmentation
10 | pipeline and train one or more jobs in the same or different frameworks
11 | with it.
12 | 
13 | Both Keras and PyTorch can use the Python `Connection` object for input,
14 | but MessagePack and ZMQ libraries exist in all major languages, making
15 | it easy to write servers and input operators for any framework.
16 | 
17 | Tensorcom replaces the use of `multiprocessing` in Python for that purpose.
18 | Both use separate processes for loading and augmentation, but by making
19 | the processes and communications explicit, you gain some significant advantages:
20 | 
21 | - the same augmentation pipeline can be used with different DL frameworks
22 | - augmentation processes can easily be run on multiple machines
23 | - output from a single augmentation pipeline can be shared by many training jobs
24 | - you can start up and test the augmentation pipeline before you start the DL jobs
25 | - DL frameworks wanting to use `tensorcom` only need a small library to handle input
26 | 
27 | Using `tensorcom` for training is very simple. First, start up a data server;
28 | for Imagenet, there are two example jobs. The `serve-imagenet-dir` program
29 | illustrates how to use the standard PyTorch Imagenet `DataLoader` to serve
30 | training data:
31 | 
32 | ```
33 | $ serve-imagenet-dir -d /data/imagenet -b 64 zpub://127.0.0.1:7880
34 | ```
35 | 
36 | The server will give you information about the rate at which it serves image batches.
37 | Your training loop then becomes very simple:
38 | 
39 | ```
40 | training = tensorcom.Connection("zsub://127.0.0.1:7880", epoch=1000000)
41 | for xs, ys in training:
42 |     train_batch(xs, ys)
43 | ```
44 | 
45 | If you want multiple jobs for augmentation, just use more publishers using
46 | Bash-style brace notation: `zpub://127.0.0.1:788{0..3}` and `zsub://127.0.0.1:788{0..3}`.
47 | 
48 | Note that you can start up multiple training jobs connecting to the same server.
49 | 
50 | # Command Line Tools
51 | 
52 | There are some command line programs to help with developing and debugging these
53 | jobs:
54 | 
55 | - tensormon -- connect to a data server and monitor throughput
56 | - tensorshow -- show images from input batches
57 | - tensorstat -- compute statistics over input data samples
58 | 
59 | # Examples
60 | 
61 | - serve-imagenet-dir -- serve Imagenet data from a file system using PyTorch
62 | - serve-imagenet-shards -- serve Imagenet from shards using `webloader`
63 | - keras.ipynb -- simple example of using Keras with tensorcom
64 | - pytorch.ipynb -- simple example of using PyTorch with tensorcom
65 | 
66 | # ZMQ URLs
67 | 
68 | There is no official standard for ZMQ URLs. This library uses the following notation:
69 | 
70 | Socket types:
71 | 
72 | - zpush / zpull -- standard PUSH/PULL sockets
73 | - zrpush / zrpull -- reverse PUSH/PULL connections (PUSH socket is server / PULL socket connects)
74 | - zpub / zsub -- standard PUB/SUB sockets
75 | - zrpub / zrsub -- reverse PUB/SUB connections
76 | 
77 | The pub/sub servers allow the same augmentation pipeline to be shared by multiple
78 | learning jobs.
79 | 
80 | Default transport is TCP/IP, but you can choose IPC as in `zpush+ipc://mypath`.
81 | 
82 | # Connection Objects
83 | 
84 | The major way of interacting with the library is through the `Connection` object.
85 | It simply gives you an iterator over training samples.
86 | 
87 | # Encodings
88 | 
89 | Data is encoded in a simple binary tensor format; see `tenbin.py` for details.
90 | The same format can also be used for saving and loading lists of
91 | tensors from disk (extension: `.ten`).
92 | Data is encoded on 64 byte aligned boundaries to allow easy memory
93 | mapping and direct use by CPUs and GPUs.
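
For concreteness, here is a small sketch of round-tripping a list of arrays
through this format with `tensorcom.tenbin` (the `save`/`load` and
`encode_buffer`/`decode_buffer` functions are documented in `docs/pydoc.md`;
the file name and array contents below are just for illustration):

```
import numpy as np
from tensorcom import tenbin

# a batch as a server might send it: images plus labels
xs = np.random.rand(64, 224, 224, 3).astype("float32")
ys = np.arange(64).astype("int32")

# save/load a list of arrays as a .ten file
tenbin.save("sample.ten", xs, ys)
xs2, ys2 = tenbin.load("sample.ten")
assert (xs == xs2).all() and (ys == ys2).all()

# or encode into a single byte buffer for a custom transport
buf = tenbin.encode_buffer([xs, ys])
xs3, ys3 = tenbin.decode_buffer(buf)
```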
94 | -------------------------------------------------------------------------------- /docs/keras.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Simple Example of using WebLoader with Keras" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "!test -f training.tgz || curl http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-0000-000.tgz -o training.tgz\n", 17 | "!test -f testing.tgz || curl http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_val-0000.tgz -o testing.tgz" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "In a separate window, start the server with:\n", 25 | "\n", 26 | "```\n", 27 | " serve-imagenet -u training.tgz -b 64\n", 28 | "```" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "from importlib import reload\n", 38 | "import keras\n", 39 | "from keras.datasets import cifar10\n", 40 | "from keras.preprocessing.image import ImageDataGenerator\n", 41 | "from keras.models import Sequential\n", 42 | "from keras.layers import Dense, Dropout, Activation, Flatten\n", 43 | "from keras.layers import Conv2D, MaxPooling2D\n", 44 | "from keras import models, layers\n", 45 | "import os\n", 46 | "import numpy as np\n", 47 | "import tensorcom as tc\n", 48 | "from webdataset import WebDataset\n", 49 | "from webdataset.loader import WebLoader" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "mean = np.array([0.485, 0.456, 0.406], \"f\")\n", 59 | "std = np.array([0.229, 0.224, 0.225], \"f\")\n", 60 | "def norm_image(xs):\n", 61 | " return (xs-mean[None,None,:])/std[None,None,:]\n", 62 | "def norm_cls(ys):\n", 63 | " return keras.utils.to_categorical(ys-1, 1000)" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "We're using the regular DataLoader for the test data set. In fact, it's best just to download the test data set." 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "#urls = \"http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_val-0000.tgz\"\n", 80 | "urls = \"testing.tgz\"\n", 81 | "test_batch_size = 256\n", 82 | "test_num_batches = 50000//test_batch_size\n", 83 | "testing_ds = WebDataset(urls, extensions=\"ppm;png;jpg cls\", transforms=[norm_image, norm_cls])\n", 84 | "testing = WebLoader(testing_ds, batch_size=test_batch_size)\n", 85 | "xs, ys = next(iter(testing))\n", 86 | "print(xs.shape, xs.dtype, np.amin(xs), np.amax(xs))\n", 87 | "print(ys.shape, ys.dtype, np.amin(ys), np.amax(ys))" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "The `Connection` class is similar to the `WebLoader` class, but the decompression and batching happens in the `tensorcom` server process." 
95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "#urls = \"http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-{0000..0147}-{000..019}.tgz\"\n", 104 | "num_samples = 1000000\n", 105 | "training = tc.Connection(\"zsub://127.0.0.1:7880\",\n", 106 | " epoch=num_samples,\n", 107 | " batch_transforms=[norm_image, norm_cls])\n", 108 | "xs, ys = next(iter(training))\n", 109 | "print(xs.shape, xs.dtype, np.amin(xs), np.amax(xs))\n", 110 | "print(ys.shape, ys.dtype, np.amin(ys), np.amax(ys))" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [ 119 | "batch_norm = True\n", 120 | "dropout = 0.5\n", 121 | "nclasses = 1000\n", 122 | "shape = (224, 224, 3)\n", 123 | "batchsize = 32\n", 124 | "lr = 0.001 # was: 0.0001\n", 125 | "\n", 126 | "def vgg_block(d, r=2, name=None, size=(3, 3), mp_size=(2, 2), mp_strides=(2, 2)):\n", 127 | " result = []\n", 128 | " for i in range(r):\n", 129 | " name1 = name + \"-{}\".format(i) if name is not None else None\n", 130 | " if batch_norm:\n", 131 | " result += [\n", 132 | " layers.Conv2D(d, size, padding=\"same\", name=name1),\n", 133 | " layers.BatchNormalization(),\n", 134 | " layers.Activation(\"relu\")\n", 135 | " ]\n", 136 | " else:\n", 137 | " result += [layers.Conv2D(d, size, activation=\"relu\", padding=\"same\", name=name1)] \n", 138 | " name1 = name + \"-mp\" if name is not None else None\n", 139 | " result += [layers.MaxPooling2D(mp_size, mp_strides, name=name1)]\n", 140 | " return result\n", 141 | "\n", 142 | "model = models.Sequential(\n", 143 | " [layers.Lambda(lambda x: x + 0, input_shape=shape)] + \n", 144 | " vgg_block(64, 2) + \n", 145 | " vgg_block(128, 2) +\n", 146 | " vgg_block(256, 3) +\n", 147 | " vgg_block(512, 3) +\n", 148 | " vgg_block(512, 3) +\n", 149 | " [layers.Flatten(name=\"flatten\"),\n", 150 | " layers.Dense(4096, activation=\"relu\", name=\"fc1\"),\n", 151 | " layers.Dropout(rate=1-dropout),\n", 152 | " layers.Dense(4096, activation=\"relu\", name=\"fc2\"),\n", 153 | " layers.Dropout(rate=1-dropout),\n", 154 | " layers.Dense(nclasses, activation=\"softmax\", name=\"prediction\")])\n", 155 | "\n", 156 | "opt = keras.optimizers.rmsprop(lr=lr, decay=1e-6)\n", 157 | "\n", 158 | "model.compile(loss='categorical_crossentropy',\n", 159 | " optimizer=opt,\n", 160 | " metrics=['accuracy'])\n", 161 | "\n", 162 | "model.fit_generator(iter(training),\n", 163 | " epochs=1,\n", 164 | " steps_per_epoch=1000, #num_batches,\n", 165 | " validation_data=iter(testing),\n", 166 | " validation_steps=100, #test_num_batches,\n", 167 | " shuffle=True)" 168 | ] 169 | } 170 | ], 171 | "metadata": { 172 | "kernelspec": { 173 | "display_name": "Python 3", 174 | "language": "python", 175 | "name": "python3" 176 | }, 177 | "language_info": { 178 | "codemirror_mode": { 179 | "name": "ipython", 180 | "version": 3 181 | }, 182 | "file_extension": ".py", 183 | "mimetype": "text/x-python", 184 | "name": "python", 185 | "nbconvert_exporter": "python", 186 | "pygments_lexer": "ipython3", 187 | "version": "3.7.3" 188 | } 189 | }, 190 | "nbformat": 4, 191 | "nbformat_minor": 4 192 | } 193 | -------------------------------------------------------------------------------- /docs/keras.md: -------------------------------------------------------------------------------- 1 | # Simple Example of using WebLoader with Keras 2 | 3 | 4 | ```python 5 | !test -f training.tgz || curl 
http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-0000-000.tgz -o training.tgz 6 | !test -f testing.tgz || curl http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_val-0000.tgz -o testing.tgz 7 | ``` 8 | 9 | In a separate window, start the server with: 10 | 11 | ``` 12 | serve-imagenet -u training.tgz -b 64 13 | ``` 14 | 15 | 16 | ```python 17 | from importlib import reload 18 | import keras 19 | from keras.datasets import cifar10 20 | from keras.preprocessing.image import ImageDataGenerator 21 | from keras.models import Sequential 22 | from keras.layers import Dense, Dropout, Activation, Flatten 23 | from keras.layers import Conv2D, MaxPooling2D 24 | from keras import models, layers 25 | import os 26 | import numpy as np 27 | import tensorcom as tc 28 | from webdataset import WebDataset 29 | from webdataset.loader import WebLoader 30 | ``` 31 | 32 | 33 | ```python 34 | mean = np.array([0.485, 0.456, 0.406], "f") 35 | std = np.array([0.229, 0.224, 0.225], "f") 36 | def norm_image(xs): 37 | return (xs-mean[None,None,:])/std[None,None,:] 38 | def norm_cls(ys): 39 | return keras.utils.to_categorical(ys-1, 1000) 40 | ``` 41 | 42 | We're using the regular DataLoader for the test data set. In fact, it's best just to download the test data set. 43 | 44 | 45 | ```python 46 | #urls = "http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_val-0000.tgz" 47 | urls = "testing.tgz" 48 | test_batch_size = 256 49 | test_num_batches = 50000//test_batch_size 50 | testing_ds = WebDataset(urls, extensions="ppm;png;jpg cls", transforms=[norm_image, norm_cls]) 51 | testing = WebLoader(testing_ds, batch_size=test_batch_size) 52 | xs, ys = next(iter(testing)) 53 | print(xs.shape, xs.dtype, np.amin(xs), np.amax(xs)) 54 | print(ys.shape, ys.dtype, np.amin(ys), np.amax(ys)) 55 | ``` 56 | 57 | The `Connection` class is similar to the `WebLoader` class, but the decompression and batching happens in the `tensorcom` server process. 
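
For reference, the producer that this notebook connects to does not have to be one of the `serve-imagenet-*` scripts; a minimal sketch of a custom server using `Connection.serve` (documented in `docs/pydoc.md`) might look like this, with the random batch generator standing in for a real loading/augmentation pipeline:

```python
import numpy as np
import tensorcom as tc

def random_batches(n=1000, bs=64):
    # stand-in for a real loading/augmentation pipeline
    for _ in range(n):
        xs = np.random.rand(bs, 224, 224, 3).astype("float16")
        ys = np.random.randint(0, 1000, size=bs).astype("int32")
        yield xs, ys

server = tc.Connection("zpub://127.0.0.1:7880")
server.serve(random_batches(), report=100)  # iterates and sends each (xs, ys)
server.close()
```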
58 | 59 | 60 | ```python 61 | #urls = "http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-{0000..0147}-{000..019}.tgz" 62 | num_samples = 1000000 63 | training = tc.Connection("zsub://127.0.0.1:7880", 64 | epoch=num_samples, 65 | batch_transforms=[norm_image, norm_cls]) 66 | xs, ys = next(iter(training)) 67 | print(xs.shape, xs.dtype, np.amin(xs), np.amax(xs)) 68 | print(ys.shape, ys.dtype, np.amin(ys), np.amax(ys)) 69 | ``` 70 | 71 | 72 | ```python 73 | batch_norm = True 74 | dropout = 0.5 75 | nclasses = 1000 76 | shape = (224, 224, 3) 77 | batchsize = 32 78 | lr = 0.001 # was: 0.0001 79 | 80 | def vgg_block(d, r=2, name=None, size=(3, 3), mp_size=(2, 2), mp_strides=(2, 2)): 81 | result = [] 82 | for i in range(r): 83 | name1 = name + "-{}".format(i) if name is not None else None 84 | if batch_norm: 85 | result += [ 86 | layers.Conv2D(d, size, padding="same", name=name1), 87 | layers.BatchNormalization(), 88 | layers.Activation("relu") 89 | ] 90 | else: 91 | result += [layers.Conv2D(d, size, activation="relu", padding="same", name=name1)] 92 | name1 = name + "-mp" if name is not None else None 93 | result += [layers.MaxPooling2D(mp_size, mp_strides, name=name1)] 94 | return result 95 | 96 | model = models.Sequential( 97 | [layers.Lambda(lambda x: x + 0, input_shape=shape)] + 98 | vgg_block(64, 2) + 99 | vgg_block(128, 2) + 100 | vgg_block(256, 3) + 101 | vgg_block(512, 3) + 102 | vgg_block(512, 3) + 103 | [layers.Flatten(name="flatten"), 104 | layers.Dense(4096, activation="relu", name="fc1"), 105 | layers.Dropout(rate=1-dropout), 106 | layers.Dense(4096, activation="relu", name="fc2"), 107 | layers.Dropout(rate=1-dropout), 108 | layers.Dense(nclasses, activation="softmax", name="prediction")]) 109 | 110 | opt = keras.optimizers.rmsprop(lr=lr, decay=1e-6) 111 | 112 | model.compile(loss='categorical_crossentropy', 113 | optimizer=opt, 114 | metrics=['accuracy']) 115 | 116 | model.fit_generator(iter(training), 117 | epochs=1, 118 | steps_per_epoch=1000, #num_batches, 119 | validation_data=iter(testing), 120 | validation_steps=100, #test_num_batches, 121 | shuffle=True) 122 | ``` 123 | -------------------------------------------------------------------------------- /docs/microbenchmarks.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "Populating the interactive namespace from numpy and matplotlib\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "%pylab inline" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "import argparse\n", 27 | "import multiprocessing\n", 28 | "import sys\n", 29 | "from functools import partial\n", 30 | "\n", 31 | "import dlinputs as dli\n", 32 | "import matplotlib as mpl\n", 33 | "import matplotlib.pyplot as plt\n", 34 | "import numpy as np\n", 35 | "import simplejson\n", 36 | "import torch\n", 37 | "import torch.nn.functional as F\n", 38 | "from dlinputs import filters, gopen, improc, utils\n", 39 | "from matplotlib import cm\n", 40 | "from torch import nn, optim\n", 41 | "from torchvision import datasets, transforms\n", 42 | "from itertools import islice\n", 43 | "import time\n", 44 | "from PIL import Image\n", 45 | "import io\n", 46 | "\n", 47 | "import tensorcom" 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": 
{}, 53 | "source": [ 54 | "# I/O" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 46, 60 | "metadata": {}, 61 | "outputs": [ 62 | { 63 | "name": "stdout", 64 | "output_type": "stream", 65 | "text": [ 66 | "1022.95552\n", 67 | "CPU times: user 0 ns, sys: 163 ms, total: 163 ms\n", 68 | "Wall time: 160 ms\n" 69 | ] 70 | } 71 | ], 72 | "source": [ 73 | "%%time\n", 74 | "total = 0\n", 75 | "with open(\"./imagenet_train-0000.tar\", \"rb\") as stream:\n", 76 | " while True:\n", 77 | " data = stream.read(1000000)\n", 78 | " if len(data)==0: break\n", 79 | " total += len(data)\n", 80 | "print(total/1e6)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "# Tar Decoding and Decompressing" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 47, 93 | "metadata": {}, 94 | "outputs": [ 95 | { 96 | "name": "stdout", 97 | "output_type": "stream", 98 | "text": [ 99 | "109.936435\n", 100 | "CPU times: user 262 ms, sys: 20.3 ms, total: 283 ms\n", 101 | "Wall time: 281 ms\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "%%time\n", 107 | "total = 0\n", 108 | "source = dli.gopen.open_source(\"./imagenet_train-0000.tar\", decode=False)\n", 109 | "for sample in islice(source, 0, 1000):\n", 110 | " total += len(sample[\"jpg\"])\n", 111 | "print(total/1e6)" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 48, 117 | "metadata": {}, 118 | "outputs": [ 119 | { 120 | "name": "stdout", 121 | "output_type": "stream", 122 | "text": [ 123 | "109.936435\n", 124 | "CPU times: user 394 ms, sys: 19.6 ms, total: 414 ms\n", 125 | "Wall time: 412 ms\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "%%time\n", 131 | "total = 0\n", 132 | "source = dli.gopen.open_source(\"./imagenet_train-0000.tgz\", decode=False)\n", 133 | "for sample in islice(source, 0, 1000):\n", 134 | " total += len(sample[\"jpg\"])\n", 135 | "print(total/1e6)" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "# JPEG Decoding" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 49, 148 | "metadata": {}, 149 | "outputs": [], 150 | "source": [ 151 | "images = []" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 50, 157 | "metadata": {}, 158 | "outputs": [ 159 | { 160 | "name": "stdout", 161 | "output_type": "stream", 162 | "text": [ 163 | "478969\n", 164 | "CPU times: user 8.17 s, sys: 305 ms, total: 8.48 s\n", 165 | "Wall time: 8.46 s\n" 166 | ] 167 | } 168 | ], 169 | "source": [ 170 | "%%time\n", 171 | "total = 0\n", 172 | "source = dli.gopen.open_source(\"./imagenet_train-0000.tgz\", decode=dli.utils.autodecoder(\"PIL\"))\n", 173 | "for sample in islice(source, 0, 1000):\n", 174 | " total += sample[\"jpg\"].size[0]\n", 175 | " images.append(sample[\"jpg\"].convert(\"RGB\"))\n", 176 | "print(total)" 177 | ] 178 | }, 179 | { 180 | "cell_type": "markdown", 181 | "metadata": {}, 182 | "source": [ 183 | "# JPEG Decoding with Multiprocessing" 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": 51, 189 | "metadata": {}, 190 | "outputs": [ 191 | { 192 | "name": "stdout", 193 | "output_type": "stream", 194 | "text": [ 195 | "Overwriting decoder.py\n" 196 | ] 197 | } 198 | ], 199 | "source": [ 200 | "%%writefile decoder.py\n", 201 | "from PIL import Image\n", 202 | "import io\n", 203 | "def decode(sample):\n", 204 | " sample[\"jpg\"] = Image.open(io.BytesIO(sample[\"jpg\"]))\n", 205 | " return sample" 206 | ] 207 | }, 
208 | { 209 | "cell_type": "code", 210 | "execution_count": 52, 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "pool = multiprocessing.Pool(16)\n", 215 | "from decoder import decode" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 53, 221 | "metadata": {}, 222 | "outputs": [ 223 | { 224 | "name": "stdout", 225 | "output_type": "stream", 226 | "text": [ 227 | "4727094 10000\n", 228 | "CPU times: user 11.9 s, sys: 3.91 s, total: 15.8 s\n", 229 | "Wall time: 16.2 s\n" 230 | ] 231 | } 232 | ], 233 | "source": [ 234 | "%%time\n", 235 | "total = 0\n", 236 | "source = dli.gopen.open_source(\"./imagenet_train-0000.tar\", decode=False)\n", 237 | "count = 0\n", 238 | "for sample in pool.imap_unordered(decode, islice(source, 0, 10000)):\n", 239 | " total += sample[\"jpg\"].size[0]\n", 240 | " count += 1\n", 241 | "print(total, count)" 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "metadata": {}, 247 | "source": [ 248 | "# GPU-Based JPEG Decoding" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 54, 254 | "metadata": {}, 255 | "outputs": [ 256 | { 257 | "data": { 258 | "text/plain": [ 259 | "torch.Size([480, 640, 3])" 260 | ] 261 | }, 262 | "execution_count": 54, 263 | "metadata": {}, 264 | "output_type": "execute_result" 265 | } 266 | ], 267 | "source": [ 268 | "from nvidia.dali.pipeline import Pipeline\n", 269 | "import nvidia.dali.ops as ops\n", 270 | "import nvidia.dali.types as types\n", 271 | "import numpy as np\n", 272 | "import nvidia.dali.plugin.pytorch as dalipyt\n", 273 | "\n", 274 | "with open(\"space.jpg\", \"rb\") as stream:\n", 275 | " space = stream.read()\n", 276 | " \n", 277 | "class DecoderPipe(Pipeline):\n", 278 | " def __init__(self, batch_size, num_threads, device_id, pipelined = False, async = False):\n", 279 | " super(DecoderPipe, self).__init__(batch_size,\n", 280 | " num_threads,\n", 281 | " device_id,\n", 282 | " exec_pipelined=pipelined,\n", 283 | " exec_async=async)\n", 284 | " self.input = ops.ExternalSource()\n", 285 | " self.decode = ops.nvJPEGDecoder(device = \"mixed\", output_type = types.RGB)\n", 286 | "\n", 287 | " def define_graph(self):\n", 288 | " self.jpegs = self.input()\n", 289 | " return self.decode(self.jpegs)\n", 290 | "\n", 291 | "pipe = DecoderPipe(1, 1, 0)\n", 292 | "pipe.build()\n", 293 | "\n", 294 | "def dali2torch(dali_tensor):\n", 295 | " import torch\n", 296 | " import ctypes\n", 297 | " assert dali_tensor.dtype() == \"B\"\n", 298 | " tensor = torch.zeros(dali_tensor.shape(), dtype=torch.uint8, device=\"cuda\")\n", 299 | " assert dali_tensor.shape() == list(tensor.shape)\n", 300 | " dali_tensor.copy_to_external(ctypes.c_void_p(tensor.data_ptr()))\n", 301 | " return tensor\n", 302 | "\n", 303 | "def gpudecode(jpeg):\n", 304 | " pipe.feed_input(pipe.jpegs, [jpeg])\n", 305 | " dali_tensors = pipe.run()[0]\n", 306 | " assert len(dali_tensors) == 1\n", 307 | " image = dali2torch(dali_tensors.at(0))\n", 308 | " return image\n", 309 | " \n", 310 | "gpudecode(space).size()" 311 | ] 312 | }, 313 | { 314 | "cell_type": "markdown", 315 | "metadata": {}, 316 | "source": [ 317 | "## One-by-one Decoding" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": 55, 323 | "metadata": {}, 324 | "outputs": [ 325 | { 326 | "name": "stdout", 327 | "output_type": "stream", 328 | "text": [ 329 | "405015\n", 330 | "CPU times: user 3.36 s, sys: 175 ms, total: 3.53 s\n", 331 | "Wall time: 3.53 s\n" 332 | ] 333 | } 334 | ], 335 | "source": [ 336 | "%%time\n", 337 
| "total = 0\n", 338 | "source = dli.gopen.open_source(\"./imagenet_train-0000.tgz\", decode=False)\n", 339 | "for sample in islice(source, 0, 1000):\n", 340 | " image = gpudecode(sample[\"jpg\"])\n", 341 | " total += image.size(0)\n", 342 | "print(total)" 343 | ] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "metadata": {}, 348 | "source": [ 349 | "## Multithreaded Batch Decoding" 350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": 56, 355 | "metadata": {}, 356 | "outputs": [], 357 | "source": [ 358 | "pipe = DecoderPipe(1000, 8, 0)\n", 359 | "pipe.build()\n", 360 | "dali_tensors = None" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": 57, 366 | "metadata": {}, 367 | "outputs": [ 368 | { 369 | "name": "stdout", 370 | "output_type": "stream", 371 | "text": [ 372 | "405015\n", 373 | "CPU times: user 2.63 s, sys: 704 ms, total: 3.34 s\n", 374 | "Wall time: 1.38 s\n" 375 | ] 376 | } 377 | ], 378 | "source": [ 379 | "%%time\n", 380 | "global dali_tensors\n", 381 | "source = dli.gopen.open_source(\"./imagenet_train-0000.tgz\", decode=False)\n", 382 | "jpegs = [sample[\"jpg\"] for sample in islice(source, 0, 1000)]\n", 383 | "pipe.feed_input(pipe.jpegs, jpegs)\n", 384 | "dali_tensors = pipe.run()[0]\n", 385 | "images = [dali2torch(dali_tensors.at(i)) for i in range(len(dali_tensors))]\n", 386 | "images = [a.cpu().numpy() for a in images]\n", 387 | "total = np.sum([a.shape[0] for a in images])\n", 388 | "print(total)" 389 | ] 390 | }, 391 | { 392 | "cell_type": "markdown", 393 | "metadata": {}, 394 | "source": [ 395 | "# Data Augmentation" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | "execution_count": 58, 401 | "metadata": {}, 402 | "outputs": [ 403 | { 404 | "name": "stdout", 405 | "output_type": "stream", 406 | "text": [ 407 | "CPU times: user 943 ms, sys: 0 ns, total: 943 ms\n", 408 | "Wall time: 940 ms\n" 409 | ] 410 | } 411 | ], 412 | "source": [ 413 | "%%time\n", 414 | "augment = transforms.Compose([transforms.RandomResizedCrop(224),\n", 415 | " transforms.RandomHorizontalFlip()])\n", 416 | "for image in images:\n", 417 | " augment(image)" 418 | ] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "execution_count": 59, 423 | "metadata": {}, 424 | "outputs": [ 425 | { 426 | "name": "stdout", 427 | "output_type": "stream", 428 | "text": [ 429 | "CPU times: user 21.4 s, sys: 18 ms, total: 21.4 s\n", 430 | "Wall time: 8.18 s\n" 431 | ] 432 | } 433 | ], 434 | "source": [ 435 | "%%time\n", 436 | "channel_stats = dict(mean=[0.485, 0.456, 0.406],\n", 437 | " std=[0.229, 0.224, 0.225])\n", 438 | "augment = transforms.Compose([\n", 439 | " transforms.RandomRotation(10),\n", 440 | " transforms.RandomResizedCrop(224),\n", 441 | " transforms.RandomHorizontalFlip(),\n", 442 | " transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),\n", 443 | " transforms.ToTensor(),\n", 444 | " transforms.Normalize(**channel_stats)\n", 445 | "])\n", 446 | "for image in images:\n", 447 | " augment(image)" 448 | ] 449 | }, 450 | { 451 | "cell_type": "code", 452 | "execution_count": 60, 453 | "metadata": {}, 454 | "outputs": [], 455 | "source": [ 456 | "arrays = []" 457 | ] 458 | }, 459 | { 460 | "cell_type": "code", 461 | "execution_count": 61, 462 | "metadata": {}, 463 | "outputs": [ 464 | { 465 | "name": "stdout", 466 | "output_type": "stream", 467 | "text": [ 468 | "1000\n", 469 | "CPU times: user 1.65 s, sys: 67.7 ms, total: 1.72 s\n", 470 | "Wall time: 1.72 s\n" 471 | ] 472 | } 473 | ], 474 | "source": [ 475 | 
"%%time\n", 476 | "augment = transforms.Compose([transforms.RandomResizedCrop(224),\n", 477 | " transforms.RandomHorizontalFlip()])\n", 478 | "for image in images:\n", 479 | " a = np.asarray(augment(image)).astype(np.float16)\n", 480 | " arrays.append(a)\n", 481 | "print(len(arrays))" 482 | ] 483 | }, 484 | { 485 | "cell_type": "markdown", 486 | "metadata": {}, 487 | "source": [ 488 | "# Tensor Sending" 489 | ] 490 | }, 491 | { 492 | "cell_type": "code", 493 | "execution_count": 62, 494 | "metadata": {}, 495 | "outputs": [ 496 | { 497 | "name": "stdout", 498 | "output_type": "stream", 499 | "text": [ 500 | "(224, 224, 3)\n", 501 | "CPU times: user 57.7 ms, sys: 8.22 ms, total: 65.9 ms\n", 502 | "Wall time: 64.2 ms\n" 503 | ] 504 | } 505 | ], 506 | "source": [ 507 | "%%time\n", 508 | "serve = tensorcom.Connection(\"zpub://127.0.0.1:7893\")\n", 509 | "for array in arrays:\n", 510 | " serve.send([array, 0])\n", 511 | "serve.close()\n", 512 | "print(array.shape)" 513 | ] 514 | }, 515 | { 516 | "cell_type": "markdown", 517 | "metadata": {}, 518 | "source": [ 519 | "# Tensor Sending with Batching" 520 | ] 521 | }, 522 | { 523 | "cell_type": "code", 524 | "execution_count": 63, 525 | "metadata": {}, 526 | "outputs": [ 527 | { 528 | "name": "stdout", 529 | "output_type": "stream", 530 | "text": [ 531 | "(50, 224, 224, 3)\n", 532 | "CPU times: user 137 ms, sys: 0 ns, total: 137 ms\n", 533 | "Wall time: 136 ms\n" 534 | ] 535 | } 536 | ], 537 | "source": [ 538 | "%%time\n", 539 | "serve = tensorcom.Connection(\"zpub://127.0.0.1:7895\")\n", 540 | "for batch in filters.batched(50)(dict(img=array) for array in arrays):\n", 541 | " serve.send([batch[\"img\"], 0])\n", 542 | "serve.close()\n", 543 | "print(batch[\"img\"].shape)" 544 | ] 545 | }, 546 | { 547 | "cell_type": "markdown", 548 | "metadata": {}, 549 | "source": [ 550 | "# Summary\n", 551 | "\n", 552 | "|Step | s/1000 |\n", 553 | "|------------------|--------|\n", 554 | "|tar decoding | 0.270 |\n", 555 | "|tgz decoding | 0.412 |\n", 556 | "|JPEG decoding | 8.640 |\n", 557 | "|simple aug | 0.920 |\n", 558 | "|complex aug | 7.920 |\n", 559 | "|simple aug + conv | 1.760 |\n", 560 | "|batch + send | 0.158 |" 561 | ] 562 | }, 563 | { 564 | "cell_type": "code", 565 | "execution_count": null, 566 | "metadata": {}, 567 | "outputs": [], 568 | "source": [] 569 | } 570 | ], 571 | "metadata": { 572 | "kernelspec": { 573 | "display_name": "Python 3", 574 | "language": "python", 575 | "name": "python3" 576 | }, 577 | "language_info": { 578 | "codemirror_mode": { 579 | "name": "ipython", 580 | "version": 3 581 | }, 582 | "file_extension": ".py", 583 | "mimetype": "text/x-python", 584 | "name": "python", 585 | "nbconvert_exporter": "python", 586 | "pygments_lexer": "ipython3", 587 | "version": "3.6.7" 588 | } 589 | }, 590 | "nbformat": 4, 591 | "nbformat_minor": 2 592 | } 593 | -------------------------------------------------------------------------------- /docs/microbenchmarks.md: -------------------------------------------------------------------------------- 1 | ```python 2 | %pylab inline 3 | ``` 4 | 5 | Populating the interactive namespace from numpy and matplotlib 6 | 7 | 8 | 9 | ```python 10 | import argparse 11 | import multiprocessing 12 | import sys 13 | from functools import partial 14 | 15 | import dlinputs as dli 16 | import matplotlib as mpl 17 | import matplotlib.pyplot as plt 18 | import numpy as np 19 | import simplejson 20 | import torch 21 | import torch.nn.functional as F 22 | from dlinputs import filters, gopen, improc, utils 23 | from 
matplotlib import cm 24 | from torch import nn, optim 25 | from torchvision import datasets, transforms 26 | from itertools import islice 27 | import time 28 | from PIL import Image 29 | import io 30 | 31 | import tensorcom 32 | ``` 33 | 34 | # I/O 35 | 36 | 37 | ```python 38 | %%time 39 | total = 0 40 | with open("./imagenet_train-0000.tar", "rb") as stream: 41 | while True: 42 | data = stream.read(1000000) 43 | if len(data)==0: break 44 | total += len(data) 45 | print(total/1e6) 46 | ``` 47 | 48 | 1022.95552 49 | CPU times: user 0 ns, sys: 163 ms, total: 163 ms 50 | Wall time: 160 ms 51 | 52 | 53 | # Tar Decoding and Decompressing 54 | 55 | 56 | ```python 57 | %%time 58 | total = 0 59 | source = dli.gopen.open_source("./imagenet_train-0000.tar", decode=False) 60 | for sample in islice(source, 0, 1000): 61 | total += len(sample["jpg"]) 62 | print(total/1e6) 63 | ``` 64 | 65 | 109.936435 66 | CPU times: user 262 ms, sys: 20.3 ms, total: 283 ms 67 | Wall time: 281 ms 68 | 69 | 70 | 71 | ```python 72 | %%time 73 | total = 0 74 | source = dli.gopen.open_source("./imagenet_train-0000.tgz", decode=False) 75 | for sample in islice(source, 0, 1000): 76 | total += len(sample["jpg"]) 77 | print(total/1e6) 78 | ``` 79 | 80 | 109.936435 81 | CPU times: user 394 ms, sys: 19.6 ms, total: 414 ms 82 | Wall time: 412 ms 83 | 84 | 85 | # JPEG Decoding 86 | 87 | 88 | ```python 89 | images = [] 90 | ``` 91 | 92 | 93 | ```python 94 | %%time 95 | total = 0 96 | source = dli.gopen.open_source("./imagenet_train-0000.tgz", decode=dli.utils.autodecoder("PIL")) 97 | for sample in islice(source, 0, 1000): 98 | total += sample["jpg"].size[0] 99 | images.append(sample["jpg"].convert("RGB")) 100 | print(total) 101 | ``` 102 | 103 | 478969 104 | CPU times: user 8.17 s, sys: 305 ms, total: 8.48 s 105 | Wall time: 8.46 s 106 | 107 | 108 | # JPEG Decoding with Multiprocessing 109 | 110 | 111 | ```python 112 | %%writefile decoder.py 113 | from PIL import Image 114 | import io 115 | def decode(sample): 116 | sample["jpg"] = Image.open(io.BytesIO(sample["jpg"])) 117 | return sample 118 | ``` 119 | 120 | Overwriting decoder.py 121 | 122 | 123 | 124 | ```python 125 | pool = multiprocessing.Pool(16) 126 | from decoder import decode 127 | ``` 128 | 129 | 130 | ```python 131 | %%time 132 | total = 0 133 | source = dli.gopen.open_source("./imagenet_train-0000.tar", decode=False) 134 | count = 0 135 | for sample in pool.imap_unordered(decode, islice(source, 0, 10000)): 136 | total += sample["jpg"].size[0] 137 | count += 1 138 | print(total, count) 139 | ``` 140 | 141 | 4727094 10000 142 | CPU times: user 11.9 s, sys: 3.91 s, total: 15.8 s 143 | Wall time: 16.2 s 144 | 145 | 146 | # GPU-Based JPEG Decoding 147 | 148 | 149 | ```python 150 | from nvidia.dali.pipeline import Pipeline 151 | import nvidia.dali.ops as ops 152 | import nvidia.dali.types as types 153 | import numpy as np 154 | import nvidia.dali.plugin.pytorch as dalipyt 155 | 156 | with open("space.jpg", "rb") as stream: 157 | space = stream.read() 158 | 159 | class DecoderPipe(Pipeline): 160 | def __init__(self, batch_size, num_threads, device_id, pipelined = False, async = False): 161 | super(DecoderPipe, self).__init__(batch_size, 162 | num_threads, 163 | device_id, 164 | exec_pipelined=pipelined, 165 | exec_async=async) 166 | self.input = ops.ExternalSource() 167 | self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB) 168 | 169 | def define_graph(self): 170 | self.jpegs = self.input() 171 | return self.decode(self.jpegs) 172 | 173 | pipe = 
DecoderPipe(1, 1, 0) 174 | pipe.build() 175 | 176 | def dali2torch(dali_tensor): 177 | import torch 178 | import ctypes 179 | assert dali_tensor.dtype() == "B" 180 | tensor = torch.zeros(dali_tensor.shape(), dtype=torch.uint8, device="cuda") 181 | assert dali_tensor.shape() == list(tensor.shape) 182 | dali_tensor.copy_to_external(ctypes.c_void_p(tensor.data_ptr())) 183 | return tensor 184 | 185 | def gpudecode(jpeg): 186 | pipe.feed_input(pipe.jpegs, [jpeg]) 187 | dali_tensors = pipe.run()[0] 188 | assert len(dali_tensors) == 1 189 | image = dali2torch(dali_tensors.at(0)) 190 | return image 191 | 192 | gpudecode(space).size() 193 | ``` 194 | 195 | 196 | 197 | 198 | torch.Size([480, 640, 3]) 199 | 200 | 201 | 202 | ## One-by-one Decoding 203 | 204 | 205 | ```python 206 | %%time 207 | total = 0 208 | source = dli.gopen.open_source("./imagenet_train-0000.tgz", decode=False) 209 | for sample in islice(source, 0, 1000): 210 | image = gpudecode(sample["jpg"]) 211 | total += image.size(0) 212 | print(total) 213 | ``` 214 | 215 | 405015 216 | CPU times: user 3.36 s, sys: 175 ms, total: 3.53 s 217 | Wall time: 3.53 s 218 | 219 | 220 | ## Multithreaded Batch Decoding 221 | 222 | 223 | ```python 224 | pipe = DecoderPipe(1000, 8, 0) 225 | pipe.build() 226 | dali_tensors = None 227 | ``` 228 | 229 | 230 | ```python 231 | %%time 232 | global dali_tensors 233 | source = dli.gopen.open_source("./imagenet_train-0000.tgz", decode=False) 234 | jpegs = [sample["jpg"] for sample in islice(source, 0, 1000)] 235 | pipe.feed_input(pipe.jpegs, jpegs) 236 | dali_tensors = pipe.run()[0] 237 | images = [dali2torch(dali_tensors.at(i)) for i in range(len(dali_tensors))] 238 | images = [a.cpu().numpy() for a in images] 239 | total = np.sum([a.shape[0] for a in images]) 240 | print(total) 241 | ``` 242 | 243 | 405015 244 | CPU times: user 2.63 s, sys: 704 ms, total: 3.34 s 245 | Wall time: 1.38 s 246 | 247 | 248 | # Data Augmentation 249 | 250 | 251 | ```python 252 | %%time 253 | augment = transforms.Compose([transforms.RandomResizedCrop(224), 254 | transforms.RandomHorizontalFlip()]) 255 | for image in images: 256 | augment(image) 257 | ``` 258 | 259 | CPU times: user 943 ms, sys: 0 ns, total: 943 ms 260 | Wall time: 940 ms 261 | 262 | 263 | 264 | ```python 265 | %%time 266 | channel_stats = dict(mean=[0.485, 0.456, 0.406], 267 | std=[0.229, 0.224, 0.225]) 268 | augment = transforms.Compose([ 269 | transforms.RandomRotation(10), 270 | transforms.RandomResizedCrop(224), 271 | transforms.RandomHorizontalFlip(), 272 | transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1), 273 | transforms.ToTensor(), 274 | transforms.Normalize(**channel_stats) 275 | ]) 276 | for image in images: 277 | augment(image) 278 | ``` 279 | 280 | CPU times: user 21.4 s, sys: 18 ms, total: 21.4 s 281 | Wall time: 8.18 s 282 | 283 | 284 | 285 | ```python 286 | arrays = [] 287 | ``` 288 | 289 | 290 | ```python 291 | %%time 292 | augment = transforms.Compose([transforms.RandomResizedCrop(224), 293 | transforms.RandomHorizontalFlip()]) 294 | for image in images: 295 | a = np.asarray(augment(image)).astype(np.float16) 296 | arrays.append(a) 297 | print(len(arrays)) 298 | ``` 299 | 300 | 1000 301 | CPU times: user 1.65 s, sys: 67.7 ms, total: 1.72 s 302 | Wall time: 1.72 s 303 | 304 | 305 | # Tensor Sending 306 | 307 | 308 | ```python 309 | %%time 310 | serve = tensorcom.Connection("zpub://127.0.0.1:7893") 311 | for array in arrays: 312 | serve.send([array, 0]) 313 | serve.close() 314 | print(array.shape) 315 | ``` 316 | 317 | 
(224, 224, 3) 318 | CPU times: user 57.7 ms, sys: 8.22 ms, total: 65.9 ms 319 | Wall time: 64.2 ms 320 | 321 | 322 | # Tensor Sending with Batching 323 | 324 | 325 | ```python 326 | %%time 327 | serve = tensorcom.Connection("zpub://127.0.0.1:7895") 328 | for batch in filters.batched(50)(dict(img=array) for array in arrays): 329 | serve.send([batch["img"], 0]) 330 | serve.close() 331 | print(batch["img"].shape) 332 | ``` 333 | 334 | (50, 224, 224, 3) 335 | CPU times: user 137 ms, sys: 0 ns, total: 137 ms 336 | Wall time: 136 ms 337 | 338 | 339 | # Summary 340 | 341 | |Step | s/1000 | 342 | |------------------|--------| 343 | |tar decoding | 0.270 | 344 | |tgz decoding | 0.412 | 345 | |JPEG decoding | 8.640 | 346 | |simple aug | 0.920 | 347 | |complex aug | 7.920 | 348 | |simple aug + conv | 1.760 | 349 | |batch + send | 0.158 | 350 | 351 | 352 | ```python 353 | 354 | ``` 355 | -------------------------------------------------------------------------------- /docs/pydoc.md: -------------------------------------------------------------------------------- 1 | Documentation as generated by 'pydoc3' (default Python documentation 2 | viewer). You can view this at the command line by typing 3 | 'pydoc3 MODULE_NAME'. 4 | # module `tensorcom.__init__` 5 | 6 | ``` 7 | Help on module tensorcom.__init__ in tensorcom: 8 | 9 | NAME 10 | tensorcom.__init__ 11 | 12 | FILE 13 | /home/tmb/proj/tensorcom/tensorcom/__init__.py 14 | 15 | 16 | ``` 17 | 18 | # module `tensorcom.tenbin` 19 | 20 | ``` 21 | Help on module tensorcom.tenbin in tensorcom: 22 | 23 | NAME 24 | tensorcom.tenbin 25 | 26 | FUNCTIONS 27 | bytedata(a) 28 | Return a the raw data corresponding to a. 29 | 30 | bytelen(a) 31 | Determine the length of a in bytes. 32 | 33 | check_acceptable_input_type(data, allow64) 34 | 35 | check_infos(data, infos, required_infos=None) 36 | Implement infos verification logic. 37 | 38 | decode_buffer(buf, infos=False) 39 | Decode a byte array into a list of arrays. 40 | 41 | decode_chunks(buf) 42 | Decode a byte array into a list of chunks. 43 | 44 | decode_header(h) 45 | Decode a byte array into an array header. 46 | 47 | decode_list(l, infos=False) 48 | Given a list of byte arrays, decode them into arrays. 49 | 50 | encode_buffer(l, infos=None) 51 | Encode a list of arrays into a single byte array. 52 | 53 | encode_chunks(l) 54 | Encode a list of chunks into a single byte array, with lengths and magics.. 55 | 56 | encode_header(a, info='') 57 | Encode an array header as a byte array. 58 | 59 | encode_list(l, infos=None) 60 | Given a list of arrays, encode them into a list of byte arrays. 61 | 62 | load(fname, infos=False, nocheck=False) 63 | Read a list of arrays from a file, with magics, length, and padding. 64 | 65 | read(stream, n=999999, infos=False) 66 | Read a list of arrays from a stream, with magics, length, and padding. 67 | 68 | read_chunk(stream) 69 | Read a byte chunk from a stream with magics, length, and padding. 70 | 71 | roundup(n, k=64) 72 | Round up to the next multiple of 64. 73 | 74 | save(fname, *args, infos=None, nocheck=False) 75 | Save a list of arrays to a file, with magics, length, and padding. 76 | 77 | sctp_recv(socket, infos=False, maxsize=100000000) 78 | Receive arrays as an SCTP datagram. 79 | 80 | This is just a convenience function and illustration. 81 | For more complex networking needs, you may want 82 | to call sctp_recv and decode_buffer directly. 83 | 84 | sctp_send(socket, dest, l, infos=None) 85 | Send arrays as an SCTP datagram. 
86 | 87 | This is just a convenience function and illustration. 88 | For more complex networking needs, you may want 89 | to call encode_buffer and sctp_send directly. 90 | 91 | str64(s) 92 | Convert a string to an int64. 93 | 94 | unstr64(i) 95 | Convert an int64 to a string. 96 | 97 | write(stream, l, infos=None) 98 | Write a list of arrays to a stream, with magics, length, and padding. 99 | 100 | write_chunk(stream, buf) 101 | Write a byte chunk to the stream with magics, length, and padding. 102 | 103 | zrecv_multipart(socket, infos=False) 104 | Receive arrays as a multipart ZMQ message. 105 | 106 | zrecv_single(socket, infos=False) 107 | Receive arrays as a single part ZMQ message. 108 | 109 | zsend_multipart(socket, l, infos=None) 110 | Send arrays as a multipart ZMQ message. 111 | 112 | zsend_single(socket, l, infos=None) 113 | Send arrays as a single part ZMQ message. 114 | 115 | DATA 116 | long_to_short = {'float16': 'f2', 'float32': 'f4', 'float64': 'f8', 'i... 117 | magic = 9110334830257984638 118 | magic_bytes = b'~TenBin~' 119 | magic_str = '~TenBin~' 120 | short_to_long = {'f2': 'float16', 'f4': 'float32', 'f8': 'float64', 'i... 121 | 122 | FILE 123 | /home/tmb/proj/tensorcom/tensorcom/tenbin.py 124 | 125 | 126 | ``` 127 | 128 | # module `tensorcom.zcom` 129 | 130 | ``` 131 | Help on module tensorcom.zcom in tensorcom: 132 | 133 | NAME 134 | tensorcom.zcom 135 | 136 | CLASSES 137 | builtins.object 138 | Connection 139 | Statistics 140 | 141 | class Connection(builtins.object) 142 | | Connection(url=None, epoch=100000, total=-1, multipart=True, infos=None, device=None, allow64=False, raw=False, batch_transforms=None, batch_count=True, converters=None, report=-1, stats_horizon=1000, noexpand=False) 143 | | 144 | | A class for sending/receiving tensors via ZMQ sockets. 145 | | 146 | | Methods defined here: 147 | | 148 | | __init__(self, url=None, epoch=100000, total=-1, multipart=True, infos=None, device=None, allow64=False, raw=False, batch_transforms=None, batch_count=True, converters=None, report=-1, stats_horizon=1000, noexpand=False) 149 | | Initialize a connection. 150 | | 151 | | :param url: ZMQ-URL to connect to (Default value = None) 152 | | :param epoch: length of an epoch, for len() (Default value = 100000) 153 | | :param total: total number of samples (Default value = -1) 154 | | :param multipart: send tensors in multipart messages (Default value = True) 155 | | :param infos: info fields for tensors (Default value = [""]) 156 | | :param device: device placement for tensors (None=numpy, else torch) (Default value = None) 157 | | :param allow64: allow 64bit values on sending (Default value = False) 158 | | :param raw: return undecoded tensor data (Default value = False) 159 | | :param batch_transforms: list of functions to apply to each sample (Default value = None) 160 | | :param converters: list of functions to apply after batch_transforms (Default value = None) 161 | | :param report: how frequently to report stats when iterating (Default value = -1) 162 | | :param stats_horizon: horizon for computing stats (Default value = 1000) 163 | | :param noexpand: do not expand braces in URLs (Default value = False) 164 | | 165 | | __iter__(self) 166 | | Receive data through an iterator 167 | | 168 | | __len__(self) 169 | | Returns the value of `epoch` given at initialization. 170 | | 171 | | This allows the Connection object to be used directly as a dataloader 172 | | and communicates the epoch size to frameworks that need it. 
173 | | The `len`/`epoch` value is otherwise unused. 174 | | 175 | | batchsize(self, xs) 176 | | 177 | | close(self) 178 | | Close the connection. 179 | | 180 | | connect(self, url, topic='') 181 | | Explicitly connect to a ZMQ socket. 182 | | 183 | | :param url: ZMQ-URL to connect to (Default value = "") 184 | | :param topic: topic to subscribe to for SUB sockets (Default value = "") 185 | | 186 | | items(self, report=-1) 187 | | Receive data through an iterator 188 | | 189 | | recv(self) 190 | | Receive data from the connection. 191 | | 192 | | send(self, data, allow64=False) 193 | | Send data over the connection. 194 | | 195 | | :param data: list of tensors (Default value = False) 196 | | :param allow64: allow 64 bit data (Default value = False) 197 | | 198 | | serve(self, source, report=-1) 199 | | Serve data from an iterator. 200 | | 201 | | :param source: iterator yielding lists/tuples of tensors 202 | | :param report: how often to report statistics (Default value = -1) 203 | | 204 | | write(self, data) 205 | | Alias for send. 206 | | 207 | | ---------------------------------------------------------------------- 208 | | Data descriptors defined here: 209 | | 210 | | __dict__ 211 | | dictionary for instance variables (if defined) 212 | | 213 | | __weakref__ 214 | | list of weak references to the object (if defined) 215 | 216 | class Statistics(builtins.object) 217 | | Statistics(horizon=1000) 218 | | 219 | | Compute running statistics over numbers, including rates. 220 | | 221 | | Methods defined here: 222 | | 223 | | __init__(self, horizon=1000) 224 | | :param horizon: (Default value = 1000) 225 | | 226 | | add(self, x) 227 | | Add a value to the statistics. 228 | | 229 | | :param x: value to add 230 | | 231 | | rate(self) 232 | | Compute the rate. 233 | | 234 | | recent_rate(self) 235 | | Compute the recent rate. 236 | | 237 | | recent_throughput(self) 238 | | Compute the recent throughput. 239 | | 240 | | reset(self) 241 | | Reset the statistics 242 | | 243 | | summary(self) 244 | | Return a summary of recent statistics. 245 | | 246 | | throughput(self) 247 | | Compute the throughput. 248 | | 249 | | ---------------------------------------------------------------------- 250 | | Data descriptors defined here: 251 | | 252 | | __dict__ 253 | | dictionary for instance variables (if defined) 254 | | 255 | | __weakref__ 256 | | list of weak references to the object (if defined) 257 | 258 | FUNCTIONS 259 | listify(x) 260 | Turn argument into a list. 261 | 262 | This is a convenience function that allows strings 263 | to be used as a shorthand for [string] in some arguments. 264 | 265 | Returns None for None. 266 | Returns a list for a list or tuple. 267 | Returns [x] for anything else. 268 | 269 | :param x: value to be listified. 270 | 271 | tonumpy(dtype=None, transpose=True) 272 | Curried any-to-numpy converter. 273 | 274 | :param dtype: desired dtype (Default value = None) 275 | :param transpose: whether to transpose images from PyTorch (Default value = True) 276 | 277 | totorch(dtype=None, device='cpu', transpose=True) 278 | Curried any-to-torch converter. 279 | 280 | :param dtype: desired dtype (Default value = None) 281 | :param device: desired device placement (Default value = "cpu") 282 | :param transpose: transpose images to PyTorch conventions (Default value = True) 283 | 284 | transform_with(sample, transformers) 285 | Given a list of values and functions, apply functions to values. 286 | 287 | This does nothing if the list of functions is None or empty. 
288 |         If there are fewer transformers than the length of the list, it wraps around.
289 | 
290 |         :param sample: list of values
291 |         :param transformers: list of functions to apply to values
292 | 
293 | DATA
294 |     converter_table = {'numpy': <function tonumpy.<locals>.f>, 'torch': <function totorch.<locals>.f>}
295 | 
296 | FILE
297 |     /home/tmb/proj/tensorcom/tensorcom/zcom.py
298 | 
299 | 
300 | ```
--------------------------------------------------------------------------------
/docs/pytorch.ipynb:
--------------------------------------------------------------------------------
[lines 1-16 of this notebook were lost during text extraction: an unclosed
angle bracket in the pydoc listing above swallowed the intervening text. The
lost lines open the JSON cell list and a markdown header cell whose text
matches the beginning of docs/pytorch.md below.]
17 |     "    curl http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-0000-000.tgz > training.tgz\n",
18 |     "    serve-imagenet -u training.tgz -b 64\n",
19 |     "```\n",
20 |     "\n",
21 |     "Or, if you have a local clone of the entire dataset:\n",
22 |     "\n",
23 |     "```\n",
24 |     "    serve-imagenet -u http://my-server/some-path/imagenet_train-{0000..0147}-{000..019}.tgz\n",
25 |     "```"
26 |    ]
27 |   },
28 |   {
29 |    "cell_type": "code",
30 |    "execution_count": 1,
31 |    "metadata": {},
32 |    "outputs": [],
33 |    "source": [
34 |     "%matplotlib inline"
35 |    ]
36 |   },
37 |   {
38 |    "cell_type": "code",
39 |    "execution_count": 2,
40 |    "metadata": {},
41 |    "outputs": [],
42 |    "source": [
43 |     "from importlib import reload\n",
44 |     "\n",
45 |     "import os\n",
46 |     "import numpy as np\n",
47 |     "\n",
48 |     "import torch\n",
49 |     "import torch.nn as nn\n",
50 |     "import torch.nn.parallel\n",
51 |     "import torch.backends.cudnn as cudnn\n",
52 |     "import torch.distributed as dist\n",
53 |     "import torch.optim\n",
54 |     "from torchvision import models\n",
55 |     "\n",
56 |     "from webdataset import WebDataset\n",
57 |     "from torch.utils.data import DataLoader\n",
58 |     "import tensorcom as tc\n",
59 |     "\n",
60 |     "from IPython import display\n",
61 |     "import matplotlib.pyplot as plt\n"
62 |    ]
63 |   },
64 |   {
65 |    "cell_type": "code",
66 |    "execution_count": 3,
67 |    "metadata": {},
68 |    "outputs": [],
69 |    "source": [
70 |     "mean = np.array([0.485, 0.456, 0.406], \"f\")\n",
71 |     "std = np.array([0.229, 0.224, 0.225], \"f\")\n",
72 |     "def norm_image(xs):\n",
73 |     "    return (xs-mean[None,None,:])/std[None,None,:]\n",
74 |     "def norm_cls(ys):\n",
75 |     "    ys = ys.astype(np.int64)\n",
76 |     "    return ys-1"
77 |    ]
78 |   },
79 |   {
80 |    "cell_type": "code",
81 |    "execution_count": 4,
82 |    "metadata": {},
83 |    "outputs": [
84 |     {
85 |      "name": "stdout",
86 |      "output_type": "stream",
87 |      "text": [
88 |       "torch.Size([32, 3, 224, 224]) torch.float32 -2.1179039478302 1136.357177734375\n",
89 |       "torch.Size([32]) torch.int64 33 998\n"
90 |      ]
91 |     }
92 |    ],
93 |    "source": [
94 |     "#urls = \"http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-{0000..0147}-{000..019}.tgz\"\n",
95 |     "num_samples = 1000000\n",
96 |     "training = tc.Connection(\"zsub://127.0.0.1:7880\",\n",
97 |     "                         epoch=num_samples,\n",
98 |     "                         batch_transforms=[norm_image, norm_cls],\n",
99 |     "                         converters=\"torch\")\n",
100 |     "xs, ys = next(iter(training))\n",
101 |     "print(xs.shape, xs.dtype, xs.min().item(), xs.max().item())\n",
102 |     "print(ys.shape, ys.dtype, ys.min().item(), ys.max().item())"
103 |    ]
104 |   },
105 |   {
106 |    "cell_type": "code",
107 |    "execution_count": 5,
108 |    "metadata": {},
109 |    "outputs": [],
110 |    "source": [
111 |     "model = models.resnet50()\n",
112 |     "criterion = nn.CrossEntropyLoss().cuda()\n",
113 |     "optimizer = torch.optim.SGD(model.parameters(), 0.0001, momentum=0.9, weight_decay=0.0)"
114 |    ]
115 |   },
116 |   {
117 |    "cell_type": "code",
118 |    "execution_count": 6,
119 |    "metadata": {},
120 |    "outputs": [
121 |     {
122 |      "data": {
123 |       "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAX0AAAD8CAYAAACb4nSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzt3Xl0lPXd/vH3JzsJIQESwr7voGwBwiZW6wK1oK0iSEWsGlBba318rO2v7WOfLk+trVW0IIuKUAVEUWldoRVBJELYdwibrCGChH1J8v39MYMnpoEkkOSe5XqdM4eZ79yTuXKfwzV3vnMv5pxDRETCQ4TXAUREpPqo9EVEwohKX0QkjKj0RUTCiEpfRCSMqPRFRMJImaVvZu3MbFWx21Eze7jEMmZm48wsx8zWmFn3Ys/9yczWm9lG/zJWFb+IiIiULaqsBZxzm4GuAGYWCewF3iqx2CCgjf/WG5gA9DazvkA/4Er/cp8CA4EFlZBdREQqqMzSL+FaYJtzbleJ8aHANOc70ivLzJLNrAHggDggBjAgGsi9zMwiInKJKlr6w4EZpYw3AnYXe7wHaOScW2JmHwP78ZX+8865jRd7g5SUFNe8efMKxhIRCW/Lly//0jmXWtZy5S59M4sBhgA/L+3pUsacmbUGOgCN/WPzzOwq59zCEj87E8gEaNq0KdnZ2eWNJSIigJmVnIEpVUX23hkErHDOlTY9swdoUuxxY2AfcAuQ5Zw77pw7DrwPZJR8sXNuknMu3TmXnppa5geViIhcooqU/ghKn9oBmAuM8u/FkwHkO+f2A18AA80sysyi8X2Je9HpHRERqTrlmt4xs3jgOmBMsbGxAM65F4D3gMFADnASuNu/2BvANcBafF/qfuCc+0dlhRcRkYopV+k7504CdUuMvVDsvgMeLOV1hRT7oBAREW/piFwRkTCi0hcRCSMqfRGRMBIypV9Y5PjDexvZ89VJr6OIiASskCn9Lw6fZObSL7h9Yha7D6v4RURKEzKl3yIlgdfuy+D4mQJun7iEXYdOeB1JRCTghEzpA3RulMRr9/Xm1LlCbp+Yxfa8415HEhEJKCFV+gCdGiYxIzODc4VFDJ+URc5BFb+IyHkhV/oA7evXYmZmBkUOhk/KYkvuMa8jiYgEhJAsfYA2aYnMzMwgwmDEpCw2HTjqdSQREc+FbOkDtK5Xk1lj+hAdGcGISVms35fvdSQREU+FdOmDb6+eWWMyqBEdyR2TP2fdXhW/iISvkC99gGZ1E5g1pg+JcVHcMTmL1buPeB1JRMQTYVH6AE3qxDNrTB+S42P4wZTPWfHFV15HEhGpdmFT+gCNkmswMzODOjVjGPXiUpbvOux1JBGRahVWpQ/QMLkGszL7kJoYy6gXl7Jsp4pfRMJH2JU+QP2kOGZlZpCWFMddLy3l8+2HvI4kIlItwrL0AerVimNmZgYNk2sw+uVlfLbtS68jiYhUubAtfYB6ib7ib1KnBj+cuozFOSp+EQltYV36ACk1Y5lxXwbN6ybww6nLWLglz+tIIiJVJuxLH6BuzVheuy+DVqk1uXdaNh9vPuh1JBGRKqHS96uTEMNr9/WmbVpNxkxbzr835XodSUSk0qn0i0mOj+HVezJo3yCRMdOXM3+Dil9EQotKv4Sk+Gim39Objg2TuP/V5Xy4/oDXkUREKo1KvxRJNaKZfk8vOjdK4sFXV/D+2v1eRxIRqRQq/QuoFRfNtB/2okuTZH40YyXvrlHxi0jwU+lfRGJcNK/8sBfdmybz0MyVzF29z+tIIiKXpczSN7N2Zraq2O2omT1cYhkzs3FmlmNma8ysu3/8WyVee9rMbq6qX6Yq1IyNYurdvejRrDYPz1zJ2yv3eh1JROSSRZW1gHNuM9AVwMwigb3AWyUWGwS08d96AxOA3s65j4u9tg6QA3xUWeGrS0JsFFPv7sk9U7N55PVVFDnH97o39jqWiEiFVXR651pgm3NuV4nxocA055MFJJtZgxLL3Aq875w7eYlZPRUfE8VLo3vSp1Vd/mv2amZn7/Y6kohIhVW09IcDM0oZbwQUb8E9/rHyvBYzyzSzbDPLzssL3NMg1IiJ5MW7etK/dQqPvbmGWcu+8DqSiEiFlLv0zSwGGALMLu3pUsZcsdc2AK4APiztZzvnJjnn0p1z6ampqeWN5Im46Egmj0rnqjap/OzNtbz2uYpfRIJHRbb0BwErnHOlHaa6B2hS7HFjoPiuLsOAt5xz5yoeMfDERUcy8c4efKtdKr94ay3Tl+z0OpKISLlUpPRHcIHpGWAuMMq/F08GkO+cK75j+8VeG5TioiN54c4efLtDPX71znqmLt7hdSQRkTKVq/TNLB64DphTbGysmY31P3wP2I5v75zJwAPFlmuO76+ATyolcQCJjYpk/MgeXN8xjSf+sYEpi7Z7HUlE5KLMOVf2UtUoPT3dZWdnex2jQs4VFvHQjJW8v+4AvxjcnsyrWnkdSUTCjJktd86ll7WcjsitBNGREYwb0Y3vXNGAP7y3iQkLtnkdSUSkVGUenCXlEx0ZwbPDuxIRYTz5wSYKi4r40TVtvI4lIvINKv1KFBUZwV+HdSHS4M8fbaGgyPHwt9t6HUtE5Gsq/UoWFRnBX4Z1JTIigmfmb6WwyPHIdW0xK+1QBhGR6qXSrwKREcZTt15JVITx3L9zKChyPHZDOxW/iHhOpV9FIiKM//veFURGGhMWbKOgsIhfDO6g4hcRT6n0q1BEhPH7mzsTFWFMXrSDgiLHr2/qqOIXEc+o9KuYmfGbIZ2IiojgpcU7KCxyPPHdTkREqPhFpPqp9KuBmfGrmzoQFWlMWridgiLH74Z2VvGLSLVT6VcTM+Png9oTFWGMX7CNwkLH/33vChW/iFQrlX41MjP++4Z2REVGMO5fWzlXVMRTt3YhUsUvItVEpV/NzIxHrmtLVITx9LwtFBQ6nh7WhahInRFDRKqeSt8jD13bhujICJ78YBMFRUU8O7wb0Sp+EaliKn0P3X91K6Ijjd+9u5FzhSt4/o5uxEZFeh1LREKYNi09du+Alvzv0E7M25DL2OnLOX2u0OtIIhLCVPoBYFSf5vzf965gwZY87n0lm1NnVfwiUjVU+gFiRK+mPHVrFz7b9iV3T13KiTMFXkcSkRCk0g8gt/ZozF9v78qynV9x10tLOXY6JK4jLyIBRKUfYIZ2bcRzI7qxavcR7nxxKfmnVPwiUnlU+gFo8BUNGD+yO+v35TNyShZfnTjrdSQRCREq/QB1faf6TBqVztbc4wyflEXesTNeRxKREKDSD2DfalePl0f35IvDJ7l90hIO5J/2OpKIBDmVfoDr2zqFaff04uDRMwybuIQ9X530OpKIBDGVfhDo2bwOf7+3N0dOnmXYC0vY+eUJryOJSJBS6QeJrk2SmZGZwemCIoZNXMLW3GNeRxKRIKTSDyKdGiYxMzMDBwyflMWGfUe9jiQiQabM0jezdma2qtjtqJk9XGIZM7NxZpZjZmvMrHux55qa2UdmttHMNphZ88r/NcJH27REZmVmEB
MVwYjJWazZc8TrSCISRMosfefcZudcV+dcV6AHcBJ4q8Rig4A2/lsmMKHYc9OAp5xzHYBewMHKCB7OWqbW5PUxfahVI4qRkz8ne+dhryOJSJCo6PTOtcA259yuEuNDgWnOJwtINrMGZtYRiHLOzQNwzh13zmn3k0rQpE48r4/pQ2piLHe+uJRPt37pdSQRCQIVLf3hwIxSxhsBu4s93uMfawscMbM5ZrbSzJ4ys/84YbyZZZpZtpll5+XlVTBS+GqQVINZY/rQrG48P5y6jPkbcr2OJCIBrtylb2YxwBBgdmlPlzLm8F2kZQDwKNATaAmM/o8FnZvknEt3zqWnpqaWN5IAqYmxzMzMoEODRMb+fTn/WL3P60giEsAqsqU/CFjhnCttc3IP0KTY48bAPv/4SufcdudcAfA20L2U18tlSI6P4e/39qZ7s9o8NHMlry/bXfaLRCQsVaT0R1D61A7AXGCUfy+eDCDfObcfWAbUNrPzm+/XABsuOa1cUGJcNK/c3YsBbVJ57M01TF28w+tIIhKAylX6ZhYPXAfMKTY21szG+h++B2wHcoDJwAMAzrlCfFM7/zKztfimgSZXWnr5hhoxkUwe1YMbOqXxxD828LePc7yOJCIBxpxzXmf4hvT0dJedne11jKBWUFjEo7NX8/aqfTz4rVY8en07zEr72kVEQoWZLXfOpZe1XFR1hJHqFRUZwdPDulIjJoq/fbyNE2cK+fVNHYmIUPGLhDuVfoiKiDD+cEtnEmIimfLpDo6fKeCP37uCqEideUMknKn0Q5iZ8f++04FaNaJ5et4Wjp0+x7gR3YiN+o9DJUQkTGizL8SZGQ9d24b/+W5HPlyfy72vZHPiTIHXsUTEIyr9MHF3vxb8+bYuLM75kh+8+Dn5J3XBdZFwpNIPI7f2aMz4kT1Yv/cot09awsFjuvyiSLhR6YeZGzvX56XRPdl16CTDXtDlF0XCjUo/DPVvk8Lf7+3N4RNnue2FJeQcPO51JBGpJir9MNWjWW1mjenDuULHsIlLWLsn3+tIIlINVPphrEODWswe24ca0ZGMmJzFZ9t0Tn6RUKfSD3MtUhJ44/4+NEiKY/RLy/hg3QGvI4lIFVLpCw2SavD6mD50alSLB15dzqxlX3gdSUSqiEpfAKidEMOr9/amf5tUfvbmWiYs2EagnYxPRC6fSl++Fh8TxZRR6Qzp0pAnP9jEH97bqOIXCTE69458Q0xUBM/c3pXa8dFMXrSDwyfO8eT3daI2kVCh0pf/EBFhPDGkE7UTYnhm/lbyT53l+Tu6ExetE7WJBDttvkmpzIyHv92W3w7txL82HWTUi0vJP6Xz9YgEO5W+XNSdfZozbng3Vu7+itsnLuFAvs7XIxLMVPpSpu92acjLo3ux+/BJvj/hM3IOHvM6kohcIpW+lEv/NinMGtOHMwVF3PrCEpbv+srrSCJyCVT6Um6dGyUx5/6+1I6PYeSULOZvyPU6kohUkEpfKqRp3XjeGNuHdmmJZE7PZuZSHb0rEkxU+lJhdWvG8tp9GVzVNpXH56zl2flbdRCXSJBQ6cslSYiNYvKodL7fvTF/nb+F//f2OgqLVPwigU4HZ8kli46M4M+3XUlarVjGL9hG3rEzjBvejRoxOohLJFBpS18ui5nx2I3t+c2QTszfmMuIyVkcOn7G61gicgFllr6ZtTOzVcVuR83s4RLLmJmNM7McM1tjZt2LPVdY7LVzq+KXEO/d1bc5E0b2YOP+o3xvwmdsz9MlGEUCUZml75zb7Jzr6pzrCvQATgJvlVhsENDGf8sEJhR77tT51zvnhlRSbglAN3auz4zMDI6dLuD7Ez5j+a7DXkcSkRIqOr1zLbDNOberxPhQYJrzyQKSzaxBpSSUoNK9aW3m3N+XpBrRjJj8Oe+v3e91JBEppqKlPxyYUcp4I2B3scd7/GMAcWaWbWZZZnZzaT/UzDL9y2Tn5eVVMJIEmuYpCcx5oB+dG9bigddWMGXRdq8jiYhfuUvfzGKAIcDs0p4uZez8/ntNnXPpwB3AM2bW6j8WdG6Scy7dOZeemppa3kgSwOokxPDafRnc0LE+v3t3I0/MXa9dOkUCQEW29AcBK5xzpR17vwdoUuxxY2AfgHPu/L/bgQVAt0tKKkEnLjqSv43szj39WzD1s5088OpyTp0t9DqWSFirSOmPoPSpHYC5wCj/XjwZQL5zbr+Z1TazWAAzSwH6ARsuK7EElcgI41c3deTXN3Xkow25DJ+cRd4x7dIp4pVylb6ZxQPXAXOKjY01s7H+h+8B24EcYDLwgH+8A5BtZquBj4E/OudU+mHoh/1bMGFkDzYfOMrNf1vM5gM6PbOIFyzQzpmSnp7usrOzvY4hVWTtnnzueWUZJ88W8vwd3bi6XT2vI4mEBDNb7v/+9KJ0RK5UqysaJ/HOj/rRtE48P5y6jGlLdnodSSSsqPSl2jVIqsHssX24pn09fv3Oeu3ZI1KNVPriiYTYKCbemf71nj33Tcvm+JkCr2OJhDyVvnjm/J49v7+lM59syePWCZ+x98gpr2OJhDSVvnhuZO9mTL27J3u/OsXNf1vMqt1HvI4kErJU+hIQBrRJZc4DfYmLjmDYxCW8vXKv15FEQpJKXwJGm7RE3nmwP92bJvPwrFX88f1N+oJXpJKp9CWg1EmIYfo9vRnZuykvfLKNzGnZHDt9zutYIiFDpS8BJzoygt/fcgW/vbkzC7bk8b3xn7Hr0AmvY4mEBJW+BKw7M5ox/Z5e5B0/w9C/LeaznC+9jiQS9FT6EtD6tkrhnQf7kVozljtfWsr0JTu9jiQS1FT6EvCa1U1gzgN9+Va7VH71znp+8dZazhYUeR1LJCip9CUoJMZFM/HOdO6/uhWvff4Fd0zO4uDR017HEgk6Kn0JGpERxs9ubM9zI7qxft9RbnruU118XaSCVPoSdL7bpSFvPdiXGjGRDJ+Uxd+zdhFopwgXCVQqfQlK7evXYu6D/enXOoVfvr2On725htPndClGkbKo9CVoJcVH8+JdPfnxNa15PXsPt09cwj6dsE3kolT6EtQiI4z/ur4dE+/swba8E3z3uU/J2n7I61giAUulLyHhhk71efvBfiTFRzNyyudMWbRd8/wipVDpS8hoXa8m7zzYj2vb1+N3727kgVdX6Lw9IiWo9CWk+Pbn78HPB7Xnow25fPe5T9mw76jXsUQChkpfQo6ZMWZgK2bcl8HJs4XcMn4xry/b7XUskYCg0peQ1atFHd59aAA9mtXmsTfX8Ojs1Zw6q906Jbyp9CWkpSbGMv2e3jx0TWveXLGHW8YvZnveca9jiXhGpS8hLzLCeOT6drw8uie5R08z5PnFvLtmv9exRDyh0pewcXW7erz70ADapNXkwddW8Ot31ukoXgk7ZZa+mbUzs1XFbkfN7OESy5iZjTOzHDNbY2bdSzxfy8z2mtnzlf0LiFREw+QazMrswz39WzBtyS5uGf8ZOQc13SPho8zSd85tds51dc51BXoAJ4G3Siw2CGjjv2UCE0o8/1vgk8uPK3L5YqIi+NVNHXlpdDq5R0/z3
ec+5fXs3TqYS8JCRad3rgW2Oed2lRgfCkxzPllAspk1ADCzHkAa8NFlpxWpRNe0T+P9nwyga5NkHntjDT+ZuUoHc0nIq2jpDwdmlDLeCCi+I/QeoJGZRQB/Af770uKJVK20WnH8/d7ePHp9W95du5/vjPuU1buPeB1LpMqUu/TNLAYYAswu7elSxhzwAPCec+6iR8aYWaaZZZtZdl5eXnkjiVSKyAjjR9e04fUxGRQWOb4/4TMmLdxGUZGmeyT0VGRLfxCwwjmXW8pze4AmxR43BvYBfYAfmdlO4M/AKDP7Y8kXO+cmOefSnXPpqampFYgkUnl6NKvDew8N4LqOafzhvU2MnrqMg8d0SUYJLRUp/RGUPrUDMBdfoZuZZQD5zrn9zrmRzrmmzrnmwKP45v0fv7zIIlUnKT6a8SO78/tbOvP59kPc+MwiPlx/wOtYIpWmXKVvZvHAdcCcYmNjzWys/+F7wHYgB5iMb1pHJCiZGSN7N+PdhwbQKLkGY6Yv579nr+b4mQKvo4lcNgu03dTS09Nddna21zFEADhbUMS4f21l/IIcGtWuwdPDutKzeR2vY4n8BzNb7pxLL2s5HZErchExURE8ekM7Zo/tg2HcPnEJf/pgE2cLiryOJnJJVPoi5dCjWR3e+8kAbuvRhPELtnHL+MVszT3mdSyRClPpi5RTzdgonrz1Sibe2YP9+ae56blPeenTHdq1U4KKSl+kgm7oVJ8PH76K/q1T+N9/bmD45Cx2fnnC61gi5aLSF7kEqYmxTLkrnaduvZKN+49y47MLtdUvQUGlL3KJzIzb0psw76cD6dvKt9V/+6Ql7NBWvwQwlb7IZaqfFMeLd6Xzl9u6sPnAMW58ZiFTFm2nUFv9EoBU+iKVwMz4fo/GzHtkIP1bp/C7dzcybOIStunSjBJgVPoilSitVhxT7krnr7d3IefgcQY/u4gXPtnGuULt1y+BQaUvUsnMjFu6NWbeT69iYNtU/vj+JoY8v5hVOmWzBACVvkgVqVcrjkmj0nnhBz04fOIMt4xfzBNz1+tCLeIplb5IFbuxc33mPzKQURnNeGXJTq57eqHO3CmeUemLVIPEuGh+M7Qzc+7vS3J8NGOmLydzWjb78095HU3CjEpfpBp1a1qbf/y4P48Pas/CrXlc9/RCpi7eod07pdqo9EWqWXRkBGMHtmLeTwfSvVltnvjHBoY8/ynLdx32OpqEAZW+iEea1Innlbt78vwd3Th84izfn7CER15fpUs0SpVS6Yt4yMy46cqGzH9kIA9c3Yp/rt7PNX/+hCmLtmvffqkSKn2RAJAQG8VjN7bnw59eRXrz2vzu3Y0MfnYRn+V86XU0CTEqfZEA0iIlgZdH92TKqHROFxRyx5TPefDVFew7or18pHJEeR1ARL7JzPh2xzT6t0lh0sLt/O3jHP61KZfMAS0ZM7AVCbH6byuXTlv6IgEqLjqSh65tw/xHBvLtDmmM+3cOV/95ATOXfqFdPOWSqfRFAlyTOvE8f0d35jzQl6Z14nl8zloGP7uIT7bkeR1NgpBKXyRIdG9amzfG9mH8yO6cOlfIXS8tZdRLS9l04KjX0SSIqPRFgoiZMfiKBsx75Cp++Z0OrN59hMHPLuLxN9dw8Kj275eymXOBNTeYnp7usrOzvY4hEhSOnDzLuH/lMD1rJ5ERxl19mzP2qlbUTojxOppUMzNb7pxLL3M5lb5I8Nt16AR/nbeFd1bvo2ZMFPcOaMk9A1pQU3v6hI3yln6Z0ztm1s7MVhW7HTWzh0ssY2Y2zsxyzGyNmXX3jzczs+X+1603s7GX/iuJyIU0q5vAM8O78cFPrqJPq7r8df4WrvrTx0xeuJ3T5wq9jicBpEJb+mYWCewFejvndhUbHwz8GBgM9Aaedc71NrMY/3ucMbOawDqgr3Nu34XeQ1v6Ipdv9e4j/PmjzSza+iVptWL58TVtGJbehJgofY0XqiptS7+Ea4FtxQvfbygwzflkAclm1sA5d9Y5d8a/TOwlvJ+IXIIuTZKZfk9vZmZm0KR2PL98ex3XPr2A17N365w+Ya6iJTwcmFHKeCNgd7HHe/xjmFkTM1vjf/7Ji23li0jlymhZl9lj+/Dy6J4k1YjmsTfWcPVTC5ietUvTPmGq3KXvn6oZAswu7elSxhyAc263c+5KoDVwl5mllfKzM80s28yy8/J0wIlIZTIzvtW+Hv/4UX9eHt2TerVi+dXb6xj41Me8+OkOTp1V+YeTimzpDwJWOOdyS3luD9Ck2OPGwDe26P1b+OuBASVf7Jyb5JxLd86lp6amViCSiJTX+fKfc39fXr23Ny1SEvjtPzfQ/8l/M2HBNo6fKfA6olSDipT+CEqf2gGYC4zy78WTAeQ75/abWWMzqwFgZrWBfsDmy0osIpfFzOjXOoWZmX2YPbYPnRol8eQHm+j3x3/zzPwtHD5x1uuIUoXKtfeOmcXjm5Nv6ZzL94+NBXDOvWBmBjwP3AicBO52zmWb2XXAX/BN9RjwvHNu0sXeS3vviFS/1buP8PzHOczbkEtcdAS39mjMPf1b0iIlwetoUk46OEtEKmxr7jGmLNrBWyv3cq6oiG93SOO+AS3p2bw2vm07CVQqfRG5ZAePnWb6kl1Mz9rFkZPn6NI4iXsHtGRQ5/pERWrP60Ck0heRy3bqbCFvrNjDS5/uYMeXJ2iUXIO7+zXnth5NSIqP9jqeFKPSF5FKU1jkmL8xlymLtrNs51fERUcwpEtD7sxozhWNk7yOJ6j0RaSKrN+Xz9+zvuDtlXs5da6QLk2SGZXRjO9c2YC46Eiv44Utlb6IVKmjp88xZ/kepmftYlveCWrHRzMsvQkjezejad14r+OFHZW+iFQL5xxLth1ietYuPtqQS5Fz9G+dwq09GnNDp/ra+q8mKn0RqXYH8k8zc9kXzM7ew94jp6gVF8WQrg25rUcTrmycpN0+q5BKX0Q8U1TkWLL9ELOzd/P+ugOcKSiiXVoit6U35uZujUipGet1xJCj0heRgJB/6hz/XLOP2dl7WLX7CFERxjXt6zG0ayOu7VBP0z+VRKUvIgFna+4xZi/fw1sr95J37AwJMZFc36k+Q7o0pH+bFKJ14NclU+mLSMAqLHJkbT/E3FX7eH/dfo6eLiA5PppBnRswpEtDerWoQ2SE5v8rQqUvIkHhbEERC7fkMXf1PuZtyOXUuULSasUy+IoG3NCpPj2b6wOgPFT6IhJ0Tp4tYP7Gg8xdtY+FW/I4W1hEnYQYrm1fjxs61ad/mxR9B3ABKn0RCWrHzxTwyeY8PtpwgH9vPMixMwXEx0QysG0q13dK45p2aTr/TzHlLf2o6ggjIlJRNWOj+M6VDfjOlQ04W1BE1vZDfLThAB+tz+X9dQeIijDSm9fm6nb1GNg2lfb1E3UcQDloS19EgkpRkWP1niN8uD6XBZsPsunAMQDSasUysG0qV7erR7/WKSTVCK+/AjS9IyJh4UD+aRZuyeOTLXks3JrHsdMFREYY3ZokM7BtKn1bp3Bl46SQ3x1UpS8iYaegsIhVu4+wYLPvQ2Dt3nwA4mMi
SW9ehz4t69KnVV06N6wVcheDUemLSNg7dPwMn+84zJJth8jafoitB48Dvu8LejavTZ9Wdendoi4dG9YK+r8E9EWuiIS9ujV9+/sPvqIBAHnHzpC13fcBsGT7IT7enAdAXHQEVzZKpnuz2nRv6vs3VM8PpC19EQlbuUdPs3THYVZ88RUrvjjChn35nCv0dWLTOvH08H8IdG1Sm7b1axIbFbjHCGh6R0Skgk6fK2Td3nyW7/rq6w+CvGNnAIiKMNqmJdK5US2uaJREp0ZJdKhfixoxgfFBoOkdEZEKiov2feGb3rwO4LtAzJ6vTrF2bz5r9+azbm8+8zbk8nr2HgAiDFrXq0nnhkm0b5BI27RE2tVPpH6tuIA9ZkClLyJyAWZGkzrxNKkT//X3As459uefZu3efNbvzWfdvqN8mvMlc1bu/fp1ibFRtK2fSNu0mr4PgrREWqeomsiSAAAGUUlEQVTVJLVmrOcfBpreERGpBF+dOMuW3GNsOXicLQeOsTn3GFtyj3Hk5Lmvl6kZG0WLlASapyTQom48LVITaF43gRYpCSTHx1zW+2t6R0SkGtVOiKF3y7r0bln36zHnHHnHz7A19zhbco+x88sT7Dh0ktW7j/Dumn0UFdvmrh0fTf82qTw3oluV5iyz9M2sHTCr2FBL4NfOuWeKLWPAs8Bg4CQw2jm3wsy6AhOAWkAh8HvnXPGfJSISssyMeolx1EuMo1/rlG88d6agkN2HT7HjyxP+D4MT1K6GE8iVWfrOuc1AVwAziwT2Am+VWGwQ0MZ/642v6Hvj+wAY5ZzbamYNgeVm9qFz7kjl/QoiIsEnNiqS1vVq0rpezWp934pO71wLbHPO7SoxPhSY5nxfEGSZWbKZNXDObTm/gHNun5kdBFIBlb6IiAcqetzxcGBGKeONgN3FHu/xj33NzHoBMcC2Cr6niIhUknKXvpnFAEOA2aU9XcrY119RmFkDYDpwt3OuqJSfnWlm2WaWnZeXV95IIiJSQRXZ0h8ErHDO5Zby3B6gSbHHjYF9AGZWC3gX+KVzLqu0H+ycm+ScS3fOpaemplYgkoiIVERFSn8EpU/tAMwFRplPBpDvnNvv/+vgLXzz/aX9hSAiItWoXF/kmlk8cB0wptjYWADn3AvAe/h218zBt8fO3f7FhgFXAXXNbLR/bLRzblVlhBcRkYrREbkiIiGgvEfkBvdVA0REpEICbkvfzPKAkscBVEQK8GUlxalqwZQVgitvMGWF4MobTFkhuPJeTtZmzrky94QJuNK/XGaWXZ4/cQJBMGWF4MobTFkhuPIGU1YIrrzVkVXTOyIiYUSlLyISRkKx9Cd5HaACgikrBFfeYMoKwZU3mLJCcOWt8qwhN6cvIiIXFopb+iIicgEhU/pmdqOZbTazHDN73Os8ZTGznWa21sxWmVlAHY1mZi+Z2UEzW1dsrI6ZzTOzrf5/a3uZsbgL5H3CzPb61+8qMxvsZcbzzKyJmX1sZhvNbL2Z/cQ/HnDr9yJZA3XdxpnZUjNb7c/7G/94CzP73L9uZ/lPDxOoWaea2Y5i67Zrpb+5cy7ob0AkvlM2t8R3+ubVQEevc5WReSeQ4nWOC2S7CugOrCs29ifgcf/9x4Envc5ZRt4ngEe9zlZK1gZAd//9RGAL0DEQ1+9FsgbqujWgpv9+NPA5kAG8Dgz3j78A3B/AWacCt1ble4fKln4vIMc5t905dxaYie/CLnIJnHMLgcMlhocCr/jvvwLcXK2hLuICeQOSc26/c26F//4xYCO+a08E3Pq9SNaA5HyO+x9G+28OuAZ4wz8eKOv2QlmrXKiUfpkXcQlADvjIzJabWabXYcohzTm3H3xlANTzOE95/MjM1vinfzyfLinJzJoD3fBt5QX0+i2RFQJ03ZpZpJmtAg4C8/DNABxxzhX4FwmYbiiZ1Tl3ft3+3r9u/2pmsZX9vqFS+he9iEuA6uec647vOgUPmtlVXgcKMROAVviu77wf+Iu3cb7JzGoCbwIPO+eOep3nYkrJGrDr1jlX6Jzriu+aHr2ADqUtVr2pSlcyq5l1Bn4OtAd6AnWAn1X2+4ZK6V/wIi6Byjm3z//vQXzXHOjlbaIy5fqvgHb+SmgHPc5zUc65XP9/qiJgMgG0fs0sGl+Jvuqcm+MfDsj1W1rWQF635znnjgAL8M2TJ5vZ+dPIB1w3FMt6o39KzTnnzgAvUwXrNlRKfxnQxv8tfQy+a/nO9TjTBZlZgpklnr8PXA+su/irPDcXuMt//y7gHQ+zlOl8gfrdQoCsXzMz4EVgo3Pu6WJPBdz6vVDWAF63qWaW7L9fA/g2vu8hPgZu9S8WKOu2tKybin3wG77vHip93YbMwVn+3caewbcnz0vOud97HOmCzKwlvq178F3I5rVAymtmM4Cr8Z3xLxf4H+BtfHtBNAW+AG5zzgXEl6cXyHs1vukHh29PqTHn58y9ZGb9gUXAWuD89aJ/gW+uPKDW70WyjiAw1+2V+L6ojcS3Qfu6c+5//f/fZuKbLlkJ/MC/Je2Zi2T9N5CKb8p6FTC22Be+lfPeoVL6IiJStlCZ3hERkXJQ6YuIhBGVvohIGFHpi4iEEZW+iEgYUemLiIQRlb6ISBhR6YuIhJH/D0Z0VjgbejMsAAAAAElFTkSuQmCC\n", 124 | "text/plain": [ 125 | "
" 126 | ] 127 | }, 128 | "metadata": {}, 129 | "output_type": "display_data" 130 | }, 131 | { 132 | "ename": "KeyboardInterrupt", 133 | "evalue": "", 134 | "output_type": "error", 135 | "traceback": [ 136 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 137 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 138 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0mdisplay\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclear_output\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 30\u001b[0;31m \u001b[0mtrain_for\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m10000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 139 | "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain_for\u001b[0;34m(steps, losses)\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 19\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 20\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m>=\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 140 | "\u001b[0;32m~/exp/tensorcom/tensorcom/zcom.py\u001b[0m in \u001b[0;36mitems\u001b[0;34m(self, report)\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtotal\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcount\u001b[0m \u001b[0;34m>=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtotal\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 367\u001b[0m \u001b[0;32mreturn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 368\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 369\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mreport\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mcount\u001b[0m \u001b[0;34m>=\u001b[0m 
\u001b[0mnext_report\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 370\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"count\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcount\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstats\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 141 | "\u001b[0;32m~/exp/tensorcom/tensorcom/zcom.py\u001b[0m in \u001b[0;36mrecv\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 327\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msocket\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv_multipart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 328\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmultipart\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 329\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtenbin\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzrecv_multipart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msocket\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfos\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 330\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 331\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtenbin\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzrecv_single\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msocket\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfos\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 142 | "\u001b[0;32m~/exp/tensorcom/tensorcom/tenbin.py\u001b[0m in \u001b[0;36mzrecv_multipart\u001b[0;34m(socket, infos)\u001b[0m\n\u001b[1;32m 222\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mzrecv_multipart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msocket\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minfos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 223\u001b[0m \u001b[0;34m\"\"\"Receive arrays as a multipart ZMQ message.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 224\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdecode_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msocket\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv_multipart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minfos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minfos\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 225\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0msctp_send\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msocket\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minfos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 143 | 
"\u001b[0;32m/usr/lib/python3/dist-packages/zmq/sugar/socket.py\u001b[0m in \u001b[0;36mrecv_multipart\u001b[0;34m(self, flags, copy, track)\u001b[0m\n\u001b[1;32m 465\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0many\u001b[0m \u001b[0mof\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mreasons\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m~\u001b[0m\u001b[0mSocket\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0mmight\u001b[0m \u001b[0mfail\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 466\u001b[0m \"\"\"\n\u001b[0;32m--> 467\u001b[0;31m \u001b[0mparts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mflags\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcopy\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrack\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrack\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 468\u001b[0m \u001b[0;31m# have first part already, only loop while more to receive\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 469\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetsockopt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mzmq\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRCVMORE\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 144 | "\u001b[0;32mzmq/backend/cython/socket.pyx\u001b[0m in \u001b[0;36mzmq.backend.cython.socket.Socket.recv\u001b[0;34m()\u001b[0m\n", 145 | "\u001b[0;32mzmq/backend/cython/socket.pyx\u001b[0m in \u001b[0;36mzmq.backend.cython.socket.Socket.recv\u001b[0;34m()\u001b[0m\n", 146 | "\u001b[0;32mzmq/backend/cython/socket.pyx\u001b[0m in \u001b[0;36mzmq.backend.cython.socket._recv_copy\u001b[0;34m()\u001b[0m\n", 147 | "\u001b[0;32m/usr/lib/python3/dist-packages/zmq/backend/cython/checkrc.pxd\u001b[0m in \u001b[0;36mzmq.backend.cython.checkrc._check_rc\u001b[0;34m()\u001b[0m\n", 148 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 149 | ] 150 | }, 151 | { 152 | "data": { 153 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAX0AAAD8CAYAAACb4nSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzt3Xl0lPXd/vH3JzsJIQESwr7voGwBwiZW6wK1oK0iSEWsGlBba318rO2v7WOfLk+trVW0IIuKUAVEUWldoRVBJELYdwibrCGChH1J8v39MYMnpoEkkOSe5XqdM4eZ79yTuXKfwzV3vnMv5pxDRETCQ4TXAUREpPqo9EVEwohKX0QkjKj0RUTCiEpfRCSMqPRFRMJImaVvZu3MbFWx21Eze7jEMmZm48wsx8zWmFn3Ys/9yczWm9lG/zJWFb+IiIiULaqsBZxzm4GuAGYWCewF3iqx2CCgjf/WG5gA9DazvkA/4Er/cp8CA4EFlZBdREQqqMzSL+FaYJtzbleJ8aHANOc70ivLzJLNrAHggDggBjAgGsi9zMwiInKJKlr6w4EZpYw3AnYXe7wHaOScW2JmHwP78ZX+8865jRd7g5SUFNe8efMKxhIRCW/Lly//0jmXWtZy5S59M4sBhgA/L+3pUsacmbUGOgCN/WPzzOwq59zCEj87E8gEaNq0KdnZ2eWNJSIigJmVnIEpVUX23hkErHDOlTY9swdoUuxxY2AfcAuQ5Zw77pw7DrwPZJR8sXNuknMu3TmXnppa5geViIhcooqU/ghKn9oBmAuM8u/FkwHkO+f2A18AA80sysyi8X2Je9HpHRERqTrlmt4xs3jgOmBMsbGxAM65F4D3gMFADnASuNu/2BvANcBafF/qfuCc+0dlhRcRkYopV+k7504CdUuMvVDsvgMeLOV1hRT7oBAREW/piFwRkTCi0hcRCSMqfRGRMBIypV9Y5PjDexvZ89VJr6OIiASskCn9Lw6fZObSL7h9Yha7D6v4RURKEzKl3yIlgdfuy+D4mQJun7iEXYdOeB1JRCTghEzpA3RulMRr9/Xm1LlCbp+Yxfa8415HEhEJKCFV+gCdGiYxIzODc4VFDJ+URc5BFb+IyHkhV/oA7evXYmZmBkUOhk/KYkvuMa8jiYgEhJAsfYA2aYnMzMwgwmDEpCw2HTjqdSQREc+FbOkDtK5Xk1lj+hAdGcGISVms35fvdSQREU+FdOmDb6+eWWMyqBEdyR2TP2fdXhW/iISvkC99gGZ1E5g1pg+JcVHcMTmL1buPeB1JRMQTYVH6AE3qxDNrTB+S42P4wZTPWfHFV15HEhGpdmFT+gCNkmswMzODOjVjGPXiUpbvOux1JBGRahVWpQ/QMLkGszL7kJoYy6gXl7Jsp4pfRMJH2JU+QP2kOGZlZpCWFMddLy3l8+2HvI4kIlItwrL0AerVimNmZgYNk2sw+uVlfLbtS68jiYhUubAtfYB6ib7ib1KnBj+cuozFOSp+EQltYV36ACk1Y5lxXwbN6ybww6nLWLglz+tIIiJVJuxLH6BuzVheuy+DVqk1uXdaNh9vPuh1JBGRKqHS96uTEMNr9/WmbVpNxkxbzr835XodSUSk0qn0i0mOj+HVezJo3yCRMdOXM3+Dil9EQotKv4Sk+Gim39Objg2TuP/V5Xy4/oDXkUREKo1KvxRJNaKZfk8vOjdK4sFXV/D+2v1eRxIRqRQq/QuoFRfNtB/2okuTZH40YyXvrlHxi0jwU+lfRGJcNK/8sBfdmybz0MyVzF29z+tIIiKXpczSN7N2Zraq2O2omT1cYhkzs3FmlmNma8ysu3/8WyVee9rMbq6qX6Yq1IyNYurdvejRrDYPz1zJ2yv3eh1JROSSRZW1gHNuM9AVwMwigb3AWyUWGwS08d96AxOA3s65j4u9tg6QA3xUWeGrS0JsFFPv7sk9U7N55PVVFDnH97o39jqWiEiFVXR651pgm3NuV4nxocA055MFJJtZgxLL3Aq875w7eYlZPRUfE8VLo3vSp1Vd/mv2amZn7/Y6kohIhVW09IcDM0oZbwQUb8E9/rHyvBYzyzSzbDPLzssL3NMg1IiJ5MW7etK/dQqPvbmGWcu+8DqSiEiFlLv0zSwGGALMLu3pUsZcsdc2AK4APiztZzvnJjnn0p1z6ampqeWN5Im46Egmj0rnqjap/OzNtbz2uYpfRIJHRbb0BwErnHOlHaa6B2hS7HFjoPiuLsOAt5xz5yoeMfDERUcy8c4efKtdKr94ay3Tl+z0OpKISLlUpPRHcIHpGWAuMMq/F08GkO+cK75j+8VeG5TioiN54c4efLtDPX71znqmLt7hdSQRkTKVq/TNLB64DphTbGysmY31P3wP2I5v75zJwAPFlmuO76+ATyolcQCJjYpk/MgeXN8xjSf+sYEpi7Z7HUlE5KLMOVf2UtUoPT3dZWdnex2jQs4VFvHQjJW8v+4AvxjcnsyrWnkdSUTCjJktd86ll7WcjsitBNGREYwb0Y3vXNGAP7y3iQkLtnkdSUSkVGUenCXlEx0ZwbPDuxIRYTz5wSYKi4r40TVtvI4lIvINKv1KFBUZwV+HdSHS4M8fbaGgyPHwt9t6HUtE5Gsq/UoWFRnBX4Z1JTIigmfmb6WwyPHIdW0xK+1QBhGR6qXSrwKREcZTt15JVITx3L9zKChyPHZDOxW/iHhOpV9FIiKM//veFURGGhMWbKOgsIhfDO6g4hcRT6n0q1BEhPH7mzsTFWFMXrSDgiLHr2/qqOIXEc+o9KuYmfGbIZ2IiojgpcU7KCxyPPHdTkREqPhFpPqp9KuBmfGrmzoQFWlMWridgiLH74Z2VvGLSLVT6VcTM+Png9oTFWGMX7CNwkLH/33vChW/iFQrlX41MjP++4Z2REVGMO5fWzlXVMRTt3YhUsUvItVEpV/NzIxHrmtLVITx9LwtFBQ6nh7WhahInRFDRKqeSt8jD13bhujICJ78YBMFRUU8O7wb0Sp+EaliKn0P3X91K6Ijjd+9u5FzhSt4/o5uxEZFeh1LREKYNi09du+Alvzv0E7M25DL2OnLOX2u0OtIIhLCVPoBYFSf5vzf965gwZY87n0lm1NnVfwiUjVU+gFiRK+mPHVrFz7b9iV3T13KiTMFXkcSkRCk0g8gt/ZozF9v78qynV9x10tLOXY6JK4jLyIBRKUfYIZ2bcRzI7qxavcR7nxxKfmnVPwiUnlU+gFo8BUNGD+yO+v35TNyShZfnTjrdSQRCREq/QB1faf6TBqVztbc4wyflEXesTNeRxKREKDSD2DfalePl0f35IvDJ7l90hIO5J/2OpKIBDmVfoDr2zqFaff04uDRMwybuIQ9X530OpKIBDGVfhDo2bwOf7+3N0dOnmXYC0vY+eUJryOJSJBS6QeJrk2SmZGZwemCIoZNXMLW3GNeRxKRIKTSDyKdGiYxMzMDBwyflMWGfUe9jiQiQabM0jezdma2qtjtqJk9XGIZM7NxZpZjZmvMrHux55qa2UdmttHMNphZ88r/NcJH27REZmVmEB
MVwYjJWazZc8TrSCISRMosfefcZudcV+dcV6AHcBJ4q8Rig4A2/lsmMKHYc9OAp5xzHYBewMHKCB7OWqbW5PUxfahVI4qRkz8ne+dhryOJSJCo6PTOtcA259yuEuNDgWnOJwtINrMGZtYRiHLOzQNwzh13zmn3k0rQpE48r4/pQ2piLHe+uJRPt37pdSQRCQIVLf3hwIxSxhsBu4s93uMfawscMbM5ZrbSzJ4ys/84YbyZZZpZtpll5+XlVTBS+GqQVINZY/rQrG48P5y6jPkbcr2OJCIBrtylb2YxwBBgdmlPlzLm8F2kZQDwKNATaAmM/o8FnZvknEt3zqWnpqaWN5IAqYmxzMzMoEODRMb+fTn/WL3P60giEsAqsqU/CFjhnCttc3IP0KTY48bAPv/4SufcdudcAfA20L2U18tlSI6P4e/39qZ7s9o8NHMlry/bXfaLRCQsVaT0R1D61A7AXGCUfy+eDCDfObcfWAbUNrPzm+/XABsuOa1cUGJcNK/c3YsBbVJ57M01TF28w+tIIhKAylX6ZhYPXAfMKTY21szG+h++B2wHcoDJwAMAzrlCfFM7/zKztfimgSZXWnr5hhoxkUwe1YMbOqXxxD828LePc7yOJCIBxpxzXmf4hvT0dJedne11jKBWUFjEo7NX8/aqfTz4rVY8en07zEr72kVEQoWZLXfOpZe1XFR1hJHqFRUZwdPDulIjJoq/fbyNE2cK+fVNHYmIUPGLhDuVfoiKiDD+cEtnEmIimfLpDo6fKeCP37uCqEideUMknKn0Q5iZ8f++04FaNaJ5et4Wjp0+x7gR3YiN+o9DJUQkTGizL8SZGQ9d24b/+W5HPlyfy72vZHPiTIHXsUTEIyr9MHF3vxb8+bYuLM75kh+8+Dn5J3XBdZFwpNIPI7f2aMz4kT1Yv/cot09awsFjuvyiSLhR6YeZGzvX56XRPdl16CTDXtDlF0XCjUo/DPVvk8Lf7+3N4RNnue2FJeQcPO51JBGpJir9MNWjWW1mjenDuULHsIlLWLsn3+tIIlINVPphrEODWswe24ca0ZGMmJzFZ9t0Tn6RUKfSD3MtUhJ44/4+NEiKY/RLy/hg3QGvI4lIFVLpCw2SavD6mD50alSLB15dzqxlX3gdSUSqiEpfAKidEMOr9/amf5tUfvbmWiYs2EagnYxPRC6fSl++Fh8TxZRR6Qzp0pAnP9jEH97bqOIXCTE69458Q0xUBM/c3pXa8dFMXrSDwyfO8eT3daI2kVCh0pf/EBFhPDGkE7UTYnhm/lbyT53l+Tu6ExetE7WJBDttvkmpzIyHv92W3w7txL82HWTUi0vJP6Xz9YgEO5W+XNSdfZozbng3Vu7+itsnLuFAvs7XIxLMVPpSpu92acjLo3ux+/BJvj/hM3IOHvM6kohcIpW+lEv/NinMGtOHMwVF3PrCEpbv+srrSCJyCVT6Um6dGyUx5/6+1I6PYeSULOZvyPU6kohUkEpfKqRp3XjeGNuHdmmJZE7PZuZSHb0rEkxU+lJhdWvG8tp9GVzVNpXH56zl2flbdRCXSJBQ6cslSYiNYvKodL7fvTF/nb+F//f2OgqLVPwigU4HZ8kli46M4M+3XUlarVjGL9hG3rEzjBvejRoxOohLJFBpS18ui5nx2I3t+c2QTszfmMuIyVkcOn7G61gicgFllr6ZtTOzVcVuR83s4RLLmJmNM7McM1tjZt2LPVdY7LVzq+KXEO/d1bc5E0b2YOP+o3xvwmdsz9MlGEUCUZml75zb7Jzr6pzrCvQATgJvlVhsENDGf8sEJhR77tT51zvnhlRSbglAN3auz4zMDI6dLuD7Ez5j+a7DXkcSkRIqOr1zLbDNOberxPhQYJrzyQKSzaxBpSSUoNK9aW3m3N+XpBrRjJj8Oe+v3e91JBEppqKlPxyYUcp4I2B3scd7/GMAcWaWbWZZZnZzaT/UzDL9y2Tn5eVVMJIEmuYpCcx5oB+dG9bigddWMGXRdq8jiYhfuUvfzGKAIcDs0p4uZez8/ntNnXPpwB3AM2bW6j8WdG6Scy7dOZeemppa3kgSwOokxPDafRnc0LE+v3t3I0/MXa9dOkUCQEW29AcBK5xzpR17vwdoUuxxY2AfgHPu/L/bgQVAt0tKKkEnLjqSv43szj39WzD1s5088OpyTp0t9DqWSFirSOmPoPSpHYC5wCj/XjwZQL5zbr+Z1TazWAAzSwH6ARsuK7EElcgI41c3deTXN3Xkow25DJ+cRd4x7dIp4pVylb6ZxQPXAXOKjY01s7H+h+8B24EcYDLwgH+8A5BtZquBj4E/OudU+mHoh/1bMGFkDzYfOMrNf1vM5gM6PbOIFyzQzpmSnp7usrOzvY4hVWTtnnzueWUZJ88W8vwd3bi6XT2vI4mEBDNb7v/+9KJ0RK5UqysaJ/HOj/rRtE48P5y6jGlLdnodSSSsqPSl2jVIqsHssX24pn09fv3Oeu3ZI1KNVPriiYTYKCbemf71nj33Tcvm+JkCr2OJhDyVvnjm/J49v7+lM59syePWCZ+x98gpr2OJhDSVvnhuZO9mTL27J3u/OsXNf1vMqt1HvI4kErJU+hIQBrRJZc4DfYmLjmDYxCW8vXKv15FEQpJKXwJGm7RE3nmwP92bJvPwrFX88f1N+oJXpJKp9CWg1EmIYfo9vRnZuykvfLKNzGnZHDt9zutYIiFDpS8BJzoygt/fcgW/vbkzC7bk8b3xn7Hr0AmvY4mEBJW+BKw7M5ox/Z5e5B0/w9C/LeaznC+9jiQS9FT6EtD6tkrhnQf7kVozljtfWsr0JTu9jiQS1FT6EvCa1U1gzgN9+Va7VH71znp+8dZazhYUeR1LJCip9CUoJMZFM/HOdO6/uhWvff4Fd0zO4uDR017HEgk6Kn0JGpERxs9ubM9zI7qxft9RbnruU118XaSCVPoSdL7bpSFvPdiXGjGRDJ+Uxd+zdhFopwgXCVQqfQlK7evXYu6D/enXOoVfvr2On725htPndClGkbKo9CVoJcVH8+JdPfnxNa15PXsPt09cwj6dsE3kolT6EtQiI4z/ur4dE+/swba8E3z3uU/J2n7I61giAUulLyHhhk71efvBfiTFRzNyyudMWbRd8/wipVDpS8hoXa8m7zzYj2vb1+N3727kgVdX6Lw9IiWo9CWk+Pbn78HPB7Xnow25fPe5T9mw76jXsUQChkpfQo6ZMWZgK2bcl8HJs4XcMn4xry/b7XUskYCg0peQ1atFHd59aAA9mtXmsTfX8Ojs1Zw6q906Jbyp9CWkpSbGMv2e3jx0TWveXLGHW8YvZnveca9jiXhGpS8hLzLCeOT6drw8uie5R08z5PnFvLtmv9exRDyh0pewcXW7erz70ADapNXkwddW8Ot31ukoXgk7ZZa+mbUzs1XFbkfN7OESy5iZjTOzHDNbY2bdSzxfy8z2mtnzlf0LiFREw+QazMrswz39WzBtyS5uGf8ZOQc13SPho8zSd85tds51dc51BXoAJ4G3Siw2CGjjv2UCE0o8/1vgk8uPK3L5YqIi+NVNHXlpdDq5R0/z3
ec+5fXs3TqYS8JCRad3rgW2Oed2lRgfCkxzPllAspk1ADCzHkAa8NFlpxWpRNe0T+P9nwyga5NkHntjDT+ZuUoHc0nIq2jpDwdmlDLeCCi+I/QeoJGZRQB/Af770uKJVK20WnH8/d7ePHp9W95du5/vjPuU1buPeB1LpMqUu/TNLAYYAswu7elSxhzwAPCec+6iR8aYWaaZZZtZdl5eXnkjiVSKyAjjR9e04fUxGRQWOb4/4TMmLdxGUZGmeyT0VGRLfxCwwjmXW8pze4AmxR43BvYBfYAfmdlO4M/AKDP7Y8kXO+cmOefSnXPpqampFYgkUnl6NKvDew8N4LqOafzhvU2MnrqMg8d0SUYJLRUp/RGUPrUDMBdfoZuZZQD5zrn9zrmRzrmmzrnmwKP45v0fv7zIIlUnKT6a8SO78/tbOvP59kPc+MwiPlx/wOtYIpWmXKVvZvHAdcCcYmNjzWys/+F7wHYgB5iMb1pHJCiZGSN7N+PdhwbQKLkGY6Yv579nr+b4mQKvo4lcNgu03dTS09Nddna21zFEADhbUMS4f21l/IIcGtWuwdPDutKzeR2vY4n8BzNb7pxLL2s5HZErchExURE8ekM7Zo/tg2HcPnEJf/pgE2cLiryOJnJJVPoi5dCjWR3e+8kAbuvRhPELtnHL+MVszT3mdSyRClPpi5RTzdgonrz1Sibe2YP9+ae56blPeenTHdq1U4KKSl+kgm7oVJ8PH76K/q1T+N9/bmD45Cx2fnnC61gi5aLSF7kEqYmxTLkrnaduvZKN+49y47MLtdUvQUGlL3KJzIzb0psw76cD6dvKt9V/+6Ql7NBWvwQwlb7IZaqfFMeLd6Xzl9u6sPnAMW58ZiFTFm2nUFv9EoBU+iKVwMz4fo/GzHtkIP1bp/C7dzcybOIStunSjBJgVPoilSitVhxT7krnr7d3IefgcQY/u4gXPtnGuULt1y+BQaUvUsnMjFu6NWbeT69iYNtU/vj+JoY8v5hVOmWzBACVvkgVqVcrjkmj0nnhBz04fOIMt4xfzBNz1+tCLeIplb5IFbuxc33mPzKQURnNeGXJTq57eqHO3CmeUemLVIPEuGh+M7Qzc+7vS3J8NGOmLydzWjb78095HU3CjEpfpBp1a1qbf/y4P48Pas/CrXlc9/RCpi7eod07pdqo9EWqWXRkBGMHtmLeTwfSvVltnvjHBoY8/ynLdx32OpqEAZW+iEea1Innlbt78vwd3Th84izfn7CER15fpUs0SpVS6Yt4yMy46cqGzH9kIA9c3Yp/rt7PNX/+hCmLtmvffqkSKn2RAJAQG8VjN7bnw59eRXrz2vzu3Y0MfnYRn+V86XU0CTEqfZEA0iIlgZdH92TKqHROFxRyx5TPefDVFew7or18pHJEeR1ARL7JzPh2xzT6t0lh0sLt/O3jHP61KZfMAS0ZM7AVCbH6byuXTlv6IgEqLjqSh65tw/xHBvLtDmmM+3cOV/95ATOXfqFdPOWSqfRFAlyTOvE8f0d35jzQl6Z14nl8zloGP7uIT7bkeR1NgpBKXyRIdG9amzfG9mH8yO6cOlfIXS8tZdRLS9l04KjX0SSIqPRFgoiZMfiKBsx75Cp++Z0OrN59hMHPLuLxN9dw8Kj275eymXOBNTeYnp7usrOzvY4hEhSOnDzLuH/lMD1rJ5ERxl19mzP2qlbUTojxOppUMzNb7pxLL3M5lb5I8Nt16AR/nbeFd1bvo2ZMFPcOaMk9A1pQU3v6hI3yln6Z0ztm1s7MVhW7HTWzh0ssY2Y2zsxyzGyNmXX3jzczs+X+1603s7GX/iuJyIU0q5vAM8O78cFPrqJPq7r8df4WrvrTx0xeuJ3T5wq9jicBpEJb+mYWCewFejvndhUbHwz8GBgM9Aaedc71NrMY/3ucMbOawDqgr3Nu34XeQ1v6Ipdv9e4j/PmjzSza+iVptWL58TVtGJbehJgofY0XqiptS7+Ea4FtxQvfbygwzflkAclm1sA5d9Y5d8a/TOwlvJ+IXIIuTZKZfk9vZmZm0KR2PL98ex3XPr2A17N365w+Ya6iJTwcmFHKeCNgd7HHe/xjmFkTM1vjf/7Ji23li0jlymhZl9lj+/Dy6J4k1YjmsTfWcPVTC5ietUvTPmGq3KXvn6oZAswu7elSxhyAc263c+5KoDVwl5mllfKzM80s28yy8/J0wIlIZTIzvtW+Hv/4UX9eHt2TerVi+dXb6xj41Me8+OkOTp1V+YeTimzpDwJWOOdyS3luD9Ck2OPGwDe26P1b+OuBASVf7Jyb5JxLd86lp6amViCSiJTX+fKfc39fXr23Ny1SEvjtPzfQ/8l/M2HBNo6fKfA6olSDipT+CEqf2gGYC4zy78WTAeQ75/abWWMzqwFgZrWBfsDmy0osIpfFzOjXOoWZmX2YPbYPnRol8eQHm+j3x3/zzPwtHD5x1uuIUoXKtfeOmcXjm5Nv6ZzL94+NBXDOvWBmBjwP3AicBO52zmWb2XXAX/BN9RjwvHNu0sXeS3vviFS/1buP8PzHOczbkEtcdAS39mjMPf1b0iIlwetoUk46OEtEKmxr7jGmLNrBWyv3cq6oiG93SOO+AS3p2bw2vm07CVQqfRG5ZAePnWb6kl1Mz9rFkZPn6NI4iXsHtGRQ5/pERWrP60Ck0heRy3bqbCFvrNjDS5/uYMeXJ2iUXIO7+zXnth5NSIqP9jqeFKPSF5FKU1jkmL8xlymLtrNs51fERUcwpEtD7sxozhWNk7yOJ6j0RaSKrN+Xz9+zvuDtlXs5da6QLk2SGZXRjO9c2YC46Eiv44Utlb6IVKmjp88xZ/kepmftYlveCWrHRzMsvQkjezejad14r+OFHZW+iFQL5xxLth1ietYuPtqQS5Fz9G+dwq09GnNDp/ra+q8mKn0RqXYH8k8zc9kXzM7ew94jp6gVF8WQrg25rUcTrmycpN0+q5BKX0Q8U1TkWLL9ELOzd/P+ugOcKSiiXVoit6U35uZujUipGet1xJCj0heRgJB/6hz/XLOP2dl7WLX7CFERxjXt6zG0ayOu7VBP0z+VRKUvIgFna+4xZi/fw1sr95J37AwJMZFc36k+Q7o0pH+bFKJ14NclU+mLSMAqLHJkbT/E3FX7eH/dfo6eLiA5PppBnRswpEtDerWoQ2SE5v8rQqUvIkHhbEERC7fkMXf1PuZtyOXUuULSasUy+IoG3NCpPj2b6wOgPFT6IhJ0Tp4tYP7Gg8xdtY+FW/I4W1hEnYQYrm1fjxs61ad/mxR9B3ABKn0RCWrHzxTwyeY8PtpwgH9vPMixMwXEx0QysG0q13dK45p2aTr/TzHlLf2o6ggjIlJRNWOj+M6VDfjOlQ04W1BE1vZDfLThAB+tz+X9dQeIijDSm9fm6nb1GNg2lfb1E3UcQDloS19EgkpRkWP1niN8uD6XBZsPsunAMQDSasUysG0qV7erR7/WKSTVCK+/AjS9IyJh4UD+aRZuyeOTLXks3JrHsdMFREYY3ZokM7BtKn1bp3Bl46SQ3x1UpS8iYaegsIhVu4+wYLPvQ2Dt3nwA4mMi
SW9ehz4t69KnVV06N6wVcheDUemLSNg7dPwMn+84zJJth8jafoitB48Dvu8LejavTZ9Wdendoi4dG9YK+r8E9EWuiIS9ujV9+/sPvqIBAHnHzpC13fcBsGT7IT7enAdAXHQEVzZKpnuz2nRv6vs3VM8PpC19EQlbuUdPs3THYVZ88RUrvjjChn35nCv0dWLTOvH08H8IdG1Sm7b1axIbFbjHCGh6R0Skgk6fK2Td3nyW7/rq6w+CvGNnAIiKMNqmJdK5US2uaJREp0ZJdKhfixoxgfFBoOkdEZEKiov2feGb3rwO4LtAzJ6vTrF2bz5r9+azbm8+8zbk8nr2HgAiDFrXq0nnhkm0b5BI27RE2tVPpH6tuIA9ZkClLyJyAWZGkzrxNKkT//X3As459uefZu3efNbvzWfdvqN8mvMlc1bu/fp1ibFRtK2fSNu0mr4PgrREWqeomsiSAAAGUUlEQVTVJLVmrOcfBpreERGpBF+dOMuW3GNsOXicLQeOsTn3GFtyj3Hk5Lmvl6kZG0WLlASapyTQom48LVITaF43gRYpCSTHx1zW+2t6R0SkGtVOiKF3y7r0bln36zHnHHnHz7A19zhbco+x88sT7Dh0ktW7j/Dumn0UFdvmrh0fTf82qTw3oluV5iyz9M2sHTCr2FBL4NfOuWeKLWPAs8Bg4CQw2jm3wsy6AhOAWkAh8HvnXPGfJSISssyMeolx1EuMo1/rlG88d6agkN2HT7HjyxP+D4MT1K6GE8iVWfrOuc1AVwAziwT2Am+VWGwQ0MZ/642v6Hvj+wAY5ZzbamYNgeVm9qFz7kjl/QoiIsEnNiqS1vVq0rpezWp934pO71wLbHPO7SoxPhSY5nxfEGSZWbKZNXDObTm/gHNun5kdBFIBlb6IiAcqetzxcGBGKeONgN3FHu/xj33NzHoBMcC2Cr6niIhUknKXvpnFAEOA2aU9XcrY119RmFkDYDpwt3OuqJSfnWlm2WaWnZeXV95IIiJSQRXZ0h8ErHDO5Zby3B6gSbHHjYF9AGZWC3gX+KVzLqu0H+ycm+ScS3fOpaemplYgkoiIVERFSn8EpU/tAMwFRplPBpDvnNvv/+vgLXzz/aX9hSAiItWoXF/kmlk8cB0wptjYWADn3AvAe/h218zBt8fO3f7FhgFXAXXNbLR/bLRzblVlhBcRkYrREbkiIiGgvEfkBvdVA0REpEICbkvfzPKAkscBVEQK8GUlxalqwZQVgitvMGWF4MobTFkhuPJeTtZmzrky94QJuNK/XGaWXZ4/cQJBMGWF4MobTFkhuPIGU1YIrrzVkVXTOyIiYUSlLyISRkKx9Cd5HaACgikrBFfeYMoKwZU3mLJCcOWt8qwhN6cvIiIXFopb+iIicgEhU/pmdqOZbTazHDN73Os8ZTGznWa21sxWmVlAHY1mZi+Z2UEzW1dsrI6ZzTOzrf5/a3uZsbgL5H3CzPb61+8qMxvsZcbzzKyJmX1sZhvNbL2Z/cQ/HnDr9yJZA3XdxpnZUjNb7c/7G/94CzP73L9uZ/lPDxOoWaea2Y5i67Zrpb+5cy7ob0AkvlM2t8R3+ubVQEevc5WReSeQ4nWOC2S7CugOrCs29ifgcf/9x4Envc5ZRt4ngEe9zlZK1gZAd//9RGAL0DEQ1+9FsgbqujWgpv9+NPA5kAG8Dgz3j78A3B/AWacCt1ble4fKln4vIMc5t905dxaYie/CLnIJnHMLgcMlhocCr/jvvwLcXK2hLuICeQOSc26/c26F//4xYCO+a08E3Pq9SNaA5HyO+x9G+28OuAZ4wz8eKOv2QlmrXKiUfpkXcQlADvjIzJabWabXYcohzTm3H3xlANTzOE95/MjM1vinfzyfLinJzJoD3fBt5QX0+i2RFQJ03ZpZpJmtAg4C8/DNABxxzhX4FwmYbiiZ1Tl3ft3+3r9u/2pmsZX9vqFS+he9iEuA6uec647vOgUPmtlVXgcKMROAVviu77wf+Iu3cb7JzGoCbwIPO+eOep3nYkrJGrDr1jlX6Jzriu+aHr2ADqUtVr2pSlcyq5l1Bn4OtAd6AnWAn1X2+4ZK6V/wIi6Byjm3z//vQXzXHOjlbaIy5fqvgHb+SmgHPc5zUc65XP9/qiJgMgG0fs0sGl+Jvuqcm+MfDsj1W1rWQF635znnjgAL8M2TJ5vZ+dPIB1w3FMt6o39KzTnnzgAvUwXrNlRKfxnQxv8tfQy+a/nO9TjTBZlZgpklnr8PXA+su/irPDcXuMt//y7gHQ+zlOl8gfrdQoCsXzMz4EVgo3Pu6WJPBdz6vVDWAF63qWaW7L9fA/g2vu8hPgZu9S8WKOu2tKybin3wG77vHip93YbMwVn+3caewbcnz0vOud97HOmCzKwlvq178F3I5rVAymtmM4Cr8Z3xLxf4H+BtfHtBNAW+AG5zzgXEl6cXyHs1vukHh29PqTHn58y9ZGb9gUXAWuD89aJ/gW+uPKDW70WyjiAw1+2V+L6ojcS3Qfu6c+5//f/fZuKbLlkJ/MC/Je2Zi2T9N5CKb8p6FTC22Be+lfPeoVL6IiJStlCZ3hERkXJQ6YuIhBGVvohIGFHpi4iEEZW+iEgYUemLiIQRlb6ISBhR6YuIhJH/D0Z0VjgbejMsAAAAAElFTkSuQmCC\n", 154 | "text/plain": [ 155 | "
" 156 | ] 157 | }, 158 | "metadata": { 159 | "needs_background": "light" 160 | }, 161 | "output_type": "display_data" 162 | } 163 | ], 164 | "source": [ 165 | "def rtplot(ys, xs=None, sigma=20, fig=None):\n", 166 | " fig = fig or plt.gcf()\n", 167 | " fig.clf()\n", 168 | " fig.add_subplot(1, 1, 1)\n", 169 | " ax = fig.get_axes()[0]\n", 170 | " ax.cla()\n", 171 | " from scipy.ndimage import filters\n", 172 | " ys = filters.gaussian_filter(np.array(ys, \"f\"), sigma, mode=\"nearest\")\n", 173 | " if xs is not None:\n", 174 | " plt.plot(xs, ys)\n", 175 | " else:\n", 176 | " plt.plot(ys)\n", 177 | " display.clear_output(wait=True)\n", 178 | " display.display(fig)\n", 179 | " \n", 180 | "def train_for(steps, losses=[]):\n", 181 | " model.cuda()\n", 182 | " model.train()\n", 183 | " for i, (input, target) in enumerate(training):\n", 184 | " if i>=steps: break\n", 185 | " output = model(input.cuda())\n", 186 | " loss = criterion(output, target.cuda())\n", 187 | " losses.append(loss.item())\n", 188 | " rtplot(losses)\n", 189 | " optimizer.zero_grad()\n", 190 | " loss.backward()\n", 191 | " optimizer.step()\n", 192 | " display.clear_output(wait=True)\n", 193 | " \n", 194 | "train_for(10000)" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": {}, 201 | "outputs": [], 202 | "source": [] 203 | } 204 | ], 205 | "metadata": { 206 | "kernelspec": { 207 | "display_name": "Python 3", 208 | "language": "python", 209 | "name": "python3" 210 | }, 211 | "language_info": { 212 | "codemirror_mode": { 213 | "name": "ipython", 214 | "version": 3 215 | }, 216 | "file_extension": ".py", 217 | "mimetype": "text/x-python", 218 | "name": "python", 219 | "nbconvert_exporter": "python", 220 | "pygments_lexer": "ipython3", 221 | "version": "3.7.3" 222 | } 223 | }, 224 | "nbformat": 4, 225 | "nbformat_minor": 4 226 | } 227 | -------------------------------------------------------------------------------- /docs/pytorch.md: -------------------------------------------------------------------------------- 1 | # Simple Example of using Tensorcom with PyTorch 2 | 3 | In a separate window, start the server with: 4 | 5 | ``` 6 | curl http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-0000-000.tgz > training.tgz 7 | serve-imagenet -u training.tgz -b 64 8 | ``` 9 | 10 | Or, if you have a local clone of the entire dataset: 11 | 12 | ``` 13 | serve-imagenet -u http://my-server/some-path/imagenet_train-{0000..0147}-{000..019}.tgz 14 | ``` 15 | 16 | 17 | ```python 18 | %matplotlib inline 19 | ``` 20 | 21 | 22 | ```python 23 | from importlib import reload 24 | 25 | import os 26 | import numpy as np 27 | 28 | import torch 29 | import torch.nn as nn 30 | import torch.nn.parallel 31 | import torch.backends.cudnn as cudnn 32 | import torch.distributed as dist 33 | import torch.optim 34 | from torchvision import models 35 | 36 | from webdataset import WebDataset 37 | from torch.utils.data import DataLoader 38 | import tensorcom as tc 39 | 40 | from IPython import display 41 | import matplotlib.pyplot as plt 42 | 43 | ``` 44 | 45 | 46 | ```python 47 | mean = np.array([0.485, 0.456, 0.406], "f") 48 | std = np.array([0.229, 0.224, 0.225], "f") 49 | def norm_image(xs): 50 | return (xs-mean[None,None,:])/std[None,None,:] 51 | def norm_cls(ys): 52 | ys = ys.astype(np.int64) 53 | return ys-1 54 | ``` 55 | 56 | 57 | ```python 58 | #urls = "http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-{0000..0147}-{000..019}.tgz" 59 | num_samples = 1000000 60 | training = 
tc.Connection("zsub://127.0.0.1:7880", 61 | epoch=num_samples, 62 | batch_transforms=[norm_image, norm_cls], 63 | converters="torch") 64 | xs, ys = next(iter(training)) 65 | print(xs.shape, xs.dtype, xs.min().item(), xs.max().item()) 66 | print(ys.shape, ys.dtype, ys.min().item(), ys.max().item()) 67 | ``` 68 | 69 | torch.Size([32, 3, 224, 224]) torch.float32 -2.1179039478302 1136.357177734375 70 | torch.Size([32]) torch.int64 33 998 71 | 72 | 73 | 74 | ```python 75 | model = models.resnet50() 76 | criterion = nn.CrossEntropyLoss().cuda() 77 | optimizer = torch.optim.SGD(model.parameters(), 0.0001, momentum=0.9, weight_decay=0.0) 78 | ``` 79 | 80 | 81 | ```python 82 | def rtplot(ys, xs=None, sigma=20, fig=None): 83 | fig = fig or plt.gcf() 84 | fig.clf() 85 | fig.add_subplot(1, 1, 1) 86 | ax = fig.get_axes()[0] 87 | ax.cla() 88 | from scipy.ndimage import filters 89 | ys = filters.gaussian_filter(np.array(ys, "f"), sigma, mode="nearest") 90 | if xs is not None: 91 | plt.plot(xs, ys) 92 | else: 93 | plt.plot(ys) 94 | display.clear_output(wait=True) 95 | display.display(fig) 96 | 97 | def train_for(steps, losses=[]): 98 | model.cuda() 99 | model.train() 100 | for i, (input, target) in enumerate(training): 101 | if i>=steps: break 102 | output = model(input.cuda()) 103 | loss = criterion(output, target.cuda()) 104 | losses.append(loss.item()) 105 | rtplot(losses) 106 | optimizer.zero_grad() 107 | loss.backward() 108 | optimizer.step() 109 | display.clear_output(wait=True) 110 | 111 | train_for(10000) 112 | ``` 113 | 114 | 115 | ![png](pytorch_files/pytorch_7_0.png) 116 | 117 | 118 | 119 | --------------------------------------------------------------------------- 120 | 121 | KeyboardInterrupt Traceback (most recent call last) 122 | 123 | in 124 | 28 display.clear_output(wait=True) 125 | 29 126 | ---> 30 train_for(10000) 127 | 128 | 129 | in train_for(steps, losses) 130 | 17 model.cuda() 131 | 18 model.train() 132 | ---> 19 for i, (input, target) in enumerate(training): 133 | 20 if i>=steps: break 134 | 21 output = model(input.cuda()) 135 | 136 | 137 | ~/exp/tensorcom/tensorcom/zcom.py in items(self, report) 138 | 366 if self.total > 0 and self.count >= self.total: 139 | 367 return 140 | --> 368 result = self.recv() 141 | 369 if report > 0 and count >= next_report: 142 | 370 print("count", count, self.stats.summary()) 143 | 144 | 145 | ~/exp/tensorcom/tensorcom/zcom.py in recv(self) 146 | 327 self.socket.recv_multipart() 147 | 328 if self.multipart: 148 | --> 329 data = tenbin.zrecv_multipart(self.socket, self.infos) 149 | 330 else: 150 | 331 data = tenbin.zrecv_single(self.socket, self.infos) 151 | 152 | 153 | ~/exp/tensorcom/tensorcom/tenbin.py in zrecv_multipart(socket, infos) 154 | 222 def zrecv_multipart(socket, infos=False): 155 | 223 """Receive arrays as a multipart ZMQ message.""" 156 | --> 224 return decode_list(socket.recv_multipart(), infos=infos) 157 | 225 158 | 226 def sctp_send(socket, dest, l, infos=None): 159 | 160 | 161 | /usr/lib/python3/dist-packages/zmq/sugar/socket.py in recv_multipart(self, flags, copy, track) 162 | 465 for any of the reasons :func:`~Socket.recv` might fail 163 | 466 """ 164 | --> 467 parts = [self.recv(flags, copy=copy, track=track)] 165 | 468 # have first part already, only loop while more to receive 166 | 469 while self.getsockopt(zmq.RCVMORE): 167 | 168 | 169 | zmq/backend/cython/socket.pyx in zmq.backend.cython.socket.Socket.recv() 170 | 171 | 172 | zmq/backend/cython/socket.pyx in zmq.backend.cython.socket.Socket.recv() 173 | 174 | 175 | 
zmq/backend/cython/socket.pyx in zmq.backend.cython.socket._recv_copy() 176 | 177 | 178 | /usr/lib/python3/dist-packages/zmq/backend/cython/checkrc.pxd in zmq.backend.cython.checkrc._check_rc() 179 | 180 | 181 | KeyboardInterrupt: 182 | 183 | 184 | 185 | ![png](pytorch_files/pytorch_7_2.png) 186 | 187 | 188 | 189 | ```python 190 | 191 | ``` 192 | -------------------------------------------------------------------------------- /docs/pyzmq-performance.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "Populating the interactive namespace from numpy and matplotlib\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "%pylab inline" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "import numpy as np\n", 27 | "import zmq\n", 28 | "import subprocess\n", 29 | "from subprocess import Popen" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 3, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "data = np.ones((256, 3, 224, 224), dtype=np.float16)\n", 39 | "bin = data.tobytes()" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 4, 45 | "metadata": {}, 46 | "outputs": [ 47 | { 48 | "name": "stdout", 49 | "output_type": "stream", 50 | "text": [ 51 | "Overwriting server.py\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "%%writefile server.py\n", 57 | "import numpy as np\n", 58 | "import zmq\n", 59 | "data = np.ones((256, 3, 224, 224), dtype=np.float16)\n", 60 | "bindata = data.tobytes()\n", 61 | "context = zmq.Context()\n", 62 | "socket = context.socket(zmq.PUSH)\n", 63 | "socket.bind(\"tcp://*:7988\")\n", 64 | "for i in range(10000):\n", 65 | " socket.send(bindata)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 5, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "proc = Popen([\"python3\", \"server.py\"])\n", 75 | "proc.poll()" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 6, 81 | "metadata": {}, 82 | "outputs": [ 83 | { 84 | "name": "stdout", 85 | "output_type": "stream", 86 | "text": [ 87 | "0 77070336\n", 88 | "10 77070336\n", 89 | "20 77070336\n", 90 | "30 77070336\n", 91 | "40 77070336\n", 92 | "50 77070336\n", 93 | "60 77070336\n", 94 | "70 77070336\n", 95 | "80 77070336\n", 96 | "90 77070336\n", 97 | "7707.0336\n", 98 | "CPU times: user 1.3 s, sys: 8.64 s, total: 9.94 s\n", 99 | "Wall time: 6.13 s\n" 100 | ] 101 | } 102 | ], 103 | "source": [ 104 | "%%time\n", 105 | "context = zmq.Context()\n", 106 | "socket = context.socket(zmq.PULL)\n", 107 | "socket.connect(\"tcp://127.0.0.1:7988\")\n", 108 | "total = 0\n", 109 | "for i in range(100):\n", 110 | " data = socket.recv()\n", 111 | " total += len(data)\n", 112 | " if i%10==0: print(i, len(data))\n", 113 | "print(total/1e6)\n", 114 | "del socket\n", 115 | "proc.terminate()\n", 116 | "proc.wait()" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 7, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "proc = Popen([\"python3\", \"server.py\"])\n", 126 | "proc.poll()" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 8, 132 | "metadata": {}, 133 | "outputs": [ 134 | { 135 | "name": "stdout", 136 | "output_type": "stream", 137 | "text": [ 138 | "0 
38535168\n", 139 | "10 38535168\n", 140 | "20 38535168\n", 141 | "30 38535168\n", 142 | "40 38535168\n", 143 | "50 38535168\n", 144 | "60 38535168\n", 145 | "70 38535168\n", 146 | "80 38535168\n", 147 | "90 38535168\n", 148 | "3853.5168\n", 149 | "CPU times: user 1.36 s, sys: 8.76 s, total: 10.1 s\n", 150 | "Wall time: 6.38 s\n" 151 | ] 152 | } 153 | ], 154 | "source": [ 155 | "%%time\n", 156 | "context = zmq.Context()\n", 157 | "socket = context.socket(zmq.PULL)\n", 158 | "socket.connect(\"tcp://127.0.0.1:7988\")\n", 159 | "total = 0\n", 160 | "for i in range(100):\n", 161 | " data = socket.recv()\n", 162 | " a = np.frombuffer(data, dtype=np.float16).reshape(256, 3, 224, 224)\n", 163 | " if i%10==0: print(i, a.size)\n", 164 | " total += a.size\n", 165 | "print(total/1e6)\n", 166 | "del socket\n", 167 | "proc.terminate()\n", 168 | "proc.wait()" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 9, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "proc = Popen([\"python3\", \"server.py\"])\n", 178 | "proc.poll()" 179 | ] 180 | } 181 | ], 182 | "metadata": { 183 | "kernelspec": { 184 | "display_name": "Python 3", 185 | "language": "python", 186 | "name": "python3" 187 | }, 188 | "language_info": { 189 | "codemirror_mode": { 190 | "name": "ipython", 191 | "version": 3 192 | }, 193 | "file_extension": ".py", 194 | "mimetype": "text/x-python", 195 | "name": "python", 196 | "nbconvert_exporter": "python", 197 | "pygments_lexer": "ipython3", 198 | "version": "3.6.7" 199 | } 200 | }, 201 | "nbformat": 4, 202 | "nbformat_minor": 2 203 | } 204 | -------------------------------------------------------------------------------- /docs/pyzmq-performance.md: -------------------------------------------------------------------------------- 1 | ```python 2 | %pylab inline 3 | ``` 4 | 5 | Populating the interactive namespace from numpy and matplotlib 6 | 7 | 8 | 9 | ```python 10 | import numpy as np 11 | import zmq 12 | import subprocess 13 | from subprocess import Popen 14 | ``` 15 | 16 | 17 | ```python 18 | data = np.ones((256, 3, 224, 224), dtype=np.float16) 19 | bin = data.tobytes() 20 | ``` 21 | 22 | 23 | ```python 24 | %%writefile server.py 25 | import numpy as np 26 | import zmq 27 | data = np.ones((256, 3, 224, 224), dtype=np.float16) 28 | bindata = data.tobytes() 29 | context = zmq.Context() 30 | socket = context.socket(zmq.PUSH) 31 | socket.bind("tcp://*:7988") 32 | for i in range(10000): 33 | socket.send(bindata) 34 | ``` 35 | 36 | Overwriting server.py 37 | 38 | 39 | 40 | ```python 41 | proc = Popen(["python3", "server.py"]) 42 | proc.poll() 43 | ``` 44 | 45 | 46 | ```python 47 | %%time 48 | context = zmq.Context() 49 | socket = context.socket(zmq.PULL) 50 | socket.connect("tcp://127.0.0.1:7988") 51 | total = 0 52 | for i in range(100): 53 | data = socket.recv() 54 | total += len(data) 55 | if i%10==0: print(i, len(data)) 56 | print(total/1e6) 57 | del socket 58 | proc.terminate() 59 | proc.wait() 60 | ``` 61 | 62 | 0 77070336 63 | 10 77070336 64 | 20 77070336 65 | 30 77070336 66 | 40 77070336 67 | 50 77070336 68 | 60 77070336 69 | 70 77070336 70 | 80 77070336 71 | 90 77070336 72 | 7707.0336 73 | CPU times: user 1.3 s, sys: 8.64 s, total: 9.94 s 74 | Wall time: 6.13 s 75 | 76 | 77 | 78 | ```python 79 | proc = Popen(["python3", "server.py"]) 80 | proc.poll() 81 | ``` 82 | 83 | 84 | ```python 85 | %%time 86 | context = zmq.Context() 87 | socket = context.socket(zmq.PULL) 88 | socket.connect("tcp://127.0.0.1:7988") 89 | total = 0 90 | for i in range(100): 
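    # (note: socket.recv() hands back one complete ZMQ message as a bytes
    # object; np.frombuffer() below wraps that buffer without copying, so
    # the decode step adds almost nothing to the raw transfer measured above)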
91 | data = socket.recv() 92 | a = np.frombuffer(data, dtype=np.float16).reshape(256, 3, 224, 224) 93 | if i%10==0: print(i, a.size) 94 | total += a.size 95 | print(total/1e6) 96 | del socket 97 | proc.terminate() 98 | proc.wait() 99 | ``` 100 | 101 | 0 38535168 102 | 10 38535168 103 | 20 38535168 104 | 30 38535168 105 | 40 38535168 106 | 50 38535168 107 | 60 38535168 108 | 70 38535168 109 | 80 38535168 110 | 90 38535168 111 | 3853.5168 112 | CPU times: user 1.36 s, sys: 8.76 s, total: 10.1 s 113 | Wall time: 6.38 s 114 | 115 | 116 | 117 | ```python 118 | proc = Popen(["python3", "server.py"]) 119 | proc.poll() 120 | ``` 121 | -------------------------------------------------------------------------------- /docs/simple-server.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tensorcom Data Server\n", 8 | "\n", 9 | "This illustrates transmitting a training set via tensorcom. Here, we use a standard Torch `Dataloader` as a data source." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import sys\n", 19 | "import torch\n", 20 | "from torchvision import datasets, transforms\n", 21 | "import numpy as np\n", 22 | "import tensorcom" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 2, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "loader = torch.utils.data.DataLoader(\n", 32 | " datasets.MNIST('.', train=True, download=True,\n", 33 | " transform=transforms.Compose([\n", 34 | " transforms.ToTensor(),\n", 35 | " transforms.Normalize((0.1307,), (0.3081,))\n", 36 | " ])),\n", 37 | " batch_size=32, shuffle=True)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "We use simple URLs with `zpub`, `zsub`, `zpush`, and `zpull` schemas for making ZMQ connections. There are also reverse versions `zr...`, which reverse the connect/bind schemes.\n", 45 | "\n", 46 | "Here we use a ZMQ PUB socket for distributing data. Such a socket will send data asynchronously, whether clients are connected or not." 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 3, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "serve = tensorcom.Connection()\n", 56 | "serve.connect(\"zpub://127.0.0.1:7888\")" 57 | ] 58 | }, 59 | { 60 | "cell_type": "markdown", 61 | "metadata": {}, 62 | "source": [ 63 | "In this sample library, all tensors are represented as NumPy arrays, so we have to convert the PyTorch tensors to NumPy before sending.\n", 64 | "\n", 65 | "For many application, sending floating point data in `float16` format is sufficient and potentially faster when networking is involved." 
66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 4, 71 | "metadata": {}, 72 | "outputs": [ 73 | { 74 | "name": "stderr", 75 | "output_type": "stream", 76 | "text": [ 77 | "0 1 2 3 4 " 78 | ] 79 | } 80 | ], 81 | "source": [ 82 | "for epoch in range(5):\n", 83 | " sys.stderr.write(\"{} \".format(epoch))\n", 84 | " for i, (xs, ys) in enumerate(loader):\n", 85 | " xs = np.array(xs).astype('float16')\n", 86 | " ys = np.array(ys).astype('int32')\n", 87 | " serve.send([xs, ys])" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "Note that to achieve higher data rates, you can start up multiple publishers and then connect to them from a single training job." 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "metadata": {}, 100 | "source": [ 101 | "Note also that, under the covers, PyTorch's parallel `Dataloader` functions very similarly to this approach; it also uses multiple processes and IPC for loading data asynchronously. However, by making the communication explicit with Tensorcom, we can use the same preprocessing pipelines for PyTorch and TensorFlow, and we can also share training data between multiple jobs.\n", 102 | "\n", 103 | "Also note that you can use any data loading and augmentation framework you like in the sender, and combine it with any DL framework. In particular, you can use PyTorch `Dataset`/`DataLoader`, you can use TensorFlow input pipelines, and you can use the `dlinputs` framework." 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [] 112 | } 113 | ], 114 | "metadata": { 115 | "kernelspec": { 116 | "display_name": "Python 3", 117 | "language": "python", 118 | "name": "python3" 119 | }, 120 | "language_info": { 121 | "codemirror_mode": { 122 | "name": "ipython", 123 | "version": 3 124 | }, 125 | "file_extension": ".py", 126 | "mimetype": "text/x-python", 127 | "name": "python", 128 | "nbconvert_exporter": "python", 129 | "pygments_lexer": "ipython3", 130 | "version": "3.6.7" 131 | } 132 | }, 133 | "nbformat": 4, 134 | "nbformat_minor": 2 135 | } 136 | -------------------------------------------------------------------------------- /docs/simple-server.md: -------------------------------------------------------------------------------- 1 | # Tensorcom Data Server 2 | 3 | This illustrates transmitting a training set via tensorcom. Here, we use a standard Torch `Dataloader` as a data source. 4 | 5 | 6 | ```python 7 | import sys 8 | import torch 9 | from torchvision import datasets, transforms 10 | import numpy as np 11 | import tensorcom 12 | ``` 13 | 14 | 15 | ```python 16 | loader = torch.utils.data.DataLoader( 17 | datasets.MNIST('.', train=True, download=True, 18 | transform=transforms.Compose([ 19 | transforms.ToTensor(), 20 | transforms.Normalize((0.1307,), (0.3081,)) 21 | ])), 22 | batch_size=32, shuffle=True) 23 | ``` 24 | 25 | We use simple URLs with `zpub`, `zsub`, `zpush`, and `zpull` schemas for making ZMQ connections. There are also reverse versions `zr...`, which reverse the connect/bind schemes. 26 | 27 | Here we use a ZMQ PUB socket for distributing data. Such a socket will send data asynchronously, whether clients are connected or not. 28 | 29 | 30 | ```python 31 | serve = tensorcom.Connection() 32 | serve.connect("zpub://127.0.0.1:7888") 33 | ``` 34 | 35 | In this sample library, all tensors are represented as NumPy arrays, so we have to convert the PyTorch tensors to NumPy before sending. 
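
As a minimal sketch of that conversion for a single batch (the hypothetical `xs`, `ys` stand in for one batch from the loader above; the training loop below does the same thing for every batch):

```python
import numpy as np
import torch

xs = torch.randn(32, 1, 28, 28)     # stand-in for one MNIST image batch
ys = torch.randint(0, 10, (32,))    # stand-in for the matching label batch

# Tensor.numpy() returns a zero-copy view of the CPU tensor's memory;
# astype() then makes the compact copy that actually goes on the wire.
xs_np = xs.numpy().astype("float16")
ys_np = ys.numpy().astype("int32")
```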
36 | 37 | For many applications, sending floating point data in `float16` format is sufficient and potentially faster when networking is involved. 38 | 39 | 40 | ```python 41 | for epoch in range(5): 42 | sys.stderr.write("{} ".format(epoch)) 43 | for i, (xs, ys) in enumerate(loader): 44 | xs = np.array(xs).astype('float16') 45 | ys = np.array(ys).astype('int32') 46 | serve.send([xs, ys]) 47 | ``` 48 | 49 | 0 1 2 3 4 50 | 51 | Note that to achieve higher data rates, you can start up multiple publishers and then connect to them from a single training job. 52 | 53 | Note also that, under the covers, PyTorch's parallel `DataLoader` functions very similarly to this approach; it also uses multiple processes and IPC for loading data asynchronously. However, by making the communication explicit with Tensorcom, we can use the same preprocessing pipelines for PyTorch and TensorFlow, and we can also share training data between multiple jobs. 54 | 55 | Also note that you can use any data loading and augmentation framework you like in the sender and combine it with any DL framework. In particular, you can use PyTorch `Dataset`/`DataLoader`, TensorFlow input pipelines, or the `dlinputs` framework. 56 | -------------------------------------------------------------------------------- /helpers/dockertest: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # test the github distribution in a container 4 | 5 | # build the base container 6 | 7 | docker build -t tensorcomtest-base - < Dockerfile < Dockerfile < docs/pydoc.md 34 | 35 | # ( 36 | # cat < docs/commands.md 53 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: tensorcom 2 | nav: 3 | - Home: index.md 4 | - Keras Example: keras.md 5 | - Microbenchmarks: microbenchmarks.md 6 | - Module Docs: pydoc.md 7 | - Pytorch Example: pytorch.md 8 | - PyZMQ Performance: pyzmq-performance.md 9 | - Simple Server: simple-server.md 10 | theme: readthedocs 11 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | coverage 2 | jupyter 3 | licenseheaders 4 | mkdocs 5 | pytest 6 | setuptools 7 | wheel 8 | nbconvert 9 | keyring 10 | black 11 | -------------------------------------------------------------------------------- /requirements.docs.txt: -------------------------------------------------------------------------------- 1 | mkdocs 2 | setuptools 3 | wheel 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | webdataset 2 | pyzmq 3 | msgpack 4 | torch 5 | braceexpand 6 | -------------------------------------------------------------------------------- /serve-imagenet-dir: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import argparse 4 | import logging 5 | import multiprocessing 6 | import os 7 | import sys 8 | 9 | import numpy as np 10 | from tensorcom import zcom 11 | import torch 12 | from torchvision import datasets, transforms 13 | 14 | logger = logging.getLogger() 15 | logger.setLevel(logging.INFO) 16 | 17 | parser = argparse.ArgumentParser( 18 | """
19 | Serve the Imagenet dataset for training. 20 | 21 | By default, data is served as tuples (img, cls), where 22 | img is a (batch, h, w, channel) array of type uint8 23 | and cls is a (batch,) array of type int32. 24 | 25 | The batch size can be adjusted using the `-b` argument. 26 | 27 | This program uses the PyTorch data loader, but still 28 | uses NumPy conventions for actually serving the data. 29 | 30 | Usage: 31 | """ 32 | ) 33 | parser.add_argument("service_address", nargs="*") 34 | parser.add_argument( 35 | "-d", 36 | "--dir", 37 | default="./imagenet", 38 | help="directory containing the ImageNet dataset", 39 | ) 40 | parser.add_argument( 41 | "-b", 42 | "--batch-size", 43 | type=int, 44 | default=32, 45 | help="batch the input (default: 32)", 46 | ) 47 | parser.add_argument( 48 | "-r", "--report", type=int, default=10, help="report on progress this frequently" 49 | ) 50 | parser.add_argument( 51 | "-B", 52 | "--benchmark", 53 | action="store_true", 54 | help="eliminate I/O overhead by just preloading and serving one sample", 55 | ) 56 | parser.add_argument( 57 | "-w", "--workers", type=int, default=0, help="number of DataLoader workers" 58 | ) 59 | parser.add_argument( 60 | "-P", 61 | "--parallel", 62 | type=int, 63 | default=0, 64 | help="spawn multiple subprocesses for parallel I/O", 65 | ) 66 | parser.add_argument("-S", "--no-shuffle", action="store_true", help="serve the data without shuffling") 67 | parser.add_argument("-n", "--normalize", action="store_true") 68 | args = parser.parse_args() 69 | 70 | if args.service_address == []: 71 | args.service_address = ["zpub://127.0.0.1:7880"] 72 | 73 | if args.parallel > 0: 74 | assert len(args.service_address) == 1 75 | assert args.service_address[0].startswith("zpush") or args.service_address[ 76 | 0 77 | ].startswith("zrpub") 78 | args.service_address = args.service_address * args.parallel 79 | 80 | logger.info("service: %s", args.service_address) 81 | 82 | 83 | def fixtype(a): 84 | if isinstance(a, (int, float, str)): 85 | return a 86 | if isinstance(a, np.ndarray): 87 | if a.dtype == np.int64: 88 | return a.astype(np.int32) 89 | if a.dtype == np.float64: 90 | return a.astype(np.float32) 91 | return a 92 | 93 | 94 | def start_server(con, report=args.report): 95 | 96 | logger.info("starting server") 97 | 98 | serve = zcom.Connection(con) 99 | 100 | logger.info("loading dataset") 101 | 102 | traindir = os.path.join(args.dir, "train") 103 | valdir = os.path.join(args.dir, "val") 104 | 105 | if args.normalize: 106 | normalize = transforms.Normalize( 107 | mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] 108 | ) 109 | train_dataset = datasets.ImageFolder( 110 | traindir, 111 | transforms.Compose( 112 | [ 113 | transforms.RandomResizedCrop(224), 114 | transforms.RandomHorizontalFlip(), 115 | transforms.ToTensor(), 116 | normalize, 117 | ] 118 | ), 119 | ) 120 | else: 121 | train_dataset = datasets.ImageFolder( 122 | traindir, 123 | transforms.Compose( 124 | [ 125 | transforms.RandomResizedCrop(224), 126 | transforms.RandomHorizontalFlip(), 127 | transforms.ToTensor(), 128 | lambda x: (255 * x).type(torch.uint8), 129 | ] 130 | ), 131 | ) 132 | 133 | logger.info("creating dataloader") 134 | 135 | train_loader = torch.utils.data.DataLoader( 136 | train_dataset, 137 | batch_size=args.batch_size, 138 | shuffle=not args.no_shuffle, 139 | num_workers=args.workers, 140 | pin_memory=False, 141 | ) 142 | 143 | for i, (img, cls) in enumerate(train_loader): 144 | if i % report == 0: 145 | print(i, serve.stats.summary()) 146 | sys.stdout.flush() 147 | img =
img.permute(0, 2, 3, 1).numpy() 148 | cls = cls.type(torch.int32).numpy() - 1 149 | if i == 0: 150 | print(img.shape, img.dtype, np.amin(img), np.amax(img)) 151 | print(cls.shape, cls.dtype) 152 | serve.send([img, cls]) 153 | 154 | 155 | if len(args.service_address) == 1: 156 | start_server(args.service_address[0]) 157 | else: 158 | nproc = len(args.service_address) 159 | pool = multiprocessing.Pool(nproc) 160 | print(pool) 161 | pool.map(start_server, args.service_address) 162 | -------------------------------------------------------------------------------- /serve-imagenet-shards: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import argparse 4 | import multiprocessing 5 | import sys 6 | 7 | import numpy as np 8 | from tensorcom import zcom 9 | from webdataset import WebDataset 10 | from torch.utils.data import DataLoader 11 | 12 | parser = argparse.ArgumentParser( 13 | """ 14 | Serve the Imagenet dataset for training. 15 | 16 | By default, data is served as tuples (img, cls), where 17 | img is a (batch, h, w, channel) array of type uint8 18 | and cls is a (batch,) array of type int32. 19 | 20 | The batch size can be adjusted using the `-b` argument. 21 | 22 | This program reads sharded tar files from any URL. 23 | 24 | Usage: 25 | """ 26 | ) 27 | parser.add_argument("service_address", nargs="*") 28 | parser.add_argument( 29 | "-u", 30 | "--url", 31 | default="http://storage.googleapis.com/lpr-imagenet-augmented/imagenet_train-{0000..0147}-{000..019}.tgz", 32 | help="source shard(s) (use --google to point at Google bucket)", 33 | ) 34 | parser.add_argument("-b", "--batch-size", type=int, default=32, help="batch the input") 35 | parser.add_argument( 36 | "-r", "--report", type=int, default=10, help="report on progress this frequently" 37 | ) 38 | parser.add_argument( 39 | "-B", 40 | "--benchmark", 41 | action="store_true", 42 | help="eliminate I/O overhead by just preloading and serving one sample", 43 | ) 44 | parser.add_argument( 45 | "-p", 46 | "--parallel", 47 | type=int, 48 | default=0, 49 | help="spawn multiple subprocesses for parallel I/O", 50 | ) 51 | parser.add_argument("-S", "--shuffle", action="store_true", help="shuffle the data") 52 | parser.add_argument( 53 | "-N", "--num-workers", type=int, default=0, help="num_workers for DataLoader" 54 | ) 55 | args = parser.parse_args() 56 | 57 | assert args.batch_size > 0, args.batch_size 58 | assert args.batch_size < 100000, args.batch_size 59 | 60 | 61 | if args.service_address == []: 62 | args.service_address = ["zpub://127.0.0.1:7880"] 63 | 64 | 65 | if args.parallel > 0: 66 | assert len(args.service_address) == 1 67 | assert args.service_address[0].startswith("zpush") or args.service_address[ 68 | 0 69 | ].startswith("zrpub") 70 | args.service_address = args.service_address * args.parallel 71 | 72 | 73 | def fixtype(a): 74 | if isinstance(a, (int, float, str)): 75 | return a 76 | if isinstance(a, np.ndarray): 77 | if a.dtype == np.int64: 78 | return a.astype(np.int32) 79 | if a.dtype == np.float64: 80 | return a.astype(np.float32) 81 | return a 82 | 83 | 84 | def infinite(source): 85 | while True: 86 | for sample in source: 87 | yield sample 88 | 89 | 90 | def start_server(con, report=args.report, benchmark=args.benchmark): 91 | print("serving {}".format(con)) 92 | serve = zcom.Connection(con) 93 | dataset = WebDataset( 94 | args.url, 95 | extensions="png;jpg;jpeg;ppm cls", 96 | decoder="rgb8", 97 | shuffle=int(args.shuffle > 0), 98 | ) 99 | source = DataLoader( 100 
| dataset, 101 | batch_size=args.batch_size, 102 | shuffle=args.shuffle, 103 | num_workers=args.num_workers, 104 | ) 105 | 106 | if not benchmark: 107 | for i, (img, cls) in enumerate(infinite(source)): 108 | if i % report == 0: 109 | print(i, serve.stats.summary()) 110 | sys.stdout.flush() 111 | img, cls = img.numpy().astype(np.uint8), cls.numpy().astype(np.int32) 112 | serve.send([img, cls]) 113 | else: 114 | for i, (img, cls) in enumerate(source): 115 | break 116 | while True: 117 | if i % report == 0: 118 | print(i, serve.stats.summary()) 119 | sys.stdout.flush() 120 | img, cls = img.numpy(), cls.numpy() 121 | serve.send([img, cls]) 122 | 123 | 124 | if len(args.service_address) == 1: 125 | start_server(args.service_address[0]) 126 | else: 127 | nproc = len(args.service_address) 128 | pool = multiprocessing.Pool(nproc) 129 | print(pool) 130 | pool.map(start_server, args.service_address) 131 | -------------------------------------------------------------------------------- /setup-venv: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | die() { 6 | echo "$*" 1>&2 7 | exit 1 8 | } 9 | dieget() { 10 | echo "$1: missing repository" 11 | echo "hg clone http://bragi.nvidia.com/~tmb/$1" 12 | exit 2 13 | } 14 | 15 | pipx() { 16 | echo 17 | echo "################ pip3 $* ################" 18 | echo 19 | pip3 "$@" 20 | } 21 | 22 | type pip3 || die "install pip3" 23 | python3 -m virtualenv --version || die "install virtualenv for python3" 24 | 25 | test -d venv || python3 -m virtualenv venv 26 | 27 | source venv/bin/activate 28 | 29 | pipx install virtualenv 30 | pipx install numpy 31 | #pipx install scipy 32 | #pipx install scikit-image 33 | #pipx install scikit-learn 34 | 35 | pipx install future 36 | pipx install six 37 | pipx install braceexpand 38 | pipx install imageio 39 | pipx install Pillow 40 | pipx install simplejson 41 | pipx install msgpack 42 | 43 | pipx install matplotlib 44 | pipx install ipython 45 | pipx install jupyter 46 | #pipx install bash_kernel 47 | #pipx install sos 48 | #pipx install sos-notebook 49 | #pipx install sos-python 50 | #pipx install sos-bash 51 | 52 | pipx install torch 53 | pipx install torchvision 54 | 55 | #pipx install tensorflow-gpu 56 | #pipx install keras 57 | 58 | pipx install git+git://github.com/NVlabs/torchtrainers.git 59 | pipx install git+git://github.com/NVlabs/webloader.git 60 | 61 | echo 62 | echo activate with: 63 | echo source ./venv/bin/activate 64 | echo 65 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Copyright (c) 2017 NVIDIA CORPORATION. All rights reserved. 3 | # See the LICENSE file for licensing terms (BSD-style). 
4 | 5 | import sys 6 | import setuptools 7 | 8 | if sys.version_info < (3, 6): 9 | sys.exit("Python versions less than 3.6 are not supported") 10 | 11 | VERSION = "0.1.0" 12 | 13 | SCRIPTS = ( 14 | "tensorshow tensorstat tensormon serve-imagenet-dir serve-imagenet-shards".split() 15 | ) 16 | 17 | setuptools.setup( 18 | author="Thomas Breuel", 19 | author_email="tmbdev+removeme@gmail.com", 20 | description="Distributed preprocessing for deep learning.", 21 | install_requires="webdataset pyzmq msgpack torch".split(), 22 | keywords="object store, client, deep learning", 23 | license="BSD", 24 | long_description=open("README.md").read(), 25 | long_description_content_type="text/markdown", 26 | name="tensorcom", 27 | packages=["tensorcom"], 28 | python_requires=">=3.6", 29 | scripts=SCRIPTS, 30 | url="http://github.com/tmbdev/tensorcom", 31 | version=VERSION, 32 | classifiers=[ 33 | "Development Status :: 3 - Alpha", 34 | "License :: OSI Approved :: BSD License", 35 | "Programming Language :: Python :: 3.6", 36 | "Programming Language :: Python :: 3.7", 37 | ], 38 | ) 39 | -------------------------------------------------------------------------------- /tensorcom/__init__.py: -------------------------------------------------------------------------------- 1 | from .zcom import Connection, TensorcomDataset 2 | -------------------------------------------------------------------------------- /tensorcom/tenbin.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import struct 3 | from numpy import ndarray 4 | 5 | 6 | def bytelen(a): 7 | """Determine the length of a in bytes.""" 8 | if hasattr(a, "nbytes"): 9 | return a.nbytes 10 | elif isinstance(a, (bytearray, bytes)): 11 | return len(a) 12 | else: 13 | raise ValueError(a, "cannot determine nbytes") 14 | 15 | 16 | def bytedata(a): 17 | """Return the raw data corresponding to a.""" 18 | if isinstance(a, (bytearray, bytes, memoryview)): 19 | return a 20 | elif hasattr(a, "data"): 21 | return a.data 22 | else: 23 | raise ValueError(a, "cannot return bytedata") 24 | 25 | 26 | # tables for converting between long/short NumPy dtypes 27 | 28 | long_to_short = """ 29 | float16 f2 30 | float32 f4 31 | float64 f8 32 | int8 i1 33 | int16 i2 34 | int32 i4 35 | int64 i8 36 | uint8 u1 37 | uint16 u2 38 | uint32 u4 39 | uint64 u8 40 | """.strip() 41 | long_to_short = [x.split() for x in long_to_short.split("\n")] 42 | long_to_short = {x[0]: x[1] for x in long_to_short} 43 | short_to_long = {v: k for k, v in long_to_short.items()} 44 | 45 | 46 | 47 | def check_acceptable_input_type(data, allow64): 48 | for a in data: 49 | assert a.dtype.name in long_to_short, a.dtype.name 50 | if not allow64: 51 | assert a.dtype.name not in ["float64", "int64", "uint64"] 52 | 53 | 54 | def str64(s): 55 | """Convert a string to an int64.""" 56 | s = s + "\0" * (8 - len(s)) 57 | s = s.encode("ascii") 58 | return struct.unpack("@q", s)[0] 59 | 60 | 61 | def unstr64(i): 62 | """Convert an int64 to a string.""" 63 | b = struct.pack("@q", i) 64 | return b.decode("ascii").strip("\0") 65 | 66 | 67 | def check_infos(data, infos, required_infos=None): 68 | """Implement infos verification logic.""" 69 | if required_infos is False or required_infos is None: 70 | return data 71 | if required_infos is True: 72 | return data, infos 73 | assert isinstance(required_infos, (tuple, list)) 74 | for required, actual in zip(required_infos, infos): 75 | assert required == actual, (required, actual) 76 | return data 77 | 78 | 79
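# A sketch of the wire format implemented below: each array is preceded by a
# small header, itself an int64 array [dtype-code, info, rank, shape...], with
# the dtype code and the info string packed into int64s via str64. For example:
#
#     h = encode_header(np.zeros((2, 3), dtype="f4"), info="img")
#     decode_header(h)  # -> ((2, 3), dtype('float32'), 'img')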
| def encode_header(a, info=""): 80 | """Encode an array header as a byte array.""" 81 | assert a.ndim < 10 82 | assert a.nbytes == np.prod(a.shape) * a.itemsize 83 | assert a.dtype.name in long_to_short 84 | header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list( 85 | a.shape 86 | ) 87 | return bytedata(np.array(header, dtype="i8")) 88 | 89 | 90 | def decode_header(h): 91 | """Decode a byte array into an array header.""" 92 | h = np.frombuffer(h, dtype="i8") 93 | assert unstr64(h[0]) in short_to_long, h 94 | dtype = np.dtype(short_to_long[unstr64(h[0])]) 95 | info = unstr64(h[1]) 96 | rank = int(h[2]) 97 | shape = tuple(h[3:3 + rank]) 98 | return shape, dtype, info 99 | 100 | 101 | def encode_list(l, infos=None): 102 | """Given a list of arrays, encode them into a list of byte arrays.""" 103 | if infos is None: 104 | infos = [""] 105 | else: 106 | assert len(l) == len(infos) 107 | result = [] 108 | for i, a in enumerate(l): 109 | header = encode_header(a, infos[i % len(infos)]) 110 | result += [header, bytedata(a)] 111 | return result 112 | 113 | 114 | def decode_list(l, infos=False): 115 | """Given a list of byte arrays, decode them into arrays.""" 116 | result = [] 117 | infos0 = [] 118 | for header, data in zip(l[::2], l[1::2]): 119 | shape, dtype, info = decode_header(header) 120 | a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape) 121 | result += [a] 122 | infos0 += [info] 123 | return check_infos(result, infos0, infos) 124 | 125 | 126 | magic_str = "~TenBin~" 127 | magic = str64(magic_str) 128 | magic_bytes = unstr64(magic).encode("ascii") 129 | 130 | 131 | def roundup(n, k=64): 132 | """Round up to the next multiple of 64.""" 133 | return k * ((n + k - 1) // k) 134 | 135 | 136 | def encode_chunks(l): 137 | """Encode a list of chunks into a single byte array, with lengths and magics..""" 138 | size = sum(16 + roundup(b.nbytes) for b in l) 139 | result = bytearray(size) 140 | offset = 0 141 | for b in l: 142 | result[offset:offset + 8] = magic_bytes 143 | offset += 8 144 | result[offset:offset + 8] = struct.pack("@q", b.nbytes) 145 | offset += 8 146 | result[offset:offset + bytelen(b)] = b 147 | offset += roundup(bytelen(b)) 148 | return result 149 | 150 | 151 | def decode_chunks(buf): 152 | """Decode a byte array into a list of chunks.""" 153 | result = [] 154 | offset = 0 155 | total = bytelen(buf) 156 | while offset < total: 157 | assert magic_bytes == buf[offset:offset + 8] 158 | offset += 8 159 | nbytes = struct.unpack("@q", buf[offset:offset + 8])[0] 160 | offset += 8 161 | b = buf[offset:offset + nbytes] 162 | offset += roundup(nbytes) 163 | result.append(b) 164 | return result 165 | 166 | 167 | def encode_buffer(l, infos=None): 168 | """Encode a list of arrays into a single byte array.""" 169 | return encode_chunks(encode_list(l, infos=infos)) 170 | 171 | 172 | def decode_buffer(buf, infos=False): 173 | """Decode a byte array into a list of arrays.""" 174 | return decode_list(decode_chunks(buf), infos=infos) 175 | 176 | 177 | def write_chunk(stream, buf): 178 | """Write a byte chunk to the stream with magics, length, and padding.""" 179 | nbytes = bytelen(buf) 180 | stream.write(magic_bytes) 181 | stream.write(struct.pack("@q", nbytes)) 182 | stream.write(bytedata(buf)) 183 | padding = roundup(nbytes) - nbytes 184 | if padding > 0: 185 | stream.write(b"\0" * padding) 186 | 187 | 188 | def read_chunk(stream): 189 | """Read a byte chunk from a stream with magics, length, and padding.""" 190 | magic = stream.read(8) 191 | if magic 
== b"": 192 | return None 193 | assert magic == magic_bytes, (magic, magic_bytes) 194 | nbytes = stream.read(8) 195 | nbytes = struct.unpack("@q", nbytes)[0] 196 | assert nbytes >= 0 197 | data = stream.read(nbytes) 198 | padding = roundup(nbytes) - nbytes 199 | if padding > 0: 200 | stream.read(padding) 201 | return data 202 | 203 | 204 | def write(stream, l, infos=None): 205 | """Write a list of arrays to a stream, with magics, length, and padding.""" 206 | for chunk in encode_list(l, infos=infos): 207 | write_chunk(stream, chunk) 208 | 209 | 210 | def read(stream, n=999999, infos=False): 211 | """Read a list of arrays from a stream, with magics, length, and padding.""" 212 | chunks = [] 213 | for i in range(n): 214 | header = read_chunk(stream) 215 | if header is None: 216 | break 217 | data = read_chunk(stream) 218 | assert data is not None 219 | chunks += [header, data] 220 | return decode_list(chunks, infos=infos) 221 | 222 | 223 | def save(fname, *args, infos=None, nocheck=False): 224 | """Save a list of arrays to a file, with magics, length, and padding.""" 225 | if not nocheck: 226 | assert fname.endswith(".ten") 227 | with open(fname, "wb") as stream: 228 | write(stream, args, infos=infos) 229 | 230 | 231 | def load(fname, infos=False, nocheck=False): 232 | """Read a list of arrays from a file, with magics, length, and padding.""" 233 | if not nocheck: 234 | assert fname.endswith(".ten") 235 | with open(fname, "rb") as stream: 236 | return read(stream, infos=infos) 237 | 238 | 239 | def zsend_single(socket, l, infos=None): 240 | """Send arrays as a single part ZMQ message.""" 241 | return socket.send(encode_buffer(l, infos=infos)) 242 | 243 | 244 | def zrecv_single(socket, infos=False): 245 | """Receive arrays as a single part ZMQ message.""" 246 | return decode_buffer(socket.recv(), infos=infos) 247 | 248 | 249 | def zsend_multipart(socket, l, infos=None): 250 | """Send arrays as a multipart ZMQ message.""" 251 | return socket.send_multipart(encode_list(l, infos=infos)) 252 | 253 | 254 | def zrecv_multipart(socket, infos=False): 255 | """Receive arrays as a multipart ZMQ message.""" 256 | return decode_list(socket.recv_multipart(), infos=infos) 257 | 258 | 259 | def sctp_send(socket, dest, l, infos=None): 260 | """Send arrays as an SCTP datagram. 261 | 262 | This is just a convenience function and illustration. 263 | For more complex networking needs, you may want 264 | to call encode_buffer and sctp_send directly. 265 | """ 266 | socket.sctp_send(bytes(encode_buffer(l, infos=infos)), to=dest) 267 | 268 | 269 | def sctp_recv(socket, infos=False, maxsize=100000000): 270 | """Receive arrays as an SCTP datagram. 271 | 272 | This is just a convenience function and illustration. 273 | For more complex networking needs, you may want 274 | to call sctp_recv and decode_buffer directly. 275 | """ 276 | client, _ = socket.accept() 277 | _, _, data, _ = client.sctp_recv(maxsize) 278 | return decode_buffer(data, infos=infos) 279 | -------------------------------------------------------------------------------- /tensorcom/zcom.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import time 3 | from builtins import object 4 | from urllib.parse import urlparse 5 | import braceexpand 6 | import numpy as np 7 | import torch 8 | from torch.utils.data import IterableDataset 9 | 10 | import zmq 11 | import logging 12 | 13 | from . 
import tenbin 14 | 15 | 16 | default_context = zmq.Context() 17 | 18 | 19 | schemes = dict( 20 | # (KIND, BIND) 21 | zpush=(zmq.PUSH, False), 22 | zpull=(zmq.PULL, True), 23 | zpub=(zmq.PUB, True), 24 | zsub=(zmq.SUB, False), 25 | zrpush=(zmq.PUSH, True), 26 | zrpull=(zmq.PULL, False), 27 | zrpub=(zmq.PUB, False), 28 | zrsub=(zmq.SUB, True), 29 | ) 30 | 31 | 32 | class Statistics(object): 33 | """Compute running statistics over numbers, including rates.""" 34 | 35 | def __init__(self, horizon=1000): 36 | """ 37 | 38 | :param horizon: number of recent samples kept for the recent-rate statistics (Default value = 1000) 39 | 40 | """ 41 | self.horizon = horizon 42 | self.reset() 43 | 44 | def reset(self): 45 | """Reset the statistics""" 46 | self.start = time.time() 47 | self.last = time.time() 48 | self.count = 0 49 | self.total = 0 50 | self.recent = collections.deque(maxlen=self.horizon) 51 | 52 | def add(self, x): 53 | """Add a value to the statistics. 54 | 55 | :param x: value to add 56 | 57 | """ 58 | self.last = time.time() 59 | self.count += 1 60 | self.total += x 61 | self.recent.append((self.last, x)) 62 | 63 | def rate(self): 64 | """Compute the rate.""" 65 | if self.count == 0: 66 | return 0 67 | return self.count / (self.last - self.start) 68 | 69 | def throughput(self): 70 | """Compute the throughput.""" 71 | if self.count == 0: 72 | return 0 73 | return self.total / (self.last - self.start) 74 | 75 | def recent_rate(self): 76 | """Compute the recent rate.""" 77 | if self.count == 0: 78 | return 0 79 | delta = self.recent[-1][0] - self.recent[0][0] 80 | if delta == 0: 81 | return 0 82 | return len(self.recent) / delta 83 | 84 | def recent_throughput(self): 85 | """Compute the recent throughput.""" 86 | if self.count == 0: 87 | return 0 88 | total = sum(r[1] for r in self.recent) 89 | delta = self.recent[-1][0] - self.recent[0][0] 90 | if delta == 0: 91 | return 0 92 | return total / delta 93 | 94 | def summary(self): 95 | """Return a summary of recent statistics.""" 96 | return "rate {:.1f} msg/s throughput {:.2e} bytes/s".format( 97 | self.recent_rate(), self.recent_throughput() 98 | ) 99 | 100 | 101 | def tonumpy(dtype=None, transpose=True): 102 | """Curried any-to-numpy converter. 103 | 104 | :param dtype: desired dtype (Default value = None) 105 | :param transpose: whether to transpose images from PyTorch (Default value = True) 106 | 107 | """ 108 | 109 | def f(a): 110 | """Convert a single tensor or array to NumPy. 111 | 112 | :param a: tensor or array to convert 113 | 114 | """ 115 | import torch 116 | 117 | if isinstance(a, torch.Tensor): 118 | if a.ndim == 3 and a.shape[0] in [3, 4]: 119 | a = a.permute(1, 2, 0) 120 | elif a.ndim == 4 and a.shape[1] in [3, 4]: 121 | a = a.permute(0, 2, 3, 1) 122 | return a.detach().cpu().numpy() 123 | else: 124 | return a 125 | 126 | return f 127 | 128 | 129 | def totorch(dtype=None, device="cpu", transpose=True): 130 | """Curried any-to-torch converter.
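    Example (sketch): totorch(device="cuda") returns a function that maps a
    (N, H, W, 3) float16 NumPy batch to an (N, 3, H, W) float32 CUDA tensor
    (assuming a CUDA device is available).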
131 | 132 | :param dtype: desired dtype (Default value = None) 133 | :param device: desired device placement (Default value = "cpu") 134 | :param transpose: transpose images to PyTorch conventions (Default value = True) 135 | 136 | """ 137 | 138 | def f(a): 139 | """ 140 | 141 | :param a: 142 | 143 | """ 144 | 145 | if isinstance(a, np.ndarray): 146 | dtype_ = dtype 147 | if dtype_ is None: 148 | if a.dtype in [np.float16, np.float32, np.float64]: 149 | dtype_ = torch.float32 150 | elif a.dtype in [np.int16, np.uint16, np.int32, np.int64]: 151 | dtype_ = torch.int64 152 | elif isinstance(dtype_, str): 153 | dtype_ = getattr(torch, dtype_) 154 | if a.ndim == 3 and a.shape[2] in [3, 4]: 155 | a = a.transpose(2, 0, 1) 156 | elif a.ndim == 4 and a.shape[3] in [3, 4]: 157 | a = a.transpose(0, 3, 1, 2) 158 | return torch.as_tensor(a, device=device, dtype=dtype_) 159 | else: 160 | return a 161 | 162 | return f 163 | 164 | 165 | def transform_with(sample, transformers): 166 | """Given a list of values and functions, apply functions to values. 167 | 168 | This does nothing if the list of functions is None or empty. 169 | If there are fewer transformers than the length of the list, it wraps around. 170 | 171 | :param sample: list of values 172 | :param transformers: list of functions to apply to values 173 | 174 | """ 175 | if transformers is None or len(transformers) == 0: 176 | return sample 177 | result = list(sample) 178 | ntransformers = len(transformers) 179 | assert len(sample) >= ntransformers 180 | for i in range(len(sample)): 181 | f = transformers[i % ntransformers] 182 | if f is not None: 183 | result[i] = f(sample[i]) 184 | return result 185 | 186 | 187 | def listify(x): 188 | """Turn argument into a list. 189 | 190 | This is a convenience function that allows strings 191 | to be used as a shorthand for [string] in some arguments. 192 | 193 | Returns None for None. 194 | Returns a list for a list or tuple. 195 | Returns [x] for anything else. 196 | 197 | :param x: value to be listified. 198 | 199 | """ 200 | if x is None: 201 | return None 202 | elif isinstance(x, (list, tuple)): 203 | return x 204 | else: 205 | return [x] 206 | 207 | 208 | converter_table = dict(torch=totorch(), torch_cuda=totorch(device="cuda"), numpy=tonumpy()) 209 | 210 | 211 | def estimate_bytes(a): 212 | if isinstance(a, (bytearray, str)): 213 | return len(a) 214 | elif isinstance(a, np.ndarray): 215 | return a.nbytes 216 | else: 217 | return 8 218 | 219 | 220 | def zconnect(url, context=default_context): 221 | """Explicitly connect to a ZMQ socket. 
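    The scheme selects the socket kind and whether to bind or connect; for SUB
    sockets, the URL fragment is used as the subscription topic. Examples
    (sketch): zconnect("zsub://127.0.0.1:7880#train") creates a SUB socket,
    connects it, and subscribes to "train"; zconnect("zpub://127.0.0.1:7880")
    creates a PUB socket and binds it. A transport can be appended to the
    scheme, e.g. "zsub+ipc".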
222 | 223 | :param url: ZMQ-URL to connect to 224 | :param context: ZMQ context to use (Default value = default_context) 225 | 226 | """ 227 | addr = urlparse(url) 228 | scheme, transport = (addr.scheme.split("+", 2) + ["tcp"])[:2] 229 | kind, bind = schemes[scheme] 230 | logging.info("kind %s bind %s", kind, bind) 231 | socket = context.socket(kind) 232 | location = transport + "://" + addr.netloc 233 | if transport == "ipc": 234 | location += addr.path 235 | socket.setsockopt(zmq.LINGER, 0) 236 | if bind: 237 | logging.info("binding to %s", location) 238 | socket.bind(location) 239 | else: 240 | logging.info("connecting to %s", location) 241 | socket.connect(location) 242 | if kind == zmq.SUB: 243 | topic = "" if addr.fragment is None else addr.fragment 244 | logging.info("subscribing to '%s'", topic) 245 | socket.setsockopt_string(zmq.SUBSCRIBE, topic) 246 | return socket 247 | 248 | 249 | class Connection(object): 250 | """A class for sending/receiving tensors via ZMQ sockets.""" 251 | 252 | def __init__( 253 | self, 254 | url=None, 255 | epoch=100000, 256 | total=-1, 257 | multipart=True, 258 | infos=None, 259 | device=None, 260 | allow64=False, 261 | raw=False, 262 | batch_transforms=None, 263 | batch_count=True, 264 | converters=None, 265 | report=-1, 266 | stats_horizon=1000, 267 | noexpand=False, 268 | ): 269 | """Initialize a connection. 270 | 271 | :param url: ZMQ-URL to connect to (Default value = None) 272 | :param epoch: length of an epoch, for len() (Default value = 100000) 273 | :param total: total number of samples (Default value = -1) 274 | :param multipart: send tensors in multipart messages (Default value = True) 275 | :param infos: info fields for tensors (Default value = None) 276 | :param device: device placement for tensors (None=numpy, else torch) (Default value = None) 277 | :param allow64: allow 64bit values on sending (Default value = False) 278 | :param raw: return undecoded tensor data (Default value = False) 279 | :param batch_transforms: list of functions to apply to each sample (Default value = None) 280 | :param converters: list of functions to apply after batch_transforms (Default value = None) 281 | :param report: how frequently to report stats when iterating (Default value = -1) 282 | :param stats_horizon: horizon for computing stats (Default value = 1000) 283 | :param noexpand: do not expand braces in URLs (Default value = False) 284 | 285 | """ 286 | self.stats = Statistics(stats_horizon) 287 | self.multipart = multipart 288 | self.infos = infos 289 | self.device = device 290 | self.allow64 = allow64 291 | self.context = default_context 292 | self.socket = None 293 | self.raw = raw 294 | self.epoch = epoch 295 | self.total = total 296 | self.batch_transforms = listify(batch_transforms) 297 | self.batch_count = batch_count 298 | if isinstance(converters, str): 299 | converters = converter_table.get(converters, []) 300 | self.converters = listify(converters) 301 | self.count = 0 302 | self.report = report 303 | self.sampler = None  # compatibility with DataLoader 304 | self.batch_sampler = None  # compatibility with DataLoader 305 | self.batch_size = 1 306 | if url is not None: 307 | if isinstance(url, str): 308 | url = [url] 309 | if not noexpand: 310 | urls = [] 311 | for u in url: 312 | urls += list(braceexpand.braceexpand(u)) 313 | else: 314 | urls = url 315 | self.connect(urls) 316 | 317 | def connect(self, url): 318 | 319 | if isinstance(url, (list, tuple)): 320 | for u in url: 321 | self.connect(u) 322 | return 323 | try: 324 | self.socket = zconnect(url) 325 | except Exception as e: 326 | print(f"error: url {url}") 327 | raise e 328 | 329 | def close(self): 330 | """Close the connection.""" 331 | self.socket.close() 332 | self.socket = None 333 | self.context = None 334 | 335 | def send(self, data, *, allow64=False): 336 | """Send data over the connection. 337 | 338 | :param data: list of tensors to send 339 | :param allow64: allow 64 bit data (Default value = False) 340 | 341 | """ 342 | tenbin.check_acceptable_input_type(data, allow64) 343 | if self.multipart: 344 | tenbin.zsend_multipart(self.socket, data, self.infos) 345 | else: 346 | tenbin.zsend_single(self.socket, data, self.infos) 347 | self.stats.add(sum(a.nbytes for a in data)) 348 | 349 | def recv(self): 350 | """Receive data from the connection.""" 351 | if self.raw: 352 | return self.socket.recv_multipart() 353 | if self.multipart: 354 | data = tenbin.zrecv_multipart(self.socket, self.infos) 355 | else: 356 | data = tenbin.zrecv_single(self.socket, self.infos) 357 | tenbin.check_acceptable_input_type(data, True) 358 | if isinstance(data, tuple): 359 | data = list(data) 360 | self.stats.add(sum(estimate_bytes(a) for a in data)) 361 | data = transform_with(data, self.batch_transforms) 362 | data = transform_with(data, self.converters) 363 | self.batch_size = len(data[0]) 364 | return data 365 | 366 | def batchsize(self, xs): 367 | if self.batch_count: 368 | return len(xs[0]) 369 | else: 370 | return 1 371 | 372 | def serve(self, source, *, report=-1): 373 | """Serve data from an iterator. 374 | 375 | :param source: iterator yielding lists/tuples of tensors 376 | :param report: how often to report statistics (Default value = -1) 377 | 378 | """ 379 | count = 0 380 | next_report = 0 381 | for sample in source: 382 | self.send(sample) 383 | if report > 0 and count >= next_report: 384 | print("count", count, self.stats.summary()) 385 | next_report += report 386 | count += self.batchsize(sample) 387 | 388 | def items(self, report=-1): 389 | """Receive data through an iterator""" 390 | count = 0 391 | next_report = 0 392 | while True: 393 | if self.total > 0 and count >= self.total: 394 | return 395 | result = self.recv() 396 | if report > 0 and count >= next_report: 397 | print("count", count, self.stats.summary()) 398 | next_report += report 399 | count += self.batchsize(result) 400 | yield result 401 | 402 | def __iter__(self): 403 | """Receive data through an iterator""" 404 | return self.items() 405 | 406 | def write(self, data): 407 | """Alias for send.""" 408 | self.send(data) 409 | 410 | def __len__(self): 411 | """Returns the value of `epoch` given at initialization. 412 | 413 | This allows the Connection object to be used directly as a dataloader 414 | and communicates the epoch size to frameworks that need it. 415 | The `len`/`epoch` value is otherwise unused.
416 | """ 417 | return self.epoch 418 | 419 | 420 | class TensorcomDataset(IterableDataset): 421 | 422 | def __init__(self, urls, length=1000000): 423 | self.urls = urls 424 | self.length = length 425 | self.con = None 426 | 427 | def __iter__(self): 428 | 429 | if self.con is None: 430 | self.con = Connection(self.urls) 431 | 432 | for i in range(self.length): 433 | sample = self.con.recv() 434 | yield sample 435 | 436 | def __len__(self): 437 | return self.length 438 | -------------------------------------------------------------------------------- /tensormon: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import argparse 4 | import time 5 | import tensorcom 6 | 7 | parser = argparse.ArgumentParser("show tensor inputs") 8 | parser.add_argument("input", nargs="*") 9 | parser.add_argument("-R", "--raw", action="store_true") 10 | parser.add_argument("-r", "--report", type=int, default=10) 11 | parser.add_argument("-c", "--count", type=int, default=999999999) 12 | args = parser.parse_args() 13 | 14 | if args.input == []: 15 | args.input = ["zsub://127.0.0.1:7880"] 16 | 17 | 18 | def make_source(): 19 | print("input:", args.input) 20 | source = tensorcom.Connection(device=None, raw=args.raw) 21 | for c in args.input: 22 | print(c) 23 | source.connect(c) 24 | return source 25 | 26 | 27 | index = 0 28 | total = 0 29 | 30 | while True: 31 | source = make_source() 32 | for i, batch in enumerate(source.items()): 33 | if index == 0: 34 | print("connected") 35 | last = time.time() 36 | index += 1 37 | total += 1 38 | bs = len(batch[0]) 39 | if index % args.report == 0: 40 | delta = time.time() - last 41 | print( 42 | "{:20d} {:8.3f} batches/s {:8.3f} samples/s (batchsize: {:d})".format( 43 | index, total / delta, total / delta * bs, bs 44 | ) 45 | ) 46 | total = 0 47 | last = time.time() 48 | if index > args.count: 49 | break 50 | if index > args.count: 51 | break 52 | -------------------------------------------------------------------------------- /tensorshow: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import argparse 4 | import sys 5 | 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | import simplejson 9 | import tensorcom 10 | from matplotlib import cm 11 | 12 | parser = argparse.ArgumentParser("show tensor inputs") 13 | parser.add_argument("input", nargs="*") 14 | parser.add_argument("-b", "--unbatched", action="store_true") 15 | parser.add_argument("-m", "--mode", default="imgclass") 16 | parser.add_argument("-B", "--perbatch", default=1, type=int) 17 | parser.add_argument("-C", "--classes", default=None) 18 | parser.add_argument("-t", "--timeout", default=-1, type=float) 19 | args = parser.parse_args() 20 | 21 | if args.input == []: 22 | args.input = ["zsub://127.0.0.1:%d" % i for i in range(7880, 7884)] 23 | 24 | classtable = {} 25 | if args.classes is not None: 26 | with open(args.classes) as stream: 27 | classtable = simplejson.load(stream) 28 | 29 | 30 | def isimage(a, batched=True, minsize=24): 31 | if not isinstance(a, np.ndarray): 32 | return False 33 | if batched: 34 | a = a[0] 35 | if sum([d >= minsize for d in a.shape]) < 2: 36 | return False 37 | return True 38 | 39 | 40 | def smartshow(img, batch_index=None, cmap=cm.viridis): 41 | if index is not None: 42 | img = img[batch_index] 43 | if img.dtype == np.uint8: 44 | img = img.astype(np.float32) / 255.0 45 | img = img.astype(np.float32) 46 | if np.amin(img) < 0 or np.amax(img) > 1: 47 | 
img -= np.amin(img) 48 | img /= np.amax(img) 49 | if img.ndim == 3: 50 | if img.shape[0] in [3, 4]: 51 | plt.imshow(img.transpose(1, 2, 0)[..., :3]) 52 | elif img.shape[0] == 1: 53 | plt.imshow(img[0], cmap=cmap) 54 | elif img.shape[-1] in [3, 4]: 55 | plt.imshow(img[..., :3]) 56 | elif img.shape[-1] == 1: 57 | plt.imshow(img[..., 0], cmap=cmap) 58 | elif img.ndim == 2: 59 | plt.imshow(img, cmap=cmap) 60 | 61 | 62 | def info(x): 63 | if isinstance(x, np.ndarray): 64 | print("{} {} {} {}".format(x.dtype, x.shape, np.amin(x), np.amax(x))) 65 | else: 66 | print("{} ".format(str(x)[:50])) 67 | 68 | 69 | plt.ion() 70 | source = tensorcom.Connection(device=None) 71 | for c in args.input: 72 | print(c) 73 | source.connect(c) 74 | 75 | if args.mode == "imgclass": 76 | for img, cls in source.items(): 77 | print(img.shape, img.dtype, np.amin(img), np.amax(img)) 78 | print(cls.shape, cls.dtype, np.amin(cls), np.amax(cls)) 79 | if args.unbatched: 80 | img = np.array([img]) 81 | for index in range(min(len(img), args.perbatch)): 82 | plt.clf() 83 | smartshow(img, index) 84 | c = cls[index] 85 | c = classtable.get(c, c) 86 | plt.title("[{}] class = {}".format(index, c)) 87 | plt.show() 88 | plt.waitforbuttonpress(timeout=args.timeout) 89 | if not plt.fignum_exists(1): 90 | sys.exit(0) 91 | elif args.mode == "img2img": 92 | for img, img2 in source.items(): 93 | print(img.shape, img.dtype, np.amin(img), np.amax(img)) 94 | print(img2.shape, img2.dtype, np.amin(img2), np.amax(img2)) 95 | if args.unbatched: 96 | img = np.array([img]) 97 | img2 = np.array([img2]) 98 | for index in range(min(len(img), args.perbatch)): 99 | plt.clf() 100 | plt.subplot(121) 101 | smartshow(img, index) 102 | plt.subplot(122) 103 | smartshow(img2, index) 104 | plt.title("[{}]".format(index)) 105 | plt.show() 106 | plt.waitforbuttonpress(timeout=args.timeout) 107 | if not plt.fignum_exists(1): 108 | sys.exit(0) 109 | 110 | else: 111 | print("{}: unknown mode".format(args.mode)) 112 | sys.exit(1) 113 | -------------------------------------------------------------------------------- /tensorstat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import argparse 4 | import sys 5 | import time 6 | from math import inf 7 | 8 | import numpy as np 9 | import tensorcom 10 | 11 | parser = argparse.ArgumentParser( 12 | """ 13 | Compute statistics over a tensor in a tensorcom input stream. 14 | 15 | Each item in a tensorcom input stream is usually a list of 16 | tensors, each representing a batch. 
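Example: tensorstat -c 50 zsub://127.0.0.1:7880
reads 50 batches and prints, for each tensor position, the observed shapes and
dtypes plus min/max, mean, and standard deviation.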
17 | """ 18 | ) 19 | parser.add_argument("input", nargs="*") 20 | parser.add_argument("-b", "--unbatched", action="store_true") 21 | parser.add_argument("-c", "--count", type=int, default=20) 22 | parser.add_argument("-r", "--raw", action="store_true") 23 | args = parser.parse_args() 24 | 25 | if args.input == []: 26 | args.input = ["zsub://127.0.0.1:7880"] 27 | 28 | source = tensorcom.Connection(args.input, device=None, raw=args.raw) 29 | 30 | print("reading batches...\n") 31 | 32 | 33 | class Stats(object): 34 | def __init__(self): 35 | self.count = 0 36 | self.lo = inf 37 | self.hi = -inf 38 | self.sx = 0 39 | self.sx2 = 0 40 | self.n = 0 41 | 42 | def __iadd__(self, x): 43 | self.count += 1 44 | self.lo = min(self.lo, np.amin(x)) 45 | self.hi = max(self.hi, np.amax(x)) 46 | self.sx += np.sum(x) 47 | self.sx2 += np.sum(x ** 2) 48 | self.n += x.size 49 | return self 50 | 51 | def summary(self): 52 | return "{:d} [{:.3g} {:.3g}] mean={:.3g} std={:.3g} n={:d}".format( 53 | self.count, 54 | self.lo, 55 | self.hi, 56 | self.sx / self.n, 57 | (self.sx2 / self.n - (self.sx / self.n) ** 2) ** 0.5, 58 | self.n, 59 | ) 60 | 61 | 62 | shapes = [set() for _ in range(10)] 63 | stats = [Stats() for _ in range(10)] 64 | ninputs = 0 65 | 66 | start = time.time() 67 | for i, batch in enumerate(source.items()): 68 | if i >= args.count: 69 | break 70 | if args.raw: 71 | continue 72 | ninputs = max(ninputs, len(batch)) 73 | for i, a in enumerate(batch): 74 | if not isinstance(a, np.ndarray): 75 | continue 76 | shapes[i].add((str(a.dtype),) + tuple(a.shape)) 77 | stats[i] += a.astype(np.float32) 78 | 79 | finish = time.time() 80 | 81 | if args.raw: 82 | print(source.stats.summary()) 83 | sys.exit(0) 84 | 85 | print("Source:") 86 | print(source.stats.summary()) 87 | print() 88 | 89 | for i in range(ninputs): 90 | print("=== Input {} ===\n".format(i)) 91 | if stats[i].count == 0: 92 | print("not a tensor") 93 | else: 94 | print(stats[i].summary()) 95 | print(shapes[i]) 96 | print() 97 | -------------------------------------------------------------------------------- /test/test_tenbin.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tensorcom import zcom 3 | from tensorcom import tenbin 4 | 5 | 6 | def test_str64(): 7 | s = "hello" 8 | assert s == tenbin.unstr64(tenbin.str64(s)) 9 | 10 | 11 | def test_header(): 12 | a = np.random.uniform(size=(12, 19)).astype("f") 13 | shape, dtype, info = tenbin.decode_header(tenbin.encode_header(a, "data")) 14 | assert a.shape == shape, (a.shape, shape) 15 | assert a.dtype == dtype, (a.dtype, dtype) 16 | assert info == "data" 17 | 18 | 19 | def test_list(): 20 | a = np.random.uniform(size=(12, 19)).astype("f") 21 | encoded = tenbin.encode_list([a, a], infos=["tensor1", "tensor2"]) 22 | b, c = tenbin.decode_list(encoded, infos=["tensor1", "tensor2"]) 23 | assert (a == b).all() 24 | assert (a == c).all() 25 | 26 | 27 | def test_buffer(): 28 | a = np.random.uniform(size=(1, 3, 4)).astype("float32") 29 | encoded = tenbin.encode_buffer([a, a], infos=["input", "target"]) 30 | b, c = tenbin.decode_buffer(encoded, infos=["input", "target"]) 31 | assert (a == b).all() 32 | assert (a == c).all() 33 | 34 | 35 | def test_buffer(): 36 | a = np.random.uniform(size=(7, 7)).astype("float16") 37 | tenbin.save("/tmp/_temp.ten", a, a, infos=["hello", "world"]) 38 | b, c = tenbin.load("/tmp/_temp.ten", infos=["hello", "world"]) 39 | assert (a == b).all() 40 | assert (a == c).all() 41 | 42 | 43 | def test_zmq(): 44 | import zmq 
45 | import random 46 | 47 | port = random.randint(17000, 18999) 48 | con = zmq.Context() 49 | sink = con.socket(zmq.PULL) 50 | sink.bind(f"tcp://127.0.0.1:{port}") 51 | source = con.socket(zmq.PUSH) 52 | source.connect(f"tcp://127.0.0.1:{port}") 53 | a = np.random.uniform(size=(7, 7)).astype("float16") 54 | tenbin.zsend_multipart(source, [a, a], infos=["za", "zb"]) 55 | b, c = tenbin.zrecv_multipart(sink, infos=["za", "zb"]) 56 | sink.close() 57 | source.close() 58 | del sink 59 | del source 60 | assert (a == b).all() 61 | assert (a == c).all() 62 | 63 | 64 | def no_test_sctp(): 65 | import socket 66 | import sctp 67 | import random 68 | 69 | port = random.randint(17000, 18999) 70 | sk = sctp.sctpsocket_tcp(socket.AF_INET) 71 | sk.bind(("0.0.0.0", port)) 72 | sk.listen(20) 73 | sr = sctp.sctpsocket_tcp(socket.AF_INET) 74 | a = np.random.uniform(size=(17)).astype("float16") 75 | tenbin.sctp_send(sk, ("127.0.0.1", port), [a, a]) 76 | b, c = tenbin.sctp_recv(sr) 77 | sk.close() 78 | sr.close() 79 | del sk 80 | del sr 81 | assert (a == b).all() 82 | assert (a == c).all() 83 | -------------------------------------------------------------------------------- /test/test_zcom.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tensorcom import zcom 3 | import random 4 | 5 | 6 | def test_zmq(): 7 | port = random.randint(17000, 38999) 8 | source = zcom.Connection(f"zpush://127.0.0.1:{port}") 9 | sink = zcom.Connection(f"zpull://127.0.0.1:{port}") 10 | a = np.random.uniform(size=(9, 7)).astype("float16") 11 | source.send([a, a]) 12 | b, c = sink.recv() 13 | del sink 14 | del source 15 | assert (a == b).all() 16 | assert (a == c).all() 17 | --------------------------------------------------------------------------------