├── .gitignore ├── COPYING.txt ├── Dockerfile ├── LICENSE.txt ├── README.md ├── cslaier.cfg ├── docker_config └── cslaier.cfg ├── docs └── img │ └── ss │ ├── dataset.png │ ├── model_detail.png │ ├── top_view.png │ └── train_result.png ├── examples ├── README.md ├── inspection │ ├── README.md │ └── inspect_example.py └── prediction │ ├── README.md │ └── predict.py ├── inspection_temp └── .gitkeep ├── logs └── .gitkeep ├── main.py ├── prepared_data └── .gitkeep ├── requirements.txt ├── run.sh ├── scheme ├── cslaier.sql ├── migration_00.sql ├── migration_20160208.sql ├── migration_20160209.sql ├── migration_20160314.sql ├── migration_20160513.sql └── migration_20160617.sql ├── setup.sh ├── src ├── common │ ├── __init__.py │ ├── nvidia_devices_info.py │ ├── strings.py │ └── utils.py ├── db_models │ ├── __init__.py │ ├── datasets.py │ ├── models.py │ └── shared_models.py ├── deeplearning │ ├── __init__.py │ ├── log_subscriber.py │ ├── predict │ │ ├── __init__.py │ │ ├── imagenet_inspect.py │ │ └── text_predict.py │ ├── prepare │ │ ├── __init__.py │ │ ├── prepare_for_imagenet.py │ │ └── prepare_for_lstm.py │ ├── runner.py │ ├── train │ │ ├── __init__.py │ │ ├── train_imagenet.py │ │ ├── train_lstm.py │ │ └── utils.py │ └── visualizer.py ├── main.py ├── model_templates │ ├── alex.py │ ├── alexbn.py │ ├── alexnet_tf.py │ ├── googlenet.py │ ├── googlenetbn.py │ ├── nin.py │ └── uei_lstm.py ├── models │ └── __init__.py ├── profiler.py ├── static │ ├── CodeMirror │ │ ├── lib │ │ │ ├── codemirror.css │ │ │ └── codemirror.js │ │ └── mode │ │ │ └── python │ │ │ ├── index.html │ │ │ └── python.js │ ├── css │ │ ├── bootstrap.css │ │ ├── bootstrap.css.map │ │ ├── bootstrap.min.css │ │ └── cslaier.css │ ├── fonts │ │ ├── glyphicons-halflings-regular.eot │ │ ├── glyphicons-halflings-regular.svg │ │ ├── glyphicons-halflings-regular.ttf │ │ ├── glyphicons-halflings-regular.woff │ │ └── glyphicons-halflings-regular.woff2 │ ├── html │ │ └── gpu_usage.html │ ├── img │ │ ├── logo.png │ │ └── logo_hover.png │ └── js │ │ ├── bootstrap.js │ │ ├── bootstrap.min.js │ │ ├── cslaier.js │ │ ├── d3.min.js │ │ ├── enchant.js │ │ ├── gpu_meter.js │ │ ├── jquery-1.11.3.min.js │ │ ├── moment.js │ │ └── underscore.js └── templates │ ├── admin │ ├── datasets.html │ ├── index.html │ └── models.html │ ├── common │ ├── base.html │ ├── gpu_info.html │ ├── gpu_script.html │ ├── header.html │ ├── macro.html │ ├── resource_info.html │ └── version_info.html │ ├── dataset │ ├── show_category_detail.html │ └── show_dataset.html │ ├── index.html │ ├── index_partial │ ├── datasets.html │ ├── modals.html │ └── models.html │ └── model │ ├── inspect_result.html │ ├── new.html │ ├── partial │ ├── create_new_network_modals.html │ ├── in_progress.html │ ├── not_trained.html │ ├── prediction_modals.html │ ├── resume_train_modals.html │ ├── start_train_modals.html │ └── trained.html │ └── show.html ├── temp └── .gitkeep ├── trained_data └── .gitkeep ├── uploaded_files └── .gitkeep └── uploaded_raw_files └── .gitkeep /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | deepstation.db 3 | cslaier.db 4 | uploaded_files/* 5 | uploaded_raw_files/* 6 | train_images/* 7 | inspection_temp/* 8 | trained_data/* 9 | prepared_data/* 10 | temp/* 11 | logs/* 12 | src/models/*.py 13 | .python-version 14 | !src/models/__init__.py 15 | !.gitkeep -------------------------------------------------------------------------------- /COPYING.txt: 
-------------------------------------------------------------------------------- 1 | ------------------------------------------------------------ 2 | Bootstrap 3 | 4 | The MIT License (MIT) 5 | 6 | Copyright (c) 2011-2016 Twitter, Inc. 7 | 8 | Permission is hereby granted, free of charge, to any person obtaining a copy 9 | of this software and associated documentation files (the "Software"), to deal 10 | in the Software without restriction, including without limitation the rights 11 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | copies of the Software, and to permit persons to whom the Software is 13 | furnished to do so, subject to the following conditions: 14 | 15 | The above copyright notice and this permission notice shall be included in 16 | all copies or substantial portions of the Software. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 24 | THE SOFTWARE. 25 | 26 | ------------------------------------------------------------ 27 | CodeMirror 28 | 29 | Copyright (C) 2016 by Marijn Haverbeke and others 30 | 31 | Permission is hereby granted, free of charge, to any person obtaining a copy 32 | of this software and associated documentation files (the "Software"), to deal 33 | in the Software without restriction, including without limitation the rights 34 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 35 | copies of the Software, and to permit persons to whom the Software is 36 | furnished to do so, subject to the following conditions: 37 | 38 | The above copyright notice and this permission notice shall be included in 39 | all copies or substantial portions of the Software. 40 | 41 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 42 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 43 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 44 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 45 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 46 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 47 | THE SOFTWARE. 48 | 49 | ------------------------------------------------------------ 50 | Data-Driven Documents 51 | 52 | Copyright (c) 2010-2016, Michael Bostock 53 | All rights reserved. 54 | 55 | Redistribution and use in source and binary forms, with or without 56 | modification, are permitted provided that the following conditions are met: 57 | 58 | * Redistributions of source code must retain the above copyright notice, this 59 | list of conditions and the following disclaimer. 60 | 61 | * Redistributions in binary form must reproduce the above copyright notice, 62 | this list of conditions and the following disclaimer in the documentation 63 | and/or other materials provided with the distribution. 64 | 65 | * The name Michael Bostock may not be used to endorse or promote products 66 | derived from this software without specific prior written permission. 
67 | 68 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 69 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 70 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 71 | DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT, 72 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 73 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 74 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 75 | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 76 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 77 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 78 | 79 | ------------------------------------------------------------ 80 | enchant.js 81 | 82 | Copyright (c) 2011-2012 UEI Corporation 83 | 84 | Permission is hereby granted, free of charge, to any person obtaining a copy 85 | of this software and associated documentation files (the "Software"), to deal 86 | in the Software without restriction, including without limitation the rights 87 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 88 | copies of the Software, and to permit persons to whom the Software is 89 | furnished to do so, subject to the following conditions: 90 | 91 | The above copyright notice and this permission notice shall be included in 92 | all copies or substantial portions of the Software. 93 | 94 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 95 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 96 | FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE 97 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 98 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 99 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 100 | THE SOFTWARE. 101 | 102 | ------------------------------------------------------------ 103 | jQuery 104 | 105 | Copyright jQuery Foundation and other contributors, https://jquery.org/ 106 | 107 | This software consists of voluntary contributions made by many 108 | individuals. For exact contribution history, see the revision history 109 | available at https://github.com/jquery/jquery 110 | 111 | The following license applies to all parts of this software except as 112 | documented below: 113 | 114 | ==== 115 | 116 | Permission is hereby granted, free of charge, to any person obtaining 117 | a copy of this software and associated documentation files (the 118 | "Software"), to deal in the Software without restriction, including 119 | without limitation the rights to use, copy, modify, merge, publish, 120 | distribute, sublicense, and/or sell copies of the Software, and to 121 | permit persons to whom the Software is furnished to do so, subject to 122 | the following conditions: 123 | 124 | The above copyright notice and this permission notice shall be 125 | included in all copies or substantial portions of the Software. 126 | 127 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 128 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 129 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 130 | NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 131 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 132 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 133 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 134 | 135 | ==== 136 | ------------------------------------------------------------ 137 | Moment.js 138 | 139 | Copyright (c) 2011-2016 Tim Wood, Iskren Chernev, Moment.js contributors 140 | 141 | Permission is hereby granted, free of charge, to any person 142 | obtaining a copy of this software and associated documentation 143 | files (the "Software"), to deal in the Software without 144 | restriction, including without limitation the rights to use, 145 | copy, modify, merge, publish, distribute, sublicense, and/or sell 146 | copies of the Software, and to permit persons to whom the 147 | Software is furnished to do so, subject to the following 148 | conditions: 149 | 150 | The above copyright notice and this permission notice shall be 151 | included in all copies or substantial portions of the Software. 152 | 153 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 154 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 155 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 156 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 157 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 158 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 159 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 160 | OTHER DEALINGS IN THE SOFTWARE. 161 | 162 | ------------------------------------------------------------ 163 | Underscore.js 164 | 165 | Copyright (c) 2009-2016 Jeremy Ashkenas, DocumentCloud and Investigative 166 | Reporters & Editors 167 | 168 | Permission is hereby granted, free of charge, to any person 169 | obtaining a copy of this software and associated documentation 170 | files (the "Software"), to deal in the Software without 171 | restriction, including without limitation the rights to use, 172 | copy, modify, merge, publish, distribute, sublicense, and/or sell 173 | copies of the Software, and to permit persons to whom the 174 | Software is furnished to do so, subject to the following 175 | conditions: 176 | 177 | The above copyright notice and this permission notice shall be 178 | included in all copies or substantial portions of the Software. 179 | 180 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 181 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 182 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 183 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 184 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 185 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 186 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 187 | OTHER DEALINGS IN THE SOFTWARE. 
188 | 189 | ------------------------------------------------------------ 190 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | MAINTAINER UEI Corporation 4 | 5 | ENV TF_BINARY_URL https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl 6 | ENV APPROOT /cslaier 7 | ENV CSLAIER_CONFIG $APPROOT/docker_config/cslaier.cfg 8 | 9 | RUN apt-get -y update && \ 10 | apt-get -y install \ 11 | python \ 12 | python-dev \ 13 | python-pip \ 14 | python-opencv \ 15 | python-matplotlib \ 16 | sqlite3 \ 17 | libhdf5-dev \ 18 | nkf \ 19 | python-scipy && \ 20 | apt-get clean && \ 21 | rm -rf /var/lib/apt/lists/* && \ 22 | pip install --upgrade $TF_BINARY_URL && \ 23 | mkdir -p $APPROOT 24 | 25 | EXPOSE 8080 26 | WORKDIR $APPROOT 27 | COPY ./ $APPROOT 28 | RUN pip install -r requirements.txt && \ 29 | sh setup.sh 30 | 31 | # http://stackoverflow.com/questions/31768441/how-to-persist-ln-in-docker-with-ubuntu 32 | CMD sh -c 'ln -s /dev/null /dev/raw1394'; sh run.sh 33 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2015-2016 Sony Computer Science Laboratories, Inc. 2 | 3 | 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | 13 | 14 | The above copyright notice and this permission notice shall be included in 15 | all copies or substantial portions of the Software. 16 | 17 | 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 25 | THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /cslaier.cfg: -------------------------------------------------------------------------------- 1 | HOST='localhost' 2 | PORT=8080 3 | DATABASE_PATH='cslaier.db' 4 | DEBUG=True 5 | UPLOADED_RAW_FILE='uploaded_raw_files' 6 | UPLOADED_FILE='uploaded_files' 7 | PREPARED_DATA='prepared_data' 8 | TRAINED_DATA='trained_data' 9 | INSPECTION_TEMP='inspection_temp' 10 | LOG_DIR='logs' -------------------------------------------------------------------------------- /docker_config/cslaier.cfg: -------------------------------------------------------------------------------- 1 | HOST='0.0.0.0' 2 | PORT=8080 3 | DATABASE_PATH='cslaier.db' 4 | DEBUG=True 5 | UPLOADED_RAW_FILE='uploaded_raw_files' 6 | UPLOADED_FILE='uploaded_files' 7 | PREPARED_DATA='prepared_data' 8 | TRAINED_DATA='trained_data' 9 | INSPECTION_TEMP='inspection_temp' 10 | LOG_DIR='logs' -------------------------------------------------------------------------------- /docs/img/ss/dataset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/docs/img/ss/dataset.png -------------------------------------------------------------------------------- /docs/img/ss/model_detail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/docs/img/ss/model_detail.png -------------------------------------------------------------------------------- /docs/img/ss/top_view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/docs/img/ss/top_view.png -------------------------------------------------------------------------------- /docs/img/ss/train_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/docs/img/ss/train_result.png -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | Examples 2 | ========= 3 | -------------------------------------------------------------------------------- /examples/inspection/README.md: -------------------------------------------------------------------------------- 1 | Inspecting images with a model trained in CSLAIER 2 | ======================================= 3 | 4 | This is a sample that performs image inspection from the command line using a trained model. 5 | 6 | Getting the required files 7 | ----------------- 8 | 9 | * Open the page of the trained model in CSLAIER. 10 | * Select the epoch you want to use for the inspection. 11 | * Press the `Download Model` button to download the trained model. 12 | * Press the `Download Label` button to download the `labels.txt` used for training. 13 | * Press the `Download Mean File` button to download the `mean.npy` used for training. 14 | * On the `Network` tab, copy the network used for training and save it under any file name. 15 | The file name must end with `.py`. 16 | 17 | Running inspect_example.py 18 | ---------------- 19 | 20 | * Place the four files prepared above in a directory of your choice. 21 | * A directory layout like the following is assumed: 22 | 23 | inspection 24 | ├── network.py # network 25 | ├── image_to_inspect.jpg # the image to inspect 26 | ├── inspect_example.py 27 | ├── labels.txt 28 | ├── mean.npy 29 | └── trained_model # trained model 30 | 31 | * Run the following in the directory containing `inspect_example.py`: 32 | 33 | $python inspect_example.py image_to_inspect.jpg network.py trained_model 34 | 35 | * For details on the command-line arguments, see `$python inspect_example.py --help`. 36 |
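 37 | The script prints a ranked table of the top-20 labels. The header below is what the script actually emits; the rows are only a hypothetical illustration (names come from your `labels.txt`, scores from your model): 38 | 39 |     Rank :Name                                     Score 40 |     ---------------------------------------------------- 41 |     1    :cat                                      85.2% 42 |     2    :dog                                       9.8% 43 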
| -------------------------------------------------------------------------------- /examples/inspection/inspect_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import argparse 4 | import numpy as np 5 | import random 6 | import re 7 | import imp 8 | from PIL import Image 9 | import scipy.misc 10 | import cPickle as pickle 11 | from chainer import cuda 12 | from chainer import serializers 13 | try: 14 | import tensorflow as tf 15 | except ImportError: 16 | pass 17 | 18 | 19 | def load_module(target): 20 | network = target.split(os.sep)[-1] 21 | model_name = re.sub(r"\.py$", "", network) 22 | (file, path, description) = imp.find_module(model_name, [os.path.dirname(target)]) 23 | return imp.load_module(model_name, file, path, description) 24 | 25 | 26 | def read_image(path, height, width, 27 | resize_mode="squash", channels=3, flip=False): 28 | """ 29 | Load an image from disk 30 | 31 | Returns an np.ndarray (channels x width x height) 32 | 33 | Arguments: 34 | path -- path to an image on disk 35 | width -- resize dimension 36 | height -- resize dimension 37 | 38 | Keyword arguments: 39 | channels -- the PIL mode that the image should be converted to 40 | (3 for color or 1 for grayscale) 41 | resize_mode -- can be crop, squash, fill or half_crop 42 | flip -- flag for flipping 43 | """ 44 | 45 | if channels == 1: 46 | mode = "L" 47 | else: 48 | mode = "RGB" 49 | 50 | image = Image.open(path) 51 | image = image.convert(mode) 52 | image = np.array(image) 53 | 54 | # Resize 55 | interp = 'bilinear' 56 | 57 | width_ratio = float(image.shape[1]) / width 58 | height_ratio = float(image.shape[0]) / height 59 | if resize_mode == 'squash' or width_ratio == height_ratio: 60 | return scipy.misc.imresize(image, (height, width), interp=interp) 61 | elif resize_mode == 'crop': 62 | # resize to smallest of ratios (relatively larger image), keeping aspect ratio 63 | if width_ratio > height_ratio: 64 | resize_height = height 65 | resize_width = int(round(image.shape[1] / height_ratio)) 66 | else: 67 | resize_width = width 68 | resize_height = int(round(image.shape[0] / width_ratio)) 69 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 70 | 71 | # chop off ends of dimension that is still too long 72 | if width_ratio > height_ratio: 73 | start = int(round((resize_width-width)/2.0)) 74 | return image[:, start:start+width] 75 | else: 76 | start = int(round((resize_height-height)/2.0)) 77 | return image[start:start+height, :] 78 | else: 79 | if resize_mode == 'fill': 80 | # resize to biggest of ratios (relatively smaller image), keeping aspect ratio 81 | if width_ratio > height_ratio: 82 | resize_width = width 83 | resize_height = int(round(image.shape[0] / width_ratio)) 84 | if (height - resize_height) % 2 == 1: 85 | resize_height += 1 86 | else: 87 | resize_height = height 88 | resize_width = int(round(image.shape[1] / height_ratio)) 89 | if (width - resize_width) % 2 == 1: 90 | resize_width += 1 91 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 92 | elif resize_mode == 'half_crop': 93 | # resize to average ratio keeping aspect ratio 94 | new_ratio = (width_ratio + height_ratio) / 2.0 95 | resize_width = int(round(image.shape[1] / new_ratio)) 96 | resize_height = int(round(image.shape[0] / new_ratio)) 97 | if width_ratio > height_ratio and (height - resize_height) % 2 == 1: 98 | resize_height += 1 99 | elif width_ratio < height_ratio and (width - resize_width) % 2 == 
1: 100 | resize_width += 1 101 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 102 | # chop off ends of dimension that is still too long 103 | if width_ratio > height_ratio: 104 | start = int(round((resize_width-width)/2.0)) 105 | image = image[:, start:start+width] 106 | else: 107 | start = int(round((resize_height-height)/2.0)) 108 | image = image[start:start+height, :] 109 | else: 110 | raise Exception('unrecognized resize_mode "%s"' % resize_mode) 111 | 112 | # fill ends of dimension that is too short with random noise 113 | if width_ratio > height_ratio: 114 | padding = int((height - resize_height)/2) 115 | noise_size = (padding, width) 116 | if channels > 1: 117 | noise_size += (channels,) 118 | 119 | noise = np.random.randint(0, 255, noise_size).astype('uint8') 120 | image = np.concatenate((noise, image, noise), axis=0) 121 | else: 122 | padding = int((width - resize_width)/2) 123 | noise_size = (height, padding) 124 | if channels > 1: 125 | noise_size += (channels,) 126 | noise = np.random.randint(0, 255, noise_size).astype('uint8') 127 | image = np.concatenate((noise, image, noise), axis=1) 128 | 129 | if flip and random.randint(0, 1) == 0: 130 | return np.fliplr(image) 131 | else: 132 | return image 133 | 134 | 135 | def inspect_by_chainer(image_path, mean, model_path, label, 136 | network_path, resize_mode, channels, gpu=-1): 137 | model_module = load_module(network_path) 138 | mean_image = pickle.load(open(mean, 'rb')) 139 | model = model_module.Network() 140 | serializers.load_hdf5(model_path, model) 141 | if gpu >= 0: 142 | cuda.check_cuda_available() 143 | cuda.get_device(gpu).use() 144 | model.to_gpu() 145 | 146 | img = read_image(image_path, 256, 256, resize_mode, channels) 147 | cropwidth = 256 - model.insize 148 | top = left = cropwidth / 2 149 | bottom = model.insize + top 150 | right = model.insize + left 151 | 152 | if img.ndim == 3: 153 | img = img.transpose(2, 0, 1) 154 | img = img[:, top:bottom, left:right].astype(np.float32) 155 | else: 156 | img = img[top:bottom, left:right].astype(np.float32) 157 | zeros = np.zeros((model.insize, model.insize)) 158 | img = np.array([img, zeros, zeros]) 159 | img -= mean_image[:, top:bottom, left:right] 160 | img /= 255 161 | 162 | x = np.ndarray((1, 3, model.insize, model.insize), dtype=np.float32) 163 | x[0] = img 164 | 165 | if gpu >= 0: 166 | x = cuda.to_gpu(x) 167 | score = model.predict(x) 168 | score = cuda.to_cpu(score.data) 169 | categories = np.loadtxt(label, str, delimiter="\t") 170 | top_k = 20 171 | prediction = zip(score[0].tolist(), categories) 172 | prediction.sort(cmp=lambda x, y: cmp(x[0], y[0]), reverse=True) 173 | ret = [] 174 | for rank, (score, name) in enumerate(prediction[:top_k], start=1): 175 | ret.append({"rank": rank, "name": name, "score": "{0:4.1f}%".format(score*100)}) 176 | return ret 177 | 178 | 179 | def inspect_by_tensorflow(image_path, mean, model_path, label, 180 | network_path, resize_mode, channels, gpu=0): 181 | model_module = load_module(network_path) 182 | img = read_image(image_path, 128, 128, resize_mode, channels) 183 | if img.ndim == 3: 184 | img = img.transpose(2, 0, 1).astype(np.float32) 185 | else: 186 | zeros = np.zeros((128, 128)) 187 | img = np.array([img, zeros, zeros]).astype(np.float32) 188 | mean_image = pickle.load(open(mean, 'rb')) 189 | img -= mean_image[:, 0:128, 0:128] 190 | feed_data = [] 191 | feed_data.append(img.flatten().astype(np.float32) / 255.0) 192 | 
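# NOTE: this example feeds the image flattened to a 128*128*3 vector, whereas src/deeplearning/predict/imagenet_inspect.py feeds an unflattened [None, 128, 128, 3] tensor, so the two paths appear to expect networks with different input layouts.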
 193 | images_placeholder = tf.placeholder(tf.float32, [None, 128*128*3]) 194 | keep_prob = tf.placeholder(tf.float32) 195 | 196 | logits = model_module.inference(images_placeholder, keep_prob) 197 | prediction_op = tf.nn.top_k(tf.nn.softmax(logits), k=20) 198 | 199 | with tf.Session() as sess: 200 | sess.run(tf.initialize_all_variables()) 201 | saver = tf.train.Saver() 202 | saver.restore(sess, model_path) 203 | (values, indices) = sess.run(prediction_op, feed_dict={ 204 | images_placeholder: feed_data, 205 | keep_prob: 1.0, 206 | }) 207 | 208 | categories = np.loadtxt(label, str, delimiter="\t") 209 | ret = [] 210 | for i, idx in enumerate(indices[0]): 211 | ret.append({ 212 | 'rank': i+1, 213 | 'name': categories[idx], 214 | 'score': "{:4.1f}%".format(values[0][i]*100) 215 | }) 216 | return ret 217 | 218 | 219 | if __name__ == '__main__': 220 | parser = argparse.ArgumentParser(description='Do inspection by command line') 221 | parser.add_argument('image_to_inspect', help='Path to the image file which you want to inspect') 222 | parser.add_argument('network', help='Path to the network model file') 223 | parser.add_argument('model', help='Path to the trained model (downloaded from CSLAIER)') 224 | parser.add_argument('--label', '-l', default='labels.txt', 225 | help='Path to the labels.txt file (downloaded from CSLAIER)') 226 | parser.add_argument('--mean', '-m', default='mean.npy', 227 | help='Path to the mean file (downloaded from CSLAIER)') 228 | parser.add_argument('--gpu', '-g', default=-1, type=int, 229 | help='GPU ID (negative value indicates CPU)') 230 | parser.add_argument('--resize_mode', '-r', default='squash', 231 | help='can be crop, squash, fill or half_crop') 232 | parser.add_argument('--channels', '-c', default='3', 233 | help='3 for RGB or 1 for grayscale') 234 | parser.add_argument('--framework', '-f', default='chainer', 235 | help='chainer or tensorflow') 236 | args = parser.parse_args() 237 | 238 | if args.framework == 'chainer': 239 | results = inspect_by_chainer(args.image_to_inspect, args.mean, args.model, args.label, 240 | args.network, args.resize_mode, int(args.channels), args.gpu) 241 | elif args.framework == 'tensorflow': 242 | results = inspect_by_tensorflow(args.image_to_inspect, args.mean, args.model, args.label, 243 | args.network, args.resize_mode, int(args.channels), args.gpu) 244 | else: 245 | print 'Unknown Framework' 246 | sys.exit() 247 | print "{rank:<5}:{name:<40} {score}".format(rank='Rank', name='Name', score='Score') 248 | print "----------------------------------------------------" 249 | for result in results: 250 | print "{rank:<5}:{name:<40} {score}".format(**result) 251 | -------------------------------------------------------------------------------- /examples/prediction/README.md: -------------------------------------------------------------------------------- 1 | Generating text with a model trained in CSLAIER 2 | ======================================= 3 | 4 | This is a sample that performs automatic text generation (prediction) from the command line using a trained model. 5 | 6 | Getting the required files 7 | ----------------- 8 | 9 | * Open the page of the trained model in CSLAIER. 10 | * Select the epoch you want to use for the prediction. 11 | * Press the `Download Model` button to download the trained model. 12 | * Press the `Download Vocab File` button to download the `vocab.bin` used for training. 13 | * On the `Network` tab, copy the network used for training and save it under any file name. 14 | The file name must end with `.py`. 15 | 16 | Running predict.py 17 | ---------------- 18 | 19 | * Place the three files prepared above in a directory of your choice. 20 | * A directory layout like the following is assumed: 21 | 22 | predict 23 | ├── network.py # network 24 | ├── predict.py 25 | ├── vocab.bin 26 | └── trained_model # trained model 27 | 28 | * Run the following in the directory containing `predict.py`: 29 | 30 | $python predict.py --model trained_model --vocabulary vocab.bin --network network.py --unit 256 --primetext 日本経済 --length 1000 --seed 354
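 31 | 32 | By default the next character is sampled from the model's output distribution. Passing a negative `--sample` value switches to greedy (argmax) decoding, so a deterministic variant such as the following should also work: 33 | 34 |     $python predict.py --model trained_model --vocabulary vocab.bin --network network.py --unit 256 --primetext 日本経済 --length 1000 --sample -1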
 35 | 36 | * For details on the command-line arguments, see `$python predict.py --help`. 37 | 38 | -------------------------------------------------------------------------------- /examples/prediction/predict.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Sample script of recurrent neural network language model for generating text 5 | This code is ported from following implementation. 6 | https://github.com/longjie/chainer-char-rnn/blob/master/sample.py 7 | """ 8 | 9 | import time 10 | import math 11 | import sys 12 | import argparse 13 | import cPickle as pickle 14 | 15 | import os 16 | import re 17 | import imp 18 | 19 | import random 20 | import bisect 21 | 22 | import numpy as np 23 | from chainer import cuda, Variable 24 | import chainer.functions as F 25 | import chainer.links as L 26 | from chainer import optimizers 27 | from chainer import serializers 28 | 29 | import codecs 30 | 31 | #%% arguments 32 | parser = argparse.ArgumentParser() 33 | 34 | parser.add_argument('--model', '-m', type=str, required=True, 35 | help='model data, saved by train_ptb.py') 36 | parser.add_argument('--vocabulary', '-v', type=str, required=True, 37 | help='vocabulary data, saved by train_ptb.py') 38 | parser.add_argument('--network', '-n', type=str, required=True, 39 | help='Path to the network model file') 40 | parser.add_argument('--primetext', '-p', type=str, default='', 41 | help='base text data, used for text generation') 42 | parser.add_argument('--seed', '-s', type=int, default=123, 43 | help='random seeds for text generation') 44 | parser.add_argument('--unit', '-u', type=int, default=650, 45 | help='number of units') 46 | parser.add_argument('--dropout', type=float, default=0.0, 47 | help='dropout_ratio for the network') 48 | parser.add_argument('--sample', type=int, default=1, 49 | help='negative value indicates NOT use random choice') 50 | parser.add_argument('--length', type=int, default=20, 51 | help='length of the generated text') 52 | parser.add_argument('--gpu', type=int, default=-1, 53 | help='GPU ID (negative value indicates CPU)') 54 | 55 | args = parser.parse_args() 56 | 57 | np.random.seed(args.seed) 58 | 59 | def load_module(dir_name, symbol): 60 | (file, path, description) = imp.find_module(symbol, [dir_name]) 61 | return imp.load_module(symbol, file, path, description) 62 | 63 | # load vocabulary 64 | vocab = pickle.load(open(args.vocabulary, 'rb')) 65 | ivocab = {} 66 | for c, i in vocab.items(): 67 | ivocab[i] = c 68 | 69 | n_units = args.unit 70 | 71 | network = args.network.split(os.sep)[-1] 72 | model_name = re.sub(r"\.py$", "", network) 73 | model_module = load_module(os.path.dirname(args.network), model_name) 74 | lm = model_module.Network(len(vocab), n_units, dropout_ratio=args.dropout, train=False) 75 | 76 | model = L.Classifier(lm) 77 | model.compute_accuracy = False # we only want the perplexity 78 | for param in model.params(): 79 | data = param.data 80 | data[:] = np.random.uniform(-0.1, 0.1, data.shape) 81 | 82 | serializers.load_npz(args.model, model) 83 | 84 | if args.gpu >= 0: 85 | cuda.init() 86 | model.to_gpu() 87 | 88 | model.predictor.reset_state() # initialize state 89 | 90 | global prev_char 91 | 92 | prev_char = np.array([0]) 93 | if args.gpu >= 0: 94 | prev_char = cuda.to_gpu(prev_char) 95 | 96 | sys.stdout = codecs.getwriter('utf_8')(sys.stdout) 97 | 98 | if 
len(args.primetext) > 0: 99 | for i in unicode(args.primetext, 'utf-8'): 100 | sys.stdout.write(i) 101 | prev_char = Variable(np.ones((1,)).astype(np.int32) * vocab[i]) 102 | if args.gpu >= 0: 103 | prev_char = cuda.to_gpu(prev_char) 104 | 105 | prob = model.predictor.predict(prev_char) 106 | 107 | for i in xrange(args.length): 108 | prob = model.predictor.predict(prev_char) 109 | 110 | if args.sample > 0: 111 | probability = cuda.to_cpu(prob.data)[0].astype(np.float64) 112 | probability /= np.sum(probability) 113 | index = np.random.choice(range(len(probability)), p=probability) 114 | else: 115 | index = np.argmax(cuda.to_cpu(prob.data)) 116 | 117 | sys.stdout.write(ivocab[index]) 118 | 119 | prev_char = Variable(np.ones((1,)).astype(np.int32) * vocab[ivocab[index]]) 120 | if args.gpu >= 0: 121 | prev_char = cuda.to_gpu(prev_char) 122 | 123 | -------------------------------------------------------------------------------- /inspection_temp/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/inspection_temp/.gitkeep -------------------------------------------------------------------------------- /logs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/logs/.gitkeep -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | print "`python main.py` will be deprecated. Please use `./run.sh` instead." 3 | cmd = "./run.sh" 4 | subprocess.call(cmd, shell=True) 5 | -------------------------------------------------------------------------------- /prepared_data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/prepared_data/.gitkeep -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==0.10.1 2 | Flask-SQLAlchemy==2.1 3 | Cython>=0.24 4 | chainer>=1.8.1 5 | nkf>=0.2.0 6 | pillow>=3.2.0 7 | numpy>=1.11.0 8 | scipy>=0.15.1 9 | h5py==2.6.0 10 | chardet==2.3.0 11 | gevent==1.1.2 12 | greenlet==0.4.10 13 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ -z "${CSLAIER_CONFIG+x}" ] ; then 3 | export CSLAIER_CONFIG=$(pwd)/cslaier.cfg 4 | fi 5 | 6 | OPT=${1:-0} 7 | 8 | if [ $OPT = '-profiler' ]; then 9 | python $(pwd)/src/profiler.py 10 | else 11 | python $(pwd)/src/main.py 12 | fi 13 | -------------------------------------------------------------------------------- /scheme/cslaier.sql: -------------------------------------------------------------------------------- 1 | create table if not exists Model( 2 | id integer primary key AUTOINCREMENT, 3 | name text unique not null, 4 | epoch integer Default 1, 5 | algorithm text, 6 | network_name text, 7 | is_trained integer check(is_trained = 0 or is_trained = 1 or is_trained = 2) Default 0, 8 | network_path text, 9 | trained_model_path text, 10 | graph_data_path text, 11 | line_graph_data_path text, 12 | dataset_id integer, 13 | prepared_file_path integer, 14 | 
updated_at timestamp, 15 | created_at timestamp default current_timestamp, 16 | pid integer Default null, 17 | resize_mode text, 18 | channels integer Default 3, 19 | type text, 20 | use_wakatigaki integer default 0, 21 | framework text, 22 | gpu integer, 23 | batchsize integer 24 | ); 25 | 26 | create table if not exists Dataset( 27 | id integer primary key AUTOINCREMENT, 28 | name text unique not null, 29 | dataset_path text, 30 | updated_at timestamp, 31 | created_at timestamp default current_timestamp, 32 | type text, 33 | category_num int, 34 | file_num int 35 | ); 36 | -------------------------------------------------------------------------------- /scheme/migration_00.sql: -------------------------------------------------------------------------------- 1 | create table Model( 2 | id integer primary key AUTOINCREMENT, 3 | name text unique not null, 4 | epoch integer Default 1, 5 | algorithm text, 6 | network_name text, 7 | is_trained integer check(is_trained = 0 or is_trained = 1 or is_trained = 2) Default 0, 8 | network_path text, 9 | trained_model_path text, 10 | graph_data_path text, 11 | line_graph_data_path text, 12 | dataset_id integer, 13 | prepared_file_path integer, 14 | created_at timestamp default current_timestamp 15 | ); 16 | 17 | create table Dataset( 18 | id integer primary key AUTOINCREMENT, 19 | name text unique not null, 20 | dataset_path text, 21 | updated_at timestamp, 22 | created_at timestamp default current_timestamp 23 | ); 24 | -------------------------------------------------------------------------------- /scheme/migration_20160208.sql: -------------------------------------------------------------------------------- 1 | alter table Model add column pid integer default null; -------------------------------------------------------------------------------- /scheme/migration_20160209.sql: -------------------------------------------------------------------------------- 1 | alter table Model add column resize_mode text; 2 | alter table Model add column channels integer Default 3; 3 | -------------------------------------------------------------------------------- /scheme/migration_20160314.sql: -------------------------------------------------------------------------------- 1 | alter table Model add column type text default 'image'; 2 | alter table Model add column use_wakatigaki integer default 0; 3 | alter table Dataset add column type text default 'image'; -------------------------------------------------------------------------------- /scheme/migration_20160513.sql: -------------------------------------------------------------------------------- 1 | alter table Model add column updated_at timestamp; 2 | update Model set updated_at = datetime('now'); 3 | alter table Dataset add column category_num int; 4 | alter table Dataset add column file_num int; 5 | -------------------------------------------------------------------------------- /scheme/migration_20160617.sql: -------------------------------------------------------------------------------- 1 | alter table Model add column framework text; 2 | update Model set framework = 'chainer'; 3 | alter table Model add column gpu integer; 4 | alter table Model add column batchsize integer; -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | touch cslaier.db 3 | sqlite3 cslaier.db < $(pwd)/scheme/cslaier.sql 
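 4 | # Note: cslaier.sql creates the full, current schema from scratch. When upgrading 5 | # a cslaier.db created by an older release, the dated scheme/migration_*.sql 6 | # scripts can be applied in order instead of re-running this script, e.g.: 7 | #   sqlite3 cslaier.db < scheme/migration_20160617.sql 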
-------------------------------------------------------------------------------- /src/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/common/__init__.py -------------------------------------------------------------------------------- /src/common/strings.py: -------------------------------------------------------------------------------- 1 | EPOCH_FILE_UNDER_TRAINING_ERROR = 'The selected epoch is currently under training and cannot be used for inspection yet. Please wait for a while.' -------------------------------------------------------------------------------- /src/common/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import sys 3 | import os 4 | import random 5 | import datetime 6 | import pkg_resources 7 | import subprocess 8 | from .nvidia_devices_info import get_devices_info 9 | from xml.etree import ElementTree 10 | 11 | import nkf 12 | 13 | 14 | def get_python_version(): 15 | v = sys.version_info 16 | return str(v[0]) + '.' + str(v[1]) + '.' + str(v[2]) 17 | 18 | 19 | def get_chainer_version(): 20 | try: 21 | from chainer import __version__ as version 22 | except ImportError: 23 | return '---' 24 | return version 25 | 26 | 27 | def get_tensorflow_version(): 28 | try: 29 | from tensorflow import __version__ as version 30 | except ImportError: 31 | return '---' 32 | return version 33 | 34 | 35 | def get_disk_info(): 36 | try: 37 | df = subprocess.check_output(['df', '-h']) 38 | except: 39 | return None 40 | disks = df[:-1].split('\n') 41 | titles = disks[0].split() 42 | filesystem_index = None 43 | mounted_on_index = None 44 | for i, title in enumerate(titles): 45 | if title.startswith('Filesystem'): 46 | filesystem_index = i 47 | elif title.startswith('Mounted'): 48 | mounted_on_index = i 49 | disk_info = [] 50 | for disk in disks: 51 | row = disk.split() 52 | if row[filesystem_index].startswith('/'): 53 | st = os.statvfs(row[mounted_on_index]) 54 | disk_info.append({ 55 | 'mount': row[mounted_on_index], 56 | 'size': calculate_human_readable_filesize(st.f_frsize * st.f_blocks), 57 | 'used': calculate_human_readable_filesize(st.f_frsize * (st.f_blocks-st.f_bfree)), 58 | 'avail': calculate_human_readable_filesize(st.f_frsize * st.f_bavail) 59 | }) 60 | return disk_info 61 | 62 | 63 | def get_gpu_info(nvidia_smi_cmd='nvidia-smi'): 64 | # return get_devices_info() 65 | try: 66 | xml = subprocess.check_output([nvidia_smi_cmd, '-q', '-x']) 67 | except: 68 | return None 69 | ret = {} 70 | elem = ElementTree.fromstring(xml) 71 | ret['driver_version'] = elem.find('driver_version').text 72 | gpus = elem.findall('gpu') 73 | ret_gpus = [] 74 | for g in gpus: 75 | info = { 76 | 'product_name': g.find('product_name').text, 77 | 'uuid': g.find('uuid').text, 78 | 'fan': g.find('fan_speed').text, 79 | 'minor_number': g.find('minor_number').text 80 | } 81 | temperature = g.find('temperature') 82 | info['temperature'] = temperature.find('gpu_temp').text 83 | power = g.find('power_readings') 84 | info['power_draw'] = power.find('power_draw').text 85 | info['power_limit'] = power.find('power_limit').text 86 | memory = g.find('fb_memory_usage') 87 | info['memory_total'] = memory.find('total').text 88 | info['memory_used'] = memory.find('used').text 89 | utilization = g.find('utilization') 90 | info['gpu_util'] = utilization.find('gpu_util').text 91 | ret_gpus.append(info) 92 | 
ret_gpus.sort(cmp=lambda x, y: cmp(int(x['minor_number']), int(y['minor_number']))) 93 | ret['gpus'] = ret_gpus 94 | return ret 95 | 96 | 97 | def get_system_info(): 98 | return { 99 | 'python_version': get_python_version(), 100 | 'chainer_version': get_chainer_version(), 101 | 'tensorflow_version': get_tensorflow_version(), 102 | 'disk_info': get_disk_info(), 103 | 'gpu_info': get_gpu_info() 104 | } 105 | 106 | 107 | def is_module_available(module_name): 108 | for dist in pkg_resources.working_set: 109 | if dist.project_name.lower().find(module_name.lower()) > -1: 110 | return True 111 | return False 112 | 113 | 114 | def get_timestamp(): 115 | return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') 116 | 117 | 118 | def find_all_files(directory): 119 | for root, dirs, files in os.walk(directory): 120 | files.sort() 121 | for f in files: 122 | if f.startswith('__MACOSX') or f.startswith('.DS_Store'): 123 | continue 124 | yield os.path.join(root, f) 125 | 126 | 127 | def find_all_directories(directory): 128 | for root, dirs, files in os.walk(directory): 129 | dirs.sort() 130 | if len(dirs) == 0: 131 | yield root 132 | 133 | 134 | def count_categories(path): 135 | ch = os.listdir(path) 136 | count = 0 137 | if len(ch) == 1: 138 | if os.path.isdir(path + os.sep + ch[0]): 139 | count += count_categories(path + os.sep + ch[0]) 140 | else: 141 | for c in ch: 142 | if os.path.isdir(path + os.sep + c): 143 | count += 1 144 | return count 145 | 146 | 147 | def get_file_size_all(path): 148 | size = 0 149 | for f in find_all_files(path): 150 | size += os.path.getsize(f) 151 | return size 152 | 153 | 154 | def calculate_human_readable_filesize(byte): 155 | if byte / 1024 < 1: 156 | return str(byte) + 'bytes' 157 | elif byte / (1024 ** 2) < 1: 158 | return str(byte / 1024) + 'k bytes' 159 | elif byte / (1024 ** 3) < 1: 160 | return str(byte / (1024 ** 2)) + 'M bytes' 161 | else: 162 | return str(byte / (1024 ** 3)) + 'G Bytes' 163 | 164 | 165 | def count_files(path): 166 | ch = os.listdir(path) 167 | counter = 0 168 | for c in ch: 169 | if os.path.isdir(path + os.sep + c): 170 | counter += count_files(path + os.sep + c) 171 | else: 172 | counter += 1 173 | return counter 174 | 175 | 176 | def get_files_in_random_order(path, num): 177 | """ 178 | Randomly picks num files from under path. 179 | If there are only directories under path, files are picked from the directories below them instead. 180 | """ 181 | children_files = [] 182 | for cf in os.listdir(path): 183 | if os.path.isdir(path + os.sep + cf): 184 | if len(os.listdir(path + os.sep + cf)) != 0: 185 | children_files.append(cf) 186 | else: 187 | children_files.append(cf) 188 | children_files_num = len(children_files) 189 | if children_files_num == 0: 190 | return [] 191 | elif children_files_num == 1: 192 | if os.path.isdir(path + os.sep + children_files[0]): 193 | path = path + os.sep + children_files[0] 194 | temp_file_num = len(os.listdir(path)) 195 | if temp_file_num < num: 196 | num = temp_file_num 197 | else: 198 | num = 1 199 | elif children_files_num < num: 200 | num = children_files_num 201 | files = [] 202 | candidates = random.sample(map(lambda n: path + os.sep + n, os.listdir(path)), num) 203 | for f in candidates: 204 | if os.path.isdir(f): 205 | files.extend(get_files_in_random_order(f, 1)) 206 | else: 207 | files.append(f) 208 | return files 209 | 210 | 211 | def get_texts_in_random_order(path, num, character_num=-1): 212 | files = get_files_in_random_order(path, num) 213 | ret = [] 214 | for f in files: 215 | if os.path.exists(f): 216 | ret.append(get_text_sample(f, character_num)) 217 | return ret 218 | 
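# Usage sketch (the dataset path is hypothetical): thumbs = get_images_in_random_order('uploaded_files/my_dataset', 4) samples up to four images from a dataset of per-category subdirectories; non-image extensions are filtered out below.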
219 | 220 | def get_images_in_random_order(path, num): 221 | files = get_files_in_random_order(path, num) 222 | ret = [] 223 | for f in files: 224 | (name, ext) = os.path.splitext(f) 225 | ext = ext.lower() 226 | if ext in ('.png', '.jpg', '.jpeg', '.gif'): 227 | ret.append(f) 228 | return ret 229 | 230 | 231 | def get_text_sample(path, character_num=-1): 232 | raw_text = open(path).read() 233 | encoding = nkf.guess(raw_text) 234 | text = raw_text.decode(encoding) 235 | if character_num > -1: 236 | return text[0:character_num] 237 | else: 238 | return text 239 | -------------------------------------------------------------------------------- /src/db_models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/db_models/__init__.py -------------------------------------------------------------------------------- /src/db_models/shared_models.py: -------------------------------------------------------------------------------- 1 | from flask_sqlalchemy import SQLAlchemy 2 | db = SQLAlchemy() 3 | -------------------------------------------------------------------------------- /src/deeplearning/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/deeplearning/__init__.py -------------------------------------------------------------------------------- /src/deeplearning/log_subscriber.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import json 3 | from gevent.subprocess import Popen, PIPE 4 | import gevent 5 | 6 | 7 | ''' 8 | TODO: This is really just a dispatcher, so it should not have to care about the kinds of logs. 9 | If more log kinds are added, do not handle it by adding more of these constants and more TailFDispatcher init arguments; 10 | instead, consider approaches such as having the caller pass in a dictionary. 11 | ''' 12 | TRAIN_LOG = 'log' 13 | LINE_GRAPH = 'graph' 14 | 15 | 16 | def wrap_log_type(row, log_type): 17 | return json.dumps({'type': log_type, 'data': row}) 18 | 19 | 20 | class TailFDispatcher(object): 21 | def __init__(self, train_log, line_graph): 22 | super(TailFDispatcher, self).__init__() 23 | self.train_log = train_log 24 | self.line_graph = line_graph 25 | self.processes = [] 26 | self.queues = [] 27 | if train_log: 28 | self.processes.append(gevent.spawn(self._tail, train_log, TRAIN_LOG)) 29 | if line_graph: 30 | self.processes.append(gevent.spawn(self._tail, line_graph, LINE_GRAPH)) 31 | 32 | self.processes.append(gevent.spawn(self._avoid_timeout)) 33 | 34 | def subscribe(self, queue): 35 | def notify(msg): 36 | queue.put(msg) 37 | 38 | with open(self.train_log) as fp: 39 | for row in fp: 40 | gevent.spawn(notify, wrap_log_type(row, TRAIN_LOG)) 41 | with open(self.line_graph) as fp: 42 | for row in fp: 43 | gevent.spawn(notify, wrap_log_type(row, LINE_GRAPH)) 44 | 45 | self.queues.append(queue) 46 | 47 | def unsubscribe(self, queue): 48 | self.queues.remove(queue) 49 | 50 | def terminate(self): 51 | for process in self.processes: 52 | process.kill() 53 | 54 | def notify(): 55 | for queue in self.queues[:]: 56 | queue.put(json.dumps({ 57 | 'type': 'end' 58 | })) 59 | 60 | gevent.spawn(notify) 61 | 62 | def _tail(self, file_path, log_type): 63 | def notify(msg): 64 | for queue in self.queues[:]: 65 | queue.put(msg) 66 | 67 | p = Popen(['tail', '-n', '0', '-f', file_path], stdout=PIPE) 68 | self.processes.append(p) 69 | while True: 70 | row = p.stdout.readline() 71 | gevent.spawn(notify, 
wrap_log_type(row, log_type)) 72 | 73 | def _avoid_timeout(self): 74 | def notify(): 75 | for queue in self.queues[:]: 76 | queue.put(None) 77 | 78 | while True: 79 | gevent.spawn(notify) 80 | gevent.sleep(45) 81 | 82 | 83 | class LogSubscriber(object): 84 | def __init__(self): 85 | super(LogSubscriber, self).__init__() 86 | self.tail_processes = {} 87 | 88 | def file_subscribe(self, model_id, train_log, line_graph): 89 | model_id = int(model_id) 90 | self.tail_processes[model_id] = TailFDispatcher(train_log, line_graph) 91 | 92 | def subscribe(self, model_id, queue): 93 | model_id = int(model_id) 94 | if model_id in self.tail_processes: 95 | self.tail_processes[model_id].subscribe(queue) 96 | 97 | def unsubscribe(self, model_id, queue): 98 | model_id = int(model_id) 99 | if model_id in self.tail_processes: 100 | self.tail_processes[model_id].unsubscribe(queue) 101 | 102 | def terminate_train(self, model_id): 103 | model_id = int(model_id) 104 | if model_id in self.tail_processes: 105 | self.tail_processes[model_id].terminate() 106 | 107 | 108 | train_logger = LogSubscriber() 109 | -------------------------------------------------------------------------------- /src/deeplearning/predict/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/deeplearning/predict/__init__.py -------------------------------------------------------------------------------- /src/deeplearning/predict/imagenet_inspect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import random 4 | import re 5 | import imp 6 | 7 | from PIL import Image 8 | import scipy.misc 9 | 10 | import cPickle as pickle 11 | 12 | from chainer import cuda 13 | from chainer import serializers 14 | try: 15 | import tensorflow as tf 16 | except ImportError: 17 | pass 18 | 19 | 20 | def load_module(target): 21 | network = target.split(os.sep)[-1] 22 | model_name = re.sub(r"\.py$", "", network) 23 | (file, path, description) = imp.find_module(model_name, [os.path.dirname(target)]) 24 | return imp.load_module(model_name, file, path, description) 25 | 26 | 27 | def read_image(path, height, width, resize_mode="squash", channels=3, flip=False): 28 | """ 29 | Load an image from disk 30 | 31 | Returns an np.ndarray (channels x width x height) 32 | 33 | Arguments: 34 | path -- path to an image on disk 35 | width -- resize dimension 36 | height -- resize dimension 37 | 38 | Keyword arguments: 39 | channels -- the PIL mode that the image should be converted to 40 | (3 for color or 1 for grayscale) 41 | resize_mode -- can be crop, squash, fill or half_crop 42 | flip -- flag for flipping 43 | """ 44 | if channels == 1: 45 | mode = "L" 46 | else: 47 | mode = "RGB" 48 | 49 | image = Image.open(path) 50 | image = image.convert(mode) 51 | image = np.array(image) 52 | 53 | # Resize 54 | interp = 'bilinear' 55 | 56 | width_ratio = float(image.shape[1]) / width 57 | height_ratio = float(image.shape[0]) / height 58 | if resize_mode == 'squash' or width_ratio == height_ratio: 59 | return scipy.misc.imresize(image, (height, width), interp=interp) 60 | elif resize_mode == 'crop': 61 | # resize to smallest of ratios (relatively larger image), keeping aspect ratio 62 | if width_ratio > height_ratio: 63 | resize_height = height 64 | resize_width = int(round(image.shape[1] / height_ratio)) 65 | else: 66 | resize_width = width 67 | resize_height = 
int(round(image.shape[0] / width_ratio)) 68 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 69 | 70 | # chop off ends of dimension that is still too long 71 | if width_ratio > height_ratio: 72 | start = int(round((resize_width-width)/2.0)) 73 | return image[:, start:start+width] 74 | else: 75 | start = int(round((resize_height-height)/2.0)) 76 | return image[start:start+height, :] 77 | else: 78 | if resize_mode == 'fill': 79 | # resize to biggest of ratios (relatively smaller image), keeping aspect ratio 80 | if width_ratio > height_ratio: 81 | resize_width = width 82 | resize_height = int(round(image.shape[0] / width_ratio)) 83 | if (height - resize_height) % 2 == 1: 84 | resize_height += 1 85 | else: 86 | resize_height = height 87 | resize_width = int(round(image.shape[1] / height_ratio)) 88 | if (width - resize_width) % 2 == 1: 89 | resize_width += 1 90 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 91 | elif resize_mode == 'half_crop': 92 | # resize to average ratio keeping aspect ratio 93 | new_ratio = (width_ratio + height_ratio) / 2.0 94 | resize_width = int(round(image.shape[1] / new_ratio)) 95 | resize_height = int(round(image.shape[0] / new_ratio)) 96 | if width_ratio > height_ratio and (height - resize_height) % 2 == 1: 97 | resize_height += 1 98 | elif width_ratio < height_ratio and (width - resize_width) % 2 == 1: 99 | resize_width += 1 100 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 101 | # chop off ends of dimension that is still too long 102 | if width_ratio > height_ratio: 103 | start = int(round((resize_width-width)/2.0)) 104 | image = image[:, start:start+width] 105 | else: 106 | start = int(round((resize_height-height)/2.0)) 107 | image = image[start:start+height, :] 108 | else: 109 | raise Exception('unrecognized resize_mode "%s"' % resize_mode) 110 | 111 | # fill ends of dimension that is too short with random noise 112 | if width_ratio > height_ratio: 113 | padding = int((height - resize_height)/2) 114 | noise_size = (padding, width) 115 | if channels > 1: 116 | noise_size += (channels,) 117 | noise = np.random.randint(0, 255, noise_size).astype('uint8') 118 | image = np.concatenate((noise, image, noise), axis=0) 119 | else: 120 | padding = int((width - resize_width)/2) 121 | noise_size = (height, padding) 122 | if channels > 1: 123 | noise_size += (channels,) 124 | noise = np.random.randint(0, 255, noise_size).astype('uint8') 125 | image = np.concatenate((noise, image, noise), axis=1) 126 | 127 | if flip and random.randint(0, 1) == 0: 128 | return np.fliplr(image) 129 | else: 130 | return image 131 | 132 | 133 | def inspect_by_chainer(image_path, mean, model_path, label, 134 | network_path, resize_mode, channels, gpu=0): 135 | model_module = load_module(network_path) 136 | model = model_module.Network() 137 | serializers.load_hdf5(model_path, model) 138 | if gpu >= 0: 139 | cuda.check_cuda_available() 140 | cuda.get_device(gpu).use() 141 | model.to_gpu() 142 | 143 | img = read_image(image_path, 256, 256, resize_mode, channels) 144 | 145 | cropwidth = 256 - model.insize 146 | top = left = cropwidth / 2 147 | bottom = model.insize + top 148 | right = model.insize + left 149 | if img.ndim == 3: 150 | img = img.transpose(2, 0, 1) 151 | img = img[:, top:bottom, left:right].astype(np.float32) 152 | else: 153 | img = img[top:bottom, left:right].astype(np.float32) 154 | zeros = np.zeros((model.insize, model.insize)) 155 | img = np.array([img, zeros, zeros]) 156 | 
mean_image = pickle.load(open(mean, 'rb')) 157 | img -= mean_image[:, top:bottom, left:right] 158 | img /= 255 159 | 160 | x = np.ndarray((1, 3, model.insize, model.insize), dtype=np.float32) 161 | x[0] = img 162 | 163 | if gpu >= 0: 164 | x = cuda.to_gpu(x) 165 | score = model.predict(x) 166 | score = cuda.to_cpu(score.data) 167 | categories = np.loadtxt(label, str, delimiter="\t") 168 | top_k = 20 169 | prediction = zip(score[0].tolist(), categories) 170 | prediction.sort(key=lambda pair: pair[0], reverse=True) 171 | ret = [] 172 | for rank, (score, name) in enumerate(prediction[:top_k], start=1): 173 | ret.append({"rank": rank, "name": name, "score": "{0:4.1f}%".format(score*100)}) 174 | return ret 175 | 176 | 177 | def inspect_by_tensorflow(image_path, mean, model_path, label, 178 | network_path, resize_mode, channels, gpu=0): 179 | model_module = load_module(network_path) 180 | img = read_image(image_path, 128, 128, resize_mode, channels) 181 | if img.ndim == 3: 182 | img = img.astype(np.float32) 183 | else: 184 | zeros = np.zeros((128, 128)) 185 | img = np.array([img, zeros, zeros]).astype(np.float32) 186 | feed_data = [] 187 | feed_data.append(img.astype(np.float32) / 255.0) 188 | 189 | images_placeholder = tf.placeholder(tf.float32, [None, 128, 128, 3]) 190 | keep_prob = tf.placeholder(tf.float32) 191 | 192 | logits = model_module.inference(images_placeholder, keep_prob) 193 | prediction_op = tf.nn.top_k(tf.nn.softmax(logits), k=20) 194 | 195 | with tf.Session() as sess: 196 | sess.run(tf.initialize_all_variables()) 197 | saver = tf.train.Saver() 198 | saver.restore(sess, model_path) 199 | (values, indices) = sess.run(prediction_op, feed_dict={ 200 | images_placeholder: feed_data, 201 | keep_prob: 1.0, 202 | }) 203 | 204 | categories = np.loadtxt(label, str, delimiter="\t") 205 | ret = [] 206 | for i, idx in enumerate(indices[0]): 207 | ret.append({ 208 | 'rank': i+1, 209 | 'name': categories[idx], 210 | 'score': "{:4.1f}%".format(values[0][i]*100) 211 | }) 212 | return ret 213 | -------------------------------------------------------------------------------- /src/deeplearning/predict/text_predict.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import re 4 | import imp 5 | import cPickle as pickle 6 | 7 | import numpy as np 8 | from chainer import cuda, Variable 9 | import chainer.functions as F 10 | import chainer.links as L 11 | from chainer import serializers 12 | 13 | 14 | def load_module(dir_name, symbol): 15 | (file, path, description) = imp.find_module(symbol, [dir_name]) 16 | return imp.load_module(symbol, file, path, description) 17 | 18 | 19 | def predict(model_path, vocab_path, network_path, primetext, 20 | seed, unit, dropout, sample, length, use_mecab=False): 21 | 22 | np.random.seed(seed) 23 | 24 | # load vocabulary 25 | vocab = pickle.load(open(vocab_path, 'rb')) 26 | ivocab = {} 27 | for c, i in vocab.items(): 28 | ivocab[i] = c 29 | n_units = unit 30 | 31 | network = network_path.split(os.sep)[-1] 32 | model_name = re.sub(r"\.py$", "", network) 33 | model_module = load_module(os.path.dirname(network_path), model_name) 34 | lm = model_module.Network(len(vocab), n_units, dropout_ratio=dropout, train=False) 35 | 36 | model = L.Classifier(lm) 37 | model.compute_accuracy = False # we only want the perplexity 38 | for param in model.params(): 39 | data = param.data 40 | data[:] = np.random.uniform(-0.1, 0.1, data.shape) 41 | 42 | serializers.load_npz(model_path, model) 43 | 
model.predictor.reset_state() # initialize state 44 | prev_char = np.array([0]) 45 | ret = [] 46 | if not isinstance(primetext, unicode): 47 | primetext = unicode(primetext, 'utf-8') 48 | if use_mecab: 49 | if len(primetext) > 0: 50 | if primetext not in vocab: 51 | prev_char = Variable(np.ones((1,)).astype(np.int32) * 0) 52 | else: 53 | prev_char = Variable(np.ones((1,)).astype(np.int32) * vocab[primetext]) 54 | prob = F.softmax(model.predictor(prev_char)) 55 | ret.append(primetext) 56 | else: 57 | if len(primetext) > 0: 58 | for i in primetext: 59 | ret.append(i) 60 | prev_char = Variable(np.ones((1,)).astype(np.int32) * vocab[i]) 61 | prob = model.predictor.predict(prev_char) 62 | 63 | for i in xrange(length): 64 | prob = model.predictor.predict(prev_char) 65 | 66 | if sample > 0: 67 | probability = cuda.to_cpu(prob.data)[0].astype(np.float64) 68 | probability /= np.sum(probability) 69 | index = np.random.choice(range(len(probability)), p=probability) 70 | else: 71 | index = np.argmax(cuda.to_cpu(prob.data)) 72 | 73 | if ivocab[index] == "": 74 | ret.append(".") 75 | else: 76 | ret.append(ivocab[index]) 77 | prev_char = Variable(np.ones((1,)).astype(np.int32) * vocab[ivocab[index]]) 78 | return "".join(ret) 79 | -------------------------------------------------------------------------------- /src/deeplearning/prepare/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/deeplearning/prepare/__init__.py -------------------------------------------------------------------------------- /src/deeplearning/prepare/prepare_for_imagenet.py: -------------------------------------------------------------------------------- 1 | # -*- encoding:utf-8 -*- 2 | import os 3 | from logging import getLogger 4 | 5 | import numpy 6 | import scipy 7 | import scipy.misc 8 | import cv2 9 | import cPickle as pickle 10 | from PIL import Image 11 | try: 12 | import tensorflow as tf 13 | except ImportError: 14 | pass 15 | 16 | import common.utils as ds_utils 17 | 18 | logger = getLogger(__name__) 19 | 20 | 21 | def do(model, prepared_data_root): 22 | if model.prepared_file_path: 23 | # re-use existing directory 24 | for f in os.listdir(model.prepared_file_path): 25 | os.remove(os.path.join(model.prepared_file_path, f)) 26 | else: 27 | model.prepared_file_path = os.path.join(prepared_data_root, ds_utils.get_timestamp()) 28 | os.mkdir(model.prepared_file_path) 29 | model.update_and_commit() 30 | logger.info('Start making training data.') 31 | if model.framework == 'chainer': 32 | train_image_num, val_image_num = make_train_data_for_chainer(model) 33 | compute_mean(model.prepared_file_path) 34 | else: 35 | train_image_num, val_image_num = make_train_data_for_tensorflow(model) 36 | logger.info('Finish making training data.') 37 | return model, train_image_num, val_image_num 38 | 39 | 40 | def get_image_num(dataset_path): 41 | class_no = 0 42 | 43 | train_images_counter = 0 44 | val_images_counter = 0 45 | for path, dirs, files in os.walk(dataset_path): 46 | if not dirs: 47 | length = len(files) 48 | for i, f in enumerate(files): 49 | (head, ext) = os.path.splitext(f) 50 | ext = ext.lower() 51 | if ext not in ['.jpg', '.jpeg', '.gif', '.png']: 52 | continue 53 | if os.path.getsize(os.path.join(path, f)) <= 0: 54 | continue 55 | if i < length * 0.75: 56 | train_images_counter += 1 57 | else: 58 | val_images_counter += 1 59 | class_no += 1 60 | return train_images_counter, val_images_counter 61 | 62 
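# ------------------------------------------------------------------------------
# (Illustrative sketch, not part of the original prepare_for_imagenet.py.)
# get_image_num() above and both make_train_data_* writers below share the same
# positional split rule: within each leaf directory, files whose index i
# satisfies i < len(files) * 0.75 become training samples and the rest become
# validation samples. The rule in isolation, with hypothetical names:
#
#     import math
#
#     def split_files(files, train_fraction=0.75):
#         # i < n * train_fraction keeps indices 0 .. ceil(n * fraction) - 1
#         cut = int(math.ceil(len(files) * train_fraction))
#         return files[:cut], files[cut:]
#
#     train, val = split_files(['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg'])
#     # -> (['a.jpg', 'b.jpg', 'c.jpg'], ['d.jpg'])
#
# Note the split is positional (os.walk order), not shuffled.
# ------------------------------------------------------------------------------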
| 63 | def make_train_data_for_chainer(model): 64 | class_no = 0 65 | count = 0 66 | train_images_counter = 0 67 | val_images_counter = 0 68 | with open(os.path.join(model.prepared_file_path, 'train.txt'), 'w') as train_text, \ 69 | open(os.path.join(model.prepared_file_path, 'test.txt'), 'w') as test_text, \ 70 | open(os.path.join(model.prepared_file_path, 'labels.txt'), 'w') as labels_text: 71 | for path, dirs, files in os.walk(model.dataset.dataset_path): 72 | if not dirs: 73 | (head, tail) = os.path.split(path) 74 | labels_text.write(tail.encode('utf-8') + "\n") 75 | start_count = count 76 | length = len(files) 77 | for f in files: 78 | logger.info('Processing File: {0}' 79 | .format(os.path.join(path, f).encode('utf-8'))) 80 | (head, ext) = os.path.splitext(f) 81 | ext = ext.lower() 82 | if ext not in [".jpg", ".jpeg", ".gif", ".png"]: 83 | logger.info('Invalid ext. This file is skipped. {}' 84 | .format(os.path.join(path, f).encode('utf-8'))) 85 | continue 86 | if os.path.getsize(os.path.join(path, f)) <= 0: 87 | logger.info('File size is 0. This file is skipped. {}' 88 | .format(os.path.join(path, f).encode('utf-8'))) 89 | continue 90 | new_image_path = os.path.join(model.prepared_file_path, 91 | "image{0:0>7}.jpg".format(count)) 92 | resize_image(os.path.join(path, f), new_image_path, model) 93 | if count - start_count < length * 0.75: 94 | train_text.write("{0} {1:d}\n".format(new_image_path, class_no)) 95 | train_images_counter += 1 96 | else: 97 | test_text.write("{0} {1:d}\n".format(new_image_path, class_no)) 98 | val_images_counter += 1 99 | count += 1 100 | class_no += 1 101 | return train_images_counter, val_images_counter 102 | 103 | 104 | def make_train_data_for_tensorflow(model): 105 | with tf.python_io.TFRecordWriter(os.path.join(model.prepared_file_path, 'train.tfrecord')) as train_data, \ 106 | tf.python_io.TFRecordWriter(os.path.join(model.prepared_file_path, 'test.tfrecord')) as test_data, \ 107 | open(os.path.join(model.prepared_file_path, 'labels.txt'), 'w') as labels_text: 108 | class_no = 0 109 | train_images_counter = 0 110 | val_images_counter = 0 111 | for path, dirs, files in os.walk(model.dataset.dataset_path): 112 | if not dirs: 113 | (head, tail) = os.path.split(path) 114 | labels_text.write(tail.encode('utf-8') + '\n') 115 | length = len(files) 116 | for i, f in enumerate(files): 117 | logger.info('Processing File: {}'.format(os.path.join(path, f).encode('utf-8'))) 118 | (head, ext) = os.path.splitext(f) 119 | ext = ext.lower() 120 | if ext not in ['.jpg', '.jpeg', '.gif', '.png']: 121 | logger.info('Invalid ext. This file is skipped. {}' 122 | .format(os.path.join(path, f).encode('utf-8'))) 123 | continue 124 | if os.path.getsize(os.path.join(path, f)) <= 0: 125 | logger.info('File size is 0. This file is skipped. 
{}' 126 | .format(os.path.join(path, f).encode('utf-8'))) 127 | continue 128 | image = resize_image(os.path.join(path, f), None, model) 129 | image_raw = image.tostring() 130 | example = tf.train.Example(features=tf.train.Features(feature={ 131 | 'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_raw])), 132 | 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[class_no])) 133 | })) 134 | if i < length * 0.75: 135 | train_data.write(example.SerializeToString()) 136 | train_images_counter += 1 137 | else: 138 | test_data.write(example.SerializeToString()) 139 | val_images_counter += 1 140 | class_no += 1 141 | return train_images_counter, val_images_counter 142 | 143 | 144 | def resize_image(source, dest, model): 145 | if model.framework == 'chainer': 146 | output_side_length = 256 147 | elif model.framework == 'tensorflow': 148 | output_side_length = 128 149 | 150 | if model.channels == 1: 151 | mode = "L" 152 | else: 153 | mode = "RGB" 154 | 155 | image = Image.open(source) 156 | image = image.convert(mode) 157 | image = numpy.array(image) 158 | 159 | height = image.shape[0] 160 | width = image.shape[1] 161 | 162 | if model.resize_mode not in ['crop', 'squash', 'fill', 'half_crop']: 163 | raise ValueError('resize_mode "%s" not supported' % model.resize_mode) 164 | 165 | # Resize 166 | interp = 'bilinear' 167 | 168 | width_ratio = float(width) / output_side_length 169 | height_ratio = float(height) / output_side_length 170 | if model.resize_mode == 'squash' or width_ratio == height_ratio: 171 | image = scipy.misc.imresize(image, (output_side_length, output_side_length), interp=interp) 172 | elif model.resize_mode == 'crop': 173 | # resize to smallest of ratios (relatively larger image), keeping aspect ratio 174 | if width_ratio > height_ratio: 175 | resize_height = output_side_length 176 | resize_width = int(round(width / height_ratio)) 177 | else: 178 | resize_width = output_side_length 179 | resize_height = int(round(height / width_ratio)) 180 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 181 | 182 | # chop off ends of dimension that is still too long 183 | if width_ratio > height_ratio: 184 | start = int(round((resize_width-output_side_length)/2.0)) 185 | image = image[:, start:start+output_side_length] 186 | else: 187 | start = int(round((resize_height-output_side_length)/2.0)) 188 | image = image[start:start+output_side_length, :] 189 | else: 190 | if model.resize_mode == 'fill': 191 | # resize to biggest of ratios (relatively smaller image), keeping aspect ratio 192 | if width_ratio > height_ratio: 193 | resize_width = output_side_length 194 | resize_height = int(round(height / width_ratio)) 195 | if (output_side_length - resize_height) % 2 == 1: 196 | resize_height += 1 197 | else: 198 | resize_height = output_side_length 199 | resize_width = int(round(width / height_ratio)) 200 | if (output_side_length - resize_width) % 2 == 1: 201 | resize_width += 1 202 | image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 203 | elif model.resize_mode == 'half_crop': 204 | # resize to average ratio keeping aspect ratio 205 | new_ratio = (width_ratio + height_ratio) / 2.0 206 | resize_width = int(round(width / new_ratio)) 207 | resize_height = int(round(height / new_ratio)) 208 | if width_ratio > height_ratio and (output_side_length - resize_height) % 2 == 1: 209 | resize_height += 1 210 | elif width_ratio < height_ratio and (output_side_length - resize_width) % 2 == 1: 211 | resize_width += 1 212 | 
image = scipy.misc.imresize(image, (resize_height, resize_width), interp=interp) 213 | # chop off ends of dimension that is still too long 214 | if width_ratio > height_ratio: 215 | start = int(round((resize_width-output_side_length)/2.0)) 216 | image = image[:, start:start+output_side_length] 217 | else: 218 | start = int(round((resize_height-output_side_length)/2.0)) 219 | image = image[start:start+output_side_length, :] 220 | else: 221 | raise Exception('unrecognized resize_mode "%s"' % model.resize_mode) 222 | 223 | # fill ends of dimension that is too short with random noise 224 | if width_ratio > height_ratio: 225 | padding = int((output_side_length - resize_height)/2) 226 | noise_size = (padding, output_side_length) 227 | if model.channels > 1: 228 | noise_size += (model.channels,) 229 | noise = numpy.random.randint(0, 255, noise_size).astype('uint8') 230 | image = numpy.concatenate((noise, image, noise), axis=0) 231 | else: 232 | padding = int((output_side_length - resize_width)/2) 233 | noise_size = (output_side_length, padding) 234 | if model.channels > 1: 235 | noise_size += (model.channels,) 236 | noise = numpy.random.randint(0, 255, noise_size).astype('uint8') 237 | image = numpy.concatenate((noise, image, noise), axis=1) 238 | if dest is not None: 239 | cv2.imwrite(dest, image) 240 | return image 241 | 242 | 243 | def compute_mean(data_path): 244 | sum_image = None 245 | count = 0 246 | train_text = os.path.join(data_path, 'train.txt') 247 | for line in open(train_text): 248 | filepath = line.strip().split()[0] 249 | image = numpy.asarray(Image.open(filepath)) 250 | if image.ndim == 3: 251 | image = image.transpose(2, 0, 1) 252 | else: 253 | image_size, _ = image.shape 254 | zeros = numpy.zeros((image_size, image_size)) 255 | image = numpy.array([image, zeros, zeros]) 256 | if sum_image is None: 257 | sum_image = numpy.ndarray(image.shape, dtype=numpy.float32) 258 | sum_image[:] = image 259 | else: 260 | sum_image += image 261 | count += 1 262 | mean = sum_image / count 263 | pickle.dump(mean, open(os.path.join(data_path, 'mean.npy'), 'wb'), -1) 264 | -------------------------------------------------------------------------------- /src/deeplearning/prepare/prepare_for_lstm.py: -------------------------------------------------------------------------------- 1 | # -*- encoding:utf-8 -*- 2 | import os 3 | from logging import getLogger 4 | 5 | import nkf 6 | 7 | import common.utils as ds_utils 8 | 9 | logger = getLogger(__name__) 10 | 11 | 12 | def do( 13 | model, 14 | prepared_data_root, 15 | pretrained_model, 16 | use_wakatigaki 17 | ): 18 | logger.info('Start making LSTM training data.') 19 | if model.prepared_file_path: 20 | # re-use existing directory 21 | for f in os.listdir(model.prepared_file_path): 22 | os.remove(os.path.join(model.prepared_file_path, f)) 23 | else: 24 | model.prepared_file_path = os.path.join(prepared_data_root, ds_utils.get_timestamp()) 25 | os.mkdir(model.prepared_file_path) 26 | if pretrained_model != "-1": 27 | trained_model_path = model.trained_model_path 28 | if trained_model_path: 29 | pretrained_vocab = os.path.join(trained_model_path, 'vocab2.bin') 30 | if not os.path.exists(pretrained_vocab): 31 | logger.error("Could not find vocab2.bin file. It is possible that previous training has failed: {0}" 32 | .format(pretrained_vocab)) 33 | raise Exception("Could not find vocab2.bin file. 
It is possible that previous training has failed: ", 34 | pretrained_vocab) 35 | else: 36 | pretrained_vocab = '' 37 | else: 38 | pretrained_vocab = '' 39 | input_data_path = make_train_text(model, use_wakatigaki) 40 | model.update_and_commit() 41 | logger.info('Finish making LSTM training data.') 42 | return input_data_path, pretrained_vocab, model 43 | 44 | 45 | def make_train_text(model, use_wakatigaki): 46 | input_text = open(os.path.join(model.prepared_file_path, 'input.txt'), 'w') 47 | if use_wakatigaki: 48 | logger.info('Use wakatigaki option.') 49 | import MeCab 50 | 51 | m = MeCab.Tagger("-Owakati") 52 | for f in ds_utils.find_all_files(model.dataset.dataset_path): 53 | raw_text = open(f, 'r').read() 54 | encoding = nkf.guess(raw_text) 55 | if encoding == 'BINARY': 56 | continue 57 | text = raw_text.decode(encoding, 'ignore') 58 | text = text.replace('\r', '') 59 | encoded_text = text.encode('UTF-8') 60 | lines = encoded_text.splitlines() 61 | for line in lines: 62 | result = m.parse(line) 63 | if result is None: 64 | continue 65 | input_text.write(result) 66 | input_text.flush() 67 | else: 68 | for f in ds_utils.find_all_files(model.dataset.dataset_path): 69 | temp_text = open(f, 'r').read() 70 | encoding = nkf.guess(temp_text) 71 | if encoding == 'BINARY': 72 | continue 73 | decoded_text = temp_text.decode(encoding, 'ignore') 74 | decoded_text = decoded_text.replace('\r', '') 75 | encoded_text = decoded_text.encode('UTF-8') 76 | input_text.write(encoded_text) 77 | input_text.flush() 78 | input_text.close() 79 | return os.path.join(model.prepared_file_path, 'input.txt') 80 | -------------------------------------------------------------------------------- /src/deeplearning/runner.py: -------------------------------------------------------------------------------- 1 | # -*- encoding:utf-8 -*- 2 | import os 3 | import logging 4 | from multiprocessing import Process, Event 5 | 6 | from db_models.datasets import Dataset 7 | from db_models.models import Model 8 | import deeplearning.prepare.prepare_for_imagenet 9 | import deeplearning.prepare.prepare_for_lstm 10 | import deeplearning.train.train_imagenet 11 | import deeplearning.train.train_lstm 12 | from deeplearning.log_subscriber import train_logger 13 | from time import sleep 14 | import gevent 15 | import re 16 | 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | INTERRUPTABLE_PROCESSES = {} 21 | 22 | 23 | class Interruptable(object): 24 | def __init__(self): 25 | super(Interruptable, self).__init__() 26 | self.interrupt_event = Event() 27 | self.interruptable_event = Event() 28 | self.end_event = Event() 29 | self.completion = None 30 | 31 | def wait_for_end(): 32 | while True: 33 | if self.end_event.is_set(): 34 | break 35 | gevent.sleep(1) 36 | if self.completion: 37 | self.completion() 38 | 39 | gevent.spawn(wait_for_end) 40 | 41 | def terminate(self): 42 | self.end_event.set() 43 | 44 | def set_interrupt(self): 45 | self.interrupt_event.set() 46 | 47 | def clear_interrupt(self): 48 | self.interrupt_event.clear() 49 | 50 | def set_interruptable(self): 51 | self.interruptable_event.set() 52 | 53 | def clear_interruptable(self): 54 | self.interruptable_event.clear() 55 | 56 | def is_interrupting(self): 57 | return self.interrupt_event.is_set() 58 | 59 | def is_interruptable(self): 60 | return self.interruptable_event.is_set() 61 | 62 | 63 | def _create_trained_model_dir(path, root_output_dir, model_name): 64 | if path is None: 65 | path = os.path.join(root_output_dir, model_name) 66 | if not 
os.path.exists(path): 67 | os.mkdir(path) 68 | return path 69 | 70 | 71 | # Clean up after a training run terminates 72 | def _cleanup_for_train_terminate(model_id): 73 | train_logger.terminate_train(model_id) 74 | del INTERRUPTABLE_PROCESSES[model_id] 75 | 76 | 77 | def run_imagenet_train( 78 | prepared_data_root, 79 | output_dir_root, 80 | dataset_id, 81 | model_id, 82 | epoch, 83 | pretrained_model, 84 | gpu_num, 85 | resize_mode, 86 | channels, 87 | avoid_flipping, 88 | batchsize 89 | ): 90 | dataset = Dataset.query.get(dataset_id) 91 | model = Model.query.get(model_id) 92 | model.dataset = dataset 93 | model.epoch = epoch 94 | model.resize_mode = resize_mode 95 | model.channels = channels 96 | model.gpu = gpu_num 97 | model.batchsize = batchsize 98 | model.update_and_commit() 99 | model, train_image_num, val_image_num = deeplearning.prepare.prepare_for_imagenet.do(model, prepared_data_root) 100 | (model_dir, model_name) = os.path.split(model.network_path) 101 | model_name = re.sub(r"\.py$", "", model_name) 102 | trained_model_path = _create_trained_model_dir(model.trained_model_path, 103 | output_dir_root, model_name) 104 | model.trained_model_path = trained_model_path 105 | train_log = os.path.join(trained_model_path, 'train.log') 106 | graph = os.path.join(trained_model_path, 'line_graph.tsv') 107 | 108 | open(train_log, 'w').close() 109 | open(graph, 'w').close() 110 | train_logger.file_subscribe(model_id, train_log, graph) 111 | interruptable = Interruptable() 112 | # model.clean_old_models() 113 | if model.framework == 'chainer': 114 | train_process = Process( 115 | target=deeplearning.train.train_imagenet.do_train_by_chainer, 116 | args=( 117 | model, 118 | output_dir_root, 119 | 250, # val_batchsize 120 | 20, # loader_job 121 | pretrained_model, 122 | avoid_flipping, 123 | interruptable, 124 | ) 125 | ) 126 | elif model.framework == 'tensorflow': 127 | train_process = Process( 128 | target=deeplearning.train.train_imagenet.do_train_by_tensorflow, 129 | args=( 130 | model, 131 | output_dir_root, 132 | 500, # val_batchsize 133 | pretrained_model, 134 | train_image_num, 135 | val_image_num, 136 | avoid_flipping, 137 | False, # resume 138 | interruptable, 139 | ) 140 | ) 141 | else: 142 | raise Exception('Unknown framework') 143 | train_process.start() 144 | model.pid = train_process.pid 145 | model.update_and_commit() 146 | logger.info('start imagenet training. 
PID: %s', model.pid) 147 | 148 | def completion(): 149 | _cleanup_for_train_terminate(model.id) 150 | 151 | interruptable.completion = completion 152 | 153 | INTERRUPTABLE_PROCESSES[model.id] = interruptable 154 | 155 | 156 | # Resuming should only require the model and the GPU to run it on. 157 | def resume_imagenet_train(output_dir_root, model, gpu_num): 158 | model.gpu = gpu_num 159 | model.update_and_commit() 160 | train_logger.file_subscribe(model.id, model.train_log_path, model.line_graph) 161 | interruptable = Interruptable() 162 | if model.framework == 'chainer': 163 | train_process = Process( 164 | target=deeplearning.train.train_imagenet.resume_train_by_chainer, 165 | args=( 166 | model, 167 | output_dir_root, 168 | 250, # val_batchsize 169 | 20, # loader_job 170 | interruptable, 171 | ) 172 | ) 173 | elif model.framework == 'tensorflow': 174 | train_image_num, val_image_num = deeplearning.prepare.prepare_for_imagenet.get_image_num( 175 | model.dataset.dataset_path) 176 | train_process = Process( 177 | target=deeplearning.train.train_imagenet.do_train_by_tensorflow, 178 | args=( 179 | model, 180 | output_dir_root, 181 | 500, # val_batchsize 182 | None, 183 | train_image_num, 184 | val_image_num, 185 | False, # not used 186 | True, # resume 187 | interruptable, 188 | ) 189 | ) 190 | else: 191 | raise Exception('Unknown framework') 192 | train_process.start() 193 | model.pid = train_process.pid 194 | model.update_and_commit() 195 | logger.info('start imagenet training. PID: %s', model.pid) 196 | 197 | def completion(): 198 | _cleanup_for_train_terminate(model.id) 199 | 200 | interruptable.completion = completion 201 | 202 | INTERRUPTABLE_PROCESSES[model.id] = interruptable 203 | 204 | 205 | def run_lstm_train( 206 | prepared_data_root, 207 | output_dir_root, 208 | dataset_id, 209 | model_id, 210 | epoch, 211 | pretrained_model, 212 | gpu_num, 213 | use_wakatigaki, 214 | batchsize=50 215 | ): 216 | dataset = Dataset.query.get(dataset_id) 217 | model = Model.query.get(model_id) 218 | model.dataset = dataset 219 | model.epoch = epoch 220 | model.gpu = gpu_num 221 | model.enable_wakatigaki(use_wakatigaki) 222 | model.batchsize = batchsize 223 | # model.clean_old_models() 224 | (input_data_path, pretrained_vocab, model) = deeplearning.prepare.prepare_for_lstm.do( 225 | model, prepared_data_root, pretrained_model, use_wakatigaki) 226 | 227 | (model_dir, model_name) = os.path.split(model.network_path) 228 | model_name = re.sub(r"\.py$", "", model_name) 229 | trained_model_path = _create_trained_model_dir(model.trained_model_path, 230 | output_dir_root, model_name) 231 | model.trained_model_path = trained_model_path 232 | train_log = os.path.join(trained_model_path, 'train.log') 233 | graph = os.path.join(trained_model_path, 'line_graph.tsv') 234 | open(train_log, 'w').close() 235 | open(graph, 'w').close() 236 | train_logger.file_subscribe(model_id, train_log, graph) 237 | 238 | interruptable = Interruptable() 239 | train_process = Process( 240 | target=deeplearning.train.train_lstm.do_train, 241 | args=( 242 | model, 243 | output_dir_root, 244 | input_data_path, 245 | pretrained_vocab, 246 | use_wakatigaki, 247 | pretrained_model, 248 | None, # resume 249 | 128, # runsize 250 | 2e-3, # learning_rate 251 | 0.97, # learning_rate_decay 252 | 10, # learning_rate_decay_after 253 | 0.95, # decay_rate 254 | 0.0, # dropout 255 | 50, # seq_length 256 | batchsize, # batchsize 257 | 5, # grad_clip 258 | interruptable, 259 | ) 260 | ) 261 | train_process.start() 262 | model.pid = train_process.pid 263 | model.update_and_commit() 264 | 
logger.info('start LSTM training. PID: %s', model.pid) 265 | 266 | def completion(): 267 | _cleanup_for_train_terminate(model.id) 268 | 269 | interruptable.completion = completion 270 | 271 | INTERRUPTABLE_PROCESSES[model.id] = interruptable 272 | 273 | 274 | def resume_lstm_train( 275 | prepared_data_root, 276 | output_dir_root, 277 | model, 278 | gpu_num, 279 | ): 280 | model.gpu = gpu_num 281 | (input_data_path, pretrained_vocab, model) = deeplearning.prepare.prepare_for_lstm.do( 282 | model, prepared_data_root, None, model.use_wakatigaki) 283 | 284 | train_logger.file_subscribe(model.id, model.train_log_path, model.line_graph) 285 | 286 | interruptable = Interruptable() 287 | train_process = Process( 288 | target=deeplearning.train.train_lstm.do_train, 289 | args=( 290 | model, 291 | output_dir_root, 292 | input_data_path, 293 | pretrained_vocab, 294 | model.use_wakatigaki, 295 | '', 296 | True, # resume 297 | 128, # runsize 298 | 2e-3, # learning_rate 299 | 0.97, # learning_rate_decay 300 | 10, # learning_rate_decay_after 301 | 0.95, # decay_rate 302 | 0.0, # dropout 303 | 50, # seq_length 304 | model.batchsize, # batchsize 305 | 5, # grad_clip 306 | interruptable, 307 | ) 308 | ) 309 | train_process.start() 310 | model.pid = train_process.pid 311 | model.update_and_commit() 312 | logger.info('start LSTM training. PID: %s', model.pid) 313 | 314 | def completion(): 315 | _cleanup_for_train_terminate(model.id) 316 | 317 | interruptable.completion = completion 318 | INTERRUPTABLE_PROCESSES[model.id] = interruptable 319 | -------------------------------------------------------------------------------- /src/deeplearning/train/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/deeplearning/train/__init__.py -------------------------------------------------------------------------------- /src/deeplearning/train/utils.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import os 3 | 4 | 5 | def remove_resume_file(base_path): 6 | try: 7 | shutil.rmtree(os.path.join(base_path, 'resume')) 8 | except OSError: 9 | pass 10 | -------------------------------------------------------------------------------- /src/deeplearning/visualizer.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import imp 4 | import re 5 | import math 6 | 7 | import chainer.links as L 8 | from chainer import serializers 9 | 10 | import matplotlib 11 | matplotlib.use('Agg') 12 | import matplotlib.pyplot as plt 13 | import matplotlib.ticker as ticker 14 | 15 | float32 = 0  # dummy binding so that eval('(' + W.label + ')') in LayerVisualizer.plot() can resolve the dtype name 16 | 17 | 18 | def load_module(dir_name, symbol): 19 | (file, path, description) = imp.find_module(symbol, [dir_name]) 20 | return imp.load_module(symbol, file, path, description) 21 | 22 | 23 | class LayerVisualizer: 24 | def __init__(self, network_path, trained_model_path, output_dir, **kwargs): 25 | model_name = re.sub(r"\.py$", "", os.path.basename(network_path)) 26 | model_module = load_module(os.path.dirname(network_path), model_name) 27 | if len(kwargs) == 0: 28 | self.model = model_module.Network() 29 | serializers.load_hdf5(trained_model_path, self.model) 30 | else: 31 | lm = model_module.Network(kwargs['vocab_len'], kwargs['n_units'], 32 | kwargs['dropout'], train=False) 33 | self.model = L.Classifier(lm) 34 | serializers.load_npz(trained_model_path, self.model) 35 | self.output_dir = 
output_dir 36 | 37 | def plot(self, W): 38 | dim = eval('(' + W.label + ')')[0] 39 | size = int(math.ceil(math.sqrt(dim[0]))) 40 | if len(dim) == 4: 41 | for i, channel in enumerate(W.data): 42 | ax = plt.subplot(size, size, i+1) 43 | ax.xaxis.set_major_locator(ticker.NullLocator()) 44 | ax.yaxis.set_major_locator(ticker.NullLocator()) 45 | accum = channel[0] 46 | for ch in channel: 47 | accum += ch 48 | accum /= len(channel) 49 | ax.imshow(accum) 50 | else: 51 | plt.imshow(W.data) 52 | 53 | def save_plot(self, W, name): 54 | plt.clf() 55 | fig = plt.figure() 56 | fig.suptitle(name + " " + W.label, fontweight='bold', color='#ffffff') 57 | self.plot(W) 58 | plt.draw() 59 | output_file_name = name.replace('/', '_') + '.png' 60 | plt.savefig(self.output_dir + os.sep + output_file_name, facecolor="#001100") 61 | return output_file_name 62 | 63 | def get_layer_list(self): 64 | layers = [] 65 | for layer in sorted(self.model.namedparams()): 66 | if layer[0].find("W") > -1: 67 | layers.append({"name": layer[0], "params": layer[1].label}) 68 | return layers 69 | 70 | def visualize_all(self): 71 | for layer in sorted(self.model.namedparams()): 72 | if layer[0].find("W") > -1: 73 | self.save_plot(layer[1], layer[0]) 74 | 75 | def visualize(self, layer_name): 76 | output_file_name = None 77 | for layer in sorted(self.model.namedparams()): 78 | if layer[0].find(layer_name) > -1: 79 | output_file_name = self.save_plot(layer[1], layer[0]) 80 | break 81 | return output_file_name 82 | 83 | 84 | if __name__ == "__main__": 85 | import argparse 86 | parser = argparse.ArgumentParser(description='visualize layer') 87 | parser.add_argument('network', help='Path to Network') 88 | parser.add_argument('trained_model', help='Path to trained model') 89 | parser.add_argument('output_dir', help="Path to output") 90 | args = parser.parse_args() 91 | 92 | v = LayerVisualizer(args.network, args.trained_model, args.output_dir) 93 | -------------------------------------------------------------------------------- /src/model_templates/alex.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # HINT:image 3 | import chainer 4 | import chainer.functions as F 5 | import chainer.links as L 6 | 7 | """ 8 | Alexnet: 9 | ImageNet Classification with Deep Convolutional Neural Networks 10 | http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf 11 | 12 | ############################## 13 | ## DO NOT CHANGE CLASS NAME ## 14 | ############################## 15 | """ 16 | 17 | 18 | class Network(chainer.Chain): 19 | 20 | """Single-GPU AlexNet without partition toward the channel axis.""" 21 | 22 | insize = 227 23 | 24 | def __init__(self): 25 | super(Network, self).__init__( 26 | conv1=L.Convolution2D(3, 96, 11, stride=4), 27 | conv2=L.Convolution2D(96, 256, 5, pad=2), 28 | conv3=L.Convolution2D(256, 384, 3, pad=1), 29 | conv4=L.Convolution2D(384, 384, 3, pad=1), 30 | conv5=L.Convolution2D(384, 256, 3, pad=1), 31 | fc6=L.Linear(9216, 4096), 32 | fc7=L.Linear(4096, 4096), 33 | fc8=L.Linear(4096, 1000), 34 | ) 35 | self.train = True 36 | 37 | def __call__(self, x, t): 38 | h = F.max_pooling_2d(F.relu( 39 | F.local_response_normalization(self.conv1(x))), 3, stride=2) 40 | h = F.max_pooling_2d(F.relu( 41 | F.local_response_normalization(self.conv2(h))), 3, stride=2) 42 | h = F.relu(self.conv3(h)) 43 | h = F.relu(self.conv4(h)) 44 | h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2) 45 | h = F.dropout(F.relu(self.fc6(h)), train=self.train) 46 | h = 
F.dropout(F.relu(self.fc7(h)), train=self.train) 47 | h = self.fc8(h) 48 | 49 | self.loss = F.softmax_cross_entropy(h, t) 50 | self.accuracy = F.accuracy(h, t) 51 | return self.loss 52 | 53 | def predict(self, x_data): 54 | x = chainer.Variable(x_data, volatile=True) 55 | h = F.max_pooling_2d(F.relu( 56 | F.local_response_normalization(self.conv1(x))), 3, stride=2) 57 | h = F.max_pooling_2d(F.relu( 58 | F.local_response_normalization(self.conv2(h))), 3, stride=2) 59 | h = F.relu(self.conv3(h)) 60 | h = F.relu(self.conv4(h)) 61 | h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2) 62 | h = F.relu(self.fc6(h)) 63 | h = F.relu(self.fc7(h)) 64 | h = self.fc8(h) 65 | return F.softmax(h) 66 | -------------------------------------------------------------------------------- /src/model_templates/alexbn.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # HINT:image 3 | import chainer 4 | import chainer.functions as F 5 | import chainer.links as L 6 | 7 | """ 8 | Alexnet: 9 | ImageNet Classification with Deep Convolutional Neural Networks 10 | http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf 11 | 12 | ############################## 13 | ## DO NOT CHANGE CLASS NAME ## 14 | ############################## 15 | """ 16 | 17 | 18 | class Network(chainer.Chain): 19 | 20 | """Single-GPU AlexNet with LRN layers replaced by BatchNormalization.""" 21 | 22 | insize = 227 23 | 24 | def __init__(self): 25 | super(Network, self).__init__( 26 | conv1=L.Convolution2D(3, 96, 11, stride=4), 27 | bn1=L.BatchNormalization(96), 28 | conv2=L.Convolution2D(96, 256, 5, pad=2), 29 | bn2=L.BatchNormalization(256), 30 | conv3=L.Convolution2D(256, 384, 3, pad=1), 31 | conv4=L.Convolution2D(384, 384, 3, pad=1), 32 | conv5=L.Convolution2D(384, 256, 3, pad=1), 33 | fc6=L.Linear(9216, 4096), 34 | fc7=L.Linear(4096, 4096), 35 | fc8=L.Linear(4096, 1000), 36 | ) 37 | self.train = True 38 | 39 | def __call__(self, x, t): 40 | h = self.bn1(self.conv1(x), test=not self.train) 41 | h = F.max_pooling_2d(F.relu(h), 3, stride=2) 42 | h = self.bn2(self.conv2(h), test=not self.train) 43 | h = F.max_pooling_2d(F.relu(h), 3, stride=2) 44 | h = F.relu(self.conv3(h)) 45 | h = F.relu(self.conv4(h)) 46 | h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2) 47 | h = F.dropout(F.relu(self.fc6(h)), train=self.train) 48 | h = F.dropout(F.relu(self.fc7(h)), train=self.train) 49 | h = self.fc8(h) 50 | 51 | self.loss = F.softmax_cross_entropy(h, t) 52 | self.accuracy = F.accuracy(h, t) 53 | return self.loss 54 | 55 | def predict(self, x_data): 56 | x = chainer.Variable(x_data, volatile=True) 57 | h = self.bn1(self.conv1(x), test=True) 58 | h = F.max_pooling_2d(F.relu(h), 3, stride=2) 59 | h = self.bn2(self.conv2(h), test=True) 60 | h = F.max_pooling_2d(F.relu(h), 3, stride=2) 61 | h = F.relu(self.conv3(h)) 62 | h = F.relu(self.conv4(h)) 63 | h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2) 64 | h = F.relu(self.fc6(h)) 65 | h = F.relu(self.fc7(h)) 66 | h = self.fc8(h) 67 | return F.softmax(h) 68 | -------------------------------------------------------------------------------- /src/model_templates/alexnet_tf.py: -------------------------------------------------------------------------------- 1 | # -*- encoding:utf-8 -*- 2 | # HINT:image 3 | import tensorflow as tf 4 | 5 | ''' 6 | Alexnet: 7 | ImageNet Classification with Deep Convolutional Neural Networks 8 | http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf 9 | ''' 10 | 11 | 12 | def inference(images, keep_prob): 13 | 
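    # (Descriptive note added for clarity; not in the original file.)
    # `images` is expected to be an NHWC float32 tensor of shape
    # [None, 128, 128, 3] -- inspect_by_tensorflow() in imagenet_inspect.py
    # feeds exactly that placeholder -- and `keep_prob` is the dropout keep
    # probability applied to fc6/fc7 (set to 1.0 at inference time).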
14 | def weight_variable(shape): 15 | # conv : shape=[kernel_height, kernel_widht, network_input, network_output] 16 | # fc : shape = [network_input, network_output] 17 | initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=1e-1) 18 | return tf.Variable(initial, name='weights') 19 | 20 | def conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME'): 21 | return tf.nn.conv2d(x, W, strides, padding=padding) 22 | 23 | def bias_variable(shape, conv): 24 | initial = tf.constant(0.0, shape=shape, dtype=tf.float32) 25 | biases = tf.Variable(initial, name='biases') 26 | bias = tf.nn.bias_add(conv, biases, 'NHWC') 27 | return bias 28 | 29 | def max_pool(input, name, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1]): 30 | return tf.nn.max_pool(input, ksize=ksize, strides=strides, 31 | padding='VALID', data_format='NHWC', name=name) 32 | 33 | # conv1 34 | with tf.name_scope('conv1') as scope: 35 | W_conv1 = weight_variable([11, 11, 3, 64]) 36 | c_conv1 = conv2d(images, W_conv1, strides=[1, 4, 4, 1], padding='VALID') 37 | b_conv1 = bias_variable([64], c_conv1) 38 | h_conv1 = tf.nn.relu(b_conv1, name=scope) 39 | 40 | # pool1 41 | with tf.name_scope('pool1') as scope: 42 | h_pool1 = max_pool(h_conv1, scope) 43 | 44 | # conv2 45 | with tf.name_scope('conv2') as scope: 46 | W_conv2 = weight_variable([5, 5, 64, 192]) 47 | c_conv2 = conv2d(h_pool1, W_conv2) 48 | b_conv2 = bias_variable([192], c_conv2) 49 | h_conv2 = tf.nn.relu(b_conv2, name=scope) 50 | 51 | # pool2 52 | with tf.name_scope('pool2') as scope: 53 | h_pool2 = max_pool(h_conv2, scope) 54 | 55 | # conv3 56 | with tf.name_scope('conv3') as scope: 57 | W_conv3 = weight_variable([3, 3, 192, 384]) 58 | c_conv3 = conv2d(h_pool2, W_conv3) 59 | b_conv3 = bias_variable([384], c_conv3) 60 | h_conv3 = tf.nn.relu(b_conv3, name=scope) 61 | 62 | # conv4 63 | with tf.name_scope('conv4') as scope: 64 | W_conv4 = weight_variable([3, 3, 384, 256]) 65 | c_conv4 = conv2d(h_conv3, W_conv4) 66 | b_conv4 = bias_variable([256], c_conv4) 67 | h_conv4 = tf.nn.relu(b_conv4, name=scope) 68 | 69 | # conv5 70 | with tf.name_scope('conv5') as scope: 71 | W_conv5 = weight_variable([3, 3, 256, 256]) 72 | c_conv5 = conv2d(h_conv4, W_conv5) 73 | b_conv5 = bias_variable([256], c_conv5) 74 | h_conv5 = tf.nn.relu(b_conv5, name=scope) 75 | 76 | # pool5 77 | with tf.name_scope('pool5') as scope: 78 | h_pool5 = max_pool(h_conv5, scope) 79 | 80 | # fc6 81 | with tf.name_scope('fc6') as scope: 82 | r_fc6 = tf.reshape(h_pool5, [-1, 256 * 2 * 2]) 83 | W_fc6 = weight_variable([256 * 2 * 2, 4096]) 84 | b_fc6 = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), name='biases') 85 | h_fc6 = tf.nn.relu_layer(r_fc6, W_fc6, b_fc6, name=scope) 86 | h_fc6_dropout = tf.nn.dropout(h_fc6, keep_prob) 87 | 88 | # fc7 89 | with tf.name_scope('fc7') as scope: 90 | W_fc7 = weight_variable([4096, 4096]) 91 | b_fc7 = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), name='biases') 92 | h_fc7 = tf.nn.relu_layer(h_fc6_dropout, W_fc7, b_fc7, name=scope) 93 | h_fc7_dropout = tf.nn.dropout(h_fc7, keep_prob) 94 | 95 | # fc8 96 | with tf.name_scope('fc8') as scope: 97 | W_fc8 = weight_variable([4096, 1000]) 98 | b_fc8 = tf.Variable(tf.constant(0.0, shape=[1000], dtype=tf.float32), name='biases') 99 | h_fc8 = tf.matmul(h_fc7_dropout, W_fc8) + b_fc8 100 | 101 | return h_fc8 102 | 103 | 104 | def loss(logits, labels): 105 | cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name='xentropy') 106 | loss = tf.reduce_mean(cross_entropy, name='xentropy_mean') 107 | return loss 
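# ------------------------------------------------------------------------------
# (Illustrative sketch, not part of the original file.) inference()/loss()
# above and accuracy()/training() below are meant to be composed by the
# training driver roughly like this; the placeholder names and the 1e-4
# learning rate are hypothetical:
#
#     images_ph = tf.placeholder(tf.float32, [None, 128, 128, 3])
#     labels_ph = tf.placeholder(tf.int64, [None])
#     keep_prob = tf.placeholder(tf.float32)
#
#     logits = inference(images_ph, keep_prob)
#     xent = loss(logits, labels_ph)
#     train_op = training(xent, 1e-4)
#     acc_op = accuracy(logits, labels_ph)
#
#     # one step: sess.run(train_op, feed_dict={images_ph: batch_images,
#     #                                         labels_ph: batch_labels,
#     #                                         keep_prob: 0.5})
# ------------------------------------------------------------------------------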
108 | 109 | 110 | def accuracy(logits, sparse_indecies): 111 | labels = tf.one_hot(sparse_indecies, 1000, 1, 0) 112 | correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)) 113 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 114 | return accuracy 115 | 116 | 117 | def training(loss, learning_rate): 118 | train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss) 119 | return train_step 120 | -------------------------------------------------------------------------------- /src/model_templates/googlenet.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # HINT:image 3 | import chainer 4 | import chainer.functions as F 5 | import chainer.links as L 6 | 7 | """ 8 | GoogLeNet: 9 | Going Deeper with Convolutions 10 | http://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf 11 | ############################## 12 | ## DO NOT CHANGE CLASS NAME ## 13 | ############################## 14 | """ 15 | 16 | 17 | class Network(chainer.Chain): 18 | 19 | insize = 224 20 | 21 | def __init__(self): 22 | super(Network, self).__init__( 23 | conv1=L.Convolution2D(3, 64, 7, stride=2, pad=3), 24 | conv2_reduce=L.Convolution2D(64, 64, 1), 25 | conv2=L.Convolution2D(64, 192, 3, stride=1, pad=1), 26 | inc3a=L.Inception(192, 64, 96, 128, 16, 32, 32), 27 | inc3b=L.Inception(256, 128, 128, 192, 32, 96, 64), 28 | inc4a=L.Inception(480, 192, 96, 208, 16, 48, 64), 29 | inc4b=L.Inception(512, 160, 112, 224, 24, 64, 64), 30 | inc4c=L.Inception(512, 128, 128, 256, 24, 64, 64), 31 | inc4d=L.Inception(512, 112, 144, 288, 32, 64, 64), 32 | inc4e=L.Inception(528, 256, 160, 320, 32, 128, 128), 33 | inc5a=L.Inception(832, 256, 160, 320, 32, 128, 128), 34 | inc5b=L.Inception(832, 384, 192, 384, 48, 128, 128), 35 | loss3_fc=L.Linear(1024, 1000), 36 | 37 | loss1_conv=L.Convolution2D(512, 128, 1), 38 | loss1_fc1=L.Linear(4 * 4 * 128, 1024), 39 | loss1_fc2=L.Linear(1024, 1000), 40 | 41 | loss2_conv=L.Convolution2D(528, 128, 1), 42 | loss2_fc1=L.Linear(4 * 4 * 128, 1024), 43 | loss2_fc2=L.Linear(1024, 1000) 44 | ) 45 | self.train = True 46 | 47 | def __call__(self, x, t): 48 | h = F.relu(self.conv1(x)) 49 | h = F.local_response_normalization( 50 | F.max_pooling_2d(h, 3, stride=2), n=5) 51 | h = F.relu(self.conv2_reduce(h)) 52 | h = F.relu(self.conv2(h)) 53 | h = F.max_pooling_2d( 54 | F.local_response_normalization(h, n=5), 3, stride=2) 55 | 56 | h = self.inc3a(h) 57 | h = self.inc3b(h) 58 | h = F.max_pooling_2d(h, 3, stride=2) 59 | h = self.inc4a(h) 60 | 61 | l = F.average_pooling_2d(h, 5, stride=3) 62 | l = F.relu(self.loss1_conv(l)) 63 | l = F.relu(self.loss1_fc1(l)) 64 | l = self.loss1_fc2(l) 65 | self.loss1 = F.softmax_cross_entropy(l, t) 66 | 67 | h = self.inc4b(h) 68 | h = self.inc4c(h) 69 | h = self.inc4d(h) 70 | 71 | l = F.average_pooling_2d(h, 5, stride=3) 72 | l = F.relu(self.loss2_conv(l)) 73 | l = F.relu(self.loss2_fc1(l)) 74 | l = self.loss2_fc2(l) 75 | self.loss2 = F.softmax_cross_entropy(l, t) 76 | 77 | h = self.inc4e(h) 78 | h = F.max_pooling_2d(h, 3, stride=2) 79 | h = self.inc5a(h) 80 | h = self.inc5b(h) 81 | 82 | h = F.average_pooling_2d(h, 7, stride=1) 83 | h = self.loss3_fc(F.dropout(h, 0.4, train=self.train)) 84 | self.loss3 = F.softmax_cross_entropy(h, t) 85 | 86 | self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3 87 | self.accuracy = F.accuracy(h, t) 88 | return self.loss 89 | 90 | def predict(self, x_data): 91 | x = chainer.Variable(x_data, volatile=True) 92 | h = F.relu(self.conv1(x)) 93 | h = 
F.local_response_normalization( 94 | F.max_pooling_2d(h, 3, stride=2), n=5) 95 | h = F.relu(self.conv2_reduce(h)) 96 | h = F.relu(self.conv2(h)) 97 | h = F.max_pooling_2d( 98 | F.local_response_normalization(h, n=5), 3, stride=2) 99 | 100 | h = self.inc3a(h) 101 | h = self.inc3b(h) 102 | h = F.max_pooling_2d(h, 3, stride=2) 103 | h = self.inc4a(h) 104 | 105 | l = F.average_pooling_2d(h, 5, stride=3) 106 | l = F.relu(self.loss1_conv(l)) 107 | l = F.relu(self.loss1_fc1(l)) 108 | l = self.loss1_fc2(l) 109 | 110 | h = self.inc4b(h) 111 | h = self.inc4c(h) 112 | h = self.inc4d(h) 113 | 114 | l = F.average_pooling_2d(h, 5, stride=3) 115 | l = F.relu(self.loss2_conv(l)) 116 | l = F.relu(self.loss2_fc1(l)) 117 | l = self.loss2_fc2(l) 118 | 119 | h = self.inc4e(h) 120 | h = F.max_pooling_2d(h, 3, stride=2) 121 | h = self.inc5a(h) 122 | h = self.inc5b(h) 123 | 124 | h = F.average_pooling_2d(h, 7, stride=1) 125 | h = self.loss3_fc(F.dropout(h, 0.4, train=False)) 126 | 127 | return F.softmax(h) -------------------------------------------------------------------------------- /src/model_templates/googlenetbn.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # HINT:image 3 | import chainer 4 | import chainer.functions as F 5 | import chainer.links as L 6 | 7 | """ 8 | GoogLeNet: 9 | Going Deeper with Convolutions 10 | http://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf 11 | ############################## 12 | ## DO NOT CHANGE CLASS NAME ## 13 | ############################## 14 | """ 15 | 16 | 17 | class Network(chainer.Chain): 18 | 19 | """New GoogLeNet of BatchNormalization version.""" 20 | 21 | insize = 224 22 | 23 | def __init__(self): 24 | super(Network, self).__init__( 25 | conv1=L.Convolution2D(3, 64, 7, stride=2, pad=3, nobias=True), 26 | norm1=L.BatchNormalization(64), 27 | conv2=L.Convolution2D(64, 192, 3, pad=1, nobias=True), 28 | norm2=L.BatchNormalization(192), 29 | inc3a=L.InceptionBN(192, 64, 64, 64, 64, 96, 'avg', 32), 30 | inc3b=L.InceptionBN(256, 64, 64, 96, 64, 96, 'avg', 64), 31 | inc3c=L.InceptionBN(320, 0, 128, 160, 64, 96, 'max', stride=2), 32 | inc4a=L.InceptionBN(576, 224, 64, 96, 96, 128, 'avg', 128), 33 | inc4b=L.InceptionBN(576, 192, 96, 128, 96, 128, 'avg', 128), 34 | inc4c=L.InceptionBN(576, 128, 128, 160, 128, 160, 'avg', 128), 35 | inc4d=L.InceptionBN(576, 64, 128, 192, 160, 192, 'avg', 128), 36 | inc4e=L.InceptionBN(576, 0, 128, 192, 192, 256, 'max', stride=2), 37 | inc5a=L.InceptionBN(1024, 352, 192, 320, 160, 224, 'avg', 128), 38 | inc5b=L.InceptionBN(1024, 352, 192, 320, 192, 224, 'max', 128), 39 | out=L.Linear(1024, 1000), 40 | 41 | conva=L.Convolution2D(576, 128, 1, nobias=True), 42 | norma=L.BatchNormalization(128), 43 | lina=L.Linear(2048, 1024, nobias=True), 44 | norma2=L.BatchNormalization(1024), 45 | outa=L.Linear(1024, 1000), 46 | 47 | convb=L.Convolution2D(576, 128, 1, nobias=True), 48 | normb=L.BatchNormalization(128), 49 | linb=L.Linear(2048, 1024, nobias=True), 50 | normb2=L.BatchNormalization(1024), 51 | outb=L.Linear(1024, 1000), 52 | ) 53 | self._train = True 54 | 55 | @property 56 | def train(self): 57 | return self._train 58 | 59 | @train.setter 60 | def train(self, value): 61 | self._train = value 62 | self.inc3a.train = value 63 | self.inc3b.train = value 64 | self.inc3c.train = value 65 | self.inc4a.train = value 66 | self.inc4b.train = value 67 | self.inc4c.train = value 68 | self.inc4d.train = value 69 | self.inc4e.train = value 70 | self.inc5a.train = value 71 | self.inc5b.train = 
value 72 | 73 | def __call__(self, x, t): 74 | test = not self.train 75 | 76 | h = F.max_pooling_2d( 77 | F.relu(self.norm1(self.conv1(x), test=test)), 3, stride=2, pad=1) 78 | h = F.max_pooling_2d( 79 | F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1) 80 | 81 | h = self.inc3a(h) 82 | h = self.inc3b(h) 83 | h = self.inc3c(h) 84 | h = self.inc4a(h) 85 | 86 | a = F.average_pooling_2d(h, 5, stride=3) 87 | a = F.relu(self.norma(self.conva(a), test=test)) 88 | a = F.relu(self.norma2(self.lina(a), test=test)) 89 | a = self.outa(a) 90 | self.loss1 = F.softmax_cross_entropy(a, t) 91 | 92 | h = self.inc4b(h) 93 | h = self.inc4c(h) 94 | h = self.inc4d(h) 95 | 96 | b = F.average_pooling_2d(h, 5, stride=3) 97 | b = F.relu(self.normb(self.convb(b), test=test)) 98 | b = F.relu(self.normb2(self.linb(b), test=test)) 99 | b = self.outb(b) 100 | self.loss2 = F.softmax_cross_entropy(b, t) 101 | 102 | h = self.inc4e(h) 103 | h = self.inc5a(h) 104 | h = F.average_pooling_2d(self.inc5b(h), 7) 105 | h = self.out(h) 106 | self.loss3 = F.softmax_cross_entropy(h, t) 107 | 108 | self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3 109 | self.accuracy = F.accuracy(h, t) 110 | return self.loss 111 | 112 | def predict(self, x_data): 113 | x = chainer.Variable(x_data, volatile=True) 114 | self.train = False 115 | test = True 116 | 117 | h = F.max_pooling_2d( 118 | F.relu(self.norm1(self.conv1(x), test=test)), 3, stride=2, pad=1) 119 | h = F.max_pooling_2d( 120 | F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1) 121 | 122 | h = self.inc3a(h) 123 | h = self.inc3b(h) 124 | h = self.inc3c(h) 125 | h = self.inc4a(h) 126 | 127 | a = F.average_pooling_2d(h, 5, stride=3) 128 | a = F.relu(self.norma(self.conva(a), test=test)) 129 | a = F.relu(self.norma2(self.lina(a), test=test)) 130 | a = self.outa(a) 131 | 132 | h = self.inc4b(h) 133 | h = self.inc4c(h) 134 | h = self.inc4d(h) 135 | 136 | b = F.average_pooling_2d(h, 5, stride=3) 137 | b = F.relu(self.normb(self.convb(b), test=test)) 138 | b = F.relu(self.normb2(self.linb(b), test=test)) 139 | b = self.outb(b) 140 | 141 | h = self.inc4e(h) 142 | h = self.inc5a(h) 143 | h = F.average_pooling_2d(self.inc5b(h), 7) 144 | h = self.out(h) 145 | 146 | return F.softmax(0.3 * (a + b) + h) 147 | -------------------------------------------------------------------------------- /src/model_templates/nin.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # HINT:image 3 | import math 4 | 5 | import chainer 6 | import chainer.functions as F 7 | import chainer.links as L 8 | 9 | """ 10 | Network in Network 11 | https://arxiv.org/abs/1312.4400 12 | 13 | ############################## 14 | ## DO NOT CHANGE CLASS NAME ## 15 | ############################## 16 | """ 17 | 18 | 19 | class Network(chainer.Chain): 20 | 21 | """Network-in-Network example model.""" 22 | 23 | insize = 227 24 | 25 | def __init__(self): 26 | w = math.sqrt(2) # MSRA scaling 27 | super(Network, self).__init__( 28 | mlpconv1=L.MLPConvolution2D( 29 | 3, (96, 96, 96), 11, stride=4, wscale=w), 30 | mlpconv2=L.MLPConvolution2D( 31 | 96, (256, 256, 256), 5, pad=2, wscale=w), 32 | mlpconv3=L.MLPConvolution2D( 33 | 256, (384, 384, 384), 3, pad=1, wscale=w), 34 | mlpconv4=L.MLPConvolution2D( 35 | 384, (1024, 1024, 1000), 3, pad=1, wscale=w), 36 | ) 37 | self.train = True 38 | 39 | def __call__(self, x, t): 40 | h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2) 41 | h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2) 
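        # (Descriptive note added for clarity; not in the original file.)
        # NIN has no fully connected head: mlpconv4 below emits 1000 feature
        # maps, which the 6x6 average pooling collapses to one value per map
        # before the reshape turns them directly into per-class scores.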
42 | h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2) 43 | h = self.mlpconv4(F.dropout(h, train=self.train)) 44 | h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000)) 45 | 46 | self.loss = F.softmax_cross_entropy(h, t) 47 | self.accuracy = F.accuracy(h, t) 48 | return self.loss 49 | 50 | def predict(self, x_data): 51 | x = chainer.Variable(x_data, volatile=True) 52 | h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2) 53 | h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2) 54 | h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2) 55 | h = self.mlpconv4(F.dropout(h, train=False)) 56 | h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000)) 57 | return F.softmax(h) 58 | 59 | -------------------------------------------------------------------------------- /src/model_templates/uei_lstm.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # HINT:text 3 | import chainer 4 | import chainer.functions as F 5 | import chainer.links as L 6 | import numpy 7 | 8 | """ 9 | ############################## 10 | ## DO NOT CHANGE CLASS NAME ## 11 | ############################## 12 | """ 13 | 14 | 15 | class Network(chainer.Chain): 16 | def __init__(self, n_vocab, n_units, dropout_ratio=0.0, train=True): 17 | super(Network, self).__init__( 18 | embed=L.EmbedID(n_vocab, n_units), 19 | l1=L.LSTM(n_units, n_units), 20 | l2=L.LSTM(n_units, n_units), 21 | l3=L.LSTM(n_units, n_units), 22 | l4=L.LSTM(n_units, n_units), 23 | l5=L.LSTM(n_units, n_units), 24 | l6=L.Linear(n_units, n_vocab), 25 | ) 26 | 27 | self.train = train 28 | self.dropout_ratio = dropout_ratio 29 | 30 | def reset_state(self): 31 | self.l1.reset_state() 32 | self.l2.reset_state() 33 | self.l3.reset_state() 34 | self.l4.reset_state() 35 | self.l5.reset_state() 36 | 37 | def __call__(self, x): 38 | h0 = self.embed(x) 39 | h1 = self.l1(F.dropout(h0, ratio=self.dropout_ratio, train=self.train)) 40 | h2 = self.l2(F.dropout(h1, ratio=self.dropout_ratio, train=self.train)) 41 | h3 = self.l3(F.dropout(h2, ratio=self.dropout_ratio, train=self.train)) 42 | h4 = self.l4(F.dropout(h3, ratio=self.dropout_ratio, train=self.train)) 43 | h5 = self.l5(F.dropout(h4, ratio=self.dropout_ratio, train=self.train)) 44 | y = self.l6(F.dropout(h5, ratio=self.dropout_ratio, train=self.train)) 45 | 46 | return y 47 | 48 | def predict(self, x): 49 | h0 = self.embed(x) 50 | h1 = self.l1(F.dropout(h0, ratio=self.dropout_ratio, train=False)) 51 | h2 = self.l2(F.dropout(h1, ratio=self.dropout_ratio, train=False)) 52 | h3 = self.l3(F.dropout(h2, ratio=self.dropout_ratio, train=False)) 53 | h4 = self.l4(F.dropout(h3, ratio=self.dropout_ratio, train=False)) 54 | h5 = self.l5(F.dropout(h4, ratio=self.dropout_ratio, train=False)) 55 | y = self.l6(F.dropout(h5, ratio=self.dropout_ratio, train=False)) 56 | 57 | return F.softmax(y) 58 | 59 | def add_embed(self, add_counts, dimension): 60 | add_W = numpy.random.randn(add_counts, dimension).astype(numpy.float32) 61 | add_gW = numpy.empty((add_counts, dimension)).astype(numpy.float32) 62 | self.embed.W = numpy.r_[self.embed.W, add_W] 63 | self.embed.gW = numpy.r_[self.embed.gW, add_gW] 64 | -------------------------------------------------------------------------------- /src/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/models/__init__.py 
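# ------------------------------------------------------------------------------
# (Illustrative sketch; not a file in the repository.) Driving the uei_lstm
# Network above by hand, the way text_predict.predict() does. `vocab` and the
# sizes here are hypothetical:
#
#     import numpy as np
#     from chainer import Variable
#     from uei_lstm import Network
#
#     vocab = {u'a': 0, u'b': 1, u'c': 2}
#     lm = Network(len(vocab), 128, dropout_ratio=0.0, train=False)
#     lm.reset_state()
#     prev = Variable(np.array([vocab[u'a']], dtype=np.int32))
#     prob = lm.predict(prev)  # softmax over the vocabulary, shape (1, len(vocab))
# ------------------------------------------------------------------------------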
-------------------------------------------------------------------------------- /src/profiler.py: -------------------------------------------------------------------------------- 1 | from werkzeug.contrib.profiler import ProfilerMiddleware 2 | from main import app 3 | import os 4 | from gevent.wsgi import WSGIServer 5 | 6 | 7 | app.config.from_envvar('CSLAIER_CONFIG') 8 | cslaier_config_params = ('DATABASE_PATH', 'UPLOADED_RAW_FILE', 'UPLOADED_FILE', 'PREPARED_DATA', 9 | 'TRAINED_DATA', 'INSPECTION_TEMP', 'LOG_DIR') 10 | # WebApp settings 11 | app.config['CSLAIER_ROOT'] = os.getcwd() 12 | 13 | 14 | def normalize_config_path(): 15 | for param in cslaier_config_params: 16 | if not app.config[param].startswith('/'): 17 | app.config[param] = os.path.abspath(app.config['CSLAIER_ROOT'] + os.sep + app.config[param]) 18 | 19 | normalize_config_path() 20 | 21 | app.config['PROFILE'] = True 22 | app.wsgi_app = ProfilerMiddleware(app.wsgi_app) 23 | app.debug = app.config['DEBUG'] 24 | server = WSGIServer((app.config['HOST'], app.config['PORT']), app) 25 | server.serve_forever() 26 | -------------------------------------------------------------------------------- /src/static/CodeMirror/lib/codemirror.css: -------------------------------------------------------------------------------- 1 | /* BASICS */ 2 | 3 | .CodeMirror { 4 | /* Set height, width, borders, and global font properties here */ 5 | font-family: monospace; 6 | height: 300px; 7 | color: black; 8 | } 9 | 10 | /* PADDING */ 11 | 12 | .CodeMirror-lines { 13 | padding: 4px 0; /* Vertical padding around content */ 14 | } 15 | .CodeMirror pre { 16 | padding: 0 4px; /* Horizontal padding of content */ 17 | } 18 | 19 | .CodeMirror-scrollbar-filler, .CodeMirror-gutter-filler { 20 | background-color: white; /* The little square between H and V scrollbars */ 21 | } 22 | 23 | /* GUTTER */ 24 | 25 | .CodeMirror-gutters { 26 | border-right: 1px solid #ddd; 27 | background-color: #f7f7f7; 28 | white-space: nowrap; 29 | } 30 | .CodeMirror-linenumbers {} 31 | .CodeMirror-linenumber { 32 | padding: 0 3px 0 5px; 33 | min-width: 20px; 34 | text-align: right; 35 | color: #999; 36 | white-space: nowrap; 37 | } 38 | 39 | .CodeMirror-guttermarker { color: black; } 40 | .CodeMirror-guttermarker-subtle { color: #999; } 41 | 42 | /* CURSOR */ 43 | 44 | .CodeMirror-cursor { 45 | border-left: 1px solid black; 46 | border-right: none; 47 | width: 0; 48 | } 49 | /* Shown when moving in bi-directional text */ 50 | .CodeMirror div.CodeMirror-secondarycursor { 51 | border-left: 1px solid silver; 52 | } 53 | .cm-fat-cursor .CodeMirror-cursor { 54 | width: auto; 55 | border: 0; 56 | background: #7e7; 57 | } 58 | .cm-fat-cursor div.CodeMirror-cursors { 59 | z-index: 1; 60 | } 61 | 62 | .cm-animate-fat-cursor { 63 | width: auto; 64 | border: 0; 65 | -webkit-animation: blink 1.06s steps(1) infinite; 66 | -moz-animation: blink 1.06s steps(1) infinite; 67 | animation: blink 1.06s steps(1) infinite; 68 | background-color: #7e7; 69 | } 70 | @-moz-keyframes blink { 71 | 0% {} 72 | 50% { background-color: transparent; } 73 | 100% {} 74 | } 75 | @-webkit-keyframes blink { 76 | 0% {} 77 | 50% { background-color: transparent; } 78 | 100% {} 79 | } 80 | @keyframes blink { 81 | 0% {} 82 | 50% { background-color: transparent; } 83 | 100% {} 84 | } 85 | 86 | /* Can style cursor different in overwrite (non-insert) mode */ 87 | .CodeMirror-overwrite .CodeMirror-cursor {} 88 | 89 | .cm-tab { display: inline-block; text-decoration: inherit; } 90 | 91 | .CodeMirror-ruler { 92 | border-left: 1px 
solid #ccc; 93 | position: absolute; 94 | } 95 | 96 | /* DEFAULT THEME */ 97 | 98 | .cm-s-default .cm-header {color: blue;} 99 | .cm-s-default .cm-quote {color: #090;} 100 | .cm-negative {color: #d44;} 101 | .cm-positive {color: #292;} 102 | .cm-header, .cm-strong {font-weight: bold;} 103 | .cm-em {font-style: italic;} 104 | .cm-link {text-decoration: underline;} 105 | .cm-strikethrough {text-decoration: line-through;} 106 | 107 | .cm-s-default .cm-keyword {color: #708;} 108 | .cm-s-default .cm-atom {color: #219;} 109 | .cm-s-default .cm-number {color: #164;} 110 | .cm-s-default .cm-def {color: #00f;} 111 | .cm-s-default .cm-variable, 112 | .cm-s-default .cm-punctuation, 113 | .cm-s-default .cm-property, 114 | .cm-s-default .cm-operator {} 115 | .cm-s-default .cm-variable-2 {color: #05a;} 116 | .cm-s-default .cm-variable-3 {color: #085;} 117 | .cm-s-default .cm-comment {color: #a50;} 118 | .cm-s-default .cm-string {color: #a11;} 119 | .cm-s-default .cm-string-2 {color: #f50;} 120 | .cm-s-default .cm-meta {color: #555;} 121 | .cm-s-default .cm-qualifier {color: #555;} 122 | .cm-s-default .cm-builtin {color: #30a;} 123 | .cm-s-default .cm-bracket {color: #997;} 124 | .cm-s-default .cm-tag {color: #170;} 125 | .cm-s-default .cm-attribute {color: #00c;} 126 | .cm-s-default .cm-hr {color: #999;} 127 | .cm-s-default .cm-link {color: #00c;} 128 | 129 | .cm-s-default .cm-error {color: #f00;} 130 | .cm-invalidchar {color: #f00;} 131 | 132 | .CodeMirror-composing { border-bottom: 2px solid; } 133 | 134 | /* Default styles for common addons */ 135 | 136 | div.CodeMirror span.CodeMirror-matchingbracket {color: #0f0;} 137 | div.CodeMirror span.CodeMirror-nonmatchingbracket {color: #f22;} 138 | .CodeMirror-matchingtag { background: rgba(255, 150, 0, .3); } 139 | .CodeMirror-activeline-background {background: #e8f2ff;} 140 | 141 | /* STOP */ 142 | 143 | /* The rest of this file contains styles related to the mechanics of 144 | the editor. You probably shouldn't touch them. */ 145 | 146 | .CodeMirror { 147 | position: relative; 148 | overflow: hidden; 149 | background: white; 150 | } 151 | 152 | .CodeMirror-scroll { 153 | overflow: scroll !important; /* Things will break if this is overridden */ 154 | /* 30px is the magic margin used to hide the element's real scrollbars */ 155 | /* See overflow: hidden in .CodeMirror */ 156 | margin-bottom: -30px; margin-right: -30px; 157 | padding-bottom: 30px; 158 | height: 100%; 159 | outline: none; /* Prevent dragging from highlighting the element */ 160 | position: relative; 161 | } 162 | .CodeMirror-sizer { 163 | position: relative; 164 | border-right: 30px solid transparent; 165 | } 166 | 167 | /* The fake, visible scrollbars. Used to force redraw during scrolling 168 | before actual scrolling happens, thus preventing shaking and 169 | flickering artifacts.
*/ 170 | .CodeMirror-vscrollbar, .CodeMirror-hscrollbar, .CodeMirror-scrollbar-filler, .CodeMirror-gutter-filler { 171 | position: absolute; 172 | z-index: 6; 173 | display: none; 174 | } 175 | .CodeMirror-vscrollbar { 176 | right: 0; top: 0; 177 | overflow-x: hidden; 178 | overflow-y: scroll; 179 | } 180 | .CodeMirror-hscrollbar { 181 | bottom: 0; left: 0; 182 | overflow-y: hidden; 183 | overflow-x: scroll; 184 | } 185 | .CodeMirror-scrollbar-filler { 186 | right: 0; bottom: 0; 187 | } 188 | .CodeMirror-gutter-filler { 189 | left: 0; bottom: 0; 190 | } 191 | 192 | .CodeMirror-gutters { 193 | position: absolute; left: 0; top: 0; 194 | z-index: 3; 195 | } 196 | .CodeMirror-gutter { 197 | white-space: normal; 198 | height: 100%; 199 | display: inline-block; 200 | margin-bottom: -30px; 201 | /* Hack to make IE7 behave */ 202 | *zoom:1; 203 | *display:inline; 204 | } 205 | .CodeMirror-gutter-wrapper { 206 | position: absolute; 207 | z-index: 4; 208 | background: none !important; 209 | border: none !important; 210 | } 211 | .CodeMirror-gutter-background { 212 | position: absolute; 213 | top: 0; bottom: 0; 214 | z-index: 4; 215 | } 216 | .CodeMirror-gutter-elt { 217 | position: absolute; 218 | cursor: default; 219 | z-index: 4; 220 | } 221 | .CodeMirror-gutter-wrapper { 222 | -webkit-user-select: none; 223 | -moz-user-select: none; 224 | user-select: none; 225 | } 226 | 227 | .CodeMirror-lines { 228 | cursor: text; 229 | min-height: 1px; /* prevents collapsing before first draw */ 230 | } 231 | .CodeMirror pre { 232 | /* Reset some styles that the rest of the page might have set */ 233 | -moz-border-radius: 0; -webkit-border-radius: 0; border-radius: 0; 234 | border-width: 0; 235 | background: transparent; 236 | font-family: inherit; 237 | font-size: inherit; 238 | margin: 0; 239 | white-space: pre; 240 | word-wrap: normal; 241 | line-height: inherit; 242 | color: inherit; 243 | z-index: 2; 244 | position: relative; 245 | overflow: visible; 246 | -webkit-tap-highlight-color: transparent; 247 | } 248 | .CodeMirror-wrap pre { 249 | word-wrap: break-word; 250 | white-space: pre-wrap; 251 | word-break: normal; 252 | } 253 | 254 | .CodeMirror-linebackground { 255 | position: absolute; 256 | left: 0; right: 0; top: 0; bottom: 0; 257 | z-index: 0; 258 | } 259 | 260 | .CodeMirror-linewidget { 261 | position: relative; 262 | z-index: 2; 263 | overflow: auto; 264 | } 265 | 266 | .CodeMirror-widget {} 267 | 268 | .CodeMirror-code { 269 | outline: none; 270 | } 271 | 272 | /* Force content-box sizing for the elements where we expect it */ 273 | .CodeMirror-scroll, 274 | .CodeMirror-sizer, 275 | .CodeMirror-gutter, 276 | .CodeMirror-gutters, 277 | .CodeMirror-linenumber { 278 | -moz-box-sizing: content-box; 279 | box-sizing: content-box; 280 | } 281 | 282 | .CodeMirror-measure { 283 | position: absolute; 284 | width: 100%; 285 | height: 0; 286 | overflow: hidden; 287 | visibility: hidden; 288 | } 289 | 290 | .CodeMirror-cursor { position: absolute; } 291 | .CodeMirror-measure pre { position: static; } 292 | 293 | div.CodeMirror-cursors { 294 | visibility: hidden; 295 | position: relative; 296 | z-index: 3; 297 | } 298 | div.CodeMirror-dragcursors { 299 | visibility: visible; 300 | } 301 | 302 | .CodeMirror-focused div.CodeMirror-cursors { 303 | visibility: visible; 304 | } 305 | 306 | .CodeMirror-selected { background: #d9d9d9; } 307 | .CodeMirror-focused .CodeMirror-selected { background: #d7d4f0; } 308 | .CodeMirror-crosshair { cursor: crosshair; } 309 | .CodeMirror-line::selection, .CodeMirror-line > 
span::selection, .CodeMirror-line > span > span::selection { background: #d7d4f0; } 310 | .CodeMirror-line::-moz-selection, .CodeMirror-line > span::-moz-selection, .CodeMirror-line > span > span::-moz-selection { background: #d7d4f0; } 311 | 312 | .cm-searching { 313 | background: #ffa; 314 | background: rgba(255, 255, 0, .4); 315 | } 316 | 317 | /* IE7 hack to prevent it from returning funny offsetTops on the spans */ 318 | .CodeMirror span { *vertical-align: text-bottom; } 319 | 320 | /* Used to force a border model for a node */ 321 | .cm-force-border { padding-right: .1px; } 322 | 323 | @media print { 324 | /* Hide the cursor when printing */ 325 | .CodeMirror div.CodeMirror-cursors { 326 | visibility: hidden; 327 | } 328 | } 329 | 330 | /* See issue #2901 */ 331 | .cm-tab-wrap-hack:after { content: ''; } 332 | 333 | /* Help users use markselection to safely style text background */ 334 | span.CodeMirror-selectedtext { background: none; } 335 | -------------------------------------------------------------------------------- /src/static/CodeMirror/mode/python/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | CodeMirror: Python mode 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 25 | 26 |
27 |

Python mode

28 | 29 |
130 | 131 | 132 |

Cython mode

133 | 134 |
157 | 158 | 177 |

Configuration Options for Python mode:

178 |
    179 |
  • version - 2/3 - The version of Python to recognize. Default is 2.
  • 180 |
  • singleLineStringErrors - true/false - If you have a single-line string that is not terminated at the end of the line, this will show subsequent lines as errors if true, otherwise it will consider the newline as the end of the string. Default is false.
  • 181 |
  • hangingIndent - int - If you want to write long arguments to a function starting on a new line, how much that line should be indented. Defaults to one normal indentation unit.
  • 182 |
183 |

Advanced Configuration Options:

184 |

Useful for supersets of Python syntax such as Enthought enaml, IPython magics, and question-mark help

185 |
    186 |
  • singleOperators - RegEx - Regular Expression for single operator matching, default :
    ^[\\+\\-\\*/%&|\\^~<>!]
    including
    @
    on Python 3
  • 187 |
  • singleDelimiters - RegEx - Regular Expression for single delimiter matching, default :
    ^[\\(\\)\\[\\]\\{\\}@,:`=;\\.]
  • 188 |
  • doubleOperators - RegEx - Regular Expression for double operators matching, default :
    ^((==)|(!=)|(<=)|(>=)|(<>)|(<<)|(>>)|(//)|(\\*\\*))
  • 189 |
  • doubleDelimiters - RegEx - Regular Expression for double delimiters matching, default :
    ^((\\+=)|(\\-=)|(\\*=)|(%=)|(/=)|(&=)|(\\|=)|(\\^=))
  • 190 |
  • tripleDelimiters - RegEx - Regular Expression for triple delimiters matching, default :
    ^((//=)|(>>=)|(<<=)|(\\*\\*=))
  • 191 |
  • identifiers - RegEx - Regular Expression for identifier, default :
    ^[_A-Za-z][_A-Za-z0-9]*
    on Python 2 and
    ^[_A-Za-z\u00A1-\uFFFF][_A-Za-z0-9\u00A1-\uFFFF]*
    on Python 3.
  • 192 |
  • extra_keywords - list of string - List of extra words to consider as keywords
  • 193 |
  • extra_builtins - list of string - List of extra words to consider as builtins (a combined configuration sketch follows at the end of this page)
  • 194 |
195 | 196 | 197 |

MIME types defined: text/x-python and text/x-cython.

198 |
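A combined configuration sketch follows; it is an illustrative addition to this page, not part of the original CodeMirror documentation. It wires a few of the options documented above into CodeMirror.fromTextArea; the element id "code-editor" and the chosen option values are assumptions made for the example.

    // Illustrative only: "code-editor" is a hypothetical textarea id.
    var editor = CodeMirror.fromTextArea(document.getElementById("code-editor"), {
      lineNumbers: true,
      mode: {
        name: "python",
        version: 3,                        // recognize Python 3 syntax (default is 2)
        singleLineStringErrors: false,     // a newline ends an unterminated string
        extra_keywords: ["cdef", "cpdef"]  // extra words to highlight as keywords
      }
    });

The same mode can also be selected by MIME type, e.g. {mode: "text/x-cython"} for the Cython variant.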
199 | -------------------------------------------------------------------------------- /src/static/css/cslaier.css: -------------------------------------------------------------------------------- 1 | html, body { 2 | height: 100%; 3 | } 4 | 5 | body { 6 | color:#ccc; 7 | background-color:#010; 8 | padding-top: 70px; 9 | margin-left:30px; 10 | } 11 | 12 | h5 { 13 | font-weight: bold; 14 | } 15 | 16 | body.noscroll { 17 | top: 70px; 18 | position: fixed; 19 | overflow-y: scroll; 20 | } 21 | 22 | .modal-content { 23 | background-color: #010; 24 | border: 1px solid #0c0; 25 | } 26 | 27 | .modal-header { 28 | border-bottom-color: #0c0; 29 | } 30 | 31 | .modal-footer { 32 | border-top-color: #0c0; 33 | } 34 | 35 | .modal-content p.help-block { 36 | font-weight: bold; 37 | } 38 | 39 | .modal-content .btn-cyber { 40 | border:solid 1px #060; 41 | background-color: #040; 42 | font-weight: bold; 43 | } 44 | 45 | input.form-control, textarea.form-control, select.form-control, 46 | pre#network_edit_area,pre#training_log, .CodeMirror, .form-control[disabled] { 47 | background-color: #020; 48 | color: #0c0; 49 | border-color: #060; 50 | } 51 | 52 | select.form-control { 53 | box-shadow: none; 54 | transition: none; 55 | } 56 | 57 | .nav-tabs { 58 | border-bottom: #060; 59 | } 60 | 61 | .nav-tabs > li > a { 62 | background-color: #010; 63 | color:#0c0; 64 | border-bottom: #060; 65 | } 66 | 67 | .nav-tabs > li.active > a { 68 | background-color: #010; 69 | border:solid 1px #060; 70 | color:#0c0; 71 | } 72 | 73 | hr.cyber { 74 | border-color: #0c0; 75 | } 76 | 77 | .nav-tabs > li.active > a:hover, 78 | .nav-tabs > li > a:hover { 79 | background-color: #060; 80 | color:#0f0; 81 | } 82 | 83 | .desc { 84 | color:#c80; 85 | display: block; 86 | } 87 | ul { 88 | color:#c80; 89 | } 90 | 91 | h3 { 92 | color:#fc0; 93 | } 94 | 95 | .container { 96 | height: 100%; 97 | } 98 | 99 | .subtitle { 100 | width: 100%; 101 | border-bottom: 1px solid #0e0; 102 | margin-bottom: 10px; 103 | } 104 | 105 | .subtitle > h2, .subtitle > h3 { 106 | width:75%; 107 | color:#0d0; 108 | display: inline-block; 109 | } 110 | 111 | .subtitle > .button_div { 112 | width:20%; 113 | display: inline-block; 114 | } 115 | 116 | .btn-cyber{ 117 | background-color: #010; 118 | border:solid 1px #060; 119 | color:#0c0; 120 | } 121 | 122 | .btn-cyber:hover{ 123 | background-color: #060; 124 | color:#0f0; 125 | } 126 | 127 | .label-trained{ 128 | background-color: #040; 129 | border:solid 1px #0f0; 130 | color:#0c0; 131 | } 132 | .label-progress{ 133 | background-color: #400; 134 | border:solid 1px #f00; 135 | color:#c00; 136 | } 137 | .label-nottrained{ 138 | background-color: #210; 139 | border:solid 1px #a60; 140 | color:#a60; 141 | } 142 | 143 | .right { 144 | margin-left:auto; 145 | text-align:right; 146 | } 147 | 148 | .left { 149 | margin-right: auto; 150 | text-align: left; 151 | } 152 | 153 | .datasets, .models { 154 | color:#333; 155 | padding-top: 10px; 156 | margin-bottom: 30px; 157 | margin-top: 10px; 158 | } 159 | 160 | .datasets_row, .models_row { 161 | display: flex; 162 | display: -moz-flex; 163 | display: -o-flex; 164 | display: -webkit-flex; 165 | display: -ms-flex; 166 | flex-wrap: wrap; 167 | -moz-flex-wrap: wrap; 168 | -o-flex-wrap: wrap; 169 | -webkit-flex-wrap: wrap; 170 | -ms-flex-wrap: wrap; 171 | } 172 | 173 | 174 | .dataset, .model , .disk_info{ 175 | flex: 1; 176 | max-width: 280px; 177 | max-height: 280px; 178 | min-height: 250px; 179 | border-radius: 5px; 180 | border:solid 1px #0c0; 181 | background-color: #020; 182 | 
color:#0c0; 183 | padding: 0px 10px; 184 | margin: 5px; 185 | overflow: hidden; 186 | } 187 | 188 | .gpu, .disk_info, #train_time { 189 | min-height: 0px; 190 | border-radius: 5px; 191 | border:solid 1px #0c0; 192 | background-color: #020; 193 | color:#0c0; 194 | padding: 0px 10px; 195 | margin: 5px; 196 | overflow: hidden; 197 | border:solid 1px #0c0; 198 | height: auto; 199 | } 200 | 201 | .gpu th, .disk_info th, #train_time th{ 202 | color:#0f0; 203 | padding-right: 5px; 204 | } 205 | 206 | #train_time td { 207 | border-top: 1px solid #0c0; 208 | } 209 | 210 | .gpu.error{ 211 | border:solid 1px #f00; 212 | color:#c00; 213 | } 214 | 215 | .model { 216 | width: 250px; 217 | height: 200px; 218 | } 219 | 220 | .model-progress{ 221 | border:solid 1px #a00; 222 | background-color: #300; 223 | } 224 | .model-nottrained{ 225 | border:solid 1px #a60; 226 | background-color: #320; 227 | } 228 | 229 | .dataset:hover, .model:hover { 230 | border:solid 1px #fff; 231 | } 232 | 233 | .dataset_detail { 234 | background-color:#020; 235 | border: 1px solid #0c0; 236 | border-radius: 5px; 237 | padding: 0px 3px; 238 | padding-bottom: 10px; 239 | } 240 | 241 | .category { 242 | width: 220px; 243 | height: 200px; 244 | display: inline-block; 245 | padding: 5px; 246 | vertical-align: top; 247 | overflow: hidden; 248 | } 249 | 250 | .category:hover { 251 | border: 1px solid white; 252 | border-radius: 5px; 253 | padding: 3px; 254 | } 255 | 256 | .thumbnail-30 { 257 | vertical-align: top; 258 | padding-top: 2px; 259 | max-width: 100px; 260 | max-height: 75px; 261 | display: inline-block; 262 | } 263 | 264 | .category-image { 265 | margin: 5px; 266 | display: inline-block; 267 | max-width: 200px; 268 | } 269 | 270 | .category-image:hover { 271 | border: 1px solid white; 272 | box-shadow: 10px 10px 10px rgba(0,0,0,0.4); 273 | -moz-box-shadow: 10px 10px 10px rgba(0,0,0,0.4); 274 | -webkit-box-shadow: 10px 10px 10px rgba(0,0,0,0.4); 275 | -o-box-shadow: 10px 10px 10px rgba(0,0,0,0.4); 276 | -ms-box-shadow: 10px 10px 10px rgba(0,0,0,0.4); 277 | } 278 | #training_log { 279 | height: 100%; 280 | width: 100%; 281 | font-size: 8px; 282 | } 283 | 284 | #network_edit_area { 285 | height: 100%; 286 | width: 100%; 287 | } 288 | 289 | #network_edit_area_div { 290 | height: 76%; 291 | } 292 | 293 | #create_model_form { 294 | height: 100%; 295 | } 296 | 297 | #create_model_div_buttons { 298 | padding-top: 20px; 299 | } 300 | 301 | #create_network_buttons { 302 | margin-top: -20px; 303 | } 304 | 305 | #processing_screen, #uploading_progress_div { 306 | background-color: rgba(0, 0, 0, 0.7); 307 | width: 100%; 308 | height: 100%; 309 | position: absolute; 310 | top: 0; 311 | right: 0; 312 | display: table; 313 | z-index: 100; 314 | } 315 | 316 | #uploading_progress_div > .progress-wrap, 317 | #processing_screen > .progress-wrap { 318 | height: 20px; 319 | display: table-cell; 320 | vertical-align: middle; 321 | text-align: center; 322 | margin: 0 auto; 323 | color: white; 324 | z-index: 200; 325 | } 326 | 327 | #uploading_progress_div > .progress-wrap > .progress, 328 | #processing_screen > .progress-wrap > .progress { 329 | width: 60%; 330 | margin: 0 auto; 331 | } 332 | 333 | #progress_rate, 334 | #processing_screen > .progress-wrap > h4 { 335 | color:white; 336 | font-weight: bold; 337 | } 338 | 339 | .model-sub-header { 340 | width: 100%; 341 | margin-top: -10px; 342 | margin-bottom: 10px; 343 | vertical-align: middle; 344 | } 345 | 346 | #progress-label { 347 | width: 200px; 348 | font-size: 2em; 349 | float: left; 350 | } 
351 | 352 | .model-control-div { 353 | float: right; 354 | } 355 | 356 | .full-height{ 357 | height: 100%; 358 | } 359 | 360 | .has-textarea { 361 | height:71%; 362 | } 363 | 364 | textarea.form-control { 365 | height:100%; 366 | border-color: #060; 367 | } 368 | 369 | .CodeMirror { 370 | height: 100%; 371 | border-color: #060; 372 | } 373 | 374 | .CodeMirror-cursor { 375 | border-left: 1px solid white; 376 | border-right: none; 377 | width: 0; 378 | } 379 | 380 | .CodeMirror-gutters { 381 | border-right: 1px solid #000; 382 | background-color: #000; 383 | white-space: nowrap; 384 | } 385 | 386 | .cm-s-default .cm-keyword {color: #ff6347;} 387 | .cm-s-default .cm-def {color: #8470FF;} 388 | .cm-s-default .cm-builtin {color: #778899;} 389 | 390 | #network_edit_area.form-control { 391 | border-radius: 0px 0px 4px 4px; 392 | } 393 | 394 | .axis text { 395 | font-family: sans-serif; 396 | font-size: 11px; 397 | } 398 | 399 | .axis path, 400 | .axis line { 401 | fill: none; 402 | stroke: #fff; 403 | shape-rendering: crispEdges; 404 | } 405 | 406 | .x.axis path { 407 | display: none; 408 | } 409 | 410 | .line-loss { 411 | fill: none; 412 | stroke: steelblue; 413 | stroke-width: 1.5px; 414 | } 415 | 416 | .line-accuracy { 417 | fill: none; 418 | stroke: orange; 419 | stroke-width: 1.5px; 420 | } 421 | 422 | .line-val-loss { 423 | fill: none; 424 | stroke: #0c0; 425 | stroke-width: 1.5px; 426 | } 427 | 428 | .line-val-accuracy { 429 | fill: none; 430 | stroke: red; 431 | stroke-width: 1.5px; 432 | } 433 | 434 | .tick line { 435 | opacity: 0.4; 436 | } 437 | 438 | .progress { 439 | background-color: #fc0; 440 | } 441 | 442 | .progress-bar { 443 | background-color: #c80; 444 | } 445 | 446 | .form-control:focus { 447 | border-color: #0c0; 448 | } 449 | 450 | .navbar-right > .navbar-brand > img { 451 | height: 38px; 452 | } 453 | 454 | .start_train_section_header { 455 | border-bottom: 1px solid #060; 456 | } 457 | 458 | .start_train_section { 459 | padding-left: 20px; 460 | } 461 | 462 | .sample_text { 463 | margin: 4px; 464 | overflow: hidden; 465 | color: white; 466 | } 467 | 468 | .text_detail { 469 | width: 220px; 470 | height: 200px; 471 | display: inline-block; 472 | padding: 5px; 473 | vertical-align: top; 474 | margin: 4px; 475 | overflow: hidden; 476 | color: white; 477 | } 478 | 479 | .text_detail:hover { 480 | border: 1px solid white; 481 | border-radius: 5px; 482 | padding: 3px; 483 | } 484 | 485 | .modal .close { 486 | color: #0c0; 487 | } 488 | 489 | #show_text_detail_modal .modal-header { 490 | text-align: right; 491 | } 492 | 493 | .pagination.center { 494 | width: 100%; 495 | text-align: center; 496 | margin: 0 auto; 497 | } 498 | 499 | .pagination > li > a { 500 | background-color: #010; 501 | border: 1px solid #0c0; 502 | color: #0c0; 503 | } 504 | 505 | .pagination > .active > a { 506 | background-color: #060; 507 | border: 1px solid #0c0; 508 | } 509 | 510 | .pagination>.disabled>a, 511 | .pagination>.disabled>a:focus, 512 | .pagination>.disabled>a:hover, 513 | .pagination>.disabled>span, 514 | .pagination>.disabled>span:focus, 515 | .pagination>.disabled>span:hover, 516 | .pagination > .disabled > span { 517 | background-color: #010; 518 | border-color: #0c0; 519 | } 520 | 521 | .pagination>.disabled>span:hover, 522 | .pagination>.active>a:hover, 523 | .pagination>li>a:hover 524 | { 525 | color: white; 526 | background-color: #080; 527 | } 528 | 529 | .pagination>.active>a:hover 530 | { 531 | border-color: #080; 532 | } 533 | 534 | #resume_train_div { 535 | margin-bottom: 15px;
536 | } 537 | -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/static/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/static/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/static/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/static/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /src/static/html/gpu_usage.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CSLAIER - GPU Usage 6 | 13 | 14 | 15 | 16 | 17 | 35 | 36 | -------------------------------------------------------------------------------- /src/static/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/static/img/logo.png -------------------------------------------------------------------------------- /src/static/img/logo_hover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/src/static/img/logo_hover.png -------------------------------------------------------------------------------- /src/static/js/gpu_meter.js: -------------------------------------------------------------------------------- 1 | enchant(); 2 | 3 | var circularGraph = function(context,fan,power,memory,temp){ 4 | context.clearRect(0,0,228,228); 5 | var percent=memory; 6 | fan = 360*fan-90; // map the 0..1 readings to arc end angles; arcs start at -90 degrees (12 o'clock) 7 | memory = 360*memory-90; 8 | temp=360*temp-90; 9 | power=360*power-90; 10 | 11 | createCircle(context, 100, 0.7, 'rgb(64,64,0)', 100, 0, 360, true); // full background ring 12 | createCircle(context, 100, 0.7, 'rgb(200,128,0)', 100, -90, memory, false); 13 | createCircle(context, 70, 0.7, 'rgb(200,0,0)', 80, -90, power, false); 14 | createCircle(context, 50, 0.5, 'rgb(50,150,0)', 60, -90, temp, false); 15 | createCircle(context, 50, 0.1, 'rgb(0,200,100)', 40, -90, fan, false); 16 | 17 | context.fillStyle = '#fa0'; 18 | context.font= 'bold 36px Century Gothic'; 19 | context.fillText( ~~(percent*100)+"%",90,140); 20 | context.fillStyle = '#fa0'; 21 | context.font= 'bold 10px Century Gothic'; 22 | context.fillText( "Memory ",100,100); 23 | }; 24 | 25 | var createCircle = function(context, r, beginColorOffset, endColorRGB, radius, start, end, bool){ 26 | var x = y = 120; 27 |
context.beginPath(); // begin the path 28 | 29 | // set up the radial gradient 30 | var grad = context.createRadialGradient(120,120,10,120,120,r); 31 | 32 | // gradient start (inner color) 33 | grad.addColorStop(0,'#020'); 34 | grad.addColorStop(beginColorOffset,'#020'); 35 | 36 | // gradient end (outer color) 37 | grad.addColorStop(1,endColorRGB); 38 | context.fillStyle = grad; 39 | 40 | context.moveTo(x,y); 41 | context.arc(x, y, radius, (start * Math.PI / 180), (end * Math.PI / 180), bool); 42 | context.fill(); 43 | context.closePath(); // close the path 44 | }; 45 | 46 | var create_gpu_meter = function(fan,power,power_limit,memory,memory_total,temp) { 47 | enchant.ENV.USE_TOUCH_TO_START_SCENE = false; 48 | var game = new Game(228, 228); 49 | 50 | var re = /^[\d\.]+/; 51 | fan = fan.match(re)[0] * 0.01; 52 | power = power.match(re)[0] / power_limit.match(re)[0]; 53 | memory = memory.match(re)[0] / memory_total.match(re)[0]; 54 | temp = temp.match(re)[0] * 0.01; 55 | 56 | game.onload = function(){ 57 | game.rootScene.backgroundColor = "#020"; 58 | // create a Sprite 59 | var sprite = new Sprite(228,228); 60 | // create a Surface 61 | var surface = new Surface(228,228); 62 | 63 | // assign the surface to the sprite's image 64 | sprite.image = surface; 65 | 66 | // get the drawing context 67 | var context = surface.context; 68 | sprite.on('enterframe',function(){ 69 | circularGraph(context,fan,power,memory,temp); 70 | }) 71 | // add the sprite to the root scene 72 | game.rootScene.addChild(sprite); 73 | } 74 | game.start(); 75 | } -------------------------------------------------------------------------------- /src/templates/admin/datasets.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} Admin - Datasets {% endblock %} 3 | {% block main_content %} 4 |
5 |

CSLAIER Admin - Datasets

6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | {% for d in datasets %} 20 | 21 | 22 | 26 | 27 | 30 | 31 | 32 | 33 | 34 | 35 | 38 | 41 | 42 | 43 | {% endfor %} 44 |
ID  name  dataset_path  type  category_num  file_num  created_at  updated_at
23 | {{d.id}} 24 | 25 | {{d.name}} 28 | 29 | {{d.type}}{{d.category_num}}{{d.file_num}}{{d.created_at}}{{d.updated_at}} 36 | 37 | 39 | 40 |
45 |
46 | 47 | {% endblock %} 48 | 49 | {% block scripts %} 50 | 60 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/admin/index.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} Admin index {% endblock %} 3 | {% block main_content %} 4 |
5 |

CSLAIER Admin

6 | 7 | 8 |
9 | 10 | {% endblock %} 11 | 12 | {% block scripts %} 13 | 14 | 23 | 24 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/admin/models.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} Admin - Models {% endblock%} 3 | {% block main_content %} 4 |
5 |

CSLAIER Admin - Models

6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | {% for m in models %} 30 | 31 | 32 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 57 | 58 | 59 | {% endfor %} 60 |
ID  name  type  framework  epoch  network_name  trained_model_path  prepared_file_path  is_trained  pid  resize_mode  channels  use_wakatigaki  gpu  batchsize  dataset_id  updated_at  created_at
33 | {{m.id}} 34 | 35 | {{m.name}}{{m.type}}{{m.framework}}{{m.epoch}}{{m.network_name}}{{m.trained_model_path}}{{m.prepared_file_path}}{{m.is_trained}}{{m.pid}}{{m.resize_mode}}{{m.channels}}{{m.use_wakatigaki}}{{m.gpu}}{{m.batchsize}}{{m.dataset_id}}{{m.updated_at}}{{m.created_at}} 55 | 56 |
61 |
62 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/common/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {% block title %}{% endblock %} - CSLAIER 6 | 7 | {% block stylesheets %}{% endblock %} 8 | 9 | 17 | 18 | 19 | {% include 'common/header.html' %} 20 | {% block main_content %}{% endblock %} 21 | 22 | 23 | 24 | 25 | 26 | 27 | {% block scripts %} 28 | {% endblock %} 29 | 30 | -------------------------------------------------------------------------------- /src/templates/common/gpu_info.html: -------------------------------------------------------------------------------- 1 | {% if system_info['gpu_info'] %} 2 | {% for g in system_info['gpu_info']['gpus'] %} 3 |
4 |

{{g['minor_number']}}:{{g['product_name']}}

5 | 6 | 7 | 8 | 9 | 10 | 11 |
UUID: {{g['uuid']}}
FAN: {{g['fan']}}
Temp: {{g['temperature']}}
Power: {{g['power_draw']}} / {{g['power_limit']}}
Memory: {{g['memory_used']}} / {{g['memory_total']}}
12 |
13 | {% endfor %} 14 | {% else %} 15 |
16 |

GPU Info Not Available

17 |
18 | {% endif %} -------------------------------------------------------------------------------- /src/templates/common/gpu_script.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/templates/common/header.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/templates/common/macro.html: -------------------------------------------------------------------------------- 1 | {% macro inspection_component(model, usable_epochs) %} 2 | {% if usable_epochs %} 3 |
4 |
5 |
6 | 7 | 12 |
13 | 14 | {% if model.type == 'image' %} 15 | 16 | {% elif model.type == 'text' %} 17 | 18 | {% endif %} 19 |
20 |
21 | {% endif %} 22 | {% endmacro %} 23 | -------------------------------------------------------------------------------- /src/templates/common/resource_info.html: -------------------------------------------------------------------------------- 1 | {% if system_info['disk_info'] != None %} 2 |
Disk Info
3 | {% for disk in system_info['disk_info'] %} 4 |
5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 |
Mounted on: {{disk['mount']}}
size: {{disk['size']}}
used: {{disk['used']}}
available: {{disk['avail']}}
23 |
24 | {% endfor %} 25 | {% endif %} -------------------------------------------------------------------------------- /src/templates/common/version_info.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 |
CSLAIER Version: {{ system_info['cslaier_version'] }}
Graphics Board Driver Version: 9 | {% if system_info['gpu_info']['driver_version'] %} 10 | {{system_info['gpu_info']['driver_version']}} 11 | {% else %} 12 | --- 13 | {% endif %} 14 |
Chainer Version: {{ system_info['chainer_version'] }}
TensorFlow Version: {{ system_info['tensorflow_version'] }}
Python Version: {{ system_info['python_version'] }}
-------------------------------------------------------------------------------- /src/templates/dataset/show_category_detail.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} {{ dataset.name }} > {{ dataset.category }}{% endblock %} 3 | {% block main_content %} 4 | 5 |
6 |

Dataset

7 |
8 |
9 |

{{ dataset.name }} - {{ dataset.category }} ({{ dataset.count }})

10 |
11 | {% if dataset.type == 'image' %} 12 | 13 | {% elif dataset.type == 'text' %} 14 | 15 | {% endif %} 16 | 17 |
18 |
19 | {% if dataset.type == 'image' %} 20 | {% for file in dataset.files %} 21 | 22 | {% endfor %} 23 | {% elif dataset.type == 'text' %} 24 | {% for file in dataset.files %} 25 |
26 | {{ file['sample_text'] }} 27 |
28 | {% endfor %} 29 | {% endif %} 30 |
31 | {% if dataset.pages > 1 %} 32 | 61 | {% endif %} 62 |
63 | 64 | 96 | 97 | 114 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/dataset/show_dataset.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} {{ dataset.name }} {% endblock %} 3 | {% block main_content %} 4 | 5 |
6 |

Dataset

7 |
8 |
9 |

{{ dataset.name }}

10 |
11 | 12 | 13 |
14 |
15 | {% for category in dataset.categories %} 16 |
17 |

{{ category["category"] }} ({{ category["file_num"] }})

18 | {% if category["dataset_type"] == "image" %} 19 | {% for thumb in category["thumbnails"] %} 20 | 21 | {% endfor %} 22 | {% elif category["dataset_type"] == "text" %} 23 |
24 | {% for t in category["sample_text"] %} 25 | {{t}} 26 | {% endfor %} 27 |
28 | {% endif %} 29 |
30 | {% endfor %} 31 |
32 | {% if dataset.pages > 1 %} 33 | 62 | {% endif %} 63 |
64 | 65 | 94 | {% endblock %} 95 | 96 | {% block scripts %} 97 | 104 | {% endblock %} 105 | -------------------------------------------------------------------------------- /src/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} index {% endblock %} 3 | {% block main_content %} 4 |
5 |
6 |
7 | {% include 'index_partial/datasets.html' %} 8 | {% include 'index_partial/models.html' %} 9 |
10 |
11 | {% include 'common/version_info.html' %} 12 | {% include 'common/gpu_info.html' %} 13 | {% include 'common/resource_info.html' %} 14 |
15 |
16 |
17 | {% include 'index_partial/modals.html' %} 18 | {% endblock %} 19 | 20 | {% block scripts %} 21 | {% include 'common/gpu_script.html' %} 22 | 28 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/index_partial/datasets.html: -------------------------------------------------------------------------------- 1 |
2 |

Dataset

3 | 10 |
11 |
12 |
13 | {% for dataset in datasets %} 14 | {% if dataset.type == 'image' %} 15 |
16 |

{{ dataset.name }}

17 | Total {{ dataset.category_num }} Classes, {{ dataset.file_num }} Images 18 | {% for img in dataset.thumbnails %} 19 | 20 | {% endfor %} 21 |
22 | {% elif dataset.type == 'text' %} 23 |
24 |

{{ dataset.name }}

25 | Text File size : {{ dataset.filesize }} 26 |
27 | {% for t in dataset.sample_text %} 28 | {{t}} 29 | {% endfor %} 30 |
31 |
32 | {% endif %} 33 | {% endfor %} 34 |
35 | {% if dataset_count > 3 %} 36 | 37 | {% endif %} 38 |
39 | 40 | 49 | 60 | 61 | -------------------------------------------------------------------------------- /src/templates/index_partial/modals.html: -------------------------------------------------------------------------------- 1 | 51 | 52 | -------------------------------------------------------------------------------- /src/templates/index_partial/models.html: -------------------------------------------------------------------------------- 1 |
2 |

Models

3 | 8 |
9 |
10 | {% for model in models %} 11 | {% if loop.first or loop.index0 % 3 == 0 %} 12 |
13 | {% endif %} 14 | {% if model.is_trained == 1 %} 15 |
16 | {% elif model.is_trained == 2 %} 17 |
18 | {% else %} 19 |
20 | {% endif %} 21 |

{{model.name}} 22 |
23 | {% if model.is_trained == 1 %} 24 | In Progress 25 | {% elif model.is_trained == 2 %} 26 | Trained 27 | {% else %} 28 | Not Trained 29 | {% endif %} 30 |
31 |

32 |
    33 |
  • Date: {{model.created_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 34 |
  • Epoch: {{model.epoch}}
  • 35 |
  • Processing Unit: {{model.gpu_str}}
  • 36 |
  • 37 | {% if model.type == 'image' %} 38 | Image Classification 39 | {% elif model.type == 'text' %} 40 | Natural Language Processing 41 | {% endif %} 42 |
  • 43 |
  • Dataset: 44 | {% if model.dataset %} 45 | {{model.dataset.name}} 46 | {% else %} 47 | --- 48 | {% endif %} 49 |
  • 50 |
  • Network: 51 | {% if model.network_name %} 52 | {{model.network_name}} 53 | {% else %} 54 | --- 55 | {% endif %} 56 |
  • 57 |
  • Framework: {{model.framework}}
  • 58 |
59 |
60 | {% if loop.index0 % 3 == 2 or loop.last %} 61 |
62 | {% endif %} 63 | {% endfor %} 64 |
-------------------------------------------------------------------------------- /src/templates/model/inspect_result.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %} Inspection Result {% endblock %} 3 | {% block main_content %} 4 |
5 |

{{model.name}} Epoch: {{epoch}}

6 | {% if error %} 7 | 8 |

{{ error }}

9 | 10 | {% else %} 11 |
12 | 17 |
18 |
19 |
20 | 21 |
22 |
23 |

Inspection Result

24 | 25 | 26 | {% for result in results %} 27 | 28 | 29 | 30 | 31 | 32 | {% endfor %} 33 |
rank  name  score
{{result["rank"]}}{{result["name"].decode('utf-8')}}{{result["score"]}}
34 |
35 |
36 | {% endif %} 37 |

38 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/model/new.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %}Create New Model{% endblock %} 3 | {% block stylesheets %} 4 | 5 | {% endblock %} 6 | {% block main_content %} 7 | 8 |
9 |

create new model

10 |
11 |
12 |
13 |
14 | 15 | 21 |
22 |
23 | 24 |
25 | 28 | 31 |
32 |
33 |
34 | 35 |
36 | 39 | 42 |
43 |
44 |
45 | 46 | 47 |
48 |
49 | 50 | 51 |
52 |
53 |
54 |
55 | 56 | 57 |
58 |
59 | 60 | Cancel 61 |
62 |
63 |
64 |
65 |
66 | {% endblock %} 67 | {% block scripts %} 68 | 69 | 70 | 75 | {% endblock %} -------------------------------------------------------------------------------- /src/templates/model/partial/create_new_network_modals.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/templates/model/partial/in_progress.html: -------------------------------------------------------------------------------- 1 |
2 |
3 | In Progress 4 |
5 |
6 | 7 | 8 |
9 |
10 |
11 |
12 |
13 |
14 |
    15 |
  • Created: {{model.created_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 16 |
  • Updated: {{model.updated_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 17 |
  • Framework: {{model.framework}}
  • 18 |
  • Epoch: {{model.epoch}}
  • 19 |
  • Processing Unit: {{model.gpu_str}}
  • 20 |
  • Batch Size: {{model.batchsize_str}}
  • 21 | {% if model.dataset %} 22 |
  • Dataset: {{model.dataset.name}}
  • 23 | {% endif %} 24 |
  • 25 | Train Type: 26 | {% if model.type == 'image' %} 27 | Image Classification 28 | {% elif model.type == 'text' %} 29 | Natural Language Processing 30 | {% else %} 31 | --- 32 | {% endif %} 33 |
  • 34 |
  • Network: 35 | {% if model.network_name %} 36 | {{model.network_name}} 37 | {% else %} 38 | --- 39 | {% endif %} 40 |
  • 41 | {% if model.type == 'image' %} 42 |
  • Resize mode: {{model.resize_mode}}
  • 43 |
  • Color mode: {{model.channels}}
  • 44 | {% endif %} 45 |
46 |
47 |
48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 |
REMAINING TIME: (estimating...)
ELAPSED TIME: 0 days 00:00:00
58 |
59 | {% if not model.framework == 'tensorflow' %} 60 | {% from "/common/macro.html" import inspection_component with context %} 61 | {{ inspection_component(model, usable_epochs) }} 62 | {% endif %} 63 |
64 |
65 | 73 | 78 | 81 |
82 |
83 |
{{model.code}}
84 |
85 |
86 | 87 |
88 |
-------------------------------------------------------------------------------- /src/templates/model/partial/not_trained.html: -------------------------------------------------------------------------------- 1 |
2 |
3 | Not Trained 4 |
5 |
6 | 7 | 8 |
9 |
10 |
11 |
12 |
13 |
14 |
    15 |
  • Created: {{model.created_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 16 |
  • Updated: {{model.updated_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 17 |
  • Framework: {{model.framework}}
  • 18 |
  • Epoch: ---
  • 19 | {% if model.dataset %} 20 |
  • Dataset: {{model.dataset.name}}
  • 21 | {% endif %} 22 |
  • 23 | Train Type: 24 | {% if model.type == 'image' %} 25 | Image Classification 26 | {% elif model.type == 'text' %} 27 | Natural Language Processing 28 | {% else %} 29 | --- 30 | {% endif %} 31 |
  • 32 |
  • Network: 33 | {% if model.network_name %} 34 | {{model.network_name}} 35 | {% else %} 36 | --- 37 | {% endif %} 38 |
  • 39 | {% if model.type == 'image' %} 40 |
  • Resize mode: ---
  • 41 |
  • Color mode: ---
  • 42 | {% endif %} 43 | {% if resumable %} 44 |
  • Trained epoch: {{ trained_epoch }}
  • 45 | {% endif %} 46 |
47 |
48 |
49 | {% if resumable %} 50 |
51 | 52 |
53 | {% endif %} 54 |
55 | 56 |
57 |
58 | {% from "/common/macro.html" import inspection_component with context %} 59 | {{ inspection_component(model, usable_epochs) }} 60 |
61 |
62 | 70 | 75 | 78 |
79 |
80 | 81 |
82 | 86 |
87 | 88 |
89 |
-------------------------------------------------------------------------------- /src/templates/model/partial/prediction_modals.html: -------------------------------------------------------------------------------- 1 | 2 | {% if model.type == 'image' %} 3 | 24 | {% endif %} 25 | 26 | {% if model.type == 'text' %} 27 | 65 | {% endif %} 66 | -------------------------------------------------------------------------------- /src/templates/model/partial/resume_train_modals.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/templates/model/partial/start_train_modals.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/templates/model/partial/trained.html: -------------------------------------------------------------------------------- 1 |
2 |
3 | Trained 4 |
5 |
6 | 7 | 8 |
9 |
10 |
11 |
12 |
13 |
14 |
    15 |
  • Created: {{model.created_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 16 |
  • Updated: {{model.updated_at.strftime('%Y-%m-%d %H:%M:%S')}}
  • 17 |
  • Framework: {{model.framework}}
  • 18 |
  • Epoch: {{model.epoch}}
  • 19 |
  • Batch Size: {{model.batchsize_str}}
  • 20 | {% if model.dataset %} 21 |
  • Dataset: {{model.dataset.name}}
  • 22 | {% endif %} 23 |
  • 24 | Train Type: 25 | {% if model.type == 'image' %} 26 | Image Classification 27 | {% elif model.type == 'text' %} 28 | Natural Language Processing 29 | {% else %} 30 | --- 31 | {% endif %} 32 |
  • 33 |
  • Network: 34 | {% if model.network_name %} 35 | {{model.network_name}} 36 | {% else %} 37 | --- 38 | {% endif %} 39 |
  • 40 | {% if model.type == 'image' %} 41 |
  • Resize mode: {{model.resize_mode}}
  • 42 |
  • Color mode: {{model.channels}}
  • 43 | {% endif %} 44 |
45 |
46 |
47 |
48 | 49 |
50 | {% from "/common/macro.html" import inspection_component with context %} 51 | {{ inspection_component(model, usable_epochs) }} 52 |
53 |
54 |
55 | 63 | 68 | 71 |
72 |
73 | 74 |
75 | 79 |
80 | 121 |
122 |
-------------------------------------------------------------------------------- /src/templates/model/show.html: -------------------------------------------------------------------------------- 1 | {% extends 'common/base.html' %} 2 | {% block title %}{{model.name}}{% endblock %} 3 | {% block stylesheets %} 4 | 5 | {% endblock %} 6 | 7 | {% block main_content %} 8 | 9 | 10 |
11 |
12 |
13 |

14 | Model: {{model.name}} 15 |

16 | {% if model.is_trained == 0 %} 17 | {% include 'model/partial/not_trained.html' %} 18 | {% elif model.is_trained == 1 %} 19 | {% include 'model/partial/in_progress.html' %} 20 | {% elif model.is_trained == 2 %} 21 | {% include 'model/partial/trained.html' %} 22 | {% endif %} 23 |
24 |
25 | {% include 'common/version_info.html' %} 26 | {% include 'common/gpu_info.html' %} 27 | {% include 'common/resource_info.html' %} 28 |
29 |
30 |
31 | 32 | 35 | 36 | 37 | 38 | 48 | 49 | {% include 'model/partial/prediction_modals.html' %} 50 | {% include 'model/partial/create_new_network_modals.html' %} 51 | {% include 'model/partial/start_train_modals.html' %} 52 | {% include 'model/partial/resume_train_modals.html' %} 53 | {% endblock %} 54 | 55 | {% block scripts %} 56 | 57 | 58 | {% include 'common/gpu_script.html' %} 59 | 79 | {% endblock %} -------------------------------------------------------------------------------- /temp/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/temp/.gitkeep -------------------------------------------------------------------------------- /trained_data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/trained_data/.gitkeep -------------------------------------------------------------------------------- /uploaded_files/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/uploaded_files/.gitkeep -------------------------------------------------------------------------------- /uploaded_raw_files/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SonyCSL/CSLAIER/341e2fe6c0b9a2e0d57eca4667125e99ddb2a3f8/uploaded_raw_files/.gitkeep --------------------------------------------------------------------------------