├── .gitignore ├── .nojekyll ├── .travis.yml ├── LICENSE ├── README.md ├── data ├── README.md └── get_sample_data.sh ├── docs ├── .buildinfo ├── .nojekyll ├── _sources │ ├── code.txt │ ├── index.txt │ ├── intro.txt │ ├── modules.txt │ └── tutorial.txt ├── _static │ ├── ajax-loader.gif │ ├── basic.css │ ├── comment-bright.png │ ├── comment-close.png │ ├── comment.png │ ├── css │ │ ├── badge_only.css │ │ └── theme.css │ ├── doctools.js │ ├── down-pressed.png │ ├── down.png │ ├── file.png │ ├── fonts │ │ ├── Inconsolata-Bold.ttf │ │ ├── Inconsolata-Regular.ttf │ │ ├── Lato-Bold.ttf │ │ ├── Lato-Regular.ttf │ │ ├── RobotoSlab-Bold.ttf │ │ ├── RobotoSlab-Regular.ttf │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.svg │ │ ├── fontawesome-webfont.ttf │ │ └── fontawesome-webfont.woff │ ├── jquery-1.11.1.js │ ├── jquery.js │ ├── js │ │ ├── modernizr.min.js │ │ └── theme.js │ ├── minus.png │ ├── plus.png │ ├── pygments.css │ ├── searchtools.js │ ├── underscore-1.3.1.js │ ├── underscore.js │ ├── up-pressed.png │ ├── up.png │ └── websupport.js ├── code.html ├── doctrees │ ├── code.doctree │ ├── environment.pickle │ ├── index.doctree │ ├── intro.doctree │ ├── modules.doctree │ └── tutorial.doctree ├── genindex.html ├── index.html ├── intro.html ├── modules.html ├── objects.inv ├── py-modindex.html ├── search.html ├── searchindex.js └── tutorial.html ├── keras_wrapper ├── __init__.py ├── cnn_model.py ├── dataset.py ├── demo.ipynb ├── extra │ ├── __init__.py │ ├── callbacks.py │ ├── evaluation.py │ ├── localization_utilities.py │ ├── nms │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── cpu_nms.pyx │ │ ├── gpu_nms.hpp │ │ ├── gpu_nms.pyx │ │ ├── nms_kernel.cu │ │ ├── py_cpu_nms.py │ │ └── setup.py │ ├── read_write.py │ ├── regularize.py │ └── tokenizers.py ├── model_ensemble.py ├── models.py ├── plots │ └── visualize_filters.ipynb ├── saving.py ├── search.py ├── test.py └── utils.py ├── pytest.ini ├── requirements.txt ├── setup.cfg ├── setup.py ├── sphinx ├── Makefile ├── _ext │ └── edit_on_github.py ├── _templates │ └── sourcelink.html └── source │ ├── conf.py │ ├── index.rst │ ├── intro.md │ ├── modules.rst │ └── tutorial.md ├── tests ├── data │ └── test_data.txt ├── extra │ ├── __init__.py │ ├── test_wrapper_callbacks.py │ ├── test_wrapper_evaluation.py │ ├── test_wrapper_localization_utilities.py │ ├── test_wrapper_read_write.py │ └── test_wrapper_tokenizers.py └── general │ ├── __init__.py │ ├── test_model_wrapper.py │ ├── test_model_wrapper_ensemble.py │ ├── test_wrapper_dataset.py │ └── test_wrapper_utils.py └── utils ├── README.md ├── average_models.py └── minimize_dataset.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | keras_wrapper/Datasets 3 | keras_wrapper/Models 4 | .idea 5 | Multimodal_Keras_Wrapper.egg-info 6 | -------------------------------------------------------------------------------- /.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/.nojekyll -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: bionic 3 | language: python 4 | matrix: 5 | include: 6 | - python: 3.6 7 | env: TEST_MODE=PEP8 8 | - python: 3.6 9 | env: TEST_MODE=GENERAL 10 | - python: 3.6 11 | env: TEST_MODE=EXTRA 12 | 13 | # command to install 
dependencies 14 | install: 15 | 16 | - travis_wait 30 pip install . 17 | - pip install pytest pytest-cache pytest-cov pytest-forked pytest-pep8 pytest-xdist --progress-bar off 18 | - pip install -e git+https://github.com/lvapeab/coco-caption.git#egg=coco-caption --progress-bar off 19 | 20 | # command to run tests 21 | script: 22 | - if [[ "$TEST_MODE" == "PEP8" ]]; then 23 | PYTHONPATH=$PWD:$PYTHONPATH pytest --pep8 -m pep8 ; 24 | elif [[ "$TEST_MODE" == "GENERAL" ]]; then 25 | PYTHONPATH=$PWD:$PYTHONPATH pytest -s tests/general/ 2>&1 ; 26 | elif [[ "$TEST_MODE" == "EXTRA" ]]; then 27 | PYTHONPATH=$PWD:$PYTHONPATH pytest -s tests/extra/ 2>&1 ; 28 | fi 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Marc Bolaños, Álvaro Peris 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Multimodal Keras Wrapper 2 | Wrapper for Keras with support to easy multimodal data and models loading and handling. 3 | 4 | [![PyPI version](https://badge.fury.io/py/multimodal-keras-wrapper.svg)](https://badge.fury.io/py/multimodal-keras-wrapper) [![Build Status](https://travis-ci.org/lvapeab/multimodal_keras_wrapper.svg?branch=master)](https://travis-ci.org/lvapeab/multimodal_keras_wrapper) [![Requirements Status](https://requires.io/github/lvapeab/multimodal_keras_wrapper/requirements.svg?branch=master)](https://requires.io/github/lvapeab/multimodal_keras_wrapper/requirements/?branch=master) ![Compatibility](https://img.shields.io/badge/Python-3.6-blue.svg) [![license](https://img.shields.io/github/license/mashape/apistatus.svg)](https://github.com/lvapeab/multimodal_keras_wrapper/blob/master/LICENSE) 5 | 6 | ## Documentation 7 | 8 | You can access the library documentation page at [marcbs.github.io/multimodal_keras_wrapper/](http://marcbs.github.io/multimodal_keras_wrapper/) 9 | 10 | Some code examples are available in demo.ipynb and test.py. Additionally, in the section Projects you can see some practical examples of projects using this library. 
11 | 12 | 13 | ## Installation 14 | 15 | Assuming that you have [pip](https://en.wikipedia.org/wiki/Pip_(package_manager)) installed, run: 16 | 17 | ``` 18 | pip install multimodal-keras-wrapper 19 | ``` 20 | 21 | Alternatively, if you want to install the library from the source code, you just have to follow these steps: 22 | 23 | 1) Clone this repository. 24 | 25 | 2) Include the repository path into your PYTHONPATH: 26 | ``` 27 | export PYTHONPATH=$PYTHONPATH:/path/to/multimodal_keras_wrapper 28 | ``` 29 | 30 | 3) Install the dependencies (it will install our [custom Keras fork](https://github.com/MarcBS/keras)): 31 | ``` 32 | pip install -r requirements.txt 33 | ``` 34 | 35 | 36 | ## Additional dependencies 37 | 38 | The following additional dependencies are required to fully exploit this library: 39 | 40 | - Keras - [custom fork](https://github.com/MarcBS/keras) or [original version](https://github.com/fchollet/keras) 41 | - The `cupy` package can be used for performing numpy-like operations in the GPU. If not available, the package will fall back to numpy. 42 | - [Coco-caption evaluation package](https://github.com/lvapeab/coco-caption/tree/master/pycocoevalcap/) (Only required to perform COCO evaluation). This package requires `java` (version 1.8.0 or newer). 43 | 44 | Only when using NMS for certain localization utilities: 45 | - [cython](https://pypi.python.org/pypi/Cython/0.25.2) >= 0.23.4 46 | 47 | 48 | ## Projects 49 | 50 | You can see more practical examples in projects which use this library: 51 | 52 | [TMA for Egocentric Video Captioning based on Temporally-linked Sequences](https://github.com/MarcBS/TMA). 53 | 54 | [NMT-Keras: Neural Machine Translation](https://github.com/lvapeab/nmt-keras). 55 | 56 | [VIBIKNet for Visual Question Answering](https://github.com/MarcBS/VIBIKNet) 57 | 58 | [ABiViRNet for Video Description](https://github.com/lvapeab/ABiViRNet) 59 | 60 | [Sentence-SelectioNN for Domain Adaptation in SMT](https://github.com/lvapeab/sentence-selectioNN) 61 | 62 | 63 | ## Keras 64 | 65 | For additional information on the Deep Learning library, visit the official web page www.keras.io or the GitHub repository https://github.com/keras-team/keras. 66 | 67 | You can also use our [custom Keras version](https://github.com/MarcBS/keras), which provides several additional layers for Multimodal Learning. 68 | -------------------------------------------------------------------------------- /data/README.md: -------------------------------------------------------------------------------- 1 | # Sample data for Multimodal Keras Wrapper 2 | 3 | Execute `./get_sample_data.sh` in order to download the sample data. -------------------------------------------------------------------------------- /data/get_sample_data.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd data 2>/dev/null; 4 | echo "Downloading data..."; 5 | wget https://github.com/lvapeab/lvapeab.github.io/raw/master/multimodal_keras_wrapper_data/multimodal_keras_wrapper_data.tar.gz; 6 | echo "Uncompressing data..."; 7 | tar xzf multimodal_keras_wrapper_data.tar.gz; 8 | rm multimodal_keras_wrapper_data.tar.gz; 9 | echo "Done."; -------------------------------------------------------------------------------- /docs/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. 
When it is not found, a full rebuild will be done. 3 | config: b4eda7f74e0ca91402e1402a058ea098 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/.nojekyll -------------------------------------------------------------------------------- /docs/_sources/code.txt: -------------------------------------------------------------------------------- 1 | Available Modules 2 | ************************** 3 | 4 | Documentation for the keras_wrapper module. 5 | 6 | See code examples in demo.ipynb and test.py 7 | 8 | 9 | dataset.py 10 | ========================= 11 | 12 | .. automodule:: keras_wrapper.dataset 13 | :members: 14 | 15 | 16 | cnn_model.py 17 | ========================= 18 | 19 | .. automodule:: keras_wrapper.cnn_model 20 | :members: 21 | 22 | 23 | staged_network.py 24 | ========================= 25 | 26 | .. automodule:: keras_wrapper.staged_network 27 | :members: 28 | 29 | 30 | stage.py 31 | ========================= 32 | 33 | .. automodule:: keras_wrapper.stage 34 | :members: 35 | 36 | 37 | ecoc_classifier.py 38 | ========================= 39 | 40 | .. automodule:: keras_wrapper.ecoc_classifier 41 | :members: 42 | 43 | 44 | utils.py 45 | ========================= 46 | 47 | .. automodule:: keras_wrapper.utils 48 | :members: 49 | 50 | 51 | thread_loader.py 52 | ========================= 53 | 54 | .. automodule:: keras_wrapper.thread_loader 55 | :members: 56 | -------------------------------------------------------------------------------- /docs/_sources/index.txt: -------------------------------------------------------------------------------- 1 | .. multimodal_keras_wrapper documentation master file, created by 2 | sphinx-quickstart on Tue Apr 26 10:43:19 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Multimodal Keras Wrapper's documentation! 7 | ====================================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | intro.md 15 | tutorial.md 16 | modules.rst 17 | 18 | 19 | Indices and tables 20 | ==================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | 26 | -------------------------------------------------------------------------------- /docs/_sources/intro.txt: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | ## Multimodal Keras Wrapper 4 | Wrapper for Keras with support to easy multimodal data and models loading and handling. 5 | 6 | You can download and contribute to the code downloading [this repository](https://github.com/MarcBS/multimodal_keras_wrapper). 7 | 8 | 9 | ## Documentation 10 | 11 | You can access the library documentation page at [marcbs.github.io/multimodal_keras_wrapper/](http://marcbs.github.io/multimodal_keras_wrapper/) 12 | 13 | Some code examples are available in demo.ipynb and test.py. Additionally, in the section Projects you can see some practical examples of projects using this library. 
14 | 15 | 16 | ## Dependencies 17 | 18 | The following dependencies are required for using this library: 19 | 20 | - [Anaconda](https://www.continuum.io/downloads) 21 | - Keras - [custom fork](https://github.com/MarcBS/keras) or [original version](https://github.com/fchollet/keras) 22 | - [cloud](https://pypi.python.org/pypi/cloud/2.8.5) >= 2.8.5 23 | - [scipy](https://pypi.python.org/pypi/scipy/0.7.0) 24 | 25 | Only when using NMS for certain localization utilities: 26 | - [cython](https://pypi.python.org/pypi/Cython/0.25.2) >= 0.23.4 27 | 28 | ## Installation 29 | 30 | In order to install the library you just have to follow these steps: 31 | 32 | 1) Clone this repository: 33 | ``` 34 | git clone https://github.com/MarcBS/multimodal_keras_wrapper.git 35 | ``` 36 | 37 | 2) Include the repository path into your PYTHONPATH: 38 | ``` 39 | export PYTHONPATH=$PYTHONPATH:/path/to/multimodal_keras_wrapper 40 | ``` 41 | 42 | 3) If you wish to install the dependencies (it will install our [custom Keras fork](https://github.com/MarcBS/keras)): 43 | ``` 44 | pip install -r requirements.txt 45 | ``` 46 | 47 | ## Projects 48 | 49 | You can see more practical examples in projects which use this library: 50 | 51 | [VIBIKNet for Visual Question Answering](https://github.com/MarcBS/VIBIKNet) 52 | 53 | [ABiViRNet for Video Description](https://github.com/lvapeab/ABiViRNet) 54 | 55 | [Sentence-SelectioNN for Domain Adaptation in SMT](https://github.com/lvapeab/sentence-selectioNN) 56 | 57 | 58 | ## Keras 59 | 60 | For additional information on the Deep Learning library, visit the official web page www.keras.io or the GitHub repository https://github.com/fchollet/keras. 61 | 62 | You can also use our [custom Keras version](https://github.com/MarcBS/keras), which provides several additional layers for Multimodal Learning. 63 | -------------------------------------------------------------------------------- /docs/_sources/modules.txt: -------------------------------------------------------------------------------- 1 | Available Modules 2 | ************************** 3 | 4 | List of all files, classes and methods available in the library. 5 | 6 | 7 | dataset.py 8 | ============================= 9 | 10 | .. automodule:: keras_wrapper.dataset 11 | :members: 12 | 13 | 14 | cnn_model.py 15 | ============================= 16 | 17 | .. automodule:: keras_wrapper.cnn_model 18 | :members: 19 | 20 | 21 | callbacks_keras_wrapper.py 22 | ============================= 23 | 24 | .. automodule:: keras_wrapper.callbacks_keras_wrapper 25 | :members: 26 | 27 | 28 | beam_search_ensemble.py 29 | ============================= 30 | 31 | .. automodule:: keras_wrapper.beam_search_ensemble 32 | :members: 33 | 34 | 35 | utils.py 36 | ============================= 37 | 38 | .. automodule:: keras_wrapper.utils 39 | :members: 40 | -------------------------------------------------------------------------------- /docs/_sources/tutorial.txt: -------------------------------------------------------------------------------- 1 | # Tutorial 2 | 3 | ## Basic components 4 | 5 | There are two basic components that have to be built in order to use the Multimodal Keras Wrapper, 6 | which are a **[Dataset](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/dataset.py#L331)** and a **[Model_Wrapper](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/cnn_model.py#L154)**. 
7 | 8 | The class **Dataset** is in charge of: 9 | - Storing, preprocessing and loading any kind of data for training a model (inputs). 10 | - Storing, preprocessing and loading the ground truth associated with our data (outputs). 11 | - Loading the data in batches for training or prediction. 12 | 13 | The Dataset class can manage different [types of input/output data](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/dataset.py#L389-L390), which can be summarized as: 14 | - input types: 'raw-image', 'video', 'image-features', 'video-features', 'text' 15 | - output types: 'categorical', 'binary', 'real', 'text', '3DLabel' 16 | 17 | Currently, the class Dataset can be used for multiple kinds of multimodal problems, 18 | e.g. image/video classification, detection, multilabel prediction, regression, image/video captioning, 19 | visual question answering, multimodal translation, neural machine translation, etc. 20 | 21 | The class **Model_Wrapper** is in charge of: 22 | - Storing an instance of a Keras model. 23 | - Receiving the inputs/outputs of the class Dataset and using the model for training or prediction. 24 | - Providing two different methods for prediction. Either [predictNet()](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.predictNet), which uses a conventional Keras model for prediction, or [predictBeamSearchNet()](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.predictBeamSearchNet), which applies beam search for sequence generative models and additionally allows creating separate models **model_init** and **model_next** for applying an optimized prediction (see [this](https://github.com/MarcBS/multimodal_keras_wrapper/blob/b348ce9d52404434b1e98316c7f09b5d5fd3df00/keras_wrapper/cnn_model.py#L1319-L1328) and [this](https://github.com/MarcBS/multimodal_keras_wrapper/blob/f269207a65bfc77d5c2c89ea708bad8bff7f72ab/keras_wrapper/cnn_model.py#L1057) for further information). 25 | 26 | In this tutorial we will learn how to create each of the two basic components and how to use a 27 | model for training and prediction. 28 | 29 | 30 | ## Creating a Dataset 31 | 32 | First, let's create a simple Dataset object with some sample data. 33 | The data used for this example can be found in `/repository_root/data/sample_data`. 34 | 35 | 36 | Dataset parameters definition. 
37 | 38 | ``` 39 | from keras_wrapper.dataset import Dataset 40 | 41 | dataset_name = 'test_dataset' 42 | image_id = 'input_image' 43 | label_id = 'output_label' 44 | images_size = [256, 256, 3] 45 | images_crop_size = [224, 224, 3] 46 | train_mean = [103.939, 116.779, 123.68] 47 | base_path = '/data/sample_data' 48 | ``` 49 | 50 | Empty dataset instance creation 51 | 52 | ``` 53 | ds = Dataset(dataset_name, base_path+'/images') 54 | ``` 55 | 56 | 57 | Insert dataset/model inputs 58 | 59 | ``` 60 | # train split 61 | ds.setInput(base_path + '/train.txt', 'train', 62 | type='raw-image', id=image_id, 63 | img_size=images_size, img_size_crop=images_crop_size) 64 | # val split 65 | ds.setInput(base_path + '/val.txt', 'val', 66 | type='raw-image', id=image_id, 67 | img_size=images_size, img_size_crop=images_crop_size) 68 | # test split 69 | ds.setInput(base_path + '/test.txt', 'test', 70 | type='raw-image', id=image_id, 71 | img_size=images_size, img_size_crop=images_crop_size) 72 | ``` 73 | 74 | Insert pre-calculated images train mean 75 | 76 | ``` 77 | ds.setTrainMean(train_mean, image_id) 78 | ``` 79 | 80 | Insert dataset/model outputs 81 | 82 | ``` 83 | # train split 84 | ds.setOutput(base_path+'/train_labels.txt', 'train', 85 | type='categorical', id=label_id) 86 | # val split 87 | ds.setOutput(base_path+'/val_labels.txt', 'val', 88 | type='categorical', id=label_id) 89 | # test split 90 | ds.setOutput(base_path+'/test_labels.txt', 'test', 91 | type='categorical', id=label_id) 92 | ``` 93 | 94 | ## Saving or loading a Dataset 95 | 96 | ``` 97 | from keras_wrapper.dataset import saveDataset, loadDataset 98 | 99 | save_path = '/Datasets' 100 | 101 | # Save dataset 102 | saveDataset(ds, save_path) 103 | 104 | # Load dataset 105 | ds = loadDataset(save_path+'/Dataset_'+dataset_name+'.pkl') 106 | ``` 107 | 108 | In addition, we can print some basic information of the data stored in the dataset: 109 | 110 | ``` 111 | print(ds) 112 | ``` 113 | 114 | ## Creating a Model_Wrapper 115 | 116 | Model_Wrapper parameters definition. 117 | 118 | ``` 119 | from keras_wrapper.cnn_model import Model_Wrapper 120 | 121 | model_name = 'our_model' 122 | type = 'VGG_19_ImageNet' 123 | save_path = '/Models/' 124 | ``` 125 | 126 | Create a basic CNN model 127 | 128 | ``` 129 | net = Model_Wrapper(nOutput=2, type=type, model_name=model_name, input_shape=images_crop_size) 130 | net.setOptimizer(lr=0.001, metrics=['accuracy']) # compile it 131 | ``` 132 | 133 | By default, the model type built is the one defined in [Model_Wrapper.basic_model()](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/cnn_model.py#L2003). 134 | However, any kind of custom model can be defined just by: 135 | - Defining a new method for the class Model_Wrapper which builds the model and stores it in self.model. 136 | - Referencing it with type='method_name' when creating a new Model_Wrapper instance (see the sketch below). 
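A minimal sketch of those two steps follows. Note that the builder name `tiny_classifier`, its `(nOutput, input_shape)` signature and the dynamic attachment to the class are assumptions made for illustration only, not documented API; check [Model_Wrapper.basic_model()](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/cnn_model.py#L2003) for the exact signature the constructor expects from a builder method.

```
from keras.models import Sequential
from keras.layers import Flatten, Dense

from keras_wrapper.cnn_model import Model_Wrapper


def tiny_classifier(self, nOutput, input_shape):
    # Hypothetical builder: creates a small Keras model and stores it in
    # self.model, mirroring what Model_Wrapper.basic_model() does.
    model = Sequential()
    model.add(Flatten(input_shape=tuple(input_shape)))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(nOutput, activation='softmax'))
    self.model = model


# Attach the builder to the class so it can be referenced by name.
Model_Wrapper.tiny_classifier = tiny_classifier

net = Model_Wrapper(nOutput=2, type='tiny_classifier', model_name='custom_model',
                    input_shape=[224, 224, 3])
net.setOptimizer(lr=0.001, metrics=['accuracy'])
```

If the constructor of your library version does not dispatch on dynamically attached methods, defining the builder directly inside keras_wrapper/cnn_model.py achieves the same effect.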
137 | 138 | 139 | ## Saving or loading a Model_Wrapper 140 | 141 | ``` 142 | from keras_wrapper.cnn_model import saveModel, loadModel 143 | 144 | save_epoch = 0 145 | 146 | # Save model 147 | saveModel(net, save_epoch) 148 | 149 | # Load model 150 | net = loadModel(save_path+'/'+model_name, save_epoch) 151 | ``` 152 | 153 | 154 | ## Connecting a Dataset to a Model_Wrapper 155 | 156 | In order to establish a correct communication between the Dataset and the Model_Wrapper objects, we have to provide the mapping between the Dataset id positions and their corresponding layer identifiers in the Keras model, as a dictionary. 157 | 158 | In this case we only have one input and one output, so both ids are mapped to position 0 of our Dataset. 159 | 160 | ``` 161 | net.setInputsMapping({net.ids_inputs[0]: 0}) 162 | net.setOutputsMapping({net.ids_outputs[0]: 0}) 163 | ``` 164 | 165 | 166 | ## Training 167 | 168 | We can specify several options for training our model, which are [summarized here](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.trainNet). Any option that is not overridden keeps its [default value](https://github.com/MarcBS/multimodal_keras_wrapper/blob/011393580b2253a01c168d638b8c0bd06fe6d522/keras_wrapper/cnn_model.py#L454-L458). 169 | 170 | ``` 171 | train_overriden_parameters = {'n_epochs': 2, 'batch_size': 10} 172 | 173 | net.trainNet(ds, train_overriden_parameters) 174 | ``` 175 | 176 | ## Prediction 177 | 178 | The same applies to the prediction method. We can find the [available parameters here](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.predictNet) and the [default values here](https://github.com/MarcBS/multimodal_keras_wrapper/blob/011393580b2253a01c168d638b8c0bd06fe6d522/keras_wrapper/cnn_model.py#L1468-L1470). 179 | 180 | ``` 181 | predict_overriden_parameters = {'batch_size': 10, 'predict_on_sets': ['test']} 182 | 183 | net.predictNet(ds, predict_overriden_parameters) 184 | ``` -------------------------------------------------------------------------------- /docs/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/ajax-loader.gif -------------------------------------------------------------------------------- /docs/_static/basic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * basic.css 3 | * ~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- basic theme. 6 | * 7 | * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | /* -- main layout ----------------------------------------------------------- */ 13 | 14 | div.clearer { 15 | clear: both; 16 | } 17 | 18 | /* -- relbar ---------------------------------------------------------------- */ 19 | 20 | div.related { 21 | width: 100%; 22 | font-size: 90%; 23 | } 24 | 25 | div.related h3 { 26 | display: none; 27 | } 28 | 29 | div.related ul { 30 | margin: 0; 31 | padding: 0 0 0 10px; 32 | list-style: none; 33 | } 34 | 35 | div.related li { 36 | display: inline; 37 | } 38 | 39 | div.related li.right { 40 | float: right; 41 | margin-right: 5px; 42 | } 43 | 44 | /* -- sidebar --------------------------------------------------------------- */ 45 | 46 | div.sphinxsidebarwrapper { 47 | padding: 10px 5px 0 10px; 48 | } 49 | 50 | div.sphinxsidebar { 51 | float: left; 52 | width: 230px; 53 | margin-left: -100%; 54 | font-size: 90%; 55 | word-wrap: break-word; 56 | overflow-wrap : break-word; 57 | } 58 | 59 | div.sphinxsidebar ul { 60 | list-style: none; 61 | } 62 | 63 | div.sphinxsidebar ul ul, 64 | div.sphinxsidebar ul.want-points { 65 | margin-left: 20px; 66 | list-style: square; 67 | } 68 | 69 | div.sphinxsidebar ul ul { 70 | margin-top: 0; 71 | margin-bottom: 0; 72 | } 73 | 74 | div.sphinxsidebar form { 75 | margin-top: 10px; 76 | } 77 | 78 | div.sphinxsidebar input { 79 | border: 1px solid #98dbcc; 80 | font-family: sans-serif; 81 | font-size: 1em; 82 | } 83 | 84 | div.sphinxsidebar #searchbox input[type="text"] { 85 | width: 170px; 86 | } 87 | 88 | div.sphinxsidebar #searchbox input[type="submit"] { 89 | width: 30px; 90 | } 91 | 92 | img { 93 | border: 0; 94 | max-width: 100%; 95 | } 96 | 97 | /* -- search page ----------------------------------------------------------- */ 98 | 99 | ul.search { 100 | margin: 10px 0 0 20px; 101 | padding: 0; 102 | } 103 | 104 | ul.search li { 105 | padding: 5px 0 5px 20px; 106 | background-image: url(file.png); 107 | background-repeat: no-repeat; 108 | background-position: 0 7px; 109 | } 110 | 111 | ul.search li a { 112 | font-weight: bold; 113 | } 114 | 115 | ul.search li div.context { 116 | color: #888; 117 | margin: 2px 0 0 30px; 118 | text-align: left; 119 | } 120 | 121 | ul.keywordmatches li.goodmatch a { 122 | font-weight: bold; 123 | } 124 | 125 | /* -- index page ------------------------------------------------------------ */ 126 | 127 | table.contentstable { 128 | width: 90%; 129 | } 130 | 131 | table.contentstable p.biglink { 132 | line-height: 150%; 133 | } 134 | 135 | a.biglink { 136 | font-size: 1.3em; 137 | } 138 | 139 | span.linkdescr { 140 | font-style: italic; 141 | padding-top: 5px; 142 | font-size: 90%; 143 | } 144 | 145 | /* -- general index --------------------------------------------------------- */ 146 | 147 | table.indextable { 148 | width: 100%; 149 | } 150 | 151 | table.indextable td { 152 | text-align: left; 153 | vertical-align: top; 154 | } 155 | 156 | table.indextable dl, table.indextable dd { 157 | margin-top: 0; 158 | margin-bottom: 0; 159 | } 160 | 161 | table.indextable tr.pcap { 162 | height: 10px; 163 | } 164 | 165 | table.indextable tr.cap { 166 | margin-top: 10px; 167 | background-color: #f2f2f2; 168 | } 169 | 170 | img.toggler { 171 | margin-right: 3px; 172 | margin-top: 3px; 173 | cursor: pointer; 174 | } 175 | 176 | div.modindex-jumpbox { 177 | border-top: 1px solid #ddd; 178 | border-bottom: 1px solid #ddd; 179 | margin: 1em 0 1em 0; 180 | padding: 0.4em; 181 | } 182 | 183 | div.genindex-jumpbox { 184 | border-top: 1px solid #ddd; 185 | border-bottom: 1px solid #ddd; 
186 | margin: 1em 0 1em 0; 187 | padding: 0.4em; 188 | } 189 | 190 | /* -- general body styles --------------------------------------------------- */ 191 | 192 | div.body p, div.body dd, div.body li, div.body blockquote { 193 | -moz-hyphens: auto; 194 | -ms-hyphens: auto; 195 | -webkit-hyphens: auto; 196 | hyphens: auto; 197 | } 198 | 199 | a.headerlink { 200 | visibility: hidden; 201 | } 202 | 203 | h1:hover > a.headerlink, 204 | h2:hover > a.headerlink, 205 | h3:hover > a.headerlink, 206 | h4:hover > a.headerlink, 207 | h5:hover > a.headerlink, 208 | h6:hover > a.headerlink, 209 | dt:hover > a.headerlink, 210 | caption:hover > a.headerlink, 211 | p.caption:hover > a.headerlink, 212 | div.code-block-caption:hover > a.headerlink { 213 | visibility: visible; 214 | } 215 | 216 | div.body p.caption { 217 | text-align: inherit; 218 | } 219 | 220 | div.body td { 221 | text-align: left; 222 | } 223 | 224 | .field-list ul { 225 | padding-left: 1em; 226 | } 227 | 228 | .first { 229 | margin-top: 0 !important; 230 | } 231 | 232 | p.rubric { 233 | margin-top: 30px; 234 | font-weight: bold; 235 | } 236 | 237 | img.align-left, .figure.align-left, object.align-left { 238 | clear: left; 239 | float: left; 240 | margin-right: 1em; 241 | } 242 | 243 | img.align-right, .figure.align-right, object.align-right { 244 | clear: right; 245 | float: right; 246 | margin-left: 1em; 247 | } 248 | 249 | img.align-center, .figure.align-center, object.align-center { 250 | display: block; 251 | margin-left: auto; 252 | margin-right: auto; 253 | } 254 | 255 | .align-left { 256 | text-align: left; 257 | } 258 | 259 | .align-center { 260 | text-align: center; 261 | } 262 | 263 | .align-right { 264 | text-align: right; 265 | } 266 | 267 | /* -- sidebars -------------------------------------------------------------- */ 268 | 269 | div.sidebar { 270 | margin: 0 0 0.5em 1em; 271 | border: 1px solid #ddb; 272 | padding: 7px 7px 0 7px; 273 | background-color: #ffe; 274 | width: 40%; 275 | float: right; 276 | } 277 | 278 | p.sidebar-title { 279 | font-weight: bold; 280 | } 281 | 282 | /* -- topics ---------------------------------------------------------------- */ 283 | 284 | div.topic { 285 | border: 1px solid #ccc; 286 | padding: 7px 7px 0 7px; 287 | margin: 10px 0 10px 0; 288 | } 289 | 290 | p.topic-title { 291 | font-size: 1.1em; 292 | font-weight: bold; 293 | margin-top: 10px; 294 | } 295 | 296 | /* -- admonitions ----------------------------------------------------------- */ 297 | 298 | div.admonition { 299 | margin-top: 10px; 300 | margin-bottom: 10px; 301 | padding: 7px; 302 | } 303 | 304 | div.admonition dt { 305 | font-weight: bold; 306 | } 307 | 308 | div.admonition dl { 309 | margin-bottom: 0; 310 | } 311 | 312 | p.admonition-title { 313 | margin: 0px 10px 5px 0px; 314 | font-weight: bold; 315 | } 316 | 317 | div.body p.centered { 318 | text-align: center; 319 | margin-top: 25px; 320 | } 321 | 322 | /* -- tables ---------------------------------------------------------------- */ 323 | 324 | table.docutils { 325 | border: 0; 326 | border-collapse: collapse; 327 | } 328 | 329 | table caption span.caption-number { 330 | font-style: italic; 331 | } 332 | 333 | table caption span.caption-text { 334 | } 335 | 336 | table.docutils td, table.docutils th { 337 | padding: 1px 8px 1px 5px; 338 | border-top: 0; 339 | border-left: 0; 340 | border-right: 0; 341 | border-bottom: 1px solid #aaa; 342 | } 343 | 344 | table.field-list td, table.field-list th { 345 | border: 0 !important; 346 | } 347 | 348 | table.footnote td, 
table.footnote th { 349 | border: 0 !important; 350 | } 351 | 352 | th { 353 | text-align: left; 354 | padding-right: 5px; 355 | } 356 | 357 | table.citation { 358 | border-left: solid 1px gray; 359 | margin-left: 1px; 360 | } 361 | 362 | table.citation td { 363 | border-bottom: none; 364 | } 365 | 366 | /* -- figures --------------------------------------------------------------- */ 367 | 368 | div.figure { 369 | margin: 0.5em; 370 | padding: 0.5em; 371 | } 372 | 373 | div.figure p.caption { 374 | padding: 0.3em; 375 | } 376 | 377 | div.figure p.caption span.caption-number { 378 | font-style: italic; 379 | } 380 | 381 | div.figure p.caption span.caption-text { 382 | } 383 | 384 | 385 | /* -- other body styles ----------------------------------------------------- */ 386 | 387 | ol.arabic { 388 | list-style: decimal; 389 | } 390 | 391 | ol.loweralpha { 392 | list-style: lower-alpha; 393 | } 394 | 395 | ol.upperalpha { 396 | list-style: upper-alpha; 397 | } 398 | 399 | ol.lowerroman { 400 | list-style: lower-roman; 401 | } 402 | 403 | ol.upperroman { 404 | list-style: upper-roman; 405 | } 406 | 407 | dl { 408 | margin-bottom: 15px; 409 | } 410 | 411 | dd p { 412 | margin-top: 0px; 413 | } 414 | 415 | dd ul, dd table { 416 | margin-bottom: 10px; 417 | } 418 | 419 | dd { 420 | margin-top: 3px; 421 | margin-bottom: 10px; 422 | margin-left: 30px; 423 | } 424 | 425 | dt:target, .highlighted { 426 | background-color: #fbe54e; 427 | } 428 | 429 | dl.glossary dt { 430 | font-weight: bold; 431 | font-size: 1.1em; 432 | } 433 | 434 | .field-list ul { 435 | margin: 0; 436 | padding-left: 1em; 437 | } 438 | 439 | .field-list p { 440 | margin: 0; 441 | } 442 | 443 | .optional { 444 | font-size: 1.3em; 445 | } 446 | 447 | .sig-paren { 448 | font-size: larger; 449 | } 450 | 451 | .versionmodified { 452 | font-style: italic; 453 | } 454 | 455 | .system-message { 456 | background-color: #fda; 457 | padding: 5px; 458 | border: 3px solid red; 459 | } 460 | 461 | .footnote:target { 462 | background-color: #ffa; 463 | } 464 | 465 | .line-block { 466 | display: block; 467 | margin-top: 1em; 468 | margin-bottom: 1em; 469 | } 470 | 471 | .line-block .line-block { 472 | margin-top: 0; 473 | margin-bottom: 0; 474 | margin-left: 1.5em; 475 | } 476 | 477 | .guilabel, .menuselection { 478 | font-family: sans-serif; 479 | } 480 | 481 | .accelerator { 482 | text-decoration: underline; 483 | } 484 | 485 | .classifier { 486 | font-style: oblique; 487 | } 488 | 489 | abbr, acronym { 490 | border-bottom: dotted 1px; 491 | cursor: help; 492 | } 493 | 494 | /* -- code displays --------------------------------------------------------- */ 495 | 496 | pre { 497 | overflow: auto; 498 | overflow-y: hidden; /* fixes display issues on Chrome browsers */ 499 | } 500 | 501 | td.linenos pre { 502 | padding: 5px 0px; 503 | border: 0; 504 | background-color: transparent; 505 | color: #aaa; 506 | } 507 | 508 | table.highlighttable { 509 | margin-left: 0.5em; 510 | } 511 | 512 | table.highlighttable td { 513 | padding: 0 0.5em 0 0.5em; 514 | } 515 | 516 | div.code-block-caption { 517 | padding: 2px 5px; 518 | font-size: small; 519 | } 520 | 521 | div.code-block-caption code { 522 | background-color: transparent; 523 | } 524 | 525 | div.code-block-caption + div > div.highlight > pre { 526 | margin-top: 0; 527 | } 528 | 529 | div.code-block-caption span.caption-number { 530 | padding: 0.1em 0.3em; 531 | font-style: italic; 532 | } 533 | 534 | div.code-block-caption span.caption-text { 535 | } 536 | 537 | div.literal-block-wrapper { 538 | 
padding: 1em 1em 0; 539 | } 540 | 541 | div.literal-block-wrapper div.highlight { 542 | margin: 0; 543 | } 544 | 545 | code.descname { 546 | background-color: transparent; 547 | font-weight: bold; 548 | font-size: 1.2em; 549 | } 550 | 551 | code.descclassname { 552 | background-color: transparent; 553 | } 554 | 555 | code.xref, a code { 556 | background-color: transparent; 557 | font-weight: bold; 558 | } 559 | 560 | h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { 561 | background-color: transparent; 562 | } 563 | 564 | .viewcode-link { 565 | float: right; 566 | } 567 | 568 | .viewcode-back { 569 | float: right; 570 | font-family: sans-serif; 571 | } 572 | 573 | div.viewcode-block:target { 574 | margin: -1px -10px; 575 | padding: 0 10px; 576 | } 577 | 578 | /* -- math display ---------------------------------------------------------- */ 579 | 580 | img.math { 581 | vertical-align: middle; 582 | } 583 | 584 | div.body div.math p { 585 | text-align: center; 586 | } 587 | 588 | span.eqno { 589 | float: right; 590 | } 591 | 592 | /* -- printout stylesheet --------------------------------------------------- */ 593 | 594 | @media print { 595 | div.document, 596 | div.documentwrapper, 597 | div.bodywrapper { 598 | margin: 0 !important; 599 | width: 100%; 600 | } 601 | 602 | div.sphinxsidebar, 603 | div.related, 604 | div.footer, 605 | #top-link { 606 | display: none; 607 | } 608 | } -------------------------------------------------------------------------------- /docs/_static/comment-bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/comment-bright.png -------------------------------------------------------------------------------- /docs/_static/comment-close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/comment-close.png -------------------------------------------------------------------------------- /docs/_static/comment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/comment.png -------------------------------------------------------------------------------- /docs/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../font/fontawesome_webfont.eot");src:url("../font/fontawesome_webfont.eot?#iefix") format("embedded-opentype"),url("../font/fontawesome_webfont.woff") format("woff"),url("../font/fontawesome_webfont.ttf") format("truetype"),url("../font/fontawesome_webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:0.8em}ul.fas li .fa-large:before,ul.fas li 
.fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}img{width:100%;height:auto}} 2 | /*# sourceMappingURL=badge_only.css.map */ 3 | -------------------------------------------------------------------------------- /docs/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s == 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node) { 70 | if (node.nodeType == 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { 74 | var span = document.createElement("span"); 75 | span.className = className; 76 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 77 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 78 | document.createTextNode(val.substr(pos + text.length)), 79 | node.nextSibling)); 80 | node.nodeValue = val.substr(0, pos); 81 | } 82 | } 83 | else if (!jQuery(node).is("button, select, textarea")) { 84 | jQuery.each(node.childNodes, function() { 85 | highlight(this); 86 | }); 87 | } 88 | } 89 | return this.each(function() { 90 | highlight(this); 91 | }); 92 | }; 93 | 94 | /* 95 | * backward compatibility for jQuery.browser 96 | * This will be supported until firefox bug is fixed. 97 | */ 98 | if (!jQuery.browser) { 99 | jQuery.uaMatch = function(ua) { 100 | ua = ua.toLowerCase(); 101 | 102 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 103 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 104 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 105 | /(msie) ([\w.]+)/.exec(ua) || 106 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 107 | []; 108 | 109 | return { 110 | browser: match[ 1 ] || "", 111 | version: match[ 2 ] || "0" 112 | }; 113 | }; 114 | jQuery.browser = {}; 115 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 116 | } 117 | 118 | /** 119 | * Small JavaScript module for the documentation. 
120 | */ 121 | var Documentation = { 122 | 123 | init : function() { 124 | this.fixFirefoxAnchorBug(); 125 | this.highlightSearchWords(); 126 | this.initIndexTable(); 127 | 128 | }, 129 | 130 | /** 131 | * i18n support 132 | */ 133 | TRANSLATIONS : {}, 134 | PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, 135 | LOCALE : 'unknown', 136 | 137 | // gettext and ngettext don't access this so that the functions 138 | // can safely bound to a different name (_ = Documentation.gettext) 139 | gettext : function(string) { 140 | var translated = Documentation.TRANSLATIONS[string]; 141 | if (typeof translated == 'undefined') 142 | return string; 143 | return (typeof translated == 'string') ? translated : translated[0]; 144 | }, 145 | 146 | ngettext : function(singular, plural, n) { 147 | var translated = Documentation.TRANSLATIONS[singular]; 148 | if (typeof translated == 'undefined') 149 | return (n == 1) ? singular : plural; 150 | return translated[Documentation.PLURALEXPR(n)]; 151 | }, 152 | 153 | addTranslations : function(catalog) { 154 | for (var key in catalog.messages) 155 | this.TRANSLATIONS[key] = catalog.messages[key]; 156 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 157 | this.LOCALE = catalog.locale; 158 | }, 159 | 160 | /** 161 | * add context elements like header anchor links 162 | */ 163 | addContextElements : function() { 164 | $('div[id] > :header:first').each(function() { 165 | $('\u00B6'). 166 | attr('href', '#' + this.id). 167 | attr('title', _('Permalink to this headline')). 168 | appendTo(this); 169 | }); 170 | $('dt[id]').each(function() { 171 | $('\u00B6'). 172 | attr('href', '#' + this.id). 173 | attr('title', _('Permalink to this definition')). 174 | appendTo(this); 175 | }); 176 | }, 177 | 178 | /** 179 | * workaround a firefox stupidity 180 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 181 | */ 182 | fixFirefoxAnchorBug : function() { 183 | if (document.location.hash) 184 | window.setTimeout(function() { 185 | document.location.href += ''; 186 | }, 10); 187 | }, 188 | 189 | /** 190 | * highlight the search words provided in the url in the text 191 | */ 192 | highlightSearchWords : function() { 193 | var params = $.getQueryParameters(); 194 | var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; 195 | if (terms.length) { 196 | var body = $('div.body'); 197 | if (!body.length) { 198 | body = $('body'); 199 | } 200 | window.setTimeout(function() { 201 | $.each(terms, function() { 202 | body.highlightText(this.toLowerCase(), 'highlighted'); 203 | }); 204 | }, 10); 205 | $('') 207 | .appendTo($('#searchbox')); 208 | } 209 | }, 210 | 211 | /** 212 | * init the domain index toggle buttons 213 | */ 214 | initIndexTable : function() { 215 | var togglers = $('img.toggler').click(function() { 216 | var src = $(this).attr('src'); 217 | var idnum = $(this).attr('id').substr(7); 218 | $('tr.cg-' + idnum).toggle(); 219 | if (src.substr(-9) == 'minus.png') 220 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 221 | else 222 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 223 | }).css('display', ''); 224 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 225 | togglers.click(); 226 | } 227 | }, 228 | 229 | /** 230 | * helper function to hide the search marks again 231 | */ 232 | hideSearchWords : function() { 233 | $('#searchbox .highlight-link').fadeOut(300); 234 | $('span.highlighted').removeClass('highlighted'); 235 | }, 236 | 237 | /** 238 | * make the url absolute 239 | */ 240 | makeURL : function(relativeURL) { 241 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 242 | }, 243 | 244 | /** 245 | * get the current relative url 246 | */ 247 | getCurrentURL : function() { 248 | var path = document.location.pathname; 249 | var parts = path.split(/\//); 250 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 251 | if (this == '..') 252 | parts.pop(); 253 | }); 254 | var url = parts.join('/'); 255 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 256 | }, 257 | 258 | initOnKeyListeners: function() { 259 | $(document).keyup(function(event) { 260 | var activeElementType = document.activeElement.tagName; 261 | // don't navigate when in search box or textarea 262 | if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { 263 | switch (event.keyCode) { 264 | case 37: // left 265 | var prevHref = $('link[rel="prev"]').prop('href'); 266 | if (prevHref) { 267 | window.location.href = prevHref; 268 | return false; 269 | } 270 | case 39: // right 271 | var nextHref = $('link[rel="next"]').prop('href'); 272 | if (nextHref) { 273 | window.location.href = nextHref; 274 | return false; 275 | } 276 | } 277 | } 278 | }); 279 | } 280 | }; 281 | 282 | // quick alias for translations 283 | _ = Documentation.gettext; 284 | 285 | $(document).ready(function() { 286 | Documentation.init(); 287 | }); -------------------------------------------------------------------------------- /docs/_static/down-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/down-pressed.png -------------------------------------------------------------------------------- /docs/_static/down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/down.png -------------------------------------------------------------------------------- /docs/_static/file.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/file.png -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/Inconsolata-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/Inconsolata-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/Lato-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/Lato-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/RobotoSlab-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/RobotoSlab-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | require=(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var 
a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o"); 77 | 78 | // Add expand links to all parents of nested ul 79 | $('.wy-menu-vertical ul').not('.simple').siblings('a').each(function () { 80 | var link = $(this); 81 | expand = $(''); 82 | expand.on('click', function (ev) { 83 | self.toggleCurrent(link); 84 | ev.stopPropagation(); 85 | return false; 86 | }); 87 | link.prepend(expand); 88 | }); 89 | }; 90 | 91 | nav.reset = function () { 92 | // Get anchor from URL and open up nested nav 93 | var anchor = encodeURI(window.location.hash); 94 | if (anchor) { 95 | try { 96 | var link = $('.wy-menu-vertical') 97 | .find('[href="' + anchor + '"]'); 98 | $('.wy-menu-vertical li.toctree-l1 li.current') 99 | .removeClass('current'); 100 | link.closest('li.toctree-l2').addClass('current'); 101 | link.closest('li.toctree-l3').addClass('current'); 102 | link.closest('li.toctree-l4').addClass('current'); 103 | } 104 | catch (err) { 105 | console.log("Error expanding nav for anchor", err); 106 | } 107 | } 108 | }; 109 | 110 | nav.onScroll = function () { 111 | this.winScroll = false; 112 | var newWinPosition = this.win.scrollTop(), 113 | winBottom = newWinPosition + this.winHeight, 114 | navPosition = this.navBar.scrollTop(), 115 | newNavPosition = navPosition + (newWinPosition - this.winPosition); 116 | if (newWinPosition < 0 || winBottom > this.docHeight) { 117 | return; 118 | } 119 | this.navBar.scrollTop(newNavPosition); 120 | this.winPosition = newWinPosition; 121 | }; 122 | 123 | nav.onResize = function () { 124 | this.winResize = false; 125 | this.winHeight = this.win.height(); 126 | this.docHeight = $(document).height(); 127 | }; 128 | 129 | nav.hashChange = function () { 130 | this.linkScroll = true; 131 | this.win.one('hashchange', function () { 132 | this.linkScroll = false; 133 | }); 134 | }; 135 | 136 | nav.toggleCurrent = function (elem) { 137 | var parent_li = elem.closest('li'); 138 | parent_li.siblings('li.current').removeClass('current'); 139 | parent_li.siblings().find('li.current').removeClass('current'); 140 | parent_li.find('> ul li.current').removeClass('current'); 141 | parent_li.toggleClass('current'); 142 | }; 143 | 144 | return nav; 145 | } 146 | module.exports.ThemeNav = ThemeNav(); 147 | 148 | if (typeof(window) != 'undefined') { 149 | window.SphinxRtdTheme = { StickyNav: module.exports.ThemeNav }; 150 | } 151 | 152 | },{"jquery":"jquery"}]},{},["sphinx-rtd-theme"]); 153 | -------------------------------------------------------------------------------- /docs/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/minus.png -------------------------------------------------------------------------------- /docs/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/plus.png -------------------------------------------------------------------------------- /docs/_static/pygments.css: 
-------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ 8 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 9 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 10 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ 11 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 12 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 14 | .highlight .ge { font-style: italic } /* Generic.Emph */ 15 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 18 | .highlight .go { color: #333333 } /* Generic.Output */ 19 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 20 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 23 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 24 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 25 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 26 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 27 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 28 | .highlight .kt { color: #902000 } /* Keyword.Type */ 29 | .highlight .m { color: #208050 } /* Literal.Number */ 30 | .highlight .s { color: #4070a0 } /* Literal.String */ 31 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 32 | .highlight .nb { color: #007020 } /* Name.Builtin */ 33 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 34 | .highlight .no { color: #60add5 } /* Name.Constant */ 35 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 36 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 37 | .highlight .ne { color: #007020 } /* Name.Exception */ 38 | .highlight .nf { color: #06287e } /* Name.Function */ 39 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 40 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 41 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 42 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 43 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 45 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */ 46 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 47 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 48 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 49 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 50 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 51 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 52 | 
.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 53 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 54 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 55 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 56 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 57 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 58 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 59 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 60 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 61 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 62 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 63 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 64 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 65 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/_static/underscore.js: -------------------------------------------------------------------------------- 1 | // Underscore.js 1.3.1 2 | // (c) 2009-2012 Jeremy Ashkenas, DocumentCloud Inc. 3 | // Underscore is freely distributable under the MIT license. 4 | // Portions of Underscore are inspired or borrowed from Prototype, 5 | // Oliver Steele's Functional, and John Resig's Micro-Templating. 6 | // For all details and documentation: 7 | // http://documentcloud.github.com/underscore 8 | (function(){function q(a,c,d){if(a===c)return a!==0||1/a==1/c;if(a==null||c==null)return a===c;if(a._chain)a=a._wrapped;if(c._chain)c=c._wrapped;if(a.isEqual&&b.isFunction(a.isEqual))return a.isEqual(c);if(c.isEqual&&b.isFunction(c.isEqual))return c.isEqual(a);var e=l.call(a);if(e!=l.call(c))return false;switch(e){case "[object String]":return a==String(c);case "[object Number]":return a!=+a?c!=+c:a==0?1/a==1/c:a==+c;case "[object Date]":case "[object Boolean]":return+a==+c;case "[object RegExp]":return a.source== 9 | c.source&&a.global==c.global&&a.multiline==c.multiline&&a.ignoreCase==c.ignoreCase}if(typeof a!="object"||typeof c!="object")return false;for(var f=d.length;f--;)if(d[f]==a)return true;d.push(a);var f=0,g=true;if(e=="[object Array]"){if(f=a.length,g=f==c.length)for(;f--;)if(!(g=f in a==f in c&&q(a[f],c[f],d)))break}else{if("constructor"in a!="constructor"in c||a.constructor!=c.constructor)return false;for(var h in a)if(b.has(a,h)&&(f++,!(g=b.has(c,h)&&q(a[h],c[h],d))))break;if(g){for(h in c)if(b.has(c, 10 | h)&&!f--)break;g=!f}}d.pop();return g}var r=this,G=r._,n={},k=Array.prototype,o=Object.prototype,i=k.slice,H=k.unshift,l=o.toString,I=o.hasOwnProperty,w=k.forEach,x=k.map,y=k.reduce,z=k.reduceRight,A=k.filter,B=k.every,C=k.some,p=k.indexOf,D=k.lastIndexOf,o=Array.isArray,J=Object.keys,s=Function.prototype.bind,b=function(a){return new m(a)};if(typeof exports!=="undefined"){if(typeof module!=="undefined"&&module.exports)exports=module.exports=b;exports._=b}else r._=b;b.VERSION="1.3.1";var j=b.each= 11 | b.forEach=function(a,c,d){if(a!=null)if(w&&a.forEach===w)a.forEach(c,d);else if(a.length===+a.length)for(var e=0,f=a.length;e2;a== 12 | null&&(a=[]);if(y&&a.reduce===y)return e&&(c=b.bind(c,e)),f?a.reduce(c,d):a.reduce(c);j(a,function(a,b,i){f?d=c.call(e,d,a,b,i):(d=a,f=true)});if(!f)throw new TypeError("Reduce of empty array with no initial value");return d};b.reduceRight=b.foldr=function(a,c,d,e){var 
f=arguments.length>2;a==null&&(a=[]);if(z&&a.reduceRight===z)return e&&(c=b.bind(c,e)),f?a.reduceRight(c,d):a.reduceRight(c);var g=b.toArray(a).reverse();e&&!f&&(c=b.bind(c,e));return f?b.reduce(g,c,d,e):b.reduce(g,c)};b.find=b.detect= 13 | function(a,c,b){var e;E(a,function(a,g,h){if(c.call(b,a,g,h))return e=a,true});return e};b.filter=b.select=function(a,c,b){var e=[];if(a==null)return e;if(A&&a.filter===A)return a.filter(c,b);j(a,function(a,g,h){c.call(b,a,g,h)&&(e[e.length]=a)});return e};b.reject=function(a,c,b){var e=[];if(a==null)return e;j(a,function(a,g,h){c.call(b,a,g,h)||(e[e.length]=a)});return e};b.every=b.all=function(a,c,b){var e=true;if(a==null)return e;if(B&&a.every===B)return a.every(c,b);j(a,function(a,g,h){if(!(e= 14 | e&&c.call(b,a,g,h)))return n});return e};var E=b.some=b.any=function(a,c,d){c||(c=b.identity);var e=false;if(a==null)return e;if(C&&a.some===C)return a.some(c,d);j(a,function(a,b,h){if(e||(e=c.call(d,a,b,h)))return n});return!!e};b.include=b.contains=function(a,c){var b=false;if(a==null)return b;return p&&a.indexOf===p?a.indexOf(c)!=-1:b=E(a,function(a){return a===c})};b.invoke=function(a,c){var d=i.call(arguments,2);return b.map(a,function(a){return(b.isFunction(c)?c||a:a[c]).apply(a,d)})};b.pluck= 15 | function(a,c){return b.map(a,function(a){return a[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);if(!c&&b.isEmpty(a))return-Infinity;var e={computed:-Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b>=e.computed&&(e={value:a,computed:b})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);if(!c&&b.isEmpty(a))return Infinity;var e={computed:Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;bd?1:0}),"value")};b.groupBy=function(a,c){var d={},e=b.isFunction(c)?c:function(a){return a[c]};j(a,function(a,b){var c=e(a,b);(d[c]||(d[c]=[])).push(a)});return d};b.sortedIndex=function(a, 17 | c,d){d||(d=b.identity);for(var e=0,f=a.length;e>1;d(a[g])=0})})};b.difference=function(a){var c=b.flatten(i.call(arguments,1));return b.filter(a,function(a){return!b.include(c,a)})};b.zip=function(){for(var a=i.call(arguments),c=b.max(b.pluck(a,"length")),d=Array(c),e=0;e=0;d--)b=[a[d].apply(this,b)];return b[0]}}; 24 | b.after=function(a,b){return a<=0?b():function(){if(--a<1)return b.apply(this,arguments)}};b.keys=J||function(a){if(a!==Object(a))throw new TypeError("Invalid object");var c=[],d;for(d in a)b.has(a,d)&&(c[c.length]=d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=b.methods=function(a){var c=[],d;for(d in a)b.isFunction(a[d])&&c.push(d);return c.sort()};b.extend=function(a){j(i.call(arguments,1),function(b){for(var d in b)a[d]=b[d]});return a};b.defaults=function(a){j(i.call(arguments, 25 | 1),function(b){for(var d in b)a[d]==null&&(a[d]=b[d])});return a};b.clone=function(a){return!b.isObject(a)?a:b.isArray(a)?a.slice():b.extend({},a)};b.tap=function(a,b){b(a);return a};b.isEqual=function(a,b){return q(a,b,[])};b.isEmpty=function(a){if(b.isArray(a)||b.isString(a))return a.length===0;for(var c in a)if(b.has(a,c))return false;return true};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=o||function(a){return l.call(a)=="[object Array]"};b.isObject=function(a){return a===Object(a)}; 26 | b.isArguments=function(a){return l.call(a)=="[object Arguments]"};if(!b.isArguments(arguments))b.isArguments=function(a){return!(!a||!b.has(a,"callee"))};b.isFunction=function(a){return l.call(a)=="[object Function]"};b.isString=function(a){return l.call(a)=="[object 
String]"};b.isNumber=function(a){return l.call(a)=="[object Number]"};b.isNaN=function(a){return a!==a};b.isBoolean=function(a){return a===true||a===false||l.call(a)=="[object Boolean]"};b.isDate=function(a){return l.call(a)=="[object Date]"}; 27 | b.isRegExp=function(a){return l.call(a)=="[object RegExp]"};b.isNull=function(a){return a===null};b.isUndefined=function(a){return a===void 0};b.has=function(a,b){return I.call(a,b)};b.noConflict=function(){r._=G;return this};b.identity=function(a){return a};b.times=function(a,b,d){for(var e=0;e/g,">").replace(/"/g,""").replace(/'/g,"'").replace(/\//g,"/")};b.mixin=function(a){j(b.functions(a), 28 | function(c){K(c,b[c]=a[c])})};var L=0;b.uniqueId=function(a){var b=L++;return a?a+b:b};b.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var t=/.^/,u=function(a){return a.replace(/\\\\/g,"\\").replace(/\\'/g,"'")};b.template=function(a,c){var d=b.templateSettings,d="var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push('"+a.replace(/\\/g,"\\\\").replace(/'/g,"\\'").replace(d.escape||t,function(a,b){return"',_.escape("+ 29 | u(b)+"),'"}).replace(d.interpolate||t,function(a,b){return"',"+u(b)+",'"}).replace(d.evaluate||t,function(a,b){return"');"+u(b).replace(/[\r\n\t]/g," ")+";__p.push('"}).replace(/\r/g,"\\r").replace(/\n/g,"\\n").replace(/\t/g,"\\t")+"');}return __p.join('');",e=new Function("obj","_",d);return c?e(c,b):function(a){return e.call(this,a,b)}};b.chain=function(a){return b(a).chain()};var m=function(a){this._wrapped=a};b.prototype=m.prototype;var v=function(a,c){return c?b(a).chain():a},K=function(a,c){m.prototype[a]= 30 | function(){var a=i.call(arguments);H.call(a,this._wrapped);return v(c.apply(b,a),this._chain)}};b.mixin(b);j("pop,push,reverse,shift,sort,splice,unshift".split(","),function(a){var b=k[a];m.prototype[a]=function(){var d=this._wrapped;b.apply(d,arguments);var e=d.length;(a=="shift"||a=="splice")&&e===0&&delete d[0];return v(d,this._chain)}});j(["concat","join","slice"],function(a){var b=k[a];m.prototype[a]=function(){return v(b.apply(this._wrapped,arguments),this._chain)}});m.prototype.chain=function(){this._chain= 31 | true;return this};m.prototype.value=function(){return this._wrapped}}).call(this); 32 | -------------------------------------------------------------------------------- /docs/_static/up-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/up-pressed.png -------------------------------------------------------------------------------- /docs/_static/up.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/_static/up.png -------------------------------------------------------------------------------- /docs/doctrees/code.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/doctrees/code.doctree -------------------------------------------------------------------------------- /docs/doctrees/environment.pickle: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/doctrees/intro.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/doctrees/intro.doctree -------------------------------------------------------------------------------- /docs/doctrees/modules.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/doctrees/modules.doctree -------------------------------------------------------------------------------- /docs/doctrees/tutorial.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/doctrees/tutorial.doctree -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Welcome to Multimodal Keras Wrapper’s documentation! — Multimodal Keras Wrapper 0.55 documentation 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 |
Welcome to Multimodal Keras Wrapper's documentation!

Contents:

Indices and tables

© Copyright 2016, Marc Bolaños. Built with Sphinx using a theme provided by Read the Docs.

-------------------------------------------------------------------------------- /docs/intro.html: --------------------------------------------------------------------------------

Introduction — Multimodal Keras Wrapper 0.55 documentation
Introduction

Multimodal Keras Wrapper

Wrapper for Keras with support to easy multimodal data and models loading and handling.

You can download and contribute to the code by cloning this repository.

Documentation

You can access the library documentation page at marcbs.github.io/multimodal_keras_wrapper/

Some code examples are available in demo.ipynb and test.py. Additionally, in the section Projects you can see some practical examples of projects using this library.

Dependencies

The following dependencies are required for using this library:

Only when using NMS for certain localization utilities:

  • cython >= 0.23.4

Installation

In order to install the library, follow these steps:

  1. Clone this repository:

git clone https://github.com/MarcBS/multimodal_keras_wrapper.git

  2. Include the repository path into your PYTHONPATH:

export PYTHONPATH=$PYTHONPATH:/path/to/multimodal_keras_wrapper

  3. If you wish to install the dependencies (this will install our custom Keras fork):

pip install -r requirements.txt

A minimal import check for the resulting installation is sketched right after this page.

Projects

You can see more practical examples in projects which use this library:

VIBIKNet for Visual Question Answering

ABiViRNet for Video Description

Sentence-SelectioNN for Domain Adaptation in SMT

Keras

For additional information on the Deep Learning library, visit the official web page www.keras.io or the GitHub repository https://github.com/fchollet/keras.

You can also use our custom Keras version, which provides several additional layers for Multimodal Learning.
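A quick way to check the installation described above is to import the package and a couple of its modules from a Python interpreter. This is only a minimal sketch: it assumes the repository root is on PYTHONPATH (or that the package was pip-installed) and that the Keras dependency from requirements.txt is available, since some modules import it at load time.

# Minimal import check for the installation steps above (illustrative, not part of the repository).
import keras_wrapper
import keras_wrapper.dataset    # dataset loading/handling utilities (keras_wrapper/dataset.py)
import keras_wrapper.cnn_model  # model wrapper utilities (keras_wrapper/cnn_model.py)

print(keras_wrapper.__file__)   # should point at the cloned repository (or site-packages if pip-installed)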
-------------------------------------------------------------------------------- /docs/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/docs/objects.inv -------------------------------------------------------------------------------- /docs/py-modindex.html: --------------------------------------------------------------------------------

Python Module Index — Multimodal Keras Wrapper 0.55 documentation

Python Module Index

  k
    keras_wrapper
    keras_wrapper.beam_search_ensemble
    keras_wrapper.cnn_model
    keras_wrapper.dataset
    keras_wrapper.utils
194 | 195 | 196 | 197 | 198 | 199 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 226 | 227 | 228 | 229 | -------------------------------------------------------------------------------- /docs/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Search — Multimodal Keras Wrapper 0.55 documentation 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 |
163 | 164 | 165 | 166 | 167 | 168 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 196 | 197 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | -------------------------------------------------------------------------------- /keras_wrapper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/keras_wrapper/__init__.py -------------------------------------------------------------------------------- /keras_wrapper/extra/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/keras_wrapper/extra/__init__.py -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/.gitignore: -------------------------------------------------------------------------------- 1 | *.c 2 | *.cpp 3 | *.so 4 | -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/keras_wrapper/extra/nms/__init__.py -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/cpu_nms.pyx: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | cimport numpy as np 10 | 11 | cdef inline np.float32_t max(np.float32_t a, np.float32_t b): 12 | return a if a >= b else b 13 | 14 | cdef inline np.float32_t min(np.float32_t a, np.float32_t b): 15 | return a if a <= b else b 16 | 17 | def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): 18 | cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] 19 | cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] 20 | cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] 21 | cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] 22 | cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] 23 | 24 | cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) 25 | cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1] 26 | 27 | cdef int ndets = dets.shape[0] 28 | cdef np.ndarray[np.int_t, ndim=1] suppressed = \ 29 | np.zeros((ndets), dtype=np.int) 30 | 31 | # nominal indices 32 | cdef int _i, _j 33 | # sorted indices 34 | cdef int i, j 35 | # temp variables for box i's (the box currently under consideration) 36 | cdef np.float32_t ix1, iy1, ix2, iy2, iarea 37 | # variables for computing overlap with box j (lower scoring box) 38 | cdef np.float32_t xx1, yy1, xx2, yy2 39 | cdef np.float32_t w, h 40 | cdef np.float32_t inter, ovr 41 | 42 | keep = [] 43 | for _i in range(ndets): 44 | i = order[_i] 45 | if suppressed[i] == 1: 46 | continue 47 | keep.append(i) 48 | ix1 = x1[i] 49 | iy1 = y1[i] 50 | ix2 = x2[i] 51 | iy2 = y2[i] 52 | iarea = areas[i] 53 | for _j in range(_i + 1, ndets): 54 | j = order[_j] 55 | if suppressed[j] == 1: 56 | continue 57 | xx1 = max(ix1, x1[j]) 58 | yy1 = 
max(iy1, y1[j]) 59 | xx2 = min(ix2, x2[j]) 60 | yy2 = min(iy2, y2[j]) 61 | w = max(0.0, xx2 - xx1 + 1) 62 | h = max(0.0, yy2 - yy1 + 1) 63 | inter = w * h 64 | ovr = inter / (iarea + areas[j] - inter) 65 | if ovr >= thresh: 66 | suppressed[j] = 1 67 | 68 | return keep 69 | -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/gpu_nms.hpp: -------------------------------------------------------------------------------- 1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, 2 | int boxes_dim, float nms_overlap_thresh, int device_id); 3 | -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/gpu_nms.pyx: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Faster R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | cimport numpy as np 10 | 11 | assert sizeof(int) == sizeof(np.int32_t) 12 | 13 | cdef extern from "gpu_nms.hpp": 14 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) 15 | 16 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, 17 | np.int32_t device_id=0): 18 | cdef int boxes_num = dets.shape[0] 19 | cdef int boxes_dim = dets.shape[1] 20 | cdef int num_out 21 | cdef np.ndarray[np.int32_t, ndim=1] \ 22 | keep = np.zeros(boxes_num, dtype=np.int32) 23 | cdef np.ndarray[np.float32_t, ndim=1] \ 24 | scores = dets[:, 4] 25 | cdef np.ndarray[np.int_t, ndim=1] \ 26 | order = scores.argsort()[::-1] 27 | cdef np.ndarray[np.float32_t, ndim=2] \ 28 | sorted_dets = dets[order, :] 29 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) 30 | keep = keep[:num_out] 31 | return list(order[keep]) 32 | -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/nms_kernel.cu: -------------------------------------------------------------------------------- 1 | // ------------------------------------------------------------------ 2 | // Faster R-CNN 3 | // Copyright (c) 2015 Microsoft 4 | // Licensed under The MIT License [see fast-rcnn/LICENSE for details] 5 | // Written by Shaoqing Ren 6 | // ------------------------------------------------------------------ 7 | 8 | #include "gpu_nms.hpp" 9 | #include 10 | #include 11 | 12 | #define CUDA_CHECK(condition) \ 13 | /* Code block avoids redefinition of cudaError_t error */ \ 14 | do { \ 15 | cudaError_t error = condition; \ 16 | if (error != cudaSuccess) { \ 17 | std::cout << cudaGetErrorString(error) << std::endl; \ 18 | } \ 19 | } while (0) 20 | 21 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 22 | int const threadsPerBlock = sizeof(unsigned long long) * 8; 23 | 24 | __device__ inline float devIoU(float const * const a, float const * const b) { 25 | float left = max(a[0], b[0]), right = min(a[2], b[2]); 26 | float top = max(a[1], b[1]), bottom = min(a[3], b[3]); 27 | float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); 28 | float interS = width * height; 29 | float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); 30 | float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); 31 | return interS / (Sa + Sb - interS); 32 | } 33 | 34 | __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, 35 | const float 
*dev_boxes, unsigned long long *dev_mask) { 36 | const int row_start = blockIdx.y; 37 | const int col_start = blockIdx.x; 38 | 39 | // if (row_start > col_start) return; 40 | 41 | const int row_size = 42 | min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); 43 | const int col_size = 44 | min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); 45 | 46 | __shared__ float block_boxes[threadsPerBlock * 5]; 47 | if (threadIdx.x < col_size) { 48 | block_boxes[threadIdx.x * 5 + 0] = 49 | dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; 50 | block_boxes[threadIdx.x * 5 + 1] = 51 | dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; 52 | block_boxes[threadIdx.x * 5 + 2] = 53 | dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; 54 | block_boxes[threadIdx.x * 5 + 3] = 55 | dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; 56 | block_boxes[threadIdx.x * 5 + 4] = 57 | dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; 58 | } 59 | __syncthreads(); 60 | 61 | if (threadIdx.x < row_size) { 62 | const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; 63 | const float *cur_box = dev_boxes + cur_box_idx * 5; 64 | int i = 0; 65 | unsigned long long t = 0; 66 | int start = 0; 67 | if (row_start == col_start) { 68 | start = threadIdx.x + 1; 69 | } 70 | for (i = start; i < col_size; i++) { 71 | if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { 72 | t |= 1ULL << i; 73 | } 74 | } 75 | const int col_blocks = DIVUP(n_boxes, threadsPerBlock); 76 | dev_mask[cur_box_idx * col_blocks + col_start] = t; 77 | } 78 | } 79 | 80 | void _set_device(int device_id) { 81 | int current_device; 82 | CUDA_CHECK(cudaGetDevice(¤t_device)); 83 | if (current_device == device_id) { 84 | return; 85 | } 86 | // The call to cudaSetDevice must come before any calls to Get, which 87 | // may perform initialization using the GPU. 
88 | CUDA_CHECK(cudaSetDevice(device_id)); 89 | } 90 | 91 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, 92 | int boxes_dim, float nms_overlap_thresh, int device_id) { 93 | _set_device(device_id); 94 | 95 | float* boxes_dev = NULL; 96 | unsigned long long* mask_dev = NULL; 97 | 98 | const int col_blocks = DIVUP(boxes_num, threadsPerBlock); 99 | 100 | CUDA_CHECK(cudaMalloc(&boxes_dev, 101 | boxes_num * boxes_dim * sizeof(float))); 102 | CUDA_CHECK(cudaMemcpy(boxes_dev, 103 | boxes_host, 104 | boxes_num * boxes_dim * sizeof(float), 105 | cudaMemcpyHostToDevice)); 106 | 107 | CUDA_CHECK(cudaMalloc(&mask_dev, 108 | boxes_num * col_blocks * sizeof(unsigned long long))); 109 | 110 | dim3 blocks(DIVUP(boxes_num, threadsPerBlock), 111 | DIVUP(boxes_num, threadsPerBlock)); 112 | dim3 threads(threadsPerBlock); 113 | nms_kernel<<>>(boxes_num, 114 | nms_overlap_thresh, 115 | boxes_dev, 116 | mask_dev); 117 | 118 | std::vector mask_host(boxes_num * col_blocks); 119 | CUDA_CHECK(cudaMemcpy(&mask_host[0], 120 | mask_dev, 121 | sizeof(unsigned long long) * boxes_num * col_blocks, 122 | cudaMemcpyDeviceToHost)); 123 | 124 | std::vector remv(col_blocks); 125 | memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); 126 | 127 | int num_to_keep = 0; 128 | for (int i = 0; i < boxes_num; i++) { 129 | int nblock = i / threadsPerBlock; 130 | int inblock = i % threadsPerBlock; 131 | 132 | if (!(remv[nblock] & (1ULL << inblock))) { 133 | keep_out[num_to_keep++] = i; 134 | unsigned long long *p = &mask_host[0] + i * col_blocks; 135 | for (int j = nblock; j < col_blocks; j++) { 136 | remv[j] |= p[j]; 137 | } 138 | } 139 | } 140 | *num_out = num_to_keep; 141 | 142 | CUDA_CHECK(cudaFree(boxes_dev)); 143 | CUDA_CHECK(cudaFree(mask_dev)); 144 | } 145 | -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/py_cpu_nms.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | 10 | 11 | def py_cpu_nms(dets, thresh): 12 | """Pure Python NMS baseline.""" 13 | x1 = dets[:, 0] 14 | y1 = dets[:, 1] 15 | x2 = dets[:, 2] 16 | y2 = dets[:, 3] 17 | scores = dets[:, 4] 18 | 19 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 20 | order = scores.argsort()[::-1] 21 | 22 | keep = [] 23 | while order.size > 0: 24 | i = order[0] 25 | keep.append(i) 26 | xx1 = np.maximum(x1[i], x1[order[1:]]) 27 | yy1 = np.maximum(y1[i], y1[order[1:]]) 28 | xx2 = np.minimum(x2[i], x2[order[1:]]) 29 | yy2 = np.minimum(y2[i], y2[order[1:]]) 30 | 31 | w = np.maximum(0.0, xx2 - xx1 + 1) 32 | h = np.maximum(0.0, yy2 - yy1 + 1) 33 | inter = w * h 34 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 35 | 36 | inds = np.where(ovr <= thresh)[0] 37 | order = order[inds + 1] 38 | 39 | return keep 40 | -------------------------------------------------------------------------------- /keras_wrapper/extra/nms/setup.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Code adapted from: 3 | # Fast R-CNN 4 | # Copyright (c) 2015 Microsoft 5 | # Licensed under The MIT License [see LICENSE for details] 6 | # Written by Ross Girshick 7 | # 
-------------------------------------------------------- 8 | from six import iteritems 9 | import os 10 | from os.path import join as pjoin 11 | from setuptools import setup 12 | from distutils.extension import Extension 13 | from Cython.Distutils import build_ext 14 | import subprocess 15 | import numpy as np 16 | 17 | 18 | def find_in_path(name, path): 19 | "Find a file in a search path" 20 | # Adapted fom 21 | # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/ 22 | for dir in path.split(os.pathsep): 23 | binpath = pjoin(dir, name) 24 | if os.path.exists(binpath): 25 | return os.path.abspath(binpath) 26 | return None 27 | 28 | 29 | def locate_cuda(): 30 | """Locate the CUDA environment on the system 31 | 32 | Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64' 33 | and values giving the absolute path to each directory. 34 | 35 | Starts by looking for the CUDAHOME env variable. If not found, everything 36 | is based on finding 'nvcc' in the PATH. 37 | """ 38 | 39 | # first check if the CUDAHOME env variable is in use 40 | if 'CUDAHOME' in os.environ: 41 | home = os.environ['CUDAHOME'] 42 | nvcc = pjoin(home, 'bin', 'nvcc') 43 | else: 44 | # otherwise, search the PATH for NVCC 45 | default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin') 46 | nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path) 47 | if nvcc is None: 48 | raise EnvironmentError('The nvcc binary could not be ' 49 | 'located in your $PATH. Either add it to your path, or set $CUDAHOME') 50 | home = os.path.dirname(os.path.dirname(nvcc)) 51 | 52 | cudaconfig = {'home': home, 'nvcc': nvcc, 53 | 'include': pjoin(home, 'include'), 54 | 'lib64': pjoin(home, 'lib64')} 55 | for k, v in iteritems(cudaconfig): 56 | if not os.path.exists(v): 57 | raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v)) 58 | 59 | return cudaconfig 60 | 61 | 62 | CUDA = locate_cuda() 63 | 64 | # Obtain the numpy include directory. This logic works across numpy versions. 65 | try: 66 | numpy_include = np.get_include() 67 | except AttributeError: 68 | numpy_include = np.get_numpy_include() 69 | 70 | 71 | def customize_compiler_for_nvcc(self): 72 | """inject deep into distutils to customize how the dispatch 73 | to gcc/nvcc works. 74 | 75 | If you subclass UnixCCompiler, it's not trivial to get your subclass 76 | injected in, and still have the right customizations (i.e. 77 | distutils.sysconfig.customize_compiler) run on it. So instead of going 78 | the OO route, I have this. Note, it's kindof like a wierd functional 79 | subclassing going on.""" 80 | 81 | # tell the compiler it can processes .cu 82 | self.src_extensions.append('.cu') 83 | 84 | # save references to the default compiler_so and _comple methods 85 | default_compiler_so = self.compiler_so 86 | super = self._compile 87 | 88 | # now redefine the _compile method. This gets executed for each 89 | # object but distutils doesn't have the ability to change compilers 90 | # based on source extension: we add it. 
91 | def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts): 92 | if os.path.splitext(src)[1] == '.cu': 93 | # use the cuda for .cu files 94 | self.set_executable('compiler_so', CUDA['nvcc']) 95 | # use only a subset of the extra_postargs, which are 1-1 translated 96 | # from the extra_compile_args in the Extension class 97 | postargs = extra_postargs['nvcc'] 98 | else: 99 | postargs = extra_postargs['gcc'] 100 | 101 | super(obj, src, ext, cc_args, postargs, pp_opts) 102 | # reset the default compiler_so, which we might have changed for cuda 103 | self.compiler_so = default_compiler_so 104 | 105 | # inject our redefined _compile method into the class 106 | self._compile = _compile 107 | 108 | 109 | # run the customize_compiler 110 | class custom_build_ext(build_ext): 111 | def build_extensions(self): 112 | customize_compiler_for_nvcc(self.compiler) 113 | build_ext.build_extensions(self) 114 | 115 | 116 | ext_modules = [ 117 | Extension( 118 | "cpu_nms", 119 | ["cpu_nms.pyx"], 120 | extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]}, 121 | include_dirs=[numpy_include] 122 | ), 123 | Extension('gpu_nms', 124 | ['nms_kernel.cu', 'gpu_nms.pyx'], 125 | library_dirs=[CUDA['lib64']], 126 | libraries=['cudart'], 127 | language='c++', 128 | runtime_library_dirs=[CUDA['lib64']], 129 | # this syntax is specific to this build system 130 | # we're only going to use certain compiler args with nvcc and not with 131 | # gcc the implementation of this trick is in customize_compiler() below 132 | extra_compile_args={'gcc': ["-Wno-unused-function"], 133 | 'nvcc': ['-arch=sm_35', 134 | '--ptxas-options=-v', 135 | '-c', 136 | '--compiler-options', 137 | "'-fPIC'"]}, 138 | include_dirs=[numpy_include, CUDA['include']] 139 | ) 140 | ] 141 | 142 | setup( 143 | name='nms', 144 | ext_modules=ext_modules, 145 | # inject our custom trigger 146 | cmdclass={'build_ext': custom_build_ext}, 147 | ) 148 | -------------------------------------------------------------------------------- /keras_wrapper/extra/read_write.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Reads from input file or writes to the output file. 4 | 5 | Author: Mateusz Malinowski 6 | Email: mmalinow@mpi-inf.mpg.de 7 | 8 | Modified by: Marc Bola\~nos 9 | \'Alvaro Peris 10 | """ 11 | from __future__ import print_function 12 | from six import iteritems 13 | import json 14 | import os 15 | import codecs 16 | import numpy as np 17 | import tables 18 | import sys 19 | import logging 20 | 21 | logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%d/%m/%Y %H:%M:%S') 22 | logger = logging.getLogger(__name__) 23 | 24 | if sys.version_info.major == 3: 25 | import _pickle as pk 26 | 27 | unicode_fn = str 28 | else: 29 | import cPickle as pk 30 | 31 | unicode_fn = unicode 32 | 33 | 34 | # Helpers 35 | 36 | 37 | def encode_list(mylist): 38 | """ 39 | Encode list as utf-8 if we are working with Python 2.x or as str if we are working with Python 3.x. 40 | :param mylist: 41 | :return: 42 | """ 43 | return [l.decode('utf-8') if isinstance(l, str) else unicode(l) for l in 44 | mylist] if sys.version_info.major == 2 else [str(l) for l in mylist] 45 | 46 | 47 | def dirac(pred, 48 | gt): 49 | """ 50 | Chechks whether pred == gt. 51 | :param pred: Prediction 52 | :param gt: Ground-truth. 
53 | :return: 54 | """ 55 | return int(pred == gt) 56 | 57 | 58 | def create_dir_if_not_exists(directory): 59 | """ 60 | Creates a directory if it doen't exist 61 | 62 | :param directory: Directory to create 63 | :return: None 64 | """ 65 | if not os.path.exists(directory): 66 | logger.info("<<< creating directory " + directory + " ... >>>") 67 | os.makedirs(directory) 68 | 69 | 70 | def clean_dir(directory): 71 | """ 72 | Creates (or empties) a directory 73 | :param directory: Directory to create 74 | :return: None 75 | """ 76 | 77 | if os.path.exists(directory): 78 | import shutil 79 | logger.warning('<<< Deleting directory: %s >>>' % directory) 80 | shutil.rmtree(directory) 81 | os.makedirs(directory) 82 | else: 83 | os.makedirs(directory) 84 | 85 | 86 | # Main functions 87 | def file2list(filepath, 88 | stripfile=True): 89 | """ 90 | Loads a file into a list. One line per element. 91 | :param filepath: Path to the file to load. 92 | :param stripfile: Whether we should strip the lines of the file or not. 93 | :return: List containing the lines read. 94 | """ 95 | with codecs.open(filepath, 'r', encoding='utf-8') as f: 96 | lines = [k for k in [k.strip() for k in f.readlines()] if len(k) > 0] if stripfile else [k for k in 97 | f.readlines()] 98 | return lines 99 | 100 | 101 | def numpy2hdf5(filepath, 102 | mylist, 103 | data_name='data', 104 | permission='w'): 105 | """ 106 | Saves a numpy array as HDF5. 107 | """ 108 | if 'w' in permission: 109 | f = tables.open_file(filepath, 110 | mode=permission) 111 | atom = tables.Float32Atom() 112 | array_c = f.create_earray(f.root, data_name, atom, 113 | tuple([0] + [mylist.shape[i] for i in range(1, len(mylist.shape))])) 114 | array_c.append(mylist) 115 | f.close() 116 | elif permission == 'a': 117 | f = tables.open_file(filepath, mode='a') 118 | f.root.data.append(mylist) 119 | f.close() 120 | 121 | 122 | def numpy2file(filepath, 123 | mylist, 124 | permission='wb', 125 | split=False): 126 | """ 127 | Saves a numpy array as a file. 128 | :param filepath: Destination path. 129 | :param mylist: Numpy array to save. 130 | :param permission: Write permission. 131 | :param split: Whether we save each element from mylist in a separate file or not. 132 | :return: 133 | """ 134 | mylist = np.asarray(mylist) 135 | if split: 136 | for i, filepath_ in list(enumerate(filepath)): 137 | with open(filepath_, permission) as f: 138 | np.save(f, mylist[i]) 139 | else: 140 | with open(filepath, permission) as f: 141 | np.save(f, mylist) 142 | 143 | 144 | def numpy2imgs(folder_path, 145 | mylist, 146 | imgs_names, 147 | dataset): 148 | """ 149 | Save a numpy array as images. 150 | :param folder_path: Folder of the images to save. 151 | :param mylist: Numpy array containing the images. 152 | :param imgs_names: Names of the images to be saved. 153 | :param dataset: 154 | :return: 155 | """ 156 | from PIL import Image as pilimage 157 | create_dir_if_not_exists(folder_path) 158 | n_classes = mylist.shape[-1] 159 | 160 | for img, name in zip(mylist, imgs_names): 161 | name = '_'.join(name.split('/')) 162 | file_path = folder_path + "/" + name # image file 163 | 164 | out_img = dataset.getImageFromPrediction_3DSemanticLabel(img, n_classes) 165 | 166 | # save the segmented image 167 | out_img = pilimage.fromarray(np.uint8(out_img)) 168 | out_img.save(file_path) 169 | 170 | 171 | def listoflists2file(filepath, 172 | mylist, 173 | permission='w'): 174 | """ 175 | Saves a list of lists into a file. Each element in a line. 176 | :param filepath: Destination file. 
177 | :param mylist: List of lists to save. 178 | :param permission: Writing permission. 179 | :return: 180 | """ 181 | mylist = [encode_list(sublist) for sublist in mylist] 182 | mylist = [item for sublist in mylist for item in sublist] 183 | mylist = u'\n'.join(mylist) 184 | with codecs.open(filepath, permission, encoding='utf-8') as f: 185 | f.write(mylist) 186 | f.write('\n') 187 | 188 | 189 | def list2file(filepath, 190 | mylist, 191 | permission='w'): 192 | """ 193 | Saves a list into a file. Each element in a line. 194 | :param filepath: Destination file. 195 | :param mylist: List to save. 196 | :param permission: Writing permission. 197 | :return: 198 | """ 199 | mylist = encode_list(mylist) 200 | mylist = u'\n'.join(mylist) 201 | with codecs.open(filepath, 202 | permission, 203 | encoding='utf-8') as f: 204 | f.write(mylist) 205 | f.write('\n') 206 | 207 | 208 | def list2stdout(mylist): 209 | """ 210 | Prints a list in STDOUT 211 | :param mylist: List to print. 212 | """ 213 | mylist = encode_list(mylist) 214 | mylist = '\n'.join(mylist) 215 | print(mylist) 216 | 217 | 218 | def nbest2file(filepath, 219 | mylist, 220 | separator=u'|||', 221 | permission='w'): 222 | """ 223 | Saves an N-best list into a file. 224 | :param filepath: Destination path. 225 | :param mylist: List to save. 226 | :param separator: Separator between N-best list components. 227 | :param permission: Writing permission. 228 | :return: 229 | """ 230 | newlist = [] 231 | for l in mylist: 232 | for l2 in l: 233 | a = [] 234 | for l3 in l2: 235 | if isinstance(l3, list): 236 | l3 = l3[0] 237 | if sys.version_info.major == 2: 238 | if isinstance(l3, str): 239 | a.append(l3.decode('utf-8') + u' ' + separator) 240 | else: 241 | a.append(unicode(l3) + u' ' + separator) 242 | else: 243 | a.append(str(l3) + ' ' + separator) 244 | a = ' '.join(a + [' ']) 245 | newlist.append(a.strip()[:-len(separator)].strip()) 246 | mylist = '\n'.join(newlist) 247 | if isinstance(mylist[0], str) and sys.version_info.major == 2: 248 | mylist = mylist.encode('utf-8') 249 | with codecs.open(filepath, permission, encoding='utf-8') as f: 250 | f.write(mylist) 251 | 252 | 253 | def list2vqa(filepath, 254 | mylist, 255 | qids, 256 | permission='w', 257 | extra=None): 258 | """ 259 | Saves a list with the VQA format. 260 | """ 261 | res = [] 262 | for i, (ans, qst) in list(enumerate(zip(mylist, qids))): 263 | line = {'answer': ans, 'question_id': int(qst)} 264 | if extra is not None: 265 | line['reference'] = extra['reference'][i] 266 | line['top5'] = str( 267 | [[extra['vocab'][p], extra['probs'][i][p]] for p in np.argsort(extra['probs'][i])[::-1][:5]]) 268 | line['max_prob'] = str(max(extra['probs'][i])) 269 | res.append(line) 270 | with codecs.open(filepath, permission, encoding='utf-8') as f: 271 | json.dump(res, f) 272 | 273 | 274 | def dump_hdf5_simple(filepath, 275 | dataset_name, 276 | data): 277 | """ 278 | Saves a HDF5 file. 279 | """ 280 | import h5py 281 | h5f = h5py.File(filepath, 282 | 'w') 283 | h5f.create_dataset(dataset_name, 284 | data=data) 285 | h5f.close() 286 | 287 | 288 | def load_hdf5_simple(filepath, 289 | dataset_name='data'): 290 | """ 291 | Loads a HDF5 file. 292 | """ 293 | import h5py 294 | h5f = h5py.File(filepath, 'r') 295 | tmp = h5f[dataset_name][:] 296 | h5f.close() 297 | return tmp 298 | 299 | 300 | def model_to_json(path, 301 | model): 302 | """ 303 | Saves model as a json file under the path. 
304 | """ 305 | json_model = model.to_json() 306 | with open(path, 'w') as f: 307 | json.dump(json_model, f) 308 | 309 | 310 | def json_to_model(path): 311 | """ 312 | Loads a model from the json file. 313 | """ 314 | from keras.models import model_from_json 315 | with open(path, 'r') as f: 316 | json_model = json.load(f) 317 | model = model_from_json(json_model) 318 | return model 319 | 320 | 321 | def model_to_text(filepath, model_added): 322 | """ 323 | Save the model to text file. 324 | """ 325 | pass 326 | 327 | 328 | def text_to_model(filepath): 329 | """ 330 | Loads the model from the text file. 331 | """ 332 | pass 333 | 334 | 335 | def print_qa(questions, 336 | answers_gt, 337 | answers_gt_original, 338 | answers_pred, 339 | era, 340 | similarity=dirac, 341 | path=''): 342 | """ 343 | In: 344 | questions - list of questions 345 | answers_gt - list of answers (after modifications like truncation) 346 | answers_gt_original - list of answers (before modifications) 347 | answers_pred - list of predicted answers 348 | era - current era 349 | similarity - measure that measures similarity between gt_original and prediction; 350 | by default dirac measure 351 | path - path for the output (if empty then stdout is used) 352 | by fedault an empty path 353 | Out: 354 | the similarity score 355 | """ 356 | if len(questions) != len(answers_gt): 357 | raise AssertionError('Diferent questions and answers_gt lengths.') 358 | if len(questions) != len(answers_pred): 359 | raise AssertionError('Diferent questions and answers_pred lengths.') 360 | 361 | output = ['-' * 50, 'Era {0}'.format(era)] 362 | score = 0.0 363 | for k, q in list(enumerate(questions)): 364 | a_gt = answers_gt[k] 365 | a_gt_original = answers_gt_original[k] 366 | a_p = answers_pred[k] 367 | score += dirac(a_p, a_gt_original) 368 | if isinstance(q[0], unicode_fn): 369 | tmp = unicode_fn('question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n') 370 | else: 371 | tmp = 'question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n' 372 | output.append(tmp.format(q, a_gt, a_gt_original, a_p)) 373 | score = (score / len(questions)) * 100.0 374 | output.append('Score: {0}'.format(score)) 375 | if path == '': 376 | print('%s' % '\n'.join(map(str, output))) 377 | else: 378 | list2file(path, output) 379 | return score 380 | 381 | 382 | def dict2file(mydict, 383 | path, 384 | title=None, 385 | separator=':', 386 | permission='a'): 387 | """ 388 | In: 389 | mydict - dictionary to save in a file 390 | path - path where mydict is stored 391 | title - the first sentence in the file; 392 | useful if we write many dictionaries 393 | into the same file 394 | """ 395 | tmp = [encode_list([x[0]])[0] + separator + encode_list([x[1]])[0] for x in list(iteritems(mydict))] 396 | if title is not None: 397 | output_list = [title] 398 | output_list.extend(tmp) 399 | else: 400 | output_list = tmp 401 | list2file(path, 402 | output_list, 403 | permission=permission) 404 | 405 | 406 | def dict2pkl(mydict, 407 | path): 408 | """ 409 | Saves a dictionary object into a pkl file. 410 | :param mydict: dictionary to save in a file 411 | :param path: path where my_dict is stored 412 | :return: 413 | """ 414 | if path[-4:] == '.pkl': 415 | extension = '' 416 | else: 417 | extension = '.pkl' 418 | with open(path + extension, 'wb') as f: 419 | pk.dump(mydict, 420 | f, 421 | protocol=-1) 422 | 423 | 424 | def pkl2dict(path): 425 | """ 426 | Loads a dictionary object from a pkl file. 
427 | 428 | :param path: Path to the pkl file to load 429 | :return: Dict() containing the loaded pkl 430 | """ 431 | with open(path, 'rb') as f: 432 | if sys.version_info.major == 2: 433 | return pk.load(f) 434 | else: 435 | return pk.load(f, 436 | encoding='latin1') 437 | -------------------------------------------------------------------------------- /keras_wrapper/extra/regularize.py: -------------------------------------------------------------------------------- 1 | from keras.layers.noise import GaussianNoise 2 | from keras.layers.advanced_activations import ChannelWisePReLU as PReLU 3 | from keras.layers.normalization import BatchNormalization, L2_norm, L1_norm 4 | from keras.regularizers import l2 5 | from keras.layers.core import Dropout, Lambda 6 | 7 | 8 | def Regularize(layer, 9 | params, 10 | shared_layers=False, 11 | name='', 12 | apply_noise=True, 13 | apply_batch_normalization=True, 14 | apply_prelu=True, 15 | apply_dropout=True, 16 | apply_l1=True, 17 | apply_l2=True): 18 | """ 19 | Apply the regularization specified in parameters to the layer 20 | :param layer: Layer to regularize 21 | :param params: Params specifying the regularizations to apply 22 | :param shared_layers: Boolean indicating if we want to get the used layers for applying to a shared-layers model. 23 | :param name: Name prepended to regularizer layer 24 | :param apply_noise: If False, noise won't be applied, independently of params 25 | :param apply_dropout: If False, dropout won't be applied, independently of params 26 | :param apply_prelu: If False, prelu won't be applied, independently of params 27 | :param apply_batch_normalization: If False, batch normalization won't be applied, independently of params 28 | :param apply_l1: If False, l1 normalization won't be applied, independently of params 29 | :param apply_l2: If False, l2 normalization won't be applied, independently of params 30 | :return: Regularized layer 31 | """ 32 | shared_layers_list = [] 33 | 34 | if apply_noise and params.get('USE_NOISE', False): 35 | shared_layers_list.append(GaussianNoise(params.get('NOISE_AMOUNT', 0.01), name=name + '_gaussian_noise')) 36 | 37 | if apply_batch_normalization and params.get('USE_BATCH_NORMALIZATION', False): 38 | if params.get('WEIGHT_DECAY'): 39 | l2_gamma_reg = l2(params['WEIGHT_DECAY']) 40 | l2_beta_reg = l2(params['WEIGHT_DECAY']) 41 | else: 42 | l2_gamma_reg = None 43 | l2_beta_reg = None 44 | 45 | bn_mode = params.get('BATCH_NORMALIZATION_MODE', 0) 46 | 47 | shared_layers_list.append(BatchNormalization(mode=bn_mode, 48 | gamma_regularizer=l2_gamma_reg, 49 | beta_regularizer=l2_beta_reg, 50 | name=name + '_batch_normalization')) 51 | 52 | if apply_prelu and params.get('USE_PRELU', False): 53 | shared_layers_list.append(PReLU(name=name + '_PReLU')) 54 | 55 | if apply_dropout and params.get('DROPOUT_P', 0) > 0: 56 | shared_layers_list.append(Dropout(params.get('DROPOUT_P', 0.5), name=name + '_dropout')) 57 | 58 | if apply_l1 and params.get('USE_L1', False): 59 | shared_layers_list.append(Lambda(L1_norm, name=name + '_L1_norm')) 60 | 61 | if apply_l2 and params.get('USE_L2', False): 62 | shared_layers_list.append(Lambda(L2_norm, name=name + '_L2_norm')) 63 | 64 | # Apply all the previously built shared layers 65 | for l in shared_layers_list: 66 | layer = l(layer) 67 | result = layer 68 | 69 | # Return result or shared layers too 70 | if shared_layers: 71 | return result, shared_layers_list 72 | return result 73 | -------------------------------------------------------------------------------- 
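For reference, a small usage sketch of the Regularize helper defined in regularize.py above: it takes a Keras tensor and a parameter dictionary and chains the requested regularization layers onto it. The parameter values and layer names below are illustrative assumptions, not taken from the repository, and the imports only work with the custom Keras fork this project targets (stock Keras does not provide ChannelWisePReLU or the mode argument of BatchNormalization).

# Illustrative sketch under the assumptions noted above; not code from the repository.
from keras.layers import Input, Dense
from keras_wrapper.extra.regularize import Regularize

params = {'USE_NOISE': True,                # prepend GaussianNoise
          'NOISE_AMOUNT': 0.01,
          'USE_BATCH_NORMALIZATION': True,  # then BatchNormalization
          'DROPOUT_P': 0.5,                 # then Dropout
          'WEIGHT_DECAY': 1e-4}             # L2 on the batch-norm gamma/beta weights

inp = Input(shape=(128,))
hidden = Dense(256, activation='relu', name='hidden')(inp)
# Returns `hidden` with noise -> batch normalization -> dropout applied,
# each regularization layer being named 'hidden_<regularizer>'.
hidden = Regularize(hidden, params, name='hidden')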
/keras_wrapper/search.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import copy 3 | import numpy as np 4 | 5 | try: 6 | import cupy as cp 7 | 8 | cupy = True 9 | except: 10 | import numpy as cp 11 | 12 | cupy = False 13 | 14 | 15 | def beam_search(model, 16 | X, 17 | params, 18 | return_alphas=False, 19 | eos_sym=0, 20 | null_sym=2, 21 | model_ensemble=False, 22 | n_models=0): 23 | """ 24 | Beam search method for Cond models. 25 | (https://en.wikibooks.org/wiki/Artificial_Intelligence/Search/Heuristic_search/Beam_search) 26 | The algorithm in a nutshell does the following: 27 | 28 | 1. k = beam_size 29 | 2. open_nodes = [[]] * k 30 | 3. while k > 0: 31 | 32 | 3.1. Given the inputs, get (log) probabilities for the outputs. 33 | 34 | 3.2. Expand each open node with all possible output. 35 | 36 | 3.3. Prune and keep the k best nodes. 37 | 38 | 3.4. If a sample has reached the symbol: 39 | 40 | 3.4.1. Mark it as final sample. 41 | 42 | 3.4.2. k -= 1 43 | 44 | 3.5. Build new inputs (state_below) and go to 1. 45 | 46 | 4. return final_samples, final_scores 47 | :param model: Model to use 48 | :param X: Model inputs 49 | :param params: Search parameters 50 | :param return_alphas: Whether we should return attention weights or not. 51 | :param eos_sym: symbol 52 | :param null_sym: symbol 53 | :param model_ensemble: Whether we are using several models in an ensemble 54 | :param n_models; Number of models in the ensemble. 55 | :return: UNSORTED list of [k_best_samples, k_best_scores] (k: beam size) 56 | """ 57 | k = params['beam_size'] 58 | samples = [] 59 | sample_scores = [] 60 | pad_on_batch = params['pad_on_batch'] 61 | dead_k = 0 # samples that reached eos 62 | live_k = 1 # samples that did not yet reach eos 63 | hyp_samples = [[]] * live_k 64 | hyp_scores = cp.zeros(live_k, dtype='float32') 65 | ret_alphas = return_alphas or params['pos_unk'] 66 | if ret_alphas: 67 | sample_alphas = [] 68 | hyp_alphas = [[]] * live_k 69 | if pad_on_batch: 70 | maxlen = int(len(X[params['dataset_inputs'][0]][0]) * params['output_max_length_depending_on_x_factor']) if \ 71 | params['output_max_length_depending_on_x'] else params['maxlen'] 72 | minlen = int( 73 | len(X[params['dataset_inputs'][0]][0]) / params['output_min_length_depending_on_x_factor'] + 1e-7) if \ 74 | params['output_min_length_depending_on_x'] else 0 75 | else: 76 | minlen = int(np.argmax(X[params['dataset_inputs'][0]][0] == eos_sym) / 77 | params['output_min_length_depending_on_x_factor'] + 1e-7) if \ 78 | params['output_min_length_depending_on_x'] else 0 79 | 80 | maxlen = int(np.argmax(X[params['dataset_inputs'][0]][0] == eos_sym) * params[ 81 | 'output_max_length_depending_on_x_factor']) if \ 82 | params['output_max_length_depending_on_x'] else params['maxlen'] 83 | maxlen = min(params['state_below_maxlen'] - 1, maxlen) 84 | 85 | # we must include an additional dimension if the input for each timestep are all the generated "words_so_far" 86 | if params['words_so_far']: 87 | if k > maxlen: 88 | raise NotImplementedError( 89 | "BEAM_SIZE can't be higher than MAX_OUTPUT_TEXT_LEN on the current implementation.") 90 | state_below = np.asarray([[null_sym]] * live_k) if pad_on_batch else np.asarray( 91 | [np.zeros((maxlen, maxlen))] * live_k) 92 | else: 93 | state_below = np.asarray([null_sym] * live_k) if pad_on_batch else np.asarray( 94 | [np.zeros(params['state_below_maxlen']) + null_sym] * live_k) 95 | prev_out = [None] * n_models if model_ensemble else None 96 | 97 | for ii 
in range(maxlen): 98 | # for every possible live sample calc prob for every possible label 99 | if params['optimized_search']: # use optimized search model if available 100 | if model_ensemble: 101 | [probs, prev_out, alphas] = model.predict_cond_optimized(X, state_below, params, ii, prev_out) 102 | else: 103 | [probs, prev_out] = model.predict_cond_optimized(X, state_below, params, ii, prev_out) 104 | if ret_alphas: 105 | alphas = prev_out[-1][0] # Shape: (k, n_steps) 106 | prev_out = prev_out[:-1] 107 | else: 108 | probs = model.predict_cond(X, state_below, params, ii) 109 | log_probs = cp.log(probs) 110 | if minlen > 0 and ii < minlen: 111 | log_probs[:, eos_sym] = -cp.inf 112 | # total score for every sample is sum of -log of word prb 113 | cand_scores = hyp_scores[:, None] - log_probs 114 | cand_flat = cand_scores.flatten() 115 | # Find the best options by calling argsort of flatten array 116 | ranks_flat = cp.argsort(cand_flat)[:(k - dead_k)] 117 | # Decypher flatten indices 118 | voc_size = log_probs.shape[1] 119 | trans_indices = ranks_flat // voc_size # index of row 120 | word_indices = ranks_flat % voc_size # index of col 121 | costs = cand_flat[ranks_flat] 122 | best_cost = costs[0] 123 | if cupy: 124 | trans_indices = cp.asnumpy(trans_indices) 125 | word_indices = cp.asnumpy(word_indices) 126 | if ret_alphas: 127 | alphas = cp.asnumpy(alphas) 128 | 129 | # Form a beam for the next iteration 130 | new_hyp_samples = [] 131 | new_trans_indices = [] 132 | new_hyp_scores = cp.zeros(k - dead_k, dtype='float32') 133 | if ret_alphas: 134 | new_hyp_alphas = [] 135 | for idx, [ti, wi] in list(enumerate(zip(trans_indices, word_indices))): 136 | if params['search_pruning']: 137 | if costs[idx] < k * best_cost: 138 | new_hyp_samples.append(hyp_samples[ti] + [wi]) 139 | new_trans_indices.append(ti) 140 | new_hyp_scores[idx] = copy.copy(costs[idx]) 141 | if ret_alphas: 142 | new_hyp_alphas.append(hyp_alphas[ti] + [alphas[ti]]) 143 | else: 144 | dead_k += 1 145 | else: 146 | new_hyp_samples.append(hyp_samples[ti] + [wi]) 147 | new_trans_indices.append(ti) 148 | new_hyp_scores[idx] = copy.copy(costs[idx]) 149 | if ret_alphas: 150 | new_hyp_alphas.append(hyp_alphas[ti] + [alphas[ti]]) 151 | # check the finished samples 152 | new_live_k = 0 153 | hyp_samples = [] 154 | hyp_scores = [] 155 | hyp_alphas = [] 156 | indices_alive = [] 157 | for idx in range(len(new_hyp_samples)): 158 | if new_hyp_samples[idx][-1] == eos_sym: # finished sample 159 | samples.append(new_hyp_samples[idx]) 160 | sample_scores.append(new_hyp_scores[idx]) 161 | if ret_alphas: 162 | sample_alphas.append(new_hyp_alphas[idx]) 163 | dead_k += 1 164 | else: 165 | indices_alive.append(new_trans_indices[idx]) 166 | new_live_k += 1 167 | hyp_samples.append(new_hyp_samples[idx]) 168 | hyp_scores.append(new_hyp_scores[idx]) 169 | if ret_alphas: 170 | hyp_alphas.append(new_hyp_alphas[idx]) 171 | hyp_scores = cp.array(np.asarray(hyp_scores, dtype='float32'), dtype='float32') 172 | live_k = new_live_k 173 | 174 | if new_live_k < 1: 175 | break 176 | if dead_k >= k: 177 | break 178 | state_below = np.asarray(hyp_samples, dtype='int64') 179 | 180 | state_below = np.hstack((np.zeros((state_below.shape[0], 1), dtype='int64') + null_sym, state_below)) \ 181 | if pad_on_batch else \ 182 | np.hstack((np.zeros((state_below.shape[0], 1), dtype='int64') + null_sym, 183 | state_below, 184 | np.zeros((state_below.shape[0], 185 | max(params['state_below_maxlen'] - state_below.shape[1] - 1, 0)), dtype='int64'))) 186 | 187 | # we must include an 
additional dimension if the input for each timestep are all the generated words so far 188 | if params['words_so_far']: 189 | state_below = np.expand_dims(state_below, axis=0) 190 | 191 | if params['optimized_search'] and ii > 0: 192 | # filter next search inputs w.r.t. remaining samples 193 | if model_ensemble: 194 | for n_model in range(n_models): 195 | # filter next search inputs w.r.t. remaining samples 196 | for idx_vars in range(len(prev_out[n_model])): 197 | prev_out[n_model][idx_vars] = prev_out[n_model][idx_vars][indices_alive] 198 | else: 199 | for idx_vars in range(len(prev_out)): 200 | prev_out[idx_vars] = prev_out[idx_vars][indices_alive] 201 | 202 | # dump every remaining one 203 | if live_k > 0: 204 | for idx in range(live_k): 205 | samples.append(hyp_samples[idx]) 206 | sample_scores.append(hyp_scores[idx]) 207 | if ret_alphas: 208 | sample_alphas.append(hyp_alphas[idx]) 209 | 210 | alphas = np.asarray(sample_alphas) if ret_alphas else None 211 | return samples, np.asarray(sample_scores, dtype='float32'), alphas 212 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | # Configuration of py.test 2 | [pytest] 3 | addopts=-v 4 | # -n 2 5 | # --durations=10 6 | 7 | # PEP-8 The following are ignored: 8 | # E501 line too long (82 > 79 characters) 9 | # E402 module level import not at top of file - temporary measure to continue adding ros python packaged in sys.path 10 | # E731 do not assign a lambda expression, use a def 11 | 12 | pep8ignore=* E501 \ 13 | * E402 \ 14 | * E731 \ 15 | * E211 16 | 17 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | six 3 | toolz 4 | cloudpickle 5 | matplotlib 6 | sacremoses 7 | sacrebleu 8 | scipy 9 | future 10 | cython 11 | keras_applications 12 | keras_preprocessing 13 | sklearn 14 | tables -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from setuptools import setup 3 | from setuptools import find_packages 4 | 5 | setup(name='multimodal-keras-wrapper', 6 | version='3.1.6', 7 | description='Wrapper for Keras with support to easy multimodal data and models loading and handling.', 8 | author='Marc Bolaños - Alvaro Peris', 9 | author_email='marc.bolanos@ub.edu', 10 | url='https://github.com/MarcBS/multimodal_keras_wrapper', 11 | download_url='https://github.com/MarcBS/multimodal_keras_wrapper/archive/master.zip', 12 | license='MIT', 13 | classifiers=[ 14 | 'Intended Audience :: Developers', 15 | 'Intended Audience :: Education', 16 | 'Intended Audience :: Science/Research', 17 | 'Programming Language :: Python :: 2', 18 | 'Programming Language :: Python :: 2.7', 19 | 'Programming Language :: Python :: 3', 20 | 'Programming Language :: Python :: 3.6', 21 | 'Programming Language :: Python :: 3.7', 22 | 'Topic :: Software Development :: Libraries', 23 | 'Topic :: Software Development :: Libraries :: Python Modules', 24 | "License :: OSI Approved :: MIT License" 25 | ], 26 | install_requires=['keras', 27 | 
'numpy', 28 | 'six', 29 | 'toolz', 30 | 'cloudpickle', 31 | 'matplotlib', 32 | 'sacremoses', 33 | 'sacrebleu', 34 | 'scipy', 35 | 'subword-nmt', 36 | 'future', 37 | 'cython', 38 | 'keras_applications', 39 | 'keras_preprocessing', 40 | 'sklearn', 41 | 'tables' 42 | ], 43 | extras_require={ 44 | 'cython ': ['cython'], 45 | 'tests': ['pytest', 46 | 'pytest-pep8', 47 | 'pytest-xdist', 48 | 'flaky', 49 | 'pytest-cov', 50 | 'requests', 51 | 'markdown'], 52 | }, 53 | packages=find_packages()) 54 | -------------------------------------------------------------------------------- /sphinx/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = ../docs 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 21 | 22 | .PHONY: help 23 | help: 24 | @echo "Please use \`make ' where is one of" 25 | @echo " html to make standalone HTML files" 26 | @echo " dirhtml to make HTML files named index.html in directories" 27 | @echo " singlehtml to make a single large HTML file" 28 | @echo " pickle to make pickle files" 29 | @echo " json to make JSON files" 30 | @echo " htmlhelp to make HTML files and a HTML help project" 31 | @echo " qthelp to make HTML files and a qthelp project" 32 | @echo " applehelp to make an Apple Help Book" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " epub3 to make an epub3" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | @echo " dummy to check syntax errors of document sources" 51 | 52 | .PHONY: clean 53 | clean: 54 | rm -rf $(BUILDDIR)/* 55 | 56 | .PHONY: html 57 | html: 58 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR) 59 | @echo 60 | @echo "Build finished. 
The HTML pages are in $(BUILDDIR)." 61 | 62 | .PHONY: dirhtml 63 | dirhtml: 64 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 65 | @echo 66 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 67 | 68 | .PHONY: singlehtml 69 | singlehtml: 70 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 71 | @echo 72 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 73 | 74 | .PHONY: pickle 75 | pickle: 76 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 77 | @echo 78 | @echo "Build finished; now you can process the pickle files." 79 | 80 | .PHONY: json 81 | json: 82 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 83 | @echo 84 | @echo "Build finished; now you can process the JSON files." 85 | 86 | .PHONY: htmlhelp 87 | htmlhelp: 88 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 89 | @echo 90 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 91 | ".hhp project file in $(BUILDDIR)/htmlhelp." 92 | 93 | .PHONY: qthelp 94 | qthelp: 95 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 96 | @echo 97 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 98 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 99 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/staged_keras_wrapper.qhcp" 100 | @echo "To view the help file:" 101 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/staged_keras_wrapper.qhc" 102 | 103 | .PHONY: applehelp 104 | applehelp: 105 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 106 | @echo 107 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 108 | @echo "N.B. You won't be able to view it unless you put it in" \ 109 | "~/Library/Documentation/Help or install it in your application" \ 110 | "bundle." 111 | 112 | .PHONY: devhelp 113 | devhelp: 114 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 115 | @echo 116 | @echo "Build finished." 117 | @echo "To view the help file:" 118 | @echo "# mkdir -p $$HOME/.local/share/devhelp/staged_keras_wrapper" 119 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/staged_keras_wrapper" 120 | @echo "# devhelp" 121 | 122 | .PHONY: epub 123 | epub: 124 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 125 | @echo 126 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 127 | 128 | .PHONY: epub3 129 | epub3: 130 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 131 | @echo 132 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 133 | 134 | .PHONY: latex 135 | latex: 136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 137 | @echo 138 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 139 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 140 | "(use \`make latexpdf' here to do that automatically)." 141 | 142 | .PHONY: latexpdf 143 | latexpdf: 144 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 145 | @echo "Running LaTeX files through pdflatex..." 146 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 147 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 148 | 149 | .PHONY: latexpdfja 150 | latexpdfja: 151 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 152 | @echo "Running LaTeX files through platex and dvipdfmx..." 153 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 154 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
155 | 156 | .PHONY: text 157 | text: 158 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 159 | @echo 160 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 161 | 162 | .PHONY: man 163 | man: 164 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 165 | @echo 166 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 167 | 168 | .PHONY: texinfo 169 | texinfo: 170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 171 | @echo 172 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 173 | @echo "Run \`make' in that directory to run these through makeinfo" \ 174 | "(use \`make info' here to do that automatically)." 175 | 176 | .PHONY: info 177 | info: 178 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 179 | @echo "Running Texinfo files through makeinfo..." 180 | make -C $(BUILDDIR)/texinfo info 181 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 182 | 183 | .PHONY: gettext 184 | gettext: 185 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 186 | @echo 187 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 188 | 189 | .PHONY: changes 190 | changes: 191 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 192 | @echo 193 | @echo "The overview file is in $(BUILDDIR)/changes." 194 | 195 | .PHONY: linkcheck 196 | linkcheck: 197 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 198 | @echo 199 | @echo "Link check complete; look for any errors in the above output " \ 200 | "or in $(BUILDDIR)/linkcheck/output.txt." 201 | 202 | .PHONY: doctest 203 | doctest: 204 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 205 | @echo "Testing of doctests in the sources finished, look at the " \ 206 | "results in $(BUILDDIR)/doctest/output.txt." 207 | 208 | .PHONY: coverage 209 | coverage: 210 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 211 | @echo "Testing of coverage in the sources finished, look at the " \ 212 | "results in $(BUILDDIR)/coverage/python.txt." 213 | 214 | .PHONY: xml 215 | xml: 216 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 217 | @echo 218 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 219 | 220 | .PHONY: pseudoxml 221 | pseudoxml: 222 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 223 | @echo 224 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 225 | 226 | .PHONY: dummy 227 | dummy: 228 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 229 | @echo 230 | @echo "Build finished. Dummy builder generates no files." 231 | 232 | -------------------------------------------------------------------------------- /sphinx/_ext/edit_on_github.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sphinx extension to add ReadTheDocs-style "Edit on GitHub" links to the 3 | sidebar. 
4 | Loosely based on https://github.com/astropy/astropy/pull/347 5 | """ 6 | 7 | import os 8 | import warnings 9 | 10 | __licence__ = 'BSD (3 clause)' 11 | 12 | 13 | def get_github_url(app, view, path): 14 | return 'https://github.com/{project}/{view}/{branch}/{path}'.format( 15 | project=app.config.edit_on_github_project, 16 | view=view, 17 | branch=app.config.edit_on_github_branch, 18 | path=path) 19 | 20 | 21 | def html_page_context(app, pagename, templatename, context, doctree): 22 | if templatename != 'page.html': 23 | return 24 | 25 | if not app.config.edit_on_github_project: 26 | warnings.warn("edit_on_github_project not specified") 27 | return 28 | 29 | path = os.path.relpath(doctree.get('source'), app.builder.srcdir) 30 | show_url = get_github_url(app, 'blob', path) 31 | edit_url = get_github_url(app, 'edit', path) 32 | 33 | context['show_on_github_url'] = show_url 34 | context['edit_on_github_url'] = edit_url 35 | 36 | 37 | def setup(app): 38 | app.add_config_value('edit_on_github_project', '', True) 39 | app.add_config_value('edit_on_github_branch', 'master', True) 40 | app.connect('html-page-context', html_page_context) 41 | -------------------------------------------------------------------------------- /sphinx/_templates/sourcelink.html: -------------------------------------------------------------------------------- 1 | {%- if show_source and has_source and sourcename %} 2 |

 {{ _('This Page') }}   [template lines 3-14: sidebar link list (built from show_on_github_url / edit_on_github_url) stripped as HTML markup in this plain-text rendering]

3 | 15 | {%- endif %} 16 | -------------------------------------------------------------------------------- /sphinx/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # multimodal_keras_wrapper documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Apr 26 10:43:19 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | from recommonmark.parser import CommonMarkParser 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | # sys.path.insert(0, os.path.abspath('.')) 23 | sys.path.insert(0, os.path.abspath('../../')) 24 | sys.path.insert(0, os.path.abspath('../_ext')) 25 | 26 | # -- General configuration ------------------------------------------------ 27 | 28 | # If your documentation needs a minimal Sphinx version, state it here. 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [ 35 | 'sphinx.ext.doctest', 36 | 'sphinx.ext.ifconfig', 37 | 'sphinx.ext.autodoc', 38 | 'edit_on_github' 39 | ] 40 | 41 | edit_on_github_project = 'MarcBS/multimodal_keras_wrapper' 42 | edit_on_github_branch = 'master' 43 | 44 | # Add any paths that contain templates here, relative to this directory. 45 | templates_path = ['_templates'] 46 | 47 | source_parsers = { 48 | '.md': CommonMarkParser, 49 | } 50 | 51 | # The suffix(es) of source filenames. 52 | # You can specify multiple suffix as a list of string: 53 | source_suffix = ['.rst', '.md'] 54 | # source_suffix = '.rst' 55 | 56 | # The encoding of source files. 57 | # source_encoding = 'utf-8-sig' 58 | 59 | # The master toctree document. 60 | master_doc = 'index' 61 | 62 | # General information about the project. 63 | project = u'Multimodal Keras Wrapper' 64 | copyright = u'2016, Marc Bolaños' 65 | author = u'Marc Bolaños' 66 | 67 | # The version info for the project you're documenting, acts as replacement for 68 | # |version| and |release|, also used in various other places throughout the 69 | # built documents. 70 | # 71 | # The short X.Y version. 72 | version = u'0.55' 73 | # The full version, including alpha/beta/rc tags. 74 | release = u'0.55' 75 | 76 | # The language for content autogenerated by Sphinx. Refer to documentation 77 | # for a list of supported languages. 78 | # 79 | # This is also used if you do content translation via gettext catalogs. 80 | # Usually you set "language" from the command line for these cases. 81 | language = None 82 | 83 | # There are two options for replacing |today|: either, you set today to some 84 | # non-false value, then it is used: 85 | # today = '' 86 | # Else, today_fmt is used as the format for a strftime call. 87 | # today_fmt = '%B %d, %Y' 88 | 89 | # List of patterns, relative to source directory, that match files and 90 | # directories to ignore when looking for source files. 
91 | # This patterns also effect to html_static_path and html_extra_path 92 | exclude_patterns = [] 93 | 94 | # The reST default role (used for this markup: `text`) to use for all 95 | # documents. 96 | # default_role = None 97 | 98 | # If true, '()' will be appended to :func: etc. cross-reference text. 99 | # add_function_parentheses = True 100 | 101 | # If true, the current module name will be prepended to all description 102 | # unit titles (such as .. function::). 103 | # add_module_names = True 104 | 105 | # If true, sectionauthor and moduleauthor directives will be shown in the 106 | # output. They are ignored by default. 107 | # show_authors = False 108 | 109 | # The name of the Pygments (syntax highlighting) style to use. 110 | pygments_style = 'sphinx' 111 | 112 | # A list of ignored prefixes for module index sorting. 113 | # modindex_common_prefix = [] 114 | 115 | # If true, keep warnings as "system message" paragraphs in the built documents. 116 | # keep_warnings = False 117 | 118 | # If true, `todo` and `todoList` produce output, else they produce nothing. 119 | todo_include_todos = False 120 | 121 | # -- Options for HTML output ---------------------------------------------- 122 | 123 | # The theme to use for HTML and HTML Help pages. See the documentation for 124 | # a list of builtin themes. 125 | # html_theme = 'alabaster' 126 | html_theme = 'sphinx_rtd_theme' 127 | 128 | # Theme options are theme-specific and customize the look and feel of a theme 129 | # further. For a list of options available for each theme, see the 130 | # documentation. 131 | # html_theme_options = {} 132 | 133 | # Add any paths that contain custom themes here, relative to this directory. 134 | html_theme_path = ["_themes", ] 135 | 136 | # The name for this set of Sphinx documents. 137 | # " v documentation" by default. 138 | # html_title = u'multimodal_keras_wrapper v0.1' 139 | 140 | # A shorter title for the navigation bar. Default is the same as html_title. 141 | # html_short_title = None 142 | 143 | # The name of an image file (relative to this directory) to place at the top 144 | # of the sidebar. 145 | # html_logo = None 146 | 147 | # The name of an image file (relative to this directory) to use as a favicon of 148 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 149 | # pixels large. 150 | # html_favicon = None 151 | 152 | # Add any paths that contain custom static files (such as style sheets) here, 153 | # relative to this directory. They are copied after the builtin static files, 154 | # so a file named "default.css" will overwrite the builtin "default.css". 155 | html_static_path = ['_static'] 156 | 157 | # Add any extra paths that contain custom files (such as robots.txt or 158 | # .htaccess) here, relative to this directory. These files are copied 159 | # directly to the root of the documentation. 160 | # html_extra_path = [] 161 | 162 | # If not None, a 'Last updated on:' timestamp is inserted at every page 163 | # bottom, using the given strftime format. 164 | # The empty string is equivalent to '%b %d, %Y'. 165 | # html_last_updated_fmt = None 166 | 167 | # If true, SmartyPants will be used to convert quotes and dashes to 168 | # typographically correct entities. 169 | # html_use_smartypants = True 170 | 171 | # Custom sidebar templates, maps document names to template names. 172 | # html_sidebars = {} 173 | 174 | # Additional templates that should be rendered to pages, maps page names to 175 | # template names. 
176 | # html_additional_pages = {} 177 | 178 | # If false, no module index is generated. 179 | # html_domain_indices = True 180 | 181 | # If false, no index is generated. 182 | # html_use_index = True 183 | 184 | # If true, the index is split into individual pages for each letter. 185 | # html_split_index = False 186 | 187 | # If true, links to the reST sources are added to the pages. 188 | # html_show_sourcelink = True 189 | 190 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 191 | # html_show_sphinx = True 192 | 193 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 194 | # html_show_copyright = True 195 | 196 | # If true, an OpenSearch description file will be output, and all pages will 197 | # contain a tag referring to it. The value of this option must be the 198 | # base URL from which the finished HTML is served. 199 | # html_use_opensearch = '' 200 | 201 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 202 | # html_file_suffix = None 203 | 204 | # Language to be used for generating the HTML full-text search index. 205 | # Sphinx supports the following languages: 206 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 207 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 208 | # html_search_language = 'en' 209 | 210 | # A dictionary with options for the search language support, empty by default. 211 | # 'ja' uses this config value. 212 | # 'zh' user can custom change `jieba` dictionary path. 213 | # html_search_options = {'type': 'default'} 214 | 215 | # The name of a javascript file (relative to the configuration directory) that 216 | # implements a search results scorer. If empty, the default will be used. 217 | # html_search_scorer = 'scorer.js' 218 | 219 | # Output file base name for HTML help builder. 220 | htmlhelp_basename = 'multimodal_keras_wrapperdoc' 221 | 222 | # -- Options for LaTeX output --------------------------------------------- 223 | 224 | latex_elements = { 225 | # The paper size ('letterpaper' or 'a4paper'). 226 | # 'papersize': 'letterpaper', 227 | 228 | # The font size ('10pt', '11pt' or '12pt'). 229 | # 'pointsize': '10pt', 230 | 231 | # Additional stuff for the LaTeX preamble. 232 | # 'preamble': '', 233 | 234 | # Latex figure (float) alignment 235 | # 'figure_align': 'htbp', 236 | } 237 | 238 | # Grouping the document tree into LaTeX files. List of tuples 239 | # (source start file, target name, title, 240 | # author, documentclass [howto, manual, or own class]). 241 | latex_documents = [ 242 | (master_doc, 'multimodal_keras_wrapper.tex', u'multimodal\\_keras\\_wrapper Documentation', 243 | u'Marc Bolaños', 'manual'), 244 | ] 245 | 246 | # The name of an image file (relative to this directory) to place at the top of 247 | # the title page. 248 | # latex_logo = None 249 | 250 | # For "manual" documents, if this is true, then toplevel headings are parts, 251 | # not chapters. 252 | # latex_use_parts = False 253 | 254 | # If true, show page references after internal links. 255 | # latex_show_pagerefs = False 256 | 257 | # If true, show URL addresses after external links. 258 | # latex_show_urls = False 259 | 260 | # Documents to append as an appendix to all manuals. 261 | # latex_appendices = [] 262 | 263 | # If false, no module index is generated. 264 | # latex_domain_indices = True 265 | 266 | 267 | # -- Options for manual page output --------------------------------------- 268 | 269 | # One entry per manual page. 
List of tuples 270 | # (source start file, name, description, authors, manual section). 271 | man_pages = [ 272 | (master_doc, 'multimodal_keras_wrapper', u'multimodal_keras_wrapper Documentation', 273 | [author], 1) 274 | ] 275 | 276 | # If true, show URL addresses after external links. 277 | # man_show_urls = False 278 | 279 | 280 | # -- Options for Texinfo output ------------------------------------------- 281 | 282 | # Grouping the document tree into Texinfo files. List of tuples 283 | # (source start file, target name, title, author, 284 | # dir menu entry, description, category) 285 | texinfo_documents = [ 286 | (master_doc, 'multimodal_keras_wrapper', u'multimodal_keras_wrapper Documentation', 287 | author, 'multimodal_keras_wrapper', 'One line description of project.', 288 | 'Miscellaneous'), 289 | ] 290 | 291 | # Documents to append as an appendix to all manuals. 292 | # texinfo_appendices = [] 293 | 294 | # If false, no module index is generated. 295 | # texinfo_domain_indices = True 296 | 297 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 298 | # texinfo_show_urls = 'footnote' 299 | 300 | # If true, do not generate a @detailmenu in the "Top" node's menu. 301 | # texinfo_no_detailmenu = False 302 | 303 | 304 | # -- Options for Epub output ---------------------------------------------- 305 | 306 | # Bibliographic Dublin Core info. 307 | epub_title = project 308 | epub_author = author 309 | epub_publisher = author 310 | epub_copyright = copyright 311 | 312 | # The basename for the epub file. It defaults to the project name. 313 | # epub_basename = project 314 | 315 | # The HTML theme for the epub output. Since the default themes are not 316 | # optimized for small screen space, using the same theme for HTML and epub 317 | # output is usually not wise. This defaults to 'epub', a theme designed to save 318 | # visual space. 319 | # epub_theme = 'epub' 320 | 321 | # The language of the text. It defaults to the language option 322 | # or 'en' if the language is not set. 323 | # epub_language = '' 324 | 325 | # The scheme of the identifier. Typical schemes are ISBN or URL. 326 | # epub_scheme = '' 327 | 328 | # The unique identifier of the text. This can be a ISBN number 329 | # or the project homepage. 330 | # epub_identifier = '' 331 | 332 | # A unique identification for the text. 333 | # epub_uid = '' 334 | 335 | # A tuple containing the cover image and cover page html template filenames. 336 | # epub_cover = () 337 | 338 | # A sequence of (type, uri, title) tuples for the guide element of content.opf. 339 | # epub_guide = () 340 | 341 | # HTML files that should be inserted before the pages created by sphinx. 342 | # The format is a list of tuples containing the path and title. 343 | # epub_pre_files = [] 344 | 345 | # HTML files that should be inserted after the pages created by sphinx. 346 | # The format is a list of tuples containing the path and title. 347 | # epub_post_files = [] 348 | 349 | # A list of files that should not be packed into the epub file. 350 | epub_exclude_files = ['search.html'] 351 | 352 | # The depth of the table of contents in toc.ncx. 353 | # epub_tocdepth = 3 354 | 355 | # Allow duplicate toc entries. 356 | # epub_tocdup = True 357 | 358 | # Choose between 'default' and 'includehidden'. 359 | # epub_tocscope = 'default' 360 | 361 | # Fix unsupported image types using the Pillow. 362 | # epub_fix_images = False 363 | 364 | # Scale large images. 
365 | # epub_max_image_width = 0 366 | 367 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 368 | # epub_show_urls = 'inline' 369 | 370 | # If false, no index is generated. 371 | # epub_use_index = True 372 | -------------------------------------------------------------------------------- /sphinx/source/index.rst: -------------------------------------------------------------------------------- 1 | .. multimodal_keras_wrapper documentation master file, created by 2 | sphinx-quickstart on Tue Apr 26 10:43:19 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Multimodal Keras Wrapper's documentation! 7 | ====================================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | intro.md 15 | tutorial.md 16 | modules.rst 17 | 18 | 19 | Indices and tables 20 | ==================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | 26 | -------------------------------------------------------------------------------- /sphinx/source/intro.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | ## Multimodal Keras Wrapper 4 | Wrapper for Keras with support to easy multimodal data and models loading and handling. 5 | 6 | You can download and contribute to the code downloading [this repository](https://github.com/MarcBS/multimodal_keras_wrapper). 7 | 8 | 9 | ## Documentation 10 | 11 | You can access the library documentation page at [marcbs.github.io/multimodal_keras_wrapper/](http://marcbs.github.io/multimodal_keras_wrapper/) 12 | 13 | Some code examples are available in demo.ipynb and test.py. Additionally, in the section Projects you can see some practical examples of projects using this library. 14 | 15 | 16 | ## Dependencies 17 | 18 | The following dependencies are required for using this library: 19 | 20 | - [Anaconda](https://www.continuum.io/downloads) 21 | - Keras - [custom fork](https://github.com/MarcBS/keras) or [original version](https://github.com/fchollet/keras) 22 | - [cloud](https://pypi.python.org/pypi/cloud/2.8.5) >= 2.8.5 23 | - [scipy](https://pypi.python.org/pypi/scipy/0.7.0) 24 | 25 | Only when using NMS for certain localization utilities: 26 | - [cython](https://pypi.python.org/pypi/Cython/0.25.2) >= 0.23.4 27 | 28 | ## Installation 29 | 30 | In order to install the library you just have to follow these steps: 31 | 32 | 1) Clone this repository: 33 | ``` 34 | git clone https://github.com/MarcBS/multimodal_keras_wrapper.git 35 | ``` 36 | 37 | 2) Include the repository path into your PYTHONPATH: 38 | ``` 39 | export PYTHONPATH=$PYTHONPATH:/path/to/multimodal_keras_wrapper 40 | ``` 41 | 42 | 3) If you wish to install the dependencies (it will install our [custom Keras fork](https://github.com/MarcBS/keras)): 43 | ``` 44 | pip install -r requirements.txt 45 | ``` 46 | 47 | ## Projects 48 | 49 | You can see more practical examples in projects which use this library: 50 | 51 | [VIBIKNet for Visual Question Answering](https://github.com/MarcBS/VIBIKNet) 52 | 53 | [ABiViRNet for Video Description](https://github.com/lvapeab/ABiViRNet) 54 | 55 | [Sentence-SelectioNN for Domain Adaptation in SMT](https://github.com/lvapeab/sentence-selectioNN) 56 | 57 | 58 | ## Keras 59 | 60 | For additional information on the Deep Learning library, visit the official web page www.keras.io or the GitHub repository https://github.com/fchollet/keras. 
61 | 62 | You can also use our [custom Keras version](https://github.com/MarcBS/keras), which provides several additional layers for Multimodal Learning. 63 | -------------------------------------------------------------------------------- /sphinx/source/modules.rst: -------------------------------------------------------------------------------- 1 | Available Modules 2 | ************************** 3 | 4 | List of all files, classes and methods available in the library. 5 | 6 | 7 | dataset.py 8 | ============================= 9 | 10 | .. automodule:: keras_wrapper.dataset 11 | :members: 12 | 13 | 14 | cnn_model.py 15 | ============================= 16 | 17 | .. automodule:: keras_wrapper.cnn_model 18 | :members: 19 | 20 | 21 | callbacks_keras_wrapper.py 22 | ============================= 23 | 24 | .. automodule:: keras_wrapper.callbacks_keras_wrapper 25 | :members: 26 | 27 | 28 | beam_search_ensemble.py 29 | ============================= 30 | 31 | .. automodule:: keras_wrapper.beam_search_ensemble 32 | :members: 33 | 34 | 35 | utils.py 36 | ============================= 37 | 38 | .. automodule:: keras_wrapper.utils 39 | :members: 40 | -------------------------------------------------------------------------------- /sphinx/source/tutorial.md: -------------------------------------------------------------------------------- 1 | # Tutorial 2 | 3 | ## Basic components 4 | 5 | There are two basic components that have to be built in order to use the Multimodal Keras Wrapper, 6 | which are a **[Dataset](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/dataset.py#L331)** and a **[Model_Wrapper](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/cnn_model.py#L154)**. 7 | 8 | The class **Dataset** is in charge of: 9 | - Storing, preprocessing and loading any kind of data for training a model (inputs). 10 | - Storing, preprocessing and loading the ground truth associated to our data (outputs). 11 | - Loading the data in batches for training or prediction. 12 | 13 | The Datasets can manage different [types of input/output data](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/dataset.py#L389-L390), which can be summarized as: 14 | - input types: 'raw-image', 'video', 'image-features', 'video-features', 'text' 15 | - output types: 'categorical', 'binary', 'real', 'text', '3DLabel' 16 | 17 | Currently, the class Dataset can be used for multiple kinds of multimodal problems, 18 | e.g. image/video classification, detection, multilabel prediction, regression, image/video captioning, 19 | visual question answering, multimodal translation, neural machine translation, etc. 20 | 21 | The class **Model_Wrapper** is in charge of: 22 | - Storing an instance of a Keras' model. 23 | - Receiving the inputs/outputs of the class Dataset and using the model for training or prediction. 24 | - Providing two different methods for prediction. 
Either [predictNet()](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.predictNet), which uses a conventional Keras model for prediction, or [predictBeamSearchNet()](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.predictBeamSearchNet), which applies a BeamSearch for sequence generative models and additionally allows to create separate models **model_init** and **model_next** for applying an optimized prediction (see [this](https://github.com/MarcBS/multimodal_keras_wrapper/blob/b348ce9d52404434b1e98316c7f09b5d5fd3df00/keras_wrapper/cnn_model.py#L1319-L1328) and [this](https://github.com/MarcBS/multimodal_keras_wrapper/blob/f269207a65bfc77d5c2c89ea708bad8bff7f72ab/keras_wrapper/cnn_model.py#L1057) for further information). 25 | 26 | In this tutorial we will learn how to create each of the two basic components and how use a 27 | model for training and prediction. 28 | 29 | 30 | ## Creating a Dataset 31 | 32 | First, let's create a simple Dataset object with some sample data. 33 | The data used for this example can be obtained by executing `/repository_root/data/get_data.sh`. 34 | This will download the data used for this example into `/repository_root/data/sample_data`. 35 | 36 | 37 | Dataset parameters definition. 38 | 39 | ``` 40 | from keras_wrapper.dataset import Dataset 41 | 42 | dataset_name = 'test_dataset' 43 | image_id = 'input_image' 44 | label_id = 'output_label' 45 | images_size = [256, 256, 3] 46 | images_crop_size = [224, 224, 3] 47 | train_mean = [103.939, 116.779, 123.68] 48 | base_path = '/data/sample_data' 49 | ``` 50 | 51 | Empty dataset instance creation 52 | 53 | ``` 54 | ds = Dataset(dataset_name, base_path+'/images') 55 | ``` 56 | 57 | 58 | Insert dataset/model inputs 59 | 60 | ``` 61 | # train split 62 | ds.setInput(base_path + '/train.txt', 'train', 63 | type='raw-image', id=image_id, 64 | img_size=images_size, img_size_crop=images_crop_size) 65 | # val split 66 | ds.setInput(base_path + '/val.txt', 'val', 67 | type='raw-image', id=image_id, 68 | img_size=images_size, img_size_crop=images_crop_size) 69 | # test split 70 | ds.setInput(base_path + '/test.txt', 'test', 71 | type='raw-image', id=image_id, 72 | img_size=images_size, img_size_crop=images_crop_size) 73 | ``` 74 | 75 | Insert pre-calculated images train mean 76 | 77 | ``` 78 | ds.setTrainMean(train_mean, image_id) 79 | ``` 80 | 81 | Insert dataset/model outputs 82 | 83 | ``` 84 | # train split 85 | ds.setOutput(base_path+'/train_labels.txt', 'train', 86 | type='categorical', id=label_id) 87 | # val split 88 | ds.setOutput(base_path+'/val_labels.txt', 'val', 89 | type='categorical', id=label_id) 90 | # test split 91 | ds.setOutput(base_path+'/test_labels.txt', 'test', 92 | type='categorical', id=label_id) 93 | ``` 94 | 95 | ## Saving or loading a Dataset 96 | 97 | ``` 98 | from keras_wrapper.dataset import saveDataset, loadDataset 99 | 100 | save_path = '/Datasets' 101 | 102 | # Save dataset 103 | saveDataset(ds, save_path) 104 | 105 | # Load dataset 106 | ds = loadDataset(save_path+'/Dataset_'+dataset_name+'.pkl') 107 | ``` 108 | 109 | In addition, we can print some basic information of the data stored in the dataset: 110 | 111 | ``` 112 | print ds 113 | ``` 114 | 115 | ## Creating a Model_Wrapper 116 | 117 | Model_Wrapper parameters definition. 
118 | 119 | ``` 120 | from keras_wrapper.cnn_model import Model_Wrapper 121 | 122 | model_name = 'our_model' 123 | type = 'VGG_19_ImageNet' 124 | save_path = '/Models/' 125 | ``` 126 | 127 | Create a basic CNN model 128 | 129 | ``` 130 | net = Model_Wrapper(nOutput=2, type=type, model_name=model_name, input_shape=images_crop_size) 131 | net.setOptimizer(lr=0.001, metrics=['accuracy']) # compile it 132 | ``` 133 | 134 | By default, the model type built is the one defined in [Model_Wrapper.basic_model()](https://github.com/MarcBS/multimodal_keras_wrapper/blob/6d0b11248fd353cc189f674dc30beaf9689da182/keras_wrapper/cnn_model.py#L2003). 135 | However, any kind of custom model can be defined just by: 136 | - Defining a new method for the class Model_Wrapper which builds the model and stores it in self.model. 137 | - Referencing it with type='method_name' when creating a new Model_Wrapper instance. 138 | 139 | 140 | ## Saving or loading a Model_Wrapper 141 | 142 | ``` 143 | from keras_wrapper.cnn_model import saveModel, loadModel 144 | 145 | save_epoch = 0 146 | 147 | # Save model 148 | saveModel(net, save_epoch) 149 | 150 | # Load model 151 | net = loadModel(save_path+'/'+model_name, save_epoch) 152 | ``` 153 | 154 | 155 | ## Connecting a Dataset to a Model_Wrapper 156 | 157 | In order to ensure a correct communication between the Dataset and Model_Wrapper objects, we have to provide, as a dictionary, the links between the Dataset id positions and their corresponding layer identifiers in the Keras model. 158 | 159 | In this case we only have one input and one output; for this reason, both ids are mapped to position 0 of our Dataset. 160 | 161 | ``` 162 | net.setInputsMapping({net.ids_inputs[0]: 0}) 163 | net.setOutputsMapping({net.ids_outputs[0]: 0}) 164 | ``` 165 | 166 | 167 | ## Training 168 | 169 | We can specify several options for training our model, which are [summarized here](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.trainNet). For any of them that is not overridden, the [default values](https://github.com/MarcBS/multimodal_keras_wrapper/blob/011393580b2253a01c168d638b8c0bd06fe6d522/keras_wrapper/cnn_model.py#L454-L458) will be used. 170 | 171 | ``` 172 | train_overriden_parameters = {'n_epochs': 2, 'batch_size': 10} 173 | 174 | net.trainNet(ds, train_overriden_parameters) 175 | ``` 176 | 177 | ## Prediction 178 | 179 | The same applies to the prediction method. We can find the [available parameters here](http://marcbs.github.io/multimodal_keras_wrapper/modules.html#keras_wrapper.cnn_model.Model_Wrapper.predictNet) and the [default values here](https://github.com/MarcBS/multimodal_keras_wrapper/blob/011393580b2253a01c168d638b8c0bd06fe6d522/keras_wrapper/cnn_model.py#L1468-L1470). 180 | 181 | ``` 182 | predict_overriden_parameters = {'batch_size': 10, 'predict_on_sets': ['test']} 183 | 184 | net.predictNet(ds, predict_overriden_parameters) 185 | ``` -------------------------------------------------------------------------------- /tests/data/test_data.txt: -------------------------------------------------------------------------------- 1 | This is a text file. Containing characters of different encodings. 
2 | ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^ 3 | 首先 , 4 | -------------------------------------------------------------------------------- /tests/extra/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/tests/extra/__init__.py -------------------------------------------------------------------------------- /tests/extra/test_wrapper_callbacks.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from six import iteritems 3 | 4 | # TODO 5 | -------------------------------------------------------------------------------- /tests/extra/test_wrapper_evaluation.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from keras_wrapper.extra.evaluation import get_sacrebleu_score, get_coco_score, multilabel_metrics, get_perplexity 4 | 5 | 6 | def test_get_sacrebleu_score(): 7 | pred_list = ['Prediction 1 X W Z', 'Prediction 2 X W Z', 'Prediction 3 X W Z'] 8 | 9 | for tokenize_hypothesis in {True, False}: 10 | for tokenize_references in {True, False}: 11 | for apply_detokenization in {True, False}: 12 | extra_vars = {'val': {'references': {0: ['Prediction 1 X W Z', 'Prediction 5'], 13 | 1: ['Prediction 2 X W Z', 'X Y Z'], 14 | 2: ['Prediction 3 X W Z', 'Prediction 5']}, 15 | }, 16 | 17 | 'test': {'references': {0: ['Prediction 2 X W Z'], 18 | 1: ['Prediction 3 X W Z'], 19 | 2: ['Prediction 1 X W Z']} 20 | }, 21 | 'tokenize_hypothesis': tokenize_hypothesis, 22 | 'tokenize_references': tokenize_references, 23 | 'apply_detokenization': apply_detokenization, 24 | 'tokenize_f': lambda x: x, 25 | 'detokenize_f': lambda x: x, 26 | } 27 | val_scores = get_sacrebleu_score(pred_list, 0, extra_vars, 'val') 28 | assert np.allclose(val_scores['Bleu_4'], 100.0, atol=1e6) 29 | 30 | test_scores = get_sacrebleu_score(pred_list, 0, extra_vars, 'test') 31 | assert np.allclose(test_scores['Bleu_4'], 0., atol=1e6) 32 | 33 | 34 | def test_get_coco_score(): 35 | pred_list = ['Prediction 1', 'Prediction 2', 'Prediction 3'] 36 | extra_vars = {'val': {'references': {0: ['Prediction 1'], 1: ['Prediction 2'], 37 | 2: ['Prediction 3', 'Prediction 5']}}, 38 | 'test': {'references': {0: ['Prediction 2'], 1: ['Prediction 3'], 39 | 2: ['Prediction 1']}} 40 | } 41 | val_scores = get_coco_score(pred_list, 0, extra_vars, 'val') 42 | assert np.allclose(val_scores['Bleu_1'], 1.0, atol=1e6) 43 | assert np.allclose(val_scores['Bleu_2'], 1.0, atol=1e6) 44 | assert np.allclose(val_scores['Bleu_3'], 1.0, atol=1e6) 45 | assert np.allclose(val_scores['Bleu_4'], 1.0, atol=1e6) 46 | assert np.allclose(val_scores['ROUGE_L'], 1.0, atol=1e6) 47 | assert np.allclose(val_scores['CIDEr'], 5.0, atol=1e6) 48 | assert np.allclose(val_scores['TER'], 0., atol=1e6) 49 | assert np.allclose(val_scores['METEOR'], 1.0, atol=1e6) 50 | test_scores = get_coco_score(pred_list, 0, extra_vars, 'test') 51 | 52 | assert np.allclose(test_scores['Bleu_1'], 0.5, atol=1e6) 53 | assert np.allclose(test_scores['Bleu_2'], 0., atol=1e6) 54 | assert np.allclose(test_scores['Bleu_3'], 0., atol=1e6) 55 | assert np.allclose(test_scores['Bleu_4'], 0., atol=1e6) 56 | assert np.allclose(test_scores['ROUGE_L'], 0.5, atol=1e6) 57 | assert np.allclose(test_scores['CIDEr'], 0., atol=1e6) 58 | assert np.allclose(test_scores['TER'], 0.5, atol=1e6) 59 | assert np.allclose(test_scores['METEOR'], 0.2, atol=1e6) 60 | 61 | 62 | 
def test_multilabel_metrics(): 63 | pred_list = [['w1'], ['w2'], ['w3']] 64 | extra_vars = { 65 | 'val': {'references': [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]], 66 | 'word2idx': {'w1': 0, 'w2': 1, 'w3': 3, 'w4': 3, 'w5': 4} 67 | }, 68 | 'test': {'references': [[0, 0, 0, 0, 1], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], 69 | 'word2idx': {'w1': 0, 'w2': 1, 'w3': 2, 'w4': 3, 'w5': 4} 70 | } 71 | } 72 | val_scores = multilabel_metrics(pred_list, 0, extra_vars, 'val') 73 | 74 | assert np.allclose(val_scores['f1'], 0.66, atol=1e6) 75 | assert np.allclose(val_scores['recall'], 0.66, atol=1e6) 76 | assert np.allclose(val_scores['precision'], 0.66, atol=1e6) 77 | assert np.allclose(val_scores['ranking_loss'], 0.33, atol=1e6) 78 | assert np.allclose(val_scores['coverage_error'], 2.33, atol=1e6) 79 | assert np.allclose(val_scores['average_precision'], 0.73, atol=1e6) 80 | 81 | test_scores = multilabel_metrics(pred_list, 0, extra_vars, 'test') 82 | assert np.allclose(test_scores['f1'], 0.33, atol=1e6) 83 | assert np.allclose(test_scores['recall'], 0.33, atol=1e6) 84 | assert np.allclose(test_scores['precision'], 0.22, atol=1e6) 85 | assert np.allclose(test_scores['ranking_loss'], 0.66, atol=1e6) 86 | assert np.allclose(test_scores['coverage_error'], 3.66, atol=1e6) 87 | assert np.allclose(test_scores['average_precision'], 0.466, atol=1e6) 88 | 89 | 90 | def test_multiclass_metrics(): 91 | # TODO 92 | pass 93 | 94 | 95 | def test_compute_perplexity(): 96 | costs = [1., 1., 1.] 97 | ppl = get_perplexity(costs=costs) 98 | assert np.allclose(ppl['Perplexity'], np.e, atol=1e6) 99 | 100 | costs = [0., 0., 0.] 101 | ppl = get_perplexity(costs=costs) 102 | assert np.allclose(ppl['Perplexity'], 0., atol=1e6) 103 | 104 | 105 | def test_semantic_segmentation_accuracy(): 106 | # TODO 107 | pass 108 | 109 | 110 | def test_semantic_segmentation_meaniou(): 111 | # TODO 112 | pass 113 | 114 | 115 | def test_averagePrecision(): 116 | # TODO 117 | pass 118 | 119 | 120 | if __name__ == '__main__': 121 | pytest.main([__file__]) 122 | -------------------------------------------------------------------------------- /tests/extra/test_wrapper_localization_utilities.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | # TODO 4 | -------------------------------------------------------------------------------- /tests/extra/test_wrapper_read_write.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import pytest 3 | import sys 4 | import os 5 | import numpy 6 | from six import iteritems 7 | from keras_wrapper.extra.read_write import * 8 | from keras_wrapper.utils import flatten_list_of_lists 9 | 10 | 11 | def test_dirac(): 12 | assert dirac(1, 1) == 1 13 | assert dirac(2, 1) == 0 14 | 15 | 16 | def test_create_dir_if_not_exists(): 17 | create_dir_if_not_exists('test_directory') 18 | assert os.path.isdir('test_directory') 19 | 20 | 21 | def test_clean_dir(): 22 | clean_dir('test_directory') 23 | assert os.path.isdir('test_directory') 24 | 25 | 26 | def test_file2list(): 27 | reference_text = 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^'.decode('utf-8') if sys.version_info.major == 2 else 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^' 28 | stripped_list = file2list('tests/data/test_data.txt', stripfile=True) 29 | assert len(stripped_list) == 3 30 | assert stripped_list[1] == reference_text 31 | 32 | 33 | def test_numpy2hdf5(): 34 | filepath = 'test_file' 35 | data_name = 'test_data' 36 | my_np = np.random.rand(10, 
10).astype('float32') 37 | numpy2hdf5(filepath, my_np, data_name=data_name) 38 | assert os.path.isfile(filepath) 39 | my_np_loaded = np.asarray(load_hdf5_simple(filepath, dataset_name=data_name)).astype('float32') 40 | assert np.all(my_np == my_np_loaded) 41 | 42 | 43 | def test_numpy2file(): 44 | filepath = 'test_file' 45 | my_np = np.random.rand(10, 10).astype('float32') 46 | numpy2file(filepath, my_np) 47 | assert os.path.isfile(filepath) 48 | my_np_loaded = np.asarray(np.load(filepath)).astype('float32') 49 | assert np.all(my_np == my_np_loaded) 50 | 51 | 52 | def test_listoflists2file(): 53 | mylist = [['This is a text file. Containing characters of different encodings.'], 54 | ['ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^'], 55 | ['首先 ,'] 56 | ] 57 | filepath = 'saved_list' 58 | listoflists2file(filepath, mylist) 59 | loaded_list = file2list('saved_list') 60 | flatten_list = [encode_list(sublist) for sublist in mylist] 61 | flatten_list = flatten_list_of_lists(flatten_list) 62 | assert loaded_list == flatten_list 63 | 64 | 65 | def test_list2file(): 66 | mylist = ['This is a text file. Containing characters of different encodings.', 67 | 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', 68 | '首先 ,' 69 | ] 70 | filepath = 'saved_list' 71 | list2file(filepath, mylist) 72 | loaded_list = file2list('saved_list') 73 | my_encoded_list = encode_list(mylist) 74 | assert loaded_list == my_encoded_list 75 | 76 | 77 | def test_list2stdout(): 78 | mylist = ['This is a text file. Containing characters of different encodings.', 79 | 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', 80 | '首先 ,' 81 | ] 82 | list2stdout(mylist) 83 | 84 | 85 | def test_nbest2file(): 86 | my_nbest_list = [ 87 | [[1, 'This is a text file. Containing characters of different encodings.', 0.1], 88 | [1, 'Other hypothesis. Containing characters of different encodings.', 0.2] 89 | ], 90 | [[2, 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', 0.3]], 91 | [[3, '首先 ,', 90.3]] 92 | ] 93 | filepath = 'saved_nbest' 94 | nbest2file(filepath, my_nbest_list) 95 | nbest = file2list(filepath) 96 | assert nbest == encode_list(['1 ||| This is a text file. Containing characters of different encodings. ||| 0.1', 97 | '1 ||| Other hypothesis. Containing characters of different encodings. 
||| 0.2', 98 | '2 ||| ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^ ||| 0.3', 99 | '3 ||| 首先 , ||| 90.3']) 100 | 101 | 102 | def test_dump_load_hdf5_simple(): 103 | filepath = 'test_file' 104 | data_name = 'test_data' 105 | data = np.random.rand(10, 10).astype('float32') 106 | dump_hdf5_simple(filepath, data_name, data) 107 | loaded_data = load_hdf5_simple(filepath, dataset_name=data_name) 108 | assert np.all(loaded_data == data) 109 | 110 | 111 | def test_dict2file(): 112 | filepath = 'saved_dict' 113 | mydict = {1: 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', '首先': 9} 114 | title = None 115 | dict2file(mydict, filepath, title, permission='w') 116 | loaded_dict = file2list(filepath) 117 | assert loaded_dict == encode_list(['1:ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', '首先:9']) 118 | title = 'Test dict' 119 | dict2file(mydict, filepath, title, permission='w') 120 | loaded_dict = file2list(filepath) 121 | assert loaded_dict == encode_list(['Test dict', '1:ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', '首先:9']) 122 | 123 | 124 | def test_dict2pkl_pkl2dict(): 125 | filepath = 'saved_dict' 126 | mydict = {1: 'ẁñ á é í ó ú à è ì ò ù ä ë ï ö ü ^', '首先': 9} 127 | dict2pkl(mydict, filepath) 128 | loaded_dict = pkl2dict(filepath + '.pkl') 129 | assert loaded_dict == mydict 130 | 131 | 132 | if __name__ == '__main__': 133 | pytest.main([__file__]) 134 | -------------------------------------------------------------------------------- /tests/extra/test_wrapper_tokenizers.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import pytest 3 | from six import iteritems 4 | from keras_wrapper.extra.tokenizers import * 5 | 6 | 7 | def test_tokenize_basic(): 8 | untokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!\n\n' 9 | expected_string = u'This , ¿ is a , . sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 , ! ! ! ' 10 | tokenized_string = tokenize_basic(untokenized_string, lowercase=False) 11 | tokenized_string_lower = tokenize_basic(untokenized_string, lowercase=True) 12 | assert expected_string == tokenized_string 13 | assert expected_string.lower() == tokenized_string_lower 14 | 15 | 16 | def test_tokenize_aggressive(): 17 | untokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!\n\n' 18 | expected_string = u'This is a sentence with weird\xbb symbols ù ä ë ï ö ü ^首先' 19 | tokenized_string = tokenize_aggressive(untokenized_string, lowercase=False) 20 | tokenized_string_lower = tokenize_aggressive(untokenized_string, lowercase=True) 21 | assert expected_string == tokenized_string 22 | assert expected_string.lower() == tokenized_string_lower 23 | 24 | 25 | def test_tokenize_icann(): 26 | untokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!\n\n' 27 | expected_string = u'This , ¿is a , . sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 , ! ' 28 | tokenized_string_lower = tokenize_icann(untokenized_string) 29 | assert expected_string.lower() == tokenized_string_lower 30 | 31 | 32 | def test_tokenize_montreal(): 33 | untokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!\n\n' 34 | expected_string = u'This ¿is a sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 !!!' 
35 | tokenized_string_lower = tokenize_montreal(untokenized_string) 36 | assert expected_string.lower() == tokenized_string_lower 37 | 38 | 39 | def test_tokenize_soft(): 40 | untokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!\n\n' 41 | expected_string = u'This , ¿is a , . sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 , ! ' 42 | tokenized_string = tokenize_soft(untokenized_string, lowercase=False) 43 | tokenized_string_lower = tokenize_soft(untokenized_string, lowercase=True) 44 | assert expected_string == tokenized_string 45 | assert expected_string.lower() == tokenized_string_lower 46 | 47 | 48 | def test_tokenize_none(): 49 | untokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!\n\n' 50 | expected_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!' 51 | tokenized_string = tokenize_none(untokenized_string) 52 | assert expected_string == tokenized_string 53 | 54 | 55 | def test_tokenize_none_char(): 56 | untokenized_string = u'This, ¿is a > < , .sentence with weird\xbb symbols' 57 | expected_string = u'T h i s , ¿ i s a > < , . s e n t e n c e w i t h w e i r d \xbb s y m b o l s' 58 | tokenized_string = tokenize_none_char(untokenized_string) 59 | assert expected_string == tokenized_string 60 | 61 | 62 | def test_tokenize_CNN_sentence(): 63 | # TODO 64 | pass 65 | 66 | 67 | def test_tokenize_questions(): 68 | # TODO 69 | pass 70 | 71 | 72 | def test_tokenize_bpe(): 73 | # TODO 74 | pass 75 | 76 | 77 | def test_detokenize_none(): 78 | tokenized_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!' 79 | expected_string = u'This, ¿is a , .sentence with weird\xbb symbols ù ä ë ï ö ü ^首先 ,!!!' 80 | detokenized_string = detokenize_none(tokenized_string) 81 | assert expected_string == detokenized_string 82 | 83 | 84 | def test_detokenize_none_char(): 85 | tokenized_string = u'T h i s , ¿ i s a > < , . 
s e n t e n c e w i t h w e i r d \xbb s y m b o l s' 86 | expected_string = u'This, ¿is a > < , .sentence with weird\xbb symbols' 87 | detokenized_string = detokenize_none_char(tokenized_string) 88 | assert expected_string == detokenized_string 89 | 90 | 91 | if __name__ == '__main__': 92 | pytest.main([__file__]) 93 | -------------------------------------------------------------------------------- /tests/general/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarcBS/multimodal_keras_wrapper/1349edaaa0e13092a72280bb24316b460ed841de/tests/general/__init__.py -------------------------------------------------------------------------------- /tests/general/test_model_wrapper.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from six import iteritems 3 | 4 | 5 | def test_model_wrapper(): 6 | pass 7 | 8 | if __name__ == '__main__': 9 | pytest.main([__file__]) 10 | -------------------------------------------------------------------------------- /tests/general/test_model_wrapper_ensemble.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from six import iteritems 3 | 4 | 5 | def test_model_wrapper_ensemble(): 6 | pass 7 | 8 | if __name__ == '__main__': 9 | pytest.main([__file__]) 10 | -------------------------------------------------------------------------------- /tests/general/test_wrapper_dataset.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from six import iteritems 3 | 4 | 5 | def test_dataset(): 6 | pass 7 | 8 | if __name__ == '__main__': 9 | pytest.main([__file__]) 10 | -------------------------------------------------------------------------------- /utils/README.md: -------------------------------------------------------------------------------- 1 | # Multimodal Keras Wrapper utils 2 | 3 | In this directory, you'll find some utilities for the Models and Datasets of the Multimodal Keras Wrapper (MKW). 4 | The main scripts are the following: 5 | 6 | * **average_models.py**: Performs model averaging over multiple models. 7 | * **minimize_dataset.py**: Removes the data stored in a dataset instance, keeping the rest of its attributes (types, ids, params, preprocessing...). 8 | 9 | -------------------------------------------------------------------------------- /utils/average_models.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import sys 4 | import os 5 | from keras_wrapper.utils import average_models 6 | sys.path.insert(1, os.path.abspath(".")) 7 | sys.path.insert(0, os.path.abspath("../")) 8 | 9 | logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%d/%m/%Y %H:%M:%S') 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | def parse_args(): 14 | """ 15 | Argument parser. 16 | :return: 17 | """ 18 | parser = argparse.ArgumentParser("Averages models") 19 | 20 | parser.add_argument("-d", "--dest", 21 | default='./model', 22 | required=False, 23 | help="Path to the averaged model. If not specified, the model is saved in './model'.") 24 | parser.add_argument("-v", "--verbose", required=False, default=0, type=int, help="Verbosity level") 25 | parser.add_argument("-w", "--weights", nargs="*", help="Weight given to each model in the averaging. You should provide the same number of weights as models. "
26 | "By default, it applies the same weight to each model (1/N).", default=[]) 27 | parser.add_argument("-m", "--models", nargs="+", required=True, help="Path to the models") 28 | return parser.parse_args() 29 | 30 | 31 | def weighted_average(args): 32 | """ 33 | Apply a weighted average to the models. 34 | :param args: Options for the averaging function: 35 | * models: Path to the models. 36 | * dest: Path to the averaged model. If unspecified, the model is saved in './model' 37 | * weights: Weight given to each model in the averaging. Should be the same number of weights than models. 38 | If unspecified, it applies the same weight to each model (1/N). 39 | :return: 40 | """ 41 | logger.info("Averaging %d models" % len(args.models)) 42 | average_models(args.models, args.dest, weights=args.weights) 43 | logger.info('Averaging finished.') 44 | 45 | 46 | if __name__ == "__main__": 47 | 48 | args = parse_args() 49 | weighted_average(args) 50 | -------------------------------------------------------------------------------- /utils/minimize_dataset.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import argparse 3 | import os 4 | from keras_wrapper.dataset import loadDataset, saveDataset 5 | 6 | 7 | def parse_args(): 8 | """ 9 | Argument parser 10 | :return: 11 | """ 12 | parser = argparse.ArgumentParser("Minimizes a dataset by removing the data stored in it: Tranining, development and test. " 13 | "The rest of parameters are kept." 14 | "Useful for reloading datasets with new data.") 15 | parser.add_argument("-d", "--dataset", required=True, help="Stored instance of the dataset") 16 | parser.add_argument("-o", "--output", help="Output dataset file.", 17 | default="") 18 | return parser.parse_args() 19 | 20 | if __name__ == "__main__": 21 | 22 | args = parse_args() 23 | # Load dataset 24 | ds = loadDataset(args.dataset) 25 | # Reinitialize values to empty 26 | ds.loaded_train = [False, False] 27 | ds.loaded_val = [False, False] 28 | ds.loaded_test = [False, False] 29 | 30 | ds.loaded_raw_train = [False, False] 31 | ds.loaded_raw_val = [False, False] 32 | ds.loaded_raw_test = [False, False] 33 | 34 | ds.len_train = 0 35 | ds.len_val = 0 36 | ds.len_test = 0 37 | # Remove data 38 | for key in ds.X_train.keys(): 39 | ds.X_train[key] = None 40 | for key in ds.X_val.keys(): 41 | ds.X_val[key] = None 42 | for key in ds.X_test.keys(): 43 | ds.X_test[key] = None 44 | for key in ds.X_train.keys(): 45 | ds.X_train[key] = None 46 | for key in ds.Y_train.keys(): 47 | ds.Y_train[key] = None 48 | for key in ds.Y_val.keys(): 49 | ds.Y_val[key] = None 50 | for key in ds.Y_test.keys(): 51 | ds.Y_test[key] = None 52 | for key in ds.X_raw_train.keys(): 53 | ds.X_raw_train[key] = None 54 | for key in ds.X_raw_val.keys(): 55 | ds.X_raw_val[key] = None 56 | for key in ds.X_raw_test.keys(): 57 | ds.X_raw_test[key] = None 58 | for key in ds.Y_raw_train.keys(): 59 | ds.Y_raw_train[key] = None 60 | for key in ds.Y_raw_val.keys(): 61 | ds.Y_raw_val[key] = None 62 | for key in ds.Y_raw_test.keys(): 63 | ds.Y_raw_test[key] = None 64 | 65 | # Save dataset 66 | output_path = args.output if args.output else os.path.dirname(args.dataset) 67 | saveDataset(ds, output_path) 68 | --------------------------------------------------------------------------------