├── .gitignore ├── LICENSE ├── README.rst ├── cortex ├── __init__.py ├── analysis │ ├── __init__.py │ ├── load_mri.py │ ├── mri │ │ ├── __init__.py │ │ └── rois.py │ └── read_fmri.py ├── datasets │ ├── __init__.py │ ├── basic │ │ ├── __init__.py │ │ ├── caltech.py │ │ ├── cifar.py │ │ ├── euclidean.py │ │ ├── horses.py │ │ ├── mnist.py │ │ └── uci.py │ └── neuroimaging │ │ ├── __init__.py │ │ ├── fmri.py │ │ ├── mri.py │ │ ├── nifti_viewer.py │ │ ├── simTB.py │ │ ├── snp.py │ │ └── tests │ │ └── _test_snp.py ├── demos │ ├── __init__.py │ ├── demos_basic │ │ ├── __init__.py │ │ ├── classifier.py │ │ ├── classifier_mnist.yaml │ │ ├── eval_rbm.py │ │ ├── rbm_cifar.yaml │ │ ├── rbm_mnist.py │ │ ├── rbm_mnist.yaml │ │ ├── reweighted_vae.yaml │ │ ├── tests │ │ │ └── test_demos.py │ │ ├── vae.py │ │ └── vae_mnist.yaml │ └── demos_neuroimaging │ │ ├── __init__.py │ │ ├── rbm_ni.py │ │ ├── rbm_olin.yaml │ │ ├── rbm_vbm.yaml │ │ ├── vae.py │ │ └── vae_mri.yaml ├── inference │ ├── __init__.py │ ├── air.py │ ├── gdir.py │ ├── irvi.py │ ├── rws.py │ └── tests │ │ └── test_gdir.py ├── models │ ├── __init__.py │ ├── darn.py │ ├── deep_helmholtz.py │ ├── distributions.py │ ├── gru.py │ ├── helmholtz.py │ ├── layers.py │ ├── lstm.py │ ├── mlp.py │ ├── rbm.py │ ├── rnn.py │ └── tests │ │ ├── __init__.py │ │ ├── _test_gru.py │ │ ├── test_darn.py │ │ ├── test_mlp.py │ │ ├── test_rbm.py │ │ ├── test_rnn.py │ │ └── test_vae.py └── utils │ ├── __init__.py │ ├── extra.py │ ├── learning_scheduler.py │ ├── logger.py │ ├── monitor.py │ ├── op.py │ ├── preprocessor.py │ ├── tools.py │ ├── training.py │ ├── vis_utils.py │ └── viz_h │ ├── Visualization.ipynb │ └── tsne.py ├── doc ├── Makefile ├── _build │ ├── doctrees │ │ ├── cortex.doctree │ │ ├── demos.doctree │ │ ├── environment.pickle │ │ ├── index.doctree │ │ ├── modules.doctree │ │ ├── setup.doctree │ │ └── source │ │ │ ├── cortex.analysis.doctree │ │ │ ├── cortex.analysis.mri.doctree │ │ │ ├── cortex.datasets.basic.doctree │ │ │ ├── cortex.datasets.doctree │ │ │ ├── cortex.datasets.neuroimaging.doctree │ │ │ ├── cortex.demos.demos_basic.doctree │ │ │ ├── cortex.demos.demos_neuroimaging.doctree │ │ │ ├── cortex.demos.doctree │ │ │ ├── cortex.doctree │ │ │ ├── cortex.inference.doctree │ │ │ ├── cortex.models.doctree │ │ │ ├── cortex.models.tests.doctree │ │ │ ├── cortex.utils.doctree │ │ │ └── modules.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _modules │ │ ├── cortex.html │ │ ├── cortex │ │ │ ├── analysis │ │ │ │ ├── load_mri.html │ │ │ │ ├── mri │ │ │ │ │ └── rois.html │ │ │ │ └── read_fmri.html │ │ │ ├── datasets.html │ │ │ ├── datasets │ │ │ │ ├── basic │ │ │ │ │ ├── caltech.html │ │ │ │ │ ├── cifar.html │ │ │ │ │ ├── euclidean.html │ │ │ │ │ ├── horses.html │ │ │ │ │ ├── mnist.html │ │ │ │ │ └── uci.html │ │ │ │ ├── neuroimaging.html │ │ │ │ └── neuroimaging │ │ │ │ │ ├── fmri.html │ │ │ │ │ ├── mri.html │ │ │ │ │ ├── nifti_viewer.html │ │ │ │ │ ├── simTB.html │ │ │ │ │ └── snp.html │ │ │ ├── demos.html │ │ │ ├── demos │ │ │ │ ├── demos_basic.html │ │ │ │ ├── demos_basic │ │ │ │ │ ├── classifier.html │ │ │ │ │ ├── eval_rbm.html │ │ │ │ │ ├── rbm_mnist.html │ │ │ │ │ └── vae.html │ │ │ │ ├── demos_neuroimaging.html │ │ │ │ └── demos_neuroimaging │ │ │ │ │ ├── rbm_ni.html │ │ │ │ │ └── vae.html │ │ │ ├── inference.html │ │ │ ├── inference │ │ │ │ ├── air.html │ │ │ │ ├── gdir.html │ │ │ │ ├── irvi.html │ │ │ │ └── rws.html │ │ │ ├── models.html │ │ │ ├── models │ │ │ │ ├── darn.html │ │ │ │ ├── deep_helmholtz.html │ │ │ │ ├── distributions.html │ │ │ │ ├── 
gru.html │ │ │ │ ├── helmholtz.html │ │ │ │ ├── layers.html │ │ │ │ ├── lstm.html │ │ │ │ ├── mlp.html │ │ │ │ ├── rbm.html │ │ │ │ ├── rnn.html │ │ │ │ └── tests │ │ │ │ │ ├── test_darn.html │ │ │ │ │ ├── test_lfmlp.html │ │ │ │ │ ├── test_mlp.html │ │ │ │ │ ├── test_rbm.html │ │ │ │ │ ├── test_rnn.html │ │ │ │ │ └── test_vae.html │ │ │ └── utils │ │ │ │ ├── costs.html │ │ │ │ ├── extra.html │ │ │ │ ├── learning_scheduler.html │ │ │ │ ├── logger.html │ │ │ │ ├── monitor.html │ │ │ │ ├── op.html │ │ │ │ ├── preprocessor.html │ │ │ │ ├── tools.html │ │ │ │ ├── training.html │ │ │ │ └── vis_utils.html │ │ └── index.html │ │ ├── _sources │ │ ├── cortex.txt │ │ ├── demos.txt │ │ ├── index.txt │ │ ├── modules.txt │ │ ├── setup.txt │ │ └── source │ │ │ ├── cortex.analysis.mri.txt │ │ │ ├── cortex.analysis.txt │ │ │ ├── cortex.datasets.basic.txt │ │ │ ├── cortex.datasets.neuroimaging.txt │ │ │ ├── cortex.datasets.txt │ │ │ ├── cortex.demos.demos_basic.txt │ │ │ ├── cortex.demos.demos_neuroimaging.txt │ │ │ ├── cortex.demos.txt │ │ │ ├── cortex.inference.txt │ │ │ ├── cortex.models.tests.txt │ │ │ ├── cortex.models.txt │ │ │ ├── cortex.txt │ │ │ ├── cortex.utils.txt │ │ │ └── modules.txt │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── basic.css │ │ ├── classic.css │ │ ├── comment-bright.png │ │ ├── comment-close.png │ │ ├── comment.png │ │ ├── doctools.js │ │ ├── down-pressed.png │ │ ├── down.png │ │ ├── file.png │ │ ├── jquery-1.11.1.js │ │ ├── jquery.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── sidebar.js │ │ ├── underscore-1.3.1.js │ │ ├── underscore.js │ │ ├── up-pressed.png │ │ ├── up.png │ │ └── websupport.js │ │ ├── cortex.html │ │ ├── demos.html │ │ ├── genindex.html │ │ ├── index.html │ │ ├── modules.html │ │ ├── objects.inv │ │ ├── py-modindex.html │ │ ├── search.html │ │ ├── searchindex.js │ │ ├── setup.html │ │ └── source │ │ ├── cortex.analysis.html │ │ ├── cortex.analysis.mri.html │ │ ├── cortex.datasets.basic.html │ │ ├── cortex.datasets.html │ │ ├── cortex.datasets.neuroimaging.html │ │ ├── cortex.demos.demos_basic.html │ │ ├── cortex.demos.demos_neuroimaging.html │ │ ├── cortex.demos.html │ │ ├── cortex.html │ │ ├── cortex.inference.html │ │ ├── cortex.models.html │ │ ├── cortex.models.tests.html │ │ ├── cortex.utils.html │ │ └── modules.html ├── conf.py ├── demos.rst ├── index.rst ├── make.bat ├── setup.rst └── source │ ├── cortex.analysis.mri.rst │ ├── cortex.analysis.rst │ ├── cortex.datasets.basic.rst │ ├── cortex.datasets.neuroimaging.rst │ ├── cortex.datasets.rst │ ├── cortex.demos.demos_basic.rst │ ├── cortex.demos.demos_neuroimaging.rst │ ├── cortex.demos.rst │ ├── cortex.inference.rst │ ├── cortex.models.rst │ ├── cortex.models.tests.rst │ ├── cortex.rst │ ├── cortex.utils.rst │ └── modules.rst └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .DS_Store 3 | *.pdf 4 | *.gz 5 | *.zip 6 | *.aux 7 | *.log 8 | *.blg 9 | *.bbl 10 | *.out 11 | .DS_Store 12 | 13 | /.idea/ 14 | .idea/workspace.xml 15 | 16 | mine_tweets.py 17 | # Byte-compiled / optimized / DLL files 18 | __pycache__/ 19 | *.py[cod] 20 | 21 | # C extensions 22 | *.so 23 | 24 | # Distribution / packaging 25 | .Python 26 | env/ 27 | build/ 28 | develop-eggs/ 29 | dist/ 30 | downloads/ 31 | eggs/ 32 | lib/ 33 | lib64/ 34 | parts/ 35 | sdist/ 36 | var/ 37 | *.egg-info/ 38 | .installed.cfg 39 | *.egg 40 | *.pyc 41 | 42 | # PyInstaller 43 | # Usually these files are written by a python script from a template 
44 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 45 | *.manifest 46 | *.spec 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | 52 | # Unit test / coverage reports 53 | htmlcov/ 54 | .tox/ 55 | .coverage 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | 67 | # Sphinx documentation 68 | docs/_build/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | obj/all_status.pkl 74 | obj/all_status_500.pkl 75 | obj/users_500.pkl 76 | obj/all_status.pkl 77 |
-------------------------------------------------------------------------------- /README.rst: --------------------------------------------------------------------------------
1 | Cortex: a deep learning toolbox for neuroimaging 2 | ================================================ 3 |
4 | Cortex is a framework for training and evaluating neural networks using Theano. 5 | Cortex is not specific to, but includes tools for, neuroimaging. Cortex is not 6 | meant to replace Theano, but is intended to be used as a complement to scripting 7 | in Python. It is highly customizable: all methods and classes are suggested 8 | templates, and pure Theano can be used when needed. 9 |
10 | .. warning:: 11 | 12 | Cortex is a brand-new project and is under rapid development. If you encounter 13 | any bugs or have any feature requests, please `email`_ or 14 | `create a GitHub issue`_. 15 |
16 | .. _email: erroneus@gmail.com 17 | .. _create a GitHub issue: https://github.com/rdevon/cortex/issues/new 18 |
19 | Features 20 | -------- 21 | 22 | Cortex currently supports the following models and datasets: 23 | 24 | * Feed-forward networks 25 | * RBMs 26 | * RNNs, GRUs, and LSTMs 27 | * Helmholtz machines as well as variational inference methods 28 | * Common datasets, such as MNIST and Caltech silhouettes 29 | * Neuroimaging datasets, such as MRI 30 |
31 | Installation 32 | ------------ 33 | 34 | You can install Cortex using the Python package manager ``pip``. 35 | 36 | .. code-block:: bash 37 | 38 | $ pip install cortex 39 | 40 | To get the most up-to-date version, you can install from the ``git`` repository: 41 | 42 | .. code-block:: bash 43 | 44 | $ pip install git+git://github.com/rdevon/cortex.git 45 |
46 | However, the demos currently give the best examples of how to script with Cortex. 47 | So, if this is your first time using Cortex, it is recommended to clone the GitHub repository: 48 | 49 | .. code-block:: bash 50 | 51 | $ git clone https://github.com/rdevon/cortex.git 52 | $ cd cortex 53 | $ python setup.py install 54 |
55 | If you don't have administrative rights, add the ``--user`` switch to the 56 | install commands to install the packages in your home folder. If you want to 57 | update Cortex, simply repeat the first command with the ``--upgrade`` switch 58 | added to pull the latest version from GitHub. 59 | 60 | In either case, you need to run the setup script: 61 | 62 | .. code-block:: bash 63 | 64 | $ cortex-setup 65 |
66 | Follow the instructions; you will be asked to specify default data and output 67 | directories. These are necessary only for the demos, and can be customized in your 68 | ~/.cortexrc file. 69 |
70 | Requirements 71 | ------------ 72 | 73 | Basic Requirements 74 | __________________ 75 | 76 | .. _PyYAML: http://pyyaml.org/wiki/PyYAML 77 | .. _Theano: http://deeplearning.net/software/theano/ 78 | 79 | * Theano_ 80 | * PyYAML_ 81 | 82 | Neuroimaging Requirements 83 | _________________________ 84 | 85 | .. note::
86 | 87 | These are not required for basic functionality, but are necessary for 88 | neuroimaging tools. `afni`_, in particular, needs to be installed manually. 89 |
90 | .. _h5py: http://www.h5py.org/ 91 | .. _nipy: http://nipy.org/ 92 | .. _afni: http://afni.nimh.nih.gov 93 | .. _nibabel: http://nipy.org/nibabel/ 94 | .. _sklearn: http://scikit-learn.org/stable/ 95 | 96 | * nipy_ 97 | * h5py_ 98 | * afni_ 99 | * nibabel_ 100 | * sklearn_ 101 |
102 | Demos 103 | ----- 104 | 105 | Cortex has several command-line demos of its functionality. 106 | If the ``basic`` dataset was installed using ``cortex-setup``, then the 107 | following demos are available: 108 | 109 | .. code-block:: bash 110 | 111 | $ cortex-classifier-demo 112 | 113 | $ cortex-rbm-demo 114 | 115 | $ cortex-vae-demo 116 |
117 | If you installed the neuroimaging data, then the neuroimaging demos can be run: 118 | 119 | .. code-block:: bash 120 | 121 | $ cortex-rbm-vbm-demo 122 | 123 | $ cortex-rbm-olin-demo 124 | 125 | These demos use partial datasets intended for demonstration purposes only. 126 |
127 | Documentation 128 | ------------- 129 | 130 | Source documentation can be found `here`_. 131 | 132 | .. _here: http://cortex.readthedocs.io/
-------------------------------------------------------------------------------- /cortex/__init__.py: --------------------------------------------------------------------------------
1 | '''Setup scripts for Cortex. 2 | 3 | ''' 4 | 5 | import readline, glob 6 | from os import path 7 | import urllib2 8 | 9 | from datasets import fetch_basic_data 10 | from datasets.neuroimaging import fetch_neuroimaging_data 11 | from utils.tools import get_paths 12 | from utils.extra import complete_path, query_yes_no, write_path_conf 13 | 14 |
15 | def main(): 16 | readline.set_completer_delims(' \t\n;') 17 | readline.parse_and_bind('tab: complete') 18 | readline.set_completer(complete_path) 19 | print ('Welcome to Cortex: a deep learning toolbox for ' 20 | 'neuroimaging') 21 | print ('Cortex requires that you enter some paths for ' 22 | 'default dataset and output directories. These ' 23 | 'can be changed at any time and are customizable ' 24 | 'via the ~/.cortexrc file.') 25 |
26 | try: 27 | path_dict = get_paths() 28 | except ValueError: 29 | path_dict = dict() 30 |
31 | if '$data' in path_dict: 32 | data_path = raw_input( 33 | 'Default data path: [%s] ' % path_dict['$data']) or path_dict['$data'] 34 | else: 35 | data_path = raw_input('Default data path: ') 36 | data_path = path.expanduser(data_path) 37 | if not path.isdir(data_path): 38 | raise ValueError('path %s does not exist. Please create it.' % data_path) 39 |
40 | if '$outs' in path_dict: 41 | out_path = raw_input( 42 | 'Default output path: [%s] ' % path_dict['$outs']) or path_dict['$outs'] 43 | else: 44 | out_path = raw_input('Default output path: ') 45 | out_path = path.expanduser(out_path) 46 | if not path.isdir(out_path): 47 | raise ValueError('path %s does not exist. Please create it.' % out_path) 48 | write_path_conf(data_path, out_path) 49 |
50 | print ('Cortex demos require additional data that is not necessary for ' 51 | 'general use of Cortex as a package. ' 52 | 'This includes MNIST, Caltech Silhouettes, and some UCI dataset ' 53 | 'samples.') 54 | 55 | answer = query_yes_no('Download basic dataset? ') 56 | 57 | if answer: 58 | try: 59 | fetch_basic_data() 60 | except urllib2.HTTPError: 61 | print 'Error: basic dataset not found.'
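    # A minimal sketch (illustrative, assumed values only) of the path dictionary that
    # get_paths() returns once write_path_conf has written ~/.cortexrc; the '$data' and
    # '$outs' keys are the same ones read above.
    #
    #     path_dict = get_paths()
    #     # e.g. {'$data': '/home/user/cortex_data', '$outs': '/home/user/cortex_outs'}
    #
    # Elsewhere in the package these keys are expanded by utils.tools.resolve_path,
    # e.g. resolve_path('$data') yields the data directory chosen during setup.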
62 | 63 | print ('Cortex also requires neuroimaging data for the neuroimaging ' 64 | 'demos. These data are large and can be skipped.') 65 | 66 | answer = query_yes_no('Download neuroimaging dataset? ') 67 | 68 | if answer: 69 | try: 70 | fetch_neuroimaging_data() 71 | except urllib2.HTTPError: 72 | print 'Error: neuroimaging dataset not found.' 73 |
-------------------------------------------------------------------------------- /cortex/analysis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/cortex/analysis/__init__.py
-------------------------------------------------------------------------------- /cortex/analysis/load_mri.py: --------------------------------------------------------------------------------
1 | '''Utilities for handling nifti files. 2 | 3 | ''' 4 | 5 | import argparse 6 | from glob import glob 7 | import nibabel as nib 8 | from nipy import save_image, load_image 9 | import numpy as np 10 | import os 11 | from os import path 12 | import pickle 13 | import re 14 | import readline 15 | from scipy import io 16 | from scipy.stats import kurtosis 17 | from scipy.stats import skew 18 | import sys 19 | from sys import stdout 20 | import yaml 21 | 22 | from ..utils.extra import complete_path 23 | 24 |
25 | def find_niftis(source): 26 | '''Finds nifti files in a directory. 27 | 28 | Args: 29 | source (str): The source directory for niftis. 30 | 31 | Returns: 32 | list: List of file paths. 33 | 34 | ''' 35 | return glob(path.join(source, '*.nii')) 36 |
37 | def read_niftis(file_list): 38 | '''Reads niftis from a file list into a numpy array. 39 | 40 | Args: 41 | file_list (list): List of file paths. 42 | 43 | Returns: 44 | numpy.array: Array of data from nifti file list. 45 | list: New file list with bad files filtered. 46 | 47 | ''' 48 | 49 | data0 = load_image(file_list[0]).get_data() 50 | 51 | x, y, z = data0.shape 52 | print 'Found %d files with data shape %r' % (len(file_list), data0.shape) 53 | n = len(file_list) 54 | 55 | data = [] 56 | 57 | new_file_list = [] 58 | for i, f in enumerate(file_list): 59 | print '%d) Loading subject from file: %s' % (i, f) 60 | 61 | nifti = load_image(f) 62 | subject_data = nifti.get_data() 63 | if subject_data.shape != (x, y, z): 64 | raise ValueError('Shape mismatch') 65 | data.append(subject_data) 66 | new_file_list.append(f) 67 | data = np.array(data).astype('float32') 68 | 69 | return data, new_file_list 70 |
71 | def save_mask(data, out_path): 72 | '''Save mask of data. 73 | 74 | Args: 75 | data (numpy.array): Data to mask. 76 | out_path (str): Output path for mask. 77 | 78 | ''' 79 | 80 | print 'Getting mask' 81 | 82 | n, x, y, z = data.shape 83 | mask = np.zeros((x, y, z)) 84 | 85 | zero_freq = (data.reshape((n, x * y * z)) == 0).sum(1) * 1.
/ reduce( 86 | lambda x_, y_: x_ * y_, data.shape[1:]) 87 | 88 | for freq in zero_freq: 89 | assert isinstance(freq, float), freq 90 | if abs(zero_freq.mean() - freq) > .05: 91 | raise ValueError("Spurious datapoint, mean zeros frequency is" 92 | "%.2f, datapoint is %.2f" 93 | % (zero_freq.mean(), freq)) 94 | 95 | nonzero_avg = (data > 0).mean(axis=0) 96 | 97 | mask[np.where(nonzero_avg > .99)] = 1 98 | 99 | print 'Masked out %d out of %d voxels' % ((mask == 0).sum(), reduce( 100 | lambda x_, y_: x_ * y_, mask.shape)) 101 | 102 | np.save(out_path, mask) 103 | 104 | def load_niftis(source_dir, out_dir, name='mri', patterns=None): 105 | '''Loads niftis from a directory. 106 | 107 | Saves the data, paths, mask, and `sites`. 108 | 109 | Args: 110 | source_dir (str): Directory of nifti files. 111 | out_dir (str): Output directory for saving arrays, etc. 112 | name (str): Name of dataset. 113 | patterns (Optional[list]): list of glob for filtering files. 114 | 115 | ''' 116 | 117 | if patterns is not None: 118 | file_lists = [] 119 | for i, pattern in enumerate(patterns): 120 | file_list = glob(path.join(source_dir, pattern)) 121 | file_lists.append(file_list) 122 | else: 123 | file_lists = [find_niftis(source_dir)] 124 | 125 | base_file = file_lists[0][0] 126 | paths_file = path.join(out_dir, name + '_file_paths.npy') 127 | sites_file = path.join(out_dir, name + '_sites.npy') 128 | mask_file = path.join(out_dir, name + '_mask.npy') 129 | yaml_file = path.join(out_dir, name + '.yaml') 130 | tmp_dir = path.join(out_dir, name + '_tmp') 131 | if not path.isdir(tmp_dir): 132 | os.mkdir(tmp_dir) 133 | 134 | readline.set_completer_delims(' \t\n;') 135 | readline.parse_and_bind('tab: complete') 136 | readline.set_completer(complete_path) 137 | print ('The MRI dataset requires an anatomical nifti file to visualize' 138 | ' properly. Enter the path for the anatomical file or leave blank' 139 | ' if you plan not to use visualization or will enter into the yaml' 140 | ' file later.') 141 | 142 | anat_file = raw_input('Anat file: ') 143 | if anat_file == '': yaml_file = None 144 | 145 | datas = [] 146 | new_file_lists = [] 147 | data_paths = [] 148 | for i, file_list in enumerate(file_lists): 149 | data, new_file_list = read_niftis(file_list) 150 | new_file_lists.append(new_file_list) 151 | datas.append(data) 152 | data_path = path.join(out_dir, name + '_%d.npy' % i) 153 | data_paths.append(data_path) 154 | np.save(data_path, data) 155 | 156 | sites = [[0 if 'st' in f else 1 for f in fl] for fl in file_lists] 157 | sites = sites[0] + sites[1] 158 | 159 | save_mask(np.concatenate(datas, axis=0), mask_file) 160 | np.save(paths_file, new_file_lists) 161 | np.save(sites_file, sites) 162 | with open(yaml_file, 'w') as yf: 163 | yf.write( 164 | yaml.dump( 165 | dict(name=name, 166 | data=data_paths, 167 | mask=mask_file, 168 | nifti=base_file, 169 | sites=sites_file, 170 | tmp_path=tmp_dir, 171 | anat_file=anat_file 172 | ) 173 | ) 174 | ) 175 | 176 | def make_argument_parser(): 177 | '''Parses command-line arguments. 178 | 179 | ''' 180 | parser = argparse.ArgumentParser() 181 | 182 | parser.add_argument('source', 183 | help='source directory for all subjects.') 184 | parser.add_argument('out_path', 185 | help='output directory under args.name') 186 | parser.add_argument('-n', '--name', default='mri') 187 | parser.add_argument('-p', '--patterns', nargs='+', default=None) 188 | 189 | return parser 190 | 191 | def main(args=None): 192 | '''Main routine. 
193 | 194 | ''' 195 | if args is None: 196 | args = sys.argv[1:] 197 | 198 | parser = make_argument_parser() 199 | args = parser.parse_args() 200 | 201 | source_dir = path.abspath(args.source) 202 | out_dir = path.abspath(args.out_path) 203 | 204 | if not path.isdir(out_dir): 205 | raise ValueError('No output directory found (%s)' % out_dir) 206 | 207 | load_niftis(source_dir, out_dir, args.name, patterns=args.patterns) 208 | 209 | if __name__ == '__main__': 210 | main() 211 | -------------------------------------------------------------------------------- /cortex/analysis/mri/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/cortex/analysis/mri/__init__.py -------------------------------------------------------------------------------- /cortex/analysis/read_fmri.py: -------------------------------------------------------------------------------- 1 | '''Utility for handling fMRI data. 2 | 3 | ''' 4 | 5 | import argparse 6 | from glob import glob 7 | import nibabel as nib 8 | from nipy import save_image, load_image 9 | import numpy as np 10 | import os 11 | from os import path 12 | import readline 13 | import sys 14 | import yaml 15 | 16 | from .load_mri import find_niftis, load_niftis, make_argument_parser 17 | from ..utils.extra import complete_path 18 | 19 | 20 | def read_niftis(file_list): 21 | '''Finds nifti files in a directory. 22 | 23 | Args: 24 | source (str): The source directory for niftis 25 | 26 | Returns: 27 | list: List of file paths. 28 | 29 | ''' 30 | data0 = load_image(file_list[0]).get_data() 31 | 32 | x, y, z, t = data0.shape 33 | print 'Found %d files with data shape is %r' % (len(file_list), data0.shape) 34 | 35 | data = [] 36 | 37 | new_file_list = [] 38 | for i, f in enumerate(file_list): 39 | print '%d) Loading subject from file: %s' % (i, f) 40 | 41 | nifti = load_image(f) 42 | subject_data = nifti.get_data() 43 | if subject_data.shape != (x, y, z, t): 44 | raise ValueError('Shape mismatch') 45 | subject_data -= subject_data.mean() 46 | subject_data /= subject_data.std() 47 | data.append(subject_data) 48 | new_file_list.append(f) 49 | data = np.array(data).transpose(0, 4, 1, 2, 3).astype('float32') 50 | 51 | return data, new_file_list 52 | 53 | def save_mask(data, out_path): 54 | '''Save mask of data. 55 | 56 | Args: 57 | data (numpy.array): Data to mask 58 | out_path (str): Output path for mask. 59 | 60 | ''' 61 | print 'Getting mask' 62 | 63 | s, n, x, y, z = data.shape 64 | mask = np.zeros((x, y, z)) 65 | _data = data.reshape((s * n, x, y, z)) 66 | 67 | mask[np.where(_data.mean(axis=0) > _data.mean())] = 1 68 | 69 | print 'Masked out %d out of %d voxels' % ((mask == 0).sum(), reduce( 70 | lambda x_, y_: x_ * y_, mask.shape)) 71 | 72 | np.save(out_path, mask) 73 | 74 | def load_niftis(source_dir, out_dir, name='fmri', patterns=None): 75 | '''Loads niftis from a directory. 76 | 77 | Saves the data, paths, mask, and `sites`. 78 | 79 | Args: 80 | source_dir (str): Directory of nifti files. 81 | out_dir (str): Output directory for saving arrays, etc. 82 | name (str): Name of dataset. 83 | patterns (Optional[list]): list of glob for filtering files. 
84 | 85 | ''' 86 | 87 | if patterns is not None: 88 | file_lists = [] 89 | for i, pattern in enumerate(patterns): 90 | file_list = glob(path.join(source_dir, pattern)) 91 | file_lists.append(file_list) 92 | else: 93 | file_lists = [find_niftis(source_dir)] 94 | 95 | base_file = file_lists[0][0] 96 | paths_file = path.join(out_dir, name + '_file_paths.npy') 97 | sites_file = path.join(out_dir, name + '_sites.npy') 98 | mask_file = path.join(out_dir, name + '_mask.npy') 99 | yaml_file = path.join(out_dir, name + '.yaml') 100 | tmp_dir = path.join(out_dir, name + '_tmp') 101 | if not path.isdir(tmp_dir): 102 | os.mkdir(tmp_dir) 103 | 104 | readline.set_completer_delims(' \t\n;') 105 | readline.parse_and_bind('tab: complete') 106 | readline.set_completer(complete_path) 107 | print ('The fMRI dataset requires an anatomical nifti file to visualize' 108 | ' properly. Enter the path for the anatomical file or leave blank' 109 | ' if you plan not to use visualization or will enter into the yaml' 110 | ' file later.') 111 | 112 | anat_file = raw_input('Anat file: ') 113 | if anat_file == '': yaml_file = None 114 | 115 | datas = [] 116 | new_file_lists = [] 117 | data_paths = [] 118 | for i, file_list in enumerate(file_lists): 119 | data, new_file_list = read_niftis(file_list) 120 | new_file_lists.append(new_file_list) 121 | datas.append(data) 122 | data_path = path.join(out_dir, name + '_%d.npy' % i) 123 | data_paths.append(data_path) 124 | np.save(data_path, data) 125 | 126 | sites = [[0 if 'st' in f else 1 for f in fl] for fl in file_lists] 127 | sites = sites[0] + sites[1] 128 | 129 | save_mask(np.concatenate(datas, axis=0), mask_file) 130 | np.save(paths_file, new_file_lists) 131 | np.save(sites_file, sites) 132 | with open(yaml_file, 'w') as yf: 133 | yf.write( 134 | yaml.dump( 135 | dict(name=name, 136 | data=data_paths, 137 | mask=mask_file, 138 | nifti=base_file, 139 | sites=sites_file, 140 | tmp_path=tmp_dir, 141 | anat_file=anat_file 142 | ) 143 | ) 144 | ) 145 | 146 | def main(args=None): 147 | '''Main routine. 
148 | 149 | ''' 150 | if args is None: 151 | args = sys.argv[1:] 152 | 153 | parser = make_argument_parser() 154 | args = parser.parse_args() 155 | 156 | source_dir = path.abspath(args.source) 157 | out_dir = path.abspath(args.out_path) 158 | 159 | if not path.isdir(out_dir): 160 | raise ValueError('No output directory found (%s)' % out_dir) 161 | 162 | load_niftis(source_dir, out_dir, args.name, patterns=args.patterns) 163 | 164 | if __name__ == '__main__': 165 | 166 | parser = make_argument_parser() 167 | args = parser.parse_args() 168 | 169 | source_dir = path.abspath(args.source) 170 | out_dir = path.abspath(args.out_path) 171 | 172 | if not path.isdir(out_dir): 173 | raise ValueError('No output directory found (%s)' % out_dir) 174 | 175 | load_niftis(source_dir, out_dir, args.name, patterns=args.patterns) 176 | -------------------------------------------------------------------------------- /cortex/datasets/basic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/cortex/datasets/basic/__init__.py -------------------------------------------------------------------------------- /cortex/datasets/basic/caltech.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Caltech 101 Silhouettes dataset 3 | ''' 4 | 5 | from scipy import io 6 | 7 | from .mnist import MNIST 8 | from ...utils import floatX 9 | 10 | 11 | class CALTECH(MNIST): 12 | def __init__(self, name='caltech', **kwargs): 13 | super(CALTECH, self).__init__(name=name, **kwargs) 14 | 15 | def get_data(self, source, mode): 16 | data_dict = io.loadmat(source) 17 | 18 | if mode == 'train': 19 | X = data_dict['train_data'] 20 | Y = data_dict['train_labels'] 21 | elif mode == 'valid': 22 | X = data_dict['val_data'] 23 | Y = data_dict['val_labels'] 24 | elif mode == 'test': 25 | X = data_dict['test_data'] 26 | Y = data_dict['test_labels'] 27 | else: 28 | raise ValueError() 29 | 30 | return X.astype(floatX), Y 31 | -------------------------------------------------------------------------------- /cortex/datasets/basic/cifar.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for cifar 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import cPickle 7 | import gzip 8 | import multiprocessing as mp 9 | import numpy as np 10 | from os import path 11 | import PIL 12 | import random 13 | import sys 14 | from sys import stdout 15 | import theano 16 | from theano import tensor as T 17 | import time 18 | import traceback 19 | 20 | from ...utils import floatX 21 | from ...utils.tools import ( 22 | concatenate, 23 | init_rngs, 24 | resolve_path, 25 | rng_, 26 | scan 27 | ) 28 | from ...utils.vis_utils import tile_raster_images 29 | 30 | 31 | def get_iter(inf=False, batch_size=128): 32 | return mnist_iterator(inf=inf, batch_size=batch_size) 33 | 34 | class CIFAR(object): 35 | '''CIFAR dataset. 
36 | 37 | ''' 38 | def __init__(self, batch_size=128, source=None, 39 | restrict_digits=None, mode='train', shuffle=True, inf=False, 40 | stop=None, out_path=None): 41 | source = resolve_path(source) 42 | self.name = 'cifar' 43 | 44 | X, Y = self.get_data(source, mode) 45 | self.mode = mode 46 | 47 | self.image_shape = (32, 32) 48 | self.out_path = out_path 49 | 50 | if restrict_digits is None: 51 | n_classes = 10 52 | else: 53 | n_classes = len(restrict_digits) 54 | 55 | O = np.zeros((X.shape[0], n_classes), dtype='float32') 56 | 57 | if restrict_digits is None: 58 | for idx in xrange(X.shape[0]): 59 | O[idx, Y[idx]] = 1.; 60 | else: 61 | print 'Restricting to classes %s' % restrict_digits 62 | new_X = [] 63 | i = 0 64 | for j in xrange(X.shape[0]): 65 | if Y[j] in restrict_digits: 66 | new_X.append(X[j]) 67 | c_idx = restrict_digits.index(Y[j]) 68 | O[i, c_idx] = 1.; 69 | i += 1 70 | X = np.float32(new_X) 71 | 72 | if stop is not None: 73 | X = X[:stop] 74 | 75 | self.n = X.shape[0] 76 | print 'Data shape: %d x %d' % X.shape 77 | 78 | self.dims = dict(cifar=X.shape[1], label=len(np.unique(Y))) 79 | self.distributions = dict(cifar='gaussian', label='multinomial') 80 | 81 | self.shuffle = shuffle 82 | self.pos = 0 83 | self.bs = batch_size 84 | self.inf = inf 85 | self.next = self._next 86 | self.X = X 87 | self.O = O 88 | 89 | self.mean_image = self.X.mean(axis=0) 90 | self.X -= self.mean_image 91 | self.X /= self.X.std(axis=0) 92 | 93 | if self.shuffle: 94 | self.randomize() 95 | 96 | def get_data(self, source, mode, greyscale=True): 97 | if not greyscale: 98 | raise NotImplementedError() 99 | if source is None: 100 | raise ValueError('No source file provided') 101 | print 'Loading CIFAR-10 ({mode})'.format(mode=mode) 102 | 103 | X = [] 104 | Y = [] 105 | 106 | if mode == 'train': 107 | for i in xrange(1, 5): 108 | with open(path.join(source, 'data_batch_%d' % i)) as f: 109 | d = cPickle.load(f) 110 | X.append(d['data']) 111 | Y.append(d['labels']) 112 | elif mode == 'valid': 113 | with open(path.join(source, 'data_batch_5')) as f: 114 | d = cPickle.load(f) 115 | X.append(d['data']) 116 | Y.append(d['labels']) 117 | elif mode == 'test': 118 | with open(path.join(source, 'test_batch')) as f: 119 | d = cPickle.load(f) 120 | X.append(d['data']) 121 | Y.append(d['labels']) 122 | else: 123 | raise ValueError() 124 | 125 | X = np.concatenate(X) 126 | Y = np.concatenate(Y) 127 | 128 | if greyscale: 129 | div = X.shape[1] // 3 130 | X_r = X[:, :div] 131 | X_b = X[:, div:2*div] 132 | X_g = X[:, 2*div:] 133 | X = (X_r + X_b + X_g) / 3.0 134 | 135 | X = X.astype(floatX) 136 | X = X / float(X.max()) 137 | X = (X - X.mean(axis=0))# / X.std(axis=0) 138 | 139 | return X, Y 140 | 141 | def __iter__(self): 142 | return self 143 | 144 | def randomize(self): 145 | rnd_idx = np.random.permutation(np.arange(0, self.n, 1)) 146 | self.X = self.X[rnd_idx, :] 147 | self.O = self.O[rnd_idx, :] 148 | 149 | def next(self): 150 | raise NotImplementedError() 151 | 152 | def reset(self): 153 | self.pos = 0 154 | if self.shuffle: 155 | self.randomize() 156 | 157 | def _next(self, batch_size=None): 158 | if batch_size is None: 159 | batch_size = self.bs 160 | 161 | if self.pos == -1: 162 | self.reset() 163 | 164 | if not self.inf: 165 | raise StopIteration 166 | 167 | x = self.X[self.pos:self.pos+batch_size] 168 | y = self.O[self.pos:self.pos+batch_size] 169 | 170 | self.pos += batch_size 171 | if self.pos + batch_size > self.n: 172 | self.pos = -1 173 | 174 | return OrderedDict(cifar=x, labels=y) 175 | 176 | def 
save_images(self, x, imgfile, transpose=False, x_limit=None): 177 | if len(x.shape) == 2: 178 | x = x.reshape((x.shape[0], 1, x.shape[1])) 179 | 180 | if x_limit is not None and x.shape[0] > x_limit: 181 | x = np.concatenate([x, np.zeros((x_limit - x.shape[0] % x_limit, 182 | x.shape[1], 183 | x.shape[2])).astype('float32')], 184 | axis=0) 185 | x = x.reshape((x_limit, x.shape[0] * x.shape[1] // x_limit, x.shape[2])) 186 | 187 | tshape = x.shape[0], x.shape[1] 188 | x = x.reshape((x.shape[0] * x.shape[1], x.shape[2])) 189 | image = self.show(x.T, tshape, transpose=transpose) 190 | image.save(imgfile) 191 | 192 | def show(self, image, tshape, transpose=False): 193 | fshape = self.image_shape 194 | if transpose: 195 | X = image 196 | else: 197 | X = image.T 198 | 199 | return PIL.Image.fromarray(tile_raster_images( 200 | X=X, img_shape=fshape, tile_shape=tshape, 201 | tile_spacing=(1, 1))) 202 | 203 | def translate(self, x): 204 | return x -------------------------------------------------------------------------------- /cortex/datasets/basic/euclidean.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Simple dataset with random points arrange in a space. 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import matplotlib 7 | from matplotlib import pylab as plt 8 | import numpy as np 9 | from progressbar import ( 10 | Bar, 11 | ProgressBar, 12 | Timer 13 | ) 14 | import random 15 | import scipy 16 | 17 | from .. import Dataset 18 | from ...utils import floatX, intX, pi 19 | from ...utils.tools import init_rngs 20 | 21 | 22 | class Euclidean(Dataset): 23 | def __init__(self, dims=2, n_samples=10000, **kwargs): 24 | super(Euclidean, self).__init__(**kwargs) 25 | init_rngs(self, **kwargs) 26 | 27 | self.collection = None 28 | self.X = self.get_data(n_samples, dims) 29 | self.make_fibrous() 30 | 31 | self.n = self.X.shape[0] 32 | 33 | self.dims = dict() 34 | self.dims[self.name] = dims 35 | self.distributions = dict() 36 | self.distributions[self.name] = 'gaussian' 37 | 38 | self.mean_image = self.X.mean(axis=0) 39 | 40 | def gravity(self, x, y, r=0.1, G=0.0001): 41 | d = np.sqrt(((y[:, None, :] - x[None, :, :]) ** 2).sum(axis=2)) 42 | 43 | d_cb = d ** 3 44 | d_r = y[:, None, :] - x[None, :, :] 45 | 46 | f = -(G * d_r / d_cb[:, :, None]) 47 | c = (d < r).astype(intX) 48 | f = f * c[:, :, None] 49 | f = f.sum(axis=0) 50 | 51 | return f 52 | 53 | def make_circle(self, r=0.3, G=0.05): 54 | for k in xrange(10): 55 | x = self.X[:, 0] - 0.5 56 | y = self.X[:, 1] - 0.5 57 | alpha = np.sqrt(x ** 2 + y ** 2) / r 58 | d = np.array([x * (1 - alpha), y * (1 - alpha)]).astype(floatX).T 59 | f = G * d 60 | self.X += f 61 | self.X = np.clip(self.X, 0, 1) 62 | 63 | def make_spiral(self, r=0.25, G=0.0001): 64 | for k in range(10): 65 | x = self.X[:, 0] - 0.5 66 | y = self.X[:, 1] - 0.5 67 | theta = np.arctan2(x, y) 68 | ds = [r * (i + theta / (2 * np.pi)) for i in range(int(1 / r))] 69 | alphas = [np.sqrt(x ** 2 + y ** 2) / d for d in ds] 70 | for alpha in alphas: 71 | d = np.concatenate([(x * (1 - alpha))[:, None], (y * (1 - alpha))[:, None]], axis=1) 72 | f = -G * d / (d ** 2).sum(axis=1, keepdims=True) 73 | self.X += f 74 | self.X = np.clip(self.X, 0, 1) 75 | 76 | rs = np.arange(0, 0.7, 0.001) 77 | theta = 2 * np.pi * rs / r 78 | y = rs * np.sin(theta) + 0.5 79 | x = -rs * np.cos(theta) + 0.5 80 | spiral = zip(x, y) 81 | self.collection = matplotlib.collections.LineCollection([spiral], colors='k') 82 | 83 | def make_ex(self): 84 | x = self.rng.normal(loc=0.5, 
scale=0.05, size=self.X.shape).astype(floatX) 85 | t1 = self.rng.uniform(low=-0.5, high=0.5, size=(self.X.shape[0] // 2,)).astype(floatX) 86 | t2 = self.rng.uniform(low=-0.5, high=0.5, size=t1.shape).astype(floatX) 87 | self.X = np.concatenate([x[:x.shape[0]//2] + t1[:, None], x[x.shape[0]//2:] + t2[:, None] * np.array([1, -1])[None, :]]).astype(floatX) 88 | 89 | self.collection = matplotlib.collections.LineCollection([[(0, 0), (1, 1)], [(0, 1), (1, 0)]], colors='k') 90 | 91 | def make_modes(self, r=0.3, N=5, G=0.01): 92 | modes = [2 * np.pi * n / N for n in range(N)] 93 | self.X = np.concatenate([self.rng.normal( 94 | loc=0.5, scale=0.05, size=(self.X.shape[0] // N, self.X.shape[1])).astype(floatX) 95 | + np.array([(r * np.cos(mode)), (r * np.sin(mode))]).astype(floatX)[None, :] 96 | for mode in modes]) 97 | 98 | def make_bullseye(self, r=0.3, G=0.08): 99 | self.make_circle(r=r, G=G) 100 | self.X = np.concatenate( 101 | [self.X, 102 | self.rng.normal(loc=0.5, 103 | scale=0.05, 104 | size=(self.X.shape[0] // 10, 105 | self.X.shape[1]))]).astype(floatX) 106 | 107 | def make_fibrous(self, n_points=40): 108 | y = self.rng.uniform(size=(n_points, self.X.shape[1])).astype(floatX) 109 | 110 | for k in xrange(10): 111 | f = self.gravity(self.X, y) 112 | self.X += f 113 | self.X = np.clip(self.X, 0, 1) 114 | 115 | def randomize(self): 116 | rnd_idx = np.random.permutation(np.arange(0, self.n, 1)) 117 | self.X = self.X[rnd_idx, :] 118 | 119 | def get_data(self, n_points, dims): 120 | x = self.rng.uniform(size=(n_points, dims)).astype(floatX) 121 | return x 122 | 123 | def next(self, batch_size=None): 124 | if batch_size is None: 125 | batch_size = self.batch_size 126 | 127 | if self.pos == -1: 128 | self.reset() 129 | 130 | if not self.inf: 131 | raise StopIteration 132 | 133 | x = self.X[self.pos:self.pos+batch_size] 134 | 135 | self.pos += batch_size 136 | if self.pos + batch_size > self.n: 137 | self.pos = -1 138 | 139 | outs = OrderedDict() 140 | outs[self.name] = x 141 | 142 | return outs 143 | 144 | def save_images(self, X, imgfile, density=False): 145 | ax = plt.axes() 146 | x = X[:, 0] 147 | y = X[:, 1] 148 | if density: 149 | xy = np.vstack([x,y]) 150 | z = scipy.stats.gaussian_kde(xy)(xy) 151 | ax.scatter(x, y, c=z, marker='o', edgecolor='') 152 | else: 153 | ax.scatter(x, y, marker='o', c=range(x.shape[0]), 154 | cmap=plt.cm.coolwarm) 155 | 156 | if self.collection is not None: 157 | self.collection.set_transform(ax.transData) 158 | ax.add_collection(self.collection) 159 | 160 | 161 | ax.text(x[0], y[0], str('start'), transform=ax.transAxes) 162 | ax.axis([-0.2, 1.2, -0.2, 1.2]) 163 | fig = plt.gcf() 164 | 165 | plt.savefig(imgfile) 166 | plt.close() -------------------------------------------------------------------------------- /cortex/datasets/basic/horses.py: -------------------------------------------------------------------------------- 1 | import cPickle 2 | from glob import glob 3 | import gzip 4 | import multiprocessing as mp 5 | import numpy as np 6 | from os import path 7 | import PIL 8 | import random 9 | import sys 10 | from sys import stdout 11 | import theano 12 | from theano import tensor as T 13 | import traceback 14 | 15 | from .mnist import MNIST 16 | from ...utils.vis_utils import tile_raster_images 17 | 18 | 19 | def reshape_image(img, shape, crop_image=True): 20 | if crop_image: 21 | bbox = img.getbbox() 22 | img = img.crop(bbox) 23 | 24 | img.thumbnail(shape, PIL.Image.BILINEAR) 25 | new_img = PIL.Image.new('L', shape) 26 | offset_x = max((shape[0] - img.size[0]) / 
2, 0) 27 | offset_y = max((shape[1] - img.size[1]) / 2, 0) 28 | offset_tuple = (offset_x, offset_y) 29 | new_img.paste(img, offset_tuple) 30 | return new_img 31 | 32 | class Horses(object): 33 | def __init__(self, batch_size=10, mode='train', 34 | source=None, inf=False, stop=None, shuffle=True, 35 | image_shape=None, out_path=None): 36 | 37 | assert source is not None 38 | print 'Loading horses ({mode})'.format(mode=mode) 39 | 40 | self.image_shape = image_shape 41 | 42 | data = [] 43 | for f in glob(path.join(path.abspath(source), '*.png')): 44 | img = PIL.Image.open(f) 45 | if self.image_shape is None: 46 | self.image_shape = img.size 47 | img = reshape_image(img, self.image_shape) 48 | data.append(np.array(img)) 49 | 50 | self.image_shape = self.image_shape[1], self.image_shape[0] 51 | 52 | X = np.array(data) 53 | X = X.reshape((X.shape[0], X.shape[1] * X.shape[2])) 54 | X = (X - X.min()) / (X.max() - X.min()) 55 | 56 | self.n = X.shape[0] 57 | if stop is not None: 58 | X = X[:stop] 59 | self.n = stop 60 | self.dims = dict(horses=X.shape[1]) 61 | self.acts = dict(horses='T.nnet.sigmoid') 62 | 63 | self.shuffle = shuffle 64 | self.pos = 0 65 | self.bs = batch_size 66 | self.inf = inf 67 | self.X = X 68 | self.next = self._next 69 | self.mean_image = self.X.mean(axis=0) 70 | 71 | if self.shuffle: 72 | print 'Shuffling horses' 73 | self.randomize() 74 | 75 | def __iter__(self): 76 | return self 77 | 78 | def randomize(self): 79 | rnd_idx = np.random.permutation(np.arange(0, self.n, 1)) 80 | self.X = self.X[rnd_idx, :] 81 | 82 | def next(self): 83 | raise NotImplementedError() 84 | 85 | def _next(self, batch_size=None): 86 | if batch_size is None: 87 | batch_size = self.bs 88 | 89 | if self.pos == -1: 90 | self.reset() 91 | 92 | if not self.inf: 93 | raise StopIteration 94 | 95 | x = self.X[self.pos:self.pos+batch_size] 96 | 97 | self.pos += batch_size 98 | 99 | return x, None 100 | 101 | def save_images(self, x, imgfile, transpose=False, x_limit=None): 102 | if len(x.shape) == 2: 103 | x = x.reshape((x.shape[0], 1, x.shape[1])) 104 | 105 | if x_limit is not None and x.shape[0] > x_limit: 106 | x = np.concatenate([x, np.zeros((x_limit - x.shape[0] % x_limit, 107 | x.shape[1], 108 | x.shape[2])).astype('float32')], 109 | axis=0) 110 | x = x.reshape((x_limit, x.shape[0] * x.shape[1] // x_limit, x.shape[2])) 111 | 112 | tshape = x.shape[0], x.shape[1] 113 | x = x.reshape((x.shape[0] * x.shape[1], x.shape[2])) 114 | image = self.show(x.T, tshape, transpose=transpose) 115 | image.save(imgfile) 116 | 117 | def show(self, image, tshape, transpose=False): 118 | fshape = self.dims 119 | if transpose: 120 | X = image 121 | else: 122 | X = image.T 123 | 124 | return PIL.Image.fromarray(tile_raster_images( 125 | X=X, img_shape=fshape, tile_shape=tshape, 126 | tile_spacing=(1, 1))) 127 | -------------------------------------------------------------------------------- /cortex/datasets/basic/mnist.py: -------------------------------------------------------------------------------- 1 | ''' 2 | MNIST dataset 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import cPickle 7 | import gzip 8 | import multiprocessing as mp 9 | import numpy as np 10 | from os import path 11 | import PIL 12 | import random 13 | import sys 14 | from sys import stdout 15 | import theano 16 | from theano import tensor as T 17 | import time 18 | import traceback 19 | 20 | from .. 
import BasicDataset, Dataset 21 | from ...utils.tools import ( 22 | concatenate, 23 | init_rngs, 24 | resolve_path, 25 | rng_, 26 | scan 27 | ) 28 | from ...utils.vis_utils import tile_raster_images 29 | 30 | 31 | class MNIST(BasicDataset): 32 | '''MNIST dataset iterator. 33 | 34 | ''' 35 | def __init__(self, source=None, restrict_digits=None, mode='train', 36 | binarize=False, name='mnist', 37 | out_path=None, **kwargs): 38 | '''Init function for MNIST. 39 | 40 | Args: 41 | source (str): Path to source gzip file. 42 | restrict_digits (Optional[list]): list of digits to restrict 43 | iterator to. 44 | mode (str): `train`, `test`, or `valid`. 45 | out_path (Optional[str]): path for saving visualization output. 46 | name (str): name of dataset. 47 | **kwargs: extra keyword arguments passed to BasicDataset 48 | 49 | ''' 50 | 51 | source = resolve_path(source) 52 | 53 | if source is None: 54 | raise ValueError('No source file provided') 55 | print 'Loading {name} ({mode}) from {source}'.format( 56 | name=name, mode=mode, source=source) 57 | 58 | X, Y = self.get_data(source, mode) 59 | 60 | if restrict_digits is not None: 61 | X = np.array([x for i, x in enumerate(X) if Y[i] in restrict_digits]) 62 | Y = np.array([y for i, y in enumerate(Y) if Y[i] in restrict_digits]) 63 | 64 | data = {name: X, 'label': Y} 65 | distributions = {name: 'binomial', 'label': 'multinomial'} 66 | 67 | super(MNIST, self).__init__(data, distributions=distributions, 68 | name=name, mode=mode, **kwargs) 69 | 70 | self.image_shape = (28, 28) 71 | self.out_path = out_path 72 | 73 | if binarize: 74 | self.data[name] = rng_.binomial( 75 | p=self.data[name], size=self.data[name].shape, n=1).astype('float32') 76 | 77 | if self.shuffle: 78 | self.randomize() 79 | 80 | def get_data(self, source, mode): 81 | '''Fetch data from gzip pickle. 82 | 83 | Args: 84 | source (str): path to source. 85 | mode (str): `train`, `test`, or `valid`. 86 | 87 | ''' 88 | with gzip.open(source, 'rb') as f: 89 | x = cPickle.load(f) 90 | 91 | if mode == 'train': 92 | X = np.float32(x[0][0]) 93 | Y = np.float32(x[0][1]) 94 | elif mode == 'valid': 95 | X = np.float32(x[1][0]) 96 | Y = np.float32(x[1][1]) 97 | elif mode == 'test': 98 | X = np.float32(x[2][0]) 99 | Y = np.float32(x[2][1]) 100 | else: 101 | raise ValueError() 102 | 103 | return X, Y 104 | 105 | def save_images(self, x, imgfile, transpose=False, x_limit=None): 106 | '''Saves visualization. 107 | 108 | Args: 109 | x (numpy.array): array to be visualized. 110 | imgfile (str): output file. 111 | transpose (bool): if True, then transpose images. 112 | x_limit (bool): limit montage to x samples in the x direction. 113 | 114 | ''' 115 | if len(x.shape) == 2: 116 | x = x.reshape((x.shape[0], 1, x.shape[1])) 117 | 118 | if x_limit is not None and x.shape[0] > x_limit: 119 | x = np.concatenate([x, np.zeros((x_limit - x.shape[0] % x_limit, 120 | x.shape[1], 121 | x.shape[2])).astype('float32')], 122 | axis=0) 123 | x = x.reshape((x_limit, x.shape[0] * x.shape[1] // x_limit, x.shape[2])) 124 | 125 | if transpose: 126 | x = x.reshape((x.shape[0], x.shape[1], self.image_shape[0], self.image_shape[1])) 127 | x = x.transpose(0, 1, 3, 2) 128 | x = x.reshape((x.shape[0], x.shape[1], self.image_shape[0] * self.image_shape[1])) 129 | 130 | tshape = x.shape[0], x.shape[1] 131 | x = x.reshape((x.shape[0] * x.shape[1], x.shape[2])) 132 | image = self.show(x.T, tshape) 133 | image.save(imgfile) 134 | 135 | def show(self, image, tshape): 136 | '''Convers to PIL.image. 
137 | 138 | Args: 139 | image (numpy.array) 140 | tshape (tuple). 141 | 142 | Returns: 143 | PIL.Image: image to visualize. 144 | 145 | ''' 146 | fshape = self.image_shape 147 | X = image.T 148 | 149 | return PIL.Image.fromarray(tile_raster_images( 150 | X=X, img_shape=fshape, tile_shape=tshape, 151 | tile_spacing=(1, 1))) 152 | 153 | def translate(self, x): 154 | return x 155 | -------------------------------------------------------------------------------- /cortex/datasets/basic/uci.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Iterator for UCI dataset 3 | ''' 4 | 5 | import h5py 6 | import numpy as np 7 | 8 | from ...utils import floatX 9 | from ...utils.tools import ( 10 | concatenate, 11 | init_rngs, 12 | rng_, 13 | scan 14 | ) 15 | 16 | class UCI(object): 17 | def __init__(self, batch_size=100, source=None, mode='train', shuffle=True, 18 | inf=False, name='uci', stop=None): 19 | 20 | if source is None: 21 | raise ValueError('No source file provided') 22 | print 'Loading {name} ({mode} from {source})'.format( 23 | name=name, mode=mode, source=source) 24 | 25 | X = self.get_data(source, mode) 26 | if stop is not None: 27 | X = X[:stop] 28 | self.n = X.shape[0] 29 | self.dims = dict() 30 | self.dims[name] = X.shape[1] 31 | self.acts = dict() 32 | self.acts[name] = 'T.nnet.sigmoid' 33 | 34 | self.shuffle = shuffle 35 | self.pos = 0 36 | self.bs = batch_size 37 | self.inf = inf 38 | self.next = self._next 39 | 40 | self.X = X 41 | self.mean_image = np.zeros((X.shape[1])).astype(floatX) 42 | 43 | if self.shuffle: 44 | self.randomize() 45 | 46 | def get_data(self, source, mode): 47 | with h5py.File(source, 'r') as f: 48 | X = f[mode] 49 | X = X[:X.shape[0]].astype(floatX) 50 | 51 | return X 52 | 53 | def __iter__(self): 54 | return self 55 | 56 | def randomize(self): 57 | rnd_idx = np.random.permutation(np.arange(0, self.n, 1)) 58 | self.X = self.X[rnd_idx, :] 59 | 60 | def next(self): 61 | raise NotImplementedError() 62 | 63 | def reset(self): 64 | self.pos = 0 65 | if self.shuffle: 66 | self.randomize() 67 | 68 | def _next(self, batch_size=None): 69 | if batch_size is None: 70 | batch_size = self.bs 71 | 72 | if self.pos == -1: 73 | self.reset() 74 | 75 | if not self.inf: 76 | raise StopIteration 77 | 78 | x = self.X[self.pos:self.pos+batch_size] 79 | 80 | self.pos += batch_size 81 | if self.pos + batch_size > self.n: 82 | self.pos = -1 83 | 84 | return x, None 85 | 86 | def save_images(self, x, imgfile, transpose=False, x_limit=None): 87 | pass 88 | -------------------------------------------------------------------------------- /cortex/datasets/neuroimaging/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Neuroimaging data classes and utilities 3 | ''' 4 | 5 | import numpy as np 6 | import os 7 | from os import path 8 | import yaml 9 | 10 | from ...utils.tools import resolve_path 11 | from ...utils.extra import download_data, unzip 12 | 13 | 14 | def fetch_neuroimaging_data(): 15 | '''Fetch the neuroimaging dataset for demos. 
16 | 17 | ''' 18 | url = 'http://mialab.mrn.org/data/neuroimaging/neuroimaging.zip' 19 | out_dir = resolve_path('$data') 20 | download_data(url, out_dir) 21 | unzip(path.join(out_dir, 'neuroimaging.zip'), out_dir) 22 | os.remove(path.join(out_dir, 'neuroimaging.zip')) 23 | 24 | ni_dir = path.join(out_dir, 'neuroimaging') 25 | 26 | unzip(path.join(ni_dir, 'VBM_test.zip'), ni_dir) 27 | os.remove(path.join(ni_dir, 'VBM_test.zip')) 28 | 29 | unzip(path.join(ni_dir, 'AOD_test.zip'), ni_dir) 30 | os.remove(path.join(ni_dir, 'AOD_test.zip')) 31 | 32 | yaml_file = path.join(ni_dir, 'VBM_test', 'VBM.yaml') 33 | with open(yaml_file, 'w') as yf: 34 | yf.write( 35 | yaml.dump( 36 | dict( 37 | anat_file=path.join(ni_dir, 'ch2better_whitebg_aligned2EPI_V4.nii'), 38 | data=[path.join(ni_dir, 'VBM_test', 'VBM_0.npy'), 39 | path.join(ni_dir, 'VBM_test', 'VBM_1.npy')], 40 | mask=path.join(ni_dir, 'VBM_test', 'VBM_mask.npy'), 41 | name='VBM', 42 | nifti=path.join(ni_dir, 'base_nifti.nii'), 43 | sites=path.join(ni_dir, 'VBM_test', 'VBM_sites.npy'), 44 | tmp_path=path.join(ni_dir, 'VBM_test', 'VBM_tmp') 45 | ) 46 | ) 47 | ) 48 | 49 | 50 | yaml_file = path.join(ni_dir, 'AOD_test', 'AOD.yaml') 51 | with open(yaml_file, 'w') as yf: 52 | yf.write( 53 | yaml.dump( 54 | dict( 55 | anat_file=path.join(ni_dir, 'ch2better_whitebg_aligned2EPI_V4'), 56 | data=[path.join(ni_dir, 'AOD_test', 'AOD_0.npy'), 57 | path.join(ni_dir, 'AOD_test', 'AOD_1.npy')], 58 | mask=path.join(ni_dir, 'AOD_test', 'AOD_mask.npy'), 59 | name='AOD', 60 | nifti=path.join(ni_dir, 'base_nifti.nii'), 61 | tmp_path=path.join(ni_dir, 'AOD_test', 'AOD_tmp') 62 | ) 63 | ) 64 | ) 65 | 66 | def resolve(dataset): 67 | '''Resolve neuroimaging dataset. 68 | 69 | Args: 70 | dataset (str): dataset name 71 | 72 | ''' 73 | from .fmri import FMRI, FMRI_IID 74 | from .mri import MRI 75 | from .snp import SNP 76 | 77 | if dataset == 'fmri': 78 | C = FMRI 79 | elif dataset == 'fmri_iid': 80 | C = FMRI_IID 81 | elif dataset == 'mri': 82 | C = MRI 83 | elif dataset == 'snp': 84 | C = SNP 85 | else: 86 | raise ValueError(dataset) 87 | return C 88 | 89 | 90 | def medfilt(x, k): 91 | ''' 92 | Apply a length-k median filter to a 1D array x. 93 | 94 | Boundaries are extended by repeating endpoints. 95 | 96 | Args: 97 | x (numpy.array) 98 | k (int) 99 | 100 | Returns: 101 | numpy.array 102 | ''' 103 | assert k % 2 == 1, 'Median filter length must be odd.' 104 | assert x.ndim == 1, 'Input must be one-dimensional.' 105 | k2 = (k - 1) // 2 106 | y = np.zeros((len(x), k), dtype=x.dtype) 107 | y[:, k2] = x 108 | for i in range(k2): 109 | j = k2 - i 110 | y[j:, i] = x[:-j] 111 | y[:j, i] = x[0] 112 | y[:-j, -(i+1)] = x[j:] 113 | y[-j:, -(i+1)] = x[-1] 114 | return np.median(y, axis=1) -------------------------------------------------------------------------------- /cortex/datasets/neuroimaging/fmri.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for fMRI data 3 | ''' 4 | 5 | import cPickle 6 | from collections import OrderedDict 7 | from glob import glob 8 | import nipy 9 | from nipy.core.api import Image 10 | import numpy as np 11 | import os 12 | from os import path 13 | import pprint 14 | import random 15 | import theano 16 | import yaml 17 | 18 | from ...analysis.mri import rois 19 | from .. import Dataset, make_one_hot 20 | from .mri import MRI 21 | from . 
import nifti_viewer 22 | from ...utils import floatX 23 | from ...utils.tools import resolve_path 24 | 25 | 26 | class FMRI_IID(MRI): 27 | '''fMRI data treated as IID. 28 | 29 | Use this dataset if you plan to use a model that needs identical and 30 | independently sampled data. 31 | 32 | Attributes: 33 | extras (dict): dictionary of additional arrays for analysis. 34 | 35 | ''' 36 | def __init__(self, name='fmri_iid', **kwargs): 37 | super(FMRI_IID, self).__init__(name=name, **kwargs) 38 | 39 | def get_data(self, source): 40 | '''Fetch the fMRI dataset. 41 | 42 | fMRI dataset source is a yaml file. 43 | An example format of said yaml is:: 44 | 45 | name: 'aod', 46 | data: [ 47 | '/Users/devon/Data/AOD/AOD_0.npy', 48 | '/Users/devon/Data/AOD/AOD_1.npy' 49 | ], 50 | mask: '/Users/devon/Data/AOD/AOD_mask.npy', 51 | nifti: '//Users/devon/Data/VBM/H000A.nii', 52 | tmp_path: '/Users/devon/Data/mri_tmp/', 53 | anat_file: '/Users/devon/Data/ch2better_whitebg_aligned2EPI_V4.nii', 54 | 55 | ''' 56 | print('Loading file locations from %s' % source) 57 | source_dict = yaml.load(open(source)) 58 | print('Source locations: %s' % pprint.pformat(source_dict)) 59 | 60 | def unpack_source(name=None, nifti=None, mask=None, anat_file=None, 61 | tmp_path=None, pca=None, data=None, **kwargs): 62 | return (name, nifti, mask, anat_file, tmp_path, pca, data, kwargs) 63 | 64 | (name, nifti_file, mask_file, self.anat_file, 65 | self.tmp_path, self.pca_file, data_files, extras) = unpack_source( 66 | **source_dict) 67 | 68 | self.base_nifti_file = nifti_file 69 | if not path.isdir(self.tmp_path): 70 | os.mkdir(self.tmp_path) 71 | 72 | mask = np.load(mask_file) 73 | if not np.all(np.bitwise_or(mask == 0, mask == 1)): 74 | raise ValueError("Mask has incorrect values.") 75 | self.mask = mask 76 | 77 | if self.pca_file is not None: 78 | try: 79 | with open(self.pca_file, 'rb') as f: 80 | self.pca = cPickle.load(f) 81 | except (IOError, EOFError): 82 | self.pca = None 83 | else: 84 | self.pca = None 85 | 86 | self.extras = dict((k, np.load(v)) for k, v in extras.iteritems()) 87 | 88 | if isinstance(data_files, str): 89 | data_files = [data_files] 90 | X = [] 91 | Y = [] 92 | for i, data_file in enumerate(data_files): 93 | print 'Loading %s' % data_file 94 | X_ = np.load(data_file) 95 | X.append(X_.astype(floatX)) 96 | Y.append((np.zeros((X_.shape[0] * X_.shape[1],)) + i).astype(floatX)) 97 | 98 | X = np.concatenate(X, axis=0) 99 | Y = np.concatenate(Y, axis=0) 100 | 101 | self.n_subjects, self.n_scans, _, _, _ = X.shape 102 | X = X.reshape((X.shape[0] * X.shape[1],) + X.shape[2:]) 103 | 104 | return X, Y 105 | 106 | 107 | class FMRI(FMRI_IID): 108 | '''fMRI dataset class. 109 | 110 | Treats fMRI as sequences, instead as IID as with FMRI_IID. 111 | 112 | Attributes: 113 | window (int): window size of fMRI batches. 114 | stride (int): stride of fMRI batches. 115 | n (int): number of subjects. 116 | idx (list): indices of subject, scan-window pairs. 117 | 118 | ''' 119 | 120 | def __init__(self, name='fmri', window=10, stride=1, idx=None, **kwargs): 121 | '''Init function for fMRI. 122 | 123 | Args: 124 | name (str): name of dataset. 125 | window (int): window size of fMRI batches. 126 | stride (int): stride of fMRI batches. 127 | idx (list): indices of dataset (subjects). 128 | **kwargs: keyword arguments for initializaiton. 
129 | 130 | ''' 131 | super(FMRI, self).__init__(name=name, **kwargs) 132 | 133 | self.window = window 134 | self.stride = stride 135 | 136 | self.X = self.X.reshape((self.n_subjects, self.n_scans, self.X.shape[1])) 137 | self.Y = self.Y.reshape((self.n_subjects, self.n_scans, self.Y.shape[1])) 138 | 139 | if idx is not None: 140 | self.X = self.X[idx] 141 | self.Y = self.Y[idx] 142 | self.n_subjects = len(idx) 143 | 144 | scan_idx = range(0, self.n_scans - window + 1, stride) 145 | scan_idx_e = scan_idx * self.n_subjects 146 | subject_idx = range(self.n_subjects) 147 | # Similar to np.repeat, but using list comprehension. 148 | subject_idx_e = [i for j in [[s] * len(scan_idx) for s in subject_idx] 149 | for i in j] 150 | # idx is list of (subject, scan) 151 | self.idx = zip(subject_idx_e, scan_idx_e) 152 | self.n = len(self.idx) 153 | 154 | if self.shuffle: 155 | self.randomize() 156 | 157 | def randomize(self): 158 | '''Randomize the fMRI dataset. 159 | 160 | Shuffles the idx. 161 | 162 | ''' 163 | rnd_idx = np.random.permutation(np.arange(0, self.n, 1)) 164 | self.idx = [self.idx[i] for i in rnd_idx] 165 | 166 | def next(self, batch_size=None): 167 | '''Draws the next batch of windowed fMRI. 168 | 169 | Args: 170 | batch_size (Optional[int]): number of windows in batch. 171 | 172 | Returns: 173 | dict: dictionary of batched data 174 | 175 | ''' 176 | if batch_size is None: 177 | batch_size = self.batch_size 178 | 179 | if self.pos == -1: 180 | self.reset() 181 | raise StopIteration 182 | 183 | idxs = [self.idx[i] for i in range(self.pos, self.pos+batch_size)] 184 | x = np.array([self.X[i][j:j+self.window] for i, j in idxs]).astype(floatX).transpose(1, 0, 2) 185 | y = np.array([self.Y[i][j:j+self.window] for i, j in idxs]).astype(floatX).transpose(1, 0, 2) 186 | 187 | self.pos += batch_size 188 | 189 | if self.pos + batch_size > self.n: 190 | self.pos = -1 191 | 192 | rval = { 193 | self.name: x, 194 | 'group': y 195 | } 196 | 197 | return rval 198 | -------------------------------------------------------------------------------- /cortex/datasets/neuroimaging/simTB.py: -------------------------------------------------------------------------------- 1 | ''' 2 | SimTB dataset class. 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import numpy as np 7 | 8 | from .. import Dataset 9 | from utils import floatX 10 | from utils.tools import warn_kwargs 11 | 12 | 13 | class SimTB(Dataset): 14 | def __init__(self, source=None, **kwargs): 15 | kwargs = super(SimTB, self).__init__(**kwargs) 16 | 17 | if source is None: 18 | raise ValueError('No source provided') 19 | 20 | # Fetch simTB data from "source" source can be file, directory, etc. 21 | self.X = self.get_data(source) 22 | self.n = self.X.shape[0] 23 | 24 | # Reference for the dimension of the dataset. A dict is used for 25 | # multimodal data (e.g., mri and labels) 26 | self.dims = dict() 27 | self.dims[self.name] = self.X.shape[1] 28 | 29 | # This is reference for models to decide how the data should be modelled 30 | # E.g. with a binomial or gaussian variable 31 | self.distributions = dict() 32 | self.distributions[self.name] = 'gaussian' 33 | 34 | # We will probably center the data in the main script using this 35 | # global mean image. 36 | self.mean_image = self.X.mean(axis=0) 37 | 38 | warn_kwargs(self, kwargs) 39 | 40 | def get_data(self, source): 41 | ''' 42 | Fetch the data from source. 43 | ''' 44 | 45 | raise NotImplementedError('Eswar todo') 46 | 47 | def next(self, batch_size=None): 48 | ''' 49 | Iterate the data. 
50 | ''' 51 | 52 | if batch_size is None: 53 | batch_size = self.batch_size 54 | 55 | if self.pos == -1: 56 | self.reset() 57 | 58 | if not self.inf: 59 | raise StopIteration 60 | 61 | x = self.X[self.pos:self.pos+batch_size] 62 | 63 | self.pos += batch_size 64 | if self.pos + batch_size > self.n: 65 | self.pos = -1 66 | 67 | outs = OrderedDict() 68 | outs[self.name] = x 69 | 70 | return outs 71 | 72 | def save_images(self, x, imgfile): 73 | ''' 74 | Save images for visualization. 75 | ''' 76 | raise NotImplementedError('TODO') 77 | -------------------------------------------------------------------------------- /cortex/datasets/neuroimaging/snp.py: -------------------------------------------------------------------------------- 1 | '''SNP dataset class. 2 | 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import numpy as np 7 | from scipy.io import loadmat 8 | from .. import BasicDataset, Dataset 9 | from ...utils.tools import warn_kwargs 10 | 11 | 12 | class SNP(BasicDataset): 13 | '''SNP dataset class. 14 | 15 | Currently only handled continuous preprocessed data. 16 | Discrete data TODO 17 | 18 | ''' 19 | def __init__(self, source=None, name='snp', mode='train', convert_one_hot=True, idx=None, **kwargs): 20 | '''Initialize the SNP dataset. 21 | 22 | Args: 23 | source: (str): Path to source file. 24 | name: (str): ID of dataset. 25 | idx: (Optional[list]): List of indices for train/test/validation 26 | split. 27 | 28 | ''' 29 | if source is None: 30 | raise ValueError('No source provided') 31 | 32 | # Fetch SNP data from "source" 33 | X, Y = self.get_data(source) 34 | data = {name: X, 'label': Y} 35 | 36 | # balance data for traning, valid, and test parts 37 | balance = False 38 | if idx is not None: 39 | balance=True 40 | data[name] = data[name][idx] 41 | data['label'] = data['label'][idx] 42 | 43 | distributions = {name: 'gaussian', 'label': 'multinomial'} 44 | super(SNP, self).__init__(data, name=name, balance=balance, distributions=distributions, mode=mode, **kwargs) 45 | 46 | self.mean_image = self.data[name].mean(axis=0) 47 | 48 | def get_data(self, source): 49 | '''Fetch the data from source. 
50 | 51 | Genetic data is in the matrix format with size Subjec*SNP 52 | SNP can be either preprocessed or notprocessed 53 | Labels is a vector with diagnosis info 54 | Patients are coded with 1 and health control coded with 2 55 | 56 | Args: 57 | source (dict): file names of genetic data and labels 58 | {'snp' key for genetic data 59 | 'labels' key for diagnosis } 60 | 61 | ''' 62 | from utils.tools import get_paths 63 | data_path = get_paths()['$snp_data'] 64 | print('Loading genetic data from %s' % data_path) 65 | X = loadmat(data_path + '/' + source['snp']) 66 | Y = loadmat(data_path + '/' + source['label']) 67 | X = np.float32(X[X.keys()[2]]) 68 | Y = np.float32(Y[Y.keys()[0]]) 69 | Y.resize(max(Y.shape,)) 70 | return X, Y 71 | -------------------------------------------------------------------------------- /cortex/datasets/neuroimaging/tests/_test_snp.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Test SNP 3 | ''' 4 | 5 | from datasets.snp import SNP 6 | 7 | def test_snp_data(source_path = {'snp': 'SNP_processed_pgc108.mat' ,'labels': 'pheno.mat' }, batch_size=20): 8 | train = SNP(source = source_path, batch_size=batch_size) 9 | # test batching 10 | print train.Y.shape == (249, 1) 11 | print train.X.shape == (249, 4475) 12 | #import ipdb 13 | #ipdb.set_trace() 14 | 15 | # test next 16 | rval = train.next(10) 17 | next_10 = rval['snp'], rval['labels'] 18 | print next_10[0].shape == (10, 4475) 19 | print next_10[1].shape == (10, 1) 20 | 21 | #print next10.shape 22 | train.shuffle = False 23 | train.reset() 24 | rval = train.next(5) 25 | next_5 = rval['snp'], rval['labels'] 26 | print next_5[0].shape == (5, 4475) 27 | print next_5[1].shape == (5, 1) 28 | 29 | train.shuffle = True 30 | train.reset() 31 | rval = train.next(5) 32 | next_5_nd = rval['snp'], rval['labels'] 33 | #test reset 34 | print next_5[0] 35 | print next_5_nd[0] 36 | 37 | return train 38 | -------------------------------------------------------------------------------- /cortex/demos/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/cortex/demos/__init__.py -------------------------------------------------------------------------------- /cortex/demos/demos_basic/__init__.py: -------------------------------------------------------------------------------- 1 | '''For command-line scripts of demos 2 | 3 | ''' 4 | 5 | from os import path 6 | import sys 7 | 8 | from ...utils.training import set_experiment 9 | 10 | 11 | packagedir = __path__[0] 12 | d = path.join(path.dirname(packagedir), '../..') 13 | 14 | def run_demo(yaml_file, train): 15 | args = dict(experiment=yaml_file) 16 | 17 | exp_dict = set_experiment(args) 18 | train(**exp_dict) 19 | 20 | def run_classifier_demo(): 21 | from .classifier import train 22 | 23 | yaml_file = path.join(d, 'classifier_mnist.yaml') 24 | run_demo(yaml_file, train) 25 | 26 | def run_rbm_demo(): 27 | from .rbm_mnist import train 28 | 29 | yaml_file = path.join(d, 'rbm_mnist.yaml') 30 | run_demo(yaml_file, train) 31 | 32 | def run_vae_demo(): 33 | from .vae import train 34 | 35 | yaml_file = path.join(d, 'vae_mnist.yaml') 36 | run_demo(yaml_file, train) -------------------------------------------------------------------------------- /cortex/demos/demos_basic/classifier_mnist.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'classifier_mnist', 3 | 
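# MNIST classifier demo configuration (loaded by run_classifier_demo and the demo
# tests): optimizer settings, MLP hidden sizes, preprocessing steps and the
# dataset source follow.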
learning_args: { 4 | epochs: 1000, 5 | optimizer: 'sgd', 6 | learning_rate: 0.01 7 | }, 8 | classifier: { 9 | input_layer: 'mnist', 10 | dim_hs: [200, 100], 11 | }, 12 | preprocessing: [ 13 | 'center' 14 | ], 15 | dataset_args: { 16 | dataset: 'mnist', 17 | keys: ['mnist', 'label'], 18 | source: '$data/basic/mnist.pkl.gz' 19 | } 20 | } -------------------------------------------------------------------------------- /cortex/demos/demos_basic/eval_rbm.py: -------------------------------------------------------------------------------- 1 | '''Eval script for pretrained RBMs. 2 | 3 | ''' 4 | 5 | import numpy as np 6 | import theano 7 | from theano import tensor as T 8 | 9 | from cortex.datasets import load_data 10 | from cortex.models.rbm import unpack 11 | from cortex.utils import floatX 12 | from cortex.utils.preprocessor import Preprocessor 13 | from cortex.utils.training import ( 14 | make_argument_parser_test, 15 | reload_model 16 | ) 17 | from cortex.utils.tools import ( 18 | get_trng, 19 | load_model, 20 | print_profile, 21 | print_section 22 | ) 23 | 24 | 25 | def evaluate( 26 | model_to_load=None, 27 | preprocessing=None, 28 | dataset_args=None, 29 | out_path=None, 30 | mode='test', 31 | **kwargs): 32 | 33 | if dataset_args is None: dataset_args = dict() 34 | if preprocessing is None: preprocessing = [] 35 | 36 | # ======================================================================== 37 | print_section('Setting up data') 38 | train_batch_size = None 39 | test_batch_size = None 40 | test_batch_size = None 41 | if mode == 'train': 42 | train_batch_size = 10 43 | elif mode == 'test': 44 | test_batch_size = 10 45 | elif mode == 'valid': 46 | valid_batch_size = 10 47 | train, valid, test = load_data( 48 | train_batch_size=train_batch_size, 49 | test_batch_size=test_batch_size, 50 | valid_batch_size=valid_batch_size, 51 | **dataset_args) 52 | if mode == 'train': 53 | data_iter = train 54 | elif mode == 'test': 55 | data_iter = test 56 | elif mode == 'valid': 57 | data_iter = valid 58 | 59 | # ======================================================================== 60 | print_section('Setting model and variables') 61 | dim_in = data_iter.dims[data_iter.name] 62 | 63 | X = T.matrix('x', dtype=floatX) 64 | X.tag.test_value = np.zeros((10, dim_in), dtype=X.dtype) 65 | trng = get_trng() 66 | 67 | preproc = Preprocessor(preprocessing) 68 | X_i = preproc(X, data_iter=train) 69 | 70 | # ======================================================================== 71 | print_section('Loading model and forming graph') 72 | 73 | models, _ = load_model(model_to_load, unpack, data_iter=data_iter) 74 | model = models['rbm'] 75 | tparams = model.set_tparams() 76 | print_profile(tparams) 77 | 78 | # ======================================================================== 79 | print_section('Testing') 80 | results, z_updates = model.update_partition_function(M=20, K=10000) 81 | f_update_partition = theano.function([], results.values(), updates=z_updates) 82 | outs = f_update_partition() 83 | out_dict = dict((k, v) for k, v in zip(results.keys(), outs)) 84 | for k, v in out_dict.iteritems(): 85 | if k == 'log_ws': 86 | print k, v[-10:] 87 | print v.shape 88 | print v.mean() 89 | else: 90 | print k, v 91 | 92 | nll = model.estimate_nll(X) 93 | f_nll = theano.function([X], nll) 94 | print f_nll(data_iter.X) 95 | 96 | if __name__ == '__main__': 97 | parser = make_argument_parser_test() 98 | args = parser.parse_args() 99 | exp_dict = reload_model(args) 100 | evaluate(**exp_dict) 
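# Programmatic usage sketch (checkpoint path illustrative; normally the argument
# parser from `make_argument_parser_test` and `reload_model` fill these in):
#
#     evaluate(model_to_load='/path/to/rbm_mnist_model.npz',
#              mode='test',
#              dataset_args=dict(
#                  dataset='mnist',
#                  source='$data/basic/mnist_binarized_salakhutdinov.pkl.gz'))
#
# Note that `valid_batch_size` is only assigned in the 'valid' branch above; the
# second `test_batch_size = None` was presumably meant to initialize it.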
-------------------------------------------------------------------------------- /cortex/demos/demos_basic/rbm_cifar.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'rbm_cifar', 3 | learning_args: { 4 | epochs: 1000, 5 | optimizer: 'sgd', 6 | learning_rate: 0.001, 7 | excludes: ['rbm_visible_log_sigma'] 8 | }, 9 | inference_args: { 10 | n_chains: 10, 11 | persistent: True, 12 | n_steps: 1 13 | }, 14 | dim_h: 200, 15 | test_every: 10, 16 | dataset_args: { 17 | dataset: 'cifar', 18 | source: '$data/basic/cifar-10-batches-py' 19 | } 20 | } -------------------------------------------------------------------------------- /cortex/demos/demos_basic/rbm_mnist.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'rbm_mnist', 3 | learning_args: { 4 | epochs: 1000, 5 | optimizer: 'sgd', 6 | learning_rate: 0.01 7 | }, 8 | inference_args: { 9 | n_chains: 10, 10 | persistent: True, 11 | n_steps: 1 12 | }, 13 | dim_h: 200, 14 | test_every: 10, 15 | dataset_args: { 16 | dataset: 'mnist', 17 | source: '$data/basic/mnist_binarized_salakhutdinov.pkl.gz' 18 | } 19 | } -------------------------------------------------------------------------------- /cortex/demos/demos_basic/reweighted_vae.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'vae_mnist', 3 | prior: 'gaussian', 4 | dim_h: 200, 5 | learning_args: { 6 | epochs: 1000, 7 | optimizer: 'rmsprop', 8 | learning_rate: 0.0001, 9 | reweight: True 10 | }, 11 | rec_args: { 12 | input_layer: 'mnist', 13 | dim_hs: [500] 14 | }, 15 | gen_args: { 16 | output: 'mnist', 17 | dim_hs: [500] 18 | }, 19 | test_every: 1, 20 | dataset_args: { 21 | dataset: 'mnist', 22 | source: '$data/basic/mnist_binarized_salakhutdinov.pkl.gz' 23 | } 24 | } -------------------------------------------------------------------------------- /cortex/demos/demos_basic/tests/test_demos.py: -------------------------------------------------------------------------------- 1 | '''Tests the demos. 
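Each test loads its YAML config, truncates the dataset (``stop=100``) and runs a handful of epochs so the demo code paths are exercised quickly, e.g. ``test_classifier(epochs=1)``.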
2 | ''' 3 | 4 | from os import path 5 | 6 | from cortex.demos.demos_basic import classifier 7 | from cortex.demos.demos_basic import rbm_mnist 8 | from cortex.demos.demos_basic import vae 9 | from cortex.utils.tools import load_experiment 10 | 11 | d = path.abspath(path.dirname(path.realpath(__file__)) + '/..') 12 | 13 | def test_classifier(epochs=5): 14 | yaml = path.join(d, 'classifier_mnist.yaml') 15 | exp_dict = load_experiment(yaml) 16 | exp_dict['learning_args']['epochs'] = epochs 17 | exp_dict['dataset_args']['stop'] = 100 18 | classifier.train(**exp_dict) 19 | 20 | def test_rbm(epochs=5, yaml='rbm_mnist.yaml'): 21 | yaml = path.join(d, 'rbm_mnist.yaml') 22 | exp_dict = load_experiment(yaml) 23 | exp_dict['learning_args']['epochs'] = epochs 24 | exp_dict['dataset_args']['stop'] = 100 25 | rbm_mnist.train(**exp_dict) 26 | 27 | def test_rbm_cifar(epochs=5): 28 | test_rbm(epochs=epochs, yaml='rbm_cifar.yaml') 29 | 30 | def test_vae(epochs=5): 31 | yaml = path.join(d, 'vae_mnist.yaml') 32 | exp_dict = load_experiment(yaml) 33 | exp_dict['learning_args']['epochs'] = epochs 34 | exp_dict['dataset_args']['stop'] = 100 35 | vae.train(**exp_dict) -------------------------------------------------------------------------------- /cortex/demos/demos_basic/vae_mnist.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'vae_mnist', 3 | prior: 'gaussian', 4 | dim_h: 200, 5 | learning_args: { 6 | epochs: 1000, 7 | optimizer: 'rmsprop', 8 | learning_rate: 0.0001 9 | }, 10 | rec_args: { 11 | input_layer: 'mnist', 12 | dim_hs: [500] 13 | }, 14 | gen_args: { 15 | output: 'mnist', 16 | dim_hs: [500] 17 | }, 18 | test_every: 1, 19 | dataset_args: { 20 | dataset: 'mnist', 21 | source: '$data/basic/mnist_binarized_salakhutdinov.pkl.gz' 22 | } 23 | } -------------------------------------------------------------------------------- /cortex/demos/demos_neuroimaging/__init__.py: -------------------------------------------------------------------------------- 1 | '''For command-line scripts of demos 2 | 3 | ''' 4 | 5 | from os import path 6 | import sys 7 | 8 | from ...utils.training import set_experiment 9 | 10 | 11 | packagedir = __path__[0] 12 | d = path.join(path.dirname(packagedir), '../..') 13 | 14 | def run_demo(yaml_file, train): 15 | args = dict(experiment=yaml_file) 16 | 17 | exp_dict = set_experiment(args) 18 | train(**exp_dict) 19 | 20 | def run_rbm_vbm_demo(): 21 | from .rbm_ni import train 22 | 23 | yaml_file = path.join(d, 'rbm_vbm.yaml') 24 | run_demo(yaml_file, train) 25 | 26 | def run_rbm_olin_demo(): 27 | from .rbm_ni import train 28 | 29 | yaml_file = path.join(d, 'rbm_olin.yaml') 30 | run_demo(yaml_file, train) -------------------------------------------------------------------------------- /cortex/demos/demos_neuroimaging/rbm_olin.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'rbm_olin', 3 | learning_args: { 4 | epochs: 1000, 5 | optimizer: 'sgd', 6 | learning_rate: 0.0001, 7 | l1_decay: 0.1 8 | }, 9 | inference_args: { 10 | n_chains: 10, 11 | persistent: True, 12 | n_steps: 1 13 | }, 14 | dim_h: 60, 15 | test_every: 10, 16 | show_every: 10, 17 | dataset_args: { 18 | dataset: 'fmri_iid', 19 | source: '$data/neuroimaging/AOD_test/AOD.yaml' 20 | } 21 | } -------------------------------------------------------------------------------- /cortex/demos/demos_neuroimaging/rbm_vbm.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'rbm_vbm', 3 
| learning_args: { 4 | epochs: 1000, 5 | optimizer: 'sgd', 6 | learning_rate: 0.001, 7 | l1_decay: 0.1 8 | }, 9 | inference_args: { 10 | n_chains: 10, 11 | persistent: True, 12 | n_steps: 1 13 | }, 14 | dim_h: 60, 15 | test_every: 10, 16 | show_every: 10, 17 | dataset_args: { 18 | dataset: 'mri', 19 | source: '$data/neuroimaging/VBM_test/VBM.yaml' 20 | } 21 | } -------------------------------------------------------------------------------- /cortex/demos/demos_neuroimaging/vae_mri.yaml: -------------------------------------------------------------------------------- 1 | { 2 | name: 'vae_mri_single_laplace', 3 | prior: 'laplace', 4 | dim_h: 60, 5 | learning_args: { 6 | batch_size: 10, 7 | learning_rate: 0.0001, 8 | optimizer: 'rmsprop', 9 | epochs: 1200, 10 | l2_decay: 0.0002, 11 | excludes: [] 12 | }, 13 | rec_args: { 14 | input_layer: 'mri', 15 | dim_hs: [500], 16 | h_act: 'T.tanh', 17 | weight_scale: 0.00001 18 | }, 19 | gen_args: { 20 | dim_hs: [], 21 | h_act: 'T.tanh', 22 | output: 'mri', 23 | weight_scale: 0.00001 24 | }, 25 | dataset_args: { 26 | dataset: 'mri', 27 | distribution: 'gaussian', 28 | source: '$data/neuroimaging/VBM/data.yaml', 29 | } 30 | } -------------------------------------------------------------------------------- /cortex/inference/__init__.py: -------------------------------------------------------------------------------- 1 | '''Inference methods. 2 | 3 | ''' 4 | 5 | from .air import AIR, DeepAIR 6 | from .gdir import MomentumGDIR 7 | from .rws import RWS, DeepRWS 8 | 9 | 10 | def resolve(model, inference_method=None, deep=False, **inference_args): 11 | '''Resolves the inference method. 12 | 13 | Args: 14 | model (Helmholtz): helmholtz model that we are doing inference with. 15 | inference_method: (str): inference method. 16 | deep (bool): deep or no. 17 | **inference_args: extra keyword args for inference. 18 | 19 | Returns: 20 | IRVI: inference method 21 | 22 | ''' 23 | if deep: 24 | if inference_method == 'momentum': 25 | raise NotImplementedError(inference_method) 26 | return DeepMomentumGDIR(model, **inference_args) 27 | elif inference_method == 'rws': 28 | return DeepRWS(model, **inference_args) 29 | elif inference_method == 'air': 30 | return DeepAIR(model, **inference_args) 31 | elif instance_method is None: 32 | return None 33 | else: 34 | raise ValueError(inference_method) 35 | else: 36 | if inference_method == 'momentum': 37 | return MomentumGDIR(model, **inference_args) 38 | elif inference_method == 'rws': 39 | return RWS(model, **inference_args) 40 | elif inference_method == 'air': 41 | return AIR(model, **inference_args) 42 | elif inference_method is None: 43 | return None 44 | else: 45 | raise ValueError(inference_method) 46 | -------------------------------------------------------------------------------- /cortex/inference/air.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Adaptive importance sampling inference. 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import theano 7 | from theano import tensor as T 8 | 9 | from .irvi import IRVI, DeepIRVI 10 | from ..utils import floatX 11 | from ..utils.tools import ( 12 | get_w_tilde, 13 | scan, 14 | warn_kwargs 15 | ) 16 | 17 | 18 | class AIR(IRVI): 19 | ''' 20 | Adaptive importance refinement (AIR). 
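See ``step_infer`` for the update rule: binary samples ``h`` are drawn from the current posterior ``q``, self-normalized importance weights ``w ~ p(y|h) p(h) / q(h)`` are formed, and ``q`` is moved toward the weighted sample mean::

    q <- inference_rate * sum_k(w_k * h_k) + (1 - inference_rate) * q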
21 | 22 | Inference procedure to refine the posterior using adaptive importance 23 | sampling (AIS) 24 | ''' 25 | def __init__(self, 26 | model, 27 | name='AIR', 28 | pass_gradients=False, 29 | **kwargs): 30 | '''Init function for AIR 31 | 32 | Args: 33 | model: Helmholtz object. 34 | name: str 35 | pass_gradients: bool (optional) 36 | kwargs: dict, remaining IRVI arguments. 37 | ''' 38 | 39 | super(AIR, self).__init__(model, name=name, 40 | pass_gradients=pass_gradients, 41 | **kwargs) 42 | 43 | def step_infer(self, r, q, y, *params): 44 | '''Step inference function for IRVI.inference scan. 45 | 46 | Args: 47 | r: theano randomstream variable 48 | q: T.tensor. Current approximate posterior parameters 49 | y: T.tensor. Data sample 50 | params: list of shared variables 51 | Returns: 52 | q: T.tensor. New approximate posterior parameters 53 | cost: T.scalar float. Negative lower bound of current parameters 54 | ''' 55 | 56 | model = self.model 57 | prior_params = model.get_prior_params(*params) 58 | 59 | h = (r <= q[None, :, :]).astype(floatX) 60 | py = model.p_y_given_h(h, *params) 61 | log_py_h = -model.conditional.neg_log_prob(y[None, :, :], py) 62 | log_ph = -model.prior.step_neg_log_prob(h, *prior_params) 63 | log_qh = -model.posterior.neg_log_prob(h, q[None, :, :]) 64 | log_p = log_py_h + log_ph - log_qh 65 | w_tilde = get_w_tilde(log_p) 66 | cost = -log_p.mean() 67 | q_ = (w_tilde[:, :, None] * h).sum(axis=0) 68 | q = self.inference_rate * q_ + (1 - self.inference_rate) * q 69 | return q, cost 70 | 71 | def init_infer(self, q): 72 | return [] 73 | 74 | def unpack_infer(self, outs): 75 | return outs 76 | 77 | def params_infer(self): 78 | return [] 79 | 80 | 81 | class DeepAIR(DeepIRVI): 82 | def __init__(self, 83 | model, 84 | name='AIR', 85 | pass_gradients=False, 86 | **kwargs): 87 | 88 | super(DeepAIR, self).__init__(model, name=name, 89 | pass_gradients=pass_gradients, 90 | **kwargs) 91 | 92 | def step_infer(self, *params): 93 | model = self.model 94 | 95 | params = list(params) 96 | rs = params[:model.n_layers] 97 | qs = params[model.n_layers:2*model.n_layers] 98 | y = params[2*model.n_layers] 99 | params = params[1+2*model.n_layers:] 100 | prior_params = model.get_prior_params(*params) 101 | 102 | hs = [] 103 | new_qs = [] 104 | 105 | for l, (q, r) in enumerate(zip(qs, rs)): 106 | h = (r <= q[None, :, :]).astype(floatX) 107 | hs.append(h) 108 | 109 | ys = [y[None, :, :]] + hs[:-1] 110 | p_ys = [model.p_y_given_h(h, l, *params) for l, h in enumerate(hs)] 111 | 112 | log_ph = -model.prior.step_neg_log_prob(hs[-1], *prior_params) 113 | log_py_h = T.constant(0.).astype(floatX) 114 | log_qh = T.constant(0.).astype(floatX) 115 | for l in xrange(model.n_layers): 116 | log_py_h += -model.conditionals[l].neg_log_prob(ys[l], p_ys[l]) 117 | log_qh += -model.posteriors[l].neg_log_prob(hs[l], qs[l][None, :, :]) 118 | 119 | log_p = log_py_h + log_ph - log_qh 120 | w_tilde = get_w_tilde(log_p) 121 | cost = -log_p.mean() 122 | 123 | for q, h in zip(qs, hs): 124 | q_ = (w_tilde[:, :, None] * h).sum(axis=0) 125 | new_qs.append(self.inference_rate * q_ + (1 - self.inference_rate) * q) 126 | 127 | return tuple(new_qs) + (cost,) 128 | 129 | def init_infer(self, qs): 130 | return [] 131 | 132 | def unpack_infer(self, outs): 133 | return outs[:-1], outs[-1] 134 | 135 | def params_infer(self): 136 | return [] 137 | -------------------------------------------------------------------------------- /cortex/inference/gdir.py: -------------------------------------------------------------------------------- 1 
| ''' 2 | Gradient-Descent Iterative Refinement 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import theano 7 | from theano import tensor as T 8 | 9 | from .irvi import IRVI 10 | from ..utils import floatX 11 | from ..utils.tools import ( 12 | scan, 13 | update_dict_of_lists, 14 | ) 15 | 16 | 17 | class GDIR(IRVI): 18 | def __init__(self, 19 | model, 20 | name='GDIR', 21 | pass_gradients=True, 22 | **kwargs): 23 | 24 | super(GDIR, self).__init__(model, name=name, 25 | pass_gradients=pass_gradients, 26 | **kwargs) 27 | 28 | def e_step(self, epsilon, q, y, *params): 29 | model = self.model 30 | prior_params = model.get_prior_params(*params) 31 | h = model.prior.step_sample(epsilon, q) 32 | py = model.p_y_given_h(h, *params) 33 | 34 | consider_constant = [y] + list(params) 35 | 36 | log_py_h = -model.conditional.neg_log_prob(y[None, :, :], py) 37 | if model.prior.has_kl: 38 | KL_q_p = model.prior.step_kl_divergence(q, *prior_params) 39 | else: 40 | log_ph = -model.prior.neg_log_prob(h) 41 | log_qh = -model.posterior.neg_log_prob(h, q[None, :, :]) 42 | KL_q_p = (log_qh - log_ph).mean(axis=0) 43 | y_energy = -log_py_h.mean(axis=0) 44 | 45 | cost = (y_energy + KL_q_p).mean(axis=0) 46 | grad = theano.grad(cost, wrt=q, consider_constant=consider_constant) 47 | 48 | cost = y_energy.mean() 49 | return cost, grad 50 | 51 | 52 | class MomentumGDIR(GDIR): 53 | def __init__(self, model, momentum=0.9, name='momentum_GDIR', **kwargs): 54 | self.momentum = momentum 55 | super(MomentumGDIR, self).__init__(model, name=name, **kwargs) 56 | 57 | def step_infer(self, epsilon, q, dq_, y, m, *params): 58 | l = self.inference_rate 59 | cost, grad = self.e_step(epsilon, q, y, *params) 60 | dq = (-l * grad + m * dq_).astype(floatX) 61 | q = (q + dq).astype(floatX) 62 | return q, dq, cost 63 | 64 | def init_infer(self, q): 65 | return [T.zeros_like(q)] 66 | 67 | def unpack_infer(self, outs): 68 | qs, dqs, costs = outs 69 | return qs, costs 70 | 71 | def params_infer(self): 72 | return [T.constant(self.momentum).astype(floatX)] 73 | 74 | 75 | -------------------------------------------------------------------------------- /cortex/inference/rws.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Reweighted wake-sleep inference 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import theano 7 | from theano import tensor as T 8 | 9 | from ..utils import floatX 10 | from ..utils.tools import ( 11 | log_sum_exp, 12 | warn_kwargs 13 | ) 14 | 15 | 16 | class RWS(object): 17 | def __init__(self, 18 | model, 19 | name='RWS', 20 | **kwargs): 21 | self.name = name 22 | self.model = model 23 | warn_kwargs(self, **kwargs) 24 | 25 | def __call__(self, x, y, n_posterior_samples=10, qk=None): 26 | model = self.model 27 | 28 | print 'Doing RWS, %d samples' % n_posterior_samples 29 | q = model.posterior.feed(x) 30 | 31 | if qk is None: 32 | q_c = q.copy() 33 | else: 34 | q_c = qk 35 | 36 | r = model.init_inference_samples( 37 | (n_posterior_samples, y.shape[0], model.dim_h)) 38 | 39 | h = (r <= q_c[None, :, :]).astype(floatX) 40 | py = model.conditional.feed(h) 41 | 42 | log_py_h = -model.conditional.neg_log_prob(y[None, :, :], py) 43 | log_ph = -model.prior.neg_log_prob(h) 44 | log_qh = -model.posterior.neg_log_prob(h, q[None, :, :]) 45 | 46 | assert log_py_h.ndim == log_ph.ndim == log_qh.ndim 47 | 48 | if qk is None: 49 | log_p = log_sum_exp(log_py_h + log_ph - log_qh, axis=0) - T.log(n_posterior_samples) 50 | else: 51 | log_qkh = -model.posterior.neg_log_prob(h, qk[None, 
:, :]) 52 | log_p = log_sum_exp(log_py_h + log_ph - log_qkh, axis=0) - T.log(n_posterior_samples) 53 | 54 | log_pq = log_py_h + log_ph - log_qh - T.log(n_posterior_samples) 55 | w_norm = log_sum_exp(log_pq, axis=0) 56 | log_w = log_pq - T.shape_padleft(w_norm) 57 | w_tilde = T.exp(log_w) 58 | 59 | y_energy = -(w_tilde * log_py_h).sum(axis=0) 60 | prior_energy = -(w_tilde * log_ph).sum(axis=0) 61 | h_energy = -(w_tilde * log_qh).sum(axis=0) 62 | 63 | nll = -log_p 64 | prior_entropy = model.prior.entropy() 65 | q_entropy = model.posterior.entropy(q_c) 66 | 67 | assert prior_energy.ndim == h_energy.ndim == y_energy.ndim, (prior_energy.ndim, h_energy.ndim, y_energy.ndim) 68 | 69 | cost = (y_energy + prior_energy + h_energy).sum(0) 70 | lower_bound = (y_energy + prior_energy - q_entropy).mean() 71 | 72 | results = OrderedDict({ 73 | '-log p(x|h)': y_energy.mean(0), 74 | '-log p(h)': prior_energy.mean(0), 75 | '-log q(h)': h_energy.mean(0), 76 | '-log p(x)': nll.mean(0), 77 | 'H(p)': prior_entropy, 78 | 'H(q)': q_entropy.mean(0), 79 | 'lower_bound': lower_bound, 80 | 'cost': cost 81 | }) 82 | 83 | samples = OrderedDict(py=py) 84 | constants = [w_tilde, q_c] 85 | return results, samples, constants, theano.OrderedUpdates() 86 | 87 | def test(self, x, y, n_posterior_samples=10, qk=None): 88 | results, samples, constants, updates = self( 89 | x, y, n_posterior_samples=n_posterior_samples, qk=qk) 90 | 91 | return results, samples, None, updates 92 | 93 | class DeepRWS(object): 94 | def __init__(self, 95 | model, 96 | name='RWS', 97 | **kwargs): 98 | self.name = name 99 | self.model = model 100 | warn_kwargs(self, **kwargs) 101 | 102 | def __call__(self, x, y, n_posterior_samples=10, qk=None, sample_posterior=False): 103 | qks = qk 104 | model = self.model 105 | 106 | print 'Doing RWS, %d samples' % n_posterior_samples 107 | qs = [] 108 | qcs = [] 109 | state = x[None, :, :] 110 | for l in xrange(model.n_layers): 111 | q = model.posteriors[l].feed(state).mean(axis=0) 112 | qs.append(q) 113 | if sample_posterior: 114 | state, _ = model.posteriors[l].sample(q, n_samples=n_posterior_samples) 115 | else: 116 | state = q[None, :, :] 117 | if qks is None: 118 | qcs.append(q.copy()) 119 | else: 120 | qcs.append(qks[l]) 121 | 122 | hs = [] 123 | for l, qc in enumerate(qcs): 124 | r = model.trng.uniform((n_posterior_samples, y.shape[0], model.dim_hs[l]), dtype=floatX) 125 | h = (r <= qc[None, :, :]).astype(floatX) 126 | hs.append(h) 127 | 128 | p_ys = [conditional.feed(h) for h, conditional in zip(hs, model.conditionals)] 129 | ys = [y[None, :, :]] + hs[:-1] 130 | 131 | log_py_h = T.constant(0.).astype(floatX) 132 | log_qh = T.constant(0.).astype(floatX) 133 | log_qch = T.constant(0.).astype(floatX) 134 | for l in xrange(model.n_layers): 135 | log_py_h -= model.conditionals[l].neg_log_prob(ys[l], p_ys[l]) 136 | log_qh -= model.posteriors[l].neg_log_prob(hs[l], qs[l]) 137 | log_qch -= model.posteriors[l].neg_log_prob(hs[l], qcs[l]) 138 | log_ph = -model.prior.neg_log_prob(hs[-1]) 139 | 140 | assert log_py_h.ndim == log_ph.ndim == log_qh.ndim 141 | 142 | log_p = log_sum_exp(log_py_h + log_ph - log_qch, axis=0) - T.log(n_posterior_samples) 143 | 144 | log_pq = log_py_h + log_ph - log_qh - T.log(n_posterior_samples) 145 | w_norm = log_sum_exp(log_pq, axis=0) 146 | log_w = log_pq - T.shape_padleft(w_norm) 147 | w_tilde = T.exp(log_w) 148 | 149 | y_energy = -(w_tilde * log_py_h).sum(axis=0) 150 | prior_energy = -(w_tilde * log_ph).sum(axis=0) 151 | h_energy = -(w_tilde * log_qh).sum(axis=0) 152 | 153 | nll = -log_p 
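# `nll` above is the negative importance-sampled estimate of log p(x); the
# entropy terms computed next enter the monitored results and the lower bound,
# while the training cost sums the reweighted energy terms.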
154 | prior_entropy = model.prior.entropy() 155 | q_entropy = T.constant(0.).astype(floatX) 156 | for l, qc in enumerate(qcs): 157 | q_entropy += model.posteriors[l].entropy(qc) 158 | 159 | cost = (y_energy + prior_energy + h_energy).sum(0) 160 | lower_bound = (y_energy + prior_energy - q_entropy).mean() 161 | 162 | results = OrderedDict({ 163 | '-log p(x|h)': y_energy.mean(0), 164 | '-log p(h)': prior_energy.mean(0), 165 | '-log q(h)': h_energy.mean(0), 166 | '-log p(x)': nll.mean(0), 167 | 'H(p)': prior_entropy, 168 | 'H(q)': q_entropy.mean(0), 169 | 'lower_bound': lower_bound, 170 | 'cost': cost 171 | }) 172 | 173 | samples = OrderedDict( 174 | py=p_ys[0] 175 | ) 176 | 177 | constants = [w_tilde] + qcs 178 | return results, samples, constants 179 | -------------------------------------------------------------------------------- /cortex/inference/tests/test_gdir.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Test GDIR 3 | ''' 4 | 5 | import numpy as np 6 | import theano 7 | from theano import tensor as T 8 | 9 | from cortex.datasets.basic.euclidean import Euclidean 10 | from cortex.inference.gdir import MomentumGDIR 11 | from cortex.models.tests import test_vae 12 | from cortex.utils import floatX 13 | from cortex.utils.tools import resolve_path 14 | 15 | def test_build_gdir(model=None, **inference_args): 16 | if model is None: 17 | data_iter = Euclidean(batch_size=27, dim_in=17) 18 | model = test_vae.test_build_GBN(dim_in=data_iter.dims[data_iter.name]) 19 | gdir = MomentumGDIR(model, **inference_args) 20 | return gdir 21 | 22 | def test_infer(): 23 | data_iter = Euclidean(batch_size=27, dim_in=17) 24 | gbn = test_vae.test_build_GBN(dim_in=data_iter.dims[data_iter.name]) 25 | 26 | inference_args = dict( 27 | n_inference_steps=7, 28 | pass_gradients=True 29 | ) 30 | 31 | gdir = test_build_gdir(gbn, **inference_args) 32 | 33 | X = T.matrix('x', dtype=floatX) 34 | 35 | rval, constants, updates = gdir.inference(X, X) 36 | 37 | f = theano.function([X], rval.values(), updates=updates) 38 | x = data_iter.next()[data_iter.name] 39 | 40 | results, samples, full_results, updates = gdir(X, X) 41 | f = theano.function([X], results.values(), updates=updates) 42 | 43 | print f(x) 44 | -------------------------------------------------------------------------------- /cortex/models/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Base Layer class. 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import copy 7 | import theano 8 | 9 | from cortex.utils.tools import ( 10 | warn_kwargs, 11 | _p 12 | ) 13 | 14 | 15 | class Layer(object): 16 | '''Basic layer class. 17 | 18 | Attributes: 19 | name (str): name of layer. 20 | params (dict): dictionary of numpy.arrays 21 | excludes (list): list of parameters to exclude from learning. 22 | learn (bool): if False, do not change params. 23 | n_params (int): number of parameters 24 | 25 | ''' 26 | def __init__(self, name='', excludes=[], learn=True, **kwargs): 27 | '''Init function for Layer. 28 | 29 | Args: 30 | name (str): name of layer. 31 | excludes (list): list of parameters to exclude from learning. 32 | learn (bool): if False, do not change params. 33 | **kwargs: extra kwargs 34 | 35 | ''' 36 | self.name = name 37 | self.params = None 38 | self.excludes = excludes 39 | self.learn = learn 40 | self.set_params() 41 | self.n_params = len(self.params) 42 | warn_kwargs(kwargs) 43 | 44 | def copy(self): 45 | '''Copy the Layer. 
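Returns:
    Layer: a deep copy of this layer, parameters included.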
46 | 47 | ''' 48 | return copy.deepcopy(self) 49 | 50 | def set_params(self): 51 | '''Initialize the parameters. 52 | 53 | ''' 54 | raise NotImplementedError() 55 | 56 | def set_tparams(self): 57 | '''Sets the tensor parameters. 58 | 59 | ''' 60 | if self.params is None: 61 | raise ValueError('Params not set yet') 62 | tparams = OrderedDict() 63 | for kk, pp in self.params.iteritems(): 64 | tp = theano.shared(self.params[kk], name=kk) 65 | tparams[_p(self.name, kk)] = tp 66 | self.__dict__[kk] = tp 67 | 68 | return OrderedDict((k, v) for k, v in tparams.iteritems() if k not in [_p(self.name, e) for e in self.excludes]) 69 | 70 | def get_excludes(self): 71 | '''Fetches the excluded parameters. 72 | 73 | ''' 74 | if self.learn: 75 | return [_p(self.name, e) for e in self.excludes] 76 | else: 77 | return [_p(self.name, k) for k in self.params.keys()] 78 | -------------------------------------------------------------------------------- /cortex/models/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/cortex/models/tests/__init__.py -------------------------------------------------------------------------------- /cortex/models/tests/_test_gru.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for testing GRU. 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import numpy as np 7 | import random 8 | import theano 9 | from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams 10 | from theano import tensor as T 11 | 12 | from models.gru import GRU 13 | import test_mlp 14 | from utils import floatX 15 | 16 | 17 | sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x)) 18 | 19 | def test_make_gru(dim_in=31, dim_h=11, dim_out=None, 20 | i_net=None, a_net=None, o_net=None, c_net=None): 21 | print 'Testing GRU formation' 22 | 23 | if i_net is None: 24 | i_net = dict( 25 | dim_h=17, 26 | n_layers=2, 27 | h_act='T.tanh', 28 | weight_scale=0.1, 29 | ) 30 | if a_net is None: 31 | a_net = dict( 32 | dim_h=19, 33 | n_layers=2, 34 | h_act='T.tanh', 35 | weight_scale=0.1 36 | ) 37 | if o_net is None: 38 | o_net = dict( 39 | dim_h=23, 40 | n_layers=2, 41 | weight_scale=0.1, 42 | distribution='binomial' 43 | ) 44 | 45 | nets = dict(i_net=i_net, a_net=a_net, o_net=o_net, c_net=c_net) 46 | 47 | trng = RandomStreams(101) 48 | 49 | rnn = GRU.factory(dim_in=dim_in, dim_hs=[dim_h], dim_out=dim_out, **nets) 50 | rnn.set_tparams() 51 | print 'GRU formed correctly' 52 | 53 | return rnn 54 | 55 | def test_step(rnn=None, X=T.tensor3('X', dtype=floatX), 56 | H0=T.matrix('H0', dtype=floatX), x=None, h0=None, 57 | window=5, batch_size=9): 58 | 59 | if rnn is None: 60 | rnn = test_make_gru() 61 | dim_in = rnn.dim_in 62 | dim_h = rnn.dim_hs[0] 63 | 64 | if x is None: 65 | x = np.random.randint(0, 2, size=(window, batch_size, dim_in)).astype(floatX) 66 | if h0 is None: 67 | h0 = np.random.normal(loc=0, scale=1.0, size=(x.shape[1], dim_h)).astype(floatX) 68 | 69 | input_dict = test_mlp.test_feed_forward(mlp=rnn.input_net, X=X, x=x, 70 | distribution='centered_binomial') 71 | aux_dict = test_mlp.test_feed_forward(mlp=rnn.input_net_aux, X=X, x=x, 72 | distribution='centered_binomial') 73 | 74 | H1 = rnn._step(1, aux_dict['Preact'][0], input_dict['Preact'][0], H0, 75 | *rnn.get_params()) 76 | 77 | def step(h_, y_a, y_i): 78 | preact = np.dot(h_, rnn.params['Ura0']) + y_a 79 | r = sigmoid(preact[:, :rnn.dim_hs[0]]) 80 | u = 
sigmoid(preact[:, rnn.dim_hs[0]:]) 81 | preactx = np.dot(h_, rnn.params['Urb0']) * r + y_i 82 | h = np.tanh(preactx) 83 | h = u * h + (1. - u) * h_ 84 | return h 85 | 86 | h = step(h0, aux_dict['preact'][0], input_dict['preact'][0]) 87 | 88 | f = theano.function([X, H0], H1) 89 | h_test = f(x, h0) 90 | assert np.allclose(h_test, h, atol=1e-7), (np.max(np.abs(h_test - h))) 91 | 92 | rnn_dict, updates = rnn(X, h0s=[H0]) 93 | tinps = [X, H0] 94 | inps = [x, h0] 95 | rnn_values = [v[0] if isinstance(v, list) else v for v in rnn_dict.values()] 96 | 97 | f = theano.function(tinps, rnn_values, updates=updates) 98 | 99 | vals = f(*inps) 100 | v_dict = OrderedDict((k, v) for k, v in zip(rnn_dict.keys(), vals)) 101 | 102 | hs = [] 103 | h = h0 104 | for t in xrange(window): 105 | h = step(h, aux_dict['preact'][t], input_dict['preact'][t]) 106 | hs.append(h) 107 | 108 | hs = np.array(hs).astype(floatX) 109 | 110 | assert np.allclose(hs[0], v_dict['hs'][0], atol=1e-7), (hs[0] - v_dict['hs'][0]) 111 | print 'RNN Hiddens test out' 112 | 113 | out_dict = test_mlp.test_feed_forward( 114 | mlp=rnn.output_net, X=T.tensor3('H', dtype=floatX), x=hs) 115 | 116 | p = out_dict['y'] 117 | 118 | assert np.allclose(p, v_dict['p'], atol=1e-4), (p - v_dict['p']) 119 | 120 | return OrderedDict(p=p, P=v_dict['p']) 121 | -------------------------------------------------------------------------------- /cortex/models/tests/test_darn.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Tests for DARN model. 3 | ''' 4 | 5 | import numpy as np 6 | import theano 7 | from theano import tensor as T 8 | 9 | from cortex.models.darn import ( 10 | AutoRegressor, 11 | DARN 12 | ) 13 | from cortex.models.helmholtz import Helmholtz 14 | from cortex.utils import floatX 15 | 16 | 17 | sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x)) 18 | 19 | def test_autoregressor(dim=3, n_samples=5): 20 | ar = AutoRegressor(dim) 21 | ar.params['b'] += 0.1 22 | tparams = ar.set_tparams() 23 | 24 | X = T.matrix('X', dtype=floatX) 25 | nlp = ar.neg_log_prob(X) 26 | p = ar.get_prob(X, *ar.get_params()) 27 | W = T.tril(ar.W, k=-1) 28 | z = T.dot(X, W) + ar.b 29 | 30 | x = np.random.randint(0, 2, size=(n_samples, dim)).astype(floatX) 31 | 32 | f = theano.function([X], [nlp, p, z, W]) 33 | nlp_t, p_t, z_t, W_t = f(x) 34 | print x.shape, nlp_t.shape 35 | z_np = np.zeros((n_samples, dim)).astype(floatX) + ar.params['b'][None, :] 36 | 37 | for i in xrange(dim): 38 | print i 39 | for j in xrange(i + 1, dim): 40 | print i, j 41 | z_np[:, i] += ar.params['W'][j, i] * x[:, j] 42 | 43 | assert np.allclose(z_t, z_np), (z_t, z_np) 44 | p_np = sigmoid(z_np) 45 | assert np.allclose(p_t, p_np, atol=1e-4), (p_t - p_np) 46 | 47 | p_np = np.clip(p_np, 1e-7, 1 - 1e-7) 48 | nlp_np = (- x * np.log(p_np) - (1 - x) * np.log(1 - p_np)).sum(axis=1) 49 | 50 | assert np.allclose(nlp_t, nlp_np, atol=1e-3), (nlp_t - nlp_np) 51 | 52 | samples, updates = ar.sample(n_samples=n_samples) 53 | 54 | f = theano.function([], samples, updates=updates) 55 | print f() 56 | 57 | def test_darn(dim_in=5, dim_h=3, dim_out=7, n_samples=13): 58 | darn = DARN(dim_in, dim_h, dim_out, 2, h_act='T.tanh', out_act='T.nnet.sigmoid') 59 | tparams = darn.set_tparams() 60 | 61 | X = T.matrix('X', dtype=floatX) 62 | H = T.matrix('H', dtype=floatX) 63 | C = darn(H) 64 | NLP = darn.neg_log_prob(X, C) 65 | 66 | f = theano.function([X, H], [C, NLP]) 67 | 68 | x = np.random.randint(0, 2, size=(n_samples, dim_out)).astype(floatX) 69 | h = np.random.randint(0, 2, size=(n_samples, 
dim_in)).astype(floatX) 70 | 71 | c_t, nlp_t = f(x, h) 72 | print c_t.shape 73 | 74 | d_np = np.tanh(np.dot(h, darn.params['W0']) + darn.params['b0']) 75 | c_np = np.dot(d_np, darn.params['W1']) + darn.params['b1'] 76 | 77 | assert np.allclose(c_t, c_np), (c_t, c_np) 78 | 79 | z_np = np.zeros((n_samples, dim_out)).astype(floatX) + darn.params['bar'][None, :] + c_np 80 | 81 | for i in xrange(dim_out): 82 | for j in xrange(i + 1, dim_out): 83 | z_np[:, i] += darn.params['War'][j, i] * x[:, j] 84 | 85 | p_np = sigmoid(z_np) 86 | 87 | p_np = np.clip(p_np, 1e-7, 1 - 1e-7) 88 | nlp_np = (- x * np.log(p_np) - (1 - x) * np.log(1 - p_np)).sum(axis=1) 89 | 90 | assert np.allclose(nlp_t, nlp_np), (nlp_t, nlp_np) 91 | 92 | samples, updates_s = darn.sample(C, n_samples=n_samples-1) 93 | f = theano.function([H], samples, updates=updates_s) 94 | print f(h) -------------------------------------------------------------------------------- /cortex/models/tests/test_mlp.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for testing MLPs. 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import numpy as np 7 | import theano 8 | from theano import tensor as T 9 | 10 | from cortex.models.mlp import MLP 11 | from cortex.utils import floatX 12 | 13 | 14 | sigmoid = 'lambda x: 1.0 / (1.0 + np.exp(-x))' 15 | tanh = 'lambda x: np.tanh(x)' 16 | softplus = 'lambda x: np.log(1.0 + np.exp(x))' 17 | 18 | 19 | def test_make_mlp(dim_in=13, dim_h=17, dim_out=19, n_layers=2, 20 | h_act='T.nnet.softplus', distribution='binomial'): 21 | mlp = MLP(dim_in, dim_h, dim_out, n_layers, h_act=h_act, 22 | distribution=distribution) 23 | mlp.set_tparams() 24 | return mlp 25 | 26 | def test_feed_forward(mlp=None, X=T.matrix('X', dtype=floatX), x=None, distribution='binomial'): 27 | if mlp is None: 28 | mlp = test_make_mlp() 29 | Z = mlp.preact(X) 30 | Y = mlp.feed(X) 31 | 32 | batch_size = 23 33 | if x is None: 34 | x = np.random.randint(0, 2, size=(batch_size, mlp.dim_in)).astype(floatX) 35 | 36 | z = x 37 | for l in xrange(mlp.n_layers): 38 | W = mlp.params['W%d' % l] 39 | b = mlp.params['b%d' % l] 40 | 41 | z = np.dot(z, W) + b 42 | if l != mlp.n_layers - 1: 43 | activ = mlp.h_act 44 | if activ == 'T.nnet.sigmoid': 45 | activ = sigmoid 46 | elif activ == 'T.tanh': 47 | activ = tanh 48 | elif activ == 'T.nnet.softplus': 49 | activ = softplus 50 | elif activ == 'lambda x: x': 51 | pass 52 | else: 53 | raise ValueError(activ) 54 | z = eval(activ)(z) 55 | assert not np.any(np.isnan(z)) 56 | 57 | if distribution == 'binomial': 58 | activ = sigmoid 59 | elif distribution == 'centered_binomial': 60 | activ = sigmoid 61 | elif distribution == 'gaussian': 62 | activ = 'lambda x: x' 63 | else: 64 | raise ValueError(distribution) 65 | print distribution, activ 66 | print 67 | y = eval(activ)(z) 68 | assert not np.any(np.isnan(y)) 69 | 70 | f = theano.function([X], Y) 71 | y_test = f(x) 72 | assert not np.any(np.isnan(y_test)), y_test 73 | 74 | assert y.shape == y_test.shape, (y.shape, y_test.shape) 75 | 76 | assert np.allclose(y, y_test, atol=1e-4), (np.max(np.abs(y - y_test))) 77 | 78 | return OrderedDict(y=y, preact=z, Y=Y, Preact=Z) -------------------------------------------------------------------------------- /cortex/models/tests/test_rbm.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for RBM tests. 
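``test_build`` constructs a small RBM and ``test_sample`` runs a short sampling chain on toy Euclidean data, e.g. ``test_sample(n_steps=3, batch_size=7)``.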
3 | ''' 4 | 5 | import theano 6 | from theano import tensor as T 7 | 8 | from cortex.datasets.basic import euclidean 9 | from cortex.models import rbm 10 | from cortex.utils import floatX 11 | 12 | 13 | def test_build(dim_h=11, dim_v=13): 14 | model = rbm.RBM(dim_v, dim_h) 15 | model.set_tparams() 16 | return model 17 | 18 | def test_sample(n_steps=3, dim_v=13, batch_size=7): 19 | data_iter = euclidean.Euclidean(dims=dim_v, batch_size=batch_size) 20 | x = data_iter.next()[data_iter.name] 21 | 22 | model = test_build(dim_v=dim_v) 23 | 24 | X = T.matrix('X', dtype=floatX) 25 | ph0 = model.ph_v(X) 26 | r = model.trng.uniform(size=(X.shape[0], model.dim_h)) 27 | h_p = (r <= ph0).astype(floatX) 28 | 29 | outs, updates = model.sample(h_p, n_steps=n_steps) 30 | keys = outs.keys() 31 | 32 | f = theano.function([X], outs.values(), updates=updates) 33 | values = f(x) 34 | 35 | outs = model(X, n_chains=batch_size, n_steps=n_steps) 36 | results, samples, updates, constants = outs 37 | f = theano.function([X], results.values(), updates=updates) 38 | f(x) 39 | -------------------------------------------------------------------------------- /cortex/models/tests/test_rnn.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Tests for RNN 3 | ''' 4 | 5 | from collections import OrderedDict 6 | import numpy as np 7 | import theano 8 | from theano import tensor as T 9 | 10 | from cortex.datasets.basic.euclidean import Euclidean 11 | from cortex.models.rnn import RNN 12 | from cortex.utils import floatX 13 | 14 | 15 | sigmoid = lambda x: (1. / (1 + np.exp(-x))) * 0.9999 + 0.000005 16 | 17 | def test_build(dim_in=13, dim_h=17): 18 | rnn = RNN(dim_in, [dim_h]) 19 | rnn.set_tparams() 20 | 21 | return rnn 22 | 23 | def test_recurrent(dim_in=13, dim_h=17, n_samples=107, window=7): 24 | rnn = test_build(dim_in, dim_h) 25 | 26 | data_iter = Euclidean(n_samples=n_samples, dims=dim_in, batch_size=window) 27 | x = data_iter.next()[data_iter.name] 28 | 29 | test_dict = OrderedDict() 30 | 31 | X = T.matrix('x', dtype=floatX) 32 | 33 | Y = rnn.call_seqs(X, None, 0, *rnn.get_sample_params())[0] 34 | y = np.dot(x, rnn.input_net.params['W0']) + rnn.input_net.params['b0'] 35 | test_dict['RNN preact from data'] = (X, Y, x, y, theano.OrderedUpdates()) 36 | 37 | H0 = T.alloc(0., X.shape[0], rnn.dim_hs[0]).astype(floatX) 38 | H = rnn._step(1, Y, H0, rnn.Ur0) 39 | h0 = np.zeros((x.shape[0], rnn.dim_hs[0])).astype(floatX) 40 | h = np.tanh(np.dot(h0, rnn.params['Ur0']) + y) 41 | test_dict['step reccurent'] = (X, H, x, h, theano.OrderedUpdates()) 42 | 43 | P = rnn.output_net.feed(H) 44 | p = sigmoid(np.dot(h, rnn.output_net.params['W0']) + rnn.output_net.params['b0']) 45 | test_dict['output'] = (X, P, x, p, theano.OrderedUpdates()) 46 | 47 | for k, v in test_dict.iteritems(): 48 | print 'Testing %s' % k 49 | inp, out, inp_np, out_np, updates = v 50 | f = theano.function([inp], out, updates=updates) 51 | out_actual = f(inp_np) 52 | if not np.allclose(out_np, out_actual): 53 | print 'np', out_np 54 | print 'theano', out_actual 55 | assert False 56 | -------------------------------------------------------------------------------- /cortex/models/tests/test_vae.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Tests for VAE 3 | ''' 4 | 5 | import numpy as np 6 | import theano 7 | from theano import tensor as T 8 | 9 | from cortex.datasets.basic.euclidean import Euclidean 10 | from cortex.inference import gdir 11 | from cortex.models.helmholtz import 
Helmholtz 12 | from cortex.utils import floatX 13 | from cortex.utils.tools import print_profile, resolve_path 14 | 15 | 16 | def test_build_GBN(dim_in=17, dim_h=13): 17 | rec_args = dict(input_layer='input') 18 | gen_args = dict(output='input') 19 | distributions = dict(input='gaussian') 20 | dims = dict(input=dim_in) 21 | gbn = Helmholtz.factory(dim_h, distributions=distributions, dims=dims, 22 | rec_args=rec_args, gen_args=gen_args) 23 | tparams = gbn.set_tparams() 24 | 25 | print_profile(tparams) 26 | 27 | return gbn 28 | 29 | def test_call(): 30 | data_iter = Euclidean(batch_size=27, dim_in=17) 31 | gbn = test_build_GBN(dim_in=data_iter.dims[data_iter.name]) 32 | 33 | X = T.matrix('x', dtype=floatX) 34 | results, samples, _, _ = gbn(X, X, n_posterior_samples=7) 35 | 36 | f = theano.function([X], samples.values() + results.values()) 37 | 38 | x = data_iter.next()[data_iter.name] 39 | f(x) -------------------------------------------------------------------------------- /cortex/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Init for utils. 3 | 4 | """ 5 | 6 | import numpy as np 7 | import theano 8 | 9 | 10 | intX = 'int64' 11 | floatX = theano.config.floatX 12 | pi = theano.shared(np.pi).astype(floatX) 13 | e = theano.shared(np.e).astype(floatX) -------------------------------------------------------------------------------- /cortex/utils/extra.py: -------------------------------------------------------------------------------- 1 | '''Extra functions not used for learning. 2 | 3 | ''' 4 | 5 | import glob 6 | import os 7 | from os import path 8 | from progressbar import ( 9 | Bar, 10 | Percentage, 11 | ProgressBar, 12 | Timer 13 | ) 14 | import sys 15 | import urllib2 16 | import zipfile 17 | 18 | 19 | def complete_path(text, state): 20 | '''Completes a path for readline. 21 | 22 | ''' 23 | return (glob.glob(text + '*') + [None])[state] 24 | 25 | 26 | def download_data(url, out_path): 27 | '''Downloads the data from a url. 28 | 29 | Args: 30 | url (str): url of the data. 31 | out_path (str): Output directory or full file path. 32 | 33 | ''' 34 | 35 | if path.isdir(out_path): 36 | file_name = path.join(out_path, url.split('/')[-1]) 37 | else: 38 | d = path.abspath(os.path.join(out_path, os.pardir)) 39 | if not path.isdir(d): 40 | raise IOError('Directory %s does not exist' % d) 41 | file_name = out_path 42 | 43 | u = urllib2.urlopen(url) 44 | with open(file_name, 'wb') as f: 45 | meta = u.info() 46 | file_size = int(meta.getheaders("Content-Length")[0]) 47 | 48 | file_size_dl = 0 49 | block_sz = 8192 50 | 51 | widgets = ['Dowloading to %s (' % file_name, Timer(), '): ', Bar()] 52 | pbar = ProgressBar(widgets=widgets, maxval=file_size).start() 53 | 54 | while True: 55 | buffer = u.read(block_sz) 56 | if not buffer: 57 | break 58 | 59 | file_size_dl += len(buffer) 60 | f.write(buffer) 61 | pbar.update(file_size_dl) 62 | print 63 | 64 | def unzip(source, out_path): 65 | '''Unzip function. 66 | 67 | Arguments: 68 | source (str): path to zip file 69 | out_path (str): path to out_file 70 | 71 | ''' 72 | print 'Unzipping %s to %s' % (source, out_path) 73 | 74 | if not zipfile.is_zipfile(source): 75 | raise ValueError('%s is not a zipfile' % source) 76 | 77 | if not path.isdir(out_path): 78 | raise ValueError('%s is not a directory' % out_path) 79 | 80 | with zipfile.ZipFile(source) as zf: 81 | zf.extractall(out_path) 82 | 83 | def write_path_conf(data_path, out_path): 84 | '''Writes basic configure file. 
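The file is written to ``~/.cortexrc`` and records the ``$data`` and ``$outs`` path variables used to resolve dataset and output paths, e.g. (paths illustrative)::

    [PATHS]
    $data: /home/user/cortex_data
    $outs: /home/user/cortex_outs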
85 | 86 | Args: 87 | data_path (str): path to data. 88 | out_path (str): path to outputs. 89 | 90 | ''' 91 | d = path.expanduser('~') 92 | with open(path.join(d, '.cortexrc'), 'w') as f: 93 | f.write('[PATHS]\n') 94 | f.write('$data: %s\n' % path.abspath(data_path)) 95 | f.write('$outs: %s\n' % path.abspath(out_path)) 96 | 97 | def query_yes_no(question, default='yes'): 98 | '''Ask a yes/no question via raw_input() and return their answer. 99 | 100 | Args: 101 | question (str) 102 | default (Optional[str]) 103 | 104 | Returns: 105 | str 106 | 107 | ''' 108 | valid = {'yes': True, 'y': True, 'ye': True, 'Y': True, 'Ye': True, 109 | 'no': False, 'n': False, 'N': False, 'No': False} 110 | if default is None: 111 | prompt = ' [y/n] ' 112 | elif default == 'yes': 113 | prompt = ' [Y/n] ' 114 | elif default == 'no': 115 | prompt = ' [y/N] ' 116 | else: 117 | raise ValueError('invalid default answer: `%s`' % default) 118 | 119 | while True: 120 | sys.stdout.write(question + prompt) 121 | choice = raw_input().lower() 122 | if default is not None and choice == '': 123 | return valid[default] 124 | elif choice in valid: 125 | return valid[choice] 126 | else: 127 | sys.stdout.write('Please respond with `yes` or `no` ' 128 | '(or `y` or `n`).\n') -------------------------------------------------------------------------------- /cortex/utils/learning_scheduler.py: -------------------------------------------------------------------------------- 1 | '''Scheduler for learning rates. 2 | ''' 3 | 4 | from collections import OrderedDict 5 | 6 | 7 | def unpack(learning_rate=None, decay_rate=None, schedule=None): 8 | return learning_rate, decay_rate, schedule 9 | 10 | 11 | class Scheduler(object): 12 | '''Scheduler for learning rates. 13 | 14 | Attributes: 15 | d (OrderedDict): dictionary of learning rates and decays, schedules. 16 | 17 | ''' 18 | def __init__(self, verbose=True, **kwargs): 19 | '''Init function of Scheduler. 20 | 21 | kwargs correspond to the model name and their respective schedules. 22 | Currently, each key is the name of a model while each value is the 23 | schedule dictionary. The schedule dictionary should include a learning 24 | rate and can have either a decay rate or a schedule. 25 | 26 | Args: 27 | verbose (bool): sets verbosity. 28 | **kwargs: keyword args of the scheduled learning rates 29 | 30 | ''' 31 | self.d = OrderedDict() 32 | self.verbose = verbose 33 | for k, v in kwargs.iteritems(): 34 | self.d[k] = OrderedDict() 35 | if isinstance(v, float): 36 | self.d[k]['learning_rate'] = v 37 | elif isinstance(v, (dict, OrderedDict)): 38 | def unpack(learning_rate=None, decay_rate=None, schedule=None): 39 | return learning_rate, decay_rate, schedule 40 | 41 | learning_rate, decay_rate, schedule = unpack(**v) 42 | 43 | if learning_rate is None: 44 | raise ValueError('Must includes learning rate for %s' % k) 45 | 46 | if (decay_rate is not None) and (schedule is not None): 47 | raise ValueError('Provide either decay rate OR scheduler OR neither' 48 | ', not both.') 49 | self.d[k]['decay_rate'] = decay_rate 50 | self.d[k]['schedule'] = schedule 51 | self.d[k]['learning_rate'] = learning_rate 52 | 53 | def __getitem__(self, k): 54 | return self.d[k] 55 | 56 | def __call__(self, e): 57 | '''Update the learning rates and return list. 58 | 59 | Args: 60 | e (int): the epoch. 61 | 62 | Returns: 63 | list: list of current learning rates. 
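Example (model name and rates illustrative)::

    scheduler = Scheduler(mlp={'learning_rate': 0.01, 'decay_rate': 0.99})
    for epoch in range(n_epochs):
        learning_rates = scheduler(epoch)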
64 | 65 | ''' 66 | 67 | for k, v in self.d.iteritems(): 68 | learning_rate, decay_rate, schedule = unpack(**v) 69 | 70 | if decay_rate is None and schedule is None: continue 71 | if decay_rate is not None: 72 | self.d[k]['learning_rate'] *= decay_rate 73 | elif schedule is not None and e in schedule.keys(): 74 | self.d[k]['learning_rate'] = schedule[e] 75 | if self.verbose: 76 | print 'Changing learning rate for %s to %.5f' % (k, self.d[k]['learning_rate']) 77 | 78 | return [v['learning_rate'] for v in self.d.values()] 79 | -------------------------------------------------------------------------------- /cortex/utils/logger.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for general logger. 3 | ''' 4 | 5 | import logging 6 | 7 | loggers = {} 8 | 9 | def setup_custom_logger(name, level): 10 | global loggers 11 | 12 | logger = loggers.get(name, None) 13 | if logger is not None: 14 | logger.setLevel(level) 15 | return logger 16 | formatter = logging.Formatter(fmt='%(asctime)s:%(levelname)s:' 17 | '%(module)s:%(message)s') 18 | 19 | handler = logging.StreamHandler() 20 | handler.setFormatter(formatter) 21 | 22 | logger = logging.getLogger(name) 23 | logger.setLevel(level) 24 | logger.addHandler(handler) 25 | loggers[name] = logger 26 | return logger -------------------------------------------------------------------------------- /cortex/utils/monitor.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Module for monitor class. 3 | ''' 4 | import matplotlib 5 | matplotlib.use('Agg') 6 | from matplotlib import pylab as plt 7 | from collections import OrderedDict 8 | import cPickle as pkl 9 | import numpy as np 10 | import os 11 | import pprint 12 | import signal 13 | import time 14 | 15 | from tools import check_bad_nums 16 | from tools import update_dict_of_lists 17 | 18 | 19 | class SimpleMonitor(object): 20 | '''Simple monitor for displaying and saving results. 21 | 22 | Basic template monitor. Should be interchangeable in training for 23 | customized versions. 24 | 25 | Attributes: 26 | d: OrderedDict: dictionary of results. 27 | d_valid: OrderedDict: dictionary of results for validation. 28 | ''' 29 | def __init__(self, *args): 30 | self.d = OrderedDict() 31 | self.d_valid = OrderedDict() 32 | 33 | def update(self, **kwargs): 34 | update_dict_of_lists(self.d, **kwargs) 35 | 36 | def update_valid(self, **kwargs): 37 | update_dict_of_lists(self.d_valid, **kwargs) 38 | 39 | def add(self, **kwargs): 40 | for k, v in kwargs.iteritems(): 41 | self.d[k] = v 42 | 43 | def simple_display(self, d): 44 | length = len('\t' ) + max(len(k) for k in d.keys()) 45 | for k, vs in d.iteritems(): 46 | s = '\t%s' % k 47 | s += ' ' * (length - len(s)) 48 | s += ' |\t%.4f' % vs 49 | print s 50 | 51 | def display(self): 52 | '''Displays the stats. 53 | 54 | This uses some basic heuristics to get stats into rows with validation 55 | (if exists) as well as difference from last step. 
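Example (values illustrative)::

    monitor.update(cost=0.53, nll=91.2)
    monitor.update_valid(cost=0.61)
    monitor.display()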
56 | ''' 57 | d = OrderedDict() 58 | for k in sorted(self.d): 59 | if not k.startswith('d_'): 60 | d[k] = [self.d[k][-1]] 61 | if k in self.d_valid.keys(): 62 | d[k].append(self.d_valid[k][-1]) 63 | if len(self.d_valid[k]) > 1: 64 | d[k].append(self.d_valid[k][-1] - self.d_valid[k][-2]) 65 | else: 66 | d[k].append(None) 67 | 68 | length = len('\t' ) + max(len(k) for k in d.keys()) + len(' (train / valid) |') 69 | for k, vs in d.iteritems(): 70 | s = '\t%s' % k 71 | if len(vs) > 1 and vs[1] is not None: 72 | s += ' (train / valid)' 73 | s += ' ' * (length - len(s)) 74 | s += ' |\t%.4f' % vs[0] 75 | if len(vs) > 1 and vs[1] is not None: 76 | s += ' / %.4f ' % vs[1] 77 | if len(vs) > 2: 78 | s += '\t' + unichr(0x394).encode('utf-8') + '=%.4f' % vs[2] 79 | print s 80 | 81 | def save(self, out_path): 82 | '''Saves a figure for the monitor 83 | 84 | Args: 85 | out_path: str 86 | ''' 87 | 88 | plt.clf() 89 | np.set_printoptions(precision=4) 90 | font = { 91 | 'size': 7 92 | } 93 | matplotlib.rc('font', **font) 94 | y = 2 95 | x = ((len(self.d) - 1) // y) + 1 96 | fig, axes = plt.subplots(y, x) 97 | fig.set_size_inches(20, 8) 98 | 99 | for j, (k, v) in enumerate(self.d.iteritems()): 100 | ax = axes[j // x, j % x] 101 | ax.plot(v, label=k) 102 | if k in self.d_valid.keys(): 103 | ax.plot(self.d_valid[k], label=k + '(valid)') 104 | ax.set_title(k) 105 | ax.legend() 106 | 107 | plt.tight_layout() 108 | plt.savefig(out_path, facecolor=(1, 1, 1)) 109 | plt.close() 110 | 111 | def save_stats(self, out_path): 112 | '''Saves the monitor dictionary. 113 | 114 | Args: 115 | out_path: str 116 | ''' 117 | 118 | np.savez(out_path, **self.d) 119 | 120 | def save_stats_valid(self, out_path): 121 | '''Saves the valid monitor dictionary. 122 | 123 | Args: 124 | out_path: str 125 | ''' 126 | np.savez(out_path, **self.d_valid) 127 | -------------------------------------------------------------------------------- /cortex/utils/preprocessor.py: -------------------------------------------------------------------------------- 1 | '''Convenience class for preprocessing data. 2 | 3 | Not meant to be general, but feel free to subclass if it's useful 4 | ''' 5 | 6 | from collections import OrderedDict 7 | import theano 8 | 9 | from cortex.utils import floatX 10 | 11 | 12 | class Preprocessor(object): 13 | '''Preprocessor class. 14 | 15 | Attributes: 16 | processes: OrderedDict, preprocessing steps in order. 17 | ''' 18 | 19 | keys = ['center'] 20 | keyvals = [] 21 | 22 | def __init__(self, proc_list): 23 | '''Init method. 24 | 25 | Args: 26 | proc_list: list. 
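Currently ``'center'`` is the only supported step; it subtracts the dataset's mean image from the input. For example::

    preproc = Preprocessor(['center'])
    X_centered = preproc(X, data_iter=train)  # `train` is a dataset iterator (illustrative)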
27 | ''' 28 | self.processes = OrderedDict() 29 | for proc in proc_list: 30 | if not isinstance(proc, list): 31 | if proc not in self.keys: 32 | raise ValueError('Processing step, %s, not supported' % proc) 33 | self.processes[proc] = True 34 | else: 35 | assert len(proc) == 2 36 | if proc[0] not in self.keyvals: 37 | raise ValueError('Processing step, %s, not supported' % proc[0]) 38 | self.processes[proc[0]] = proc[1] 39 | 40 | def center(self, X, data_iter=None): 41 | '''Center input.''' 42 | assert data_iter is not None 43 | print 'Centering input with {mode} dataset mean image'.format(mode=data_iter.mode) 44 | X_mean = theano.shared(data_iter.mean_image.astype(floatX), name='X_mean') 45 | X_i = X - X_mean 46 | return X_i 47 | 48 | def __call__(self, X, data_iter=None): 49 | '''Run preprocessing.''' 50 | for process in self.processes: 51 | if process == 'center': 52 | X = self.center(X, data_iter=data_iter) 53 | return X 54 | -------------------------------------------------------------------------------- /cortex/utils/vis_utils.py: -------------------------------------------------------------------------------- 1 | ''' This file contains different utility functions that are not connected 2 | in any way to the networks presented in the tutorials, but rather help in 3 | processing the outputs into a more understandable way. 4 | For example ``tile_raster_images`` helps in generating an easy-to-grasp 5 | image from a set of samples or weights. 6 | ''' 7 | 8 | 9 | import numpy 10 | 11 | 12 | def scale_to_unit_interval(ndar, eps=1e-8): 13 | ''' Scales all values in the ndarray ndar to be between 0 and 1 ''' 14 | ndar = ndar.copy() 15 | ndar -= ndar.min() 16 | ndar *= 1.0 / (ndar.max() + eps) 17 | return ndar 18 | 19 | 20 | def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0), 21 | scale_rows_to_unit_interval=True, 22 | output_pixel_vals=True): 23 | ''' 24 | Transform an array with one flattened image per row, into an array in 25 | which images are reshaped and laid out like tiles on a floor. 26 | This function is useful for visualizing datasets whose rows are images, 27 | and also columns of matrices for transforming those rows 28 | (such as the first layer of a neural net). 29 | :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can 30 | be 2-D ndarrays or None; 31 | :param X: a 2-D array in which every row is a flattened image. 32 | :type img_shape: tuple; (height, width) 33 | :param img_shape: the original shape of each image 34 | :type tile_shape: tuple; (rows, cols) 35 | :param tile_shape: the number of images to tile (rows, cols) 36 | :param output_pixel_vals: if output should be pixel values (i.e. uint8 37 | values) or floats 38 | :param scale_rows_to_unit_interval: if the values need to be scaled before 39 | being plotted to [0,1] or not 40 | :returns: array suitable for viewing as an image. 41 | (See:`PIL.Image.fromarray`.) 42 | :rtype: a 2-d array with same dtype as X.
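    Example (a minimal sketch; ``W`` stands for a weight matrix whose columns are
    784-dimensional, e.g. the input weights of an MNIST model)::

        tiles = tile_raster_images(X=W.T, img_shape=(28, 28), tile_shape=(10, 10),
                                   tile_spacing=(1, 1))
        # `tiles` is a uint8 array that PIL/Pillow can save as an image.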
43 | ''' 44 | 45 | assert len(img_shape) == 2 46 | assert len(tile_shape) == 2 47 | assert len(tile_spacing) == 2 48 | 49 | # The expression below can be re-written in a more C style as 50 | # follows : 51 | # 52 | # out_shape = [0,0] 53 | # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] - 54 | # tile_spacing[0] 55 | # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] - 56 | # tile_spacing[1] 57 | out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp 58 | in zip(img_shape, tile_shape, tile_spacing)] 59 | 60 | if isinstance(X, tuple): 61 | assert len(X) == 4 62 | # Create an output numpy ndarray to store the image 63 | if output_pixel_vals: 64 | out_array = numpy.zeros((out_shape[0], out_shape[1], 4), 65 | dtype='uint8') 66 | else: 67 | out_array = numpy.zeros((out_shape[0], out_shape[1], 4), 68 | dtype=X.dtype) 69 | 70 | #colors default to 0, alpha defaults to 1 (opaque) 71 | if output_pixel_vals: 72 | channel_defaults = [0, 0, 0, 255] 73 | else: 74 | channel_defaults = [0., 0., 0., 1.] 75 | 76 | for i in xrange(4): 77 | if X[i] is None: 78 | # if channel is None, fill it with zeros of the correct 79 | # dtype 80 | dt = out_array.dtype 81 | if output_pixel_vals: 82 | dt = 'uint8' 83 | out_array[:, :, i] = numpy.zeros(out_shape, 84 | dtype=dt) + channel_defaults[i] 85 | else: 86 | # use a recurrent call to compute the channel and store it 87 | # in the output 88 | out_array[:, :, i] = tile_raster_images( 89 | X[i], img_shape, tile_shape, tile_spacing, 90 | scale_rows_to_unit_interval, output_pixel_vals) 91 | return out_array 92 | 93 | else: 94 | # if we are dealing with only one channel 95 | H, W = img_shape 96 | Hs, Ws = tile_spacing 97 | 98 | # generate a matrix to store the output 99 | dt = X.dtype 100 | if output_pixel_vals: 101 | dt = 'uint8' 102 | out_array = numpy.zeros(out_shape, dtype=dt) 103 | 104 | for tile_row in xrange(tile_shape[0]): 105 | for tile_col in xrange(tile_shape[1]): 106 | if tile_row * tile_shape[1] + tile_col < X.shape[0]: 107 | this_x = X[tile_row * tile_shape[1] + tile_col] 108 | if scale_rows_to_unit_interval: 109 | # if we should scale values to be between 0 and 1 110 | # do this by calling the `scale_to_unit_interval` 111 | # function 112 | this_img = scale_to_unit_interval( 113 | this_x.reshape(img_shape)) 114 | else: 115 | this_img = this_x.reshape(img_shape) 116 | # add the slice to the corresponding position in the 117 | # output array 118 | c = 1 119 | if output_pixel_vals: 120 | c = 255 121 | out_array[ 122 | tile_row * (H + Hs): tile_row * (H + Hs) + H, 123 | tile_col * (W + Ws): tile_col * (W + Ws) + W 124 | ] = this_img * c 125 | return out_array -------------------------------------------------------------------------------- /cortex/utils/viz_h/tsne.py: -------------------------------------------------------------------------------- 1 | # 2 | # tsne.py 3 | # 4 | # Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working 5 | # installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the 6 | # results of this example, a working installation of matplotlib is required. 7 | # The example can be run by executing: ipython tsne.py -pylab 8 | # 9 | # 10 | # Created by Laurens van der Maaten on 20-12-08. 11 | # Copyright (c) 2008 Tilburg University. All rights reserved. 
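# A minimal usage sketch: this module targets Python 2 (print statements, xrange)
# and `tsne` expects a float64 NumPy array. The file names below are the example
# inputs assumed by the __main__ block at the bottom of this file:
#
#   X = Math.loadtxt("mnist2500_X.txt")
#   labels = Math.loadtxt("mnist2500_labels.txt")
#   Y = tsne(X, no_dims=2, initial_dims=50, perplexity=20.0)
#   Plot.scatter(Y[:, 0], Y[:, 1], 20, labels)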
12 | 13 | import numpy as Math 14 | import pylab as Plot 15 | 16 | def Hbeta(D = Math.array([]), beta = 1.0): 17 | """Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution.""" 18 | 19 | # Compute P-row and corresponding perplexity 20 | P = Math.exp(-D.copy() * beta); 21 | sumP = sum(P); 22 | H = Math.log(sumP) + beta * Math.sum(D * P) / sumP; 23 | P = P / sumP; 24 | return H, P; 25 | 26 | 27 | def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0): 28 | """Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity.""" 29 | 30 | # Initialize some variables 31 | print "Computing pairwise distances..." 32 | (n, d) = X.shape; 33 | sum_X = Math.sum(Math.square(X), 1); 34 | D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X); 35 | P = Math.zeros((n, n)); 36 | beta = Math.ones((n, 1)); 37 | logU = Math.log(perplexity); 38 | 39 | # Loop over all datapoints 40 | for i in range(n): 41 | 42 | # Print progress 43 | if i % 500 == 0: 44 | print "Computing P-values for point ", i, " of ", n, "..." 45 | 46 | # Compute the Gaussian kernel and entropy for the current precision 47 | betamin = -Math.inf; 48 | betamax = Math.inf; 49 | Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))]; 50 | (H, thisP) = Hbeta(Di, beta[i]); 51 | 52 | # Evaluate whether the perplexity is within tolerance 53 | Hdiff = H - logU; 54 | tries = 0; 55 | while Math.abs(Hdiff) > tol and tries < 50: 56 | 57 | # If not, increase or decrease precision 58 | if Hdiff > 0: 59 | betamin = beta[i].copy(); 60 | if betamax == Math.inf or betamax == -Math.inf: 61 | beta[i] = beta[i] * 2; 62 | else: 63 | beta[i] = (beta[i] + betamax) / 2; 64 | else: 65 | betamax = beta[i].copy(); 66 | if betamin == Math.inf or betamin == -Math.inf: 67 | beta[i] = beta[i] / 2; 68 | else: 69 | beta[i] = (beta[i] + betamin) / 2; 70 | 71 | # Recompute the values 72 | (H, thisP) = Hbeta(Di, beta[i]); 73 | Hdiff = H - logU; 74 | tries = tries + 1; 75 | 76 | # Set the final row of P 77 | P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP; 78 | 79 | # Return final P-matrix 80 | print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta)) 81 | return P; 82 | 83 | 84 | def pca(X = Math.array([]), no_dims = 50): 85 | """Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions.""" 86 | 87 | print "Preprocessing the data using PCA..." 88 | (n, d) = X.shape; 89 | X = X - Math.tile(Math.mean(X, 0), (n, 1)); 90 | (l, M) = Math.linalg.eig(Math.dot(X.T, X)); 91 | Y = Math.dot(X, M[:,0:no_dims]); 92 | return Y; 93 | 94 | 95 | def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0): 96 | """Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions. 97 | The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array.""" 98 | 99 | # Check inputs 100 | if X.dtype != "float64": 101 | print "Error: array X should have type float64."; 102 | return -1; 103 | #if no_dims.__class__ != "": # doesn't work yet! 
104 | # print "Error: number of dimensions should be an integer."; 105 | # return -1; 106 | 107 | # Initialize variables 108 | X = pca(X, initial_dims).real; 109 | (n, d) = X.shape; 110 | max_iter = 1000; 111 | initial_momentum = 0.5; 112 | final_momentum = 0.8; 113 | eta = 500; 114 | min_gain = 0.01; 115 | Y = Math.random.randn(n, no_dims); 116 | dY = Math.zeros((n, no_dims)); 117 | iY = Math.zeros((n, no_dims)); 118 | gains = Math.ones((n, no_dims)); 119 | 120 | # Compute P-values 121 | P = x2p(X, 1e-5, perplexity); 122 | P = P + Math.transpose(P); 123 | P = P / Math.sum(P); 124 | P = P * 4; # early exaggeration 125 | P = Math.maximum(P, 1e-12); 126 | 127 | # Run iterations 128 | for iter in range(max_iter): 129 | 130 | # Compute pairwise affinities 131 | sum_Y = Math.sum(Math.square(Y), 1); 132 | num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y)); 133 | num[range(n), range(n)] = 0; 134 | Q = num / Math.sum(num); 135 | Q = Math.maximum(Q, 1e-12); 136 | 137 | # Compute gradient 138 | PQ = P - Q; 139 | for i in range(n): 140 | dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0); 141 | 142 | # Perform the update 143 | if iter < 20: 144 | momentum = initial_momentum 145 | else: 146 | momentum = final_momentum 147 | gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0)); 148 | gains[gains < min_gain] = min_gain; 149 | iY = momentum * iY - eta * (gains * dY); 150 | Y = Y + iY; 151 | Y = Y - Math.tile(Math.mean(Y, 0), (n, 1)); 152 | 153 | # Compute current value of cost function 154 | if (iter + 1) % 10 == 0: 155 | C = Math.sum(P * Math.log(P / Q)); 156 | print "Iteration ", (iter + 1), ": error is ", C 157 | 158 | # Stop lying about P-values 159 | if iter == 100: 160 | P = P / 4; 161 | 162 | # Return solution 163 | return Y; 164 | 165 | 166 | if __name__ == "__main__": 167 | print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset." 168 | print "Running example on 2,500 MNIST digits..." 
169 | X = Math.loadtxt("mnist2500_X.txt"); 170 | labels = Math.loadtxt("mnist2500_labels.txt"); 171 | Y = tsne(X, 2, 50, 20.0); 172 | Plot.scatter(Y[:,0], Y[:,1], 20, labels); 173 | -------------------------------------------------------------------------------- /doc/_build/doctrees/cortex.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/cortex.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/demos.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/demos.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /doc/_build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/modules.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/modules.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/setup.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/setup.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.analysis.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.analysis.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.analysis.mri.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.analysis.mri.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.datasets.basic.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.datasets.basic.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.datasets.doctree: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.datasets.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.datasets.neuroimaging.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.datasets.neuroimaging.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.demos.demos_basic.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.demos.demos_basic.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.demos.demos_neuroimaging.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.demos.demos_neuroimaging.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.demos.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.demos.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.inference.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.inference.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.models.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.models.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.models.tests.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.models.tests.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/cortex.utils.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/cortex.utils.doctree -------------------------------------------------------------------------------- /doc/_build/doctrees/source/modules.doctree: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/doctrees/source/modules.doctree -------------------------------------------------------------------------------- /doc/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 6f405fb2ff5b136821974bddf996edbd 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/cortex.txt: -------------------------------------------------------------------------------- 1 | cortex package 2 | ============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.analysis 10 | cortex.datasets 11 | cortex.demos 12 | cortex.inference 13 | cortex.models 14 | cortex.utils 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cortex 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/demos.txt: -------------------------------------------------------------------------------- 1 | Demos 2 | ===== 3 | 4 | Cortex has several command-line demos of functionality. 5 | If the ``basic`` dataset was installed using ``cortex-setup``, then the 6 | following demos are available: 7 | 8 | .. code-block:: bash 9 | 10 | $ cortex-classifier-demo 11 | 12 | $ cortex-rbm-demo 13 | 14 | $ cortex-vae-demo 15 | 16 | If you installed the neuroimaging data, then the neuroimaging demos can be run: 17 | 18 | .. code-block:: bash 19 | 20 | $ cortex-rbm-vbm-demo 21 | 22 | $ cortex-rbm-olin-demo 23 | 24 | These are partial datasets used for demo purposes only. 25 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/index.txt: -------------------------------------------------------------------------------- 1 | Cortex documentation 2 | ================================= 3 | Cortex is a framework for training and evaluating neural networks using Theano. 4 | Cortex is not specific to neuroimaging, but includes tools for it. Cortex is not 5 | meant to replace Theano, but is intended to be used as a complement to scripting 6 | in Python. It is very customizable, as all methods and classes are suggested 7 | templates, and pure Theano can be used when needed. 8 | 9 | .. warning:: 10 | Cortex is a brand-new project and is under rapid development. If you encounter 11 | any bugs or have any feature requests, please `email`_ or 12 | `create a GitHub issue`_. 13 | 14 | .. _email: erroneus@gmail.com 15 | .. _create a GitHub issue: https://github.com/dhjelm/cortex/issues/new 16 | 17 | .. _tutorials: 18 | 19 | Tutorials 20 | --------- 21 | ..
toctree:: 22 | :maxdepth: 1 23 | 24 | setup 25 | demos 26 | 27 | Features 28 | -------- 29 | 30 | Currently Cortex supports the following models: 31 | 32 | * Feed-forward networks 33 | * RBMs 34 | * RNNs, GRUs, and LSTMs 35 | * Helmholtz machines as well as variational inference methods 36 | * Common datasets, such as MNIST and Caltech silhouettes 37 | * Neuroimaging datasets, such as MRI 38 | 39 | Indices and tables 40 | ================== 41 | * :ref:`genindex` 42 | * :ref:`modindex` 43 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/modules.txt: -------------------------------------------------------------------------------- 1 | cortex 2 | ====== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | cortex 8 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/setup.txt: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | You can install Cortex using the Python package manager ``pip``. 5 | 6 | .. code-block:: bash 7 | 8 | $ pip install cortex 9 | 10 | To get the most up-to-date version, you can install from the ``git`` repository: 11 | 12 | .. code-block:: bash 13 | 14 | $ pip install git+git://github.com/rdevon/cortex.git 15 | 16 | However, currently the demos give the best examples of how to script using cortex. 17 | So, if this is your first time using cortex, it is recommended to clone from the GitHub repository: 18 | 19 | .. code-block:: bash 20 | 21 | $ git clone https://github.com/rdevon/cortex.git 22 | $ cd cortex 23 | $ python setup.py install 24 | 25 | If you don't have administrative rights, add the ``--user`` switch to the 26 | install commands to install the packages in your home folder. If you want to 27 | update Cortex, simply repeat the first command with the ``--upgrade`` switch 28 | added to pull the latest version from GitHub. 29 | 30 | In either case, you need to run the setup script: 31 | 32 | .. code-block:: bash 33 | 34 | $ cortex-setup 35 | 36 | Follow the instructions; you will be asked to specify default data and out 37 | directories. These are necessary only for the demos, and can be customized in your 38 | ~/.cortexrc file. 39 | 40 | Basic Requirements 41 | __________________ 42 | 43 | .. _PyYAML: http://pyyaml.org/wiki/PyYAML 44 | .. _Theano: http://deeplearning.net/software/theano/ 45 | 46 | * Theano_ 47 | * PyYAML_ 48 | 49 | Neuroimaging Requirements 50 | _________________________ 51 | 52 | .. note:: 53 | 54 | .. _h5py: http://www.h5py.org/ 55 | .. _nipy: http://nipy.org/ 56 | .. _afni: http://afni.nimh.nih.gov 57 | .. _nibabel: http://nipy.org/nibabel/ 58 | .. _sklearn: http://scikit-learn.org/stable/ 59 | 60 | These are not required for basic functionality, but are necessary for 61 | neuroimaging tools. `afni`_, in particular, needs to be installed manually. 62 | 63 | * nipy_ 64 | * h5py_ 65 | * afni_ 66 | * nibabel_ 67 | * sklearn_ 68 | 69 | Documentation 70 | ------------- 71 | 72 | If you want to build a local copy of the documentation, follow the instructions 73 | at the :doc:`documentation development guidelines `.
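If you just want to try a local build, the Sphinx ``Makefile`` shipped in ``doc/``
should work with the usual invocation (a sketch; the available targets may differ):

.. code-block:: bash

    $ cd doc
    $ make html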
-------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.analysis.mri.txt: -------------------------------------------------------------------------------- 1 | cortex.analysis.mri package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.analysis.mri.rois module 8 | ------------------------------- 9 | 10 | .. automodule:: cortex.analysis.mri.rois 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cortex.analysis.mri 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.analysis.txt: -------------------------------------------------------------------------------- 1 | cortex.analysis package 2 | ======================= 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.analysis.mri 10 | 11 | Submodules 12 | ---------- 13 | 14 | cortex.analysis.load_mri module 15 | ------------------------------- 16 | 17 | .. automodule:: cortex.analysis.load_mri 18 | :members: 19 | :undoc-members: 20 | :show-inheritance: 21 | 22 | cortex.analysis.read_fmri module 23 | -------------------------------- 24 | 25 | .. automodule:: cortex.analysis.read_fmri 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: cortex.analysis 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.datasets.basic.txt: -------------------------------------------------------------------------------- 1 | cortex.datasets.basic package 2 | ============================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.datasets.basic.caltech module 8 | ------------------------------------ 9 | 10 | .. automodule:: cortex.datasets.basic.caltech 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.datasets.basic.cifar module 16 | ---------------------------------- 17 | 18 | .. automodule:: cortex.datasets.basic.cifar 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.datasets.basic.euclidean module 24 | -------------------------------------- 25 | 26 | .. automodule:: cortex.datasets.basic.euclidean 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.datasets.basic.horses module 32 | ----------------------------------- 33 | 34 | .. automodule:: cortex.datasets.basic.horses 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.datasets.basic.mnist module 40 | ---------------------------------- 41 | 42 | .. automodule:: cortex.datasets.basic.mnist 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | cortex.datasets.basic.uci module 48 | -------------------------------- 49 | 50 | .. automodule:: cortex.datasets.basic.uci 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | 56 | Module contents 57 | --------------- 58 | 59 | .. 
automodule:: cortex.datasets.basic 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.datasets.neuroimaging.txt: -------------------------------------------------------------------------------- 1 | cortex.datasets.neuroimaging package 2 | ==================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.datasets.neuroimaging.fmri module 8 | ---------------------------------------- 9 | 10 | .. automodule:: cortex.datasets.neuroimaging.fmri 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.datasets.neuroimaging.mri module 16 | --------------------------------------- 17 | 18 | .. automodule:: cortex.datasets.neuroimaging.mri 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.datasets.neuroimaging.nifti_viewer module 24 | ------------------------------------------------ 25 | 26 | .. automodule:: cortex.datasets.neuroimaging.nifti_viewer 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.datasets.neuroimaging.simTB module 32 | ----------------------------------------- 33 | 34 | .. automodule:: cortex.datasets.neuroimaging.simTB 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.datasets.neuroimaging.snp module 40 | --------------------------------------- 41 | 42 | .. automodule:: cortex.datasets.neuroimaging.snp 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: cortex.datasets.neuroimaging 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.datasets.txt: -------------------------------------------------------------------------------- 1 | cortex.datasets package 2 | ======================= 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.datasets.basic 10 | cortex.datasets.neuroimaging 11 | 12 | Module contents 13 | --------------- 14 | 15 | .. automodule:: cortex.datasets 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.demos.demos_basic.txt: -------------------------------------------------------------------------------- 1 | cortex.demos.demos_basic package 2 | ================================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.demos.demos_basic.classifier module 8 | ------------------------------------------ 9 | 10 | .. automodule:: cortex.demos.demos_basic.classifier 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.demos.demos_basic.eval_rbm module 16 | ---------------------------------------- 17 | 18 | .. automodule:: cortex.demos.demos_basic.eval_rbm 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.demos.demos_basic.rbm_mnist module 24 | ----------------------------------------- 25 | 26 | .. automodule:: cortex.demos.demos_basic.rbm_mnist 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.demos.demos_basic.vae module 32 | ----------------------------------- 33 | 34 | .. automodule:: cortex.demos.demos_basic.vae 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. 
automodule:: cortex.demos.demos_basic 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.demos.demos_neuroimaging.txt: -------------------------------------------------------------------------------- 1 | cortex.demos.demos_neuroimaging package 2 | ======================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.demos.demos_neuroimaging.rbm_ni module 8 | --------------------------------------------- 9 | 10 | .. automodule:: cortex.demos.demos_neuroimaging.rbm_ni 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.demos.demos_neuroimaging.vae module 16 | ------------------------------------------ 17 | 18 | .. automodule:: cortex.demos.demos_neuroimaging.vae 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: cortex.demos.demos_neuroimaging 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.demos.txt: -------------------------------------------------------------------------------- 1 | cortex.demos package 2 | ==================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.demos.demos_basic 10 | cortex.demos.demos_neuroimaging 11 | 12 | Module contents 13 | --------------- 14 | 15 | .. automodule:: cortex.demos 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.inference.txt: -------------------------------------------------------------------------------- 1 | cortex.inference package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.inference.air module 8 | --------------------------- 9 | 10 | .. automodule:: cortex.inference.air 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.inference.gdir module 16 | ---------------------------- 17 | 18 | .. automodule:: cortex.inference.gdir 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.inference.irvi module 24 | ---------------------------- 25 | 26 | .. automodule:: cortex.inference.irvi 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.inference.rws module 32 | --------------------------- 33 | 34 | .. automodule:: cortex.inference.rws 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. automodule:: cortex.inference 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.models.tests.txt: -------------------------------------------------------------------------------- 1 | cortex.models.tests package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.models.tests.test_darn module 8 | ------------------------------------ 9 | 10 | .. automodule:: cortex.models.tests.test_darn 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.models.tests.test_mlp module 16 | ----------------------------------- 17 | 18 | .. 
automodule:: cortex.models.tests.test_mlp 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.models.tests.test_rbm module 24 | ----------------------------------- 25 | 26 | .. automodule:: cortex.models.tests.test_rbm 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.models.tests.test_rnn module 32 | ----------------------------------- 33 | 34 | .. automodule:: cortex.models.tests.test_rnn 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.models.tests.test_vae module 40 | ----------------------------------- 41 | 42 | .. automodule:: cortex.models.tests.test_vae 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: cortex.models.tests 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.models.txt: -------------------------------------------------------------------------------- 1 | cortex.models package 2 | ===================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.models.tests 10 | 11 | Submodules 12 | ---------- 13 | 14 | cortex.models.darn module 15 | ------------------------- 16 | 17 | .. automodule:: cortex.models.darn 18 | :members: 19 | :undoc-members: 20 | :show-inheritance: 21 | 22 | cortex.models.deep_helmholtz module 23 | ----------------------------------- 24 | 25 | .. automodule:: cortex.models.deep_helmholtz 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | cortex.models.distributions module 31 | ---------------------------------- 32 | 33 | .. automodule:: cortex.models.distributions 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | 38 | cortex.models.gru module 39 | ------------------------ 40 | 41 | .. automodule:: cortex.models.gru 42 | :members: 43 | :undoc-members: 44 | :show-inheritance: 45 | 46 | cortex.models.helmholtz module 47 | ------------------------------ 48 | 49 | .. automodule:: cortex.models.helmholtz 50 | :members: 51 | :undoc-members: 52 | :show-inheritance: 53 | 54 | cortex.models.layers module 55 | --------------------------- 56 | 57 | .. automodule:: cortex.models.layers 58 | :members: 59 | :undoc-members: 60 | :show-inheritance: 61 | 62 | cortex.models.lstm module 63 | ------------------------- 64 | 65 | .. automodule:: cortex.models.lstm 66 | :members: 67 | :undoc-members: 68 | :show-inheritance: 69 | 70 | cortex.models.mlp module 71 | ------------------------ 72 | 73 | .. automodule:: cortex.models.mlp 74 | :members: 75 | :undoc-members: 76 | :show-inheritance: 77 | 78 | cortex.models.rbm module 79 | ------------------------ 80 | 81 | .. automodule:: cortex.models.rbm 82 | :members: 83 | :undoc-members: 84 | :show-inheritance: 85 | 86 | cortex.models.rnn module 87 | ------------------------ 88 | 89 | .. automodule:: cortex.models.rnn 90 | :members: 91 | :undoc-members: 92 | :show-inheritance: 93 | 94 | 95 | Module contents 96 | --------------- 97 | 98 | .. automodule:: cortex.models 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.txt: -------------------------------------------------------------------------------- 1 | cortex package 2 | ============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. 
toctree:: 8 | 9 | cortex.analysis 10 | cortex.datasets 11 | cortex.demos 12 | cortex.inference 13 | cortex.models 14 | cortex.utils 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cortex 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/cortex.utils.txt: -------------------------------------------------------------------------------- 1 | cortex.utils package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.utils.extra module 8 | ------------------------- 9 | 10 | .. automodule:: cortex.utils.extra 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.utils.learning_scheduler module 16 | -------------------------------------- 17 | 18 | .. automodule:: cortex.utils.learning_scheduler 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.utils.logger module 24 | -------------------------- 25 | 26 | .. automodule:: cortex.utils.logger 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.utils.monitor module 32 | --------------------------- 33 | 34 | .. automodule:: cortex.utils.monitor 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.utils.op module 40 | ---------------------- 41 | 42 | .. automodule:: cortex.utils.op 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | cortex.utils.preprocessor module 48 | -------------------------------- 49 | 50 | .. automodule:: cortex.utils.preprocessor 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | cortex.utils.tools module 56 | ------------------------- 57 | 58 | .. automodule:: cortex.utils.tools 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | cortex.utils.training module 64 | ---------------------------- 65 | 66 | .. automodule:: cortex.utils.training 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | cortex.utils.vis_utils module 72 | ----------------------------- 73 | 74 | .. automodule:: cortex.utils.vis_utils 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | 80 | Module contents 81 | --------------- 82 | 83 | .. automodule:: cortex.utils 84 | :members: 85 | :undoc-members: 86 | :show-inheritance: 87 | -------------------------------------------------------------------------------- /doc/_build/html/_sources/source/modules.txt: -------------------------------------------------------------------------------- 1 | cortex 2 | ====== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | cortex 8 | -------------------------------------------------------------------------------- /doc/_build/html/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/ajax-loader.gif -------------------------------------------------------------------------------- /doc/_build/html/_static/classic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * default.css_t 3 | * ~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- default theme. 6 | * 7 | * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: sans-serif; 18 | font-size: 100%; 19 | background-color: #11303d; 20 | color: #000; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.document { 26 | background-color: #1c4e63; 27 | } 28 | 29 | div.documentwrapper { 30 | float: left; 31 | width: 100%; 32 | } 33 | 34 | div.bodywrapper { 35 | margin: 0 0 0 230px; 36 | } 37 | 38 | div.body { 39 | background-color: #ffffff; 40 | color: #000000; 41 | padding: 0 20px 30px 20px; 42 | } 43 | 44 | div.footer { 45 | color: #ffffff; 46 | width: 100%; 47 | padding: 9px 0 9px 0; 48 | text-align: center; 49 | font-size: 75%; 50 | } 51 | 52 | div.footer a { 53 | color: #ffffff; 54 | text-decoration: underline; 55 | } 56 | 57 | div.related { 58 | background-color: #133f52; 59 | line-height: 30px; 60 | color: #ffffff; 61 | } 62 | 63 | div.related a { 64 | color: #ffffff; 65 | } 66 | 67 | div.sphinxsidebar { 68 | } 69 | 70 | div.sphinxsidebar h3 { 71 | font-family: 'Trebuchet MS', sans-serif; 72 | color: #ffffff; 73 | font-size: 1.4em; 74 | font-weight: normal; 75 | margin: 0; 76 | padding: 0; 77 | } 78 | 79 | div.sphinxsidebar h3 a { 80 | color: #ffffff; 81 | } 82 | 83 | div.sphinxsidebar h4 { 84 | font-family: 'Trebuchet MS', sans-serif; 85 | color: #ffffff; 86 | font-size: 1.3em; 87 | font-weight: normal; 88 | margin: 5px 0 0 0; 89 | padding: 0; 90 | } 91 | 92 | div.sphinxsidebar p { 93 | color: #ffffff; 94 | } 95 | 96 | div.sphinxsidebar p.topless { 97 | margin: 5px 10px 10px 10px; 98 | } 99 | 100 | div.sphinxsidebar ul { 101 | margin: 10px; 102 | padding: 0; 103 | color: #ffffff; 104 | } 105 | 106 | div.sphinxsidebar a { 107 | color: #98dbcc; 108 | } 109 | 110 | div.sphinxsidebar input { 111 | border: 1px solid #98dbcc; 112 | font-family: sans-serif; 113 | font-size: 1em; 114 | } 115 | 116 | 117 | 118 | /* -- hyperlink styles ------------------------------------------------------ */ 119 | 120 | a { 121 | color: #355f7c; 122 | text-decoration: none; 123 | } 124 | 125 | a:visited { 126 | color: #355f7c; 127 | text-decoration: none; 128 | } 129 | 130 | a:hover { 131 | text-decoration: underline; 132 | } 133 | 134 | 135 | 136 | /* -- body styles ----------------------------------------------------------- */ 137 | 138 | div.body h1, 139 | div.body h2, 140 | div.body h3, 141 | div.body h4, 142 | div.body h5, 143 | div.body h6 { 144 | font-family: 'Trebuchet MS', sans-serif; 145 | background-color: #f2f2f2; 146 | font-weight: normal; 147 | color: #20435c; 148 | border-bottom: 1px solid #ccc; 149 | margin: 20px -20px 10px -20px; 150 | padding: 3px 0 3px 10px; 151 | } 152 | 153 | div.body h1 { margin-top: 0; font-size: 200%; } 154 | div.body h2 { font-size: 160%; } 155 | div.body h3 { font-size: 140%; } 156 | div.body h4 { font-size: 120%; } 157 | div.body h5 { font-size: 110%; } 158 | div.body h6 { font-size: 100%; } 159 | 160 | a.headerlink { 161 | color: #c60f0f; 162 | font-size: 0.8em; 163 | padding: 0 4px 0 4px; 164 | text-decoration: none; 165 | } 166 | 167 | a.headerlink:hover { 168 | background-color: #c60f0f; 169 | color: white; 170 | } 171 | 172 | div.body p, div.body dd, div.body li, div.body blockquote { 173 | text-align: justify; 174 | line-height: 130%; 175 | } 176 | 177 | div.admonition p.admonition-title + p { 178 | display: inline; 179 | } 180 | 181 | div.admonition p { 182 | margin-bottom: 5px; 183 | } 184 | 185 | div.admonition pre { 186 | margin-bottom: 5px; 187 | 
} 188 | 189 | div.admonition ul, div.admonition ol { 190 | margin-bottom: 5px; 191 | } 192 | 193 | div.note { 194 | background-color: #eee; 195 | border: 1px solid #ccc; 196 | } 197 | 198 | div.seealso { 199 | background-color: #ffc; 200 | border: 1px solid #ff6; 201 | } 202 | 203 | div.topic { 204 | background-color: #eee; 205 | } 206 | 207 | div.warning { 208 | background-color: #ffe4e4; 209 | border: 1px solid #f66; 210 | } 211 | 212 | p.admonition-title { 213 | display: inline; 214 | } 215 | 216 | p.admonition-title:after { 217 | content: ":"; 218 | } 219 | 220 | pre { 221 | padding: 5px; 222 | background-color: #eeffcc; 223 | color: #333333; 224 | line-height: 120%; 225 | border: 1px solid #ac9; 226 | border-left: none; 227 | border-right: none; 228 | } 229 | 230 | code { 231 | background-color: #ecf0f3; 232 | padding: 0 1px 0 1px; 233 | font-size: 0.95em; 234 | } 235 | 236 | th { 237 | background-color: #ede; 238 | } 239 | 240 | .warning code { 241 | background: #efc2c2; 242 | } 243 | 244 | .note code { 245 | background: #d6d6d6; 246 | } 247 | 248 | .viewcode-back { 249 | font-family: sans-serif; 250 | } 251 | 252 | div.viewcode-block:target { 253 | background-color: #f4debf; 254 | border-top: 1px solid #ac9; 255 | border-bottom: 1px solid #ac9; 256 | } 257 | 258 | div.code-block-caption { 259 | color: #efefef; 260 | background-color: #1c4e63; 261 | } -------------------------------------------------------------------------------- /doc/_build/html/_static/comment-bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/comment-bright.png -------------------------------------------------------------------------------- /doc/_build/html/_static/comment-close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/comment-close.png -------------------------------------------------------------------------------- /doc/_build/html/_static/comment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/comment.png -------------------------------------------------------------------------------- /doc/_build/html/_static/down-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/down-pressed.png -------------------------------------------------------------------------------- /doc/_build/html/_static/down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/down.png -------------------------------------------------------------------------------- /doc/_build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/file.png -------------------------------------------------------------------------------- /doc/_build/html/_static/minus.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/minus.png -------------------------------------------------------------------------------- /doc/_build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rdevon/cortex_old/e53b54e73c50cabe072ab91ab368b328abadb338/doc/_build/html/_static/plus.png -------------------------------------------------------------------------------- /doc/_build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ 8 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 9 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 10 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ 11 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 12 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 14 | .highlight .ge { font-style: italic } /* Generic.Emph */ 15 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 18 | .highlight .go { color: #333333 } /* Generic.Output */ 19 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 20 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 23 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 24 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 25 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 26 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 27 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 28 | .highlight .kt { color: #902000 } /* Keyword.Type */ 29 | .highlight .m { color: #208050 } /* Literal.Number */ 30 | .highlight .s { color: #4070a0 } /* Literal.String */ 31 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 32 | .highlight .nb { color: #007020 } /* Name.Builtin */ 33 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 34 | .highlight .no { color: #60add5 } /* Name.Constant */ 35 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 36 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 37 | .highlight .ne { color: #007020 } /* Name.Exception */ 38 | .highlight .nf { color: #06287e } /* Name.Function */ 39 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 40 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 41 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 42 | 
.highlight .nv { color: #bb60d5 } /* Name.Variable */ 43 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 45 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */ 46 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 47 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 48 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 49 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 50 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 51 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 52 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 53 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 54 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 55 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 56 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 57 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 58 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 59 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 60 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 61 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 62 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 63 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 64 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 65 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /doc/_build/html/_static/sidebar.js: -------------------------------------------------------------------------------- 1 | /* 2 | * sidebar.js 3 | * ~~~~~~~~~~ 4 | * 5 | * This script makes the Sphinx sidebar collapsible. 6 | * 7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds 8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton 9 | * used to collapse and expand the sidebar. 10 | * 11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden 12 | * and the width of the sidebar and the margin-left of the document 13 | * are decreased. When the sidebar is expanded the opposite happens. 14 | * This script saves a per-browser/per-session cookie used to 15 | * remember the position of the sidebar among the pages. 16 | * Once the browser is closed the cookie is deleted and the position 17 | * reset to the default (expanded). 18 | * 19 | * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. 20 | * :license: BSD, see LICENSE for details. 21 | * 22 | */ 23 | 24 | $(function() { 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | // global elements used by the functions. 
-------------------------------------------------------------------------------- /doc/demos.rst: -------------------------------------------------------------------------------- 1 | Demos 2 | ===== 3 | 4 | Cortex has several command-line demos of its functionality. 5 | If the ``basic`` dataset was installed using ``cortex-setup``, then the 6 | following demos are available: 7 | 8 | .. code-block:: bash 9 | 10 | $ cortex-classifier-demo 11 | 12 | $ cortex-rbm-demo 13 | 14 | $ cortex-vae-demo 15 | 16 | If you installed the neuroimaging data, then the neuroimaging demos can be run: 17 | 18 | .. code-block:: bash 19 | 20 | $ cortex-rbm-vbm-demo 21 | 22 | $ cortex-rbm-olin-demo 23 | 24 | These are partial datasets used for demo purposes only. 25 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | Cortex documentation 2 | ================================= 3 | Cortex is a framework for training and evaluating neural networks using Theano. 4 | Cortex is not specific to neuroimaging, but it includes tools for it. Cortex is not 5 | meant to replace Theano, but is intended to be used as a complement to scripting 6 | in Python. It is very customizable, as all methods and classes are suggested 7 | templates, and pure Theano can be used when needed. 8 | 9 | .. warning:: 10 | Cortex is a brand-new project and is under rapid development. If you encounter 11 | any bugs or have any feature requests, please `email`_ or 12 | `create a GitHub issue`_. 13 | 14 | .. _email: erroneus@gmail.com 15 | .. _create a GitHub issue: https://github.com/dhjelm/cortex/issues/new 16 | 17 | .. _tutorials: 18 | 19 | Tutorials 20 | --------- 21 | .. toctree:: 22 | :maxdepth: 1 23 | 24 | setup 25 | demos 26 | 27 | Features 28 | -------- 29 | 30 | Currently Cortex supports the following models and datasets: 31 | 32 | * Feed-forward networks 33 | * RBMs 34 | * RNNs, GRUs, and LSTMs 35 | * Helmholtz machines as well as variational inference methods 36 | * Common datasets, such as MNIST and Caltech silhouettes 37 | * Neuroimaging datasets, such as MRI 38 | 39 | Indices and tables 40 | ================== 41 | * :ref:`genindex` 42 | * :ref:`modindex` 43 |
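The index page above says that pure Theano can be mixed in wherever Cortex's suggested templates are not enough. The following is a rough illustration of that claim: a minimal sketch that uses only the public Theano API (no Cortex calls), with variable names and shapes made up for the example.

.. code-block:: python

    # Pure-Theano sketch: a hand-built dense layer compiled into a callable.
    # Nothing here comes from Cortex; it only shows the kind of Theano code
    # that can sit alongside Cortex models when the templates fall short.
    import numpy as np
    import theano
    import theano.tensor as T

    X = T.matrix('X')  # symbolic mini-batch, assumed shape (batch, 10)
    W = theano.shared(np.random.randn(10, 5).astype(theano.config.floatX), name='W')
    b = theano.shared(np.zeros(5, dtype=theano.config.floatX), name='b')

    h = T.nnet.sigmoid(T.dot(X, W) + b)  # one dense layer, written by hand
    f = theano.function([X], h)          # compile the symbolic graph

    out = f(np.random.randn(3, 10).astype(theano.config.floatX))
    print(out.shape)  # (3, 5)

Anything built this way can be trained and monitored with plain Theano as well, which is the escape hatch the documentation is pointing at.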
-------------------------------------------------------------------------------- /doc/setup.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | You can install Cortex using the Python package manager ``pip``. 5 | 6 | .. code-block:: bash 7 | 8 | $ pip install cortex 9 | 10 | To get the most up-to-date version, you can install from the ``git`` repository: 11 | 12 | .. code-block:: bash 13 | 14 | $ pip install git+git://github.com/rdevon/cortex.git 15 | 16 | However, the demos currently give the best examples of how to script using Cortex. 17 | So, if this is your first time using Cortex, it is recommended to clone the GitHub repository: 18 | 19 | .. code-block:: bash 20 | 21 | $ git clone https://github.com/rdevon/cortex.git 22 | $ cd cortex 23 | $ python setup.py install 24 | 25 | If you don't have administrative rights, add the ``--user`` switch to the 26 | install commands to install the packages in your home folder. If you want to 27 | update Cortex, simply repeat the first command with the ``--upgrade`` switch 28 | added to pull the latest version from GitHub. 29 | 30 | In either case, you need to run the setup script: 31 | 32 | .. code-block:: bash 33 | 34 | $ cortex-setup 35 | 36 | Follow the instructions; you will be asked to specify default data and output 37 | directories. These are necessary only for the demos, and can be customized in your 38 | ~/.cortexrc file. 39 | 40 | Basic Requirements 41 | __________________ 42 | 43 | .. _PyYAML: http://pyyaml.org/wiki/PyYAML 44 | .. _Theano: http://deeplearning.net/software/theano/ 45 | 46 | * Theano_ 47 | * PyYAML_ 48 | 49 | Neuroimaging Requirements 50 | _________________________ 51 | 52 | .. note:: 53 | 54 | .. _h5py: http://www.h5py.org/ 55 | .. _nipy: http://nipy.org/ 56 | .. _afni: http://afni.nimh.nih.gov 57 | .. _nibabel: http://nipy.org/nibabel/ 58 | .. _sklearn: http://scikit-learn.org/stable/ 59 | 60 | These are not required for basic functionality, but are necessary for 61 | neuroimaging tools. `afni`_, in particular, needs to be installed manually. 62 | 63 | * nipy_ 64 | * h5py_ 65 | * afni_ 66 | * nibabel_ 67 | * sklearn_ 68 | 69 | Documentation 70 | ------------- 71 | 72 | If you want to build a local copy of the documentation, follow the instructions 73 | at the :doc:`documentation development guidelines `. -------------------------------------------------------------------------------- /doc/source/cortex.analysis.mri.rst: -------------------------------------------------------------------------------- 1 | cortex.analysis.mri package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.analysis.mri.rois module 8 | ------------------------------- 9 | 10 | .. automodule:: cortex.analysis.mri.rois 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cortex.analysis.mri 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /doc/source/cortex.analysis.rst: -------------------------------------------------------------------------------- 1 | cortex.analysis package 2 | ======================= 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.analysis.mri 10 | 11 | Submodules 12 | ---------- 13 | 14 | cortex.analysis.load_mri module 15 | ------------------------------- 16 | 17 | .. automodule:: cortex.analysis.load_mri 18 | :members: 19 | :undoc-members: 20 | :show-inheritance: 21 | 22 | cortex.analysis.read_fmri module 23 | -------------------------------- 24 | 25 | .. automodule:: cortex.analysis.read_fmri 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: cortex.analysis 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /doc/source/cortex.datasets.basic.rst: -------------------------------------------------------------------------------- 1 | cortex.datasets.basic package 2 | ============================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.datasets.basic.caltech module 8 | ------------------------------------ 9 | 10 | .. automodule:: cortex.datasets.basic.caltech 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.datasets.basic.cifar module 16 | ---------------------------------- 17 | 18 | .. automodule:: cortex.datasets.basic.cifar 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.datasets.basic.euclidean module 24 | -------------------------------------- 25 | 26 | ..
automodule:: cortex.datasets.basic.euclidean 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.datasets.basic.horses module 32 | ----------------------------------- 33 | 34 | .. automodule:: cortex.datasets.basic.horses 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.datasets.basic.mnist module 40 | ---------------------------------- 41 | 42 | .. automodule:: cortex.datasets.basic.mnist 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | cortex.datasets.basic.uci module 48 | -------------------------------- 49 | 50 | .. automodule:: cortex.datasets.basic.uci 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | 56 | Module contents 57 | --------------- 58 | 59 | .. automodule:: cortex.datasets.basic 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | -------------------------------------------------------------------------------- /doc/source/cortex.datasets.neuroimaging.rst: -------------------------------------------------------------------------------- 1 | cortex.datasets.neuroimaging package 2 | ==================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.datasets.neuroimaging.fmri module 8 | ---------------------------------------- 9 | 10 | .. automodule:: cortex.datasets.neuroimaging.fmri 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.datasets.neuroimaging.mri module 16 | --------------------------------------- 17 | 18 | .. automodule:: cortex.datasets.neuroimaging.mri 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.datasets.neuroimaging.nifti_viewer module 24 | ------------------------------------------------ 25 | 26 | .. automodule:: cortex.datasets.neuroimaging.nifti_viewer 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.datasets.neuroimaging.simTB module 32 | ----------------------------------------- 33 | 34 | .. automodule:: cortex.datasets.neuroimaging.simTB 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.datasets.neuroimaging.snp module 40 | --------------------------------------- 41 | 42 | .. automodule:: cortex.datasets.neuroimaging.snp 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: cortex.datasets.neuroimaging 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /doc/source/cortex.datasets.rst: -------------------------------------------------------------------------------- 1 | cortex.datasets package 2 | ======================= 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.datasets.basic 10 | cortex.datasets.neuroimaging 11 | 12 | Module contents 13 | --------------- 14 | 15 | .. automodule:: cortex.datasets 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | -------------------------------------------------------------------------------- /doc/source/cortex.demos.demos_basic.rst: -------------------------------------------------------------------------------- 1 | cortex.demos.demos_basic package 2 | ================================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.demos.demos_basic.classifier module 8 | ------------------------------------------ 9 | 10 | .. 
automodule:: cortex.demos.demos_basic.classifier 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.demos.demos_basic.eval_rbm module 16 | ---------------------------------------- 17 | 18 | .. automodule:: cortex.demos.demos_basic.eval_rbm 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.demos.demos_basic.rbm_mnist module 24 | ----------------------------------------- 25 | 26 | .. automodule:: cortex.demos.demos_basic.rbm_mnist 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.demos.demos_basic.vae module 32 | ----------------------------------- 33 | 34 | .. automodule:: cortex.demos.demos_basic.vae 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. automodule:: cortex.demos.demos_basic 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /doc/source/cortex.demos.demos_neuroimaging.rst: -------------------------------------------------------------------------------- 1 | cortex.demos.demos_neuroimaging package 2 | ======================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.demos.demos_neuroimaging.rbm_ni module 8 | --------------------------------------------- 9 | 10 | .. automodule:: cortex.demos.demos_neuroimaging.rbm_ni 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.demos.demos_neuroimaging.vae module 16 | ------------------------------------------ 17 | 18 | .. automodule:: cortex.demos.demos_neuroimaging.vae 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: cortex.demos.demos_neuroimaging 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /doc/source/cortex.demos.rst: -------------------------------------------------------------------------------- 1 | cortex.demos package 2 | ==================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.demos.demos_basic 10 | cortex.demos.demos_neuroimaging 11 | 12 | Module contents 13 | --------------- 14 | 15 | .. automodule:: cortex.demos 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | -------------------------------------------------------------------------------- /doc/source/cortex.inference.rst: -------------------------------------------------------------------------------- 1 | cortex.inference package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.inference.air module 8 | --------------------------- 9 | 10 | .. automodule:: cortex.inference.air 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.inference.gdir module 16 | ---------------------------- 17 | 18 | .. automodule:: cortex.inference.gdir 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.inference.irvi module 24 | ---------------------------- 25 | 26 | .. automodule:: cortex.inference.irvi 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.inference.rws module 32 | --------------------------- 33 | 34 | .. automodule:: cortex.inference.rws 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. 
automodule:: cortex.inference 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /doc/source/cortex.models.rst: -------------------------------------------------------------------------------- 1 | cortex.models package 2 | ===================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.models.tests 10 | 11 | Submodules 12 | ---------- 13 | 14 | cortex.models.darn module 15 | ------------------------- 16 | 17 | .. automodule:: cortex.models.darn 18 | :members: 19 | :undoc-members: 20 | :show-inheritance: 21 | 22 | cortex.models.deep_helmholtz module 23 | ----------------------------------- 24 | 25 | .. automodule:: cortex.models.deep_helmholtz 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | cortex.models.distributions module 31 | ---------------------------------- 32 | 33 | .. automodule:: cortex.models.distributions 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | 38 | cortex.models.gru module 39 | ------------------------ 40 | 41 | .. automodule:: cortex.models.gru 42 | :members: 43 | :undoc-members: 44 | :show-inheritance: 45 | 46 | cortex.models.helmholtz module 47 | ------------------------------ 48 | 49 | .. automodule:: cortex.models.helmholtz 50 | :members: 51 | :undoc-members: 52 | :show-inheritance: 53 | 54 | cortex.models.layers module 55 | --------------------------- 56 | 57 | .. automodule:: cortex.models.layers 58 | :members: 59 | :undoc-members: 60 | :show-inheritance: 61 | 62 | cortex.models.lstm module 63 | ------------------------- 64 | 65 | .. automodule:: cortex.models.lstm 66 | :members: 67 | :undoc-members: 68 | :show-inheritance: 69 | 70 | cortex.models.mlp module 71 | ------------------------ 72 | 73 | .. automodule:: cortex.models.mlp 74 | :members: 75 | :undoc-members: 76 | :show-inheritance: 77 | 78 | cortex.models.rbm module 79 | ------------------------ 80 | 81 | .. automodule:: cortex.models.rbm 82 | :members: 83 | :undoc-members: 84 | :show-inheritance: 85 | 86 | cortex.models.rnn module 87 | ------------------------ 88 | 89 | .. automodule:: cortex.models.rnn 90 | :members: 91 | :undoc-members: 92 | :show-inheritance: 93 | 94 | 95 | Module contents 96 | --------------- 97 | 98 | .. automodule:: cortex.models 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | -------------------------------------------------------------------------------- /doc/source/cortex.models.tests.rst: -------------------------------------------------------------------------------- 1 | cortex.models.tests package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.models.tests.test_darn module 8 | ------------------------------------ 9 | 10 | .. automodule:: cortex.models.tests.test_darn 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.models.tests.test_mlp module 16 | ----------------------------------- 17 | 18 | .. automodule:: cortex.models.tests.test_mlp 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.models.tests.test_rbm module 24 | ----------------------------------- 25 | 26 | .. automodule:: cortex.models.tests.test_rbm 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.models.tests.test_rnn module 32 | ----------------------------------- 33 | 34 | .. 
automodule:: cortex.models.tests.test_rnn 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.models.tests.test_vae module 40 | ----------------------------------- 41 | 42 | .. automodule:: cortex.models.tests.test_vae 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: cortex.models.tests 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /doc/source/cortex.rst: -------------------------------------------------------------------------------- 1 | cortex package 2 | ============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cortex.analysis 10 | cortex.datasets 11 | cortex.demos 12 | cortex.inference 13 | cortex.models 14 | cortex.utils 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cortex 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /doc/source/cortex.utils.rst: -------------------------------------------------------------------------------- 1 | cortex.utils package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cortex.utils.extra module 8 | ------------------------- 9 | 10 | .. automodule:: cortex.utils.extra 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cortex.utils.learning_scheduler module 16 | -------------------------------------- 17 | 18 | .. automodule:: cortex.utils.learning_scheduler 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cortex.utils.logger module 24 | -------------------------- 25 | 26 | .. automodule:: cortex.utils.logger 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cortex.utils.monitor module 32 | --------------------------- 33 | 34 | .. automodule:: cortex.utils.monitor 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cortex.utils.op module 40 | ---------------------- 41 | 42 | .. automodule:: cortex.utils.op 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | cortex.utils.preprocessor module 48 | -------------------------------- 49 | 50 | .. automodule:: cortex.utils.preprocessor 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | cortex.utils.tools module 56 | ------------------------- 57 | 58 | .. automodule:: cortex.utils.tools 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | cortex.utils.training module 64 | ---------------------------- 65 | 66 | .. automodule:: cortex.utils.training 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | cortex.utils.vis_utils module 72 | ----------------------------- 73 | 74 | .. automodule:: cortex.utils.vis_utils 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | 80 | Module contents 81 | --------------- 82 | 83 | .. automodule:: cortex.utils 84 | :members: 85 | :undoc-members: 86 | :show-inheritance: 87 | -------------------------------------------------------------------------------- /doc/source/modules.rst: -------------------------------------------------------------------------------- 1 | cortex 2 | ====== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 4 6 | 7 | cortex 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Setup for Cortex 3 | ''' 4 | 5 | import readline, glob 6 | from setuptools import setup, find_packages 7 | from setuptools.command.install import install 8 | from codecs import open 9 | from os import path 10 | 11 | here = path.abspath(path.dirname(__file__)) 12 | 13 | with open(path.join(here, 'README.rst'), encoding='utf-8') as f: 14 | long_description = f.read() 15 | 16 | 17 | setup( 18 | name='cortex', 19 | version='0.12a', 20 | description='cortex: a deep learning toolbox for neuroimaging', 21 | long_description=long_description, 22 | url='https://github.com/rdevon/cortex', 23 | author='Devon Hjelm', 24 | author_email='erroneus@gmail.com', 25 | license='GPL', 26 | dependency_links=['git+https://github.com/Theano/Theano.git#egg=Theano'], 27 | classifiers=[ 28 | 'Development Status :: 3 - Alpha', 29 | 'License :: OSI Approved :: GNU General Public License (GPL)', 30 | 'Programming Language :: Python :: 2.7', 31 | ], 32 | keywords='deep learning neuroimaging', 33 | packages=find_packages(exclude=['contrib', 'docs', 'tests']), 34 | install_requires=['nibabel', 'nipy', 'sklearn'], 35 | entry_points={ 36 | 'console_scripts': [ 37 | 'cortex-setup=cortex:main', 38 | 'cortex-classifier-demo=cortex.demos.demos_basic:run_classifier_demo', 39 | 'cortex-rbm-demo=cortex.demos.demos_basic:run_rbm_demo', 40 | 'cortex-vae-demo=cortex.demos.demos_basic:run_vae_demo', 41 | 'cortex-rbm-vbm-demo=cortex.demos.demos_neuroimaging:run_rbm_vbm_demo', 42 | 'cortex-rbm-olin-demo=cortex.demos.demos_neuroimaging:run_rbm_olin_demo', 43 | 'cortex-read-mri=cortex.analysis.load_mri:main', 44 | 'cortex-read-fmri=cortex.analysis.read_fmri:main' 45 | ] 46 | }, 47 | data_files=[ 48 | 'cortex/demos/demos_basic/classifier_mnist.yaml', 49 | 'cortex/demos/demos_basic/rbm_mnist.yaml', 50 | 'cortex/demos/demos_basic/vae_mnist.yaml', 51 | 'cortex/demos/demos_neuroimaging/rbm_vbm.yaml', 52 | 'cortex/demos/demos_neuroimaging/rbm_olin.yaml' 53 | ] 54 | ) 55 | --------------------------------------------------------------------------------
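A note on the ``entry_points`` block in ``setup.py`` above: the ``console_scripts`` entries are what become the ``cortex-setup`` and ``cortex-*-demo`` commands referenced in ``doc/setup.rst`` and ``doc/demos.rst``. After installation, the name to the left of each ``=`` is installed as a shell command that imports and calls the function to its right. The following is a small sketch (not part of the repository) of how that mapping can be resolved from Python with ``pkg_resources``, assuming the ``cortex`` package has been installed:

.. code-block:: python

    # Sketch: resolve a console_scripts entry point the same way the
    # generated `cortex-setup` wrapper script does at run time.
    # Assumes the `cortex` distribution is installed in the environment.
    import pkg_resources

    # Corresponds to the 'cortex-setup=cortex:main' line in setup.py.
    main = pkg_resources.load_entry_point('cortex', 'console_scripts', 'cortex-setup')
    print(main)  # the cortex.main function object
    # main()     # calling it is equivalent to running `$ cortex-setup`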