├── .gitignore ├── .nojekyll ├── LICENSE ├── README ├── _config.yml ├── _images ├── 76f909efd8.pdf ├── 76f909efd8.png ├── 8018522b91.pdf ├── 8018522b91.png ├── ade1b1ac02.pdf ├── ade1b1ac02.png ├── e0de41fe35.pdf ├── e0de41fe35.png ├── graphviz-126d1229a2ba3485cf56fea510bbd85613654994.png └── graphviz-126d1229a2ba3485cf56fea510bbd85613654994.png.map ├── _modules ├── index.html └── ocupy │ ├── fixmat.html │ ├── loader.html │ ├── measures.html │ └── stimuli.html ├── _sources ├── fixmat.txt ├── index.txt ├── loader.txt ├── measures.txt ├── parallel.txt └── stimuli.txt ├── _static ├── basic.css ├── default.css ├── doctools.js ├── file.png ├── jquery.js ├── logo.png ├── minus.png ├── plus.png ├── pygments.css ├── searchtools.js ├── sidebar.js └── underscore.js ├── doc ├── Makefile ├── conf.py ├── datamat.txt ├── fixmat.txt ├── index.txt ├── loader.txt ├── logo.png ├── measures.txt ├── parallel.txt ├── pyocupy.svg └── stimuli.txt ├── fixmat.html ├── genindex.html ├── index.html ├── loader.html ├── measures.html ├── objects.inv ├── ocupy ├── __init__.py ├── bounds.py ├── datamat.py ├── datamat_tools.py ├── fixmat.py ├── loader.py ├── measures.py ├── model.py ├── parallel.py ├── saccade_geometry.py ├── samples2fix.py ├── sim_tools.py ├── simulator.py ├── spline_base.py ├── stimuli.py ├── tests │ ├── __init__.py │ ├── fixmat_demo.mat │ ├── roc_reference.mat │ ├── test_bounds.py │ ├── test_fixmat.py │ ├── test_fixmat_compute_fdm.py │ ├── test_loader.py │ ├── test_measures.py │ ├── test_spline_base.py │ ├── test_stimuli.py │ ├── test_utils.py │ └── test_xvalidation.py ├── utils.py └── xvalidation.py ├── parallel.html ├── plot_directive └── inline │ ├── 76f909efd8.hires.png │ ├── 76f909efd8.pdf │ ├── 8018522b91.hires.png │ ├── 8018522b91.pdf │ ├── ade1b1ac02.hires.png │ ├── ade1b1ac02.pdf │ ├── e0de41fe35.hires.png │ └── e0de41fe35.pdf ├── py-modindex.html ├── search.html ├── searchindex.js ├── setup.py └── stimuli.html /.gitignore: -------------------------------------------------------------------------------- 1 | .svn 2 | build 3 | dist 4 | ocupy.egg-info 5 | *.pyc 6 | *~ 7 | .*.swp 8 | *DS_STORE 9 | -------------------------------------------------------------------------------- /.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/.nojekyll -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Ocupy - Oculography Analysis Tools 2 | ---------------------------------- 3 | 4 | Ocupy provides functionality for analysis of grouped data: 5 | 6 | * Datamat objects for reading of and filtering by group meta-data and data 7 | * Corresponding objects for stimulus data, aligned to Datamat objects 8 | * Measures for prediction quality for eye-tracking data: AUC, NSS, KL, EMD. 9 | * Lower and upper bound calculation for prediction quality of attention models. 10 | * RPC Client/Server for parallel task execution on a grid [deprecated] 11 | * Evaluation (with cross-validation) of attention models 12 | 13 | 14 | Install 15 | ------- 16 | 17 | To install ocupy into your Python environment, type: 18 | 19 | python setup.py build 20 | python setup.py test 21 | python setup.py install 22 | 23 | The core requirements for ocupy, numpy, scipy and h5py, are automatically 24 | installed if not found on your Python path. 
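To verify the installation, a quick sanity check (a convenience suggestion, not part of the official instructions) is to import the package and print where it was installed:

    python -c "import ocupy; print(ocupy.__file__)"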
25 | 26 | If you want to use the parallel module you'll need to install Twisted manually. 27 | 28 | 29 | Documentation 30 | ------------- 31 | 32 | The documentation is generated by sphinx (http://sphinx.pocoo.org): 33 | 34 | cd doc 35 | make html 36 | 37 | This will create a set of HTML files in doc/_build/html. You have to 38 | have sphinx and matplotlib installed. Also, ocupy has to be on your 39 | path; if you didn't install it, add this directory to the Python path: 40 | 41 | export PYTHONPATH=$PYTHONPATH:`pwd` 42 | 43 | 44 | Copyright & License 45 | ------------------- 46 | 47 | Copyright (C) 2010, 2011, 2012 WhiteMatter Labs GmbH -- http://whitematter.de 48 | 49 | Licensed under GPLv2 or later, see file LICENSE in this source tree 50 | 51 | Ocupy was developed jointly by WhiteMatter Labs GmbH and the Neurobiopsychology 52 | Group at the University of Osnabrueck's Institute of Cognitive Science. 53 | 54 | Project Lead: Niklas Wilming 55 | 56 | Contributors: Robert Muil, Torsten Betz, Hannah Knepper, Johannes Steger, Kilian Klimek 57 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | exclude: _static, _images, _modules 2 | -------------------------------------------------------------------------------- /_images/76f909efd8.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/76f909efd8.pdf -------------------------------------------------------------------------------- /_images/76f909efd8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/76f909efd8.png -------------------------------------------------------------------------------- /_images/8018522b91.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/8018522b91.pdf -------------------------------------------------------------------------------- /_images/8018522b91.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/8018522b91.png -------------------------------------------------------------------------------- /_images/ade1b1ac02.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/ade1b1ac02.pdf -------------------------------------------------------------------------------- /_images/ade1b1ac02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/ade1b1ac02.png -------------------------------------------------------------------------------- /_images/e0de41fe35.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/e0de41fe35.pdf -------------------------------------------------------------------------------- /_images/e0de41fe35.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/e0de41fe35.png -------------------------------------------------------------------------------- /_images/graphviz-126d1229a2ba3485cf56fea510bbd85613654994.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_images/graphviz-126d1229a2ba3485cf56fea510bbd85613654994.png -------------------------------------------------------------------------------- /_images/graphviz-126d1229a2ba3485cf56fea510bbd85613654994.png.map: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /_modules/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 9 | 10 | Overview: module code — ocupy v0.1 documentation 11 | 12 | 13 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 40 | 41 |
42 |
43 |
44 |
45 | 46 |

All modules for which code is available

47 | 53 | 54 |
55 |
56 |
57 |
58 |
59 | 62 | 74 | 75 |
76 |
77 |
78 |
79 | 91 | 95 | 96 | -------------------------------------------------------------------------------- /_sources/index.txt: -------------------------------------------------------------------------------- 1 | .. ocupy documentation master file, created by 2 | sphinx-quickstart on Wed Dec 22 16:29:57 2010. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to ocupy's documentation! 7 | ======================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | fixmat.txt 15 | parallel.txt 16 | stimuli.txt 17 | loader.txt 18 | measures.txt 19 | 20 | Indices and tables 21 | ================== 22 | 23 | * :ref:`genindex` 24 | * :ref:`modindex` 25 | * :ref:`search` 26 | 27 | -------------------------------------------------------------------------------- /_sources/loader.txt: -------------------------------------------------------------------------------- 1 | Loader 2 | ====== 3 | 4 | .. automodule:: ocupy.loader 5 | :members: 6 | 7 | 8 | -------------------------------------------------------------------------------- /_sources/measures.txt: -------------------------------------------------------------------------------- 1 | Measures 2 | ======== 3 | 4 | .. automodule:: ocupy.measures 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /_sources/parallel.txt: -------------------------------------------------------------------------------- 1 | Parallel 2 | ======== 3 | 4 | .. module:: res.analysis.parallel 5 | 6 | This module implements functionality to parallelize massively 7 | parallel tasks. A massively parallel task consists of repeatedly 8 | carrying out the same computation. Each individual computation 9 | might depend on different parameters, but there exist no dependencies 10 | between different tasks. 11 | 12 | This module provides three different classes to achieve massive 13 | parallelization: :ref:`TaskStore <TaskStore>`, :ref:`TaskManager <TaskManager>` and :ref:`Worker <Worker>`. 14 | 15 | Each class corresponds to one of the three steps that are necessary: 16 | 1. Provide a task description and an ordering of tasks 17 | 2. Make tasks available for parallel processing 18 | 3. Carry out a task 19 | 20 | These classes work together to process tasks in a parallel fashion. 21 | The TaskStore provides an interface that allows iterating over 22 | individual task descriptions. The TaskManager is an XMLRPC server 23 | which provides task descriptions for workers. A Worker is an XMLRPC 24 | client that connects to a task manager, retrieves a task description, 25 | executes the task and sends back the results. 26 | 27 | 28 | Organizing tasks for distribution 29 | --------------------------------- 30 | .. _TaskStore: 31 | 32 | Preparing your own tasks for parallel computation starts with providing a 33 | custom TaskStore object. A task store organizes how a complete task can be 34 | divided into smaller tasks. Let's consider an example. Say we want to compute 35 | how well a single subject can be predicted by some other random subject. In 36 | this case, a task might be the calculation of one prediction score. The entire task 37 | is to calculate scores for predicting each subject with each other subject. 38 | If we have 48 subjects, we have 48*47 individual tasks.
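To make this decomposition concrete, here is a rough sketch of how such a (predicting, predicted) pair can be mapped to and from a linear task index. It mirrors the behaviour described in the comments of the example below; the actual helpers parallel.sub2ind and parallel.ind2sub may differ in details (for instance, the example output further down starts counting at 0 rather than 1)::

    def sub2ind(predicting, predicted, shape=(48, 47)):
        # Row-major, 1-based: (1,1) -> 1, (1,2) -> 2, ..., (1,47) -> 47, (2,1) -> 48
        return (predicting - 1) * shape[1] + predicted

    def ind2sub(index, shape=(48, 47)):
        # Inverse mapping: 1 -> (1,1), 47 -> (1,47), 48 -> (2,1)
        predicting = (index - 1) // shape[1] + 1
        predicted = (index - 1) % shape[1] + 1
        return predicting, predicted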
39 | 40 | A single task is usually described by a dictionary that has parameter names as keys 41 | and the corresponding parameter values as values:: 42 | 43 | for (index, task) in task_store: print task 44 | {'index':1,'predicted_sub':10,'predicting_sub':10} 45 | 46 | Internally a task store deals only with a linear index into all possible tasks. 47 | To provide a custom task store you have to implement a class that inherits from parallel.TaskStore 48 | and implements four functions: 49 | 50 | 1. get(index, \*params) - A function that returns a task description 51 | 2. sub2ind(\*params) - A function that maps parameters to a linear index 52 | 3. ind2sub(index) - A function that maps a linear index to a set of parameters 53 | 4. update_results(task_index, task_results) - A function that takes the results for a task and saves / organizes them. 54 | 55 | An example implementation is given below:: 56 | 57 | class ISTaskStore(parallel.TaskStore): 58 | def __init__(self, partitions = 100, ind = None, filename = None): 59 | parallel.TaskStore.__init__(self, partitions, ind, None) 60 | self.num_tasks = 48 * 47 # This is required! 61 | self.results = np.nan * np.ones((48,47)) 62 | def get(self, index, predicting, predicted): 63 | if index is None: 64 | index = self.sub2ind(predicting, predicted) 65 | return {'index':index, 'predicting':predicting, 'predicted':predicted} 66 | 67 | def sub2ind(self, predicting, predicted): 68 | # parallel.sub2ind maps 1 <- (1,1), 2 <- (1,2) ... 47 <- (1,47), 48 <- (2,1) etc. 69 | return parallel.sub2ind((predicting, predicted), (48,47)) 70 | 71 | def ind2sub(self, index): 72 | # parallel.ind2sub maps 1 -> (1,1), 2 -> (1,2), ... 47 -> (1,47), 48 -> (2,1) etc. 73 | return parallel.ind2sub(index,(48,47)) 74 | 75 | def update_results(self, task_index, task_results): 76 | for cur_res in task_results: 77 | # Find the position for this result in the result matrix 78 | ind = cur_res['index'] 79 | params = self.ind2sub(ind) 80 | self.results[params] = cur_res['result'] 81 | 82 | Let's see what this does:: 83 | 84 | from res.analysis import parallel 85 | ts = ISTaskStore() 86 | ts.get(None, 1,1) 87 | {'index': 0, 'predicted': 1, 'predicting': 1} 88 | ts.get(None, 2,1) 89 | {'index': 47, 'predicted': 1, 'predicting': 2} 90 | 91 | 92 | The task store provides an iterator interface that allows iterating over all 93 | tasks in the task store:: 94 | 95 | for task in ts: print task 96 | # Will list all tasks 97 | 98 | An important property of a task store is that it can *partition* itself into 99 | smaller groups of subtasks. Often computing the result of a single task comes 100 | with significant overhead; thus each worker receives a group of tasks (which 101 | is represented again by a TaskStore object) and then iterates over all tasks 102 | in the store. Every task store object can be instantiated with a list of valid 103 | indices, such that iterating through the store iterates only through these tasks:: 104 | 105 | ts = ISTaskStore(ind=[1,2,3]) 106 | for task in ts: print task 107 | (1, {'index': 1, 'predicting': 1, 'predicted': 2}) 108 | (2, {'index': 2, 'predicting': 1, 'predicted': 3}) 109 | (3, {'index': 3, 'predicting': 1, 'predicted': 4}) 110 | 111 | 112 | Another important function of a task store is *update_results(self, task_index, task_results)*. 113 | It is called by the server whenever the results for a partition were returned by 114 | a worker.
This function has to be implemented by you and gives you a 115 | chance to put the results back into a form that you can interpret and *save*! 116 | 117 | One more thing: TaskStores and Workers usually come in pairs. To prevent you from 118 | using a worker that was not intended to be used with a specific TaskStore, a task store needs 119 | to be able to identify itself. It therefore needs to have a field .ident which 120 | has to be set by you. 121 | 122 | 123 | Making tasks available for processing 124 | ------------------------------------- 125 | .. _TaskManager: 126 | 127 | When a task store object is available we can start a server that waits 128 | for workers to pick up tasks. To do so, we create an instance of 129 | TaskManager and run it:: 130 | 131 | from twisted.internet import reactor 132 | from twisted.web import xmlrpc, server 133 | r = parallel.TaskManager(task_store) 134 | reactor.listenTCP(7080, server.Site(r)) 135 | reactor.run() 136 | 137 | It is best to do this in a screen session and then detach it. 138 | You can check that it is running by firing up ipython and typing:: 139 | 140 | import xmlrpclib 141 | s = xmlrpclib.Server('http://localhost:7080') 142 | print s.status() 143 | 100 Jobs are still waiting for execution 144 | 0 Jobs are being processed 145 | 0 Jobs are done 146 | 147 | Remember that the server iterates over partitions (and the default number 148 | of partitions is 100); thus, for the server, one job is one partition of the tasks. 149 | You can query the server with the server object:: 150 | 151 | k = s.get_task() 152 | s.reschedule() # Reschedule all tasks that are being processed 153 | 154 | Getting things done 155 | ------------------- 156 | .. _Worker: 157 | 158 | When everything is in place (task store available, server started) it is time to 159 | actually do the work. For this we create instances of parallel.Worker. 160 | 161 | Each worker instance will then connect to the server, call server.get_task(), iterate over 162 | the returned task_store and call *compute(self, index, task_description)* for each task. 163 | It collects the output of each compute call, returns it to the server and then quits. 164 | 165 | Often the worker needs access to data that has to be loaded beforehand. Thus, the 166 | constructor of parallel.Worker calls *setup(self)* before starting the computation. 167 | This gives you a chance to organize the necessary data. 168 | 169 | The only tasks left to you are implementing a setup and a compute method. Here is an example:: 170 | 171 | class ISWorker(parallel.Worker): 172 | 173 | def setup(self): 174 | prefix = '/net/space/users/nwilming/' 175 | data = fixmat.FixmatFactory(os.path.join(prefix,'fixmat.mat')) 176 | self.data = data[(data.on_image == True) & (ismember(data.fix, range(2,17)))] 177 | 178 | def compute(self, index, task_description): 179 | predicted = task_description['predicted'] 180 | predicting = task_description['predicting'] 181 | rescale = 0.5 182 | (auc, nss, kl) = roc.intersubject_scores(self.data, 7, [1], 183 | predicting, range(2,65), 184 | predicted, controls = False, scale_factor = rescale) 185 | 186 | result = {'index': index, 187 | 'auc' : float(auc), 188 | 'nss' : float(nss), 189 | 'kl' : float(kl)} 190 | result.update(task_description) 191 | return result 192 | 193 | To start a worker, instantiate it and call its run() method. The rest happens 194 | automatically.
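For illustration, a minimal launch script might look like the sketch below. Note that the exact constructor arguments of parallel.Worker (for example, how the address of the TaskManager is passed in) are not shown in this document, so the URL argument here is an assumption -- consult the reference section below for the actual signature::

    # Sketch of a worker launch script; the constructor argument is assumed.
    worker = ISWorker('http://localhost:7080')  # assumed signature
    worker.run()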
195 | 196 | The real power of this approach lies in using the GridEngine to start as many workers as 197 | there are task partitions; the GridEngine then schedules as many of them as possible to run concurrently. 198 | 199 | 200 | Reference 201 | --------- 202 | 203 | .. autoclass:: TaskStore 204 | :members: 205 | 206 | .. autoclass:: TaskManager 207 | :members: 208 | 209 | .. autoclass:: Worker 210 | :members: 211 | -------------------------------------------------------------------------------- /_sources/stimuli.txt: -------------------------------------------------------------------------------- 1 | Organization of stimuli: categories, images and features 2 | ======================================================== 3 | 4 | This module aims at providing easy access to images and feature maps. 5 | It takes into account that images are organized into different categories. 6 | 7 | Using this module has several advantages over simply loading images or 8 | feature maps at runtime: 9 | 10 | #. **Alignment**: You can iterate over images and access all fixations made on each image. The access is handled transparently for you. 11 | #. **Abstraction**: If treated right, it doesn't matter where or how your data is organized. Stored on the web? Strange naming conventions? No problem, but you will have to adapt some minor things in the backend. The interface, however, stays the same. 12 | 13 | 14 | The graph below describes the dependency structure of the classes within the stimulus module. The general idea is that images are organized in categories (one category maps to many images) and that images provide access to the raw image data and to features of the image (again a 1:n mapping). Data is loaded via a proxy class which abstracts from the actual access (this is an n:1 mapping). 15 | 16 | .. graphviz:: 17 | 18 | digraph Stimuli { 19 | node [shape = record,height=.1]; 20 | node0[label = "<f0> Category 1 | <f1> Category 2"]; 21 | node1[label = "<f0> Image 1 | ... | Image n"]; 22 | node2[label = "<f0> Image 1 | ... | Image n"]; 23 | node3[label = "<f0> Image Data | <f1> Feature 1 | ... | <f2> Feature n"]; 24 | node4[label = "<f0> Image Data | <f1> Feature 1 | ... | <f2> Feature n"]; 25 | "node0":f0 -> "node1":f0; 26 | "node0":f1 -> "node2":f0; 27 | "node1":f0 -> "node3":f0; 28 | "node2":f0 -> "node4":f0; 29 | "node4":f0 -> Loader; 30 | "node4":f1 -> Loader; 31 | "node4":f2 -> Loader; 32 | "node3":f0 -> Loader; 33 | "node3":f1 -> Loader; 34 | "node3":f2 -> Loader; 35 | } 36 | 37 | The remainder of this document describes the interface of the stimuli 38 | module and how it interacts with the loader module. 39 | 40 | Loading Data from somewhere 41 | --------------------------- 42 | To access images and feature maps, we need to be able to load them into 43 | memory. This task is carried out by the *loader*. It encapsulates the 44 | direct data access, such that we do not have to worry about it later. 45 | 46 | Let's look at an example: 47 | 48 | >>> l = loader.LoadFromDisk('my-images', 'my-features', size = (768,1024)) 49 | >>> l.get_image(2,17) 50 | -> Returns image 17 in category 2 51 | >>> l.get_feature(2,17,'BYCHigh') 52 | -> Returns feature BYCHigh for image 17 in category 2 53 | 54 | In this case we use a loader that reads features and images from the hard disk. 55 | In the constructor, we specify where the images and features are located. A 56 | neat feature is that the 'LoadFromDisk' loader can automatically resize images and 57 | features to the same size (given by the size argument).
By default, 58 | 'LoadFromDisk' 59 | expects the following file layout: my-images/category/category_image.png 60 | and my-features/category/feature/category_image.mat. 61 | If you use a different naming scheme, it is easy to rip out the standard layout and plug 62 | in your own. All you have to do is to replace LoadFromDisk's 'path' function: 63 | 64 | >>> l = loader.LoadFromDisk('my-images', 'my-features', size = (768,1024)) 65 | >>> def my_path(self, category = None, image = None, feature = None): 66 | if feature is not None: 67 | assert category is not None and image is not None, ("If a feature name is " + 68 | "given, the category and image also have to be given.") 69 | return os.path.join(self.impath, '%i_%i_%s.mat'%(category,image,feature)) 70 | if image is not None: 71 | assert category is not None, "The category has to be given if the image is given" 72 | return os.path.join(self.impath, '%i_%i.png'%(category,image)) 73 | if category is not None: 74 | return self.impath 75 | >>> l.path = my_path 76 | 77 | 78 | And now the loader will respect a my-images/category_image.png and 79 | my-images/category_image_feature.mat naming scheme. You can of course also inherit 80 | from LoadFromDisk and overwrite the path method if you want to use this naming 81 | scheme more often. 82 | 83 | To use a completely different loader (say, one that pulls stimuli from a 84 | SQL database) you have to implement your own. To achieve this, inherit 85 | from loader.Loader and implement all the methods specified there.
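As a rough illustration (this is only a sketch, not part of ocupy: the table layout below is made up, and the full set of methods required by loader.Loader should be taken from that class, which may also expect its constructor to be called), a loader that pulls images and features from a SQLite database could start out like this::

    import cPickle
    import sqlite3

    class LoadFromSQLite(loader.Loader):
        """Sketch of a loader backed by a SQLite database (assumed schema)."""
        def __init__(self, dbfile):
            self.conn = sqlite3.connect(dbfile)

        def get_image(self, category, image):
            # Assumes a table images(category, image, data) with pickled arrays
            row = self.conn.execute(
                'SELECT data FROM images WHERE category=? AND image=?',
                (category, image)).fetchone()
            return cPickle.loads(str(row[0]))

        def get_feature(self, category, image, feature):
            # Assumes a table features(category, image, name, data)
            row = self.conn.execute(
                'SELECT data FROM features WHERE category=? AND image=? AND name=?',
                (category, image, feature)).fetchone()
            return cPickle.loads(str(row[0]))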
86 | 87 | 88 | Working with the stimulus module 89 | -------------------------------- 90 | Abstracting the data access is a first step, but it does not provide much in terms of 91 | convenience. The stimuli module defines three classes that organize stimuli into 92 | categories, images and features: 93 | 94 | - The **Categories** class encapsulates the different categories that are available 95 | - The **Images** class represents all images within a category 96 | - The **Image** class provides direct access to the image data and feature maps of the image 97 | 98 | I think the interface is pretty much self-explanatory: 99 | 100 | >>> l = loader.LoadFromDisk('path-to-my-images', 'path-to-my-features', size = (768,1024)) 101 | >>> inp = stimuli.Categories(l, features=None,img_per_cat = {2:range(16,26),9:range(1,51)}) 102 | >>> inp[2][17].data # yields image 17 in category 2 103 | >>> inp[2][17]['BYCHigh'] # yields feature BYCHigh 104 | >>> for cat in inp: 105 | for img in cat: 106 | img.data # Gives the image 107 | img['BYCHigh'] # Gives feature BYCHigh 108 | 109 | In this case, I specified all possible category / image combinations. 110 | Often we want to access images and features that have been arranged by 111 | some structure beforehand. The most obvious case is that we have a fixmat that already specifies all possible category and image combinations. To create a stimuli object that is aligned to a fixmat we can use the **FixmatStimuliFactory**: 112 | 113 | >>> fm = fixmat.DirectoryFixmatFactory('path-to-fixmats') 114 | >>> l = loader.LoadFromDisk(impath = 'path-to-imgs', ftrpath = 'path-to-ftrs') 115 | >>> stim = stimuli.FixmatStimuliFactory(fm, l) 116 | 117 | Alternatively we can use the **DirectoryStimuliFactory** to automatically index all categories and files in a directory: 118 | 119 | >>> stim = stimuli.DirectoryStimuliFactory(l) 120 | >>> stim.categories() 121 | [2,9] 122 | >>> stim[2].images() 123 | [16, 17, ..., 25] 124 | >>> stim[9].images() 125 | [1, 2, ..., 50] 126 | 127 | This, however, works only for the default file layout structure (cat/cat_img.png, cat/ftr/cat_img.mat). 128 | 129 | Combining fixation data and stimulus data 130 | ----------------------------------------- 131 | 132 | In many cases, we want to iterate over images and fixation data at the same 133 | time. The stimuli module automatically aligns fixation and image data. 134 | 135 | To use this functionality, the data proxy (i.e. categories, images or features) 136 | must be created with a FixMat: 137 | 138 | >>> fm = fixmat.DirectoryFixmatFactory('my-fixmats') 139 | >>> inp = stimuli.Categories(l,fixations = fm) 140 | >>> inp[2][17].fixations 141 | -> FixMat that contains only fixations on image 17 from category 2 142 | 143 | If the data proxy is initialized with a set of possible category / image 144 | combinations we can also iterate over it. In this case it is probably 145 | handy to use the FixmatStimuliFactory which extracts all possible 146 | category / image combinations from a fixmat: 147 | 148 | >>> inp = stimuli.FixmatStimuliFactory(fm,l) 149 | >>> for cat in inp: 150 | for img in cat: 151 | img.fixations 152 | 153 | 154 | Reference 155 | --------- 156 | .. autofunction:: ocupy.stimuli.FixmatStimuliFactory 157 | .. autofunction:: ocupy.stimuli.DirectoryStimuliFactory 158 | 159 | .. automodule:: ocupy.stimuli 160 | :members: 161 | 162 | .. automodule:: ocupy.loader 163 | :members: 164 | 165 | -------------------------------------------------------------------------------- /_static/basic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * basic.css 3 | * ~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- basic theme. 6 | * 7 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details.
9 | * 10 | */ 11 | 12 | /* -- main layout ----------------------------------------------------------- */ 13 | 14 | div.clearer { 15 | clear: both; 16 | } 17 | 18 | /* -- relbar ---------------------------------------------------------------- */ 19 | 20 | div.related { 21 | width: 100%; 22 | font-size: 90%; 23 | } 24 | 25 | div.related h3 { 26 | display: none; 27 | } 28 | 29 | div.related ul { 30 | margin: 0; 31 | padding: 0 0 0 10px; 32 | list-style: none; 33 | } 34 | 35 | div.related li { 36 | display: inline; 37 | } 38 | 39 | div.related li.right { 40 | float: right; 41 | margin-right: 5px; 42 | } 43 | 44 | /* -- sidebar --------------------------------------------------------------- */ 45 | 46 | div.sphinxsidebarwrapper { 47 | padding: 10px 5px 0 10px; 48 | } 49 | 50 | div.sphinxsidebar { 51 | float: left; 52 | width: 230px; 53 | margin-left: -100%; 54 | font-size: 90%; 55 | } 56 | 57 | div.sphinxsidebar ul { 58 | list-style: none; 59 | } 60 | 61 | div.sphinxsidebar ul ul, 62 | div.sphinxsidebar ul.want-points { 63 | margin-left: 20px; 64 | list-style: square; 65 | } 66 | 67 | div.sphinxsidebar ul ul { 68 | margin-top: 0; 69 | margin-bottom: 0; 70 | } 71 | 72 | div.sphinxsidebar form { 73 | margin-top: 10px; 74 | } 75 | 76 | div.sphinxsidebar input { 77 | border: 1px solid #98dbcc; 78 | font-family: sans-serif; 79 | font-size: 1em; 80 | } 81 | 82 | img { 83 | border: 0; 84 | } 85 | 86 | /* -- search page ----------------------------------------------------------- */ 87 | 88 | ul.search { 89 | margin: 10px 0 0 20px; 90 | padding: 0; 91 | } 92 | 93 | ul.search li { 94 | padding: 5px 0 5px 20px; 95 | background-image: url(file.png); 96 | background-repeat: no-repeat; 97 | background-position: 0 7px; 98 | } 99 | 100 | ul.search li a { 101 | font-weight: bold; 102 | } 103 | 104 | ul.search li div.context { 105 | color: #888; 106 | margin: 2px 0 0 30px; 107 | text-align: left; 108 | } 109 | 110 | ul.keywordmatches li.goodmatch a { 111 | font-weight: bold; 112 | } 113 | 114 | /* -- index page ------------------------------------------------------------ */ 115 | 116 | table.contentstable { 117 | width: 90%; 118 | } 119 | 120 | table.contentstable p.biglink { 121 | line-height: 150%; 122 | } 123 | 124 | a.biglink { 125 | font-size: 1.3em; 126 | } 127 | 128 | span.linkdescr { 129 | font-style: italic; 130 | padding-top: 5px; 131 | font-size: 90%; 132 | } 133 | 134 | /* -- general index --------------------------------------------------------- */ 135 | 136 | table.indextable { 137 | width: 100%; 138 | } 139 | 140 | table.indextable td { 141 | text-align: left; 142 | vertical-align: top; 143 | } 144 | 145 | table.indextable dl, table.indextable dd { 146 | margin-top: 0; 147 | margin-bottom: 0; 148 | } 149 | 150 | table.indextable tr.pcap { 151 | height: 10px; 152 | } 153 | 154 | table.indextable tr.cap { 155 | margin-top: 10px; 156 | background-color: #f2f2f2; 157 | } 158 | 159 | img.toggler { 160 | margin-right: 3px; 161 | margin-top: 3px; 162 | cursor: pointer; 163 | } 164 | 165 | div.modindex-jumpbox { 166 | border-top: 1px solid #ddd; 167 | border-bottom: 1px solid #ddd; 168 | margin: 1em 0 1em 0; 169 | padding: 0.4em; 170 | } 171 | 172 | div.genindex-jumpbox { 173 | border-top: 1px solid #ddd; 174 | border-bottom: 1px solid #ddd; 175 | margin: 1em 0 1em 0; 176 | padding: 0.4em; 177 | } 178 | 179 | /* -- general body styles --------------------------------------------------- */ 180 | 181 | a.headerlink { 182 | visibility: hidden; 183 | } 184 | 185 | h1:hover > a.headerlink, 186 | h2:hover > 
a.headerlink, 187 | h3:hover > a.headerlink, 188 | h4:hover > a.headerlink, 189 | h5:hover > a.headerlink, 190 | h6:hover > a.headerlink, 191 | dt:hover > a.headerlink { 192 | visibility: visible; 193 | } 194 | 195 | div.body p.caption { 196 | text-align: inherit; 197 | } 198 | 199 | div.body td { 200 | text-align: left; 201 | } 202 | 203 | .field-list ul { 204 | padding-left: 1em; 205 | } 206 | 207 | .first { 208 | margin-top: 0 !important; 209 | } 210 | 211 | p.rubric { 212 | margin-top: 30px; 213 | font-weight: bold; 214 | } 215 | 216 | img.align-left, .figure.align-left, object.align-left { 217 | clear: left; 218 | float: left; 219 | margin-right: 1em; 220 | } 221 | 222 | img.align-right, .figure.align-right, object.align-right { 223 | clear: right; 224 | float: right; 225 | margin-left: 1em; 226 | } 227 | 228 | img.align-center, .figure.align-center, object.align-center { 229 | display: block; 230 | margin-left: auto; 231 | margin-right: auto; 232 | } 233 | 234 | .align-left { 235 | text-align: left; 236 | } 237 | 238 | .align-center { 239 | clear: both; 240 | text-align: center; 241 | } 242 | 243 | .align-right { 244 | text-align: right; 245 | } 246 | 247 | /* -- sidebars -------------------------------------------------------------- */ 248 | 249 | div.sidebar { 250 | margin: 0 0 0.5em 1em; 251 | border: 1px solid #ddb; 252 | padding: 7px 7px 0 7px; 253 | background-color: #ffe; 254 | width: 40%; 255 | float: right; 256 | } 257 | 258 | p.sidebar-title { 259 | font-weight: bold; 260 | } 261 | 262 | /* -- topics ---------------------------------------------------------------- */ 263 | 264 | div.topic { 265 | border: 1px solid #ccc; 266 | padding: 7px 7px 0 7px; 267 | margin: 10px 0 10px 0; 268 | } 269 | 270 | p.topic-title { 271 | font-size: 1.1em; 272 | font-weight: bold; 273 | margin-top: 10px; 274 | } 275 | 276 | /* -- admonitions ----------------------------------------------------------- */ 277 | 278 | div.admonition { 279 | margin-top: 10px; 280 | margin-bottom: 10px; 281 | padding: 7px; 282 | } 283 | 284 | div.admonition dt { 285 | font-weight: bold; 286 | } 287 | 288 | div.admonition dl { 289 | margin-bottom: 0; 290 | } 291 | 292 | p.admonition-title { 293 | margin: 0px 10px 5px 0px; 294 | font-weight: bold; 295 | } 296 | 297 | div.body p.centered { 298 | text-align: center; 299 | margin-top: 25px; 300 | } 301 | 302 | /* -- tables ---------------------------------------------------------------- */ 303 | 304 | table.docutils { 305 | border: 0; 306 | border-collapse: collapse; 307 | } 308 | 309 | table.docutils td, table.docutils th { 310 | padding: 1px 8px 1px 5px; 311 | border-top: 0; 312 | border-left: 0; 313 | border-right: 0; 314 | border-bottom: 1px solid #aaa; 315 | } 316 | 317 | table.field-list td, table.field-list th { 318 | border: 0 !important; 319 | } 320 | 321 | table.footnote td, table.footnote th { 322 | border: 0 !important; 323 | } 324 | 325 | th { 326 | text-align: left; 327 | padding-right: 5px; 328 | } 329 | 330 | table.citation { 331 | border-left: solid 1px gray; 332 | margin-left: 1px; 333 | } 334 | 335 | table.citation td { 336 | border-bottom: none; 337 | } 338 | 339 | /* -- other body styles ----------------------------------------------------- */ 340 | 341 | ol.arabic { 342 | list-style: decimal; 343 | } 344 | 345 | ol.loweralpha { 346 | list-style: lower-alpha; 347 | } 348 | 349 | ol.upperalpha { 350 | list-style: upper-alpha; 351 | } 352 | 353 | ol.lowerroman { 354 | list-style: lower-roman; 355 | } 356 | 357 | ol.upperroman { 358 | list-style: 
upper-roman; 359 | } 360 | 361 | dl { 362 | margin-bottom: 15px; 363 | } 364 | 365 | dd p { 366 | margin-top: 0px; 367 | } 368 | 369 | dd ul, dd table { 370 | margin-bottom: 10px; 371 | } 372 | 373 | dd { 374 | margin-top: 3px; 375 | margin-bottom: 10px; 376 | margin-left: 30px; 377 | } 378 | 379 | dt:target, .highlighted { 380 | background-color: #fbe54e; 381 | } 382 | 383 | dl.glossary dt { 384 | font-weight: bold; 385 | font-size: 1.1em; 386 | } 387 | 388 | .field-list ul { 389 | margin: 0; 390 | padding-left: 1em; 391 | } 392 | 393 | .field-list p { 394 | margin: 0; 395 | } 396 | 397 | .refcount { 398 | color: #060; 399 | } 400 | 401 | .optional { 402 | font-size: 1.3em; 403 | } 404 | 405 | .versionmodified { 406 | font-style: italic; 407 | } 408 | 409 | .system-message { 410 | background-color: #fda; 411 | padding: 5px; 412 | border: 3px solid red; 413 | } 414 | 415 | .footnote:target { 416 | background-color: #ffa; 417 | } 418 | 419 | .line-block { 420 | display: block; 421 | margin-top: 1em; 422 | margin-bottom: 1em; 423 | } 424 | 425 | .line-block .line-block { 426 | margin-top: 0; 427 | margin-bottom: 0; 428 | margin-left: 1.5em; 429 | } 430 | 431 | .guilabel, .menuselection { 432 | font-family: sans-serif; 433 | } 434 | 435 | .accelerator { 436 | text-decoration: underline; 437 | } 438 | 439 | .classifier { 440 | font-style: oblique; 441 | } 442 | 443 | /* -- code displays --------------------------------------------------------- */ 444 | 445 | pre { 446 | overflow: auto; 447 | overflow-y: hidden; /* fixes display issues on Chrome browsers */ 448 | } 449 | 450 | td.linenos pre { 451 | padding: 5px 0px; 452 | border: 0; 453 | background-color: transparent; 454 | color: #aaa; 455 | } 456 | 457 | table.highlighttable { 458 | margin-left: 0.5em; 459 | } 460 | 461 | table.highlighttable td { 462 | padding: 0 0.5em 0 0.5em; 463 | } 464 | 465 | tt.descname { 466 | background-color: transparent; 467 | font-weight: bold; 468 | font-size: 1.2em; 469 | } 470 | 471 | tt.descclassname { 472 | background-color: transparent; 473 | } 474 | 475 | tt.xref, a tt { 476 | background-color: transparent; 477 | font-weight: bold; 478 | } 479 | 480 | h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { 481 | background-color: transparent; 482 | } 483 | 484 | .viewcode-link { 485 | float: right; 486 | } 487 | 488 | .viewcode-back { 489 | float: right; 490 | font-family: sans-serif; 491 | } 492 | 493 | div.viewcode-block:target { 494 | margin: -1px -10px; 495 | padding: 0 10px; 496 | } 497 | 498 | /* -- math display ---------------------------------------------------------- */ 499 | 500 | img.math { 501 | vertical-align: middle; 502 | } 503 | 504 | div.body div.math p { 505 | text-align: center; 506 | } 507 | 508 | span.eqno { 509 | float: right; 510 | } 511 | 512 | /* -- printout stylesheet --------------------------------------------------- */ 513 | 514 | @media print { 515 | div.document, 516 | div.documentwrapper, 517 | div.bodywrapper { 518 | margin: 0 !important; 519 | width: 100%; 520 | } 521 | 522 | div.sphinxsidebar, 523 | div.related, 524 | div.footer, 525 | #top-link { 526 | display: none; 527 | } 528 | } 529 | -------------------------------------------------------------------------------- /_static/default.css: -------------------------------------------------------------------------------- 1 | /* 2 | * default.css_t 3 | * ~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- default theme. 6 | * 7 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: sans-serif; 18 | font-size: 100%; 19 | background-color: #11303d; 20 | color: #000; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.document { 26 | background-color: #1c4e63; 27 | } 28 | 29 | div.documentwrapper { 30 | float: left; 31 | width: 100%; 32 | } 33 | 34 | div.bodywrapper { 35 | margin: 0 0 0 230px; 36 | } 37 | 38 | div.body { 39 | background-color: #ffffff; 40 | color: #000000; 41 | padding: 0 20px 30px 20px; 42 | } 43 | 44 | div.footer { 45 | color: #ffffff; 46 | width: 100%; 47 | padding: 9px 0 9px 0; 48 | text-align: center; 49 | font-size: 75%; 50 | } 51 | 52 | div.footer a { 53 | color: #ffffff; 54 | text-decoration: underline; 55 | } 56 | 57 | div.related { 58 | background-color: #133f52; 59 | line-height: 30px; 60 | color: #ffffff; 61 | } 62 | 63 | div.related a { 64 | color: #ffffff; 65 | } 66 | 67 | div.sphinxsidebar { 68 | } 69 | 70 | div.sphinxsidebar h3 { 71 | font-family: 'Trebuchet MS', sans-serif; 72 | color: #ffffff; 73 | font-size: 1.4em; 74 | font-weight: normal; 75 | margin: 0; 76 | padding: 0; 77 | } 78 | 79 | div.sphinxsidebar h3 a { 80 | color: #ffffff; 81 | } 82 | 83 | div.sphinxsidebar h4 { 84 | font-family: 'Trebuchet MS', sans-serif; 85 | color: #ffffff; 86 | font-size: 1.3em; 87 | font-weight: normal; 88 | margin: 5px 0 0 0; 89 | padding: 0; 90 | } 91 | 92 | div.sphinxsidebar p { 93 | color: #ffffff; 94 | } 95 | 96 | div.sphinxsidebar p.topless { 97 | margin: 5px 10px 10px 10px; 98 | } 99 | 100 | div.sphinxsidebar ul { 101 | margin: 10px; 102 | padding: 0; 103 | color: #ffffff; 104 | } 105 | 106 | div.sphinxsidebar a { 107 | color: #98dbcc; 108 | } 109 | 110 | div.sphinxsidebar input { 111 | border: 1px solid #98dbcc; 112 | font-family: sans-serif; 113 | font-size: 1em; 114 | } 115 | 116 | 117 | 118 | /* -- hyperlink styles ------------------------------------------------------ */ 119 | 120 | a { 121 | color: #355f7c; 122 | text-decoration: none; 123 | } 124 | 125 | a:visited { 126 | color: #355f7c; 127 | text-decoration: none; 128 | } 129 | 130 | a:hover { 131 | text-decoration: underline; 132 | } 133 | 134 | 135 | 136 | /* -- body styles ----------------------------------------------------------- */ 137 | 138 | div.body h1, 139 | div.body h2, 140 | div.body h3, 141 | div.body h4, 142 | div.body h5, 143 | div.body h6 { 144 | font-family: 'Trebuchet MS', sans-serif; 145 | background-color: #f2f2f2; 146 | font-weight: normal; 147 | color: #20435c; 148 | border-bottom: 1px solid #ccc; 149 | margin: 20px -20px 10px -20px; 150 | padding: 3px 0 3px 10px; 151 | } 152 | 153 | div.body h1 { margin-top: 0; font-size: 200%; } 154 | div.body h2 { font-size: 160%; } 155 | div.body h3 { font-size: 140%; } 156 | div.body h4 { font-size: 120%; } 157 | div.body h5 { font-size: 110%; } 158 | div.body h6 { font-size: 100%; } 159 | 160 | a.headerlink { 161 | color: #c60f0f; 162 | font-size: 0.8em; 163 | padding: 0 4px 0 4px; 164 | text-decoration: none; 165 | } 166 | 167 | a.headerlink:hover { 168 | background-color: #c60f0f; 169 | color: white; 170 | } 171 | 172 | div.body p, div.body dd, div.body li { 173 | text-align: justify; 174 | line-height: 130%; 175 | } 176 | 177 | div.admonition p.admonition-title + p { 178 | display: inline; 179 | } 180 | 181 | div.admonition p { 182 | margin-bottom: 5px; 183 | } 184 | 185 | div.admonition pre { 186 | margin-bottom: 5px; 187 | } 188 | 189 | 
div.admonition ul, div.admonition ol { 190 | margin-bottom: 5px; 191 | } 192 | 193 | div.note { 194 | background-color: #eee; 195 | border: 1px solid #ccc; 196 | } 197 | 198 | div.seealso { 199 | background-color: #ffc; 200 | border: 1px solid #ff6; 201 | } 202 | 203 | div.topic { 204 | background-color: #eee; 205 | } 206 | 207 | div.warning { 208 | background-color: #ffe4e4; 209 | border: 1px solid #f66; 210 | } 211 | 212 | p.admonition-title { 213 | display: inline; 214 | } 215 | 216 | p.admonition-title:after { 217 | content: ":"; 218 | } 219 | 220 | pre { 221 | padding: 5px; 222 | background-color: #eeffcc; 223 | color: #333333; 224 | line-height: 120%; 225 | border: 1px solid #ac9; 226 | border-left: none; 227 | border-right: none; 228 | } 229 | 230 | tt { 231 | background-color: #ecf0f3; 232 | padding: 0 1px 0 1px; 233 | font-size: 0.95em; 234 | } 235 | 236 | th { 237 | background-color: #ede; 238 | } 239 | 240 | .warning tt { 241 | background: #efc2c2; 242 | } 243 | 244 | .note tt { 245 | background: #d6d6d6; 246 | } 247 | 248 | .viewcode-back { 249 | font-family: sans-serif; 250 | } 251 | 252 | div.viewcode-block:target { 253 | background-color: #f4debf; 254 | border-top: 1px solid #ac9; 255 | border-bottom: 1px solid #ac9; 256 | } -------------------------------------------------------------------------------- /_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilties for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | } 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s == 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * small function to check if an array contains 66 | * a given item. 
67 | */ 68 | jQuery.contains = function(arr, item) { 69 | for (var i = 0; i < arr.length; i++) { 70 | if (arr[i] == item) 71 | return true; 72 | } 73 | return false; 74 | }; 75 | 76 | /** 77 | * highlight a given string on a jquery object by wrapping it in 78 | * span elements with the given class name. 79 | */ 80 | jQuery.fn.highlightText = function(text, className) { 81 | function highlight(node) { 82 | if (node.nodeType == 3) { 83 | var val = node.nodeValue; 84 | var pos = val.toLowerCase().indexOf(text); 85 | if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { 86 | var span = document.createElement("span"); 87 | span.className = className; 88 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 89 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 90 | document.createTextNode(val.substr(pos + text.length)), 91 | node.nextSibling)); 92 | node.nodeValue = val.substr(0, pos); 93 | } 94 | } 95 | else if (!jQuery(node).is("button, select, textarea")) { 96 | jQuery.each(node.childNodes, function() { 97 | highlight(this); 98 | }); 99 | } 100 | } 101 | return this.each(function() { 102 | highlight(this); 103 | }); 104 | }; 105 | 106 | /** 107 | * Small JavaScript module for the documentation. 108 | */ 109 | var Documentation = { 110 | 111 | init : function() { 112 | this.fixFirefoxAnchorBug(); 113 | this.highlightSearchWords(); 114 | this.initIndexTable(); 115 | }, 116 | 117 | /** 118 | * i18n support 119 | */ 120 | TRANSLATIONS : {}, 121 | PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, 122 | LOCALE : 'unknown', 123 | 124 | // gettext and ngettext don't access this so that the functions 125 | // can safely bound to a different name (_ = Documentation.gettext) 126 | gettext : function(string) { 127 | var translated = Documentation.TRANSLATIONS[string]; 128 | if (typeof translated == 'undefined') 129 | return string; 130 | return (typeof translated == 'string') ? translated : translated[0]; 131 | }, 132 | 133 | ngettext : function(singular, plural, n) { 134 | var translated = Documentation.TRANSLATIONS[singular]; 135 | if (typeof translated == 'undefined') 136 | return (n == 1) ? singular : plural; 137 | return translated[Documentation.PLURALEXPR(n)]; 138 | }, 139 | 140 | addTranslations : function(catalog) { 141 | for (var key in catalog.messages) 142 | this.TRANSLATIONS[key] = catalog.messages[key]; 143 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 144 | this.LOCALE = catalog.locale; 145 | }, 146 | 147 | /** 148 | * add context elements like header anchor links 149 | */ 150 | addContextElements : function() { 151 | $('div[id] > :header:first').each(function() { 152 | $('\u00B6'). 153 | attr('href', '#' + this.id). 154 | attr('title', _('Permalink to this headline')). 155 | appendTo(this); 156 | }); 157 | $('dt[id]').each(function() { 158 | $('\u00B6'). 159 | attr('href', '#' + this.id). 160 | attr('title', _('Permalink to this definition')). 161 | appendTo(this); 162 | }); 163 | }, 164 | 165 | /** 166 | * workaround a firefox stupidity 167 | */ 168 | fixFirefoxAnchorBug : function() { 169 | if (document.location.hash && $.browser.mozilla) 170 | window.setTimeout(function() { 171 | document.location.href += ''; 172 | }, 10); 173 | }, 174 | 175 | /** 176 | * highlight the search words provided in the url in the text 177 | */ 178 | highlightSearchWords : function() { 179 | var params = $.getQueryParameters(); 180 | var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; 181 | if (terms.length) { 182 | var body = $('div.body'); 183 | window.setTimeout(function() { 184 | $.each(terms, function() { 185 | body.highlightText(this.toLowerCase(), 'highlighted'); 186 | }); 187 | }, 10); 188 | $('') 190 | .appendTo($('.sidebar .this-page-menu')); 191 | } 192 | }, 193 | 194 | /** 195 | * init the domain index toggle buttons 196 | */ 197 | initIndexTable : function() { 198 | var togglers = $('img.toggler').click(function() { 199 | var src = $(this).attr('src'); 200 | var idnum = $(this).attr('id').substr(7); 201 | $('tr.cg-' + idnum).toggle(); 202 | if (src.substr(-9) == 'minus.png') 203 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 204 | else 205 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 206 | }).css('display', ''); 207 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 208 | togglers.click(); 209 | } 210 | }, 211 | 212 | /** 213 | * helper function to hide the search marks again 214 | */ 215 | hideSearchWords : function() { 216 | $('.sidebar .this-page-menu li.highlight-link').fadeOut(300); 217 | $('span.highlighted').removeClass('highlighted'); 218 | }, 219 | 220 | /** 221 | * make the url absolute 222 | */ 223 | makeURL : function(relativeURL) { 224 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 225 | }, 226 | 227 | /** 228 | * get the current relative url 229 | */ 230 | getCurrentURL : function() { 231 | var path = document.location.pathname; 232 | var parts = path.split(/\//); 233 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 234 | if (this == '..') 235 | parts.pop(); 236 | }); 237 | var url = parts.join('/'); 238 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 239 | } 240 | }; 241 | 242 | // quick alias for translations 243 | _ = Documentation.gettext; 244 | 245 | $(document).ready(function() { 246 | Documentation.init(); 247 | }); 248 | -------------------------------------------------------------------------------- /_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_static/file.png -------------------------------------------------------------------------------- /_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_static/logo.png -------------------------------------------------------------------------------- /_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_static/minus.png -------------------------------------------------------------------------------- /_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/_static/plus.png -------------------------------------------------------------------------------- /_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* 
Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 8 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 9 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 10 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 11 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 12 | .highlight .ge { font-style: italic } /* Generic.Emph */ 13 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 14 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 15 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 16 | .highlight .go { color: #303030 } /* Generic.Output */ 17 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 18 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 19 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 20 | .highlight .gt { color: #0040D0 } /* Generic.Traceback */ 21 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 22 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 23 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 24 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 25 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 26 | .highlight .kt { color: #902000 } /* Keyword.Type */ 27 | .highlight .m { color: #208050 } /* Literal.Number */ 28 | .highlight .s { color: #4070a0 } /* Literal.String */ 29 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 30 | .highlight .nb { color: #007020 } /* Name.Builtin */ 31 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 32 | .highlight .no { color: #60add5 } /* Name.Constant */ 33 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 34 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 35 | .highlight .ne { color: #007020 } /* Name.Exception */ 36 | .highlight .nf { color: #06287e } /* Name.Function */ 37 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 38 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 39 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 40 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 41 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 42 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 43 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 44 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 45 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 46 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 47 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 48 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 49 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 50 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 51 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 52 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 53 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 54 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 55 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 56 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 57 | 
.highlight .ss { color: #517918 } /* Literal.String.Symbol */ 58 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 59 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 60 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 61 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 62 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /_static/sidebar.js: -------------------------------------------------------------------------------- 1 | /* 2 | * sidebar.js 3 | * ~~~~~~~~~~ 4 | * 5 | * This script makes the Sphinx sidebar collapsible. 6 | * 7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds 8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton 9 | * used to collapse and expand the sidebar. 10 | * 11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden 12 | * and the width of the sidebar and the margin-left of the document 13 | * are decreased. When the sidebar is expanded the opposite happens. 14 | * This script saves a per-browser/per-session cookie used to 15 | * remember the position of the sidebar among the pages. 16 | * Once the browser is closed the cookie is deleted and the position 17 | * reset to the default (expanded). 18 | * 19 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. 20 | * :license: BSD, see LICENSE for details. 21 | * 22 | */ 23 | 24 | $(function() { 25 | // global elements used by the functions. 26 | // the 'sidebarbutton' element is defined as global after its 27 | // creation, in the add_sidebar_button function 28 | var bodywrapper = $('.bodywrapper'); 29 | var sidebar = $('.sphinxsidebar'); 30 | var sidebarwrapper = $('.sphinxsidebarwrapper'); 31 | 32 | // original margin-left of the bodywrapper and width of the sidebar 33 | // with the sidebar expanded 34 | var bw_margin_expanded = bodywrapper.css('margin-left'); 35 | var ssb_width_expanded = sidebar.width(); 36 | 37 | // margin-left of the bodywrapper and width of the sidebar 38 | // with the sidebar collapsed 39 | var bw_margin_collapsed = '.8em'; 40 | var ssb_width_collapsed = '.8em'; 41 | 42 | // colors used by the current theme 43 | var dark_color = $('.related').css('background-color'); 44 | var light_color = $('.document').css('background-color'); 45 | 46 | function sidebar_is_collapsed() { 47 | return sidebarwrapper.is(':not(:visible)'); 48 | } 49 | 50 | function toggle_sidebar() { 51 | if (sidebar_is_collapsed()) 52 | expand_sidebar(); 53 | else 54 | collapse_sidebar(); 55 | } 56 | 57 | function collapse_sidebar() { 58 | sidebarwrapper.hide(); 59 | sidebar.css('width', ssb_width_collapsed); 60 | bodywrapper.css('margin-left', bw_margin_collapsed); 61 | sidebarbutton.css({ 62 | 'margin-left': '0', 63 | 'height': bodywrapper.height() 64 | }); 65 | sidebarbutton.find('span').text('»'); 66 | sidebarbutton.attr('title', _('Expand sidebar')); 67 | document.cookie = 'sidebar=collapsed'; 68 | } 69 | 70 | function expand_sidebar() { 71 | bodywrapper.css('margin-left', bw_margin_expanded); 72 | sidebar.css('width', ssb_width_expanded); 73 | sidebarwrapper.show(); 74 | sidebarbutton.css({ 75 | 'margin-left': ssb_width_expanded-12, 76 | 'height': bodywrapper.height() 77 | }); 78 | sidebarbutton.find('span').text('«'); 79 | sidebarbutton.attr('title', _('Collapse sidebar')); 80 | document.cookie = 'sidebar=expanded'; 81 | } 82 | 83 | function add_sidebar_button() { 84 | sidebarwrapper.css({ 85 | 
'float': 'left', 86 | 'margin-right': '0', 87 | 'width': ssb_width_expanded - 28 88 | }); 89 | // create the button 90 | sidebar.append( 91 | '
<div id="sidebarbutton"><span>«</span></div>
' 92 | ); 93 | var sidebarbutton = $('#sidebarbutton'); 94 | light_color = sidebarbutton.css('background-color'); 95 | // find the height of the viewport to center the '<<' in the page 96 | var viewport_height; 97 | if (window.innerHeight) 98 | viewport_height = window.innerHeight; 99 | else 100 | viewport_height = $(window).height(); 101 | sidebarbutton.find('span').css({ 102 | 'display': 'block', 103 | 'margin-top': (viewport_height - sidebar.position().top - 20) / 2 104 | }); 105 | 106 | sidebarbutton.click(toggle_sidebar); 107 | sidebarbutton.attr('title', _('Collapse sidebar')); 108 | sidebarbutton.css({ 109 | 'color': '#FFFFFF', 110 | 'border-left': '1px solid ' + dark_color, 111 | 'font-size': '1.2em', 112 | 'cursor': 'pointer', 113 | 'height': bodywrapper.height(), 114 | 'padding-top': '1px', 115 | 'margin-left': ssb_width_expanded - 12 116 | }); 117 | 118 | sidebarbutton.hover( 119 | function () { 120 | $(this).css('background-color', dark_color); 121 | }, 122 | function () { 123 | $(this).css('background-color', light_color); 124 | } 125 | ); 126 | } 127 | 128 | function set_position_from_cookie() { 129 | if (!document.cookie) 130 | return; 131 | var items = document.cookie.split(';'); 132 | for(var k=0; k=e.computed&&(e={value:f,computed:g})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);var e={computed:Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;gf?1:0}),"value")};b.sortedIndex=function(a,c,d){d=d||b.identity;for(var e=0,f=a.length;e>1;d(a[g])=0})})};b.zip=function(){for(var a=b.toArray(arguments),c=b.max(b.pluck(a,"length")),d=new Array(c),e=0;e0?f-c:c-f)>=0)return e;e[g++]=f}};b.bind=function(a,c){var d=b.rest(arguments,2);return function(){return a.apply(c||j,d.concat(b.toArray(arguments)))}};b.bindAll=function(a){var c=b.rest(arguments);if(c.length==0)c=b.functions(a);b.each(c,function(d){a[d]=b.bind(a[d],a)}); 10 | return a};b.delay=function(a,c){var d=b.rest(arguments,2);return setTimeout(function(){return a.apply(a,d)},c)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(b.rest(arguments)))};b.wrap=function(a,c){return function(){var d=[a].concat(b.toArray(arguments));return c.apply(c,d)}};b.compose=function(){var a=b.toArray(arguments);return function(){for(var c=b.toArray(arguments),d=a.length-1;d>=0;d--)c=[a[d].apply(this,c)];return c[0]}};b.keys=function(a){if(b.isArray(a))return b.range(0,a.length); 11 | var c=[];for(var d in a)q.call(a,d)&&c.push(d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=function(a){return b.select(b.keys(a),function(c){return b.isFunction(a[c])}).sort()};b.extend=function(a,c){for(var d in c)a[d]=c[d];return a};b.clone=function(a){if(b.isArray(a))return a.slice(0);return b.extend({},a)};b.tap=function(a,c){c(a);return a};b.isEqual=function(a,c){if(a===c)return true;var d=typeof a;if(d!=typeof c)return false;if(a==c)return true;if(!a&&c||a&&!c)return false; 12 | if(a.isEqual)return a.isEqual(c);if(b.isDate(a)&&b.isDate(c))return a.getTime()===c.getTime();if(b.isNaN(a)&&b.isNaN(c))return true;if(b.isRegExp(a)&&b.isRegExp(c))return a.source===c.source&&a.global===c.global&&a.ignoreCase===c.ignoreCase&&a.multiline===c.multiline;if(d!=="object")return false;if(a.length&&a.length!==c.length)return false;d=b.keys(a);var e=b.keys(c);if(d.length!=e.length)return false;for(var f in a)if(!b.isEqual(a[f],c[f]))return false;return true};b.isEmpty=function(a){return b.keys(a).length== 13 | 
0};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=function(a){return!!(a&&a.concat&&a.unshift)};b.isArguments=function(a){return a&&b.isNumber(a.length)&&!b.isArray(a)&&!r.call(a,"length")};b.isFunction=function(a){return!!(a&&a.constructor&&a.call&&a.apply)};b.isString=function(a){return!!(a===""||a&&a.charCodeAt&&a.substr)};b.isNumber=function(a){return p.call(a)==="[object Number]"};b.isDate=function(a){return!!(a&&a.getTimezoneOffset&&a.setUTCFullYear)};b.isRegExp=function(a){return!!(a&& 14 | a.test&&a.exec&&(a.ignoreCase||a.ignoreCase===false))};b.isNaN=function(a){return b.isNumber(a)&&isNaN(a)};b.isNull=function(a){return a===null};b.isUndefined=function(a){return typeof a=="undefined"};b.noConflict=function(){j._=n;return this};b.identity=function(a){return a};b.breakLoop=function(){throw m;};var s=0;b.uniqueId=function(a){var c=s++;return a?a+c:c};b.template=function(a,c){a=new Function("obj","var p=[],print=function(){p.push.apply(p,arguments);};with(obj){p.push('"+a.replace(/[\r\t\n]/g, 15 | " ").replace(/'(?=[^%]*%>)/g,"\t").split("'").join("\\'").split("\t").join("'").replace(/<%=(.+?)%>/g,"',$1,'").split("<%").join("');").split("%>").join("p.push('")+"');}return p.join('');");return c?a(c):a};b.forEach=b.each;b.foldl=b.inject=b.reduce;b.foldr=b.reduceRight;b.filter=b.select;b.every=b.all;b.some=b.any;b.head=b.first;b.tail=b.rest;b.methods=b.functions;var l=function(a,c){return c?b(a).chain():a};b.each(b.functions(b),function(a){var c=b[a];i.prototype[a]=function(){var d=b.toArray(arguments); 16 | o.call(d,this._wrapped);return l(c.apply(b,d),this._chain)}});b.each(["pop","push","reverse","shift","sort","splice","unshift"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){c.apply(this._wrapped,arguments);return l(this._wrapped,this._chain)}});b.each(["concat","join","slice"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){return l(c.apply(this._wrapped,arguments),this._chain)}});i.prototype.chain=function(){this._chain=true;return this};i.prototype.value=function(){return this._wrapped}})(); 17 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 41 | @echo 42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 43 | 44 | dirhtml: 45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 48 | 49 | singlehtml: 50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 51 | @echo 52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 53 | 54 | pickle: 55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 56 | @echo 57 | @echo "Build finished; now you can process the pickle files." 58 | 59 | json: 60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 61 | @echo 62 | @echo "Build finished; now you can process the JSON files." 63 | 64 | htmlhelp: 65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 66 | @echo 67 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 68 | ".hhp project file in $(BUILDDIR)/htmlhelp." 69 | 70 | qthelp: 71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 72 | @echo 73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/resanalysis.qhcp" 76 | @echo "To view the help file:" 77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/resanalysis.qhc" 78 | 79 | devhelp: 80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 81 | @echo 82 | @echo "Build finished." 83 | @echo "To view the help file:" 84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/resanalysis" 85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/resanalysis" 86 | @echo "# devhelp" 87 | 88 | epub: 89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 90 | @echo 91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 92 | 93 | latex: 94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 95 | @echo 96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 97 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 98 | "(use \`make latexpdf' here to do that automatically)." 
99 | 100 | latexpdf: 101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 102 | @echo "Running LaTeX files through pdflatex..." 103 | make -C $(BUILDDIR)/latex all-pdf 104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 105 | 106 | text: 107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 108 | @echo 109 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 110 | 111 | man: 112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 113 | @echo 114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 115 | 116 | changes: 117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 118 | @echo 119 | @echo "The overview file is in $(BUILDDIR)/changes." 120 | 121 | linkcheck: 122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 123 | @echo 124 | @echo "Link check complete; look for any errors in the above output " \ 125 | "or in $(BUILDDIR)/linkcheck/output.txt." 126 | 127 | doctest: 128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 129 | @echo "Testing of doctests in the sources finished, look at the " \ 130 | "results in $(BUILDDIR)/doctest/output.txt." 131 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # res.analysis documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Dec 22 16:29:57 2010. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | #sys.path.insert(0, os.path.abspath('.')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = ['matplotlib.sphinxext.only_directives', 'matplotlib.sphinxext.plot_directive', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode','sphinx.ext.inheritance_diagram'] 29 | 30 | # Add any paths that contain templates here, relative to this directory. 31 | templates_path = ['_templates'] 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.txt' 35 | 36 | # The encoding of source files. 37 | #source_encoding = 'utf-8-sig' 38 | 39 | # The master toctree document. 40 | master_doc = 'index' 41 | 42 | # General information about the project. 43 | project = u'res.analysis' 44 | copyright = u'2010, WhiteMatter Labs GmbH' 45 | 46 | # The version info for the project you're documenting, acts as replacement for 47 | # |version| and |release|, also used in various other places throughout the 48 | # built documents. 49 | # 50 | # The short X.Y version. 
51 | version = '0.01' 52 | # The full version, including alpha/beta/rc tags. 53 | release = '0.01' 54 | 55 | # The language for content autogenerated by Sphinx. Refer to documentation 56 | # for a list of supported languages. 57 | #language = None 58 | 59 | # There are two options for replacing |today|: either, you set today to some 60 | # non-false value, then it is used: 61 | #today = '' 62 | # Else, today_fmt is used as the format for a strftime call. 63 | #today_fmt = '%B %d, %Y' 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 67 | exclude_patterns = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | html_theme = 'default' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | #html_theme_options = {} 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | #html_theme_path = [] 103 | 104 | # The name for this set of Sphinx documents. If None, it defaults to 105 | # " v documentation". 106 | #html_title = None 107 | 108 | # A shorter title for the navigation bar. Default is the same as html_title. 109 | #html_short_title = None 110 | 111 | # The name of an image file (relative to this directory) to place at the top 112 | # of the sidebar. 113 | html_logo = 'logo.png' 114 | 115 | # The name of an image file (within the static path) to use as favicon of the 116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 117 | # pixels large. 118 | #html_favicon = None 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | 125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 126 | # using the given strftime format. 127 | #html_last_updated_fmt = '%b %d, %Y' 128 | 129 | # If true, SmartyPants will be used to convert quotes and dashes to 130 | # typographically correct entities. 131 | #html_use_smartypants = True 132 | 133 | # Custom sidebar templates, maps document names to template names. 134 | #html_sidebars = {} 135 | 136 | # Additional templates that should be rendered to pages, maps page names to 137 | # template names. 138 | #html_additional_pages = {} 139 | 140 | # If false, no module index is generated. 
141 | #html_domain_indices = True 142 | 143 | # If false, no index is generated. 144 | #html_use_index = True 145 | 146 | # If true, the index is split into individual pages for each letter. 147 | #html_split_index = False 148 | 149 | # If true, links to the reST sources are added to the pages. 150 | #html_show_sourcelink = True 151 | 152 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 153 | #html_show_sphinx = True 154 | 155 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 156 | #html_show_copyright = True 157 | 158 | # If true, an OpenSearch description file will be output, and all pages will 159 | # contain a tag referring to it. The value of this option must be the 160 | # base URL from which the finished HTML is served. 161 | #html_use_opensearch = '' 162 | 163 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 164 | #html_file_suffix = None 165 | 166 | # Output file base name for HTML help builder. 167 | htmlhelp_basename = 'ocupydoc' 168 | 169 | 170 | # -- Options for LaTeX output -------------------------------------------------- 171 | 172 | # The paper size ('letter' or 'a4'). 173 | #latex_paper_size = 'letter' 174 | 175 | # The font size ('10pt', '11pt' or '12pt'). 176 | #latex_font_size = '10pt' 177 | 178 | # Grouping the document tree into LaTeX files. List of tuples 179 | # (source start file, target name, title, author, documentclass [howto/manual]). 180 | latex_documents = [ 181 | ('index', 'ocupy.tex', u'Ocupy Documentation', 182 | u'Niklas Wilming, Torsten Betz, Hannah Knepper', 'manual'), 183 | ] 184 | 185 | # The name of an image file (relative to this directory) to place at the top of 186 | # the title page. 187 | latex_logo = 'logo.png' 188 | 189 | # For "manual" documents, if this is true, then toplevel headings are parts, 190 | # not chapters. 191 | #latex_use_parts = False 192 | 193 | # If true, show page references after internal links. 194 | #latex_show_pagerefs = False 195 | 196 | # If true, show URL addresses after external links. 197 | #latex_show_urls = False 198 | 199 | # Additional stuff for the LaTeX preamble. 200 | #latex_preamble = '' 201 | 202 | # Documents to append as an appendix to all manuals. 203 | #latex_appendices = [] 204 | 205 | # If false, no module index is generated. 206 | #latex_domain_indices = True 207 | 208 | 209 | # -- Options for manual page output -------------------------------------------- 210 | 211 | # One entry per manual page. List of tuples 212 | # (source start file, name, description, authors, manual section). 213 | man_pages = [ 214 | ('index', 'ocupy', u'Ocupy Documentation', 215 | [u'Niklas Wilming, Torsten Betz, Hannah Knepper'], 1) 216 | ] 217 | -------------------------------------------------------------------------------- /doc/datamat.txt: -------------------------------------------------------------------------------- 1 | DataMat 2 | ======= 3 | 4 | .. module:: ocupy.datamat 5 | 6 | This module contains a data structure, the datamat, that allows convenient analysis of 7 | event based data. Event-based consits of discrete events that have other information 8 | associated with it. For example in eye-tracking analysis (where ocupy comes from), 9 | fixations might be considered events. Each fixation has associated information such as 10 | the duration of the fixation, which observer made the fixation, where it was etc. 
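For illustration, event-based data of this kind can be pictured as a set of parallel arrays in which entry i of every array describes the same event. The field names and values below are made up for this sketch and are not part of ocupy; the factories described in the next section build a datamat from data of this form::

    >>> import numpy as np
    >>> duration = np.array([145, 323, 243, 123, 231])    # fixation duration in ms
    >>> subject = np.array([1, 1, 1, 2, 2])                # which observer made the fixation
    >>> x = np.array([945.1, 582.6, 559.3, 932.5, 965.2])  # horizontal gaze position in pixels
    >>> y = np.array([120.0, 340.5, 222.3, 610.8, 412.9])  # vertical gaze position in pixels
    >>> duration[subject == 1].mean()                      # a boolean mask keeps all arrays aligned
    237.0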
11 | 12 | A datamat groups many events into one structure and provides an easy interface 13 | to access and filter the information. 14 | 15 | This document describes the most important properties of datamats in three steps. 16 | First, it explains how datamats can be created from existing data, second it shows 17 | some typical examples of how the datamat can be used and third it shows how a fixation 18 | density map can be computed. 19 | 20 | 21 | 22 | Generating a datamat 23 | -------------------- 24 | .. Note:: Currently datamats can not be created 25 | from raw EDF files by just using python. This means, you have to use 26 | matlab and edfread to convert the raw data into a matlab datamat. Sorry. 27 | 28 | There are several ways to create a datamat: 29 | 1. Convert a dictionary of key:ndarray pairs with the VectorFactory 30 | 2. Use the AccumulatorFactory to add key:value dictionaries to create a datamat. 31 | 3. The DatamatAccumulator collects datamats and joins them speedily into a datamat. 32 | 4. Load a single matlab datamat file from disk (datamat.FixmatFactory) 33 | 5. Concatenate all datamats in a directory (datamat.DirectoryFixmatFactory) 34 | 35 | The VectorFactory 36 | ^^^^^^^^^^^^^^^^^ 37 | 38 | The VectorFactory creates a datamat from a dictionary that contains numpy.ndarrays 39 | as fields. 40 | 41 | >>> from ocupy.datamat import VectorFactory 42 | >>> fields = {'duration':[145,323,243,123,231], 'image':[1,2,3,4,5]} 43 | >>> parameters = {'num_images':5} 44 | >>> dm = VectorFactory(fields, parameters) 45 | 46 | The AccumulatorFactory 47 | ^^^^^^^^^^^^^^^^^^^^^^ 48 | 49 | The accumulator factory accumulates single events and concatenates these into a 50 | datamat. Events are represented by dictionaries whose keys become the 51 | field names and whose values are stored in the fields of the datamat. Missing 52 | information is encoded as np.nan. 53 | 54 | >>> from ocupy.datamat import AccumulatorFactory 55 | >>> acc = AccumulatorFactory() 56 | >>> for i in range(100): 57 | fields = {'duration':2, 'image':i} 58 | acc.update(fields) 59 | >>> acc.get_dm(params = {'num_images':5}) # Returns the datamat 60 | 61 | 62 | The DatamatAccumulator 63 | ^^^^^^^^^^^^^^^^^^^^^^ 64 | 65 | The datamat accumulator is very similar to the AccumulatorFactory but takes 66 | datamats as arguments. The main difference to the datamat.join() function is 67 | that this factory allocates memory for all joins before it starts joining. 68 | This is much more efficient since it avoids unnecessary copying of memory. 69 | 70 | 71 | Using the FixmatFactory 72 | ^^^^^^^^^^^^^^^^^^^^^^^ 73 | 74 | This factory loads a matlab struct and converts it into a datamat. It is located 75 | in the fixmat module. 76 | 77 | The matlab struct needs to contain fields with the same length for this to work. 78 | The var_name argument specifies which variable to load from the mat file. 79 | 80 | >>> from ocupy.fixmat import FixmatFactory 81 | >>> fm = FixmatFactory('../ocupy/tests/fixmat_demo.mat', var_name = 'fixmat') 82 | 83 | .. autofunction:: FixmatFactory 84 | 85 | Using the DirectoryFixmatFactory 86 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 87 | 88 | The DirectoryFixmatFactory loads the specified variable from all \*.mat files 89 | in a directory (assuming they're datamats) and concatenates these into one big datamat.
90 | 91 | >>> from ocupy.fixmat import DirectoryFixmatFactory 92 | >>> fm = DirectoryFixmatFactory('demo/', var_name='fixmat') 93 | 94 | You can also use globbing to select which files you want: 95 | 96 | >>> fm = DirectoryFixmatFactory('demo/', 'fix*.mat') 97 | 98 | .. autofunction:: DirectoryFixmatFactory 99 | 100 | 101 | Working with datamats 102 | --------------------- 103 | 104 | Let's go through the process step-by-step with a demo datamat that is placed in 105 | *ocupy/tests/fixmat_demo.mat*:: 106 | 107 | >>> from ocupy import fixmat 108 | >>> fm = fixmat.FixmatFactory('../ocupy/tests/fixmat_demo.mat') 109 | >>> print fm 110 | Fixmat with 2092 fixations and the following data fields: 111 | Field Name | Length | Type | Values 112 | ---------------------+---------------+------------+---------------- 113 | category | 2092 | int32 | [7 8] 114 | end | 2092 | int32 | Many 115 | fix | 2092 | uint8 | Many 116 | SUBJECTINDEX | 2092 | uint8 | [36] 117 | start | 2092 | int32 | Many 118 | on_image | 2092 | uint8 | [0 1] 119 | y | 2092 | float32 | Many 120 | x | 2092 | float32 | Many 121 | filenumber | 2092 | uint16 | Many 122 | condition | 2092 | uint8 | [1] 123 | ---------------------+---------------+------------+---------------- 124 | Parameter Name | Value 125 | ---------------------+--------------------------------------------- 126 | image_size | [960, 1280] 127 | pixels_per_degree | 45 128 | 129 | This loads the datamat and prints a pretty table summarizing the contents of the 130 | datamat. Please notice the distinction between *parameters* and *fields*. Both 131 | can be accessed by using the dot notation:: 132 | 133 | >>> fm.x[1:10] 134 | array([ 945.12036133, 582.62030029, 559.28320312, 932.50823975, 135 | 965.1854248 , 925.03417969, 1216.84509277, 810.5010376 , 136 | 525.40301514], dtype=float32) 137 | >>> fm.image_size 138 | [960, 1280] 139 | 140 | The next example shows how you would compute an FDM of the entire datamat 141 | (a.k.a. the spatial bias). 142 | 143 | .. plot:: 144 | :include-source: 145 | 146 | import numpy as np 147 | from ocupy import fixmat 148 | import pylab as plot 149 | fm = fixmat.FixmatFactory('../ocupy/tests/fixmat_demo.mat') 150 | fdm = fixmat.compute_fdm(fm,scale_factor=0.25) 151 | plot.imshow(fdm) 152 | plot.show() 153 | 154 | Most often only a subset of the data is of interest. The datamat can be filtered by 155 | using square brackets: 156 | 157 | >>> fm = fm[fm.category == 7] 158 | >>> print fm 159 | Fixmat with 932 fixations and the following data fields: 160 | Field Name | Length | Type | Values 161 | ---------------------+---------------+------------+---------------- 162 | category | 932 | int32 | [7] 163 | end | 932 | int32 | Many 164 | fix | 932 | uint8 | Many 165 | SUBJECTINDEX | 932 | uint8 | [36] 166 | start | 932 | int32 | Many 167 | on_image | 932 | uint8 | [0 1] 168 | y | 932 | float32 | Many 169 | x | 932 | float32 | Many 170 | filenumber | 932 | uint16 | Many 171 | condition | 932 | uint8 | [1] 172 | ---------------------+---------------+------------+---------------- 173 | Parameter Name | Value 174 | ---------------------+--------------------------------------------- 175 | image_size | [960, 1280] 176 | pixels_per_degree | 45 177 | 178 | 179 | How does this work? The expression in the brackets is evaluated for every entry in 180 | the *fm.category* field. This results in a vector of booleans that has the same size 181 | as the *fm.category* field. Using the square brackets applies this logical index to all 182 | fields of the datamat.
Several logical expressions can be combined with logical operators. 183 | In contrast to matlab the *logical* operators have stronger binding than the *comparison* 184 | operators. Thus, *fm.category == 1 & fm.SUBJECTINDEX == 2* will produce an error because 185 | python will try to bind *1 & fm.SUBJECTINDEX*. To filter correctly, you have to use 186 | parentheses (see next example). 187 | 188 | .. plot:: 189 | :include-source: 190 | 191 | import numpy as np 192 | from ocupy import fixmat 193 | import pylab as plot 194 | fm = fixmat.FixmatFactory('../ocupy/tests/fixmat_demo.mat') 195 | fm = fm[(fm.filenumber == 1) & (fm.category == 7)] 196 | fdm = fixmat.compute_fdm(fm,scale_factor=0.25) 197 | plot.imshow(fdm) 198 | plot.show() 199 | 200 | In matlab we would traditionally use for loops over all unique values of a field 201 | to iterate over the field. In python this is easier; we can simply use the :func:`by_field` 202 | method:: 203 | 204 | fm = TestFixmatFactory(categories = [1, 2, 3], 205 | filenumbers = [1, 2, 3], subjectindices = [1, 2, 3]) 206 | for cat_mat in fm.by_field('category'): 207 | for img_mat in cat_mat.by_field('filenumber'): 208 | # The img_mat now contains all fixation data for one image 209 | # in one category 210 | pass 211 | 212 | There are some other useful functions (:func:`add_field`, :func:`join`, :func:`parameters` and 213 | :func:`fieldnames`). See the following reference section for more details. 214 | 215 | 216 | .. autoclass:: DataMat 217 | :members: 218 | 219 | Computing a fixation density map 220 | --------------------------------- 221 | 222 | 223 | .. plot:: 224 | :include-source: 225 | 226 | import numpy as np 227 | from ocupy import fixmat 228 | import pylab as plot 229 | points = np.random.random((2,100))*500 230 | fm = fixmat.TestFixmatFactory(points = points, params = {'image_size' : [500,500]}) 231 | fdm = fixmat.compute_fdm(fm) 232 | plot.imshow(fdm) 233 | plot.show() 234 | .. autofunction:: compute_fdm 235 | 236 | -------------------------------------------------------------------------------- /doc/fixmat.txt: -------------------------------------------------------------------------------- 1 | FixMat 2 | ====== 3 | 4 | .. module:: ocupy.fixmat 5 | 6 | A fixmat is a subclassed datamat that mainly contains some legacy code 7 | to maintain compatibility. Perhaps the only really useful function in here 8 | is the compute_fdm function. 9 | 10 | The next example shows how you would compute an FDM of the entire fixmat 11 | (a.k.a. the spatial bias). 12 | 13 | .. plot:: 14 | :include-source: 15 | 16 | import numpy as np 17 | from ocupy import fixmat 18 | import pylab as plot 19 | fm = fixmat.FixmatFactory('../ocupy/tests/fixmat_demo.mat') 20 | fdm = fixmat.compute_fdm(fm,scale_factor=0.25) 21 | plot.imshow(fdm) 22 | plot.show() 23 | 24 | Most often only a subset of the data is of interest.
The fixmat can be filtered by using square brackets: 25 | 26 | >>> fm = fm[fm.category == 7] 27 | >>> print fm 28 | Fixmat with 932 fixations and the following data fields: 29 | Field Name | Length | Type | Values 30 | ---------------------+---------------+------------+---------------- 31 | category | 932 | int32 | [7] 32 | end | 932 | int32 | Many 33 | fix | 932 | uint8 | Many 34 | SUBJECTINDEX | 932 | uint8 | [36] 35 | start | 932 | int32 | Many 36 | on_image | 932 | uint8 | [0 1] 37 | y | 932 | float32 | Many 38 | x | 932 | float32 | Many 39 | filenumber | 932 | uint16 | Many 40 | condition | 932 | uint8 | [1] 41 | ---------------------+---------------+------------+---------------- 42 | Parameter Name | Value 43 | ---------------------+--------------------------------------------- 44 | image_size | [960, 1280] 45 | pixels_per_degree | 45 46 | 47 | 48 | How does this work? The expression in the brackets is evaluated for every entry in 49 | the *fm.category* field. This results in a vector of booleans that has the same size 50 | as the *fm.category* field. Using the square brackets applies this logical index to all 51 | fields of the fixmat. Several logical expressions can be combined with logical operators. 52 | In contrast to matlab the *logical* operators have stronger binding than the *comparison* 53 | operators. Thus, *fm.category == 1 & fm.SUBJECTINDEX == 2* will produce an error because 54 | python will try to bind *1 & fm.SUBJECTINDEX*. To filter correctly, you have to use parentheses 55 | (see next example). 56 | 57 | .. plot:: 58 | :include-source: 59 | 60 | import numpy as np 61 | from ocupy import fixmat 62 | import pylab as plot 63 | fm = fixmat.FixmatFactory('../ocupy/tests/fixmat_demo.mat') 64 | fm = fm[(fm.filenumber == 1) & (fm.category == 7)] 65 | fdm = fixmat.compute_fdm(fm,scale_factor=0.25) 66 | plot.imshow(fdm) 67 | plot.show() 68 | 69 | In matlab we would traditionally use for loops over all unique values of a field 70 | to iterate over the field. In python this is easier; we can simply use the :func:`by_field` 71 | method:: 72 | 73 | fm = TestFixmatFactory(categories = [1, 2, 3], filenumbers = [1, 2, 3], subjectindices = [1, 2, 3]) 74 | for cat_mat in fm.by_field('category'): 75 | for img_mat in cat_mat.by_field('filenumber'): 76 | # The img_mat now contains all fixation data for one image 77 | # in one category 78 | pass 79 | 80 | There is a short-cut for iterating over categories and images (because it is so common): 81 | 82 | .. plot:: 83 | :include-source: 84 | 85 | import numpy as np 86 | from ocupy import fixmat 87 | import pylab as plot 88 | fm = fixmat.FixmatFactory('../ocupy/tests/fixmat_demo.mat') 89 | fm = fm[(fm.filenumber > 36) & (fm.filenumber < 40)] 90 | for cat,(cat_mat,_) in enumerate(fm.by_cat()): 91 | for img,(img_mat,_) in enumerate(cat_mat.by_filenumber()): 92 | fdm = fixmat.compute_fdm(img_mat,scale_factor=0.25) 93 | plot.subplot(2,3, (3*cat)+img) 94 | plot.xticks([]) 95 | plot.yticks([]) 96 | plot.imshow(fdm) 97 | plot.show() 98 | 99 | There are some other useful functions (:func:`add_field`, :func:`join`, :func:`parameters` and 100 | :func:`fieldnames`). See the following reference section for more details. 101 | 102 | 103 | .. autoclass:: FixMat 104 | :members: 105 | 106 | Computing a fixation density map 107 | --------------------------------- 108 | 109 | 110 | ..
plot:: 111 | :include-source: 112 | 113 | import numpy as np 114 | from ocupy import fixmat 115 | import pylab as plot 116 | points = np.random.random((2,100))*500 117 | fm = fixmat.TestFixmatFactory(points = points, params = {'image_size' : [500,500]}) 118 | fdm = fixmat.compute_fdm(fm) 119 | plot.imshow(fdm) 120 | plot.show() 121 | .. autofunction:: compute_fdm 122 | 123 | -------------------------------------------------------------------------------- /doc/index.txt: -------------------------------------------------------------------------------- 1 | .. ocupy documentation master file, created by 2 | sphinx-quickstart on Wed Dec 22 16:29:57 2010. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to ocupy's documentation! 7 | ======================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | datamat.txt 15 | fixmat.txt 16 | stimuli.txt 17 | loader.txt 18 | measures.txt 19 | bounds.txt 20 | 21 | Indices and tables 22 | ================== 23 | 24 | * :ref:`genindex` 25 | * :ref:`modindex` 26 | * :ref:`search` 27 | 28 | -------------------------------------------------------------------------------- /doc/loader.txt: -------------------------------------------------------------------------------- 1 | Loader 2 | ====== 3 | 4 | .. automodule:: ocupy.loader 5 | :members: 6 | 7 | 8 | -------------------------------------------------------------------------------- /doc/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/doc/logo.png -------------------------------------------------------------------------------- /doc/measures.txt: -------------------------------------------------------------------------------- 1 | Measures 2 | ======== 3 | 4 | .. automodule:: ocupy.measures 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /doc/parallel.txt: -------------------------------------------------------------------------------- 1 | Parallel 2 | ======== 3 | 4 | .. module:: res.analysis.parallel 5 | 6 | This module implements functionality to parallelize massively 7 | parallel tasks. A massively parallel task consists of repeatedly 8 | carrying out the same computation. Each individual computation 9 | might depend on different parameters but there exist no dependencies 10 | between different tasks. 11 | 12 | This module provides three different classes to achieve massive 13 | parallelization: :ref:`TaskStore <TaskStore>`, :ref:`TaskManager <TaskManager>` and :ref:`Worker <Worker>`. 14 | 15 | Each class corresponds to one of the three steps that are necessary: 16 | 1. Provide a task description and an ordering of tasks 17 | 2. Make tasks available for parallel processing 18 | 3. Carry out a task 19 | 20 | These classes work together to process tasks in a parallel fashion. 21 | The TaskStore provides an interface that allows iterating over 22 | individual task descriptions. The TaskManager is an XMLRPC server 23 | which provides task descriptions for workers. A Worker is an XMLRPC 24 | client that connects to a task manager and retrieves a task description, 25 | executes the task and sends back the results. 26 | 27 | 28 | Organizing tasks for distribution 29 | --------------------------------- 30 | .. _TaskStore: 31 | 32 | Preparing your own tasks for parallel computation starts with providing a 33 | custom TaskStore object.
A task store organizes how a complete task can be 34 | divided into smaller tasks. Let's consider an example. Say we want to compute 35 | how well a single subject can be predicted by some other random subject. In 36 | this case, a task might be the calculation of one prediction score. The entire task 37 | is to calculate scores for predicting each subject with each other subject. 38 | If we have 48 subjects, we have 48*47 individual tasks. 39 | 40 | A single task is usually described by a dictionary whose keys are parameter names 41 | and whose values are the corresponding parameter values:: 42 | 43 | for (index, task) in task_store: print task 44 | {'index':1,'predicted_sub':10,'predicting_sub':10} 45 | 46 | Internally a task store deals only with a linear index into all possible tasks. 47 | To provide a custom task store you have to implement a class that inherits from parallel.TaskStore 48 | and implements four functions: 49 | 50 | 1. get(index, \*params) - A function that returns a task description 51 | 2. sub2ind(\*params) - A function that maps parameters to a linear index 52 | 3. ind2sub(index) - A function that maps a linear index to a set of parameters 53 | 4. update_results(task_index, task_results) - A function that takes the results for a task and saves/organizes them. 54 | 55 | An example implementation is given below:: 56 | 57 | class ISTaskStore(parallel.TaskStore): 58 | def __init__(self, partitions = 100,ind = None, filename = None): 59 | parallel.TaskStore.__init__(self,partitions, ind, None) 60 | self.num_tasks = 48 * 47 # This is required! 61 | self.results = np.nan * np.ones((48,47)) 62 | def get(self, index,predicting, predicted): 63 | if index == None: 64 | index = self.sub2ind(predicting, predicted) 65 | return {'index':index, 'predicting':predicting, 'predicted':predicted} 66 | 67 | def sub2ind(self, predicting, predicted): 68 | # parallel.sub2ind maps 1 <- (1,1), 2 <- (1,2) ... 47 <- (1,47), 48 <- (2,1) etc. 69 | return parallel.sub2ind((predicting, predicted), (48,47)) 70 | 71 | def ind2sub(self, index): 72 | # parallel.ind2sub maps 1 -> (1,1), 2 -> (1,2), ... 47 -> (1,47), 48 -> (2,1) etc. 73 | return parallel.ind2sub(index,(48,47)) 74 | 75 | def update_results(self, task_index, task_results): 76 | for cur_res in task_results: 77 | # Find position for this result in result matrix 78 | ind = cur_res['index'] 79 | params = self.ind2sub(ind) 80 | self.results[params] = cur_res['result'] 81 | 82 | Let's see what this does:: 83 | 84 | from res.analysis import parallel 85 | ts = ISTaskStore() 86 | ts.get(None, 1,1) 87 | {'index': 0, 'predicted': 1, 'predicting': 1} 88 | ts.get(None, 2,1) 89 | {'index': 47, 'predicted': 1, 'predicting': 2} 90 | 91 | 92 | The task store provides an iterator interface that allows iterating over all 93 | tasks in the task store:: 94 | 95 | for task in ts: print task 96 | # Will list all tasks 97 | 98 | An important property of a task store is that it can *partition* itself into 99 | smaller groups of subtasks. Often computing the result of a single task comes 100 | with significant overhead, thus each worker receives a group of tasks (which 101 | is represented again by a TaskStore object) and then iterates over all tasks 102 | in the store.
Every task store object can be instantiated with a list of valid 103 | indices, such that iterating through the store iterates only through these tasks:: 104 | 105 | ts = ISTaskStore(ind=[1,2,3]) 106 | for task in ts: print task 107 | (1, {'index': 1, 'predicting': 1, 'predicted': 2}) 108 | (2, {'index': 2, 'predicting': 1, 'predicted': 3}) 109 | (3, {'index': 3, 'predicting': 1, 'predicted': 4}) 110 | 111 | 112 | Another important function of a task store is *update_results(self, task_id, task_description)*. 113 | It is called by the server whenever the results for a partition are returned by 114 | a worker. This function has to be implemented by you and gives you a 115 | chance to put the results back into a form that you can interpret and *save*! 116 | 117 | One more thing: TaskStores and Workers usually come in pairs. To avoid using a 118 | worker that was not intended to be used with a specific TaskStore, a task store needs 119 | to be able to identify itself. It therefore needs to have a field .ident which 120 | you have to set. 121 | 122 | 123 | Making tasks available for processing 124 | ------------------------------------- 125 | .. _TaskManager: 126 | 127 | When a task store object is available we can start a server that waits 128 | for workers to pick up tasks. To do so, we create an instance of 129 | TaskManager and run it:: 130 | 131 | from twisted.internet import reactor 132 | from twisted.web import xmlrpc, server 133 | r = parallel.TaskManager(task_store) 134 | reactor.listenTCP(7080, server.Site(r)) 135 | reactor.run() 136 | 137 | It is best to do this in a screen session and then detach it. 138 | You can check that it is running by firing up ipython and typing:: 139 | 140 | import xmlrpclib 141 | s = xmlrpclib.Server('http://localhost:7080') 142 | print s.status() 143 | 100 Jobs are still wating for execution 144 | 0 Jobs are being processed 145 | 0 Jobs are done 146 | 147 | Remember that the server iterates over partitions (and the default number 148 | of partitions is 100), thus for the server one job is one partition of the tasks. 149 | You can query the server with the server object:: 150 | 151 | k = s.get_task() 152 | s.reschedule() # Reschedule all tasks that are being processed 153 | 154 | Getting things done 155 | ------------------- 156 | .. _Worker: 157 | 158 | When everything is in place (task store available, server started) it is time to 159 | actually do the work. For this we create instances of parallel.Worker. 160 | 161 | Each worker instance will then connect to the server, call server.get_task(), iterate over 162 | the returned task_store and call *compute(self, index, task_description)* for each task. 163 | It collects the output for each compute call and returns it to the server and then quits. 164 | 165 | Often the worker needs access to data that needs to be loaded beforehand. Thus, the 166 | constructor of parallel.Worker calls *setup(self)* before starting the computation. 167 | This gives you a chance to organize the necessary data. 168 | 169 | The only tasks left to you are implementing a setup and a compute method.
Here is an example:: 170 | 171 | class ISWorker(parallel.Worker): 172 | 173 | def setup(self): 174 | prefix = '/net/space/users/nwilming/' 175 | data = fixmat.FixmatFactory(os.path.join(prefix,'fixmat.mat')) 176 | self.data = data[(data.on_image == True) & (ismember(data.fix, range(2,17)))] 177 | 178 | def compute(self, index, task_description): 179 | predicted = task_description['predicted'] 180 | predicting = task_description['predicting'] 181 | rescale = 0.5 182 | (auc, nss, kl) = roc.intersubject_scores(self.data, 7, [1], 183 | predicting, range(2,65), 184 | predicted, controls = False, scale_factor = rescale) 185 | 186 | result = {'index': index, 187 | 'auc' : float(auc), 188 | 'nss' : float(nss), 189 | 'kl' : float(kl)} 190 | result.update(task_description) 191 | return result 192 | 193 | To start a worker, instantiate it and call its run() function. The rest happens 194 | automatically. 195 | 196 | The real power of this approach lies in using the GridEngine to start as many workers as 197 | there are task partitions; the GridEngine then runs as many of them in parallel as possible. 198 | 199 | 200 | Reference 201 | --------- 202 | 203 | .. autoclass:: TaskStore 204 | :members: 205 | 206 | .. autoclass:: TaskManager 207 | :members: 208 | 209 | .. autoclass:: Worker 210 | :members: 211 | -------------------------------------------------------------------------------- /doc/stimuli.txt: -------------------------------------------------------------------------------- 1 | Organization of stimuli: categories, images and features 2 | ========================================================= 3 | 4 | This module aims at providing easy access to images and feature maps. 5 | It takes into account that images are organized in different categories. 6 | 7 | Using this module has several advantages over simply loading images or 8 | feature maps at runtime: 9 | 10 | #. **Alignment**: You can iterate over images and access all fixations made on this image. The access is handled transparently for you. 11 | #. **Abstraction**: If treated right, it doesn't matter where or how your data is organized. Stored on the web? Strange naming conventions? No problem, but you will have to adapt some minor things in the backend. The interface, however, stays the same. 12 | 13 | 14 | The graph below describes the dependency structure of classes within the stimulus module. The general idea is that images are organized in categories (one category maps to many images) and that images provide access to the raw image data and to features of the image (again a 1:n mapping). Data is loaded via a proxy class which abstracts from the actual access (this is an n:1 mapping). 15 | 16 | .. graphviz:: 17 | 18 | digraph Stimuli { 19 | node [shape = record,height=.1]; 20 | node0[label = "<f0> Category 1 |<f1> Category 2"]; 21 | node1[label = "<f0> Image 1 | ... | Image n"]; 22 | node2[label = "<f0> Image 1 | ... | Image n"]; 23 | node3[label = "<f0> Image Data |<f1> Feature 1 | ... |<f2> Feature n"]; 24 | node4[label = "<f0> Image Data |<f1> Feature 1 | ... |<f2> Feature n"]; 25 | "node0":f0 -> "node1":f0; 26 | "node0":f1 -> "node2":f0; 27 | "node1":f0 -> "node3":f0; 28 | "node2":f0 -> "node4":f0; 29 | "node4":f0 -> Loader; 30 | "node4":f1 -> Loader; 31 | "node4":f2 -> Loader; 32 | "node3":f0 -> Loader; 33 | "node3":f1 -> Loader; 34 | "node3":f2 -> Loader; 35 | } 36 | 37 | The remainder of this document describes the interface of the stimuli 38 | module and how it interacts with the loader module.
39 | 40 | Loading Data from somewhere 41 | --------------------------- 42 | To access images and feature maps, we need to be able to load them into 43 | memory. This task is carried out by the *loader*. It encapsulates the 44 | direct data access, such that we do not have to worry about it later. 45 | 46 | Let's look at an example: 47 | 48 | >>> l = loader.LoadFromDisk('my-images', 'my-features', size = (768,1024)) 49 | >>> l.get_image(2,17) 50 | -> Returns image 17 in category 2 51 | >>> l.get_feature(2,17,'BYCHigh') 52 | -> Returns feature BYCHigh for image 17 in category 2 53 | 54 | In this case we use a loader that reads features and images from the hard disk. 55 | In the constructor, we specify where the images and features are located. A 56 | neat functionality is that the 'LoadFromDisk' loader can automatically resize images and 57 | features to the same size (given by the size argument). By default 58 | 'LoadFromDisk' 59 | respects the following file layout: my-images/cat/cat_image.png 60 | and my-features/cat/feature/cat_image.mat. 61 | If you use a different naming scheme it is easy to rip out the standard and plug 62 | in your own naming scheme. All you have to do is replace LoadFromDisk's 'path' function: 63 | 64 | >>> l = loader.LoadFromDisk('my-images', 'my-features', size = (768,1024)) 65 | >>> def my_path(self, *args): 66 | if len(args) == 1: 67 | return self.impath 68 | elif len(args) == 2: 69 | return os.path.join(self.impath, '%i_%i.png'%(args[0],args[1])) 70 | elif len(args) == 3: 71 | return os.path.join(self.impath, '%i_%i_%f.mat'%(args[0],args[1],args[2])) 72 | >>> l.path = my_path 73 | 74 | 75 | And now the loader will respect a my-images/cat_image.png and 76 | my-images/cat_image_feature.mat naming scheme. You can of course also inherit 77 | from LoadFromDisk and overwrite the path method if you want to use this naming 78 | scheme more often. 79 | 80 | To use a completely different loader (say, one that pulls stimuli from an 81 | SQL database) you have to implement your own. To achieve this, inherit 82 | from loader.Loader and implement all methods specified there. 83 | 84 | 85 | Working with the stimulus module 86 | -------------------------------- 87 | Abstracting the data access is a first step, but it does not provide much in terms of 88 | convenience. The stimuli module defines three classes that organize stimuli into 89 | categories, images and features: 90 | 91 | - The **Categories** class encapsulates different categories that are available 92 | - The **Images** class represents all images within a category 93 | - The **Image** class provides direct access to the image data and feature maps of the image 94 | 95 | The interface is pretty much self-explanatory: 96 | 97 | >>> l = loader.LoadFromDisk('path-to-my-images', 'path-to-my-features', size = (768,1024)) 98 | >>> inp = stimuli.Categories(l, features=None,img_per_cat = {2:range(16,26),9:range(1,51)}) 99 | >>> inp[2][17].data # yields image 17 in category 2 100 | >>> inp[2][17]['BYCHigh'] # yields feature BYCHigh 101 | >>> for cat in inp: 102 | for img in cat: 103 | img.data # Gives the image 104 | img['BYCHigh'] # Gives feature BYCHigh 105 | 106 | In this case, I specified all possible category / image combinations. 107 | Often we want to access images and features that have been arranged by 108 | some structure beforehand. The most obvious case is that we have a fixmat that already specifies all possible category and image combinations.
To create a stimuli object that is aligned to a fixmat we can use the **FixmatStimuliFactory**: 109 | 110 | >>> fm = fixmat.DirectoryFixmatFactory('path-to-fixmats') 111 | >>> l = loader.LoadFromDisk(impath = 'path-to-imgs', ftrpath = 'path-to-ftrs') 112 | >>> stim = stimuli.FixmatStimuliFactory(fm, l) 113 | 114 | Alternatively we can use the **DirectoryStimuliFactory** to automatically index all categories and files in a directory: 115 | 116 | >>> stim = stimuli.DirectoryStimuliFactory(l) 117 | >>> stim.categories() 118 | [2,9] 119 | >>> stim[2].images() 120 | [16, 17, ..., 25] 121 | >>> stim[9].images() 122 | [1, 2, ..., 50] 123 | 124 | This, however, works only for the default file layout structure (cat/cat_img.png, cat/ftr/cat_img.mat). 125 | 126 | Combining fixation data and stimulus data 127 | ----------------------------------------- 128 | 129 | In many cases, we want to iterate over images and fixation data at the same 130 | time. The stimuli module automatically aligns fixation and image data. 131 | 132 | To use this functionality the data proxy (i.e. categories, images or features) 133 | must be created with a FixMat: 134 | 135 | >>> fm = fixmat.DirectoryFixmatFactory('my-fixmats') 136 | >>> inp = stimuli.Categories(l,fixations = fm) 137 | >>> inp[2][17].fixations 138 | -> FixMat that contains only fixations on image 17 from category 2 139 | 140 | If the data proxy is initialized with a set of possible category / image 141 | combinations we can also iterate over it. In this case it is probably 142 | handy to use the FixmatStimuliFactory which extracts all possible 143 | category / image combinations from a fixmat: 144 | 145 | >>> inp = stimuli.FixmatStimuliFactory(fm,l) 146 | >>> for cat in inp: 147 | for img in cat: 148 | img.fixations 149 | 150 | 151 | Reference 152 | --------- 153 | .. autofunction:: ocupy.stimuli.FixmatStimuliFactory 154 | .. autofunction:: ocupy.stimuli.DirectoryStimuliFactory 155 | 156 | .. automodule:: ocupy.stimuli 157 | 158 | .. automodule:: ocupy.loader 159 | 160 | -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- [Generated Sphinx HTML page "Welcome to ocupy's documentation! — ocupy v0.1 documentation"; only fragments of the stripped page markup (head, navigation bar, sidebar and search widgets) survived in this dump and are not reproduced.]
131 | 146 | 150 | 151 | -------------------------------------------------------------------------------- /objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/objects.inv -------------------------------------------------------------------------------- /ocupy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/__init__.py -------------------------------------------------------------------------------- /ocupy/datamat_tools.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on Feb 1, 2012 3 | 4 | @author: rmuil 5 | 6 | Things here should eventually be incorporated into the DataMat class itself. 7 | ''' 8 | from .utils import factorise_strings 9 | from numpy import array 10 | 11 | def factorise_field(dm, field_name, boundary_char = None, parameter_name=None): 12 | """This removes a common beginning from the data of the fields, placing 13 | the common element in a parameter and the different endings in the fields. 14 | 15 | if parameter_name is None, then it will be _common. 16 | 17 | So far, it's probably only useful for the file_name. 18 | 19 | TODO: remove field entirely if no unique elements exist. 20 | """ 21 | 22 | old_data = dm.field(field_name) 23 | 24 | if isinstance(old_data[0], str) or isinstance(old_data[0], str): 25 | (new_data, common) = factorise_strings(old_data, boundary_char) 26 | new_data = array(new_data) 27 | else: 28 | raise NotImplementedError('factorising of fields not implemented for anything but string/unicode objects') 29 | 30 | if len(common) > 0: 31 | dm.__dict__[field_name] = new_data 32 | if parameter_name is None: 33 | parameter_name = field_name + '_common' 34 | dm.add_parameter(parameter_name, common) -------------------------------------------------------------------------------- /ocupy/model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """This module defines an interface for models.""" 3 | 4 | class Model(object): 5 | """ 6 | Abstract base class for models. 7 | 8 | This is a minimalistic interface, that offers an interface for 9 | training, predicting, loading and saving for models. This suffices 10 | to carry out a full evaluation of different models. This can be done 11 | in combination with the cross-validation module. 12 | """ 13 | 14 | def __init__(self, name): 15 | """ 16 | Every model is required to have a name 17 | """ 18 | self.name = name 19 | 20 | def train(self, fixmat, categories): 21 | """ 22 | Trains a model / fit parameters of the model. 23 | 24 | Input: 25 | fixmat: FixMat 26 | Fixation data that can be used for training the model. 27 | categories: Categories object 28 | Stimuli (which are aligned to the fixation data) that can be 29 | used for training the model. 30 | 31 | After this method has been called, the model is in a state in which it 32 | can generate predictions - nothing has to be returned by this 33 | function. 34 | """ 35 | raise NotImplementedError 36 | 37 | def predict(self, test_stim, predicted_stims): 38 | """ 39 | Generates predictions for all elements of test_stim and puts them 40 | into predicted_stims 41 | 42 | Input: 43 | test_stim: Categories object 44 | The stimuli to generate predictions for. 
45 | predicted_stims: Categories object 46 | A categories object that is writable. The predict method 47 | iterates over all categories and images in test_stim and puts 48 | the generated predictions as a new feature (called 49 | 'prediction') into this categories object. 50 | Raises: 51 | PredictionError: A prediction error is raised if for some reason a 52 | prediction can not be generated. 53 | 54 | """ 55 | raise NotImplementedError 56 | 57 | def save(self, path): 58 | """ 59 | Saves model to path such that loading it can restore the state of the 60 | model. 61 | """ 62 | raise NotImplementedError 63 | 64 | def load(self, path): 65 | """ 66 | Loads model from path and restores the state of the saved model. 67 | """ 68 | raise NotImplementedError 69 | 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /ocupy/saccade_geometry.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from multiprocessing import pool 3 | from scipy import nanmean 4 | from ocupy.simulator import anglendiff, reshift 5 | from scipy.optimize import leastsq 6 | 7 | """ 8 | These values are used by some of the scripts and determine bin width 9 | for the raster plots. 10 | """ 11 | e_dist = np.arange(-10.5, 11.5, 1) 12 | e_angle = np.arange(0, 181, 1) 13 | 14 | 15 | def prepare_data(fm, max_back, dur_cap=700): 16 | ''' 17 | Computes angle and length differences up to given order and deletes 18 | suspiciously long fixations. 19 | 20 | Input 21 | fm: Fixmat 22 | Fixmat for which to comput angle and length differences 23 | max_back: Int 24 | Computes delta angle and amplitude up to order max_back. 25 | dur_cap: Int 26 | Longest allowed fixation duration 27 | 28 | Output 29 | fm: Fixmat 30 | Filtered fixmat that aligns to the other outputs. 31 | durations: ndarray 32 | Duration for each fixation in fm 33 | forward_angle: 34 | Angle between previous and next saccade. 35 | 36 | ''' 37 | durations = np.roll(fm.end - fm.start, 1).astype(float) 38 | angles, lengths, ads, lds = anglendiff(fm, roll=max_back, return_abs=True) 39 | # durations and ads are aligned in a way that an entry in ads 40 | # encodes the angle of the saccade away from a fixation in 41 | # durations 42 | forward_angle = abs(reshift(ads[0])).astype(float) 43 | ads = [abs(reshift(a)) for a in ads] 44 | # Now filter out weird fixation durations 45 | id_in = durations > dur_cap 46 | durations[id_in] = np.nan 47 | forward_angle[id_in] = np.nan 48 | return fm, durations, forward_angle, ads, lds 49 | 50 | 51 | def saccadic_momentum_effect(durations, forward_angle, 52 | summary_stat=nanmean): 53 | """ 54 | Computes the mean fixation duration at forward angles. 55 | """ 56 | durations_per_da = np.nan * np.ones((len(e_angle) - 1,)) 57 | for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])): 58 | idx = ( 59 | bo <= forward_angle) & ( 60 | forward_angle < b1) & ( 61 | ~np.isnan(durations)) 62 | durations_per_da[i] = summary_stat(durations[idx]) 63 | return durations_per_da 64 | 65 | 66 | def ior_effect(durations, angle_diffs, length_diffs, 67 | summary_stat=np.mean, parallel=True, min_samples=20): 68 | """ 69 | Computes a measure of fixation durations at delta angle and delta 70 | length combinations. 
71 | """ 72 | raster = np.empty((len(e_dist) - 1, len(e_angle) - 1), dtype=object) 73 | for a, (a_low, a_upp) in enumerate(zip(e_angle[:-1], e_angle[1:])): 74 | for d, (d_low, d_upp) in enumerate(zip(e_dist[:-1], e_dist[1:])): 75 | idx = ((d_low <= length_diffs) & (length_diffs < d_upp) & 76 | (a_low <= angle_diffs) & (angle_diffs < a_upp)) 77 | if sum(idx) < min_samples: 78 | raster[d, a] = np.array([np.nan]) 79 | else: 80 | raster[d, a] = durations[idx] 81 | if parallel: 82 | p = pool.Pool(3) 83 | result = p.map(summary_stat, list(raster.flatten())) 84 | p.terminate() 85 | else: 86 | result = list(map(summary_stat, list(raster.flatten()))) 87 | for idx, value in enumerate(result): 88 | i, j = np.unravel_index(idx, raster.shape) 89 | raster[i, j] = value 90 | return raster 91 | 92 | 93 | def predict_fixation_duration( 94 | durations, angles, length_diffs, dataset=None, params=None): 95 | """ 96 | Fits a non-linear piecewise regression to fixation durations for a fixmat. 97 | 98 | Returns corrected fixation durations. 99 | """ 100 | if dataset is None: 101 | dataset = np.ones(durations.shape) 102 | corrected_durations = np.nan * np.ones(durations.shape) 103 | for i, ds in enumerate(np.unique(dataset)): 104 | e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y) 105 | v0 = [120, 220.0, -.1, 0.5, .1, .1] 106 | id_ds = dataset == ds 107 | idnan = ( 108 | ~np.isnan(angles)) & ( 109 | ~np.isnan(durations)) & ( 110 | ~np.isnan(length_diffs)) 111 | v, s = leastsq( 112 | e, v0, args=( 113 | angles[ 114 | idnan & id_ds], durations[ 115 | idnan & id_ds], length_diffs[ 116 | idnan & id_ds]), maxfev=10000) 117 | corrected_durations[id_ds] = (durations[id_ds] - 118 | (leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v))) 119 | if params is not None: 120 | params['v' + str(i)] = v 121 | params['s' + str(i)] = s 122 | return corrected_durations 123 | 124 | 125 | def subject_predictions(fm, field='SUBJECTINDEX', 126 | method=predict_fixation_duration, data=None): 127 | ''' 128 | Calculates the saccadic momentum effect for individual subjects. 129 | 130 | Removes any effect of amplitude differences. 131 | 132 | The parameters are fitted on unbinned data. The effects are 133 | computed on binned data. See e_dist and e_angle for the binning 134 | parameters. 
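    A minimal usage sketch (illustrative only; loading the fixmat `fm` is
    assumed to have happened elsewhere):

        effects, predictions, params = subject_predictions(fm)
        # effects and predictions have shape
        # (number of subjects, len(e_angle) - 1)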
135 | ''' 136 | if data is None: 137 | fma, dura, faa, adsa, ldsa = prepare_data(fm, dur_cap=700, max_back=5) 138 | adsa = adsa[0] 139 | ldsa = ldsa[0] 140 | else: 141 | fma, dura, faa, adsa, ldsa = data 142 | fma = fma.copy() # [ones(fm.x.shape)] 143 | sub_effects = [] 144 | sub_predictions = [] 145 | parameters = [] 146 | for i, fmsub in enumerate(np.unique(fma.field(field))): 147 | id = fma.field(field) == fmsub 148 | #_, dur, fa, ads, lds = prepare_data(fmsub, dur_cap = 700, max_back=5) 149 | dur, fa, ads, lds = dura[id], faa[id], adsa[id], ldsa[id] 150 | params = {} 151 | _ = method(dur, fa, lds, params=params) 152 | ps = params['v0'] 153 | ld_corrected = leastsq_only_dist(lds, ps[4], ps[5]) 154 | prediction = leastsq_only_angle(fa, ps[0], ps[1], ps[2], ps[3]) 155 | sub_predictions += [saccadic_momentum_effect(prediction, fa)] 156 | sub_effects += [saccadic_momentum_effect(dur - ld_corrected, fa)] 157 | parameters += [ps] 158 | return np.array(sub_effects), np.array(sub_predictions), parameters 159 | 160 | 161 | def leastsq_dual_model( 162 | fa, dl, split, intercept, slope1, slope2, slope3, slope4): 163 | breakdummy = fa < split 164 | split2 = 0 165 | breakdummy2 = dl < split2 166 | reg_full = np.array([np.ones(fa.shape) * intercept, 167 | slope1 * fa, 168 | slope2 * ((fa - split) * breakdummy), 169 | slope3 * dl, 170 | slope4 * ((dl - split2) * breakdummy2)]) 171 | return reg_full.sum(0) 172 | 173 | 174 | def leastsq_only_dist(dl, slope3, slope4): 175 | split2 = 0 176 | breakdummy2 = dl < split2 177 | reg_full = np.array([slope3 * dl, 178 | slope4 * ((dl - split2) * breakdummy2)]) 179 | return reg_full.sum(0) 180 | 181 | 182 | def leastsq_only_angle(fa, split, intercept, slope1, slope2): 183 | breakdummy = fa < split 184 | reg_full = np.array([np.ones(fa.shape) * intercept, 185 | slope1 * fa, 186 | slope2 * ((fa - split) * breakdummy)]) 187 | return reg_full.sum(0) 188 | -------------------------------------------------------------------------------- /ocupy/samples2fix.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | ''' 3 | Convert eye-tracking samples to fixations 4 | ''' 5 | import numpy as np 6 | from ocupy.datamat import AccumulatorFactory 7 | 8 | velocity_window_size = 3 9 | 10 | 11 | def get_velocity(samplemat, Hz, blinks=None): 12 | ''' 13 | Compute velocity of eye-movements. 14 | 15 | Samplemat must contain fields 'x' and 'y', specifying the x,y coordinates 16 | of gaze location. The function assumes that the values in x,y are sampled 17 | continuously at a rate specified by 'Hz'. 18 | ''' 19 | Hz = float(Hz) 20 | distance = ((np.diff(samplemat.x) ** 2) + 21 | (np.diff(samplemat.y) ** 2)) ** .5 22 | distance = np.hstack(([distance[0]], distance)) 23 | if blinks is not None: 24 | distance[blinks[1:]] = np.nan 25 | win = np.ones((velocity_window_size)) / float(velocity_window_size) 26 | velocity = np.convolve(distance, win, mode='same') 27 | velocity = velocity / (velocity_window_size / Hz) 28 | acceleration = np.diff(velocity) / (1. / Hz) 29 | acceleration = abs(np.hstack(([acceleration[0]], acceleration))) 30 | return velocity, acceleration 31 | 32 | 33 | def saccade_detection(samplemat, Hz=200, threshold=30, 34 | acc_thresh=2000, min_duration=21, min_movement=.35, 35 | ignore_blinks=False): 36 | ''' 37 | Detect saccades in a stream of gaze location samples. 38 | 39 | Coordinates in samplemat are assumed to be in degrees. 40 | 41 | Saccades are detected by a velocity/acceleration threshold approach. 
A saccade starts when a) the velocity is above threshold, b) the 43 | acceleration is above acc_thresh at least once during the interval 44 | defined by the velocity threshold, c) the saccade lasts at least min_duration 45 | ms and d) the distance between saccade start and endpoint is at least 46 | min_movement degrees. 47 | ''' 48 | if ignore_blinks: 49 | velocity, acceleration = get_velocity(samplemat, float(Hz), blinks=samplemat.blinks) 50 | else: 51 | velocity, acceleration = get_velocity(samplemat, float(Hz)) 52 | 53 | saccades = (velocity > threshold) 54 | #print velocity[samplemat.blinks[1:]] 55 | #print saccades[samplemat.blinks[1:]] 56 | 57 | borders = np.where(np.diff(saccades.astype(int)))[0] + 1 58 | if velocity[1] > threshold: 59 | borders = np.hstack(([0], borders)) 60 | saccade = 0 * np.ones(samplemat.x.shape) 61 | 62 | # Only count saccades when acceleration also surpasses threshold 63 | for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): 64 | if sum(acceleration[start:end] > acc_thresh) >= 1: 65 | saccade[start:end] = 1 66 | 67 | borders = np.where(np.diff(saccade.astype(int)))[0] + 1 68 | if saccade[0] == 0: 69 | borders = np.hstack(([0], borders)) 70 | for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): 71 | if (1000*(end - start) / float(Hz)) < (min_duration): 72 | saccade[start:end] = 1 73 | 74 | # Delete saccades between fixations that are too close together. 75 | dists_ok = False 76 | while not dists_ok: 77 | dists_ok = True 78 | num_merges = 0 79 | for i, (lfixstart, lfixend, start, end, nfixstart, nfixend) in enumerate(zip( 80 | borders[0::2], borders[1::2], 81 | borders[1::2], borders[2::2], 82 | borders[2::2], borders[3::2])): 83 | lastx = samplemat.x[lfixstart:lfixend].mean() 84 | lasty = samplemat.y[lfixstart:lfixend].mean() 85 | nextx = samplemat.x[nfixstart:nfixend].mean() 86 | nexty = samplemat.y[nfixstart:nfixend].mean() 87 | if (1000*(lfixend - lfixstart) / float(Hz)) < (min_duration): 88 | saccade[lfixstart:lfixend] = 1 89 | continue 90 | distance = ((nextx - lastx) ** 2 + (nexty - lasty) ** 2) ** .5 91 | if distance < min_movement: 92 | num_merges += 1 93 | dists_ok = False 94 | saccade[start:end] = 0 95 | borders = np.where(np.diff(saccade.astype(int)))[0] + 1 96 | if saccade[0] == 0: 97 | borders = np.hstack(([0], borders)) 98 | return saccade.astype(bool) 99 | 100 | 101 | def fixation_detection(samplemat, saccades, Hz=200, samples2fix=None, 102 | respect_trial_borders=False, sample_times=None): 103 | ''' 104 | Detect fixations from saccades. 105 | 106 | Fixations are defined as intervals between saccades. This function 107 | also calculates start and end times (in ms) for each fixation. 108 | Input: 109 | samplemat: datamat 110 | Contains the recorded samples and associated metadata. 111 | saccades: ndarray 112 | Logical vector that is True for samples that belong to a saccade. 113 | Hz: Float 114 | Number of samples per second. 115 | samples2fix: Dict 116 | There is usually metadata associated with the samples (e.g. the 117 | trial number). This dictionary can be used to specify how the 118 | metadata should be collapsed for one fixation. It contains 119 | field names from samplemat as keys and functions as values that 120 | return one value when they are called with all samples for one 121 | fixation. In addition the function can raise an 'InvalidFixation' 122 | exception to signal that the fixation should be discarded. 
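    A minimal end-to-end sketch (illustrative only; it assumes `samples` is a
    datamat with x and y in degrees, recorded at 200 Hz):

        sacc = saccade_detection(samples, Hz=200)
        fixations_dm, fix_mask = fixation_detection(samples, sacc, Hz=200)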
123 | ''' 124 | if samples2fix is None: 125 | samples2fix = {} 126 | fixations = ~saccades 127 | acc = AccumulatorFactory() 128 | if not respect_trial_borders: 129 | borders = np.where(np.diff(fixations.astype(int)))[0] + 1 130 | else: 131 | borders = np.where( 132 | ~(np.diff(fixations.astype(int)) == 0) | 133 | ~(np.diff(samplemat.trial.astype(int)) == 0))[0] + 1 134 | 135 | fixations = 0 * saccades.copy() 136 | if not saccades[0]: 137 | borders = np.hstack(([0], borders)) 138 | #lasts,laste = borders[0], borders[1] 139 | for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])): 140 | 141 | current = {} 142 | for k in samplemat.fieldnames(): 143 | if k in list(samples2fix.keys()): 144 | current[k] = samples2fix[k](samplemat, k, start, end) 145 | else: 146 | current[k] = np.mean(samplemat.field(k)[start:end]) 147 | current['start_sample'] = start 148 | current['end_sample'] = end 149 | fixations[start:end] = 1 150 | # Calculate start and end time in ms 151 | if sample_times is None: 152 | current['start'] = 1000 * start / Hz 153 | current['end'] = 1000 * end / Hz 154 | else: 155 | current['start'] = sample_times[start] 156 | current['end'] = sample_times[end] 157 | 158 | #lasts, laste = start,end 159 | acc.update(current) 160 | 161 | return acc.get_dm(params=samplemat.parameters()), fixations.astype(bool) 162 | 163 | 164 | class InvalidFixation(Exception): 165 | pass 166 | -------------------------------------------------------------------------------- /ocupy/sim_tools.py: -------------------------------------------------------------------------------- 1 | from math import ceil 2 | import numpy as np 3 | import random 4 | from . import spline_base 5 | 6 | 7 | def anglendiff(fm, roll = 1, return_abs=False): 8 | angle_diffs = [] 9 | length_diffs = [] 10 | lengths = [] 11 | angles = [] 12 | 13 | for r in range(1, roll+1): 14 | heights = (fm.y - np.roll(fm.y,r)).astype(float) 15 | widths = (fm.x - np.roll(fm.x,r)).astype(float) 16 | 17 | heights[fm.fix<=min(fm.fix)+r-1]=float('nan') 18 | widths[fm.fix<=min(fm.fix)+r-1]=float('nan') 19 | 20 | lengths.append((widths**2+heights**2)**.5) 21 | angles.append(np.degrees(np.arctan2(heights,widths))) 22 | 23 | length_diffs.append(lengths[0] - np.roll(lengths[r-1],1)) 24 | 25 | # -360: straight saccades, -180: return saccades, 26 | # 0: straight saccades, 180: return saccades, 27 | # 360: no return saccades 28 | angle_diffs.append(angles[0] - np.roll(angles[r-1],1)) 29 | 30 | if return_abs==True: 31 | return angles, lengths, angle_diffs, length_diffs 32 | 33 | else: 34 | return angle_diffs, length_diffs 35 | 36 | def createHist(ld, ad, 37 | bins=[np.linspace(-36.5,36.5,74), np.linspace(-0.5,180.5,182)]): 38 | H, xedges, yedges = np.histogram2d(ld[~np.isnan(ld)], ad[~np.isnan(ad)], 39 | bins=bins) 40 | H = H / sum(sum(H)) 41 | #H[:,0]*=2 42 | #H[:,-1]*=2 43 | return H 44 | 45 | def compute_cumsum(fm, arg): 46 | if arg == 'la': 47 | ang, len, ad, ld = anglendiff(fm, return_abs=True) 48 | screen_diag = int(ceil((fm.image_size[0]**2+ 49 | fm.image_size[1]**2)**0.5)) 50 | y_arg = len[0][np.roll(fm.fix==min(fm.fix),1)] 51 | x_arg = reshift(ang[0][np.roll(fm.fix==min(fm.fix),1)]) 52 | bins = [list(range(screen_diag+1)), np.linspace(-180.5,180.5,362)] 53 | 54 | elif arg == 'coo': 55 | indexes = fm.fix==min(fm.fix) 56 | y_arg = fm.y[indexes] 57 | x_arg = fm.x[indexes] 58 | bins = [list(range(fm.image_size[0]+1)), list(range(fm.image_size[1]+1))] 59 | 60 | elif arg == 'len': 61 | trajLen = np.roll(fm.fix,1)[fm.fix==min(fm.fix)] 62 | val, borders = 
np.histogram(trajLen, bins=1000) 63 | cumsum = np.cumsum(val.astype(float) / val.sum()) 64 | return cumsum, borders 65 | 66 | else: 67 | raise ValueError(("Not a valid argument, "+ 68 | "choose from 'la', 'coo' and 'len'.")) 69 | 70 | H = createHist(y_arg, x_arg, bins=bins) 71 | return np.cumsum(np.concatenate(H)), H.shape 72 | 73 | def drawFrom(cumsum, borders=[]): 74 | if len(borders)==0: 75 | return (cumsum>=random.random()).nonzero()[0][0] 76 | else: 77 | return borders[(cumsum>=random.random()).nonzero()[0][0]] 78 | 79 | def reshift(I): 80 | # Output -180 to +180 81 | if type(I)==list: 82 | I = np.array(I) 83 | 84 | if type(I)==np.ndarray: 85 | while((I>180).sum()>0 or (I<-180).sum()>0): 86 | I[I>180] = I[I>180]-360 87 | I[I<-180] = I[I<-180]+360 88 | 89 | if (type(I) == int or type(I)==np.float64 or 90 | type(I)==float or type(I)==np.float): 91 | while (I>180 or I<-180): 92 | if I > 180: 93 | I-=360 94 | if I < -180: 95 | I+=360 96 | 97 | return I 98 | 99 | def spline(ad,ld,collapse=True,xdim=[-36,36]): 100 | ld = ld[~np.isnan(ld)] 101 | ad = reshift(ad[~np.isnan(ad)]) 102 | samples = list(zip(ld,ad)) 103 | 104 | if collapse: # from 0 to 181 105 | e_y = np.linspace(-36.5,36.5,74) 106 | e_x = np.linspace(-0.5,180.5,182) 107 | ad = abs(ad) 108 | K = createHist(ld,ad) 109 | H = spline_base.spline_pdf(np.array(samples), 110 | e_y, e_x, nr_knots_y = 3, nr_knots_x = 19,hist=K) 111 | 112 | else: 113 | e_x = np.linspace(-180.5,179.5,361) 114 | e_y = np.linspace(xdim[0],xdim[1],(xdim[1]*2)+1) 115 | ad[ad>179.5]-=360 116 | samples = list(zip(ld,ad)) 117 | 118 | H = spline_base.spline_pdf(np.array(samples), e_y, e_x, 119 | nr_knots_y = 3, nr_knots_x = 19) 120 | return H/H.sum() 121 | 122 | -------------------------------------------------------------------------------- /ocupy/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/tests/__init__.py -------------------------------------------------------------------------------- /ocupy/tests/fixmat_demo.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/tests/fixmat_demo.mat -------------------------------------------------------------------------------- /ocupy/tests/roc_reference.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/tests/roc_reference.mat -------------------------------------------------------------------------------- /ocupy/tests/test_bounds.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import unittest 5 | import numpy as np 6 | 7 | from ocupy import fixmat, bounds, measures 8 | 9 | 10 | class TestBounds(unittest.TestCase): 11 | def setUp(self): 12 | self.fm = fixmat.TestFixmatFactory(categories = [1,2,3], 13 | filenumbers = [1,2,3,4,5,6], 14 | subjectindices = [1, 2, 3, 4, 5, 6], 15 | params = {'pixels_per_degree':1, 'image_size':[100,500]}) 16 | measures.set_scores([measures.roc_model, 17 | measures.kldiv_model, 18 | measures.nss_model]) 19 | # test for category > 0 assertion 20 | def test_intersubject_scores(self): 21 | auc, kl, nss = bounds.intersubject_scores(self.fm, 22 | 1, [1,2,3,4], [1, 2, 3, 4], [5, 6], [5, 6]) 23 | 
self.assertTrue(auc > 0.99) 24 | auc, kl, nss = bounds.intersubject_scores(self.fm, 25 | 1, [1,2,3,4], [1, 2, 3, 4], [5, 6], [5, 6], 26 | controls = True) 27 | self.assertTrue(auc > 0.99) 28 | auc, kl, nss = bounds.intersubject_scores(self.fm, 29 | 1, [7], [7], [5, 6], [5, 6], 30 | controls = True) 31 | self.assertTrue(np.isnan(auc) and np.isnan(kl) and np.isnan(nss)) 32 | 33 | def test_intersubject_scores_2(self): 34 | auc, kl, nss = bounds.intersubject_scores_random_subjects(self.fm, 35 | 1, 1, 5, 1) 36 | self.assertTrue(auc > 0.99) 37 | self.assertRaises(ValueError, lambda : bounds.intersubject_scores_random_subjects(self.fm, 38 | 1, 1, 6, 1)) 39 | self.assertRaises(ValueError, lambda : bounds.intersubject_scores_random_subjects(self.fm, 40 | 1, 6, 1, 6)) 41 | auc, kl, nss = bounds.intersubject_scores_random_subjects(self.fm, 42 | 1, 100, 1, 5) 43 | self.assertTrue(np.isnan(auc) and np.isnan(kl) and np.isnan(nss)) 44 | 45 | def check_bounds(self, auc): 46 | self.assertEqual(len(list(auc.keys())), 3) 47 | for cat in np.unique(self.fm.category): 48 | self.assertEqual(len(auc[cat]), 6) 49 | for val in auc[cat]: 50 | self.assertTrue(val > 0.99) 51 | 52 | 53 | def test_upper_bound(self): 54 | # The test fixmat has no variance, so the upper bound 55 | # should be really high. 56 | auc, kl, nss = bounds.upper_bound(self.fm) 57 | self.check_bounds(auc) 58 | auc, kl, nss = bounds.upper_bound(self.fm, 3) 59 | self.check_bounds(auc) 60 | self.assertRaises(AssertionError, lambda: bounds.upper_bound(self.fm, 100)) 61 | 62 | def test_lower_bound(self): 63 | # The test fixmat has no variance, so the lower bound 64 | # should be really high. 65 | auc, kl, nss = bounds.lower_bound(self.fm) 66 | self.check_bounds(auc) 67 | auc, kl, nss = bounds.lower_bound(self.fm, nr_subs = 3) 68 | self.check_bounds(auc) 69 | auc, kl, nss = bounds.lower_bound(self.fm, nr_imgs = 3) 70 | self.check_bounds(auc) 71 | auc, kl, nss = bounds.lower_bound(self.fm, nr_subs = 3, nr_imgs = 3) 72 | self.check_bounds(auc) 73 | self.assertRaises(AssertionError, lambda: bounds.lower_bound(self.fm, nr_imgs = 100)) 74 | self.assertRaises(AssertionError, lambda: bounds.lower_bound(self.fm, nr_subs = 100)) 75 | 76 | def tearDown(self): 77 | self.fm = None 78 | 79 | if __name__ == '__main__': 80 | unittest.main() 81 | -------------------------------------------------------------------------------- /ocupy/tests/test_fixmat.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import os 5 | import unittest 6 | 7 | from pkgutil import get_data 8 | from tempfile import NamedTemporaryFile 9 | 10 | import numpy as np 11 | from scipy.io import loadmat 12 | 13 | from ocupy import fixmat, stimuli, loader 14 | 15 | from . 
import test_loader 16 | 17 | 18 | class TestFixmat(unittest.TestCase): 19 | 20 | # Test the interface 21 | def test_interface(self): 22 | fm = fixmat.TestFixmatFactory(categories = [1,2,3], 23 | filenumbers = [1,2,3,4,5,6], 24 | subjectindices = [1, 2, 3, 4, 5, 6], 25 | params = {'pixels_per_degree':10, 'image_size':[100,500]}) 26 | # We can produce a pretty table with the fixmats parameters by 27 | # printin it: 28 | print(fm) 29 | # Check parameter access 30 | self.assertTrue( fm.pixels_per_degree == 10) 31 | self.assertTrue( fm.image_size[0] == 100 and fm.image_size[1] == 500 ) 32 | 33 | # Check that all fields can be accessed 34 | self.assertTrue( (np.unique(fm.SUBJECTINDEX) == np.array([1,2,3,4,5,6])).all() ) 35 | self.assertTrue( (np.unique(fm.category) == np.array([1,2,3])).all() ) 36 | self.assertTrue( (np.unique(fm.filenumber) == np.array([1,2,3,4,5,6])).all() ) 37 | self.assertTrue( len(fm.x) == len(fm.y) and len(fm.y) == len(fm.SUBJECTINDEX) ) 38 | self.assertTrue( len(fm.SUBJECTINDEX) == len(fm.filenumber) ) 39 | 40 | # Test filtering 41 | fm_sub1 = fm[fm.SUBJECTINDEX == 1] 42 | self.assertTrue( (np.unique(fm_sub1.SUBJECTINDEX) == np.array([1])).all() ) 43 | self.assertTrue( (np.unique(fm_sub1.category) == np.array([1,2,3])).all() ) 44 | self.assertTrue( (np.unique(fm_sub1.filenumber) == np.array([1,2,3,4,5,6])).all() ) 45 | self.assertTrue( len(fm_sub1.x) == len(fm_sub1.y) and len(fm_sub1.y) == len(fm_sub1.SUBJECTINDEX) ) 46 | self.assertTrue( len(fm_sub1.SUBJECTINDEX) == len(fm_sub1.filenumber) ) 47 | 48 | fm_cmp = fm.filter(fm.SUBJECTINDEX == 1) 49 | for (a,b) in zip(fm_cmp.x, fm_sub1.x): self.assertTrue( a==b ) 50 | 51 | # Test save and load 52 | fm_sub1.save('/tmp/test_fixmat') 53 | fm_cmp = fixmat.load('/tmp/test_fixmat') 54 | self.compare_fixmats(fm_sub1, fm_cmp) 55 | 56 | self.assertTrue( 'pixels_per_degree' in fm.parameters() ) 57 | self.assertTrue( 'image_size' in fm.parameters() ) 58 | 59 | # Test iterating over fixmat 60 | for (img, img_mat) in zip([1,2,3,4,5,6],fm.by_field('filenumber')): 61 | self.assertEqual( len(np.unique(img_mat.filenumber)), 1 ) 62 | self.assertEqual( np.unique(img_mat.filenumber)[0], img) 63 | 64 | # Test adding fields 65 | fm.add_field('x2', fm.x) 66 | for (x1,x2) in zip(fm.x, fm.x2): 67 | self.assertEqual(x1, x2) 68 | 69 | self.assertRaises(ValueError, lambda: fm.add_field('x3', [1])) 70 | self.assertRaises(ValueError, lambda: fm.add_field('x2', fm.x)) 71 | 72 | # Test removing fields: 73 | fm.rm_field('x2') 74 | self.assertRaises(ValueError, lambda: fm.rm_field('x2')) 75 | 76 | # Add a new subject 77 | fm_add = fixmat.TestFixmatFactory(categories = [7], filenumbers = [10], subjectindices = [100], 78 | params = {'pixels_per_degree':10,'image_size':[100,500]}) 79 | fm.join(fm_add) 80 | 81 | 82 | 83 | def compare_fixmats(self, a, b): 84 | for field in a.fieldnames(): 85 | for (v1, v2) in zip(a.__dict__[field], b.__dict__[field]): 86 | self.assertEqual(v1, v2) 87 | 88 | def gen_sub(self, subind,numfix): 89 | fm = fixmat.TestFixmatFactory(subjectindices = [subind], 90 | points = [list(range(0,numfix)),list(range(0,numfix))], 91 | categories = [1,2,3,4,5,6,7], 92 | filenumbers = [1,2,3,4,5,6,7]) 93 | return fm 94 | 95 | def test_getattr(self): 96 | # Test the 97 | # Set up a fake dir structure to generate an aligned fixmat 98 | img_per_cat = {1:list(range(1,11)), 2:list(range(1,11))} 99 | features = ['a', 'b'] 100 | path, ftrpath = test_loader.create_tmp_structure(img_per_cat, features = features) 101 | l = loader.LoadFromDisk(impath = path, 102 
| ftrpath = ftrpath, 103 | size = (100,100)) 104 | inp = stimuli.Categories(l, img_per_cat, features) 105 | fm = fixmat.TestFixmatFactory(categories = [1,2], 106 | filenumbers = list(range(1,11)), 107 | subjectindices = [1, 2, 3, 4, 5, 6], 108 | params = {'pixels_per_degree':10, 'image_size':[100,100]}, 109 | categories_obj = inp) 110 | 111 | fm_err = fixmat.TestFixmatFactory(categories = [1,2], 112 | filenumbers = list(range(1,11)), 113 | subjectindices = [1, 2, 3, 4, 5, 6], 114 | params = {'pixels_per_degree':10, 'image_size':[100,100]}) 115 | 116 | # Now let's check if we can access all the images 117 | # and all the features. 118 | fm.add_feature_values(['a', 'b']) 119 | self.assertRaises(RuntimeError, lambda: fm_err.add_feature_values(['a', 'b'])) 120 | for cat_mat, cat_inp in fm.by_cat(): 121 | self.assertEqual(cat_mat.category[0], cat_inp.category) 122 | for img_mat, img_inp in cat_mat.by_filenumber(): 123 | self.assertEqual(img_mat.filenumber[0], img_inp.image) 124 | self.assertEqual(len(fm.a), len(fm.x)) 125 | self.assertEqual(len(fm.b), len(fm.x)) 126 | # Let's also check if make_reg_data works 127 | a, b = fm.make_reg_data(features) 128 | self.assertEqual(a.shape[1], len(fm.x)) 129 | self.assertEqual(a.shape[0], len(features)) 130 | self.assertEqual(b.shape[1], len(fm.x)) 131 | self.assertEqual(b.shape[0], len(features)) 132 | self.assertEqual(b.sum(), a.sum()) 133 | a, b = fm.make_reg_data(features, all_controls = True) 134 | self.assertEqual(a.shape[1], len(fm.x)) 135 | self.assertEqual(a.shape[0], len(features)) 136 | self.assertEqual(b.shape[1], len(fm.x)) 137 | self.assertEqual(b.shape[0], len(features)) 138 | self.assertEqual(b.sum(), a.sum()) 139 | test_loader.rm_tmp_structure(path) 140 | test_loader.rm_tmp_structure(ftrpath) 141 | 142 | def test_single(self): 143 | numfix = 1 144 | fm = self.gen_sub(1,numfix) 145 | for (cat, cat_mat) in enumerate(fm.by_field('category')): 146 | for (img, img_mat) in enumerate(cat_mat.by_field('filenumber')): 147 | self.assertEqual(len(img_mat.x), 1) 148 | self.assertEqual(img_mat.SUBJECTINDEX[0], 1) 149 | self.assertEqual(img_mat.filenumber[0], img+1) 150 | self.assertEqual(img_mat.category[0], cat+1) 151 | 152 | def test_multiple_subs(self): 153 | import random 154 | numfix = random.randint(10,100) 155 | numsubs = random.randint(10,20) 156 | fm_all = self.gen_sub(0,numfix) 157 | for i in range(1,numsubs): 158 | fm_all.join(self.gen_sub(i,numfix)) 159 | for (i,sub_mat) in enumerate(fm_all.by_field('SUBJECTINDEX')): 160 | for (cat,cat_mat) in enumerate(sub_mat.by_field('category')): 161 | for (img,img_mat) in enumerate(cat_mat.by_field('filenumber')): 162 | self.assertEqual(len(img_mat.x), numfix) 163 | self.assertEqual(img_mat.SUBJECTINDEX[0], i) 164 | self.assertEqual(img_mat.filenumber[0], img+1) 165 | self.assertEqual(img_mat.category[0], cat+1) 166 | 167 | def test_attribute_access(self): 168 | fm = self.gen_sub(0,100) 169 | fsi = fm.SUBJECTINDEX 170 | fx = fm.x 171 | fy = fm.y 172 | for k in range(0,100): 173 | self.assertEqual(fsi[k], 0) 174 | self.assertEqual(fx[k], k) 175 | self.assertEqual(fy[k], k) 176 | 177 | def test_factories(self): 178 | with NamedTemporaryFile(mode = 'w', prefix = 'fix_occ_test', 179 | suffix = '.mat') as ntf: 180 | ntf.write(get_data('ocupy.tests', 'fixmat_demo.mat')) 181 | ntf.seek(0) 182 | fm = fixmat.FixmatFactory(ntf.name) 183 | with NamedTemporaryFile(mode = 'w', prefix = 'fix_occ_test', 184 | suffix = '.mat') as ntf: 185 | ntf.write(get_data('ocupy.tests', 'fixmat_demo.mat')) 186 | ntf.seek(0) 187 | 
fm2 = fixmat.DirectoryFixmatFactory(os.path.dirname(ntf.name), glob_str = 'fix_occ_test*.mat' ) 188 | self.compare_fixmats(fm, fm2) 189 | self.assertRaises(ValueError, lambda: fixmat.DirectoryFixmatFactory('.', glob_str = 'xxx*.mat' )) 190 | 191 | def test_cmp2fixmat(self): 192 | # This test only works with some scipy versions. 193 | with NamedTemporaryFile(mode = 'w', prefix = 'fix_occ_test', 194 | suffix = '.mat') as ntf: 195 | ntf.write(get_data('ocupy.tests', 'fixmat_demo.mat')) 196 | ntf.seek(0) 197 | fm = fixmat.FixmatFactory(ntf.name) 198 | with NamedTemporaryFile(mode = 'w', prefix = 'fix_occ_test', 199 | suffix = '.mat') as ntf: 200 | ntf.write(get_data('ocupy.tests', 'fixmat_demo.mat')) 201 | ntf.seek(0) 202 | fm_ref = loadmat(ntf.name, struct_as_record=True)['fixmat'][0][0] 203 | for field in fm._fields: 204 | l1 = fm_ref[field] 205 | l2 = fm.__dict__[field] 206 | self.assertEqual(l1.size, l2.size) 207 | self.assertTrue((l1 == l2).all()) 208 | 209 | def test_copying(self): 210 | fm = fixmat.TestFixmatFactory(categories = [7], 211 | filenumbers = [10], 212 | subjectindices = [100], 213 | params = {'pixels_per_degree':10,'image_size':[100,500]}) 214 | fm.x[0] = 18728001 215 | fm_copied = fm.copy() 216 | fm_copied.pixels_per_degree = 100 217 | self.assertFalse(fm.pixels_per_degree == fm_copied.pixels_per_degree) 218 | fm_copied.x[0] = 1 219 | self.assertFalse(fm.x[0] == fm_copied.x[0]) 220 | 221 | 222 | if __name__ == '__main__': 223 | unittest.main() 224 | -------------------------------------------------------------------------------- /ocupy/tests/test_fixmat_compute_fdm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from scipy.ndimage.filters import gaussian_filter 8 | 9 | from ocupy import fixmat 10 | 11 | 12 | class TestComputeFDM(unittest.TestCase): 13 | """``Test_compute_fdm(unittest.TestCase)`` 14 | Test the compute_fdm method of the fixmat class. 15 | What is tested for: 16 | - Assertions: The Program should throw an exception if: 17 | - the image_size attribute is empty, 18 | - fixations are from category == -1 19 | - fixations are not on the image 20 | - The resulting map should have the size as specified in 21 | image_size or as scale_factor*image_size 22 | - Values in the fdm should sum to 1 / the fdm should be a probability density 23 | distribution 24 | """ 25 | def setUp(self): 26 | self.fm = fixmat.TestFixmatFactory() 27 | 28 | # test for empty-image-size-assertion 29 | def test_empty_image_size(self): 30 | self.fm.image_size = [] 31 | self.fm.filter( self.fm.category > 0) 32 | self.assertRaises(AssertionError, fixmat.compute_fdm,self.fm) 33 | 34 | def test_scale_factor_assertion(self): 35 | """``test_scale_factor_assertion(self)`` 36 | Tests for assertion that scaling factor is greater than 0 37 | """ 38 | self.assertRaises(AssertionError, 39 | fixmat.compute_fdm,self.fm,scale_factor = -1) 40 | 41 | def test_no_fixations(self): 42 | """ ``test_no_fixations(self)`` 43 | Tests the assertion that the fixmat has at least one fixation. 44 | """ 45 | #returns an empty fixmat 46 | fm = self.fm.filter(self.fm.category == 3) 47 | self.assertRaises(RuntimeError, fixmat.compute_fdm, fm) 48 | 49 | # tests for properties of a probability density function, i.e. 
50 | # elements must sum to one and be in the range of [0 1] 51 | def test_pdf(self): 52 | fdm = fixmat.compute_fdm(self.fm) 53 | # ensure that we have a probability density function 54 | if fdm.min()< 0: 55 | fdm = fdm - fdm.min() 56 | self.assertFalse(abs(fdm.sum()-1) > np.finfo('single').eps) 57 | 58 | def test_corners(self): 59 | """ ``test_corners(self)`` 60 | Tests whether handling of fixations in the corners is correct. 61 | It manually generates an fdm with four fixations in the corners of a 62 | 922x1272 array and compares it to the map generated by compute_fdm. 63 | The difference between the maps must not be larger than the machine 64 | precision for floats. 65 | """ 66 | yvec = [922, 922,0 ,0] 67 | xvec = [1272,0 ,1272,0] 68 | self.fm = fixmat.TestFixmatFactory(points = (xvec, yvec)) 69 | fdm = fixmat.compute_fdm(self.fm) 70 | # manually calculate the fdm 71 | fdm_man = np.zeros((922,1272)) 72 | fdm_man[0][0] = 1 73 | fdm_man[921][1271] = 1 74 | fdm_man[0][1271] = 1 75 | fdm_man[921][0] = 1 76 | # use default settings for fwhm, pixels_per_degree and scale_factor 77 | kernel_sigma = 2 * 36 * 1 78 | kernel_sigma = kernel_sigma / (2 * (2 * np.log(2)) ** .5) 79 | fdm_man = gaussian_filter(np.array(fdm_man), kernel_sigma, order=0, 80 | mode='constant') 81 | fdm_man = fdm_man / fdm_man.sum() 82 | diff = fdm - fdm_man 83 | self.assertFalse((diff > np.finfo('float').eps).any()) 84 | 85 | def test_scaling(self): 86 | """``test_scaling(self)`` 87 | Tests that the size of the resulting fdm corresponds to 88 | image_size*scale_factor. Tests with different image sizes. 89 | """ 90 | size = self.fm.image_size 91 | sf = [0.6, 1.6, 1.0/2.0] 92 | # if the scaling factor is not a float, this does not work but seems to 93 | # be a problem of the inbuilt function. 
94 | for i in range(len(sf)): 95 | fdm = fixmat.compute_fdm(self.fm,scale_factor = sf[i]) 96 | self.assertEqual((int(size[0]*sf[i]),int(size[1]*sf[i])), 97 | np.shape(fdm)) 98 | 99 | def test_relative_bias(self): 100 | fm = fixmat.TestFixmatFactory(categories = [1,2], 101 | filenumbers = list(range(1,11)), 102 | subjectindices = [1, 2, 3, 4, 5, 6], 103 | params = {'pixels_per_degree':1, 'image_size':[10,10]}) 104 | rb = fixmat.relative_bias(fm) 105 | self.assertEqual(rb.shape,(21, 21)) 106 | self.assertEqual(rb[9][9],960) 107 | 108 | def tearDown(self): 109 | self.fm = None 110 | 111 | if __name__ == '__main__': 112 | unittest.main() 113 | -------------------------------------------------------------------------------- /ocupy/tests/test_loader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | from os.path import join 5 | import unittest 6 | from tempfile import mkdtemp 7 | 8 | from scipy.io import savemat 9 | import Image 10 | import numpy as np 11 | 12 | from ocupy import loader 13 | 14 | 15 | class TestLoader(unittest.TestCase): 16 | 17 | #Test the interface 18 | def test_load_from_disk(self): 19 | # Generate a tmp fake data structure 20 | img_per_cat = {2:list(range(1,10)), 8:list(range(30,40)), 111:list(range(6,15))} 21 | path,_ = create_tmp_structure(img_per_cat) 22 | l = loader.LoadFromDisk(impath = path) 23 | for cat in list(img_per_cat.keys()): 24 | for image in img_per_cat[cat]: 25 | l.get_image(cat,image) 26 | self.assertTrue(l.test_for_category(cat)) 27 | self.assertTrue(l.test_for_image(cat, image)) 28 | # Test checks for non existing images 29 | no_img_per_cat = {0:list(range(1,10)), 7:list(range(30,40)), 110:list(range(6,15))} 30 | for cat in list(no_img_per_cat.keys()): 31 | for image in no_img_per_cat[cat]: 32 | self.assertTrue(not l.test_for_category(cat)) 33 | self.assertTrue(not l.test_for_image(cat, image)) 34 | no_img_per_cat = {2:list(range(11,20)), 8:list(range(20,30)), 111:list(range(16,40))} 35 | for cat in list(no_img_per_cat.keys()): 36 | for image in no_img_per_cat[cat]: 37 | self.assertTrue(not l.test_for_image(cat, image)) 38 | rm_tmp_structure(path) 39 | 40 | def test_load_from_disk_scaling(self): 41 | # Generate a tmp fake data structure 42 | img_per_cat = {2:list(range(1,10)), 8:list(range(30,40)), 111:list(range(6,15))} 43 | path,_ = create_tmp_structure(img_per_cat) 44 | l = loader.LoadFromDisk(impath = path, size = (10,10)) 45 | for cat in list(img_per_cat.keys()): 46 | for image in img_per_cat[cat]: 47 | im = l.get_image(cat,image) 48 | self.assertTrue(im.shape[0] == 10 and im.shape[1] == 10) 49 | self.assertTrue(l.test_for_category(cat)) 50 | self.assertTrue(l.test_for_image(cat, image)) 51 | rm_tmp_structure(path) 52 | 53 | def test_save_to_disk(self): 54 | path = mkdtemp() 55 | ftrpath = mkdtemp() 56 | im_tmp = np.ones((100,100)) 57 | l = loader.SaveToDisk(impath = path,ftrpath = ftrpath, size = (10,10)) 58 | # Generate a tmp fake data structure 59 | img_per_cat = {2:list(range(1,10)), 8:list(range(30,40)), 111:list(range(6,15))} 60 | for cat in list(img_per_cat.keys()): 61 | # Create category dir 62 | for image in img_per_cat[cat]: 63 | l.save_image(cat, image, im_tmp) 64 | for f in ['a', 'b']: 65 | l.save_feature(cat, image, f, np.ones((10,10))) 66 | l = loader.LoadFromDisk(impath = path, ftrpath = ftrpath, size = (10,10)) 67 | for cat in list(img_per_cat.keys()): 68 | for image in img_per_cat[cat]: 69 | im = l.get_image(cat,image) 70 | self.assertTrue(im.shape[0] == 
10 and im.shape[1] == 10) 71 | self.assertTrue(l.test_for_category(cat)) 72 | self.assertTrue(l.test_for_image(cat, image)) 73 | self.assertTrue(l.test_for_feature(cat, image, 'a'), True) 74 | os.system('rm -rf %s' %path) 75 | os.system('rm -rf %s' %ftrpath) 76 | 77 | 78 | def test_testloader(self): 79 | img_per_cat = {1: list(range(1,10)), 2: list(range(1,10))} 80 | l = loader.TestLoader(img_per_cat = img_per_cat, features = ['a', 'b']) 81 | for cat in range(1,10): 82 | for img in range(1,100): 83 | if cat in list(img_per_cat.keys()) and img in img_per_cat[cat]: 84 | self.assertEqual(l.test_for_category(cat), True) 85 | self.assertEqual(l.test_for_image(cat, img), True) 86 | self.assertEqual(l.test_for_feature(cat, img, 'a'), True) 87 | l.get_image(cat, img) 88 | l.get_feature(cat, img, 'a') 89 | l.get_feature(cat, img, 'b') 90 | self.assertTrue(l.test_for_feature(cat, img, 'a')) 91 | else: 92 | self.assertEqual(l.test_for_image(cat, img), False) 93 | self.assertRaises(IndexError, lambda: l.get_image(cat, img)) 94 | 95 | def create_tmp_structure(img_per_cat, features = None): 96 | im_tmp = Image.fromarray(np.ones((1,1))).convert('RGB') 97 | path = mkdtemp() 98 | ftr_path = None 99 | if features: 100 | ftr_path = mkdtemp() 101 | for cat in list(img_per_cat.keys()): 102 | # Create category dir 103 | os.mkdir(join(path, str(cat))) 104 | if features: 105 | for feature in features: 106 | os.makedirs(join(ftr_path, str(cat), str(feature))) 107 | for image in img_per_cat[cat]: 108 | im_tmp.save(join(path,str(cat),'%i_%i.png'%(cat, image))) 109 | if features: 110 | for feature in features: 111 | savemat(join(ftr_path,str(cat), 112 | feature, '%i_%i.mat'%(cat, image)), 113 | {'output':np.ones((1,1))}) 114 | return path, ftr_path 115 | 116 | def rm_tmp_structure(path): 117 | os.system('rm -rf %s' %path) 118 | 119 | 120 | 121 | if __name__ == '__main__': 122 | unittest.main() 123 | -------------------------------------------------------------------------------- /ocupy/tests/test_measures.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from ocupy import measures 4 | from ocupy import fixmat 5 | import scipy 6 | 7 | class TestMeasures(unittest.TestCase): 8 | 9 | def test_prediction_scores(self): 10 | measures.set_scores([measures.roc_model, 11 | measures.kldiv_model, 12 | measures.nss_model]) 13 | fm = fixmat.TestFixmatFactory(categories = [1,2,3], 14 | filenumbers = [1,2,3,4,5,6], 15 | subjectindices = [1, 2, 3, 4, 5, 6], 16 | params = {'pixels_per_degree':10, 'image_size':[100,500]}) 17 | fdm = fixmat.compute_fdm(fm) 18 | measures.set_scores([measures.roc_model, 19 | measures.kldiv_model, 20 | measures.nss_model]) 21 | scores = measures.prediction_scores(fdm, fm) 22 | self.assertEqual(len(scores), 3) 23 | measures.set_scores([measures.roc_model]) 24 | scores = measures.prediction_scores(fdm, fm) 25 | self.assertEqual(len(scores), 1) 26 | measures.set_scores([measures.roc_model, 27 | measures.kldiv_model, 28 | measures.nss_model]) 29 | scores = measures.prediction_scores(fdm, fm) 30 | self.assertEqual(len(scores), 3) 31 | 32 | def test_kldiv(self): 33 | arr = scipy.random.random((21,13)) 34 | fm = fixmat.TestFixmatFactory(categories = [1,2,3], 35 | filenumbers = [1,2,3,4,5,6], 36 | subjectindices = [1, 2, 3, 4, 5, 6], 37 | params = {'pixels_per_degree':10, 'image_size':[100,500]}) 38 | 39 | kl = measures.kldiv(arr, arr) 40 | self.assertEqual(kl, 0, 41 | "KL Divergence between same distribution should be 0") 42 | kl = 
measures.kldiv(None, None, distp = fm, distq = fm, scale_factor = 0.25) 43 | self.assertEqual(kl, 0, 44 | "KL Divergence between same distribution should be 0") 45 | fdm = fixmat.compute_fdm(fm) 46 | kl = measures.kldiv_model(fdm, fm) 47 | self.assertTrue(kl < 10**-13, 48 | "KL Divergence between same distribution should be almost 0") 49 | fm.x = np.array([]) 50 | fm.y = np.array([]) 51 | 52 | kl = measures.kldiv(None, None, distp = fm, distq = fm, scale_factor = 0.25) 53 | self.assertTrue(np.isnan(kl)) 54 | 55 | def test_correlation(self): 56 | fm = fixmat.TestFixmatFactory(categories = [1,2,3], 57 | filenumbers = [1,2,3,4,5,6], 58 | subjectindices = [1, 2, 3, 4, 5, 6], 59 | params = {'pixels_per_degree':1, 'image_size':[100,500]}) 60 | # Arr has zero variance, should return nan 61 | arr = np.ones(fm.image_size) 62 | corr = measures.correlation_model(arr, fm) 63 | self.assertTrue(np.isnan(corr)) 64 | # With itself should give 1 65 | fdm = fixmat.compute_fdm(fm) 66 | corr = measures.correlation_model(fdm,fm) 67 | self.assertEqual(corr,1) 68 | # Anti-correlation should give -1 69 | corr = measures.correlation_model(-1*fdm,fm) 70 | self.assertEqual(corr,-1) 71 | 72 | def test_nss_values(self): 73 | fm = fixmat.TestFixmatFactory(categories = [1,2,3], 74 | filenumbers = [1,2,3,4,5,6], 75 | subjectindices = [1, 2, 3, 4, 5, 6], 76 | params = {'pixels_per_degree':0.1, 'image_size':[200,500]}) 77 | # Arr has zero variance, should return nan 78 | arr = np.ones(fm.image_size) 79 | nss = measures.nss_model(arr, fm) 80 | self.assertTrue(np.isnan(nss)) 81 | # With itself should yield a high value 82 | fdm = fixmat.compute_fdm(fm) 83 | nss = measures.nss_model(fdm, fm) 84 | self.assertTrue(nss>15) 85 | # Fixations at these locations should give nss < 0 86 | nss = measures.nss(fdm, [[100, 101, 102, 103, 104, 105],[0, 0, 0, 0, 0, 0]]) 87 | self.assertTrue(nss < 0) 88 | 89 | def test_emd(self): 90 | try: 91 | import opencv 92 | except ImportError: 93 | print("Skipping EMD test - no opencv available") 94 | return 95 | opencv # pyflakes 96 | fm = fixmat.TestFixmatFactory(categories = [1,2,3], 97 | filenumbers = [1,2,3,4,5,6], 98 | subjectindices = [1, 2, 3, 4, 5, 6], 99 | params = {'pixels_per_degree':1, 'image_size':[20,50]}) 100 | arr = np.ones(fm.image_size) 101 | fdm = fixmat.compute_fdm(fm) 102 | e = measures.emd_model(arr, fm) 103 | self.assertTrue(e > 0) 104 | e = measures.emd(fdm, fdm) 105 | self.assertEqual(e, 0) 106 | 107 | 108 | def test_fast_roc(self): 109 | self.assertTrue(measures.fast_roc([1],[0])[0] == 1) 110 | self.assertTrue(measures.fast_roc([1],[1])[0] == 0.5) 111 | #self.assertTrue(measures.roc([0],[1])[0] == 0) 112 | self.assertTrue(np.isnan(measures.fast_roc([],[1])[0])) 113 | self.assertTrue(np.isnan(measures.fast_roc([1],[])[0])) 114 | self.assertRaises(RuntimeError, lambda: measures.fast_roc([np.nan],[0])) 115 | self.assertRaises(RuntimeError, lambda: measures.fast_roc([0],[np.nan])) 116 | 117 | # test auc linearity 118 | actuals = np.random.standard_normal(1000)+2 119 | controls = np.random.standard_normal(1000) 120 | auc_complete = measures.fast_roc(actuals, controls)[0] 121 | auc_partial = [measures.fast_roc(actuals[k*100:(k+1)*100],controls)[0] for k in range(10)] 122 | self.assertAlmostEqual(auc_complete,np.array(auc_partial).mean()) 123 | 124 | # test symmetry 125 | actuals = np.random.standard_normal(1000)+2 126 | controls = np.random.standard_normal(1000) 127 | self.assertAlmostEqual(measures.fast_roc(actuals, controls)[0] + measures.fast_roc(controls, actuals)[0],1) 128 | 
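    # A usage sketch for the AUC measures exercised above (illustrative only;
    # actuals and controls are 1-d arrays of values, e.g. predicted saliency
    # at fixated vs. control locations):
    #
    #   auc_fast = measures.fast_roc(actuals, controls)[0]
    #   auc_exact = measures.exact_roc(actuals, controls)[0]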
129 | 130 | def test_exact_roc(self): 131 | self.assertTrue(measures.exact_roc([1],[0])[0] == 1) 132 | self.assertTrue(measures.exact_roc([1],[1])[0] == 0.5) 133 | self.assertTrue(measures.exact_roc([0],[1])[0] == 0) 134 | self.assertTrue(np.isnan(measures.exact_roc([],[1])[0])) 135 | self.assertTrue(np.isnan(measures.exact_roc([1],[])[0])) 136 | self.assertRaises(RuntimeError, lambda: measures.exact_roc([np.nan],[0])) 137 | self.assertRaises(RuntimeError, lambda: measures.exact_roc([0],[np.nan])) 138 | # test auc linearity 139 | actuals = np.random.standard_normal(1000)+2 140 | controls = np.random.standard_normal(1000) 141 | auc_complete = measures.exact_roc(actuals, controls)[0] 142 | auc_partial = [measures.exact_roc(actuals[k*100:(k+1)*100],controls)[0] for k in range(10)] 143 | self.assertAlmostEqual(auc_complete,np.array(auc_partial).mean()) 144 | 145 | # test symmetry 146 | actuals = np.random.standard_normal(1000)+2 147 | controls = np.random.standard_normal(1000) 148 | self.assertAlmostEqual(measures.exact_roc(actuals, controls)[0] + measures.exact_roc(controls, actuals)[0],1) 149 | 150 | def skip_intersubject_auc(self): 151 | points = list(zip(list(range(1,16)),list(range(1,16)))) 152 | fm = fixmat.TestFactory(points = points, 153 | filenumbers = list(range(1,11)), subjectindices = list(range(1,11))) 154 | (auc1, _, _) = measures.intersubject_scores_random_subjects(fm, 1, 1, 2, 2, False) 155 | (auc2, _, _) = measures.intersubject_scores_random_subjects(fm, 1, 1, 2, 2, True) 156 | 157 | def skip_intersubject_auc_scaled(self): 158 | points = list(zip(list(range(1,16)),list(range(1,16)))) 159 | fm = fixmat.TestFactory(points = points, 160 | filenumbers = list(range(1,11)), subjectindices = list(range(1,11))) 161 | (auc1, _, _) = measures.intersubject_scores(fm, 1,[1], [1], [2], [2], controls=False, scale_factor = 0.5) 162 | (auc2, _, _) = measures.intersubject_scores(fm,1, [1], [1], [2],[2], controls=True, scale_factor = 0.5) 163 | 164 | 165 | def test_nss(self): 166 | fm = fixmat.TestFixmatFactory(points=list(zip([0,500,1000],[1,10,10])), 167 | params = {'image_size':[100,10]}) 168 | fm.SUBJECTINDEX = np.array([1,1,1]) 169 | fm.filenumber = np.array([1,1,1]) 170 | fm.category = np.array([1,1,1]) 171 | fm.x = np.array([0,50,1000]) 172 | fm.y = np.array([1,10,10]) 173 | fm.fix = np.array([1,2,3]) 174 | fm._num_fix = 3 175 | fdm = fixmat.compute_fdm(fm[(fm.x<10) & (fm.y<10)]) 176 | self.assertRaises(IndexError, lambda: measures.nss(fdm, (fm.y, fm.x))) 177 | 178 | def skip_emd(self): 179 | fm1 = fixmat.TestFactory(params = {'image_size':[93,128]}) 180 | fm2 = fixmat.TestFactory(points=list(zip(list(range(10,50)),list(range(10,50)))),params = {'image_size':[93,128]}) 181 | self.assertEqual(measures.emd_model(fixmat.compute_fdm(fm1), fm1), 0) 182 | self.assertTrue(not (measures.emd_model(fixmat.compute_fdm(fm1), fm2) == 0)) 183 | 184 | 185 | if __name__ == '__main__': 186 | unittest.main() 187 | -------------------------------------------------------------------------------- /ocupy/tests/test_spline_base.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import random 3 | import numpy as np 4 | 5 | from ocupy import measures, spline_base as sb 6 | 7 | from scipy.stats.kde import gaussian_kde 8 | from scikits.learn import linear_model 9 | 10 | class Test1DSplines(unittest.TestCase): 11 | 12 | def skip_cosine_fit(self): 13 | x = np.cos(np.linspace(0,np.pi*2, 99)) 14 | noise = (np.random.random((99,))-.5) 15 | for num_splines in 
range(5,20,2): 16 | splines = sb.spline_base1d(99, num_splines, spline_order = 5) 17 | model = linear_model.BayesianRidge() 18 | model.fit(splines, x+noise) 19 | y = model.predict(splines) 20 | self.assertTrue(np.corrcoef(x+noise, y)[0,1]**2>0.7) 21 | 22 | class Test2DSplines(unittest.TestCase): 23 | 24 | def skip_2Dcosine_fit(self): 25 | x = np.cos(np.linspace(0,np.pi*2, 99)) 26 | X = np.dot(x[:,np.newaxis],x[:,np.newaxis].T) 27 | noise = (np.random.random(X.shape)-.5)*0.1 28 | for num_splines in range(2,20,2): 29 | splines = sb.spline_base2d(99,99, num_splines, spline_order = 3) 30 | model = linear_model.BayesianRidge() 31 | model.fit(splines.T, (X+noise).flat) 32 | y = model.predict(splines.T) 33 | self.assertTrue( np.corrcoef(X+noise, y.reshape(X.shape))[0,1]**2 > 0.7) 34 | 35 | class TestCompareMethods(object): 36 | 37 | def setUp(self): 38 | # Generate fake density 39 | self.shape = (40,120) 40 | self.base = sb.spline_base2d(self.shape[0],self.shape[1], 20, 20, 10) 41 | self.coef = np.random.random((self.base.shape[0],)) 42 | self.target = np.dot(self.base.T, self.coef).reshape(self.shape) 43 | self.target += np.min(self.target.flat) 44 | self.target = self.target /np.sum(self.target) 45 | self.csum = np.cumsum(self.target.flat) 46 | 47 | def new_samples(self): 48 | self.train_samples = [np.unravel_index(draw_from(self.csum), self.shape) for x in range(5000)] 49 | xs = [x for x,y in self.train_samples] 50 | ys = [y for x,y in self.train_samples] 51 | self.train_samples = np.array((xs,ys)).T 52 | 53 | 54 | def test_compare(self): 55 | kls = [] 56 | for k in range(5): 57 | self.new_samples() 58 | kls += self.eval_fits() 59 | print(np.mean(np.array(kls))) 60 | 61 | def eval_fits(self): 62 | print('Fit Splines') 63 | spline_fit, hist = sb.fit2d(self.train_samples, 64 | np.linspace(0,self.shape[0], self.shape[0]+1), 65 | np.linspace(0,self.shape[1], self.shape[1]+1)) 66 | spline_kl = measures.kldiv(spline_fit, self.target) 67 | hist_kl = measures.kldiv(hist, self.target) 68 | try: 69 | print('Fit KDE') 70 | kde_est = gaussian_kde(self.train_samples) 71 | x,y = np.mgrid[0:self.shape[0], self.shape[1]] 72 | kde_fit = kde_est.evaluate((np.array(x), np.array(y))) 73 | kde_kl = measures.kldiv(kde_fit, self.target) 74 | except: 75 | kde_kl = np.nan 76 | print('LinAlgError') 77 | print(spline_kl, hist_kl, kde_kl) 78 | return spline_kl, hist_kl, kde_kl 79 | 80 | def r_squared(x,y): 81 | return (1 - np.linalg.norm(x - y)**2 / np.linalg.norm(x)**2) 82 | 83 | def draw_from(cumsum, borders=None): 84 | """ 85 | Draws a value from a cumulative sum. 86 | 87 | Parameters: 88 | cumsum : array 89 | Cumulative sum from which shall be drawn. 90 | borders : array, optional 91 | If given, sets the value borders for entries in the cumsum-vector. 92 | Returns: 93 | int : Index of the cumulative sum element drawn. 94 | """ 95 | if borders is None: 96 | return (cumsum>=random.random()).nonzero()[0][0] 97 | else: 98 | return borders[(cumsum>=random.random()).nonzero()[0][0]] 99 | 100 | if __name__ == '__main__': 101 | test = TestCompareMethods() 102 | test.setUp() 103 | test.test_compare() 104 | -------------------------------------------------------------------------------- /ocupy/tests/test_stimuli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import os 5 | from pkgutil import get_data 6 | import unittest 7 | from tempfile import NamedTemporaryFile 8 | 9 | from ocupy import fixmat, stimuli, loader 10 | 11 | from . 
import test_loader 12 | 13 | class TestStimuli(unittest.TestCase): 14 | 15 | def setUp(self): 16 | self.features = list(range(1,11)) 17 | self.inputs = {'a':{1:self.features,2:self.features,3:self.features},'b':{10:self.features}} 18 | self.test_loader = loader.TestLoader(self.inputs) 19 | self.inp = stimuli.Categories(self.test_loader,self.inputs) 20 | 21 | 22 | def test_directory_stimuli_factory(self): 23 | img_per_cat = {1:list(range(1,11)), 2:list(range(1,11))} 24 | features = ['a', 'b'] 25 | path, ftrpath = test_loader.create_tmp_structure(img_per_cat, features = features) 26 | l = loader.LoadFromDisk(impath = path, 27 | ftrpath = ftrpath, 28 | size = (100,100)) 29 | inp = stimuli.DirectoryStimuliFactory(l) 30 | for cat in list(img_per_cat.keys()): 31 | inp[cat] 32 | for img in img_per_cat[cat]: 33 | inp[cat][img] 34 | inp[cat][img]['a'] 35 | inp[cat][img].data 36 | 37 | test_loader.rm_tmp_structure(path) 38 | test_loader.rm_tmp_structure(ftrpath) 39 | 40 | 41 | def test_load_phantom_input(self): 42 | self.assertRaises(IndexError, lambda : self.inp['c']) 43 | self.assertRaises(IndexError, lambda : self.inp['a'][5]) 44 | self.assertRaises(IndexError, lambda : self.inp['a'][1][55]) 45 | 46 | 47 | def test_fixations(self): 48 | img_per_cat = {7:list(range(1,65)),8:list(range(1,65))} 49 | l = loader.TestLoader(img_per_cat, size = (10,10)) 50 | with NamedTemporaryFile(mode = 'w', prefix = 'fix_occ_test', 51 | suffix = '.mat') as ntf: 52 | ntf.write(get_data('ocupy.tests', 'fixmat_demo.mat')) 53 | ntf.seek(0) 54 | fm = fixmat.FixmatFactory(ntf.name) 55 | fm = fm[fm.category>0] 56 | inp = stimuli.FixmatStimuliFactory(fm,l) 57 | # Now we can iterate over the input object and get fixations on each image 58 | for cat in inp: 59 | for img in cat: 60 | self.assertEqual(img.fixations.filenumber[0], img.image) 61 | self.assertEqual(img.fixations.category[0], img.category) 62 | inp = stimuli.Categories(l, img_per_cat) 63 | self.assertRaises(RuntimeError, lambda : inp.fixations) 64 | self.assertRaises(RuntimeError, lambda : inp[7].fixations) 65 | self.assertRaises(RuntimeError, lambda : inp[7][1].fixations) 66 | 67 | def test_general(self): 68 | # Create an input object: there are two ways; one is to construct a stimuli.Categories object directly, as done here 69 | features = ['a','b','c','d'] 70 | img_per_cat = {7:list(range(1,65)),8:list(range(1,65))} 71 | l = loader.TestLoader(img_per_cat,features, size = (10,10)) 72 | inp = stimuli.Categories(l, img_per_cat, features) 73 | # In this case it should have two categories (7,8) with 64 images each 74 | # Let's check this 75 | self.assertTrue(7 in inp.categories()) 76 | self.assertTrue(8 in inp.categories()) 77 | self.assertEqual(len(inp[7].images()), 64) 78 | self.assertEqual(len(inp[8].images()), 64) 79 | # Good, now we can access some data 80 | img = inp[7][16].data 81 | self.assertTrue(img.shape[0] == 10 and img.shape[1] == 10) 82 | # In general we can use [] to access elements in the object 83 | # But we can also iterate through these objects 84 | # This should take a long time because it loads all images 85 | # from disk 86 | for cat in inp: 87 | for img in cat: 88 | img.image 89 | img.category 90 | img.data 91 | 92 | def test_fixmat_input_factory(self): 93 | # Another way to create an input object is with the 94 | # FixmatInputFactory 95 | features = ['a','b','c','d'] 96 | img_per_cat = {7:list(range(1,65)),8:list(range(1,65))} 97 | l = loader.TestLoader(img_per_cat,features, size = (10,10)) 98 | with NamedTemporaryFile(mode = 'w', prefix = 'fix_occ_test', 99 | suffix = '.mat') as ntf: 100 | 
ntf.write(get_data('ocupy.tests', 'fixmat_demo.mat')) 101 | ntf.seek(0) 102 | fm = fixmat.FixmatFactory(ntf.name) 103 | fm = fm[fm.category>0] 104 | inp = stimuli.FixmatStimuliFactory(fm,l) 105 | # Now we can iterate over the input object and get fixations on each image 106 | for cat in inp: 107 | for img in cat: 108 | self.assertEqual(img.fixations.filenumber[0], img.image) 109 | self.assertEqual(img.fixations.category[0], img.category) 110 | 111 | 112 | def compare_fixmats(self, a,b): 113 | for field in a.fieldnames(): 114 | for (v1, v2) in zip(a.__dict__[field], b.__dict__[field]): 115 | self.assertEqual(v1, v2) 116 | 117 | 118 | 119 | if __name__ == '__main__': 120 | unittest.main() 121 | -------------------------------------------------------------------------------- /ocupy/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import unittest 5 | import numpy as np 6 | 7 | from ocupy import fixmat, utils 8 | 9 | class TestUtils(unittest.TestCase): 10 | def setUp(self): 11 | self.fm = fixmat.TestFixmatFactory(categories = [1,2,3], 12 | filenumbers = [1,2,3,4,5,6], 13 | subjectindices = [1, 2, 3, 4, 5, 6], 14 | params = {'pixels_per_degree':1, 'image_size':[100,500]}) 15 | 16 | def test_imresize(self): 17 | arr = np.random.random((121, 111)) 18 | arr_small = utils.imresize(arr, (121, 111)) 19 | self.assertTrue(((arr-arr_small)**2).sum() < 10**-10) 20 | 21 | def test_ismember(self): 22 | a = np.array(list(range(1,100,2))) 23 | for x in range(1,100,2): 24 | self.assertEqual(utils.ismember(x, a), [True]) 25 | for x in range(2,100,2): 26 | self.assertEqual(utils.ismember(x, a), [False]) 27 | base = list(range(1, 100, 2)) 28 | base.extend(base) 29 | a = np.array(base) 30 | for x in range(1,100,2): 31 | self.assertEqual(utils.ismember(x, a.astype(float)), [True]) 32 | for x in range(2,100,2): 33 | self.assertEqual(utils.ismember(x, a.astype(float)), [False]) 34 | 35 | def test_dict_2_mat(self): 36 | d = {2:list(range(1,100)), 3:list(range(1,10)), 4: list(range(1,110))} 37 | self.assertRaises(RuntimeError, lambda: utils.dict_2_mat(d)) 38 | 39 | d = {2:list(range(1,100)), 3:list(range(1,100)), 4: list(range(1,100))} 40 | m = utils.dict_2_mat(d) 41 | for c in range(1, 5): 42 | if c in list(d.keys()): 43 | self.assertTrue(((np.array(d[c]) == m[c]).all())) 44 | else: 45 | self.assertTrue(np.isnan(m[c]).all()) 46 | 47 | 48 | if __name__ == '__main__': 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /ocupy/tests/test_xvalidation.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | 5 | from ocupy import fixmat, stimuli, loader 6 | from ocupy.xvalidation import SimpleXValidation 7 | 8 | class TestXValidation(unittest.TestCase): 9 | 10 | def test_xvalidation(self): 11 | img_per_cat = {} 12 | [img_per_cat.update({cat: list(range(1,50))}) for cat in range(1,11)] 13 | fm = fixmat.TestFixmatFactory(categories = list(range(1,11)), subjectindices = list(range(1,11)), 14 | filenumbers = list(range(1,11))) 15 | l = loader.TestLoader(img_per_cat,size = (10,10)) 16 | stim = stimuli.FixmatStimuliFactory(fm,l) 17 | img_ratio = 0.3 18 | sub_ratio = 0.3 19 | data_slices = SimpleXValidation(fm, stim,img_ratio,sub_ratio,10) 20 | for i,(fm_train, cat_train, fm_test, cat_test) in enumerate(data_slices.generate()): 21 | self.assertEqual (len(np.unique(fm_train.SUBJECTINDEX)), 22 
| round(len(np.unique(fm.SUBJECTINDEX))*(1-sub_ratio))) 23 | self.assertEqual(len(np.unique(fm_test.SUBJECTINDEX)), 24 | round(len(np.unique(fm.SUBJECTINDEX))*(sub_ratio))) 25 | for (test,train,reference) in zip(cat_test, 26 | cat_train, 27 | stim): 28 | self.assertEqual(test.category, reference.category) 29 | self.assertEqual(len(test.images()), 30 | len(reference.images())*img_ratio) 31 | self.assertEqual(len(train.images()), 32 | len(reference.images())*(1-img_ratio)) 33 | 34 | self.assertTrue((np.sort(np.unique(fm_train[fm_train.category==test.category].filenumber)) == 35 | np.sort( train.images())).all()) 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /ocupy/xvalidation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | A cross-validation interface and an implementation of a simple 4 | cross-validation scheme. 5 | """ 6 | 7 | import numpy as np 8 | 9 | from ocupy.utils import randsample, ismember 10 | from ocupy.stimuli import Categories 11 | 12 | 13 | class XValidation(object): 14 | """ 15 | Interface for a cross-validation object. 16 | """ 17 | def __init__(self, categories, fixmat, num_slices): 18 | """Initializes xvalidation object with categories and fixmat.""" 19 | raise NotImplementedError 20 | 21 | def generate(self, subject_partition, image_partition): 22 | """Generates num_slices of data where subject_partition defines how 23 | many subjects go into test and training set and image_partition does 24 | the same for images.""" 25 | raise NotImplementedError 26 | 27 | 28 | class SimpleXValidation(XValidation): 29 | """ 30 | SimpleXValidation performs a simple hold-out cross-validation. 31 | """ 32 | def __init__(self, fm, categories, 33 | subject_hold_out = .3, 34 | image_hold_out=.3, 35 | num_slices = 10): 36 | self.fm = fm 37 | self.num_slices = num_slices 38 | self.categories = categories 39 | self.subject_hold_out = subject_hold_out 40 | self.image_hold_out = image_hold_out 41 | 42 | def generate(self): 43 | """ 44 | Generator for creating the cross-validation slices. 45 | 46 | Yields 47 | A tuple (train_fixmat, train_categories, test_fixmat, 48 | test_categories) for each cross-validation slice. 49 | """ 50 | for _ in range(0, self.num_slices): 51 | #1. separate fixmat into test and training fixmat 52 | subjects = np.unique(self.fm.SUBJECTINDEX) 53 | test_subs = randsample(subjects, 54 | self.subject_hold_out*len(subjects)) 55 | train_subs = [x for x in subjects if x not in test_subs] 56 | test_fm = self.fm[ismember(self.fm.SUBJECTINDEX, test_subs)] 57 | train_fm = self.fm[ismember(self.fm.SUBJECTINDEX, train_subs)] 58 | 59 | #2. distribute images 60 | test_imgs = {} 61 | train_imgs = {} 62 | id_test = (test_fm.x <1) & False 63 | id_train = (train_fm.x <1) & False 64 | for cat in self.categories: 65 | imgs = cat.images() 66 | test_imgs.update({cat.category:randsample(imgs, 67 | self.image_hold_out*len(imgs)).tolist()}) 68 | train_imgs.update({cat.category:[x for x in imgs 69 | if not ismember(x, test_imgs[cat.category])]}) 70 | id_test = id_test | ((ismember(test_fm.filenumber, 71 | test_imgs[cat.category])) & 72 | (test_fm.category == cat.category)) 73 | id_train = id_train | ((ismember(train_fm.filenumber, 74 | train_imgs[cat.category])) & 75 | (train_fm.category == cat.category)) 76 | 77 | 78 | #3. 
Create categories objects and yield result 79 | test_stimuli = Categories(self.categories.loader, test_imgs, 80 | features=self.categories._features, 81 | fixations=test_fm) 82 | train_stimuli = Categories(self.categories.loader, train_imgs, 83 | features=self.categories._features, 84 | fixations=train_fm) 85 | yield (train_fm[id_train], 86 | train_stimuli, 87 | test_fm[id_test], 88 | test_stimuli) 89 | 90 | 91 | -------------------------------------------------------------------------------- /plot_directive/inline/76f909efd8.hires.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/76f909efd8.hires.png -------------------------------------------------------------------------------- /plot_directive/inline/76f909efd8.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/76f909efd8.pdf -------------------------------------------------------------------------------- /plot_directive/inline/8018522b91.hires.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/8018522b91.hires.png -------------------------------------------------------------------------------- /plot_directive/inline/8018522b91.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/8018522b91.pdf -------------------------------------------------------------------------------- /plot_directive/inline/ade1b1ac02.hires.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/ade1b1ac02.hires.png -------------------------------------------------------------------------------- /plot_directive/inline/ade1b1ac02.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/ade1b1ac02.pdf -------------------------------------------------------------------------------- /plot_directive/inline/e0de41fe35.hires.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/e0de41fe35.hires.png -------------------------------------------------------------------------------- /plot_directive/inline/e0de41fe35.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nwilming/ocupy/a0bd64f822576feaa502939d6bafd1183b237d16/plot_directive/inline/e0de41fe35.pdf -------------------------------------------------------------------------------- /py-modindex.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 9 | 10 | Python Module Index — ocupy v0.1 documentation 11 | 12 | 13 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 43 | 44 |
[Python Module Index entries: o: ocupy, ocupy.fixmat, ocupy.loader, ocupy.measures, ocupy.stimuli; r: res, res.analysis.parallel]
-------------------------------------------------------------------------------- /search.html: --------------------------------------------------------------------------------
[Search — ocupy v0.1 documentation. Visible page text: "Please activate JavaScript to enable the search functionality." "From here you can search these documents. Enter your search words into the box below and click "search". Note that the search function will automatically search for all of the words. Pages containing fewer words won't appear in the result list."]
88 | 100 | 104 | 105 | -------------------------------------------------------------------------------- /searchindex.js: -------------------------------------------------------------------------------- 1 | Search.setIndex({objects:{"ocupy.loader":{SaveToDisk:[1,3,1],Loader:[1,3,1],LoadFromDisk:[1,3,1],TestLoader:[1,3,1]},"ocupy.loader.LoadFromDisk":{get_feature:[1,2,1],get_image:[1,2,1],test_for_category:[1,2,1],test_for_feature:[1,2,1],path:[1,2,1],test_for_image:[1,2,1]},"res.analysis.parallel":{TaskStore:[5,3,1],Worker:[5,3,1],TaskManager:[5,3,1]},"ocupy.fixmat":{DirectoryFixmatFactory:[4,1,1],FixmatFactory:[4,1,1],FixMat:[4,3,1],TestFixmatFactory:[4,1,1],compute_fdm:[4,1,1]},"ocupy.stimuli.Categories":{content:[1,2,1],fixations:[1,4,1],categories:[1,2,1]},"res.analysis.parallel.TaskStore":{get:[5,2,1],partition:[5,2,1],from_dict:[5,2,1],ind2sub:[5,2,1],to_dict:[5,2,1],sub2ind:[5,2,1],update_results:[5,2,1]},"ocupy.fixmat.FixMat":{load:[4,5,1],join:[4,2,1],by_filenumber:[4,2,1],parameters:[4,2,1],by_field:[4,2,1],filter:[4,2,1],add_field:[4,2,1],rm_field:[4,2,1],fieldnames:[4,2,1],by_cat:[4,2,1],make_reg_data:[4,2,1],save:[4,2,1],add_feature_values:[4,2,1]},"res.analysis.parallel.TaskManager":{xmlrpc_save2file:[5,2,1],xmlrpc_reschedule:[5,2,1],xmlrpc_get_task:[5,2,1],xmlrpc_status:[5,2,1],xmlrpc_exit:[5,2,1],xmlrpc_task_done:[5,2,1]},"ocupy.stimuli.Image":{fixations:[1,4,1],data:[1,4,1],features:[1,2,1]},"ocupy.loader.TestLoader":{test_for_image:[1,2,1],test_for_category:[1,2,1]},"ocupy.loader.SaveToDisk":{save_image:[1,2,1],save_feature:[1,2,1]},"ocupy.stimuli.Images":{images:[1,2,1]},"ocupy.stimuli":{Images:[1,3,1],Image:[1,3,1],DirectoryStimuliFactory:[1,1,1],FixmatStimuliFactory:[1,1,1],Categories:[1,3,1]},"ocupy.measures":{funky_test_measure:[2,1,1],emd:[2,1,1],correlation_model:[2,1,1],exact_roc:[2,1,1],roc_model:[2,1,1],chao_shen:[2,1,1],nss_model:[2,1,1],kldiv_cs_model:[2,1,1],emd_model:[2,1,1],kldiv:[2,1,1],set_scores:[2,1,1],kldiv_model:[2,1,1],nss:[2,1,1],prediction_scores:[2,1,1],fast_roc:[2,1,1]},"res.analysis":{parallel:[5,0,1]},ocupy:{stimuli:[1,0,1],fixmat:[4,0,1],measures:[2,0,1],loader:[1,0,1]},"res.analysis.parallel.Worker":{run:[5,2,1],setup:[5,2,1],compute:[5,2,1]},"ocupy.loader.Loader":{get_feature:[1,2,1],test_for_feature:[1,2,1],test_for_category:[1,2,1],get_image:[1,2,1],save_feature:[1,2,1],path:[1,2,1],save_image:[1,2,1],test_for_image:[1,2,1]}},terms:{represent:5,all:[1,2,4,5],concept:5,edf:4,whatev:5,queri:[1,5,3],roc:[2,5],four:5,signific:5,follow:[1,4,5,3],disk:[1,4,3],cagegori:4,test_for_categori:[1,3],row:4,content:[0,1,4],auc:[2,5],depend:[1,5],graph:1,descript:5,send:5,fixmatfil:4,init:5,matlab:4,under:2,ctegori:1,sourc:[1,2,4,3],everi:[2,4,5],string:[1,4,3],fals:[1,2,4,5,3],ytick:4,condit:4,veri:4,retriev:5,strang:1,chao_shen:2,list:[1,2,4,5],prefix:5,iter:[1,4,5],factori:4,"try":4,vector:4,taskmanag:5,distp:2,setup:5,dir:4,pleas:4,impli:5,smaller:5,xmlrpc_get_task:5,direct:[1,4],consequ:5,second:[2,4],pass:[2,4],linear:5,compat:4,index:[0,1,4,5,3],what:[4,5],appear:2,compar:2,neg:2,section:4,abl:[1,5],invok:4,access:[1,4,5,3],featuremap:1,xmlrpc_exit:5,"new":[1,4,5,3],net:5,method:[1,4,5,3],contrast:4,metadata:4,full:4,gener:[0,1,4,5],never:4,here:5,add_field:4,let:[1,4,5],gridengin:5,featurefold:1,parenthesi:4,valu:[2,4,5],wait:5,convert:[1,2,4,3],task_id:5,current:[4,5],observ:2,prediction_scor:2,pick:[4,5],implement:[1,2,5,3],chanc:5,get_task:5,via:[1,5,3],useful:4,appli:4,modul:[0,1,2,3,4,5],cat_mat:4,"while":4,filenam:5,"boolean":[1,4,3],"1st":2,
should:[1,2,4,3],plot:[2,4],from:[0,1,2,3,4,5],describ:[1,4,5],would:4,fron:1,distinct:4,doubl:2,two:[2,4],next:4,predict:[2,5],call:[1,4,5,3],recommend:4,taken:2,type:[4,5],minor:1,more:[1,4,5],sort:4,acc:4,evalu:[1,2,4],desir:4,set_scor:2,peopl:4,get_statu:5,factor:4,uint8:4,notic:4,site:5,visual:4,worri:1,numpi:[1,2,4,3],known:4,ground_truth:2,must:[1,4],none:[1,2,4,5,3],join:[1,4,5],prepar:5,work:[0,1,2,4,5],predicting_sub:5,uniqu:4,cat:[1,4,3],can:[1,2,4,5,3],category_image_featur:1,purpos:4,def:[1,5],deg:4,control:[2,4,5],encapsul:[1,3],give:[1,5],process:[0,4,5],indic:[0,5],fixtat:4,serial:5,cours:1,end:[1,4],divid:5,rather:4,anoth:5,update_result:5,by_field:4,write:4,how:[1,4,5],anyon:4,sever:[1,4],all_control:4,return_result:5,updat:5,map:[0,1,2,4,5],mat:[1,4,5,3],after:4,lab:4,befor:[1,5,3],xtick:4,stim:1,data:[0,1,2,3,4,5],parallel:[0,5],physic:[1,3],memori:1,"short":4,third:4,bind:[2,4],correspond:[1,2,4,5],assign:1,inform:[4,5],maintain:4,combin:[0,1,4],allow:[2,5],make_reg_data:4,order:[2,5],dpendenc:5,over:[1,4,5],becaus:[2,4],through:[4,5],still:[4,5],fwhm:4,compute_fdm:4,paramet:[1,2,4,5],fir:5,group:[4,5],obviou:1,img:[1,4,3],chosen:4,fix:[2,4,5],taskstor:5,cat_img:1,reschedul:5,therefor:5,might:[4,5],easier:4,them:1,"float":[4,5],"return":[1,2,4,5,3],thei:[1,4,3],python:[2,4],handi:1,initi:1,subtask:5,half:4,by_cat:4,now:[1,4],term:[1,2],somewher:[0,1],name:[1,2,4,5,3],neccessari:4,task_index:5,separ:2,savetodisk:[1,3],achiev:[1,5],categories_obj:4,ctr_loc:2,each:5,found:4,mean:4,subset:[4,5],replac:[1,3],arg2:2,hard:[1,3],gaussian:4,meta:4,"static":4,connect:[4,5],happen:5,extract:[1,4],event:4,out:[1,4,5],ftr:[1,3],num_task:5,matrix:[1,2,4,5,3],space:5,goe:4,categori:[0,1,4,3],adapt:1,internet:5,print:[4,5],distq:2,forth:[4,5],correct:2,matric:4,common:[1,4,3],model:[1,2,4],proxi:1,given:[1,4,5],standard:1,cut:4,base:5,dictionari:[2,4,5],put:5,rm_field:4,care:5,angl:4,frequenc:4,omit:4,filter:[1,2,4],thing:[0,1,5],length:4,place:[4,5],think:1,fixmat_demo:4,first:[1,2,4],oper:4,dimens:4,rang:[1,5],directli:[4,5],kernel:4,carri:[1,5],arrai:[2,4],ocupi:[0,1,2,4,3],number:[1,4,5],alreadi:1,done:[0,5],messag:5,pixels_per_degre:4,massiv:5,size:[1,2,4,3],differ:[1,2,4,5,3],exact_roc:2,convent:1,script:4,associ:[1,4,5],interact:1,imshow:4,construct:[1,4,3],tradition:4,attach:4,salienc:2,termin:5,scheme:[1,3],store:[1,2,5],subplot:4,option:[1,2,4,3],specifi:[1,2,4],task:[0,1,5],intersubject_scor:5,correlation_model:2,pars:4,xmlrpc_statu:5,than:4,png:[1,4,3],conveni:[1,4],whenev:5,provid:[1,4,5],remov:4,rate:2,structur:[1,4,5],matter:1,f_all:4,str:[1,3],were:[4,5],posit:[2,4,5],arrang:1,other:[4,5],thu:[1,4,5,3],fixat:[0,1,2,4],randomli:4,"function":[1,2,4,5,3],sai:[1,4,5],pxels_per_degre:4,comput:[0,2,4,5],beforehand:[1,5],argument:[1,2,5],raw:[1,4],have:[1,2,4,5],tabl:[0,4],need:[1,2,5,3],seem:4,element:2,probabl:[1,2,4],squar:4,diverg:2,self:[1,5],note:4,also:[1,2,4,5],exampl:[1,2,4,5],take:[1,4,5],which:[1,2,4,5,3],eyetrack:2,singl:[1,4,5],distribut:[0,2,5],normal:2,wate:5,object:[1,2,4,5],most:[1,4],regular:4,thi:[1,2,4,5,3],pair:5,"class":[1,4,5,3],sub:5,correctli:4,url:[1,5,3],later:1,stronger:4,doe:[1,4,5],bracket:4,runtim:1,determin:2,usual:[4,5],width:4,dot:4,listentcp:5,reactor:5,show:4,random:[4,5],distanc:2,threshold:2,identifi:[1,5,3],precondit:1,find:5,xml:5,absolut:4,onli:[1,2,4,5],locat:[1,2,4,3],just:4,pretti:[1,4],"true":[1,2,4,5,3],explain:[1,4],configur:5,on_imag:[4,5],cur_r:5,emd_model:2,dict:5,nss_model:2,folder:1,loadfromdisk:[1,3],get:[0,5],express:4,auto
m:4,save_imag:[1,3],requir:[2,4,5],layout:1,emd:2,bar:2,organ:[0,1,5,3],yield:[1,4],"default":[1,2,4,5],to_dict:5,partit:5,contain:[1,2,4,5],eart:2,ftrpath:[1,3],kldiv_cs_model:2,where:[1,4],view:4,fieldnam:4,bychigh:1,set:[1,2,4,5],fdm:[2,4],fixmatfactori:[4,5],iswork:5,see:[4,5],result:[4,5],arg:[1,3],best:5,subject:[4,5],statu:5,detect:4,fm_new:4,stuck:4,databas:1,boundari:4,enumer:4,img_per_cat:[1,3],state:5,score:[2,5],between:[2,4,5],"import":[4,5],experi:1,approach:5,screen:[4,5],attribut:4,altern:1,accord:4,kei:[2,5],roc_model:2,numer:2,nwilm:5,subjectindic:4,get_featur:[1,3],corrcoef:2,job:5,entir:[4,5],xmlrpc_reschedul:5,otherwis:[1,4,3],ipython:5,come:5,notat:4,both:4,image_s:[2,4],float64:4,howev:1,against:2,etc:5,leibler:2,instanc:[4,5],logic:4,mani:[1,4,5],load:[0,1,4,5,3],xmlrpc_task_don:5,simpli:[1,4,3],test_for_imag:[1,3],point:[2,4],instanti:5,inspir:4,testfactori:4,rpc:5,fashion:5,path:[1,4,5,3],respect:1,assum:[1,2,4,3],summar:4,backend:1,quit:5,creat:[1,4,5],subfold:1,add_feature_valu:4,convolut:4,three:[1,4,5],been:[1,4],much:1,category_imag:[1,3],scanpath:2,interest:4,kldiv_model:2,funky_test_measur:2,field:[4,5],life:1,fire:5,imag:[0,1,2,4,3],worker:5,search:0,assert:1,coordin:[2,4],togeth:5,els:4,impath:[1,3],present:[1,4],"case":[1,2,4,5],ident:5,look:1,align:1,properti:[4,5],werer:2,pixel:4,aim:1,defin:[1,2,4,5],calcul:5,neat:1,kick:4,error:4,fm_filter:4,region:4,loop:4,advantag:1,demo:4,non:4,rip:1,itself:5,testfixmatfactori:4,fixmat:[0,1,2,4,5],"__init__":5,directoryfixmatfactori:[1,4],welcom:0,minim:4,perform:5,make:[0,5],belong:[1,3],same:[1,4,5],score_list:2,handl:[1,4],complex:5,document:[0,1,4],complet:[1,5],http:5,upon:5,hand:2,user:[4,5],pdf:4,typic:4,pull:1,stateless:5,lower:2,appropri:[1,3],entri:4,stimuli:[0,1,4,3],well:[4,5],inherit:[1,5],client:5,save_featur:[1,3],stimulu:[0,1],interpol:2,everyth:5,prallel:5,left:5,interpret:5,ctr_size:2,facto:5,execut:5,nan:[4,5],pylab:4,rest:5,edfread:4,aspect:4,heavili:4,yet:4,web:[1,5],detach:5,easi:1,img_mat:4,character:5,except:2,param:[4,5],opencv:2,add:[1,4,5,3],uint16:4,densiti:[0,2,4],input:[1,2,4,3],save:[1,4,5,3],approxim:[2,4],float32:4,real:5,int32:4,recreat:5,transpar:1,read:1,big:4,swig:2,bia:[2,4],ind2sub:5,loader:[0,1,3],rescal:5,measur:[0,1,2],like:4,specif:5,server:5,collect:5,from_dict:5,necessari:[4,5],output:[2,4,5],resiz:[1,3],directorystimulifactori:1,page:0,right:1,often:[1,4,5],deal:5,shen:2,creation:5,some:[1,2,4,5],back:5,intern:5,sampl:4,scale:4,glob_str:4,per:4,tracker:4,exit:5,meaningful:5,task_stor:5,foo:2,localhost:5,refer:[0,1,4,5],run:5,power:5,dtype:4,step:[1,4,5],fulli:5,poss:5,plug:1,comparison:4,about:1,actual:[1,2,5],column:4,istaskstor:5,unfortun:2,constructor:[1,4,5],xmlrpc_save2fil:5,fast_roc:2,produc:4,own:[1,5],devic:4,within:1,automat:[1,4,5],ismemb:5,wrap:[2,4],chang:2,"_categori":1,nss:[2,5],storag:[1,3],your:[1,5,3],manag:5,wai:[1,4,3],area:[2,4],xmlrpc:5,overwrit:1,custom:5,avail:[0,1,2,4,5],start:[4,5],interfac:[1,4,5,3],includ:2,fraction:4,arg1:2,overhead:5,individu:5,treat:1,analysi:5,scale_factor:[2,4,5],repeatedli:5,form:[1,5,3],tupl:[2,4,5],idea:1,task_descript:5,link:4,line:4,highest:4,sub2ind:5,concaten:4,hire:4,made:[1,2,4],filenumb:[1,4],consist:[4,5],possibl:[1,2,5],whether:4,smooth:4,maximum:4,record:4,below:[1,5],sum:2,trapezoid:2,problem:1,remaind:1,similar:4,expect:[1,5],curv:2,featur:[0,1,4,3],feature_list:4,predicted_sub:5,certain:4,"abstract":[1,3],inp:1,chao:2,doesn:1,repres:[1,4,5,3],twist:5,exist:[1,4,5,3],file:[1,4,5],ind:5,by_filenumb:4,mover:2
,check:[4,5],fill:[1,2],again:[1,5],fixmatstimulifactori:1,want:[1,5],when:5,detail:4,my_path:1,subjectindex:4,num_partit:5,valid:[1,5],bool:4,rememb:5,spatial:[2,4],test:[1,2,4,3],you:[1,4,5,3],"2dn":2,kullback:2,repeat:4,intend:5,potenti:2,ndarrai:[1,2,3],get_imag:[1,3],testload:[1,3],consid:[1,5],sql:1,test_for_featur:[1,3],pool:4,stai:1,receiv:5,furthermor:1,directori:[1,4],xmlrpclib:5,kldiv:2,task_result:5,time:[1,2,4,5],avoid:5},objtypes:{"0":"py:module","1":"py:function","2":"py:method","3":"py:class","4":"py:attribute","5":"py:staticmethod"},titles:["Welcome to ocupy’s documentation!","Organization of stimuli: categories, images and features","Measures","Loader","FixMat","Parallel"],objnames:{"0":"Python module","1":"Python function","2":"Python method","3":"Python class","4":"Python attribute","5":"Python static method"},filenames:["index","stimuli","measures","loader","fixmat","parallel"]}) -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | version = '0.1' 4 | name='ocupy' 5 | 6 | setup(name=name, 7 | version=version, 8 | description="Oculography Analysis Toolbox", 9 | long_description= 10 | """Ocupy provides functions for eye-tracking data analysis: 11 | 12 | * FixMat objects for reading of and filtering by fixation- and meta-data 13 | * Corresponding objects for stimulus data, aligned to FixMat objects 14 | * Measures for prediction quality for eye-tracking data: AUC, NSS, KL, EMD. 15 | * Lower and upper bound calculation for prediction quality of 16 | attention models. 17 | * RPC Client/Server for parallel task execution on a grid 18 | * Evaluation (with cross-validation) of attention models 19 | """, 20 | license='GPL v.2, see LICENSE', 21 | author='WhiteMatter Labs GmbH', 22 | author_email='nwilming@uos.de', 23 | url='http://github.com/nwilming/ocupy/', 24 | packages=find_packages(exclude=['ez_setup']), 25 | classifiers=[ 26 | 'Development Status :: 5 - Production/Stable', 27 | 'Environment :: Console', 28 | 'Intended Audience :: Science/Research', 29 | 'License :: OSI Approved :: GNU General Public License (GPL)', 30 | 'Operating System :: OS Independent', 31 | 'Topic :: Scientific/Engineering :: Information Analysis', 32 | 'Topic :: Software Development :: Libraries :: Python Modules', 33 | ], 34 | zip_safe=True, 35 | include_package_data=True, 36 | setup_requires=[ 37 | 'numpy', 38 | ], 39 | install_requires=[ 40 | 'numpy', 41 | 'scipy', 42 | 'PIL', 43 | 'h5py' 44 | ], 45 | test_suite='ocupy.tests', 46 | extras_require={ 47 | 'doc':['sphinx','matplotlib'] 48 | }, 49 | ) 50 | --------------------------------------------------------------------------------
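A minimal usage sketch, mirroring ocupy/tests/test_xvalidation.py above: it builds a synthetic fixmat and matching stimuli with the test factories, walks over the resulting categories, and then draws hold-out slices with SimpleXValidation.generate(). All names are taken from the sources above; a real analysis would start from a recorded fixmat (for example via fixmat.FixmatFactory) rather than the synthetic test data.

    import numpy as np
    from ocupy import fixmat, stimuli, loader
    from ocupy.xvalidation import SimpleXValidation

    # Synthetic data: 10 categories, images 1-49 per category, 10 subjects.
    img_per_cat = {cat: list(range(1, 50)) for cat in range(1, 11)}
    fm = fixmat.TestFixmatFactory(categories=list(range(1, 11)),
                                  subjectindices=list(range(1, 11)),
                                  filenumbers=list(range(1, 11)))
    stim = stimuli.FixmatStimuliFactory(fm, loader.TestLoader(img_per_cat, size=(10, 10)))

    # A Categories object iterates over categories; each category knows its
    # images, and every image object exposes the fixations made on it.
    for cat in stim:
        print(cat.category, len(cat.images()))

    # Ten slices, holding out 30% of the subjects and 30% of the images.
    xval = SimpleXValidation(fm, stim, subject_hold_out=0.3,
                             image_hold_out=0.3, num_slices=10)
    for fm_train, cat_train, fm_test, cat_test in xval.generate():
        print(len(np.unique(fm_train.SUBJECTINDEX)),
              len(np.unique(fm_test.SUBJECTINDEX)))

The slice contents are what test_xvalidation.py asserts: roughly 70% of the subjects and of each category's images end up in the training parts, the rest in the test parts.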
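The helpers exercised by ocupy/tests/test_utils.py can likewise be condensed into a short sketch; the expected values below are the ones the tests above assert, and the exact return types are whatever ocupy.utils produces.

    import numpy as np
    from ocupy import utils

    odds = np.array(list(range(1, 100, 2)))
    print(utils.ismember(3, odds))    # scalar membership in an array -> [True]
    print(utils.ismember(4, odds))    # -> [False]

    # dict_2_mat turns an int-keyed dict of equal-length lists into a matrix,
    # filling rows for missing keys with NaN ...
    d = {2: list(range(1, 100)), 3: list(range(1, 100)), 4: list(range(1, 100))}
    m = utils.dict_2_mat(d)
    assert (m[2] == np.array(d[2])).all() and np.isnan(m[1]).all()

    # ... and rejects unequal-length values with a RuntimeError.
    try:
        utils.dict_2_mat({2: list(range(1, 100)), 3: list(range(1, 10)), 4: list(range(1, 110))})
    except RuntimeError:
        pass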