├── .gitignore ├── .travis.yml ├── LICENSE ├── README.txt ├── brainx ├── __init__.py ├── detect_modules.py ├── metrics.py ├── modularity.py ├── nodal_roles.py ├── notebooks │ ├── .ipynb_checkpoints │ │ └── detect_partition_degeneracy-checkpoint.ipynb │ └── detect_partition_degeneracy.ipynb ├── nxplot.py ├── recarrutil.py ├── tests │ ├── __init__.py │ ├── example_plots.py │ ├── example_plots_random.py │ ├── jazz.net │ ├── tdata_corr_txt │ │ ├── 101_Block01.txt │ │ ├── 101_Block02.txt │ │ ├── 102_Block01.txt │ │ └── 102_Block02.txt │ ├── test_metrics.py │ ├── test_modularity.py │ ├── test_nodal_roles.py │ ├── test_util.py │ └── test_weighted_modularity.py ├── util.py ├── version.py └── weighted_modularity.py ├── doc ├── Makefile ├── api │ └── index.rst ├── conf.py ├── index.rst ├── ipython_notebooks │ └── Degree.ipynb ├── research_notes │ ├── index.rst │ └── preprocessing.rst ├── sphinxext │ ├── README.txt │ ├── docscrape.py │ ├── docscrape_sphinx.py │ ├── inheritance_diagram.py │ ├── ipython_console_highlighting.py │ ├── numpydoc.py │ └── only_directives.py └── tools │ ├── apigen.py │ └── build_modref_templates.py ├── requirements.txt ├── setup.py ├── setup_egg.py └── tools ├── run_tests.sh └── travis_setup.sh /.gitignore: -------------------------------------------------------------------------------- 1 | *.a 2 | *.o 3 | *.py[co] 4 | *.so 5 | .dll 6 | *.sw[nop] 7 | *~ 8 | .#* 9 | [#]*# 10 | *.tar 11 | *.tgz 12 | *.gz 13 | *.bz2 14 | .coverage 15 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | - "3.4" 5 | before_install: 6 | - tools/travis_setup.sh 7 | install: 8 | - python setup.py install 9 | - pip install nose ipython runipy 10 | script: 11 | - tools/run_tests.sh 12 | after_success: 13 | - coveralls 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006-2009, NIPY Developers 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of the NIPY Developers nor the names of any 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /README.txt: -------------------------------------------------------------------------------- 1 | ================================================ 2 | Brainx: network analysis for neuroimaging data 3 | ================================================ 4 | 5 | Brainx provides a set of tools, based on the NetworkX graph theory package, for 6 | the analysis of graph properties of neuroimaging data. 7 | 8 | 9 | Installation 10 | ============ 11 | 12 | For a normal installation, simply type:: 13 | 14 | python setup.py install [other options here] 15 | 16 | To install using setuptools support, use:: 17 | 18 | python setup_egg.py install [other options here] 19 | 20 | For example, to install using a development-mode setup in your personal user 21 | directory, use:: 22 | 23 | python setup_egg.py develop --prefix=$HOME/.local 24 | 25 | 26 | Testing 27 | ======= 28 | 29 | To run the test suite, once you have installed it as per the above 30 | instructions, simply use:: 31 | 32 | nosetests brainx 33 | 34 | or for more informative details:: 35 | 36 | nosetests -vvs brainx 37 | 38 | For further information, type ``nosetests -h``. 39 | 40 | 41 | License information 42 | =================== 43 | 44 | Brainx is licensed under the terms of the new BSD license. See the file 45 | "LICENSE" for information on the history of this software, terms & conditions 46 | for usage, and a DISCLAIMER OF ALL WARRANTIES. 47 | -------------------------------------------------------------------------------- /brainx/__init__.py: -------------------------------------------------------------------------------- 1 | """Top-level init file for brainx package. 2 | """ 3 | 4 | def patch_nx(): 5 | """Temporary fix for NX's watts_strogatz routine, which has a bug in versions 1.1-1.3 6 | """ 7 | 8 | import networkx as nx 9 | 10 | # Quick test to see if we get the broken version 11 | g = nx.watts_strogatz_graph(2, 0, 0) 12 | 13 | if g.number_of_nodes() != 2: 14 | # Buggy version detected. 
Create a patched version and apply it to nx

        nx._watts_strogatz_graph_ori = nx.watts_strogatz_graph

        def patched_ws(n, k, p, seed=None):
            if k < 2:
                g = nx.Graph()
                g.add_nodes_from(range(n))
                return g
            else:
                return nx._watts_strogatz_graph_ori(n, k, p, seed)

        patched_ws.__doc__ = nx._watts_strogatz_graph_ori.__doc__

        # Applying monkeypatch now
        import warnings
        warnings.warn("Monkeypatching NetworkX's Watts-Strogatz routine")

        nx.watts_strogatz_graph = patched_ws


patch_nx()
--------------------------------------------------------------------------------
/brainx/detect_modules.py:
--------------------------------------------------------------------------------
"""DEPRECATED - use the modularity module instead."""

import warnings
warnings.warn(__doc__, DeprecationWarning)

# Backwards compatibility (explicit relative import, so this also works on
# Python 3)
from .modularity import *
--------------------------------------------------------------------------------
/brainx/metrics.py:
--------------------------------------------------------------------------------
"""Compute various useful metrics.
"""

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

import networkx as nx
import numpy as np
from scipy import sparse

#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------

def inter_node_distances(graph):
    """Compute the shortest path lengths between all nodes in graph.

    This performs the same operation as NetworkX's
    all_pairs_shortest_path_lengths with two exceptions: Here, self
    paths are excluded from the dictionary returned, and the distance
    between disconnected nodes is set to infinity.  The latter
    difference is consistent with the Brain Connectivity Toolbox for
    Matlab.

    Parameters
    ----------
    graph: networkx Graph
        An undirected graph.

    Returns
    -------
    lengths: dictionary
        Dictionary of shortest path lengths keyed by source and target.

    """
    lengths = nx.all_pairs_shortest_path_length(graph)
    node_labels = sorted(lengths)
    for src in node_labels:
        lengths[src].pop(src)
        for targ in node_labels:
            if src != targ:
                try:
                    lengths[src][targ]
                except KeyError:
                    lengths[src][targ] = np.inf
    return lengths


def compute_sigma(arr, clustarr, lparr):
    """Compute the small-world coefficient sigma for the graph measures in
    arr, given clustering (clustarr) and path-length (lparr) arrays from a
    matched pseudorandom graph for a particular block b:
    sigma = (C / C_rand) / (L / L_rand)."""

    gc = arr['clust']
    glp = arr['lp']
    out = (gc/clustarr)/(glp/lparr)

    return out


def nodal_pathlengths(graph):
    """Compute mean path length for each node.

    Parameters
    ----------
    graph: networkx Graph
        An undirected graph.

    Returns
    -------
    nodal_means: numpy array
        An array with each node's mean shortest path length to all other
        nodes.  The array is in ascending order of node labels.

    Notes
    -----
    Per the Brain Connectivity Toolbox for Matlab, the distance between
    one node and another that cannot be reached from it is set to
    infinity.
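
    Examples
    --------
    A minimal sketch (the exact array repr depends on the numpy version):
    in a 3-node path graph the end nodes have mean length (1 + 2) / 2 = 1.5
    and the middle node 1.0.

    >>> import networkx as nx
    >>> nodal_pathlengths(nx.path_graph(3))
    array([ 1.5,  1. ,  1.5])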

    """
    lengths = inter_node_distances(graph)
    nodal_means = [np.mean(list(lengths[src].values())) for src in sorted(lengths)]
    return np.array(nodal_means)


def assert_no_selfloops(graph):
    """Raise an error if the graph has any selfloops.
    """
    if graph.nodes_with_selfloops():
        raise ValueError("input graph cannot have selfloops")


def path_lengths(graph):
    """Compute array of all shortest path lengths for the given graph.

    The length of the output array is the number of unique pairs of nodes that
    have a connecting path, so in general it is not known in advance.

    This assumes the graph is undirected, as for any pair of reachable nodes,
    once we've seen the pair we do not keep the path length value for the
    inverse path.

    Parameters
    ----------
    graph : an undirected graph object.
    """

    assert_no_selfloops(graph)

    length = nx.all_pairs_shortest_path_length(graph)
    paths = []
    seen = set()
    for src,targets in length.items():
        seen.add(src)
        neigh = set(targets.keys()) - seen
        paths.extend(targets[targ] for targ in neigh)

    return np.array(paths)


#@profile
def path_lengthsSPARSE(graph):
    """Compute array of all shortest path lengths for the given graph.

    XXX - implementation using scipy.sparse.  This might be faster for very
    sparse graphs, but so far for our cases the overhead of handling the sparse
    matrices doesn't seem to be worth it.  We're leaving it in for now, in case
    we revisit this later and it proves useful.

    The length of the output array is the number of unique pairs of nodes that
    have a connecting path, so in general it is not known in advance.

    This assumes the graph is undirected, as for any pair of reachable nodes,
    once we've seen the pair we do not keep the path length value for the
    inverse path.

    Parameters
    ----------
    graph : an undirected graph object.
    """

    assert_no_selfloops(graph)

    length = nx.all_pairs_shortest_path_length(graph)

    nnod = graph.number_of_nodes()
    paths_mat = sparse.dok_matrix((nnod,nnod))

    for src,targets in length.items():
        for targ,val in targets.items():
            paths_mat[src,targ] = val

    return sparse.triu(paths_mat,1).data


def glob_efficiency(graph):
    """Compute array of global efficiency for the given graph.

    Global efficiency: returns the array of inverse shortest path lengths
    over all reachable node pairs.  The mean of this array is the global
    efficiency of the network."""

    return 1.0/path_lengths(graph)


def nodal_efficiency(graph):
    """Return array with nodal efficiency for each node in graph.

    See Achard and Bullmore (2007, PLoS Comput Biol) for the definition
    of nodal efficiency.

    Parameters
    ----------
    graph: networkx Graph
        An undirected graph.

    Returns
    -------
    nodal_efficiencies: numpy array
        An array with the nodal efficiency for each node in graph.
        The array is in ascending order of node labels.
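        In symbols, with d(i, j) the shortest-path distance and N the
        number of nodes, entry i is
        E(i) = (1 / (N - 1)) * sum_{j != i} 1 / d(i, j),
        i.e. the mean of the inverse path lengths computed below.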

    Notes
    -----
    Per the Brain Connectivity Toolbox for Matlab, the distance between
    one node and another that cannot be reached from it is set to
    infinity.

    """
    lengths = inter_node_distances(graph)
    nodal_efficiencies = np.zeros(len(lengths), dtype=float)
    for src in sorted(lengths):
        inverse_paths = [1.0 / val for val in lengths[src].values()]
        nodal_efficiencies[src] = np.mean(inverse_paths)
    return nodal_efficiencies


def local_efficiency(graph):
    """Compute array of local efficiency for the given graph.

    Local efficiency: for each node, the mean inverse shortest path length
    among its direct neighbors (lengths computed on the full graph)."""

    assert_no_selfloops(graph)

    nodepaths = []
    length = nx.all_pairs_shortest_path_length(graph)
    for n in graph:
        nneighb = set(nx.neighbors(graph,n))

        paths = []
        for nei in nneighb:
            other_neighbors = nneighb - set([nei])
            nei_len = length[nei]
            paths.extend( [nei_len[o] for o in other_neighbors] )

        if paths:
            p = 1.0 / np.array(paths,float)
            nodepaths.append(p.mean())
        else:
            nodepaths.append(0.0)

    return np.array(nodepaths)


def dynamical_importance(graph):
    """Compute dynamical importance for graph.

    Ref: Restrepo, Ott, Hunt. Phys. Rev. Lett. 97, 094102 (2006)
    """
    # spectrum of the original graph
    eigvals = nx.adjacency_spectrum(graph)
    lambda0 = eigvals[0]
    # Now, loop over all nodes in graph, and for each, make a copy of graph,
    # remove that node, and compute the change in lambda.
    nnod = graph.number_of_nodes()
    dyimp = np.empty(nnod,float)
    for n in range(nnod):
        gn = graph.copy()
        gn.remove_node(n)
        lambda_n = nx.adjacency_spectrum(gn)[0]
        dyimp[n] = lambda0 - lambda_n
    # Final normalization
    dyimp /= lambda0
    return dyimp


def weighted_degree(graph):
    """Return an array of degrees that takes weights into account.

    For unweighted graphs, this is the same as the normal degree() method
    (though we return an array instead of a list).
    """
    amat = nx.adj_matrix(graph).A  # get a normal array out of it
    return abs(amat).sum(0)  # weights are sums across rows


def graph_summary(graph):
    """Compute a set of statistics summarizing the structure of a graph.

    Parameters
    ----------
    graph : a graph object.

    Returns
    -------
    Mean values for: lp, clust, glob_eff, loc_eff, in a dict.
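
    Examples
    --------
    A minimal sketch (assuming a complete graph, for which every one of
    these summary metrics is exactly 1.0):

    >>> import networkx as nx
    >>> summary = graph_summary(nx.complete_graph(4))
    >>> sorted(summary)
    ['clust', 'glob_eff', 'loc_eff', 'lp']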
    """

    # Average path length
    lp = path_lengths(graph)
    clust = np.array(list(nx.clustering(graph).values()))
    glob_eff = glob_efficiency(graph)
    loc_eff = local_efficiency(graph)

    return dict( lp=lp.mean(), clust=clust.mean(), glob_eff=glob_eff.mean(),
                 loc_eff=loc_eff.mean() )


def nodal_summaryOut(graph):
    """Compute statistics for individual nodes.

    Parameters
    ----------
    graph: networkx graph
        An undirected graph.

    Returns
    -------
    dictionary
        The keys of this dictionary are lp (which refers to path
        length), clust (clustering coefficient), b_cen (betweenness
        centrality), c_cen (closeness centrality), nod_eff (nodal
        efficiency), loc_eff (local efficiency), and deg (degree).  The
        values are arrays (or lists, in some cases) of metrics, in
        ascending order of node labels.

    """
    lp = nodal_pathlengths(graph)
    clust_dict = nx.clustering(graph)
    clust = np.array([clust_dict[n] for n in sorted(clust_dict)])
    b_cen_dict = nx.betweenness_centrality(graph)
    b_cen = np.array([b_cen_dict[n] for n in sorted(b_cen_dict)])
    c_cen_dict = nx.closeness_centrality(graph)
    c_cen = np.array([c_cen_dict[n] for n in sorted(c_cen_dict)])
    nod_eff = nodal_efficiency(graph)
    loc_eff = local_efficiency(graph)
    deg_dict = graph.degree()
    deg = [deg_dict[n] for n in sorted(deg_dict)]
    return dict(lp=lp, clust=clust, b_cen=b_cen, c_cen=c_cen, nod_eff=nod_eff,
                loc_eff=loc_eff, deg=deg)
--------------------------------------------------------------------------------
/brainx/nodal_roles.py:
--------------------------------------------------------------------------------
# Author: Maxwell Bertolero, bertolero@berkeley.edu

import numpy as np
import networkx as nx

def within_community_degree(weighted_partition, edgeless=np.nan, catch_edgeless_node=True):
    ''' Computes "within-module degree" (z-score) for each node (Guimera 2005, Nature)

    Parameters
    ----------
    weighted_partition : Louvain weighted partition
        e.g., louvain = weighted_modularity.LouvainCommunityDetection(graph)
        weighted_partitions = louvain.run()
        weighted_partition = weighted_partitions[0], where the index is the
        partition level
    edgeless : float
        value assigned to edgeless nodes (default = np.nan)
    catch_edgeless_node : bool
        if True (the default), raise a ValueError when a node's degree is
        zero; if False, save the edgeless value as the wcd of such nodes

    Returns
    -------
    within_community_degree : dict
        Dictionary of the within community degree of each node.
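
    Examples
    --------
    A minimal sketch taken from the test suite (two disconnected triangles;
    every z-score is 0.0 because each community is degree-regular):

    >>> import networkx as nx
    >>> from brainx import weighted_modularity as wm
    >>> graph = nx.Graph([(0,1),(1,2),(2,0),(3,4),(3,5),(4,5)])
    >>> part = wm.WeightedPartition(graph,
    ...     communities=[set([0, 1, 2]), set([3, 4, 5])])
    >>> wcd = within_community_degree(part)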

    '''
    wc_dict = {}
    for c, community in enumerate(weighted_partition.communities):
        community_degrees = []
        for node in community:  # get average within-community-degree
            node_degree = weighted_partition.node_degree(node)
            if node_degree == 0.0:  # catch edgeless nodes; these shouldn't count towards avg wcd
                if catch_edgeless_node:
                    raise ValueError("Node {} is edgeless".format(node))
                continue
            community_degrees.append(weighted_partition.node_degree_by_community(node)[c])
        std = np.std(community_degrees)  # std of community's degrees
        mean = np.mean(community_degrees)  # mean of community's degrees
        for node in community:  # get node's within-community-degree z-score
            if weighted_partition.node_degree(node) == 0:
                wc_dict[node] = edgeless
                continue
            within_community_degree = weighted_partition.node_degree_by_community(node)[c]
            if std == 0.0:  # so we don't divide by 0
                wc_dict[node] = (float(within_community_degree) - float(mean))  # z_score
                continue
            wc_dict[node] = ((float(within_community_degree) - float(mean)) / std)  # z_score
    return wc_dict

def participation_coefficient(weighted_partition, edgeless=np.nan, catch_edgeless_node=True):
    '''
    Computes the participation coefficient for each node (Guimera 2005, Nature)

    Parameters
    ----------
    weighted_partition : Louvain weighted partition
        e.g., louvain = weighted_modularity.LouvainCommunityDetection(graph)
        weighted_partitions = louvain.run()
        weighted_partition = weighted_partitions[0], where the index is the
        partition level
    edgeless : float
        value assigned to edgeless nodes (default = np.nan)
    catch_edgeless_node : bool
        raise ValueError if a node's degree is zero (default = True)

    Returns
    -------
    participation_coefficient : dict
        Dictionary of the participation coefficient of each node.
    '''
    pc_dict = {}
    for node in weighted_partition.graph:
        node_degree = weighted_partition.node_degree(node)
        if node_degree == 0.0:
            if catch_edgeless_node:
                raise ValueError("Node {} is edgeless".format(node))
            pc_dict[node] = edgeless
            continue
        pc = 0.0
        for community_degree in weighted_partition.node_degree_by_community(node):
            pc = pc + ((float(community_degree)/float(node_degree))**2)
        pc = 1 - pc
        pc_dict[node] = pc
    return pc_dict
--------------------------------------------------------------------------------
/brainx/nxplot.py:
--------------------------------------------------------------------------------
"""Plotting utilities for networks"""

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Third-party
import numpy as np

import matplotlib as mpl
from matplotlib import cm
from matplotlib.colors import colorConverter
from matplotlib.collections import LineCollection
from matplotlib.patches import FancyArrow
from matplotlib import pyplot as plt
import matplotlib.cbook as cb

import networkx as nx

# From this project
from . import util
from . import metrics

#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def draw_matrix(mat,th1=None,th2=None,clim=None,cmap=None):
    """Draw a matrix, optionally thresholding it.
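
    If th1 (and optionally th2) is given, the array is first passed through
    util.thresholded_arr; clim sets the color limits and cmap the colormap.
    A minimal usage sketch (assuming an interactive matplotlib backend; the
    return value is the image drawn by plt.matshow):

    >>> import numpy as np
    >>> mat = np.corrcoef(np.random.rand(5, 20))
    >>> img = draw_matrix(mat, th1=0.3)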
    """
    if th1 is not None:
        m2 = util.thresholded_arr(mat,th1,th2)
    else:
        m2 = mat
    ax = plt.matshow(m2,cmap=cmap)
    if clim is not None:
        ax.set_clim(*clim)
    plt.colorbar()
    return ax


def draw_arrows(G,pos,edgelist=None,ax=None,edge_color='k',alpha=1.0,
                width=1):
    """Draw arrows on a set of edges"""

    if ax is None:
        ax = plt.gca()

    if edgelist is None:
        edgelist = G.edges()

    if not edgelist:  # no edges!
        return

    # set edge positions
    edge_pos = np.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])

    # Radius of the nodes in world coordinates
    radius = 0.5
    head_length = 0.31
    overhang = 0.1

    for src,dst in edge_pos:
        dd = dst-src
        nd = np.linalg.norm(dd)
        if nd==0:  # source and target at same position
            continue

        s = 1.0-radius/nd
        dd *= s
        x1,y1 = src
        dx,dy = dd
        ax.arrow(x1,y1,dx,dy,lw=width,width=width,head_length=head_length,
                 fc=edge_color,ec='none',alpha=alpha,overhang=overhang)


def draw_graph(G,
               labels=None,
               node_colors=None,
               node_shapes=None,
               node_scale=1.0,
               edge_style='solid',
               edge_cmap=None,
               colorbar=False,
               vrange=None,
               layout=nx.circular_layout,
               title=None,
               font_family='sans-serif',
               font_size=9,
               stretch_factor=1.0,
               edge_alpha=True):
    """Draw a weighted graph with options to visualize link weights.

    The resulting diagram uses the weighted degree of each node as its size,
    and the weight of each link as the link opacity.

    It maps edge weight to color as well as line opacity and thickness,
    allowing the color part to be hardcoded over a value range (to permit valid
    cross-figure comparisons for different graphs, so the same color
    corresponds to the same link weight even if each graph has a different
    range of weights).  The node sizes are proportional to their degree,
    computed as the sum of the weights of all their links.  The layout defaults
    to circular, but any nx layout function can be passed in, as well as a
    statically precomputed layout.

    Parameters
    ----------
    G : weighted graph
        Each edge's 'weight' entry is used for the edge colors; the
        normalized weights in [0,1] stored in G.metadata['vals_norm'] set
        each edge's thickness and opacity.

    labels : list or dict, optional.
        An indexable object that maps nodes to strings.  If not given, the
        string form of each node is used as a label.  If False, no labels are
        drawn.

    node_colors : list or dict, optional.
        An indexable object that maps nodes to valid matplotlib color specs.
        See matplotlib's plot() function for details.

    node_shapes : list or dict, optional.
        An indexable object that maps nodes to valid matplotlib shape specs.
        See matplotlib's scatter() function for details.  If not given,
        circles are used.

    node_scale : float, optional
        A scale factor to globally stretch or shrink all node symbols by.

    edge_style : string, optional
        Line style for the edges, defaults to 'solid'.

    edge_cmap : matplotlib colormap, optional.
        A callable that returns valid color specs, like matplotlib colormaps.
        If not given, edges are colored black.

    colorbar : bool
        If True, automatically add a colorbar showing the mapping of graph
        weight values to colors.

    vrange : pair of floats
        If given, this indicates the total range of values that the weights can
        in principle occupy, and is used to set the lower/upper range of the
        colormap.  This allows you to set the range of multiple different
        figures to the same values, even if each individual graph has range
        variations, so that visual color comparisons across figures are valid.

    layout : function or layout dict, optional
        A NetworkX-like layout function or the result of a precomputed layout
        for the given graph.  NetworkX produces layouts as dicts keyed by nodes
        and with (x,y) pairs of coordinates as values; any function that
        produces this kind of output is acceptable.  Defaults to
        nx.circular_layout.

    title : string, optional.
        If given, title to put on the main plot.

    font_family : string, optional.
        Font family used for the node labels and title.

    font_size : int, optional.
        Font size used for the node labels and title.

    stretch_factor : float, optional
        A global scaling factor to make the graph larger (or smaller if <1).
        This can be used to separate the nodes if they start overlapping.

    edge_alpha : bool, optional
        Whether to weight the transparency of each edge by a factor equivalent
        to its relative weight.

    Returns
    -------
    fig
        The matplotlib figure object with the plot.
    """
    # A few hardcoded constants, though their effect can always be controlled
    # via user-settable parameters.
    figsize = [6,6]
    # For the size of the node symbols
    node_size_base = 1000
    node_min_size = 200
    default_node_shape = 'o'
    # Default colors if none given
    default_node_color = 'r'
    default_edge_color = 'k'
    # Max edge width
    max_width = 13

    # We'll use the nodes a lot, let's make a numpy array of them
    nodes = np.array(sorted(G.nodes()))
    nnod = len(nodes)

    # Build a 'weighted degree' array obtained by adding the (absolute value)
    # of the weights for all edges pointing to each node:
    amat = nx.adj_matrix(G).A  # get a normal array out of it
    degarr = abs(amat).sum(0)  # weights are sums across rows

    # Map the degree to the 0-1 range so we can use it for sizing the nodes.
    try:
        odegree = util.rescale_arr(degarr,0,1)
        # Make an array of node sizes based on node degree
        node_sizes = odegree * node_size_base + node_min_size
    except ZeroDivisionError:
        # All nodes same size
        node_sizes = np.empty(nnod,float)
        node_sizes.fill(0.5 * node_size_base + node_min_size)

    # Adjust node size list.  We square the scale factor because in mpl, node
    # sizes represent area, not linear size, but it's more intuitive for the
    # user to think of linear factors (the overall figure scale factor is also
    # linear).
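    # For example (hypothetical numbers): node_scale=2 doubles each symbol's
    # diameter, so the area values computed below get multiplied by 2**2 == 4.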
    node_sizes *= node_scale**2

    # Set default node properties
    if node_colors is None:
        node_colors = [default_node_color]*nnod

    if node_shapes is None:
        node_shapes = [default_node_shape]*nnod

    # Set default edge colormap
    if edge_cmap is None:
        # Make an object with the colormap API that maps all input values to
        # the default color (with proper alpha)
        edge_cmap = ( lambda val, alpha:
                      colorConverter.to_rgba(default_edge_color,alpha) )

    # if vrange is None, we set the color range from the values, else the user
    # can specify it

    # e[2] is the edge data dict: edges(data=True) returns (i,j,data)
    gvals = np.array([ e[2]['weight'] for e in G.edges(data=True) ])
    gvmin, gvmax = gvals.min(), gvals.max()
    gvrange = gvmax - gvmin
    if vrange is None:
        vrange = gvmin,gvmax
    # Now, construct the normalization for the colormap
    cnorm = mpl.colors.Normalize(vmin=vrange[0], vmax=vrange[1])

    # Create the actual plot where the graph will be displayed
    figsize = np.array(figsize,float)
    figsize *= stretch_factor

    fig = plt.figure(figsize=figsize)

    # If a colorbar is required, make a set of axes for both the main graph and
    # the colorbar, otherwise let nx do its thing
    if colorbar:
        # Make axes for both the graph and the colorbar
        left0, width0, sep = 0.01, 0.73, 0.03
        left, bottom, width, height = left0+width0+sep, 0.05, 0.03, 0.9
        ax_graph = fig.add_axes([left0,bottom, width0, height])
        ax_cbar = fig.add_axes([left,bottom, width, height])
        # Set the current axes to be the graph ones for nx to draw into
        fig.sca(ax_graph)

    # Compute positions for all nodes - nx has several algorithms
    if callable(layout):
        pos = layout(G)
    else:
        # The user can also provide a precomputed layout
        pos = layout

    # Draw nodes one at a time, so each node's color, shape and size spec is
    # honored
    for nod in nodes:
        nx.draw_networkx_nodes(G,pos,nodelist=[nod],
                               node_color=node_colors[nod],
                               node_shape=node_shapes[nod],
                               node_size=node_sizes[nod])

    # Draw edges
    if not isinstance(G,nx.DiGraph):
        # Undirected graph, simple lines for edges
        # We need the size of the value range to properly scale colors
        vsize = vrange[1] - vrange[0]
        gvals_normalized = G.metadata['vals_norm']
        for (u,v,y) in G.edges(data=True):
            # The graph value is the weight, and the normalized values are in
            # [0,1], used for thickness/transparency
            alpha = gvals_normalized[u,v]
            # Scale the color choice to the specified vrange, so that the same
            # weight maps to the same color across figures
            ecol = (y['weight']-vrange[0])/vsize

            if edge_alpha:
                fade = alpha
            else:
                fade = 1.0

            edge_color = [ tuple(edge_cmap(ecol,fade)) ]

            draw_networkx_edges(G, pos, edgelist=[(u,v)],
                                width=alpha*max_width,
                                edge_color=edge_color,
                                style=edge_style)
    else:
        # Directed graph, use arrows.
        # XXX - this is currently broken.
        raise NotImplementedError("arrow drawing currently broken")

        ## for (u,v,x) in G.edges(data=True):
        ##     y,w = x
        ##     draw_arrows(G,pos,edgelist=[(u,v)],
        ##                 edge_color=[w],
        ##                 alpha=w,
        ##                 edge_cmap=edge_cmap,
        ##                 width=w*max_width)

    # Draw labels.  If not given, we use the string form of the nodes.  If
    # labels is False, no labels are drawn.
    if labels is None:
        labels = list(map(str,nodes))

    if labels:
        lab_idx = range(len(labels))
        labels_dict = dict(zip(lab_idx,labels))
        nx.draw_networkx_labels(G,pos,labels_dict,font_size=font_size,
                                font_family=font_family)

    if title:
        plt.title(title,fontsize=font_size)

    # Turn off x and y axes labels in pylab
    plt.xticks([])
    plt.yticks([])

    # Add a colorbar if requested
    if colorbar:
        cb1 = mpl.colorbar.ColorbarBase(ax_cbar, cmap=edge_cmap, norm=cnorm)
    else:
        # With no colorbar, at least adjust the margins so there's less dead
        # border around the graph (the colorbar code automatically sets those
        # for us above)
        e = 0.08
        plt.subplots_adjust(e,e,1-e,1-e)

    # Always return the MPL figure object so the user can further manipulate it
    return fig


def pick_lesion_colors(lesions,subnets):
    ss = {}
    for col,net in subnets.items():
        ss[col] = set(net)

    les_colors = {}
    for lesion in lesions:
        for col,net in ss.items():
            if lesion in net:
                les_colors[lesion] = col
                break
        else:
            raise ValueError("lesion %s not in given subnets" % lesion)
    return les_colors


def lab2node(labels,labels_dict):
    return [labels_dict[ll] for ll in labels]


def draw_lesion_graph(G,bl,labels=None,subnets=None,lesion_nodes=None):
    """Draw graph G colored by subnetwork, marking lesioned nodes as squares.

    Parameters
    ----------
    bl : int
        Block index (reserved for subplot placement; currently unused).
    subnets : dict
        A colors, label-list dict of subnetworks that covers the graph.
    """
    if labels is None:
        labels = G.nodes()

    if subnets is None:
        subnets = dict(b=G.nodes())

    all_nodes = set(labels)

    lab_idx = range(len(labels))
    labels_dict = dict(zip(labels,lab_idx))
    idx2lab_dict = dict(zip(lab_idx,labels))

    # Check that all subnets cover the whole graph
    subnet_nodes = []
    for ss in subnets.values():
        subnet_nodes.extend(ss)
    subnet_nodes = set(subnet_nodes)
    assert subnet_nodes == all_nodes

    # Check that the optional lesion list is contained in all the nodes
    if lesion_nodes is None:
        lesion_nodes = set()
    else:
        lesion_nodes = set(lesion_nodes)

    assert lesion_nodes.issubset(all_nodes),\
           "lesion nodes:%s not a subset of nodes" % lesion_nodes

    # Make a table that maps lesion nodes to colors
    lesion_colors = pick_lesion_colors(lesion_nodes,subnets)

    # Compute positions for all nodes - nx has several algorithms
    pos = nx.circular_layout(G)

    # Create the actual plot where the graph will be displayed
    #fig = plt.figure()

    #plt.subplot(1,12,bl+1)

    # Draw nodes
    for node_color,nodes in subnets.items():
        nodelabels = set(nodes) - lesion_nodes
        nodelist = lab2node(nodelabels,labels_dict)
        nx.draw_networkx_nodes(G,pos,nodelist=nodelist,
                               node_color=node_color,node_size=700,
                               node_shape='o')

    for nod in lesion_nodes:
        nx.draw_networkx_nodes(G,pos,nodelist=[labels_dict[nod]],
                               node_color=lesion_colors[nod],node_size=700,
                               node_shape='s')

    # Draw edges
    draw_networkx_edges(G,pos)

    # Draw labels
    nx.draw_networkx_labels(G,pos,idx2lab_dict)


### Patched version for networkx draw_networkx_edges, sent to Aric.
def draw_networkx_edges(G, pos,
                        edgelist=None,
                        width=1.0,
                        edge_color='k',
                        style='solid',
                        alpha=None,
                        edge_cmap=None,
                        edge_vmin=None,
                        edge_vmax=None,
                        ax=None,
                        arrows=True,
                        **kwds):
    """Draw the edges of the graph G

    This draws only the edges of the graph G.

    pos is a dictionary keyed by vertex with a two-tuple
    of x-y positions as the value.
    See networkx.layout for functions that compute node positions.

    edgelist is an optional list of the edges in G to be drawn.
    If provided, only the edges in edgelist will be drawn.

    edge_color can be a list of matplotlib color letters such as 'k' or
    'b' that lists the color of each edge; the list must be ordered in
    the same way as the edge list.  Alternatively, this list can contain
    numbers and those numbers are mapped to a color scale using the color
    map edge_cmap.  Finally, it can also be a list of (r,g,b) or (r,g,b,a)
    tuples, in which case these will be used directly to color the edges.  If
    the latter mode is used, you should not provide a value for alpha, as it
    would be applied globally to all lines.

    For directed graphs, "arrows" (actually just thicker stubs) are drawn
    at the head end.  Arrows can be turned off with keyword arrows=False.

    See draw_networkx for the list of other optional parameters.
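
    A minimal usage sketch (assuming an interactive matplotlib backend; pos
    can come from any NetworkX layout function):

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> pos = nx.circular_layout(G)
    >>> edges = draw_networkx_edges(G, pos, width=2.0, edge_color='k')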
482 | 483 | """ 484 | try: 485 | import matplotlib 486 | import matplotlib.pylab as pylab 487 | import numpy as np 488 | from matplotlib.colors import colorConverter,Colormap 489 | from matplotlib.collections import LineCollection 490 | except ImportError: 491 | raise ImportError("Matplotlib required for draw()") 492 | except RuntimeError: 493 | pass # unable to open display 494 | 495 | if ax is None: 496 | ax=pylab.gca() 497 | 498 | if edgelist is None: 499 | edgelist=G.edges() 500 | 501 | if not edgelist or len(edgelist)==0: # no edges! 502 | return None 503 | 504 | # set edge positions 505 | edge_pos=np.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist]) 506 | 507 | if not cb.iterable(width): 508 | lw = (width,) 509 | else: 510 | lw = width 511 | 512 | if not cb.is_string_like(edge_color) \ 513 | and cb.iterable(edge_color) \ 514 | and len(edge_color)==len(edge_pos): 515 | if np.alltrue([cb.is_string_like(c) 516 | for c in edge_color]): 517 | # (should check ALL elements) 518 | # list of color letters such as ['k','r','k',...] 519 | edge_colors = tuple([colorConverter.to_rgba(c,alpha) 520 | for c in edge_color]) 521 | elif np.alltrue([not cb.is_string_like(c) 522 | for c in edge_color]): 523 | # If color specs are given as (rgb) or (rgba) tuples, we're OK 524 | if np.alltrue([cb.iterable(c) and len(c) in (3,4) 525 | for c in edge_color]): 526 | edge_colors = tuple(edge_color) 527 | alpha=None 528 | else: 529 | # numbers (which are going to be mapped with a colormap) 530 | edge_colors = None 531 | else: 532 | raise ValueError('edge_color must consist of either color names or numbers') 533 | else: 534 | if len(edge_color)==1: 535 | edge_colors = ( colorConverter.to_rgba(edge_color, alpha), ) 536 | else: 537 | raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges') 538 | edge_collection = LineCollection(edge_pos, 539 | colors = edge_colors, 540 | linewidths = lw, 541 | antialiaseds = (1,), 542 | linestyle = style, 543 | transOffset = ax.transData, 544 | ) 545 | 546 | # Note: there was a bug in mpl regarding the handling of alpha values for 547 | # each line in a LineCollection. It was fixed in matplotlib in r7184 and 548 | # r7189 (June 6 2009). We should then not set the alpha value globally, 549 | # since the user can instead provide per-edge alphas now. Only set it 550 | # globally if provided as a scalar. 551 | if cb.is_numlike(alpha): 552 | edge_collection.set_alpha(alpha) 553 | 554 | # need 0.87.7 or greater for edge colormaps. 
No checks done, this will 555 | # just not work with an older mpl 556 | if edge_colors is None: 557 | if edge_cmap is not None: assert(isinstance(edge_cmap, Colormap)) 558 | edge_collection.set_array(np.asarray(edge_color)) 559 | edge_collection.set_cmap(edge_cmap) 560 | if edge_vmin is not None or edge_vmax is not None: 561 | edge_collection.set_clim(edge_vmin, edge_vmax) 562 | else: 563 | edge_collection.autoscale() 564 | pylab.sci(edge_collection) 565 | 566 | arrow_collection=None 567 | 568 | if G.is_directed() and arrows: 569 | 570 | # a directed graph hack 571 | # draw thick line segments at head end of edge 572 | # waiting for someone else to implement arrows that will work 573 | arrow_colors = ( colorConverter.to_rgba('k', alpha), ) 574 | a_pos=[] 575 | p=1.0-0.25 # make head segment 25 percent of edge length 576 | for src,dst in edge_pos: 577 | x1,y1=src 578 | x2,y2=dst 579 | dx=x2-x1 # x offset 580 | dy=y2-y1 # y offset 581 | d=np.sqrt(float(dx**2+dy**2)) # length of edge 582 | if d==0: # source and target at same position 583 | continue 584 | if dx==0: # vertical edge 585 | xa=x2 586 | ya=dy*p+y1 587 | if dy==0: # horizontal edge 588 | ya=y2 589 | xa=dx*p+x1 590 | else: 591 | theta=np.arctan2(dy,dx) 592 | xa=p*d*np.cos(theta)+x1 593 | ya=p*d*np.sin(theta)+y1 594 | 595 | a_pos.append(((xa,ya),(x2,y2))) 596 | 597 | arrow_collection = LineCollection(a_pos, 598 | colors = arrow_colors, 599 | linewidths = [4*ww for ww in lw], 600 | antialiaseds = (1,), 601 | transOffset = ax.transData, 602 | ) 603 | 604 | # update view 605 | minx = np.amin(np.ravel(edge_pos[:,:,0])) 606 | maxx = np.amax(np.ravel(edge_pos[:,:,0])) 607 | miny = np.amin(np.ravel(edge_pos[:,:,1])) 608 | maxy = np.amax(np.ravel(edge_pos[:,:,1])) 609 | 610 | w = maxx-minx 611 | h = maxy-miny 612 | padx, pady = 0.05*w, 0.05*h 613 | corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) 614 | ax.update_datalim( corners) 615 | ax.autoscale_view() 616 | 617 | edge_collection.set_zorder(1) # edges go behind nodes 618 | ax.add_collection(edge_collection) 619 | if arrow_collection: 620 | arrow_collection.set_zorder(1) # edges go behind nodes 621 | ax.add_collection(arrow_collection) 622 | 623 | return edge_collection 624 | -------------------------------------------------------------------------------- /brainx/recarrutil.py: -------------------------------------------------------------------------------- 1 | """Some utilities for manipulating recarrays. 2 | 3 | Warning 4 | ------- 5 | 6 | This module should *never* be imported as 'import *' 7 | """ 8 | 9 | import numpy as np 10 | import numpy.testing as nt 11 | import sys 12 | 13 | # The functionality in this module is now better provided by 14 | # Pandas' DataFrame -- http://pandas.pydata.org/ 15 | sys.stderr.write('brainx.recarrutil will be removed,' 16 | ' install pandas instead\n') 17 | 18 | # XXX - It's probably OK to import something, but for now let's ban * imports 19 | # altogether . 20 | __all__ = [] 21 | 22 | #----------------------------------------------------------------------------- 23 | # Functions and public utilities 24 | #----------------------------------------------------------------------------- 25 | 26 | def extrude(arr,flatten=False): 27 | """Create a view of a recarray with one extra 'extruded' dimension. 28 | 29 | XXX - document more... 
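
    Examples
    --------
    A minimal sketch (assuming a uniform-dtype recarray; the view gains a
    trailing axis of length nfields):

    >>> import numpy as np
    >>> dt = np.dtype(dict(names=['x', 'y'], formats=[float, float]))
    >>> extrude(np.zeros((2, 3), dt)).shape
    (2, 3, 2)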
    """

    dt = arr.dtype

    fieldtypes = [ v[0] for v in dt.fields.values() ]

    if len(set(fieldtypes)) > 1:
        raise ValueError("dtype of recarray must be uniform")
    newdtype = fieldtypes[0]

    nfields = len(dt.fields)

    # If axis is None, for a normal array this means flatten everything and
    # return a single number.  In our case, we actually want to keep the last
    # dimension (the "extruded" one) alive so that we can reconstruct the
    # recarray in the end.
    if flatten:
        newshape = (arr.size,nfields)
    else:
        newshape = arr.shape + (nfields,)

    # Make the new temp array we'll work with
    return np.reshape(arr.view(newdtype),newshape)


def intrude(arr,dtype):
    """Intrude a recarray by 'flattening' its last dimension into a composite
    dtype.

    XXX - finish doc
    """
    outshape = arr.shape[:-1]
    return (np.reshape(arr.view(dtype),outshape)).view(np.recarray)


def offset_axis(axis):
    """Axis handling logic that is generic to all reductions."""
    flatten = axis is None
    if flatten:
        axis = 0
    else:
        if axis < 0:
            # The case of a negative input axis needs compensation, because we
            # are adding a dimension by ourselves
            axis -= 1
    return flatten, axis


def reduction_factory(name):
    """Create a reduction operation for a given method name.
    """
    def op(arr, axis=None):
        # Normalize the axis argument: None means flatten, and negative axes
        # must be offset by one because extrude appends a field dimension.
        flatten, axis = offset_axis(axis)

        newarr = extrude(arr,flatten)
        # Do the operation on the new array
        method = getattr(newarr,name)
        result = method(axis)
        # Make the output back into a recarray of the original dtype
        return intrude(result, arr.dtype)

    doc = "%s of a recarray, preserving its structure." % name
    op.__doc__ = doc
    op.__name__ = name
    return op


# For methods in the array interface that take an axis argument, the pattern is
# always the same: extrude, operate, intrude.  So we just auto-generate these
# functions here.
reduction_names = ['mean', 'std', 'var', 'min', 'max',
                   'sum', 'cumsum', 'prod', 'cumprod' ]

for fname in reduction_names:
    exec("%s = reduction_factory('%s')" % (fname, fname))

def binop_factory(func):
    """Create a binary operation wrapping the given ufunc.
    """
    name = func.__name__
    def op(a1, a2, out=None):

        new_a1 = extrude(a1)
        new_a2 = extrude(a2)
        if out is not None:
            out = extrude(out)

        # Do the operation on the new array
        if out is None:
            result = func(new_a1, new_a2)
        else:
            result = func(new_a1, new_a2, out)
        # Make the output back into a recarray of the original dtype
        return intrude(result, a1.dtype)

    doc = "Binary %s of two recarrays, preserving their structure." % name
    op.__doc__ = doc
    op.__name__ = name
    return op


# Binary ufunc wrappers follow the same extrude/operate/intrude pattern, so we
# auto-generate them here too.
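# A sketch of what the generated wrappers do (hypothetical recarrays z and w
# sharing fields x and y): add(z, w) returns a recarray whose .x equals
# z.x + w.x and whose .y equals z.y + w.y, elementwise.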
134 | binops = [('add', np.add), ('subtract', np.subtract), 135 | ('multiply', np.multiply), ('divide', np.divide) ] 136 | #binops = [('add',np.add), np.subtract, np.multiply, np.divide ] 137 | for name, func in binops: 138 | exec("%s = binop_factory(func)" % name) 139 | 140 | 141 | #----------------------------------------------------------------------------- 142 | # Tests 143 | #----------------------------------------------------------------------------- 144 | 145 | def test_mean_zero(): 146 | dt = np.dtype(dict(names=['x','y'], formats=[float,float])) 147 | z = np.zeros((2,3), dt) 148 | nt.assert_equal(mean(z),z) 149 | return 1 150 | 151 | 152 | def mk_xyz(): 153 | """Test utility, make x, y, z arrays.""" 154 | dt = np.dtype(dict(names=['x','y'],formats=[float,float])) 155 | x = np.arange(6,dtype=float).reshape(2,3) 156 | y = np.arange(10,16,dtype=float).reshape(2,3) 157 | z = np.empty( (2,3), dt).view(np.recarray) 158 | z.x = x 159 | z.y = y 160 | return x, y, z 161 | 162 | 163 | def mk_xyzw(): 164 | """Test utility, make x, y, z, w arrays.""" 165 | x, y, z = mk_xyz() 166 | w = z.copy() 167 | w.x *= 2 168 | w.y *= 2 169 | return x, y, z, w 170 | 171 | 172 | def test_reductions(): 173 | x, y, z = mk_xyz() 174 | for fname in reduction_names: 175 | reduction = eval(fname) 176 | xmeth = getattr(x, fname) 177 | ymeth = getattr(y, fname) 178 | for axis in [None,0,1,-1,-2]: 179 | zred = reduction(z,axis) 180 | nt.assert_equal(zred.x, xmeth(axis)) 181 | nt.assert_equal(zred.y, ymeth(axis)) 182 | 183 | 184 | def test_binops(): 185 | x, y, z, w = mk_xyzw() 186 | binop_names = [n for (n, op) in binops] 187 | for fname in binop_names: 188 | op = eval(fname) 189 | npop = getattr(np, fname) 190 | opres = op(z,w) 191 | nt.assert_equal(opres.x, npop(z.x, w.x)) 192 | nt.assert_equal(opres.y, npop(z.y, w.y)) 193 | 194 | # Test support utilities 195 | 196 | def eval_tests(testgen): 197 | """Little utility to consume a nose-compliant test generator. 198 | 199 | Returns 200 | ------- 201 | The number of executed tests. An exception is raised if any fails.""" 202 | return len([ t[0](*t[1:]) for t in testgen() ]) 203 | 204 | # Mark it as not being a test itself, so nose doesn't try to run it 205 | eval_tests.__test__ = False 206 | 207 | 208 | def run_test_suite(): 209 | """Call all our tests in sequence. 

    This lets us run the script as a test suite without needing nose or any
    other test runner for simple cases"""
    from time import clock

    # Initialize counters
    ntests = 0
    start = clock()

    # Call the tests and count them
    ntests += test_mean_zero()
    ntests += eval_tests(test_reductions)
    ntests += eval_tests(test_binops)

    # Stop clock and summarize
    end = clock()
    print('-'*70)
    print("Ran %s tests in %.3f" % (ntests, end-start))
    print('\nOK')

run_test_suite.__test__ = False


# If run as a script, just run all the tests and print summary if successful
if __name__ == '__main__':
    run_test_suite()
--------------------------------------------------------------------------------
/brainx/tests/__init__.py:
--------------------------------------------------------------------------------
import os

def get_tdata_corr_txt_dir():
    """Return the directory with text correlation sample files"""

    return os.path.join(os.path.dirname(__file__),'tdata_corr_txt')
--------------------------------------------------------------------------------
/brainx/tests/example_plots.py:
--------------------------------------------------------------------------------
"""Undirected graph plot example."""

import networkx as nx
import numpy as np
from matplotlib import pyplot as plt, cm

import sys
sys.path.insert(0,'../..')

from brainx import util
from brainx import nxplot

reload(util)
reload(nxplot)

#-----------------------------------------------------------------------------
# main
#-----------------------------------------------------------------------------

size = 11
th2 = 0.9
th1 = -th2

# Split the node list in half, and use two colors for each group
split = size//2
nodes = range(size)
head,tail = nodes[:split],nodes[split:]
labels = ['%s%s' % (chr(i),chr(i+32)) for i in range(65,65+size)]
#labels = map(str,nodes)
colors = ['y' for _ in head] + ['r' for _ in tail]

mat = util.symm_rand_arr(size)
mat = 2*mat-1  # map values to [-1,1] range
util.fill_diagonal(mat,0)  # diag==0 so we don't get self-links
G = util.mat2graph(mat, threshold=th1, threshold2=th2)
pfig = nxplot.draw_graph(G,
                         labels=labels,
                         node_colors=colors,
                         edge_cmap=cm.PuOr
                         #edge_cmap=cm.RdBu,
                         #edge_cmap=cm.jet,
                         )

#dmat = np.random.rand(size,size)
#nxplot.draw_graph(mat,dmat=dmat)
#print 'Mat:\n',mat

plt.show()
--------------------------------------------------------------------------------
/brainx/tests/example_plots_random.py:
--------------------------------------------------------------------------------
"""Undirected graph plot example, exercising the layout options."""

import networkx as nx
import numpy as np
from matplotlib import pyplot as plt, cm

import sys
sys.path.insert(0,'../..')

from brainx import util
from brainx import nxplot

reload(util)
reload(nxplot)

#-----------------------------------------------------------------------------
# main
#-----------------------------------------------------------------------------

size = 11
th2 = 0.6
th1 = -th2

# Split the node list in half, and use two colors for each group
split = size//2
nodes = range(size)
head,tail = nodes[:split],nodes[split:]
labels = ['%s%s' % (chr(i),chr(i+32)) for i in range(65,65+size)]
#labels = map(str,nodes)
colors = ['y' for _ in head] + ['r' for _ in tail]

mat = util.symm_rand_arr(size)
mat = 2*mat-1  # map values to [-1,1] range
util.fill_diagonal(mat,0)  # diag==0 so we don't get self-links

layout = nx.circular_layout

G = util.mat2graph(mat, threshold=th1,threshold2=th2)

pfig = nxplot.draw_graph(G,
                         labels=labels,
                         node_colors=colors,
                         layout = layout,
                         title = layout.__name__,
                         #edge_cmap=cm.PuOr
                         edge_cmap=cm.RdBu,
                         #edge_cmap=cm.jet,
                         colorbar=True,
                         )

if 0:
    layout_funcs = [ nx.circular_layout,
                     nx.fruchterman_reingold_layout,
                     nx.graphviz_layout,
                     nx.pydot_layout,
                     nx.pygraphviz_layout,
                     nx.random_layout,
                     nx.shell_layout,
                     nx.spectral_layout,
                     nx.spring_layout,
                     ]

    for layout in layout_funcs:
        G = util.mat2graph(mat, threshold=th1, threshold2=th2)
        pfig = nxplot.draw_graph(G,
                                 labels=labels,
                                 node_colors=colors,
                                 layout = layout,
                                 title = layout.__name__,
                                 edge_cmap=cm.PuOr
                                 #edge_cmap=cm.RdBu,
                                 #edge_cmap=cm.jet,
                                 )

#dmat = np.random.rand(size,size)
#nxplot.draw_graph(mat,dmat=dmat)
#print 'Mat:\n',mat

plt.show()
--------------------------------------------------------------------------------
/brainx/tests/test_metrics.py:
--------------------------------------------------------------------------------
"""Tests for the metrics module"""

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

from unittest import TestCase

# Third party
import networkx as nx
import nose.tools as nt
import numpy as np
import numpy.testing as npt

# Our own imports
from brainx import metrics

#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------

class NodalMetricsTestCase(TestCase):

    def setUp(self):
        # Distances for all node pairs:
        # 0-1: 1    1-2: 1    2-3: 1    3-4: 1
        # 0-2: 1    1-3: 2    2-4: 2
        # 0-3: 2    1-4: 3
        # 0-4: 3
        self.corr_mat = np.array([[0.0, 0.0, 0.0, 0.0, 0.0],
                                  [0.5, 0.0, 0.0, 0.0, 0.0],
                                  [0.3, 0.4, 0.0, 0.0, 0.0],
                                  [0.0, 0.0, 0.7, 0.0, 0.0],
                                  [0.0, 0.0, 0.0, 0.4, 0.0]])
        self.n_nodes = self.corr_mat.shape[0]
        self.g = nx.from_numpy_matrix(self.corr_mat)

    def test_inter_node_distances_conn(self):
        distances = metrics.inter_node_distances(self.g)
        desired = {0: {1: 1, 2: 1, 3: 2, 4: 3},
                   1: {0: 1, 2: 1, 3: 2, 4: 3},
                   2: {0: 1, 1: 1, 3: 1, 4: 2},
                   3: {0: 2, 1: 2, 2: 1, 4: 1},
                   4: {0: 3, 1: 3, 2: 2, 3: 1}}
        self.assertEqual(distances, desired)

    def test_inter_node_distances_disconn(self):
        self.g.remove_edge(2, 3)
        # Now all nodes still have at least one edge, but not all nodes are
        # reachable from all others.
51 | distances = metrics.inter_node_distances(self.g) 52 | # Distances for all node pairs: 53 | # 0-1: 1 1-2: 1 2-3: Inf 3-4: 1 54 | # 0-2: 1 1-3: Inf 2-4: Inf 55 | # 0-3: Inf 1-4: Inf 56 | # 0-4: Inf 57 | desired = {0: {1: 1, 2: 1, 3: np.inf, 4: np.inf}, 58 | 1: {0: 1, 2: 1, 3: np.inf, 4: np.inf}, 59 | 2: {0: 1, 1: 1, 3: np.inf, 4: np.inf}, 60 | 3: {0: np.inf, 1: np.inf, 2: np.inf, 4: 1}, 61 | 4: {0: np.inf, 1: np.inf, 2: np.inf, 3: 1}} 62 | self.assertEqual(distances, desired) 63 | 64 | def test_nodal_pathlengths_conn(self): 65 | mean_path_lengths = metrics.nodal_pathlengths(self.g) 66 | desired = 1.0 / (self.n_nodes - 1) * np.array([1 + 1 + 2 + 3, 67 | 1 + 1 + 2 + 3, 68 | 1 + 1 + 1 + 2, 69 | 2 + 2 + 1 + 1, 70 | 3 + 3 + 2 + 1]) 71 | npt.assert_array_almost_equal(mean_path_lengths, desired) 72 | 73 | def test_nodal_pathlengths_disconn(self): 74 | self.g.remove_edge(2, 3) 75 | # Now all nodes still have at least one edge, but not all nodes are 76 | # reachable from all others. 77 | path_lengths = metrics.nodal_pathlengths(self.g) 78 | # Distances for all node pairs: 79 | # 0-1: 1 1-2: 1 2-3: Inf 3-4: 1 80 | # 0-2: 1 1-3: Inf 2-4: Inf 81 | # 0-3: Inf 1-4: Inf 82 | # 0-4: Inf 83 | desired = (1.0 / (self.n_nodes - 1) * 84 | np.array([1 + 1 + np.inf + np.inf, 85 | 1 + 1 + np.inf + np.inf, 86 | 1 + 1 + np.inf + np.inf, 87 | np.inf + np.inf + np.inf + 1, 88 | np.inf + np.inf + np.inf + 1])) 89 | npt.assert_array_almost_equal(path_lengths, desired) 90 | 91 | def test_nodal_efficiency_conn(self): 92 | n_eff_array = metrics.nodal_efficiency(self.g) 93 | desired = (1.0 / (self.n_nodes - 1) * 94 | np.array([1 + 1 + 1 / 2.0 + 1 / 3.0, 95 | 1 + 1 + 1 / 2.0 + 1 / 3.0, 96 | 1 + 1 + 1 + 1 / 2.0, 97 | 1 / 2.0 + 1 / 2.0 + 1 + 1, 98 | 1 / 3.0 + 1 / 3.0 + 1 / 2.0 + 1])) 99 | npt.assert_array_almost_equal(n_eff_array, desired) 100 | 101 | def test_nodal_efficiency_disconn(self): 102 | self.g.remove_edge(2, 3) 103 | # Now all nodes still have at least one edge, but not all nodes are 104 | # reachable from all others. 105 | n_eff_array = metrics.nodal_efficiency(self.g) 106 | # Distances for all node pairs: 107 | # 0-1: 1 1-2: 1 2-3: Inf 3-4: 1 108 | # 0-2: 1 1-3: Inf 2-4: Inf 109 | # 0-3: Inf 1-4: Inf 110 | # 0-4: Inf 111 | desired = (1.0 / (self.n_nodes - 1) * 112 | np.array([1 + 1 + 1 / np.inf + 1 / np.inf, 113 | 1 + 1 + 1 / np.inf + 1 / np.inf, 114 | 1 + 1 + 1 / np.inf + 1 / np.inf, 115 | 1 / np.inf + 1 / np.inf + 1 / np.inf + 1, 116 | 1 / np.inf + 1 / np.inf + 1 / np.inf + 1])) 117 | npt.assert_array_almost_equal(n_eff_array, desired) 118 | 119 | 120 | def test_path_lengths(): 121 | """Very primitive tests, just using complete graphs which are easy. 
Better 122 | than nothing..."""
123 | for nnod in [2,4,6]:
124 | g = nx.complete_graph(nnod)
125 | nedges = nnod*(nnod-1)//2
126 | path_lengths = metrics.path_lengths(g)
127 | # Check that we get the right size array
128 | nt.assert_equals(nedges, len(path_lengths))
129 | # Check that all lengths are 1
130 | pl_true = np.ones_like(path_lengths)
131 | npt.assert_equal(pl_true, path_lengths)
132 |
133 |
--------------------------------------------------------------------------------
/brainx/tests/test_nodal_roles.py:
--------------------------------------------------------------------------------
1 | # Tests for the nodal roles module
2 | import unittest
3 | import networkx as nx
4 | from brainx import nodal_roles as nr
5 | from brainx import weighted_modularity as wm
6 |
7 | class TestNodalRoles(unittest.TestCase):
8 | def test_disconnected_communities(self):
9 | graph = nx.Graph([(0,1),(1,2),(2,0),(3,4),(3,5),(4,5)])
10 | partition = wm.WeightedPartition(graph, communities=[set([0, 1, 2]), set([3, 4, 5])])
11 | wcd = nr.within_community_degree(partition)
12 | self.assertAlmostEqual(wcd, {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0})
13 | pc = nr.participation_coefficient(partition)
14 | self.assertEqual(pc, {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0})
15 | def test_high_low_pc(self):
16 | graph = nx.Graph([(0,1),(1,2),(2,0),(0,3),(3,4),(3,5),(4,5)])
17 | partition = wm.WeightedPartition(graph, communities=[set([0, 1, 2]), set([3, 4, 5])])
18 | pc = nr.participation_coefficient(partition)
19 | self.assertAlmostEqual(pc,{0: 0.4444444444444444, 1: 0.0, 2: 0.0, 3: 0.4444444444444444, 4: 0.0, 5: 0.0})
20 | def test_high_low_wcd(self):
21 | graph = nx.Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(6,7),(7,8),(8,6)])
22 | partition = wm.WeightedPartition(graph, communities=[set([0, 1, 2, 3, 4, 5]), set([8, 6, 7])])
23 | wcd = nr.within_community_degree(partition)
24 | self.assertAlmostEqual(wcd, {0: 2.2360679774997898, 1: -0.44721359549995804, 2: -0.44721359549995804, 3: -0.44721359549995804, 4: -0.44721359549995804, 5: -0.44721359549995804, 6: 0.0, 7: 0.0, 8: 0.0})
25 |
--------------------------------------------------------------------------------
/brainx/tests/test_util.py:
--------------------------------------------------------------------------------
1 | """Tests for the util module"""
2 |
3 | #-----------------------------------------------------------------------------
4 | # Imports
5 | #-----------------------------------------------------------------------------
6 | import unittest
7 |
8 | # Third party
9 | import nose.tools as nt
10 | import numpy as np
11 | import numpy.testing as npt
12 |
13 | # Our own
14 | from brainx import util
15 |
16 | #-----------------------------------------------------------------------------
17 | # Functions
18 | #-----------------------------------------------------------------------------
19 |
20 | def make_testlists():
21 | """ set up some data for the conversion tests,
22 | e.g. listlist_to_listset"""
23 | listlist = [[0,1,2],[3,4,5],[6,7,8,9]]
24 | dictset = {val:set(item) for val, item in enumerate(listlist)}
25 | listset = [set(item) for item in listlist]
26 | return listlist, listset, dictset
27 |
28 | def test_dictset_to_listset():
29 | _, listset, dictset = make_testlists()
30 | new_list_set = util.dictset_to_listset(dictset)
31 | npt.assert_equal(new_list_set, listset)
32 | with npt.assert_raises(ValueError):
33 | # catch bad type
34 | tmp = util.dictset_to_listset(listset)
35 |
36 | def test_listset_to_dictset():
37 | _, listset, dictset = make_testlists()
38 |
new_dict_set = util.listset_to_dictset(listset) 39 | npt.assert_equal(new_dict_set, dictset) 40 | # capture wrong input type 41 | with npt.assert_raises(ValueError): 42 | tmp = util.listset_to_dictset(dictset) 43 | 44 | def test_no_repeats_in_listlist(): 45 | jnk = [[0,1,2],[3,4,5]] # all unique 46 | nt.assert_true(util._no_repeats_in_listlist(jnk)) 47 | jnk = [[0,1,2], [0,1,2]] # repeats 48 | nt.assert_false(util._no_repeats_in_listlist(jnk)) 49 | with npt.assert_raises(ValueError): 50 | util._no_repeats_in_listlist({0:0}) 51 | with npt.assert_raises(ValueError): 52 | util._no_repeats_in_listlist([set([0,1,2])]) 53 | 54 | def test_contains_only(): 55 | listlist, listset, dictset = make_testlists() 56 | nt.assert_true(util._contains_only(listlist, list)) 57 | nt.assert_true(util._contains_only(listset, set)) 58 | nt.assert_true(util._contains_only(dictset, set)) 59 | nt.assert_false(util._contains_only([1,2,3], set)) 60 | 61 | 62 | def test_listlist_to_listset(): 63 | listlist, listset, _ = make_testlists() 64 | new_listset = util.listlist_to_listset(listlist) 65 | npt.assert_equal(new_listset, listset) 66 | with npt.assert_raises(ValueError): 67 | util.listlist_to_listset([[0,1,2],[0,1,2]]) 68 | with npt.assert_raises(ValueError): 69 | util.listlist_to_listset({}) 70 | 71 | def test_slice_data(): 72 | subcond, blocks, subjects, nodes = 5, 10, 20, 4 73 | data_4d = np.ones((blocks, subjects, nodes, nodes)) 74 | data_5d = np.ones((subcond, blocks, subjects, nodes, nodes)) 75 | sym_4d = util.slice_data(data_4d, subjects - 1 , blocks - 1 ) 76 | sym_5d = util.slice_data(data_5d, subjects -1 , blocks-1, subcond-1) 77 | npt.assert_equal(sym_4d.shape, (nodes, nodes)) 78 | npt.assert_equal(sym_5d.shape, (nodes, nodes)) 79 | npt.assert_raises(IndexError, util.slice_data, data_5d, subjects, blocks) 80 | 81 | 82 | def test_all_positive(): 83 | jnk = np.random.random(40) 84 | npt.assert_equal(util.all_positive(jnk), True) 85 | # zeros counted as positive 86 | jnk[0] = 0 87 | npt.assert_equal(util.all_positive(jnk), True) 88 | # find real negative 89 | jnk = jnk - 0.5 90 | npt.assert_equal(util.all_positive(jnk), False) 91 | 92 | def test_make_cost_thresh_lookup(): 93 | adj_mat = np.zeros((10,10)) 94 | ind = np.triu_indices(10,1) 95 | thresholds = np.linspace(.1, .8, 45) 96 | adj_mat[ind] = thresholds 97 | lookup = util.make_cost_thresh_lookup(adj_mat) 98 | 99 | npt.assert_equal(sorted(thresholds, reverse=True), lookup.weight) 100 | npt.assert_equal(lookup[0].cost < lookup[-1].cost, True) 101 | # costs in ascending order 102 | ## last vector is same as second vector rounded to 2 decimals 103 | npt.assert_almost_equal(lookup.actual_cost, lookup.cost, decimal=2) 104 | # add nan to adj_mat to raise error 105 | adj_mat[2,:] = np.nan 106 | npt.assert_raises(ValueError, util.make_cost_thresh_lookup, adj_mat) 107 | 108 | 109 | def test_cost_size(): 110 | n_nodes = 5 111 | ## NOTE DeprecationWarnings are ignored by default in 2.7 112 | #npt.assert_warns(UserWarning, util.cost_size, n_nodes) 113 | 114 | 115 | 116 | class TestCost2Thresh(unittest.TestCase): 117 | def setUp(self): 118 | nnodes, nsub, nblocks, nsubblocks = 45, 20, 6, 2 119 | prng = np.random.RandomState(42) 120 | self.data_5d = prng.random_sample((nsubblocks, nblocks, 121 | nsub, nnodes, nnodes)) 122 | ind = np.triu_indices(nnodes, k=1) 123 | nedges = (np.empty((nnodes, nnodes))[ind]).shape[0] 124 | costs, _, _ = util.cost_size(nnodes) 125 | self.nedges = nedges 126 | self.costs = costs 127 | self.lookup = np.zeros((nsubblocks, nblocks, nsub,2, 
nedges)) 128 | bigcost =np.tile(costs[1:], nblocks*nsubblocks*nsub) 129 | bigcost.shape = (nsubblocks, nblocks, nsub, nedges) 130 | self.lookup[:,:,:,1,:] = bigcost 131 | for sblock in range(nsubblocks): 132 | for block in range(nblocks): 133 | for sid in range(nsub): 134 | tmp = self.data_5d[sblock, block, sid] 135 | self.lookup[sblock,block,sid,0,:] = sorted(tmp[ind], 136 | reverse=True) 137 | 138 | def test_cost2thresh2(self): 139 | thr = util.cost2thresh2(self.costs[100], 0,0,0,self.lookup) 140 | real_thr = self.lookup[0,0,0,0,100-1] 141 | npt.assert_almost_equal(thr, real_thr, decimal=7) 142 | 143 | def test_cost2thresh(self): 144 | lookup = self.lookup[0].squeeze() 145 | thr = util.cost2thresh(self.costs[100],0,0,lookup) 146 | real_thr = lookup[0,0,0,100-1]# costs padded by zero 147 | npt.assert_almost_equal(thr, real_thr, decimal=7) 148 | 149 | def test_format_matrix(self): 150 | bool_matrix = util.format_matrix2(self.data_5d, 0,0,0, 151 | self.lookup, self.costs[100]) 152 | npt.assert_equal(bool_matrix.sum(), 100 -1) 153 | thresh_matrix = util.format_matrix2(self.data_5d, 0,0,0, 154 | self.lookup, self.costs[100],asbool = False) 155 | npt.assert_equal(bool_matrix.sum()== thresh_matrix.sum(), False) 156 | npt.assert_almost_equal(thresh_matrix.sum(), 157 | 94.183321784530804, decimal=7) 158 | ## test format_matrix call on format_matrix2 159 | bool_matrix_sm = util.format_matrix(self.data_5d[0].squeeze(), 160 | 0,0, self.lookup[0].squeeze(), self.costs[100]) 161 | npt.assert_equal(bool_matrix.sum(), bool_matrix_sm.sum()) 162 | 163 | 164 | def test_threshold_adjacency_matrix(self): 165 | adj_matrix = self.data_5d[0,0,0].squeeze() 166 | mask, real_cost = util.threshold_adjacency_matrix(adj_matrix, 0) 167 | npt.assert_equal(mask.sum(), 0) 168 | npt.assert_equal(real_cost, 0) 169 | mask, real_cost = util.threshold_adjacency_matrix(adj_matrix, .9) 170 | npt.assert_equal(mask.sum(), 1800) 171 | npt.assert_equal(real_cost, 0.9) 172 | 173 | def test_find_true_cost(self): 174 | adj_matrix = self.data_5d[0,0,0].squeeze() 175 | mask, real_cost = util.threshold_adjacency_matrix(adj_matrix, 0.2) 176 | true_cost = util.find_true_cost(mask) 177 | npt.assert_equal(real_cost, true_cost) 178 | ## test on rounded array 179 | adj_matrix = self.data_5d[0,0,0].squeeze().round(decimals = 1) 180 | mask, expected_cost = util.threshold_adjacency_matrix(adj_matrix, 0.2) 181 | true_cost = util.find_true_cost(mask) 182 | ## the cost of the thresholded matrix will be less than expected 183 | npt.assert_equal(real_cost > true_cost, True) 184 | 185 | 186 | 187 | 188 | def test_apply_cost(): 189 | corr_mat = np.array([[0.0, 0.5, 0.3, 0.2, 0.1], 190 | [0.5, 0.0, 0.4, 0.1, 0.2], 191 | [0.3, 0.4, 0.0, 0.7, 0.2], 192 | [0.2, 0.1, 0.7, 0.0, 0.4], 193 | [0.1, 0.2, 0.2, 0.4, 0.0]]) 194 | # A five-node undirected graph has ten possible edges. Thus, the result 195 | # here should be a graph with five edges. 196 | possible_edges = 10 197 | cost = 0.5 198 | thresholded_corr_mat, threshold = util.apply_cost(corr_mat, cost, 199 | possible_edges) 200 | nt.assert_true(np.allclose(thresholded_corr_mat, 201 | np.array([[0.0, 0.0, 0.0, 0.0, 0.0], 202 | [0.5, 0.0, 0.0, 0.0, 0.0], 203 | [0.3, 0.4, 0.0, 0.0, 0.0], 204 | [0.0, 0.0, 0.7, 0.0, 0.0], 205 | [0.0, 0.0, 0.0, 0.4, 0.0]]))) 206 | nt.assert_almost_equal(threshold, 0.3) 207 | # Check the case in which cost requires that one of several identical edges 208 | # be kept and the others removed. apply_cost should keep all of these 209 | # identical edges. 
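# As a quick worked example of the cost convention used throughout these
# tests (plain arithmetic, not brainx API): a five-node undirected graph has
# n*(n-1)/2 == 10 possible edges, so cost 0.5 keeps the 5 strongest weights.
#
#     def n_edges_at_cost(n_nodes, cost):
#         """Hypothetical helper: number of edges kept at a given cost."""
#         possible = n_nodes * (n_nodes - 1) // 2
#         return int(round(cost * possible))
#
#     assert n_edges_at_cost(5, 0.5) == 5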
210 | #
211 | # To test this, I need to update only a value in the lower triangle. The
212 | # function zeroes out the upper triangle immediately.
213 | corr_mat[2, 0] = 0.2
214 | thresholded_corr_mat, threshold = util.apply_cost(corr_mat, cost,
215 | possible_edges)
216 | nt.assert_true(np.allclose(thresholded_corr_mat,
217 | np.array([[0.0, 0.0, 0.0, 0.0, 0.0],
218 | [0.5, 0.0, 0.0, 0.0, 0.0],
219 | [0.2, 0.4, 0.0, 0.0, 0.0],
220 | [0.2, 0.0, 0.7, 0.0, 0.0],
221 | [0.0, 0.2, 0.2, 0.4, 0.0]])))
222 | nt.assert_almost_equal(threshold, 0.2)
223 |
224 |
225 | def assert_graphs_equal(g,h):
226 | """Trivial 'equality' check for graphs"""
227 | if not(g.nodes()==h.nodes() and g.edges()==h.edges()):
228 | raise AssertionError("Graphs not equal")
229 |
230 |
231 | def test_regular_lattice():
232 | for n in [8,11,16]:
233 | # Be careful not to try and run with k > n-1, as the naive formula
234 | # below becomes invalid.
235 | for k in [2,4,7]:
236 | a = util.regular_lattice(n,k)
237 | msg = 'n,k = %s' % ( (n,k), )
238 | nedge = n * (k//2) # even part of k
239 | nt.assert_equal(a.number_of_edges(), nedge, msg)
240 |
241 | def test_diag_stack():
242 | """Manual verification of simple stacking."""
243 | a = np.empty((2,2))
244 | a.fill(1)
245 | b = np.empty((3,3))
246 | b.fill(2)
247 | c = np.empty((2,3))
248 | c.fill(3)
249 |
250 | d = util.diag_stack((a,b,c))
251 |
252 | d_true = np.array([[ 1., 1., 0., 0., 0., 0., 0., 0.],
253 | [ 1., 1., 0., 0., 0., 0., 0., 0.],
254 | [ 0., 0., 2., 2., 2., 0., 0., 0.],
255 | [ 0., 0., 2., 2., 2., 0., 0., 0.],
256 | [ 0., 0., 2., 2., 2., 0., 0., 0.],
257 | [ 0., 0., 0., 0., 0., 3., 3., 3.],
258 | [ 0., 0., 0., 0., 0., 3., 3., 3.]])
259 |
260 | npt.assert_equal(d, d_true)
261 |
262 |
263 | def test_no_empty_modules():
264 | """Test the utility that validates partitions against empty modules.
265 | """
266 | a = {0: [1,2], 1:[3,4]}
267 | b = a.copy()
268 | b[2] = []
269 | util.assert_no_empty_modules(a)
270 | nt.assert_raises(ValueError, util.assert_no_empty_modules, b)
271 |
272 | def test_rescale_arr():
273 | array = np.arange(5)
274 | scaled = util.rescale_arr(array, 3, 6)
275 | npt.assert_equal(scaled.min(), 3)
276 | scaled = util.rescale_arr(array, -10, 10)
277 | npt.assert_equal(scaled.min(), -10)
278 | npt.assert_equal(scaled.max(), 10)
279 |
280 | def test_normalize():
281 | array = np.arange(5)
282 | result = util.normalize(array)
283 | npt.assert_equal(result.min(), 0)
284 | npt.assert_equal(result.max(), 1)
285 | npt.assert_raises(ValueError, util.normalize, array, 'blueberry', (0,2))
286 |
287 |
288 |
--------------------------------------------------------------------------------
/brainx/tests/test_weighted_modularity.py:
--------------------------------------------------------------------------------
1 | """Tests for the weighted_modularity module"""
2 |
3 | #-----------------------------------------------------------------------------
4 | # Imports
5 | #-----------------------------------------------------------------------------
6 | import os
7 | import unittest
8 |
9 |
10 | # Third party
11 | import networkx as nx
12 | import nose.tools as nt
13 | import numpy as np
14 | import numpy.testing as npt
15 |
16 | # Our own
17 | from .. import util
18 | from ..
import weighted_modularity as wm 19 | 20 | 21 | def get_test_data(): 22 | """ grabs local txt file with adj matrices 23 | Returns 24 | ======= 25 | graph : networkx graph 26 | communities : list of sets 27 | """ 28 | pth, _ = os.path.split(__file__) 29 | testdir = os.path.join(pth, 'tdata_corr_txt') 30 | data_file = os.path.join(testdir, '101_Block01.txt') 31 | mat = np.loadtxt(data_file) 32 | mat[mat<0] = 0 33 | graph = nx.from_numpy_matrix(mat) 34 | # graph has 85 nodes, make generic communities 35 | communities = [set(range(42)), set(range(42,86))] 36 | return graph, communities 37 | 38 | class TestWeightedPartition(unittest.TestCase): 39 | 40 | def setUp(self): 41 | ## generate a default graph and communities 42 | graph, communities = get_test_data() 43 | self.graph = graph 44 | self.communities = communities 45 | 46 | def test_init(self): 47 | part = wm.WeightedPartition(self.graph) 48 | self.assertEqual(type(part.degrees), type({})) 49 | npt.assert_array_almost_equal(part.total_edge_weight, 1500.5653444) 50 | # generated communities 51 | comm = [set([node]) for node in self.graph.nodes()] 52 | self.assertEqual(part.communities, comm) 53 | # test communities cannot be replaced by garbage 54 | with self.assertRaises(TypeError): 55 | part.communities = 11 56 | # doesnt work if nodes are missing from partition 57 | with self.assertRaises(ValueError): 58 | part.communities = [set([1,2,3])] 59 | # but we can pass a valid community partition 60 | part.communities = comm 61 | self.assertEqual(part.communities, comm) 62 | 63 | def test_communities_degree(self): 64 | ## if no community, method will raise error 65 | part = wm.WeightedPartition(self.graph) 66 | part = wm.WeightedPartition(self.graph, self.communities) 67 | cdegree = part.communities_degree() 68 | self.assertEqual(round(cdegree[0]), 1462.0) 69 | 70 | 71 | def test_set_communities(self): 72 | part = wm.WeightedPartition(self.graph, self.communities) 73 | self.assertEqual(part.communities, self.communities) 74 | with self.assertRaises(TypeError): 75 | # raise error if not list of sets 76 | part.set_communities(part.communities[0]) 77 | with self.assertRaises(TypeError): 78 | part.set_communities('a') 79 | with self.assertRaises(ValueError): 80 | ## missing nodes 81 | comm = self.graph.nodes()[:-3] 82 | part.set_communities([set(comm)]) 83 | 84 | def test_allnodes_in_communities(self): 85 | """checks communities contain all nodes 86 | with no repetition""" 87 | part = wm.WeightedPartition(self.graph) 88 | self.assertTrue(part._allnodes_in_communities(self.communities)) 89 | self.assertFalse(part._allnodes_in_communities([self.communities[0]])) 90 | 91 | 92 | def test_get_node_community(self): 93 | part = wm.WeightedPartition(self.graph, self.communities) 94 | self.assertEqual(part.get_node_community(0), 0) 95 | self.assertEqual(part.get_node_community(self.graph.nodes()[-1]),1) 96 | with self.assertRaises(ValueError): 97 | part.get_node_community(-1) 98 | part = wm.WeightedPartition(self.graph) 99 | self.assertEqual(part.get_node_community(0), 0) 100 | 101 | def test_node_degree(self): 102 | part = wm.WeightedPartition(self.graph) # one comm per node 103 | node = 0 104 | res = part.node_degree(node) 105 | npt.assert_almost_equal(res, 37.94151675 ) 106 | 107 | def test_modularity(self): 108 | part = wm.WeightedPartition(self.graph, self.communities) 109 | npt.assert_almost_equal(part.modularity(), 0.0555463) 110 | 111 | 112 | def test_degree_by_community(self): 113 | part = wm.WeightedPartition(self.graph) # one comm per node 114 | 
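# For orientation, the workflow these tests exercise looks roughly like this
# (a sketch using only calls that appear elsewhere in this module):
#
#     g = nx.karate_club_graph()
#     part = wm.WeightedPartition(g)          # default: one community per node
#     q = part.modularity()                   # within- vs between-community weight
#     levels = wm.LouvainCommunityDetection(g).run()  # successively coarser partitions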
## sum of all links in or out of communities;
115 | ## since there is one community per node, this is just each node's weighted degree
116 | tot_per_comm = part.degree_by_community()
117 | degw = list(self.graph.degree(weight='weight').values())
118 | self.assertEqual(tot_per_comm, degw)
119 | ## This isn't true if we have communities with multiple nodes
120 | part_2comm = wm.WeightedPartition(self.graph, self.communities)
121 | self.assertEqual(part_2comm == degw, False)
122 |
123 | def test_degree_within_community(self):
124 | part = wm.WeightedPartition(self.graph) # one comm per node
125 | weights = part.degree_within_community()
126 | ## this includes self links, so
127 | self.assertEqual(weights[0], 1.0)
128 |
129 |
130 |
131 | def test_node_degree_by_community(self):
132 | part = wm.WeightedPartition(self.graph) # one comm per node
133 | node = 0
134 | node2comm_weights = part.node_degree_by_community(node)
135 | # self loops not added to weight
136 | # so communities made only of node should be zero
137 | npt.assert_equal(node2comm_weights[0],0)
138 | # this should be equal to weight between two nodes
139 | neighbor = 1
140 | expected = self.graph[node][neighbor]['weight']
141 | npt.assert_equal(node2comm_weights[neighbor],expected)
142 | part = wm.WeightedPartition(self.graph, self.communities)
143 | node2comm_weights = part.node_degree_by_community(node)
144 | npt.assert_equal(len(node2comm_weights), 2)
145 |
146 |
147 | class TestLouvainCommunityDetection(unittest.TestCase):
148 |
149 | def setUp(self):
150 | ## generate a default graph and communities
151 | graph, communities = get_test_data()
152 | self.graph = graph
153 | self.communities = communities
154 | self.louvain = wm.LouvainCommunityDetection(graph)
155 | self.louvain_comm = wm.LouvainCommunityDetection(graph, communities)
156 |
157 | def test_init(self):
158 | louvain = self.louvain
159 | self.assertEqual(louvain.graph, self.graph)
160 | self.assertEqual(louvain.initial_communities, None)
161 | self.assertEqual(louvain.minthr, 0.0000001)
162 |
163 |
164 | def test_communities_without_node(self):
165 | part = wm.WeightedPartition(self.graph) # one comm per node
166 | node = 0
167 | updated_comm = self.louvain._communities_without_node(part, node)
168 | self.assertEqual(updated_comm[0], set([]))
169 | part = wm.WeightedPartition(self.graph, self.communities)
170 | updated_comm = self.louvain_comm._communities_without_node(part, node)
171 | ## make sure we don't break communities from the original partition
172 | self.assertEqual(part.communities, self.communities)
173 | self.assertEqual(0 not in updated_comm[0], True)
174 |
175 | def test_communities_nodes_alledgesw(self):
176 | part = wm.WeightedPartition(self.graph, self.communities)
177 | node = 0
178 | weights = self.louvain_comm._communities_nodes_alledgesw(part, node)
179 | npt.assert_almost_equal(weights[0], 1424.0220362)
180 | ## test with possible empty node set
181 | part = wm.WeightedPartition(self.graph)
182 | weights = self.louvain._communities_nodes_alledgesw(part, node)
183 | self.assertEqual(weights[0], 0)
184 | # other communities are made up of just one node
185 | self.assertEqual(weights[1], self.graph.degree(weight='weight')[1])
186 |
187 |
188 | def test_calc_delta_modularity(self):
189 | part = wm.WeightedPartition(self.graph) # one comm per node
190 | node = 0
191 | change = self.louvain._calc_delta_modularity(node, part)
192 | self.assertEqual(len(change), len(part.communities))
193 | # change is an array
194 | self.assertEqual(change.shape[0], len(part.communities))
195
| self.assertEqual(change[0] < change[1], True) 196 | # this is one comm per node, so once removed from own 197 | # comm, this delta_weight will be zero 198 | self.assertEqual(change[node] , 0) 199 | 200 | def test_move_node(self): 201 | part = wm.WeightedPartition(self.graph) # one comm per node 202 | #move first node to second community 203 | node = 0 204 | comm = 1 205 | newpart = self.louvain._move_node(part, node, comm) 206 | self.assertEqual(set([0,1]) in newpart.communities, True) 207 | ## what happens if node or comm missing 208 | with self.assertRaises(ValueError): 209 | newpart = self.louvain._move_node(part, -1, comm) 210 | invalid_communities = len(part.communities) + 1 211 | with self.assertRaises(IndexError): 212 | newpart = self.louvain._move_node(part, node, invalid_communities) 213 | 214 | def test_gen_dendogram(self): 215 | graph = nx.Graph() 216 | nodeslist = [0,1,2,3,4] 217 | graph.add_nodes_from(nodeslist, weight=True) 218 | louvain = wm.LouvainCommunityDetection(graph) 219 | self.assertRaises(IOError, louvain._gen_dendogram) 220 | 221 | def test_run(self): 222 | karate = nx.karate_club_graph() 223 | louvain = wm.LouvainCommunityDetection(karate) 224 | final_partitions = louvain.run() 225 | self.assertEqual(final_partitions[-1].modularity() > .38, 226 | True) 227 | self.assertEqual(len(final_partitions), 2) 228 | 229 | 230 | def test_combine(self): 231 | 232 | first = [set([0,1,2]), set([3,4,5]), set([6,7])] 233 | second = [set([0,2]), set([1])] 234 | npt.assert_raises(ValueError, self.louvain._combine, second, first) 235 | res = self.louvain._combine(first, second) 236 | npt.assert_equal(res, [set([0,1,2,6,7]), set([3,4,5])]) 237 | 238 | def test_meta_graph(): 239 | graph, communities = get_test_data() 240 | part = wm.WeightedPartition(graph) 241 | metagraph,_ = wm.meta_graph(part) 242 | ## each node is a comm, so no change to metagraph 243 | npt.assert_equal(metagraph.nodes(), graph.nodes()) 244 | ## two communitties 245 | part = wm.WeightedPartition(graph, communities) 246 | metagraph,mapping = wm.meta_graph(part) 247 | npt.assert_equal(metagraph.nodes(), [0,1]) 248 | npt.assert_equal(metagraph.edges(), [(0,0),(0,1), (1,1)]) 249 | # mapping should map new node 0 to communities[0] 250 | npt.assert_equal(mapping[0], communities[0]) 251 | ## weight should not be lost between graphs 252 | npt.assert_almost_equal(metagraph.size(weight='weight'), 253 | graph.size(weight='weight')) 254 | -------------------------------------------------------------------------------- /brainx/version.py: -------------------------------------------------------------------------------- 1 | """brainx version/release information""" 2 | 3 | # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" 4 | _version_major = 0 5 | _version_minor = 1 6 | _version_micro = '' # use '' for first of series, number for 1 and above 7 | _version_extra = 'dev' 8 | #_version_extra = '' # Uncomment this for full releases 9 | 10 | # Construct full version string from these. 
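# For example, with the values above this assembles to '0.1.dev':
# _ver == [0, 1, 'dev'] and '.'.join(map(str, _ver)) == '0.1.dev'.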
11 | _ver = [_version_major, _version_minor] 12 | if _version_micro: 13 | _ver.append(_version_micro) 14 | if _version_extra: 15 | _ver.append(_version_extra) 16 | 17 | __version__ = '.'.join(map(str, _ver)) 18 | 19 | classifiers = ["Development Status :: 3 - Alpha", 20 | "Environment :: Console", 21 | "Intended Audience :: Science/Research", 22 | "License :: OSI Approved :: BSD License", 23 | "Operating System :: OS Independent", 24 | "Programming Language :: Python", 25 | "Topic :: Scientific/Engineering"] 26 | 27 | description = "Brainx: timeseries analysis for neuroscience data" 28 | 29 | # Note: this long_description is actually a copy/paste from the top-level 30 | # README.txt, so that it shows up nicely on PyPI. So please remember to edit 31 | # it only in one place and sync it correctly. 32 | long_description = """ 33 | ================================================ 34 | Brainx: network analysis for neuroimaging data 35 | ================================================ 36 | 37 | Brainx provides a set of tools, based on the NetworkX graph theory package, for 38 | the analysis of graph properties of neuroimaging data. 39 | 40 | 41 | Installation 42 | ============ 43 | 44 | For a normal installation, simply type:: 45 | 46 | python setup.py install [other options here] 47 | 48 | To install using setuptools support, use:: 49 | 50 | python setup_egg.py install [other options here] 51 | 52 | For example, to install using a development-mode setup in your personal user 53 | directory, use:: 54 | 55 | python setup_egg.py develop --prefix=$HOME/.local 56 | 57 | 58 | License information 59 | =================== 60 | 61 | Brainx is licensed under the terms of the new BSD license. See the file 62 | "LICENSE" for information on the history of this software, terms & conditions 63 | for usage, and a DISCLAIMER OF ALL WARRANTIES. 64 | """ 65 | 66 | # Other constants for distutils setup() call 67 | 68 | name = "brainx" 69 | maintainer = "Nipy Developers" 70 | maintainer_email = "nipy-devel@neuroimaging.scipy.org" 71 | url = "http://nipy.org/brainx" 72 | download_url = "http://github.com/nipy/brainx/downloads" 73 | license = "Simplified BSD" 74 | author = "Brainx developers" 75 | author_email = "nipy-devel@neuroimaging.scipy.org" 76 | platforms = "OS Independent" 77 | version = __version__ 78 | packages = ['brainx', 79 | 'brainx.tests', 80 | ] 81 | package_data = {"brainx": ["LICENSE"]} 82 | -------------------------------------------------------------------------------- /brainx/weighted_modularity.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import copy 4 | import numpy as np 5 | import networkx as nx 6 | from . import util 7 | 8 | 9 | class WeightedPartition(object): 10 | """Represent a weighted Graph Partition 11 | 12 | The main object keeping track of the nodes in each partition is the 13 | communities attribute. 
14 | """ 15 | def __init__(self, graph, communities=None): 16 | """ initialize partition of graph, with optional communities 17 | 18 | Parameters 19 | ---------- 20 | graph : networkx graph 21 | communities : list of sets, optional 22 | a list of sets with nodes in each set 23 | if communities is None, will initialize with 24 | one per node 25 | 26 | Returns 27 | ------- 28 | part : WeightedPartition object 29 | """ 30 | # assert graph has edge weights, and no negative weights 31 | mat = nx.adjacency_matrix(graph).todense() 32 | if mat.min() < 0: 33 | raise ValueError('Graph has invalid negative weights') 34 | 35 | self.graph = nx.from_numpy_matrix(mat) 36 | if communities is None: 37 | self._communities = self._init_communities_from_nodes() 38 | else: 39 | self.set_communities(communities) 40 | self.total_edge_weight = graph.size(weight='weight') 41 | self.degrees = graph.degree(weight='weight') 42 | 43 | @property 44 | def communities(self): 45 | """list of sets decribing the communities""" 46 | return self._communities 47 | 48 | @communities.setter 49 | def communities(self, value): 50 | self.set_communities(value) 51 | 52 | def _init_communities_from_nodes(self): 53 | """ creates a new communities with one node per community 54 | eg nodes = [0,1,2] -> communities = [set([0]), set([1]), set([2])] 55 | """ 56 | return [set([node]) for node in self.graph.nodes()] 57 | 58 | 59 | def communities_degree(self): 60 | """ calculates the joint degree of a community""" 61 | communities_degrees = [] 62 | for com in self.communities: 63 | tmp = np.sum([self.graph.degree(weight='weight')[x] for x in com]) 64 | communities_degrees.append(tmp) 65 | return communities_degrees 66 | 67 | def get_node_community(self, node): 68 | """returns the node's community""" 69 | try: 70 | return [val for val,x in enumerate(self.communities) if node in x][0] 71 | except IndexError: 72 | if not node in self.graph.nodes(): 73 | raise ValueError('node:{0} is not in the graph'.format(node)) 74 | else: 75 | raise StandardError('cannot find community for node '\ 76 | '{0}'.format(node)) 77 | 78 | def set_communities(self, communities): 79 | """ set the partition communities to the input communities""" 80 | if self._allnodes_in_communities(communities): 81 | self._communities = communities 82 | else: 83 | raise ValueError('missing nodes {0}'.format(communities)) 84 | 85 | 86 | def _allnodes_in_communities(self, communities): 87 | """ checks all nodes are represented in communities, also catches 88 | duplicate nodes""" 89 | if not (isinstance(communities, list) and \ 90 | util._contains_only(communities, set)): 91 | raise TypeError('communities should be list of sets, not '\ 92 | '{}'.format(communities)) 93 | ## simple count to check for all nodes 94 | return len(self.graph.nodes()) == \ 95 | len([item for com in communities for item in com]) 96 | 97 | def node_degree(self, node): 98 | """ find the summed weight of all node edges 99 | """ 100 | return self.graph.degree(weight='weight')[node] 101 | 102 | 103 | def node_degree_by_community(self, node): 104 | """ Find the number of links from a node to each community 105 | Returns 106 | ------- 107 | comm_weights : list 108 | list holding the weighted degree of a node to each community 109 | """ 110 | comm_weights = [0] * len(self.communities) 111 | for neighbor, data in self.graph[node].items(): 112 | if neighbor == node: 113 | continue 114 | tmpcomm = self.get_node_community(neighbor) 115 | comm_weights[tmpcomm] += data.get('weight', 1) 116 | return comm_weights 117 | 118 | 
119 | def degree_by_community(self): 120 | """ sum of all edges within or between communities 121 | for each community 122 | Returns 123 | ------- 124 | weights : list 125 | list is size of total number of communities""" 126 | comm = self.communities 127 | weights = [0] * len(comm) 128 | all_degree_weights = self.graph.degree(weight='weight') 129 | for node, weight in all_degree_weights.items(): 130 | node_comm = self.get_node_community(node) 131 | weights[node_comm] += weight 132 | return weights 133 | 134 | def degree_within_community(self): 135 | """ sum of weighted edges strictly inside each community 136 | including self loops""" 137 | comm = self.communities 138 | weights = [0] * len(comm) 139 | comm = self.communities 140 | for val, nodeset in enumerate(comm): 141 | for node in nodeset: 142 | nodes_within = set([x for x in self.graph[node].keys() \ 143 | if x in nodeset]) 144 | if len(nodes_within) < 1: 145 | continue 146 | if node in nodes_within: 147 | weights[val] += self.graph[node][node]['weight'] 148 | nodes_within.remove(node) 149 | weights[val] += np.sum(self.graph[node][x]['weight']/ 2. \ 150 | for x in nodes_within) 151 | return weights 152 | 153 | 154 | def modularity(self): 155 | """Calculates the proportion of within community edges compared to 156 | between community edges for all nodes in graph with given partition 157 | 158 | Parameters 159 | ---------- 160 | partition : weighted graph partition object 161 | 162 | Returns 163 | ------- 164 | modularity : float 165 | value reflecting the relation of within community connection 166 | to across community connections 167 | 168 | 169 | References 170 | ---------- 171 | .. [1] M. Newman, "Fast algorithm for detecting community structure 172 | in networks", Physical Review E vol. 69(6), 2004. 173 | 174 | """ 175 | if self.graph.is_directed(): 176 | raise TypeError('only valid on non directed graphs') 177 | 178 | m2 = self.total_edge_weight 179 | internal_connect = np.array(self.degree_within_community()) 180 | total = np.array(self.degree_by_community()) 181 | return np.sum(internal_connect/m2 - (total/(2*m2))**2) 182 | 183 | 184 | 185 | class LouvainCommunityDetection(object): 186 | """ Uses the Louvain Community Detection algorithm to detect 187 | communities in networks 188 | 189 | Parameters 190 | ---------- 191 | graph : netwrokx Graph object 192 | communities : list of sets, optional 193 | initial identified communties 194 | minthr : float, optional 195 | minimum threshold value for change in modularity 196 | default(0.0000001) 197 | 198 | Methods 199 | ------- 200 | run() 201 | run the algorithm to find partitions at multiple levels 202 | 203 | Examples 204 | -------- 205 | >>> louvain = LouvainCommunityDetection(graph) 206 | >>> partitions = louvain.run() 207 | >>> ## best partition 208 | >>> partitions[-1].modularity() 209 | 210 | References 211 | ---------- 212 | .. [1] VD Blondel, JL Guillaume, R Lambiotte, E Lefebvre, "Fast 213 | unfolding of communities in large networks", Journal of Statistical 214 | Mechanics: Theory and Experiment vol.10, P10008 2008. 
215 |
216 | """
217 |
218 | def __init__(self, graph, communities=None, minthr=0.0000001):
219 | """initialize the algorithm with a graph and (optional) initial
220 | community partition; use minthr to provide a stopping limit
221 | for the algorithm (based on change in modularity)"""
222 | self.graph = graph
223 | self.initial_communities = communities
224 | self.minthr = minthr
225 |
226 | def run(self):
227 | """ run the algorithm to find partitions in graph
228 |
229 | Returns
230 | -------
231 | partitions : list
232 | a list containing instances of a WeightedPartition with the
233 | community partition reflecting that level of the algorithm;
234 | the last item in the list is the final partition,
235 | the first item was the initial partition
236 | """
237 | dendogram = self._gen_dendogram()
238 | partitions = self._partitions_from_dendogram(dendogram)
239 | return [WeightedPartition(self.graph, part) for part in partitions]
240 |
241 |
242 | def _gen_dendogram(self):
243 | """generate dendogram based on multiple levels of partitioning
244 | """
245 | if type(self.graph) != nx.Graph :
246 | raise TypeError("Bad graph type, use only non directed graph")
247 |
248 | # special case: when there are no edges,
249 | # the best partition is every node in its own community
250 | if self.graph.number_of_edges() == 0 :
251 | raise IOError('graph has no edges, why do you want to partition?')
252 |
253 | current_graph = self.graph.copy()
254 | part = WeightedPartition(self.graph, self.initial_communities)
255 | # first pass
256 | mod = part.modularity()
257 | dendogram = list()
258 | new_part = self._one_level(part, self.minthr)
259 | new_mod = new_part.modularity()
260 |
261 | dendogram.append(new_part)
262 | mod = new_mod
263 | current_graph, _ = meta_graph(new_part)
264 |
265 | while True :
266 | partition = WeightedPartition(current_graph)
267 | newpart = self._one_level(partition, self.minthr)
268 | new_mod = newpart.modularity()
269 | if new_mod - mod < self.minthr :
270 | break
271 |
272 | dendogram.append(newpart)
273 | mod = new_mod
274 | current_graph,_ = meta_graph(newpart)
275 | return dendogram
276 |
277 | def _one_level(self, part, min_modularity=.0000001):
278 | """run one level of partitioning"""
279 | curr_mod = part.modularity()
280 | modified = True
281 | while modified:
282 | modified = False
283 | all_nodes = [x for x in part.graph.nodes()]
284 | np.random.shuffle(all_nodes)
285 | for node in all_nodes:
286 | node_comm = part.get_node_community(node)
287 | delta_mod = self._calc_delta_modularity(node, part)
288 | #print(node, delta_mod)
289 | if delta_mod.max() <= 0.0:
290 | # no increase by moving this node
291 | continue
292 | best_comm = delta_mod.argmax()
293 | if not best_comm == node_comm:
294 | new_part = self._move_node(part, node, best_comm)
295 | part = new_part
296 | modified = True
297 | new_mod = part.modularity()
298 | change_in_modularity = new_mod - curr_mod
299 | if change_in_modularity < min_modularity:
300 | return part
301 | return part
302 |
303 | def _calc_delta_modularity(self, node, part):
304 | """calculate the increase(s) in modularity if node is moved to other
305 | communities
306 | deltamod = inC - totc * ki / total_weight"""
307 | noded = part.node_degree(node)
308 | dnc = part.node_degree_by_community(node)
309 | totc = self._communities_nodes_alledgesw(part, node)
310 | total_weight = part.total_edge_weight
311 | # cast to arrays to improve calc
312 | dnc = np.array(dnc)
313 | totc = np.array(totc)
314 | return dnc - totc*noded / (total_weight*2)
315 |
316 |
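# Note on the formula above: the returned gains match the Blondel et al.
# delta-Q terms only up to a constant positive factor, which is all the
# argmax in _one_level needs.  Informally (our reading, not the paper's
# exact normalisation):
#
#     dnc[c]  ~ k_i,in     weight from node i into community c
#     totc[c] ~ Sigma_tot  summed degree weight of community c (minus node i)
#     gain[c] ~ k_i,in - Sigma_tot * k_i / (2m)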
@staticmethod 317 | def _communities_without_node(part, node): 318 | """ returns a version of the partition with the node 319 | removed, may result in empty communities""" 320 | node_comm = part.get_node_community(node) 321 | newpart = copy.deepcopy(part.communities) 322 | newpart[node_comm].remove(node) 323 | return newpart 324 | 325 | def _communities_nodes_alledgesw(self, part, removed_node): 326 | """ returns the sum of all weighted edges to nodes in each 327 | community, once the removed_node is removed 328 | this refers to totc in Blondel paper""" 329 | comm_wo_node = self._communities_without_node(part, removed_node) 330 | weights = [0] * len(comm_wo_node) 331 | ## make a list of all nodes degree weights 332 | all_degree_weights = list(part.graph.degree(weight='weight').values()) 333 | all_degree_weights = np.array(all_degree_weights) 334 | for val, nodeset in enumerate(comm_wo_node): 335 | node_index = np.array(list(nodeset)) #index of nodes in community 336 | #sum the weighted degree of nodes in community 337 | if len(node_index)<1: 338 | continue 339 | weights[val] = np.sum(all_degree_weights[node_index]) 340 | return weights 341 | 342 | @staticmethod 343 | def _move_node(part, node, new_comm): 344 | """generate a new partition with node put into new community 345 | designated by index (new_comm) into existing part.communities""" 346 | ## copy 347 | new_community = [x.copy() for x in part.communities] 348 | ## update 349 | curr_node_comm = part.get_node_community(node) 350 | ## remove 351 | new_community[curr_node_comm].remove(node) 352 | new_community[new_comm].add(node) 353 | # Remove any empty sets from ne 354 | new_community = [x for x in new_community if len(x) > 0] 355 | return WeightedPartition(part.graph, new_community) 356 | 357 | def _partitions_from_dendogram(self, dendo): 358 | """ returns community partitions based on results in dendogram 359 | """ 360 | all_partitions = [] 361 | init_part = dendo[0].communities 362 | all_partitions.append(init_part) 363 | for partition in dendo[1:]: 364 | init_part = self._combine(init_part, partition.communities) 365 | all_partitions.append(init_part) 366 | return all_partitions 367 | 368 | @staticmethod 369 | def _combine(prev, next): 370 | """combines nodes in sets (prev) based on mapping defined by 371 | (next) (which now treats a previous communitity as a node) 372 | but maintains specification of all original nodes 373 | 374 | Parameters 375 | ---------- 376 | prev : list of sets 377 | communities partition 378 | next : list of sets 379 | next level communities partition 380 | 381 | Examples 382 | -------- 383 | >>> prev = [set([0,1,2]), set([3,4]), set([5,6])] 384 | >>> next = [set([0,1]), set([2])] 385 | >>> result = _combine(prev, next) 386 | [set([0, 1, 2, 3, 4]), set([5,6])] 387 | """ 388 | expected_len = np.max([x for sublist in next for x in sublist]) 389 | if not len(prev) == expected_len + 1: 390 | raise ValueError('Number of nodes in next does not'\ 391 | ' match number of communities in prev') 392 | ret = [] 393 | for itemset in next: 394 | newset = set() 395 | for tmps in itemset: 396 | newset.update(prev[tmps]) 397 | ret.append(newset) 398 | return ret 399 | 400 | 401 | def meta_graph(partition): 402 | """creates a new graph object based on input graph and partition 403 | 404 | Takes WeightedPartition object with specified communities and 405 | creates a new graph object where 406 | 1. communities are now the nodes in the new graph 407 | 2. 
the new edges are created based on the node to node connections (weights) 408 | from communities in the original graph, and weighted accordingly, 409 | (this includes self-loops) 410 | 411 | Returns 412 | ------- 413 | metagraph : networkX graph 414 | mapping : dict 415 | dict showing the mapping from newnode -> original community nodes 416 | """ 417 | metagraph = nx.Graph() 418 | # new nodes are communities 419 | newnodes = [val for val,_ in enumerate(partition.communities)] 420 | mapping = {val:nodes for val,nodes in enumerate(partition.communities)} 421 | metagraph.add_nodes_from(newnodes, weight=0.0) 422 | 423 | for node1, node2, data in partition.graph.edges_iter(data=True): 424 | node1_community = partition.get_node_community(node1) 425 | node2_community = partition.get_node_community(node2) 426 | try: 427 | tmpw = metagraph[node1_community][node2_community]['weight'] 428 | except KeyError: 429 | tmpw = 0 430 | metagraph.add_edge( 431 | node1_community, 432 | node2_community, 433 | weight = tmpw + data['weight']) 434 | 435 | return metagraph, mapping 436 | 437 | 438 | 439 | 440 | 441 | 442 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | 9 | # Internal variables. 10 | PAPEROPT_a4 = -D latex_paper_size=a4 11 | PAPEROPT_letter = -D latex_paper_size=letter 12 | ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 13 | 14 | .PHONY: help clean html dirhtml api pickle json htmlhelp qthelp latex changes linkcheck doctest 15 | 16 | help: 17 | @echo "Please use \`make ' where is one of" 18 | @echo " html to make standalone HTML files" 19 | @echo " dirhtml to make HTML files named index.html in directories" 20 | @echo " pickle to make pickle files" 21 | @echo " json to make JSON files" 22 | @echo " htmlhelp to make HTML files and a HTML help project" 23 | @echo " qthelp to make HTML files and a qthelp project" 24 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 25 | @echo " changes to make an overview of all changed/added/deprecated items" 26 | @echo " linkcheck to check all external links for integrity" 27 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 28 | 29 | clean: 30 | -rm -rf _build/* 31 | 32 | html: api 33 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html 34 | @echo 35 | @echo "Build finished. The HTML pages are in _build/html." 36 | 37 | dirhtml: 38 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml 39 | @echo 40 | @echo "Build finished. The HTML pages are in _build/dirhtml." 41 | 42 | api: 43 | python tools/build_modref_templates.py 44 | @echo "Build API docs finished." 45 | 46 | 47 | pickle: 48 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle 49 | @echo 50 | @echo "Build finished; now you can process the pickle files." 51 | 52 | json: 53 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json 54 | @echo 55 | @echo "Build finished; now you can process the JSON files." 56 | 57 | htmlhelp: 58 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp 59 | @echo 60 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 61 | ".hhp project file in _build/htmlhelp." 
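# A typical local docs build with this Makefile (assuming sphinx-build is on
# PATH) would be, e.g.:
#
#   make clean
#   make html    # the html target runs the `api` target first to regenerate templates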
62 | 63 | qthelp: 64 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp 65 | @echo 66 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 67 | ".qhcp project file in _build/qthelp, like this:" 68 | @echo "# qcollectiongenerator _build/qthelp/Brainx.qhcp" 69 | @echo "To view the help file:" 70 | @echo "# assistant -collectionFile _build/qthelp/Brainx.qhc" 71 | 72 | latex: 73 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex 74 | @echo 75 | @echo "Build finished; the LaTeX files are in _build/latex." 76 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 77 | "run these through (pdf)latex." 78 | 79 | changes: 80 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes 81 | @echo 82 | @echo "The overview file is in _build/changes." 83 | 84 | linkcheck: 85 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck 86 | @echo 87 | @echo "Link check complete; look for any errors in the above output " \ 88 | "or in _build/linkcheck/output.txt." 89 | 90 | doctest: 91 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest 92 | @echo "Testing of doctests in the sources finished, look at the " \ 93 | "results in _build/doctest/output.txt." 94 | -------------------------------------------------------------------------------- /doc/api/index.rst: -------------------------------------------------------------------------------- 1 | .. _api-index: 2 | 3 | ##### 4 | API 5 | ##### 6 | 7 | :Release: |version| 8 | :Date: |today| 9 | 10 | .. include:: generated/gen.rst 11 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Brainx documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Sep 2 14:05:06 2009. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | #sys.path.append(os.path.abspath('.')) 20 | 21 | sys.path.append(os.path.abspath('sphinxext')) 22 | 23 | # -- General configuration ----------------------------------------------------- 24 | 25 | # Add any Sphinx extension module names here, as strings. They can be extensions 26 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 27 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 28 | 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 29 | 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 30 | 'sphinx.ext.ifconfig', 31 | 32 | 'numpydoc', 33 | 'inheritance_diagram', 34 | 'ipython_console_highlighting', 35 | 'only_directives', 36 | ] 37 | 38 | # Add any paths that contain templates here, relative to this directory. 39 | templates_path = ['_templates'] 40 | 41 | # The suffix of source filenames. 42 | source_suffix = '.rst' 43 | 44 | # The encoding of source files. 45 | #source_encoding = 'utf-8' 46 | 47 | # The master toctree document. 48 | master_doc = 'index' 49 | 50 | # General information about the project. 
51 | project = u'Brainx' 52 | copyright = u'2009, Caterina Gratton, Emi Nomura, Fernando Perez' 53 | 54 | # The version info for the project you're documenting, acts as replacement for 55 | # |version| and |release|, also used in various other places throughout the 56 | # built documents. 57 | # 58 | # The short X.Y version. 59 | version = '0.1' 60 | # The full version, including alpha/beta/rc tags. 61 | release = '0.1' 62 | 63 | # The language for content autogenerated by Sphinx. Refer to documentation 64 | # for a list of supported languages. 65 | #language = None 66 | 67 | # There are two options for replacing |today|: either, you set today to some 68 | # non-false value, then it is used: 69 | #today = '' 70 | # Else, today_fmt is used as the format for a strftime call. 71 | #today_fmt = '%B %d, %Y' 72 | 73 | # List of documents that shouldn't be included in the build. 74 | #unused_docs = [] 75 | 76 | # List of directories, relative to source directory, that shouldn't be searched 77 | # for source files. 78 | exclude_trees = ['_build'] 79 | 80 | # The reST default role (used for this markup: `text`) to use for all documents. 81 | #default_role = None 82 | 83 | # If true, '()' will be appended to :func: etc. cross-reference text. 84 | #add_function_parentheses = True 85 | 86 | # If true, the current module name will be prepended to all description 87 | # unit titles (such as .. function::). 88 | #add_module_names = True 89 | 90 | # If true, sectionauthor and moduleauthor directives will be shown in the 91 | # output. They are ignored by default. 92 | #show_authors = False 93 | 94 | # The name of the Pygments (syntax highlighting) style to use. 95 | pygments_style = 'sphinx' 96 | 97 | # A list of ignored prefixes for module index sorting. 98 | #modindex_common_prefix = [] 99 | 100 | 101 | # -- Options for HTML output --------------------------------------------------- 102 | 103 | # The theme to use for HTML and HTML Help pages. Major themes that come with 104 | # Sphinx are currently 'default' and 'sphinxdoc'. 105 | html_theme = 'sphinxdoc' 106 | #html_theme = 'default' 107 | 108 | # Theme options are theme-specific and customize the look and feel of a theme 109 | # further. For a list of options available for each theme, see the 110 | # documentation. 111 | #html_theme_options = {} 112 | 113 | # Add any paths that contain custom themes here, relative to this directory. 114 | #html_theme_path = [] 115 | 116 | # The name for this set of Sphinx documents. If None, it defaults to 117 | # " v documentation". 118 | #html_title = None 119 | 120 | # A shorter title for the navigation bar. Default is the same as html_title. 121 | #html_short_title = None 122 | 123 | # The name of an image file (relative to this directory) to place at the top 124 | # of the sidebar. 125 | #html_logo = None 126 | 127 | # The name of an image file (within the static path) to use as favicon of the 128 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 129 | # pixels large. 130 | #html_favicon = None 131 | 132 | # Add any paths that contain custom static files (such as style sheets) here, 133 | # relative to this directory. They are copied after the builtin static files, 134 | # so a file named "default.css" will overwrite the builtin "default.css". 135 | html_static_path = ['_static'] 136 | 137 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 138 | # using the given strftime format. 
139 | #html_last_updated_fmt = '%b %d, %Y' 140 | 141 | # If true, SmartyPants will be used to convert quotes and dashes to 142 | # typographically correct entities. 143 | #html_use_smartypants = True 144 | 145 | # Custom sidebar templates, maps document names to template names. 146 | #html_sidebars = {} 147 | 148 | # Additional templates that should be rendered to pages, maps page names to 149 | # template names. 150 | #html_additional_pages = {} 151 | 152 | # If false, no module index is generated. 153 | #html_use_modindex = True 154 | 155 | # If false, no index is generated. 156 | #html_use_index = True 157 | 158 | # If true, the index is split into individual pages for each letter. 159 | #html_split_index = False 160 | 161 | # If true, links to the reST sources are added to the pages. 162 | #html_show_sourcelink = True 163 | 164 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 165 | #html_show_sphinx = True 166 | 167 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 168 | #html_show_copyright = True 169 | 170 | # If true, an OpenSearch description file will be output, and all pages will 171 | # contain a tag referring to it. The value of this option must be the 172 | # base URL from which the finished HTML is served. 173 | #html_use_opensearch = '' 174 | 175 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 176 | #html_file_suffix = '' 177 | 178 | # Output file base name for HTML help builder. 179 | htmlhelp_basename = 'Brainxdoc' 180 | 181 | 182 | # -- Options for LaTeX output -------------------------------------------------- 183 | 184 | # The paper size ('letter' or 'a4'). 185 | #latex_paper_size = 'letter' 186 | 187 | # The font size ('10pt', '11pt' or '12pt'). 188 | #latex_font_size = '10pt' 189 | 190 | # Grouping the document tree into LaTeX files. List of tuples 191 | # (source start file, target name, title, author, documentclass [howto/manual]). 192 | latex_documents = [ 193 | ('index', 'Brainx.tex', u'Brainx Documentation', 194 | u'Caterina Gratton, Emi Nomura, Fernando Perez', 'manual'), 195 | ] 196 | 197 | # The name of an image file (relative to this directory) to place at the top of 198 | # the title page. 199 | #latex_logo = None 200 | 201 | # For "manual" documents, if this is true, then toplevel headings are parts, 202 | # not chapters. 203 | #latex_use_parts = False 204 | 205 | # Additional stuff for the LaTeX preamble. 206 | #latex_preamble = '' 207 | 208 | # Documents to append as an appendix to all manuals. 209 | #latex_appendices = [] 210 | 211 | # If false, no module index is generated. 212 | #latex_use_modindex = True 213 | 214 | 215 | # Example configuration for intersphinx: refer to the Python standard library. 216 | intersphinx_mapping = {'http://docs.python.org/': None} 217 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. Brainx documentation master file, created by 2 | sphinx-quickstart on Wed Sep 2 14:05:06 2009. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Brainx's documentation! 7 | ================================== 8 | 9 | Contents: 10 | 11 | .. 
toctree:: 12 | :maxdepth: 2 13 | 14 | research_notes/index 15 | 16 | api/index.rst 17 | 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | 26 | -------------------------------------------------------------------------------- /doc/research_notes/index.rst: -------------------------------------------------------------------------------- 1 | .. _research_notes: 2 | 3 | .. This is the source doc for the nipy users guide. The users guide 4 | includes the FAQ (a directory below), and glossary. 5 | 6 | ================ 7 | Research Notes 8 | ================ 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | 13 | preprocessing 14 | 15 | -------------------------------------------------------------------------------- /doc/research_notes/preprocessing.rst: -------------------------------------------------------------------------------- 1 | ========================================== 2 | Pre-processing steps in Rest.TMS dataset 3 | ========================================== 4 | 5 | The data lives in:: 6 | 7 | /r/d2/despo/enomura/data/Rest.Lesion/Data/NNN 8 | 9 | where NNN runs 101..122. Each directory has a subdir like 20090402/ and one 10 | called Total/ that contains the processed data in a NIfTI/ subdir. 11 | 12 | The TMS data is similarly organized, but in:: 13 | 14 | /home/despo/cgratton/data/Rest.TMS/Data 15 | 16 | The following code (in red) was used to preprocess the Rest.TMS data. Most of 17 | this was executed with Alloy, which is a MATLAB script that sends out AFNI 18 | commands to the shell. The only SPM step is normalizing. AFNI has an 19 | auto-normalize command, but it writes out files in talairach space, making it 20 | difficult to use the AAL atlas. 21 | 22 | 1. DICOM conversion -- Anatomical scans:: 23 | 24 | to3d -anat -prefix 101-T1.nii *.dcm 25 | 3drefit -deoblique -xorigin cen -yorigin cen -zorigin cen 101-T1.nii 26 | 3dresample -orient RPI -prefix 101-T1.nii -inset 101-T1.nii 27 | 28 | 2. DICOM conversion -- Functional scans:: 29 | 30 | to3d -epan -skip_outliers -assume_dicom_mosaic -time:zt 24 435 2 alt+z2 -prefix 101-EPI-001.nii *.dcm 31 | 3drefit -deoblique -xorigin cen -yorigin cen -zorigin cen 101-EPI-001.nii 32 | 3dresample -orient RPI -prefix 101-EPI-001.nii -inset 101-EPI-001.nii 33 | 34 | 3. Generate Mean image:: 35 | 36 | 3dTstat -prefix 101-Mean.nii 101-EPI-001.nii 37 | 38 | 4. Generate Mask image:: 39 | 40 | 3dAutomask -prefix 101-Mask.nii 101-Mean.nii 41 | 42 | 5. Volume registration:: 43 | 44 | 3dvolreg -twodup -verbose -tshift 0 -base 101-Mean.nii -maxdisp1D 101-MD1D.txt -1Dfile 101-EPI-001-1D.txt -prefix 101-EPI-001-CoReg.nii 101-EPI-001.nii 45 | 46 | [Note: all blocks are registered to the Mean image, which is the first block. 47 | This block is also used to coregister the anatomical image] 48 | 49 | 6. Smoothing:: 50 | 51 | 3dmerge -doall -1blur_fwhm 5 -prefix 101-EPI-001-CoReg-Smooth.nii 101-EPI-001-CoReg.nii 52 | 53 | 7. Anatomical alignment:: 54 | 55 | 3dZcutup -keep 80 240 -prefix 101-T1-Cut.nii 101-T1.nii 56 | 3drefit -deoblique -xorigin cen -yorigin cen -zorigin cen 101-T1-Cut.nii 57 | 3dresample -orient RPI -prefix 101-T1-Cut.nii -inset 101-T1-Cut.nii 58 | 59 | lpc_align.py -epi 101-Mean.nii -anat 101-T1-Cut.nii -strip_anat_skull no -suffix -CoReg 60 | 61 | [Note: the zcutup command trims the image so that it only contains the brain. 62 | The following commands then recenter the brain and make sure it is oriented 63 | properly] 64 | 65 | 8. 
--------------------------------------------------------------------------------
/doc/sphinxext/README.txt:
--------------------------------------------------------------------------------
1 | ===================
2 |  Sphinx Extensions
3 | ===================
4 | 
5 | We've copied these sphinx extensions over from nipy-core. Any edits
6 | should be done upstream in nipy-core, not here in brainx!
7 | 
8 | These are a few sphinx extensions we are using to build the brainx
9 | documentation. In this file we list where they each come from, since we intend
10 | to always push back upstream any modifications or improvements we make to them.
11 | 
12 | It's worth noting that some of these are being carried (as copies) by more
13 | than one project. Hopefully once they mature a little more, they will be
14 | incorporated back into sphinx itself, so that all projects can use a common
15 | base.
16 | 
17 | * From numpy:
18 |   * docscrape.py
19 |   * docscrape_sphinx.py
20 |   * numpydoc.py
21 | 
22 | * From matplotlib:
23 |   * inheritance_diagram.py
24 |   * ipython_console_highlighting.py
25 |   * only_directives.py
26 | 
--------------------------------------------------------------------------------
/doc/sphinxext/docscrape.py:
--------------------------------------------------------------------------------
1 | """Extract reference documentation from the NumPy source tree.
2 | 
3 | """
4 | 
5 | from __future__ import print_function
6 | 
7 | import inspect
8 | import textwrap
9 | import re
10 | import pydoc
11 | from StringIO import StringIO
12 | from warnings import warn
13 | 
14 | class Reader(object):
15 |     """A line-based string reader.
16 | 17 | """ 18 | def __init__(self, data): 19 | """ 20 | Parameters 21 | ---------- 22 | data : str 23 | String with lines separated by '\n'. 24 | 25 | """ 26 | if isinstance(data,list): 27 | self._str = data 28 | else: 29 | self._str = data.split('\n') # store string as list of lines 30 | 31 | self.reset() 32 | 33 | def __getitem__(self, n): 34 | return self._str[n] 35 | 36 | def reset(self): 37 | self._l = 0 # current line nr 38 | 39 | def read(self): 40 | if not self.eof(): 41 | out = self[self._l] 42 | self._l += 1 43 | return out 44 | else: 45 | return '' 46 | 47 | def seek_next_non_empty_line(self): 48 | for l in self[self._l:]: 49 | if l.strip(): 50 | break 51 | else: 52 | self._l += 1 53 | 54 | def eof(self): 55 | return self._l >= len(self._str) 56 | 57 | def read_to_condition(self, condition_func): 58 | start = self._l 59 | for line in self[start:]: 60 | if condition_func(line): 61 | return self[start:self._l] 62 | self._l += 1 63 | if self.eof(): 64 | return self[start:self._l+1] 65 | return [] 66 | 67 | def read_to_next_empty_line(self): 68 | self.seek_next_non_empty_line() 69 | def is_empty(line): 70 | return not line.strip() 71 | return self.read_to_condition(is_empty) 72 | 73 | def read_to_next_unindented_line(self): 74 | def is_unindented(line): 75 | return (line.strip() and (len(line.lstrip()) == len(line))) 76 | return self.read_to_condition(is_unindented) 77 | 78 | def peek(self,n=0): 79 | if self._l + n < len(self._str): 80 | return self[self._l + n] 81 | else: 82 | return '' 83 | 84 | def is_empty(self): 85 | return not ''.join(self._str).strip() 86 | 87 | 88 | class NumpyDocString(object): 89 | def __init__(self,docstring): 90 | docstring = textwrap.dedent(docstring).split('\n') 91 | 92 | self._doc = Reader(docstring) 93 | self._parsed_data = { 94 | 'Signature': '', 95 | 'Summary': [''], 96 | 'Extended Summary': [], 97 | 'Parameters': [], 98 | 'Returns': [], 99 | 'Raises': [], 100 | 'Warns': [], 101 | 'Other Parameters': [], 102 | 'Attributes': [], 103 | 'Methods': [], 104 | 'See Also': [], 105 | 'Notes': [], 106 | 'Warnings': [], 107 | 'References': '', 108 | 'Examples': '', 109 | 'index': {} 110 | } 111 | 112 | self._parse() 113 | 114 | def __getitem__(self,key): 115 | return self._parsed_data[key] 116 | 117 | def __setitem__(self,key,val): 118 | if not self._parsed_data.has_key(key): 119 | warn("Unknown section %s" % key) 120 | else: 121 | self._parsed_data[key] = val 122 | 123 | def _is_at_section(self): 124 | self._doc.seek_next_non_empty_line() 125 | 126 | if self._doc.eof(): 127 | return False 128 | 129 | l1 = self._doc.peek().strip() # e.g. Parameters 130 | 131 | if l1.startswith('.. 
index::'):
132 |             return True
133 | 
134 |         l2 = self._doc.peek(1).strip() # ---------- or ==========
135 |         return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
136 | 
137 |     def _strip(self,doc):
138 |         i = 0
139 |         j = 0
140 |         for i,line in enumerate(doc):
141 |             if line.strip(): break
142 | 
143 |         for j,line in enumerate(doc[::-1]):
144 |             if line.strip(): break
145 | 
146 |         return doc[i:len(doc)-j]
147 | 
148 |     def _read_to_next_section(self):
149 |         section = self._doc.read_to_next_empty_line()
150 | 
151 |         while not self._is_at_section() and not self._doc.eof():
152 |             if not self._doc.peek(-1).strip(): # previous line was empty
153 |                 section += ['']
154 | 
155 |             section += self._doc.read_to_next_empty_line()
156 | 
157 |         return section
158 | 
159 |     def _read_sections(self):
160 |         while not self._doc.eof():
161 |             data = self._read_to_next_section()
162 |             name = data[0].strip()
163 | 
164 |             if name.startswith('..'): # index section
165 |                 yield name, data[1:]
166 |             elif len(data) < 2:
167 |                 yield StopIteration
168 |             else:
169 |                 yield name, self._strip(data[2:])
170 | 
171 |     def _parse_param_list(self,content):
172 |         r = Reader(content)
173 |         params = []
174 |         while not r.eof():
175 |             header = r.read().strip()
176 |             if ' : ' in header:
177 |                 arg_name, arg_type = header.split(' : ')[:2]
178 |             else:
179 |                 arg_name, arg_type = header, ''
180 | 
181 |             desc = r.read_to_next_unindented_line()
182 |             desc = dedent_lines(desc)
183 | 
184 |             params.append((arg_name,arg_type,desc))
185 | 
186 |         return params
187 | 
188 | 
189 |     _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
190 |                            r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
191 |     def _parse_see_also(self, content):
192 |         """
193 |         func_name : Descriptive text
194 |             continued text
195 |         another_func_name : Descriptive text
196 |         func_name1, func_name2, :meth:`func_name`, func_name3
197 | 
198 |         """
199 |         items = []
200 | 
201 |         def parse_item_name(text):
202 |             """Match ':role:`name`' or 'name'"""
203 |             m = self._name_rgx.match(text)
204 |             if m:
205 |                 g = m.groups()
206 |                 if g[1] is None:
207 |                     return g[3], None
208 |                 else:
209 |                     return g[2], g[1]
210 |             raise ValueError("%s is not an item name" % text)
211 | 
212 |         def push_item(name, rest):
213 |             if not name:
214 |                 return
215 |             name, role = parse_item_name(name)
216 |             items.append((name, list(rest), role))
217 |             del rest[:]
218 | 
219 |         current_func = None
220 |         rest = []
221 | 
222 |         for line in content:
223 |             if not line.strip(): continue
224 | 
225 |             m = self._name_rgx.match(line)
226 |             if m and line[m.end():].strip().startswith(':'):
227 |                 push_item(current_func, rest)
228 |                 current_func, line = line[:m.end()], line[m.end():]
229 |                 rest = [line.split(':', 1)[1].strip()]
230 |                 if not rest[0]:
231 |                     rest = []
232 |             elif not line.startswith(' '):
233 |                 push_item(current_func, rest)
234 |                 current_func = None
235 |                 if ',' in line:
236 |                     for func in line.split(','):
237 |                         push_item(func, [])
238 |                 elif line.strip():
239 |                     current_func = line
240 |             elif current_func is not None:
241 |                 rest.append(line.strip())
242 |         push_item(current_func, rest)
243 |         return items
244 | 
245 |     def _parse_index(self, section, content):
246 |         """
247 |         .. 
index: default 248 | :refguide: something, else, and more 249 | 250 | """ 251 | def strip_each_in(lst): 252 | return [s.strip() for s in lst] 253 | 254 | out = {} 255 | section = section.split('::') 256 | if len(section) > 1: 257 | out['default'] = strip_each_in(section[1].split(','))[0] 258 | for line in content: 259 | line = line.split(':') 260 | if len(line) > 2: 261 | out[line[1]] = strip_each_in(line[2].split(',')) 262 | return out 263 | 264 | def _parse_summary(self): 265 | """Grab signature (if given) and summary""" 266 | if self._is_at_section(): 267 | return 268 | 269 | summary = self._doc.read_to_next_empty_line() 270 | summary_str = " ".join([s.strip() for s in summary]).strip() 271 | if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): 272 | self['Signature'] = summary_str 273 | if not self._is_at_section(): 274 | self['Summary'] = self._doc.read_to_next_empty_line() 275 | else: 276 | self['Summary'] = summary 277 | 278 | if not self._is_at_section(): 279 | self['Extended Summary'] = self._read_to_next_section() 280 | 281 | def _parse(self): 282 | self._doc.reset() 283 | self._parse_summary() 284 | 285 | for (section,content) in self._read_sections(): 286 | if not section.startswith('..'): 287 | section = ' '.join([s.capitalize() for s in section.split(' ')]) 288 | if section in ('Parameters', 'Attributes', 'Methods', 289 | 'Returns', 'Raises', 'Warns'): 290 | self[section] = self._parse_param_list(content) 291 | elif section.startswith('.. index::'): 292 | self['index'] = self._parse_index(section, content) 293 | elif section == 'See Also': 294 | self['See Also'] = self._parse_see_also(content) 295 | else: 296 | self[section] = content 297 | 298 | # string conversion routines 299 | 300 | def _str_header(self, name, symbol='-'): 301 | return [name, len(name)*symbol] 302 | 303 | def _str_indent(self, doc, indent=4): 304 | out = [] 305 | for line in doc: 306 | out += [' '*indent + line] 307 | return out 308 | 309 | def _str_signature(self): 310 | if self['Signature']: 311 | return [self['Signature'].replace('*','\*')] + [''] 312 | else: 313 | return [''] 314 | 315 | def _str_summary(self): 316 | if self['Summary']: 317 | return self['Summary'] + [''] 318 | else: 319 | return [] 320 | 321 | def _str_extended_summary(self): 322 | if self['Extended Summary']: 323 | return self['Extended Summary'] + [''] 324 | else: 325 | return [] 326 | 327 | def _str_param_list(self, name): 328 | out = [] 329 | if self[name]: 330 | out += self._str_header(name) 331 | for param,param_type,desc in self[name]: 332 | out += ['%s : %s' % (param, param_type)] 333 | out += self._str_indent(desc) 334 | out += [''] 335 | return out 336 | 337 | def _str_section(self, name): 338 | out = [] 339 | if self[name]: 340 | out += self._str_header(name) 341 | out += self[name] 342 | out += [''] 343 | return out 344 | 345 | def _str_see_also(self, func_role): 346 | if not self['See Also']: return [] 347 | out = [] 348 | out += self._str_header("See Also") 349 | last_had_desc = True 350 | for func, desc, role in self['See Also']: 351 | if role: 352 | link = ':%s:`%s`' % (role, func) 353 | elif func_role: 354 | link = ':%s:`%s`' % (func_role, func) 355 | else: 356 | link = "`%s`_" % func 357 | if desc or last_had_desc: 358 | out += [''] 359 | out += [link] 360 | else: 361 | out[-1] += ", %s" % link 362 | if desc: 363 | out += self._str_indent([' '.join(desc)]) 364 | last_had_desc = True 365 | else: 366 | last_had_desc = False 367 | out += [''] 368 | return out 369 | 370 | def _str_index(self): 371 | idx = 
self['index'] 372 | out = [] 373 | out += ['.. index:: %s' % idx.get('default','')] 374 | for section, references in idx.items(): 375 | if section == 'default': 376 | continue 377 | out += [' :%s: %s' % (section, ', '.join(references))] 378 | return out 379 | 380 | def __str__(self, func_role=''): 381 | out = [] 382 | out += self._str_signature() 383 | out += self._str_summary() 384 | out += self._str_extended_summary() 385 | for param_list in ('Parameters','Returns','Raises'): 386 | out += self._str_param_list(param_list) 387 | out += self._str_section('Warnings') 388 | out += self._str_see_also(func_role) 389 | for s in ('Notes','References','Examples'): 390 | out += self._str_section(s) 391 | out += self._str_index() 392 | return '\n'.join(out) 393 | 394 | 395 | def indent(str,indent=4): 396 | indent_str = ' '*indent 397 | if str is None: 398 | return indent_str 399 | lines = str.split('\n') 400 | return '\n'.join(indent_str + l for l in lines) 401 | 402 | def dedent_lines(lines): 403 | """Deindent a list of lines maximally""" 404 | return textwrap.dedent("\n".join(lines)).split("\n") 405 | 406 | def header(text, style='-'): 407 | return text + '\n' + style*len(text) + '\n' 408 | 409 | 410 | class FunctionDoc(NumpyDocString): 411 | def __init__(self, func, role='func', doc=None): 412 | self._f = func 413 | self._role = role # e.g. "func" or "meth" 414 | if doc is None: 415 | doc = inspect.getdoc(func) or '' 416 | try: 417 | NumpyDocString.__init__(self, doc) 418 | except ValueError, e: 419 | print('*'*78) 420 | print("ERROR: '%s' while parsing `%s`" % (e, self._f)) 421 | print('*'*78) 422 | #print "Docstring follows:" 423 | #print doclines 424 | #print '='*78 425 | 426 | if not self['Signature']: 427 | func, func_name = self.get_func() 428 | try: 429 | # try to read signature 430 | argspec = inspect.getargspec(func) 431 | argspec = inspect.formatargspec(*argspec) 432 | argspec = argspec.replace('*','\*') 433 | signature = '%s%s' % (func_name, argspec) 434 | except TypeError, e: 435 | signature = '%s()' % func_name 436 | self['Signature'] = signature 437 | 438 | def get_func(self): 439 | func_name = getattr(self._f, '__name__', self.__class__.__name__) 440 | if inspect.isclass(self._f): 441 | func = getattr(self._f, '__call__', self._f.__init__) 442 | else: 443 | func = self._f 444 | return func, func_name 445 | 446 | def __str__(self): 447 | out = '' 448 | 449 | func, func_name = self.get_func() 450 | signature = self['Signature'].replace('*', '\*') 451 | 452 | roles = {'func': 'function', 453 | 'meth': 'method'} 454 | 455 | if self._role: 456 | if not roles.has_key(self._role): 457 | print("Warning: invalid role %s" % self._role) 458 | out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), 459 | func_name) 460 | 461 | out += super(FunctionDoc, self).__str__(func_role=self._role) 462 | return out 463 | 464 | 465 | class ClassDoc(NumpyDocString): 466 | def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None): 467 | if not inspect.isclass(cls): 468 | raise ValueError("Initialise using a class. Got %r" % cls) 469 | self._cls = cls 470 | 471 | if modulename and not modulename.endswith('.'): 472 | modulename += '.' 
473 | self._mod = modulename 474 | self._name = cls.__name__ 475 | self._func_doc = func_doc 476 | 477 | if doc is None: 478 | doc = pydoc.getdoc(cls) 479 | 480 | NumpyDocString.__init__(self, doc) 481 | 482 | @property 483 | def methods(self): 484 | return [name for name,func in inspect.getmembers(self._cls) 485 | if not name.startswith('_') and callable(func)] 486 | 487 | def __str__(self): 488 | out = '' 489 | out += super(ClassDoc, self).__str__() 490 | out += "\n\n" 491 | 492 | #for m in self.methods: 493 | # print "Parsing `%s`" % m 494 | # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n' 495 | # out += '.. index::\n single: %s; %s\n\n' % (self._name, m) 496 | 497 | return out 498 | 499 | 500 | -------------------------------------------------------------------------------- /doc/sphinxext/docscrape_sphinx.py: -------------------------------------------------------------------------------- 1 | import re, inspect, textwrap, pydoc 2 | from docscrape import NumpyDocString, FunctionDoc, ClassDoc 3 | 4 | class SphinxDocString(NumpyDocString): 5 | # string conversion routines 6 | def _str_header(self, name, symbol='`'): 7 | return ['.. rubric:: ' + name, ''] 8 | 9 | def _str_field_list(self, name): 10 | return [':' + name + ':'] 11 | 12 | def _str_indent(self, doc, indent=4): 13 | out = [] 14 | for line in doc: 15 | out += [' '*indent + line] 16 | return out 17 | 18 | def _str_signature(self): 19 | return [''] 20 | if self['Signature']: 21 | return ['``%s``' % self['Signature']] + [''] 22 | else: 23 | return [''] 24 | 25 | def _str_summary(self): 26 | return self['Summary'] + [''] 27 | 28 | def _str_extended_summary(self): 29 | return self['Extended Summary'] + [''] 30 | 31 | def _str_param_list(self, name): 32 | out = [] 33 | if self[name]: 34 | out += self._str_field_list(name) 35 | out += [''] 36 | for param,param_type,desc in self[name]: 37 | out += self._str_indent(['**%s** : %s' % (param.strip(), 38 | param_type)]) 39 | out += [''] 40 | out += self._str_indent(desc,8) 41 | out += [''] 42 | return out 43 | 44 | def _str_section(self, name): 45 | out = [] 46 | if self[name]: 47 | out += self._str_header(name) 48 | out += [''] 49 | content = textwrap.dedent("\n".join(self[name])).split("\n") 50 | out += content 51 | out += [''] 52 | return out 53 | 54 | def _str_see_also(self, func_role): 55 | out = [] 56 | if self['See Also']: 57 | see_also = super(SphinxDocString, self)._str_see_also(func_role) 58 | out = ['.. seealso::', ''] 59 | out += self._str_indent(see_also[2:]) 60 | return out 61 | 62 | def _str_warnings(self): 63 | out = [] 64 | if self['Warnings']: 65 | out = ['.. warning::', ''] 66 | out += self._str_indent(self['Warnings']) 67 | return out 68 | 69 | def _str_index(self): 70 | idx = self['index'] 71 | out = [] 72 | if len(idx) == 0: 73 | return out 74 | 75 | out += ['.. 
index:: %s' % idx.get('default','')] 76 | for section, references in idx.items(): 77 | if section == 'default': 78 | continue 79 | elif section == 'refguide': 80 | out += [' single: %s' % (', '.join(references))] 81 | else: 82 | out += [' %s: %s' % (section, ','.join(references))] 83 | return out 84 | 85 | def _str_references(self): 86 | out = [] 87 | if self['References']: 88 | out += self._str_header('References') 89 | if isinstance(self['References'], str): 90 | self['References'] = [self['References']] 91 | out.extend(self['References']) 92 | out += [''] 93 | return out 94 | 95 | def __str__(self, indent=0, func_role="obj"): 96 | out = [] 97 | out += self._str_signature() 98 | out += self._str_index() + [''] 99 | out += self._str_summary() 100 | out += self._str_extended_summary() 101 | for param_list in ('Parameters', 'Attributes', 'Methods', 102 | 'Returns','Raises'): 103 | out += self._str_param_list(param_list) 104 | out += self._str_warnings() 105 | out += self._str_see_also(func_role) 106 | out += self._str_section('Notes') 107 | out += self._str_references() 108 | out += self._str_section('Examples') 109 | out = self._str_indent(out,indent) 110 | return '\n'.join(out) 111 | 112 | class SphinxFunctionDoc(SphinxDocString, FunctionDoc): 113 | pass 114 | 115 | class SphinxClassDoc(SphinxDocString, ClassDoc): 116 | pass 117 | 118 | def get_doc_object(obj, what=None, doc=None): 119 | if what is None: 120 | if inspect.isclass(obj): 121 | what = 'class' 122 | elif inspect.ismodule(obj): 123 | what = 'module' 124 | elif callable(obj): 125 | what = 'function' 126 | else: 127 | what = 'object' 128 | if what == 'class': 129 | return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc) 130 | elif what in ('function', 'method'): 131 | return SphinxFunctionDoc(obj, '', doc=doc) 132 | else: 133 | if doc is None: 134 | doc = pydoc.getdoc(obj) 135 | return SphinxDocString(doc) 136 | 137 | -------------------------------------------------------------------------------- /doc/sphinxext/inheritance_diagram.py: -------------------------------------------------------------------------------- 1 | """ 2 | Defines a docutils directive for inserting inheritance diagrams. 3 | 4 | Provide the directive with one or more classes or modules (separated 5 | by whitespace). For modules, all of the classes in that module will 6 | be used. 7 | 8 | Example:: 9 | 10 | Given the following classes: 11 | 12 | class A: pass 13 | class B(A): pass 14 | class C(A): pass 15 | class D(B, C): pass 16 | class E(B): pass 17 | 18 | .. inheritance-diagram: D E 19 | 20 | Produces a graph like the following: 21 | 22 | A 23 | / \ 24 | B C 25 | / \ / 26 | E D 27 | 28 | The graph is inserted as a PNG+image map into HTML and a PDF in 29 | LaTeX. 30 | """ 31 | 32 | import inspect 33 | import os 34 | import re 35 | import subprocess 36 | try: 37 | from hashlib import md5 38 | except ImportError: 39 | from md5 import md5 40 | 41 | from docutils.nodes import Body, Element 42 | from docutils.parsers.rst import directives 43 | from sphinx.roles import xfileref_role 44 | 45 | def my_import(name): 46 | """Module importer - taken from the python documentation. 
47 | 48 | This function allows importing names with dots in them.""" 49 | 50 | mod = __import__(name) 51 | components = name.split('.') 52 | for comp in components[1:]: 53 | mod = getattr(mod, comp) 54 | return mod 55 | 56 | class DotException(Exception): 57 | pass 58 | 59 | class InheritanceGraph(object): 60 | """ 61 | Given a list of classes, determines the set of classes that 62 | they inherit from all the way to the root "object", and then 63 | is able to generate a graphviz dot graph from them. 64 | """ 65 | def __init__(self, class_names, show_builtins=False): 66 | """ 67 | *class_names* is a list of child classes to show bases from. 68 | 69 | If *show_builtins* is True, then Python builtins will be shown 70 | in the graph. 71 | """ 72 | self.class_names = class_names 73 | self.classes = self._import_classes(class_names) 74 | self.all_classes = self._all_classes(self.classes) 75 | if len(self.all_classes) == 0: 76 | raise ValueError("No classes found for inheritance diagram") 77 | self.show_builtins = show_builtins 78 | 79 | py_sig_re = re.compile(r'''^([\w.]*\.)? # class names 80 | (\w+) \s* $ # optionally arguments 81 | ''', re.VERBOSE) 82 | 83 | def _import_class_or_module(self, name): 84 | """ 85 | Import a class using its fully-qualified *name*. 86 | """ 87 | try: 88 | path, base = self.py_sig_re.match(name).groups() 89 | except: 90 | raise ValueError( 91 | "Invalid class or module '%s' specified for inheritance diagram" % name) 92 | fullname = (path or '') + base 93 | path = (path and path.rstrip('.')) 94 | if not path: 95 | path = base 96 | try: 97 | module = __import__(path, None, None, []) 98 | # We must do an import of the fully qualified name. Otherwise if a 99 | # subpackage 'a.b' is requested where 'import a' does NOT provide 100 | # 'a.b' automatically, then 'a.b' will not be found below. This 101 | # second call will force the equivalent of 'import a.b' to happen 102 | # after the top-level import above. 103 | my_import(fullname) 104 | 105 | except ImportError: 106 | raise ValueError( 107 | "Could not import class or module '%s' specified for inheritance diagram" % name) 108 | 109 | try: 110 | todoc = module 111 | for comp in fullname.split('.')[1:]: 112 | todoc = getattr(todoc, comp) 113 | except AttributeError: 114 | raise ValueError( 115 | "Could not find class or module '%s' specified for inheritance diagram" % name) 116 | 117 | # If a class, just return it 118 | if inspect.isclass(todoc): 119 | return [todoc] 120 | elif inspect.ismodule(todoc): 121 | classes = [] 122 | for cls in todoc.__dict__.values(): 123 | if inspect.isclass(cls) and cls.__module__ == todoc.__name__: 124 | classes.append(cls) 125 | return classes 126 | 127 | raise ValueError( 128 | "'%s' does not resolve to a class or module" % name) 129 | 130 | def _import_classes(self, class_names): 131 | """ 132 | Import a list of classes. 133 | """ 134 | classes = [] 135 | for name in class_names: 136 | classes.extend(self._import_class_or_module(name)) 137 | return classes 138 | 139 | def _all_classes(self, classes): 140 | """ 141 | Return a list of all classes that are ancestors of *classes*. 142 | """ 143 | all_classes = {} 144 | 145 | def recurse(cls): 146 | all_classes[cls] = None 147 | for c in cls.__bases__: 148 | if c not in all_classes: 149 | recurse(c) 150 | 151 | for cls in classes: 152 | recurse(cls) 153 | 154 | return all_classes.keys() 155 | 156 | def class_name(self, cls, parts=0): 157 | """ 158 | Given a class object, return a fully-qualified name. 
This 159 | works for things I've tested in matplotlib so far, but may not 160 | be completely general. 161 | """ 162 | module = cls.__module__ 163 | if module == '__builtin__': 164 | fullname = cls.__name__ 165 | else: 166 | fullname = "%s.%s" % (module, cls.__name__) 167 | if parts == 0: 168 | return fullname 169 | name_parts = fullname.split('.') 170 | return '.'.join(name_parts[-parts:]) 171 | 172 | def get_all_class_names(self): 173 | """ 174 | Get all of the class names involved in the graph. 175 | """ 176 | return [self.class_name(x) for x in self.all_classes] 177 | 178 | # These are the default options for graphviz 179 | default_graph_options = { 180 | "rankdir": "LR", 181 | "size": '"8.0, 12.0"' 182 | } 183 | default_node_options = { 184 | "shape": "box", 185 | "fontsize": 10, 186 | "height": 0.25, 187 | "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans", 188 | "style": '"setlinewidth(0.5)"' 189 | } 190 | default_edge_options = { 191 | "arrowsize": 0.5, 192 | "style": '"setlinewidth(0.5)"' 193 | } 194 | 195 | def _format_node_options(self, options): 196 | return ','.join(["%s=%s" % x for x in options.items()]) 197 | def _format_graph_options(self, options): 198 | return ''.join(["%s=%s;\n" % x for x in options.items()]) 199 | 200 | def generate_dot(self, fd, name, parts=0, urls={}, 201 | graph_options={}, node_options={}, 202 | edge_options={}): 203 | """ 204 | Generate a graphviz dot graph from the classes that 205 | were passed in to __init__. 206 | 207 | *fd* is a Python file-like object to write to. 208 | 209 | *name* is the name of the graph 210 | 211 | *urls* is a dictionary mapping class names to http urls 212 | 213 | *graph_options*, *node_options*, *edge_options* are 214 | dictionaries containing key/value pairs to pass on as graphviz 215 | properties. 216 | """ 217 | g_options = self.default_graph_options.copy() 218 | g_options.update(graph_options) 219 | n_options = self.default_node_options.copy() 220 | n_options.update(node_options) 221 | e_options = self.default_edge_options.copy() 222 | e_options.update(edge_options) 223 | 224 | fd.write('digraph %s {\n' % name) 225 | fd.write(self._format_graph_options(g_options)) 226 | 227 | for cls in self.all_classes: 228 | if not self.show_builtins and cls in __builtins__.values(): 229 | continue 230 | 231 | name = self.class_name(cls, parts) 232 | 233 | # Write the node 234 | this_node_options = n_options.copy() 235 | url = urls.get(self.class_name(cls)) 236 | if url is not None: 237 | this_node_options['URL'] = '"%s"' % url 238 | fd.write(' "%s" [%s];\n' % 239 | (name, self._format_node_options(this_node_options))) 240 | 241 | # Write the edges 242 | for base in cls.__bases__: 243 | if not self.show_builtins and base in __builtins__.values(): 244 | continue 245 | 246 | base_name = self.class_name(base, parts) 247 | fd.write(' "%s" -> "%s" [%s];\n' % 248 | (base_name, name, 249 | self._format_node_options(e_options))) 250 | fd.write('}\n') 251 | 252 | def run_dot(self, args, name, parts=0, urls={}, 253 | graph_options={}, node_options={}, edge_options={}): 254 | """ 255 | Run graphviz 'dot' over this graph, returning whatever 'dot' 256 | writes to stdout. 257 | 258 | *args* will be passed along as commandline arguments. 259 | 260 | *name* is the name of the graph 261 | 262 | *urls* is a dictionary mapping class names to http urls 263 | 264 | Raises DotException for any of the many os and 265 | installation-related errors that may occur. 
266 |         """
267 |         try:
268 |             dot = subprocess.Popen(['dot'] + list(args),
269 |                                    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
270 |                                    close_fds=True)
271 |         except OSError:
272 |             raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
273 |         except ValueError:
274 |             raise DotException("'dot' called with invalid arguments")
275 |         except:
276 |             raise DotException("Unexpected error calling 'dot'")
277 | 
278 |         self.generate_dot(dot.stdin, name, parts, urls, graph_options,
279 |                           node_options, edge_options)
280 |         dot.stdin.close()
281 |         result = dot.stdout.read()
282 |         returncode = dot.wait()
283 |         if returncode != 0:
284 |             raise DotException("'dot' returned the errorcode %d" % returncode)
285 |         return result
286 | 
287 | class inheritance_diagram(Body, Element):
288 |     """
289 |     A docutils node to use as a placeholder for the inheritance
290 |     diagram.
291 |     """
292 |     pass
293 | 
294 | def inheritance_diagram_directive(name, arguments, options, content, lineno,
295 |                                   content_offset, block_text, state,
296 |                                   state_machine):
297 |     """
298 |     Run when the inheritance_diagram directive is first encountered.
299 |     """
300 |     node = inheritance_diagram()
301 | 
302 |     class_names = arguments
303 | 
304 |     # Create a graph starting with the list of classes
305 |     graph = InheritanceGraph(class_names)
306 | 
307 |     # Create xref nodes for each target of the graph's image map and
308 |     # add them to the doc tree so that Sphinx can resolve the
309 |     # references to real URLs later. These nodes will eventually be
310 |     # removed from the doctree after we're done with them.
311 |     for name in graph.get_all_class_names():
312 |         refnodes, x = xfileref_role(
313 |             'class', ':class:`%s`' % name, name, 0, state)
314 |         node.extend(refnodes)
315 |     # Store the graph object so we can use it to generate the
316 |     # dot file later
317 |     node['graph'] = graph
318 |     # Store the original content for use as a hash
319 |     node['parts'] = options.get('parts', 0)
320 |     node['content'] = " ".join(class_names)
321 |     return [node]
322 | 
323 | def get_graph_hash(node):
324 |     return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
325 | 
326 | def html_output_graph(self, node):
327 |     """
328 |     Output the graph for HTML. This will insert a PNG with clickable
329 |     image map.
330 |     """
331 |     graph = node['graph']
332 |     parts = node['parts']
333 | 
334 |     graph_hash = get_graph_hash(node)
335 |     name = "inheritance%s" % graph_hash
336 |     path = '_images'
337 |     dest_path = os.path.join(setup.app.builder.outdir, path)
338 |     if not os.path.exists(dest_path):
339 |         os.makedirs(dest_path)
340 |     png_path = os.path.join(dest_path, name + ".png")
341 |     path = setup.app.builder.imgpath
342 | 
343 |     # Create a mapping from fully-qualified class names to URLs.
344 |     urls = {}
345 |     for child in node:
346 |         if child.get('refuri') is not None:
347 |             urls[child['reftitle']] = child.get('refuri')
348 |         elif child.get('refid') is not None:
349 |             urls[child['reftitle']] = '#' + child.get('refid')
350 | 
351 |     # These arguments to dot will save a PNG file to disk and write
352 |     # an HTML image map to stdout.
353 |     image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
354 |                               name, parts, urls)
355 |     return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
356 |             (path, name, name, image_map))
357 | 
358 | def latex_output_graph(self, node):
359 |     """
360 |     Output the graph for LaTeX. This will insert a PDF. 
361 | """ 362 | graph = node['graph'] 363 | parts = node['parts'] 364 | 365 | graph_hash = get_graph_hash(node) 366 | name = "inheritance%s" % graph_hash 367 | dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images')) 368 | if not os.path.exists(dest_path): 369 | os.makedirs(dest_path) 370 | pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf")) 371 | 372 | graph.run_dot(['-Tpdf', '-o%s' % pdf_path], 373 | name, parts, graph_options={'size': '"6.0,6.0"'}) 374 | return '\n\\includegraphics{%s}\n\n' % pdf_path 375 | 376 | def visit_inheritance_diagram(inner_func): 377 | """ 378 | This is just a wrapper around html/latex_output_graph to make it 379 | easier to handle errors and insert warnings. 380 | """ 381 | def visitor(self, node): 382 | try: 383 | content = inner_func(self, node) 384 | except DotException, e: 385 | # Insert the exception as a warning in the document 386 | warning = self.document.reporter.warning(str(e), line=node.line) 387 | warning.parent = node 388 | node.children = [warning] 389 | else: 390 | source = self.document.attributes['source'] 391 | self.body.append(content) 392 | node.children = [] 393 | return visitor 394 | 395 | def do_nothing(self, node): 396 | pass 397 | 398 | def setup(app): 399 | setup.app = app 400 | setup.confdir = app.confdir 401 | 402 | app.add_node( 403 | inheritance_diagram, 404 | latex=(visit_inheritance_diagram(latex_output_graph), do_nothing), 405 | html=(visit_inheritance_diagram(html_output_graph), do_nothing)) 406 | app.add_directive( 407 | 'inheritance-diagram', inheritance_diagram_directive, 408 | False, (1, 100, 0), parts = directives.nonnegative_int) 409 | -------------------------------------------------------------------------------- /doc/sphinxext/ipython_console_highlighting.py: -------------------------------------------------------------------------------- 1 | """reST directive for syntax-highlighting ipython interactive sessions. 2 | """ 3 | 4 | #----------------------------------------------------------------------------- 5 | # Needed modules 6 | 7 | # Standard library 8 | import re 9 | 10 | # Third party 11 | from pygments.lexer import Lexer, do_insertions 12 | from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, 13 | PythonTracebackLexer) 14 | from pygments.token import Comment, Generic 15 | 16 | from sphinx import highlighting 17 | 18 | 19 | #----------------------------------------------------------------------------- 20 | # Global constants 21 | line_re = re.compile('.*?\n') 22 | 23 | #----------------------------------------------------------------------------- 24 | # Code begins - classes and functions 25 | 26 | class IPythonConsoleLexer(Lexer): 27 | """ 28 | For IPython console output or doctests, such as: 29 | 30 | .. sourcecode:: ipython 31 | 32 | In [1]: a = 'foo' 33 | 34 | In [2]: a 35 | Out[2]: 'foo' 36 | 37 | In [3]: print a 38 | foo 39 | 40 | In [4]: 1 / 0 41 | 42 | Notes: 43 | 44 | - Tracebacks are not currently supported. 45 | 46 | - It assumes the default IPython prompts, not customized ones. 
47 | """ 48 | 49 | name = 'IPython console session' 50 | aliases = ['ipython'] 51 | mimetypes = ['text/x-ipython-console'] 52 | input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") 53 | output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") 54 | continue_prompt = re.compile(" \.\.\.+:") 55 | tb_start = re.compile("\-+") 56 | 57 | def get_tokens_unprocessed(self, text): 58 | pylexer = PythonLexer(**self.options) 59 | tblexer = PythonTracebackLexer(**self.options) 60 | 61 | curcode = '' 62 | insertions = [] 63 | for match in line_re.finditer(text): 64 | line = match.group() 65 | input_prompt = self.input_prompt.match(line) 66 | continue_prompt = self.continue_prompt.match(line.rstrip()) 67 | output_prompt = self.output_prompt.match(line) 68 | if line.startswith("#"): 69 | insertions.append((len(curcode), 70 | [(0, Comment, line)])) 71 | elif input_prompt is not None: 72 | insertions.append((len(curcode), 73 | [(0, Generic.Prompt, input_prompt.group())])) 74 | curcode += line[input_prompt.end():] 75 | elif continue_prompt is not None: 76 | insertions.append((len(curcode), 77 | [(0, Generic.Prompt, continue_prompt.group())])) 78 | curcode += line[continue_prompt.end():] 79 | elif output_prompt is not None: 80 | insertions.append((len(curcode), 81 | [(0, Generic.Output, output_prompt.group())])) 82 | curcode += line[output_prompt.end():] 83 | else: 84 | if curcode: 85 | for item in do_insertions(insertions, 86 | pylexer.get_tokens_unprocessed(curcode)): 87 | yield item 88 | curcode = '' 89 | insertions = [] 90 | yield match.start(), Generic.Output, line 91 | if curcode: 92 | for item in do_insertions(insertions, 93 | pylexer.get_tokens_unprocessed(curcode)): 94 | yield item 95 | 96 | #----------------------------------------------------------------------------- 97 | # Register the extension as a valid pygments lexer 98 | highlighting.lexers['ipython'] = IPythonConsoleLexer() 99 | -------------------------------------------------------------------------------- /doc/sphinxext/numpydoc.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======== 3 | numpydoc 4 | ======== 5 | 6 | Sphinx extension that handles docstrings in the Numpy standard format. [1] 7 | 8 | It will: 9 | 10 | - Convert Parameters etc. sections to field lists. 11 | - Convert See Also section to a See also entry. 12 | - Renumber references. 13 | - Extract the signature from the docstring, if it can't be determined otherwise. 14 | 15 | .. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard 16 | 17 | """ 18 | 19 | import os, re, pydoc 20 | from docscrape_sphinx import get_doc_object, SphinxDocString 21 | import inspect 22 | 23 | def mangle_docstrings(app, what, name, obj, options, lines, 24 | reference_offset=[0]): 25 | if what == 'module': 26 | # Strip top title 27 | title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', 28 | re.I|re.S) 29 | lines[:] = title_re.sub('', "\n".join(lines)).split("\n") 30 | else: 31 | doc = get_doc_object(obj, what, "\n".join(lines)) 32 | lines[:] = str(doc).split("\n") 33 | 34 | if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ 35 | obj.__name__: 36 | if hasattr(obj, '__module__'): 37 | v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__)) 38 | else: 39 | v = dict(full_name=obj.__name__) 40 | lines += ['', '.. 
htmlonly::', ''] 41 | lines += [' %s' % x for x in 42 | (app.config.numpydoc_edit_link % v).split("\n")] 43 | 44 | # replace reference numbers so that there are no duplicates 45 | references = [] 46 | for l in lines: 47 | l = l.strip() 48 | if l.startswith('.. ['): 49 | try: 50 | references.append(int(l[len('.. ['):l.index(']')])) 51 | except ValueError: 52 | print("WARNING: invalid reference in %s docstring" % name) 53 | 54 | # Start renaming from the biggest number, otherwise we may 55 | # overwrite references. 56 | references.sort() 57 | if references: 58 | for i, line in enumerate(lines): 59 | for r in references: 60 | new_r = reference_offset[0] + r 61 | lines[i] = lines[i].replace('[%d]_' % r, 62 | '[%d]_' % new_r) 63 | lines[i] = lines[i].replace('.. [%d]' % r, 64 | '.. [%d]' % new_r) 65 | 66 | reference_offset[0] += len(references) 67 | 68 | def mangle_signature(app, what, name, obj, options, sig, retann): 69 | # Do not try to inspect classes that don't define `__init__` 70 | if (inspect.isclass(obj) and 71 | 'initializes x; see ' in pydoc.getdoc(obj.__init__)): 72 | return '', '' 73 | 74 | if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return 75 | if not hasattr(obj, '__doc__'): return 76 | 77 | doc = SphinxDocString(pydoc.getdoc(obj)) 78 | if doc['Signature']: 79 | sig = re.sub("^[^(]*", "", doc['Signature']) 80 | return sig, '' 81 | 82 | def initialize(app): 83 | try: 84 | app.connect('autodoc-process-signature', mangle_signature) 85 | except: 86 | monkeypatch_sphinx_ext_autodoc() 87 | 88 | def setup(app, get_doc_object_=get_doc_object): 89 | global get_doc_object 90 | get_doc_object = get_doc_object_ 91 | 92 | app.connect('autodoc-process-docstring', mangle_docstrings) 93 | app.connect('builder-inited', initialize) 94 | app.add_config_value('numpydoc_edit_link', None, True) 95 | 96 | #------------------------------------------------------------------------------ 97 | # Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5) 98 | #------------------------------------------------------------------------------ 99 | 100 | def monkeypatch_sphinx_ext_autodoc(): 101 | global _original_format_signature 102 | import sphinx.ext.autodoc 103 | 104 | if sphinx.ext.autodoc.format_signature is our_format_signature: 105 | return 106 | 107 | print("[numpydoc] Monkeypatching sphinx.ext.autodoc ...") 108 | _original_format_signature = sphinx.ext.autodoc.format_signature 109 | sphinx.ext.autodoc.format_signature = our_format_signature 110 | 111 | def our_format_signature(what, obj): 112 | r = mangle_signature(None, what, None, obj, None, None, None) 113 | if r is not None: 114 | return r[0] 115 | else: 116 | return _original_format_signature(what, obj) 117 | -------------------------------------------------------------------------------- /doc/sphinxext/only_directives.py: -------------------------------------------------------------------------------- 1 | # 2 | # A pair of directives for inserting content that will only appear in 3 | # either html or latex. 
4 | # 5 | 6 | from docutils.nodes import Body, Element 7 | from docutils.parsers.rst import directives 8 | 9 | class only_base(Body, Element): 10 | def dont_traverse(self, *args, **kwargs): 11 | return [] 12 | 13 | class html_only(only_base): 14 | pass 15 | 16 | class latex_only(only_base): 17 | pass 18 | 19 | def run(content, node_class, state, content_offset): 20 | text = '\n'.join(content) 21 | node = node_class(text) 22 | state.nested_parse(content, content_offset, node) 23 | return [node] 24 | 25 | def html_only_directive(name, arguments, options, content, lineno, 26 | content_offset, block_text, state, state_machine): 27 | return run(content, html_only, state, content_offset) 28 | 29 | def latex_only_directive(name, arguments, options, content, lineno, 30 | content_offset, block_text, state, state_machine): 31 | return run(content, latex_only, state, content_offset) 32 | 33 | def builder_inited(app): 34 | if app.builder.name == 'html': 35 | latex_only.traverse = only_base.dont_traverse 36 | else: 37 | html_only.traverse = only_base.dont_traverse 38 | 39 | def setup(app): 40 | app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0)) 41 | app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0)) 42 | app.add_node(html_only) 43 | app.add_node(latex_only) 44 | 45 | # This will *really* never see the light of day As it turns out, 46 | # this results in "broken" image nodes since they never get 47 | # processed, so best not to do this. 48 | # app.connect('builder-inited', builder_inited) 49 | 50 | # Add visit/depart methods to HTML-Translator: 51 | def visit_perform(self, node): 52 | pass 53 | def depart_perform(self, node): 54 | pass 55 | def visit_ignore(self, node): 56 | node.children = [] 57 | def depart_ignore(self, node): 58 | node.children = [] 59 | 60 | app.add_node(html_only, html=(visit_perform, depart_perform)) 61 | app.add_node(html_only, latex=(visit_ignore, depart_ignore)) 62 | app.add_node(latex_only, latex=(visit_perform, depart_perform)) 63 | app.add_node(latex_only, html=(visit_ignore, depart_ignore)) 64 | -------------------------------------------------------------------------------- /doc/tools/apigen.py: -------------------------------------------------------------------------------- 1 | """Attempt to generate templates for module reference with Sphinx 2 | 3 | XXX - we exclude extension modules 4 | 5 | To include extension modules, first identify them as valid in the 6 | ``_uri2path`` method, then handle them in the ``_parse_module`` script. 7 | 8 | We get functions and classes by parsing the text of .py files. 9 | Alternatively we could import the modules for discovery, and we'd have 10 | to do that for extension modules. This would involve changing the 11 | ``_parse_module`` method to work via import and introspection, and 12 | might involve changing ``discover_modules`` (which determines which 13 | files are modules, and therefore which module URIs will be passed to 14 | ``_parse_module``). 15 | 16 | NOTE: this is a modified version of a script originally shipped with the 17 | PyMVPA project, which we've adapted for NIPY use. 
PyMVPA is an MIT-licensed
18 | project."""
19 | 
20 | # Stdlib imports
21 | import os
22 | import re
23 | 
24 | # Functions and classes
25 | class ApiDocWriter(object):
26 |     ''' Class for automatic detection and parsing of API docs
27 |     to Sphinx-parsable reST format'''
28 | 
29 |     # only separating first two levels
30 |     rst_section_levels = ['*', '=', '-', '~', '^']
31 | 
32 |     def __init__(self,
33 |                  package_name,
34 |                  rst_extension='.rst',
35 |                  package_skip_patterns=None,
36 |                  module_skip_patterns=None,
37 |                  ):
38 |         ''' Initialize package for parsing
39 | 
40 |         Parameters
41 |         ----------
42 |         package_name : string
43 |             Name of the top-level package. *package_name* must be the
44 |             name of an importable package
45 |         rst_extension : string, optional
46 |             Extension for reST files, default '.rst'
47 |         package_skip_patterns : None or sequence of {strings, regexps}
48 |             Sequence of strings giving URIs of packages to be excluded
49 |             Operates on the package path, starting at (including) the
50 |             first dot in the package path, after *package_name* - so,
51 |             if *package_name* is ``sphinx``, then ``sphinx.util`` will
52 |             result in ``.util`` being passed for searching by these
53 |             regexps. If None, gives the default. Default is:
54 |             ['\.tests$']
55 |         module_skip_patterns : None or sequence
56 |             Sequence of strings giving URIs of modules to be excluded
57 |             Operates on the module name including preceding URI path,
58 |             back to the first dot after *package_name*. For example
59 |             ``sphinx.util.console`` results in the string to search of
60 |             ``.util.console``
61 |             If None, gives the default. Default is:
62 |             ['\.setup$', '\._']
63 |         '''
64 |         if package_skip_patterns is None:
65 |             package_skip_patterns = ['\\.tests$']
66 |         if module_skip_patterns is None:
67 |             module_skip_patterns = ['\\.setup$', '\\._']
68 |         self.package_name = package_name
69 |         self.rst_extension = rst_extension
70 |         self.package_skip_patterns = package_skip_patterns
71 |         self.module_skip_patterns = module_skip_patterns
72 | 
73 |     def get_package_name(self):
74 |         return self._package_name
75 | 
76 |     def set_package_name(self, package_name):
77 |         ''' Set package_name
78 | 
79 |         >>> docwriter = ApiDocWriter('sphinx')
80 |         >>> import sphinx
81 |         >>> docwriter.root_path == sphinx.__path__[0]
82 |         True
83 |         >>> docwriter.package_name = 'docutils'
84 |         >>> import docutils
85 |         >>> docwriter.root_path == docutils.__path__[0]
86 |         True
87 |         '''
88 |         # It's also possible to imagine caching the module parsing here
89 |         self._package_name = package_name
90 |         self.root_module = __import__(package_name)
91 |         self.root_path = self.root_module.__path__[0]
92 |         self.written_modules = None
93 | 
94 |     package_name = property(get_package_name, set_package_name, None,
95 |                             'get/set package_name')
96 | 
97 |     def _get_object_name(self, line):
98 |         ''' Get second token in line
99 |         >>> docwriter = ApiDocWriter('sphinx')
100 |         >>> docwriter._get_object_name("  def func():  ")
101 |         'func'
102 |         >>> docwriter._get_object_name("  class Klass(object):  ")
103 |         'Klass'
104 |         >>> docwriter._get_object_name("  class Klass:  ")
105 |         'Klass'
106 |         '''
107 |         name = line.split()[1].split('(')[0].strip()
108 |         # in case we have classes which are not derived from object
109 |         # ie. 
old style classes 110 | return name.rstrip(':') 111 | 112 | def _uri2path(self, uri): 113 | ''' Convert uri to absolute filepath 114 | 115 | Parameters 116 | ---------- 117 | uri : string 118 | URI of python module to return path for 119 | 120 | Returns 121 | ------- 122 | path : None or string 123 | Returns None if there is no valid path for this URI 124 | Otherwise returns absolute file system path for URI 125 | 126 | Examples 127 | -------- 128 | >>> docwriter = ApiDocWriter('sphinx') 129 | >>> import sphinx 130 | >>> modpath = sphinx.__path__[0] 131 | >>> res = docwriter._uri2path('sphinx.builder') 132 | >>> res == os.path.join(modpath, 'builder.py') 133 | True 134 | >>> res = docwriter._uri2path('sphinx') 135 | >>> res == os.path.join(modpath, '__init__.py') 136 | True 137 | >>> docwriter._uri2path('sphinx.does_not_exist') 138 | 139 | ''' 140 | if uri == self.package_name: 141 | return os.path.join(self.root_path, '__init__.py') 142 | path = uri.replace('.', os.path.sep) 143 | path = path.replace(self.package_name + os.path.sep, '') 144 | path = os.path.join(self.root_path, path) 145 | # XXX maybe check for extensions as well? 146 | if os.path.exists(path + '.py'): # file 147 | path += '.py' 148 | elif os.path.exists(os.path.join(path, '__init__.py')): 149 | path = os.path.join(path, '__init__.py') 150 | else: 151 | return None 152 | return path 153 | 154 | def _path2uri(self, dirpath): 155 | ''' Convert directory path to uri ''' 156 | relpath = dirpath.replace(self.root_path, self.package_name) 157 | if relpath.startswith(os.path.sep): 158 | relpath = relpath[1:] 159 | return relpath.replace(os.path.sep, '.') 160 | 161 | def _parse_module(self, uri): 162 | ''' Parse module defined in *uri* ''' 163 | filename = self._uri2path(uri) 164 | if filename is None: 165 | # nothing that we could handle here. 166 | return ([],[]) 167 | f = open(filename, 'rt') 168 | functions, classes = self._parse_lines(f) 169 | f.close() 170 | return functions, classes 171 | 172 | def _parse_lines(self, linesource): 173 | ''' Parse lines of text for functions and classes ''' 174 | functions = [] 175 | classes = [] 176 | for line in linesource: 177 | if line.startswith('def ') and line.count('('): 178 | # exclude private stuff 179 | name = self._get_object_name(line) 180 | if not name.startswith('_'): 181 | functions.append(name) 182 | elif line.startswith('class '): 183 | # exclude private stuff 184 | name = self._get_object_name(line) 185 | if not name.startswith('_'): 186 | classes.append(name) 187 | else: 188 | pass 189 | functions.sort() 190 | classes.sort() 191 | return functions, classes 192 | 193 | def generate_api_doc(self, uri): 194 | '''Make autodoc documentation template string for a module 195 | 196 | Parameters 197 | ---------- 198 | uri : string 199 | python location of module - e.g 'sphinx.builder' 200 | 201 | Returns 202 | ------- 203 | S : string 204 | Contents of API doc 205 | ''' 206 | # get the names of all classes and functions 207 | functions, classes = self._parse_module(uri) 208 | if not len(functions) and not len(classes): 209 | print('WARNING: Empty -',uri) # dbg 210 | return '' 211 | 212 | # Make a shorter version of the uri that omits the package name for 213 | # titles 214 | uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) 215 | 216 | ad = '.. 
AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' 217 | 218 | chap_title = uri_short 219 | ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title) 220 | + '\n\n') 221 | 222 | # Set the chapter title to read 'module' for all modules except for the 223 | # main packages 224 | if '.' in uri: 225 | title = 'Module: :mod:`' + uri_short + '`' 226 | else: 227 | title = ':mod:`' + uri_short + '`' 228 | ad += title + '\n' + self.rst_section_levels[2] * len(title) 229 | 230 | if len(classes): 231 | ad += '\nInheritance diagram for ``%s``:\n\n' % uri 232 | ad += '.. inheritance-diagram:: %s \n' % uri 233 | ad += ' :parts: 3\n' 234 | 235 | ad += '\n.. automodule:: ' + uri + '\n' 236 | ad += '\n.. currentmodule:: ' + uri + '\n' 237 | multi_class = len(classes) > 1 238 | multi_fx = len(functions) > 1 239 | if multi_class: 240 | ad += '\n' + 'Classes' + '\n' + \ 241 | self.rst_section_levels[2] * 7 + '\n' 242 | elif len(classes) and multi_fx: 243 | ad += '\n' + 'Class' + '\n' + \ 244 | self.rst_section_levels[2] * 5 + '\n' 245 | for c in classes: 246 | ad += '\n:class:`' + c + '`\n' \ 247 | + self.rst_section_levels[multi_class + 2 ] * \ 248 | (len(c)+9) + '\n\n' 249 | ad += '\n.. autoclass:: ' + c + '\n' 250 | # must NOT exclude from index to keep cross-refs working 251 | ad += ' :members:\n' \ 252 | ' :undoc-members:\n' \ 253 | ' :show-inheritance:\n' \ 254 | '\n' \ 255 | ' .. automethod:: __init__\n' 256 | if multi_fx: 257 | ad += '\n' + 'Functions' + '\n' + \ 258 | self.rst_section_levels[2] * 9 + '\n\n' 259 | elif len(functions) and multi_class: 260 | ad += '\n' + 'Function' + '\n' + \ 261 | self.rst_section_levels[2] * 8 + '\n\n' 262 | for f in functions: 263 | # must NOT exclude from index to keep cross-refs working 264 | ad += '\n.. autofunction:: ' + uri + '.' 
+ f + '\n\n' 265 | return ad 266 | 267 | def _survives_exclude(self, matchstr, match_type): 268 | ''' Returns True if *matchstr* does not match patterns 269 | 270 | ``self.package_name`` removed from front of string if present 271 | 272 | Examples 273 | -------- 274 | >>> dw = ApiDocWriter('sphinx') 275 | >>> dw._survives_exclude('sphinx.okpkg', 'package') 276 | True 277 | >>> dw.package_skip_patterns.append('^\\.badpkg$') 278 | >>> dw._survives_exclude('sphinx.badpkg', 'package') 279 | False 280 | >>> dw._survives_exclude('sphinx.badpkg', 'module') 281 | True 282 | >>> dw._survives_exclude('sphinx.badmod', 'module') 283 | True 284 | >>> dw.module_skip_patterns.append('^\\.badmod$') 285 | >>> dw._survives_exclude('sphinx.badmod', 'module') 286 | False 287 | ''' 288 | if match_type == 'module': 289 | patterns = self.module_skip_patterns 290 | elif match_type == 'package': 291 | patterns = self.package_skip_patterns 292 | else: 293 | raise ValueError('Cannot interpret match type "%s"' 294 | % match_type) 295 | # Match to URI without package name 296 | L = len(self.package_name) 297 | if matchstr[:L] == self.package_name: 298 | matchstr = matchstr[L:] 299 | for pat in patterns: 300 | try: 301 | pat.search 302 | except AttributeError: 303 | pat = re.compile(pat) 304 | if pat.search(matchstr): 305 | return False 306 | return True 307 | 308 | def discover_modules(self): 309 | ''' Return module sequence discovered from ``self.package_name`` 310 | 311 | 312 | Parameters 313 | ---------- 314 | None 315 | 316 | Returns 317 | ------- 318 | mods : sequence 319 | Sequence of module names within ``self.package_name`` 320 | 321 | Examples 322 | -------- 323 | >>> dw = ApiDocWriter('sphinx') 324 | >>> mods = dw.discover_modules() 325 | >>> 'sphinx.util' in mods 326 | True 327 | >>> dw.package_skip_patterns.append('\.util$') 328 | >>> 'sphinx.util' in dw.discover_modules() 329 | False 330 | >>> 331 | ''' 332 | modules = [self.package_name] 333 | # raw directory parsing 334 | for dirpath, dirnames, filenames in os.walk(self.root_path): 335 | # Check directory names for packages 336 | root_uri = self._path2uri(os.path.join(self.root_path, 337 | dirpath)) 338 | for dirname in dirnames[:]: # copy list - we modify inplace 339 | package_uri = '.'.join((root_uri, dirname)) 340 | if (self._uri2path(package_uri) and 341 | self._survives_exclude(package_uri, 'package')): 342 | modules.append(package_uri) 343 | else: 344 | dirnames.remove(dirname) 345 | # Check filenames for modules 346 | for filename in filenames: 347 | module_name = filename[:-3] 348 | module_uri = '.'.join((root_uri, module_name)) 349 | if (self._uri2path(module_uri) and 350 | self._survives_exclude(module_uri, 'module')): 351 | modules.append(module_uri) 352 | return sorted(modules) 353 | 354 | def write_modules_api(self, modules,outdir): 355 | # write the list 356 | written_modules = [] 357 | for m in modules: 358 | api_str = self.generate_api_doc(m) 359 | if not api_str: 360 | continue 361 | # write out to file 362 | outfile = os.path.join(outdir, 363 | m + self.rst_extension) 364 | fileobj = open(outfile, 'wt') 365 | fileobj.write(api_str) 366 | fileobj.close() 367 | written_modules.append(m) 368 | self.written_modules = written_modules 369 | 370 | def write_api_docs(self, outdir): 371 | """Generate API reST files. 
372 | 373 | Parameters 374 | ---------- 375 | outdir : string 376 | Directory name in which to store files 377 | We create automatic filenames for each module 378 | 379 | Returns 380 | ------- 381 | None 382 | 383 | Notes 384 | ----- 385 | Sets self.written_modules to list of written modules 386 | """ 387 | if not os.path.exists(outdir): 388 | os.mkdir(outdir) 389 | # compose list of modules 390 | modules = self.discover_modules() 391 | self.write_modules_api(modules,outdir) 392 | 393 | def write_index(self, outdir, froot='gen', relative_to=None): 394 | """Make a reST API index file from written files 395 | 396 | Parameters 397 | ---------- 398 | path : string 399 | Filename to write index to 400 | outdir : string 401 | Directory to which to write generated index file 402 | froot : string, optional 403 | root (filename without extension) of filename to write to 404 | Defaults to 'gen'. We add ``self.rst_extension``. 405 | relative_to : string 406 | path to which written filenames are relative. This 407 | component of the written file path will be removed from 408 | outdir, in the generated index. Default is None, meaning, 409 | leave path as it is. 410 | """ 411 | if self.written_modules is None: 412 | raise ValueError('No modules written') 413 | # Get full filename path 414 | path = os.path.join(outdir, froot+self.rst_extension) 415 | # Path written into index is relative to rootpath 416 | if relative_to is not None: 417 | relpath = outdir.replace(relative_to + os.path.sep, '') 418 | else: 419 | relpath = outdir 420 | idx = open(path,'wt') 421 | w = idx.write 422 | w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') 423 | w('.. toctree::\n\n') 424 | for f in self.written_modules: 425 | w(' %s\n' % os.path.join(relpath,f)) 426 | idx.close() 427 | -------------------------------------------------------------------------------- /doc/tools/build_modref_templates.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Script to auto-generate our API docs. 3 | """ 4 | # stdlib imports 5 | import os 6 | 7 | # local imports 8 | from apigen import ApiDocWriter 9 | 10 | #***************************************************************************** 11 | if __name__ == '__main__': 12 | package = 'brainx' 13 | outdir = os.path.join('api','generated') 14 | docwriter = ApiDocWriter(package) 15 | docwriter.package_skip_patterns += [r'\.fixes$', 16 | ] 17 | docwriter.write_api_docs(outdir) 18 | docwriter.write_index(outdir, 'gen', relative_to='api') 19 | print('%d files written' % len(docwriter.written_modules)) 20 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib>=1.1.0 2 | numpy>=1.6.1 3 | scipy>=0.9 4 | networkx>=1.9 5 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Installation script for brainx package. 3 | """ 4 | 5 | import os 6 | 7 | # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly 8 | # update it when the contents of directories change. 
9 | if os.path.exists('MANIFEST'): os.remove('MANIFEST')
10 | 
11 | from distutils.core import setup
12 | 
13 | # Get version and release info, which is all stored in brainx/version.py
14 | ver_file = os.path.join('brainx', 'version.py')
15 | exec(open(ver_file).read())
16 | 
17 | opts = dict(name=name,
18 |             maintainer=maintainer,
19 |             maintainer_email=maintainer_email,
20 |             description=description,
21 |             long_description=long_description,
22 |             url=url,
23 |             download_url=download_url,
24 |             license=license,
25 |             classifiers=classifiers,
26 |             author=author,
27 |             author_email=author_email,
28 |             platforms=platforms,
29 |             version=version,
30 |             packages=packages,
31 |             package_data=package_data,
32 |             )
33 | 
34 | # Only add setuptools-specific flags if the user called for setuptools, but
35 | # otherwise leave it alone
36 | import sys
37 | if 'setuptools' in sys.modules:
38 |     opts['zip_safe'] = False
39 | 
40 | # Now call the actual setup function
41 | if __name__ == '__main__':
42 |     setup(**opts)
43 | 
--------------------------------------------------------------------------------
/setup_egg.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | # exec(open(...)) instead of execfile() so this also runs under Python 3
3 | exec(open('setup.py').read())
4 | 
--------------------------------------------------------------------------------
/tools/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | 
5 | export PYTHONWARNINGS="all"
6 | 
7 | for nb in brainx/notebooks/*.ipynb; do
8 |     echo "Running: $nb"
9 |     runipy -q "$nb"
10 | done
11 | 
12 | if [[ $TRAVIS_PYTHON_VERSION == 3.* ]]; then
13 |     export TEST_ARGS="--with-cov --cover-package brainx"
14 | else
15 |     export TEST_ARGS="brainx"
16 | fi
17 | 
18 | # Add `--with-doctest` below, once doctests have been fixed
19 | nosetests --exe -v $TEST_ARGS
20 | 
--------------------------------------------------------------------------------
/tools/travis_setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -ex
3 | 
4 | WHEELHOUSE="--no-index --find-links=http://wheels.scikit-image.org/"
5 | 
6 | pip install wheel nose coveralls
7 | pip install -r requirements.txt $WHEELHOUSE
8 | sudo apt-get install libzmq3-dev
9 | pip install ipython runipy jsonschema
10 | 
--------------------------------------------------------------------------------