├── .github └── workflows │ └── ci.yaml ├── .gitignore ├── CHANGELOG.md ├── CREDITS ├── LICENSE ├── README.md ├── bct ├── __init__.py ├── algorithms │ ├── __init__.py │ ├── centrality.py │ ├── clustering.py │ ├── core.py │ ├── degree.py │ ├── distance.py │ ├── efficiency.py │ ├── generative.py │ ├── models.py │ ├── modularity.py │ ├── motifs.py │ ├── physical_connectivity.py │ ├── reference.py │ └── similarity.py ├── citations.py ├── due.py ├── motif34lib.mat ├── nbs.py ├── nbs_parallel.py ├── utils │ ├── __init__.py │ ├── miscellaneous_utilities.py │ ├── other.py │ └── visualization.py └── version.py ├── docs ├── Makefile ├── _build │ ├── doctrees │ │ ├── _templates │ │ │ └── function.doctree │ │ ├── bct.doctree │ │ ├── environment.pickle │ │ ├── index.doctree │ │ ├── modules.doctree │ │ └── stupid.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _sources │ │ ├── _templates │ │ │ └── function.txt │ │ ├── bct.txt │ │ ├── index.txt │ │ ├── modules.txt │ │ └── stupid.txt │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── basic.css │ │ ├── comment-bright.png │ │ ├── comment-close.png │ │ ├── comment.png │ │ ├── default.css │ │ ├── doctools.js │ │ ├── down-pressed.png │ │ ├── down.png │ │ ├── file.png │ │ ├── jquery.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── sidebar.js │ │ ├── underscore.js │ │ ├── up-pressed.png │ │ ├── up.png │ │ └── websupport.js │ │ ├── _templates │ │ └── function.html │ │ ├── bct.html │ │ ├── genindex.html │ │ ├── index.html │ │ ├── modules.html │ │ ├── np-modindex.html │ │ ├── objects.inv │ │ ├── py-modindex.html │ │ ├── search.html │ │ ├── searchindex.js │ │ └── stupid.html ├── _templates │ └── function.rst ├── bct.rst ├── conf.py ├── index.rst ├── modules.rst └── sphinxext │ └── numpy_ext │ ├── __init__.py │ ├── docscrape.py │ ├── docscrape_sphinx.py │ └── numpydoc.py ├── function_reference.html ├── requirements.txt ├── setup.py ├── test ├── __init__.py ├── basic_test.py ├── centrality_test.py ├── 
clustering_test.py ├── conftest.py ├── core_test.py ├── distance_test.py ├── duecredit_test.py ├── efficiency_test.py ├── failing_cases │ └── modularity_dir_example.csv ├── load_samples.py ├── mats │ ├── sample_data.mat │ ├── sample_data.npy │ ├── sample_directed.mat │ ├── sample_directed.npy │ ├── sample_directed_gc.mat │ ├── sample_directed_gc.npy │ ├── sample_group_dsi.mat │ ├── sample_group_dsi.npy │ ├── sample_group_fmri.mat │ ├── sample_group_fmri.npy │ ├── sample_group_qball.mat │ ├── sample_group_qball.npy │ ├── sample_partition.mat │ ├── sample_partition.npy │ ├── sample_pc.mat │ ├── sample_pc.npy │ ├── sample_signed.mat │ ├── sample_signed.npy │ ├── sample_signed_partition.mat │ ├── sample_zi.mat │ └── sample_zi.npy ├── modularity_derived_metrics_test.py ├── modularity_test.py ├── nbs_test.py ├── nodals_test.py ├── partition_distance_test.py ├── reference_test.py ├── simple_script.py └── very_long_test.py └── tox.ini /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | pull_request: 5 | 6 | jobs: 7 | test: 8 | name: test ${{ matrix.python_version }} 9 | runs-on: ubuntu-latest 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | python_version: 14 | - "3.10" 15 | - "3.9" 16 | - "3.8" 17 | - "3.7" 18 | steps: 19 | - uses: actions/setup-python@v2 20 | with: 21 | python-version: ${{ matrix.python_version }} 22 | - uses: actions/checkout@v2 23 | - name: Install tox-gh 24 | run: python -m pip install tox-gh 25 | - name: Setup test suite 26 | run: tox -vv --notest 27 | - name: Run test suite 28 | run: tox --skip-pkg-install 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | 
develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | 21 | # Installer logs 22 | pip-log.txt 23 | 24 | # Unit test / coverage reports 25 | .coverage 26 | .tox 27 | nosetests.xml 28 | 29 | # Translations 30 | *.mo 31 | 32 | # Mr Developer 33 | .mr.developer.cfg 34 | .project 35 | .pydevproject 36 | 37 | # vim swap files 38 | *.swp 39 | 40 | .duecredit.p 41 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # In progress 2 | 3 | - Make randomness-based functions seedable 4 | - Test against more python versions 5 | 6 | # BCT 0.5.0 7 | 8 | - to do 9 | 10 | # BCT 0.4.1 11 | 12 | - Refactor code into multiple files 13 | - Fix bug in efficiency_bin 14 | - Fix bugs in modularity_louvain_und 15 | - Fix bugs in `participation_coef_b*` 16 | - Add some test cases 17 | 18 | # BCT 0.4.0 19 | 20 | - Add various new functions from Jan 2015 release of BCT 21 | - Fix various bugs documented in github issues 22 | 23 | # BCT 0.3.3 24 | 25 | - Fix small bug in `latmio_und_connected` causing failure for sparse matrices 26 | - Add non-networkx dependent algorithm to get_components (but less efficient) 27 | - Add an implementation of consensus clustering and fix bug in agreement 28 | - Fix bug causing `clustering_coef_bu` to always return 0 29 | - Remembered to update changelog 30 | - Fix some bugs in `modularity_louvain_dir` and related 31 | - Fix bug in NBS and add optional paired-sample test statistic (sviter) 32 | 33 | # BCT 0.3.2 34 | 35 | - Change several functions including threshold_proportional and binarize have `copy=True` as default argument 36 | - Fix bug in `threshold_proportional` where copying behavior did not work symmetric matrices. 
37 | - Fix minor quirk in `threshold_proportional` where `np.round` rounds to nearest even number (optimizes floating point) which is discrepant with BCT 38 | - Add a test suite with some functions 39 | - Fix typo in `rich_club_bu` 40 | - Refactor `x[range(n),range(n)]` to `np.fill_diagonal` 41 | - Fix off-by-one bug in `moduality_[prob/fine]tune_und_sign` 42 | 43 | # BCT 0.3.1 44 | 45 | - Fix bug in NBS 46 | - Fix series of bugs in `null_models` 47 | 48 | # BCT 0.3 49 | 50 | - Added NBS 51 | - Added in all of the new functions from the Dec 2013 release of BCT 52 | - Fixed numerous bugs having to do with indexing errors in modularity 53 | - Fixed several odd bugs with `clustering_coef`, `efficiency`, `distance` 54 | 55 | For the next release, I clearly need a real test suite. 56 | -------------------------------------------------------------------------------- /CREDITS: -------------------------------------------------------------------------------- 1 | This file describes the authorship and change history of each function in BCT. 2 | At least, as much authorship that is noted in the documentation. 3 | 4 | In addition references to scientific literature are provided where available 5 | 6 | This file was last updated in 2014 7 | 8 | ADJACENCY_PLOT_UND 9 | Richard Betzel Indiana University 2013 10 | AGREEMENT 11 | (agreement agreement_weighted) 12 | Richard Betzel Indiana University 2013 13 | ALIGN_MATRICES 14 | Yusuke Adachi University of Tokyo 2010 15 | Olaf Sporns Indiana University 2010 16 | ASSORTATIVITY 17 | (assortativity_bin assortativity_wei) 18 | Olaf Sporns Indiana University 2007/2008 19 | Vassilis Tsiaras University of Crete 2009 20 | Murray Shanahan Imperial College London 2012 21 | Mika Rubinov University of Cambridge 2012 22 | Newman (2002) Phys Rev Lett 89:208701 23 | Foster et al. (2010) PNAS 107:10815–10820 24 | BACKBONE_WU 25 | Olaf Sporns Indiana University 2007/2008/2010/2012 26 | Hidalgo et al. (2007) Science 317, 482. 27 | Hagmann et al. 
(2008) PLoS Biol 28 | BETWEENNESS 29 | (betweenness_bin betweenness_wei) 30 | Mika Rubinov UNSW/Cambridge 2007-2012 31 | Brandes (2001) J Math Sociol 25:163-177. 32 | Kintali (2008) arXiv:0809.1906v2 [cs.DS] 33 | BREADTH 34 | (breadth breadthdist reachdist) 35 | Olaf Sporns Indiana University 2002/2007/2008 36 | CHARPATH 37 | Olaf Sporns Indiana University 2002/2007/2008 38 | Mika Rubinov U New South Wales 2010 39 | CLUSTERING_COEF 40 | (clustering_coef_bd clustering_coef_bu clustering_coef_wd clustering_coef_wu) 41 | Mika Rubinov U New South Wales 2007-2010 42 | Watts and Strogatz (1998) Nature 393:440-442. 43 | Onnela et al. (2005) Phys Rev E 71:065103 44 | Fagiolo (2007) Phys Rev E 76:026107. 45 | CONSENSUS_UND 46 | Richard Betzel Indiana University 2013 47 | Lancichinetti & Fortunato (2012). Scientific Reports. 48 | CYCPROB 49 | Olaf Sporns Indiana University 2002/2007/2008 50 | DEGREES 51 | (degrees_dir degrees_und) 52 | Olaf Sporns Indiana University 2002/2006/2008 53 | DENSITY 54 | (density_dir density_und) 55 | Olaf Sporns Indiana University 2002/2007/2008 56 | Tony Herdman SFU 2009/2010 57 | DISTANCE 58 | (distance_bin distance_wei distance_wei_floyd) 59 | Mika Rubinov UNSW/Cambridge 2007-2012 60 | Richard Betzel Indiana University 2012 61 | Andrea Avena Indiana University 2012 62 | Jonathan Clayden UCL 2013 63 | DIVERSITY_COEF_SIGN 64 | Mika Rubinov UNSW/Cambridge 2011 65 | Alex Fornito 2012 66 | Martin Monti 2012 67 | Shannon CE (1948) Bell Syst Tech J 27, 379–423. 68 | Rubinov and Sporns (2011) NeuroImage. 69 | EDGE_BETWEENNESS 70 | (edge_betweenness_bin edge_betweenness_wei) 71 | Mika Rubinov UNSW/Cambridge 2007-2012 72 | Brandes (2001) J Math Sociol 25:163-177. 73 | EDGE_NEI_OVERLAP 74 | (edge_nei_overlap_bd edge_nei_overlap_bu) 75 | Olaf Sporns Indiana University 2012 76 | Easley and Kleinberg (2010) Networks, Crowds, and Markets. 
77 | EFFICIENCY 78 | (efficiency_bin efficiency_wei) 79 | Mika Rubinov UNSW/Cambridge 2008-2013 80 | Jonathan Clayden UCL 2013 81 | Latora and Marchiori (2001) Phys Rev Lett 87:198701. 82 | Onnela et al. (2005) Phys Rev E 71:065103 83 | Fagiolo (2007) Phys Rev E 76:026107. 84 | Rubinov M, Sporns O (2010) NeuroImage 52:1059-69 85 | EIGENVECTOR_CENTRALITY 86 | Xi-Nian Zuo Chinese Acad. Sciences 2010 87 | Richard Betzel Indiana University 2012 88 | Newman, MEJ (2002). The mathematics of networks. 89 | ERANGE 90 | Olaf Sporns Indiana University 2002/2007/2008 91 | FINDPATHS 92 | (findpaths findwalks) 93 | Olaf Sporns Indiana University 2002/2007/2008/2010 94 | Steve Williams 2010 95 | FLOW_COEF 96 | Olaf Sporns Indiana University 2007/2010/2012 97 | Honey et al. (2007) Proc Natl Acad Sci USA 98 | GET_COMPONENTS 99 | J Goni U Navarra/ Indiana U 2009/2011 100 | GRID_COMMUNITIES 101 | Richard Betzel Indiana University 2012 102 | GTOM 103 | J Goni U Navarra/ Indiana U 2009/2011 104 | Yip & Horvath (2007) BMC Bioinformatics 2007, 8:22 105 | Ravasz et al (2002) Science 297 (5586), 1551. 106 | JDEGREE 107 | Olaf Sporns Indiana University 2002/2006/2008 108 | KCORE 109 | (kcore_bd kcore_bu kcoreness_centrality_bd kcoreness_centrality_bu) 110 | Olaf Sporns Indiana University 2007-2012 111 | Hagmann et al. 
(2008) PLoS Biology 112 | LATMIO 113 | (latmio_dir latmio_dir_connected latmio_und latmio_und_connected) 114 | Mika Rubinov UNSW 2007-2010 115 | Jonathan Power Washington U St Louis 2008 116 | Olaf Sports Indiana University 2012 117 | Maslov and Sneppen (2002) Science 296:910 118 | Sporns and Zwi (2004) Neuroinformatics 2:145 119 | MAKE VARIOUS REGULAR/RANDOM NETWORKS 120 | (makeevenCIJ makefractalCIJ makelatticeCIJ makerandCIJ_dir makerandCIJ_und 121 | makerandCIJdegreesfixes makeringlatticeCIJ maketoeplitzCIJ) 122 | Olaf Sporns Indiana University 2005/2007/2008 123 | MATCHING_IND 124 | (matching_ind matching_ind_und) 125 | Olaf Sporns Indiana University 2002/2007/2008 126 | Richard Betzel Indiana University 2013 127 | MEAN FIRST PASSAGE TIME 128 | (mean_first_passage_time) 129 | Joaquin Goñi Indiana University 2012 130 | MODULARITY 131 | (modularity_dir modularity_und) 132 | Mika Rubinov UNSW/Cambridge 2008-2013 133 | Jonathan Power Washington U St Louis 2008 134 | Dani Bassett UC Santa Barbara 2010 135 | Alexandros Goulas Maastricht University 136 | Mason Porter Oxford University 137 | Jack Setford Oxford University 138 | Leicht and Newman (2008) Phys Rev Lett 100:118703. 139 | Reichardt and Bornholdt (2006) Phys Rev E 74:016110. 140 | Also see Good et al. (2010) Phys. Rev. E 81:046106. 141 | MODULARITY_FINETUNE 142 | (modularity_finetune_dir modularity_finetune_und modularity_finetune_und_sign 143 | modularity_probtune_und_sign) 144 | Mika Rubinov UNSW/Cambridge 2011/2013 145 | Sun et al. (2009) Europhysics Lett 86, 28004. 146 | Rubinov and Sporns (2011) NeuroImage. 147 | MODULARITY_LOUVAIN 148 | (modularity_louvain_dir modularity_louvain_und modularity_louvain_und_sign) 149 | Mika Rubinov UNSW/Cambridge 2010/2011/2013 150 | Blondel et al. (2008) J. Stat. Mech. P10008. 151 | Reichardt and Bornholdt (2006) Phys. Rev. E 74, 016110. 152 | Rubinov and Sporns (2011) NeuroImage. 153 | MODULE_DEGREE_ZSCORE 154 | Mika Rubinov UNSW 2008-2010 155 | Guimera R, Amaral L. 
Nature (2005) 433:895-900. 156 | MOTIFS 157 | Mika Rubinov UNSW 2007/2008 158 | Onnela et al. (2005) Phys Rev E 71:065103 159 | NBS 160 | Andrew Zalesky U Melbourne 2010 161 | Zalesky A, Fornito A, Bullmore ET (2010) NeuroImage.10.1016 162 | NULL_MODEL 163 | (null_model_dir_sign null_model_und_sign) 164 | Mika Rubinov U Cambridge 2011/2012 165 | Rubinov and Sporns (2011) Neuroimage 56:2068-79 166 | PAGERANK_CENTRALITY 167 | Xi-Nian Zuo Chinese Acad. Sciences 2011 168 | Richard Betzel Indiana University 2012 169 | Morrison JL et al (2005) BMC Bioinformatics, 6:233 170 | Boldi P, Santini M, Vigna S (2009) ACM Trans Inf Syst 27, 1-23. 171 | PARTICIPATION_COEF 172 | (participation_coef participation_coef_sign) 173 | Mika Rubinov UNSW/Cambridge 2008 174 | Alex Fornito U Melbourne 2011-2012 175 | Martin Monti 2012 176 | Guimera R, Amaral L. Nature (2005) 433:895-900. 177 | PARTITION_DISTANCE 178 | Mika Rubinov UNSW/Cambridge 2011 179 | Meila M (2007) J Multivar Anal 98, 873-895. 180 | RANDMIO 181 | (randmio_dir randmio_dir_connected randmio_und randmio_und_connected 182 | randmio_und_signed) 183 | Mika Rubinov UNSW/Cambridge 2007 184 | Jonathan Power Washington U St Louis 2008 185 | Dani Bassett UC Santa Barbara 2011 186 | Olaf Sporns Indiana University 2012 187 | Maslov and Sneppen (2002) Science 296:910 188 | RANDOMIZE_GRAPH_PARTIAL_UND 189 | Richard Betzel Indiana University 2013 190 | RANDOMIZER_BIN_UND 191 | Jonathan Power Washington U St Louis 2010 192 | Maslov and Sneppen (2002) Science 296:910 193 | RENTIAN_SCALING 194 | Danielle Bassett UC Santa Barbara 2010 195 | Bassett DS et al. 
(2010) PLoS Comput Biol 6(4):e1000748 196 | REORDER_MAT 197 | (reorder_MAT reorder_matrix reorder_mod) 198 | Yusuke Adachi University of Tokyo 2010 199 | Olaf Sporns Indiana University 2010 200 | Mika Rubinov UNSW/Cambridge 2011 201 | RETRIEVE_SHORTEST_PATH 202 | (retrieve_shortest_path) 203 | Andrea Avena Indiana University 2012 204 | Joaquin Goñi Indiana University 2012 205 | RICH_CLUB 206 | (rich_club_bd rich_club_bu rich_club_wd rich_club_wu) 207 | Martijn van den Heuvel Univ. Med. Ctr. Utrecht 2011 208 | Colizza et al. (2006) Nat. Phys. 2:110. 209 | Opsahl et al. Phys Rev Lett, 2008, 101(16) 210 | Martijn van den Heuvel, University Medical Center Utrecht, 2011 211 | SEARCH INFORMATION 212 | (search_information) 213 | Andrea Avena Indiana University 2014 214 | Joaquin Goñi Indiana University 2014 215 | SCORE 216 | Olaf Sporns Indiana University 2007/2008/2010/2012 217 | STRENGTHS 218 | (strengths_dir strengths_und strengths_und_sign) 219 | Olaf Sporns Indiana University 2002/2006/2008 220 | Mika Rubinov UNSW/Cambridge 2011 221 | SUBGRAPH_CENTRALITY 222 | Xi-Nian Zuo Chinese Acad. Sciences 2010 223 | Richard Betzel Indiana University 2012 224 | Estrada and Rodriguez-Velasquez (2005) Phys Rev E 71, 056103 225 | Estrada and Higham (2010) SIAM Rev 52, 696. 226 | THRESHOLD 227 | (threshold_absolute threshold_proportional) 228 | Mika Rubinov UNSW/Cambridge 2009-2010 229 | Roan LaPlante Martinos Ctr, MGH 2012 230 | TRANSITIVITY 231 | (transitivity_bd transitivity_bu transitivity_wd transitivity_wu) 232 | Mika Rubinov UNSW/Cambridge 2007 233 | Alexandros Goulas Maastricht U 2010 234 | Christoph Schmidt F. Schiller U. Jena 2013 235 | Rubinov M, Sporns O (2010) NeuroImage 52:1059-69 236 | Onnela et al. (2005) Phys Rev E 71:065103 237 | Fagiolo (2007) Phys Rev E 76:026107. 238 | Humphries et al. 
(2008) Plos ONE 3: e0002051 239 | WEIGHT_CONVERSION 240 | Mika Rubinov U Cambridge 2012 241 | WRITE_TO_PAJ 242 | Christopher Honey Indiana University 2007 243 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Brain Connectivity Toolbox for Python version 0.6.1 2 | 3 | Author: Roan LaPlante 4 | 5 | Tested against python 3.7+. 6 | 7 | ## Copyright information 8 | 9 | This program strictly observes the tenets of fundamentalist Theravada Mahasi 10 | style Buddhism. Any use of this program in violation of these aforementioned 11 | tenets or in violation of the principles described in the Visuddhimagga Sutta 12 | is strictly prohibited and punishable by extensive Mahayana style practice. 13 | By being or not being mindful of the immediate present moment sensations 14 | involved in the use of this program, you confer your acceptance of these terms 15 | and conditions. 16 | 17 | Note that the observation of the tenets of fundamentalist Theravada Mahasi 18 | style Buddhism and the Visuddhimagga Sutta is optional as long as the terms and 19 | conditions of the GNU GPLv3+ are upheld. 20 | 21 | ## Packages used 22 | 23 | BCTPY is written in pure python and requires only `scipy` and `numpy`. `scipy` is required for a couple of functions for its statistical and linear algebra 24 | packages which have some features not available in `numpy` alone. If you don't 25 | have `scipy`, most functions that do not need `scipy` functionality will still work. 26 | 27 | Note that graphs must be passed in as `numpy.array` rather than `numpy.matrix`. Other constraints/edge cases of the adjacency matrices (e.g. self-loops, negative weights) behave similarly to the matlab functions. 28 | 29 | A small number of functions also depend on networkx. 
This notably includes Network-Based Statistic, a nonparametric test for differences in undirected weighted graphs from different populations. Ideally this dependency should be removed in the future. 30 | 31 | Nosetests is used for the test suite. The test suite is not complete. 32 | 33 | ## About `bctpy` and other authors 34 | 35 | BCT is a matlab toolbox with many graph theoretical measures off of which `bctpy` 36 | is based. I did not write BCT (apart from small bugfixes I have submitted) 37 | and a quality of life improvements that I have taken liberties to add. 38 | With few exceptions, `bctpy` is a direct translation of matlab code to python. 39 | 40 | `bctpy` should be considered beta software, with BCT being the gold standard by 41 | comparison. I did my best to test all functionality in `bctpy`, but much of it is 42 | arcane math that flies over the head of this humble programmer. There *are* 43 | bugs lurking in `bctpy`, the question is not whether but how many. If you locate 44 | bugs, please consider submitting pull requests. 45 | 46 | Many thanks to Stefan Fuertinger for his assistance tracking down a number of 47 | bugs. Stefan Fuertinger has a similar software package dealing with brain 48 | network functionality at http://research.mssm.edu/simonyanlab/analytical-tools/ 49 | 50 | Many thanks to Chris Barnes for his assistance in documenting a number of issues and facilitating a number of test cases. 51 | 52 | Credit for writing BCT (the matlab version) goes to the following list of 53 | authors, especially Olaf Sporns and Mika Rubinov. 
54 | 55 | - Olaf Sporns 56 | - Mikail Rubinov 57 | - Yusuke Adachi 58 | - Andrea Avena 59 | - Danielle Bassett 60 | - Richard Betzel 61 | - Joaquin Goni 62 | - Alexandros Goulas 63 | - Patric Hagmann 64 | - Christopher Honey 65 | - Martijn van den Heuvel 66 | - Rolf Kotter 67 | - Jonathan Power 68 | - Murray Shanahan 69 | - Andrew Zalesky 70 | 71 | In order to be a bit more compact I have removed the accreditations from the 72 | docstrings each functions. This does not in any way mean that I wish to take 73 | credit from the individual contributions. I have moved these accreditations 74 | to the credits file. 75 | -------------------------------------------------------------------------------- /bct/__init__.py: -------------------------------------------------------------------------------- 1 | from .algorithms import * 2 | from .utils import * 3 | from .nbs import * 4 | from .version import __version__, __version_info__ 5 | from .citations import BCTPY, RUBINOV2010 6 | 7 | from .due import due, BibTeX 8 | 9 | __citation__ = BCTPY 10 | 11 | due.cite(BibTeX(__citation__), description="Brain Connectivity Toolbox for Python", path="bct") 12 | due.cite(BibTeX(RUBINOV2010), description="Brain Connectivity Toolbox", path="bct") 13 | -------------------------------------------------------------------------------- /bct/algorithms/__init__.py: -------------------------------------------------------------------------------- 1 | from .centrality import * 2 | from .clustering import * 3 | from .efficiency import * 4 | from .generative import * 5 | from .modularity import * 6 | from .core import * 7 | from .degree import * 8 | from .distance import * 9 | from .motifs import * 10 | from .physical_connectivity import * 11 | from .reference import * 12 | from .similarity import * 13 | -------------------------------------------------------------------------------- /bct/algorithms/degree.py: -------------------------------------------------------------------------------- 1 | from 
def degrees_dir(CIJ):
    '''
    Node degree is the number of links connected to the node. The indegree
    is the number of inward links and the outdegree is the number of
    outward links.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        directed binary/weighted connection matrix

    Returns
    -------
    id : Nx1 np.ndarray
        node in-degree
    od : Nx1 np.ndarray
        node out-degree
    deg : Nx1 np.ndarray
        node degree (in-degree + out-degree)

    Notes
    -----
    Inputs are assumed to be on the columns of the CIJ matrix.
    Weight information is discarded.
    '''
    CIJ = binarize(CIJ, copy=True)      # ensure CIJ is binary
    indeg = np.sum(CIJ, axis=0)         # indegree = column sum of CIJ
    outdeg = np.sum(CIJ, axis=1)        # outdegree = row sum of CIJ
    deg = indeg + outdeg                # degree = indegree + outdegree
    return indeg, outdeg, deg


def degrees_und(CIJ):
    '''
    Node degree is the number of links connected to the node.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        undirected binary/weighted connection matrix

    Returns
    -------
    deg : Nx1 np.ndarray
        node degree

    Notes
    -----
    Weight information is discarded.
    '''
    CIJ = binarize(CIJ, copy=True)  # ensure CIJ is binary
    return np.sum(CIJ, axis=0)


def jdegree(CIJ):
    '''
    This function returns a matrix in which the value of each element (u,v)
    corresponds to the number of nodes that have u outgoing connections
    and v incoming connections.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        directed binary/weighted connection matrix

    Returns
    -------
    J : ZxZ np.ndarray
        joint degree distribution matrix
        (shifted by one, replicates matlab one-based-indexing)
    J_od : int
        number of vertices with od>id
    J_id : int
        number of vertices with id>od
    J_bl : int
        number of vertices with id==od

    Notes
    -----
    Weights are discarded.
    '''
    CIJ = binarize(CIJ, copy=True)  # ensure CIJ is binary
    n = len(CIJ)
    # Cast the degree sums to int: np.sum over a float-dtype matrix yields
    # floats, and float values cannot be used as array indices below
    # (IndexError on modern numpy).
    indeg = np.sum(CIJ, axis=0).astype(int)   # indegree = column sum of CIJ
    outdeg = np.sum(CIJ, axis=1).astype(int)  # outdegree = row sum of CIJ

    # create the joint degree distribution matrix
    # note: the matrix is shifted by one, to accomodate zero id and od in the
    # first row/column
    # upper triangular part of the matrix has vertices with od>id
    # lower triangular part has vertices with id>od
    # main diagonal has units with id=od
    szJ = np.max((indeg, outdeg)) + 1
    J = np.zeros((szJ, szJ))

    for i in range(n):
        J[indeg[i], outdeg[i]] += 1

    J_od = np.sum(np.triu(J, 1))
    J_id = np.sum(np.tril(J, -1))
    J_bl = np.sum(np.diag(J))
    return J, J_od, J_id, J_bl


def strengths_dir(CIJ):
    '''
    Node strength is the sum of weights of links connected to the node. The
    instrength is the sum of inward link weights and the outstrength is the
    sum of outward link weights.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        directed weighted connection matrix

    Returns
    -------
    str : Nx1 np.ndarray
        node strength (in-strength + out-strength)

    Notes
    -----
    Inputs are assumed to be on the columns of the CIJ matrix.
    In-strength is the column sum and out-strength is the row sum; only
    their elementwise sum (total strength) is returned.
    '''
    istr = np.sum(CIJ, axis=0)  # in-strength = column sum
    ostr = np.sum(CIJ, axis=1)  # out-strength = row sum
    return istr + ostr


def strengths_und(CIJ):
    '''
    Node strength is the sum of weights of links connected to the node.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        undirected weighted connection matrix

    Returns
    -------
    str : Nx1 np.ndarray
        node strengths
    '''
    return np.sum(CIJ, axis=0)


def strengths_und_sign(W):
    '''
    Node strength is the sum of weights of links connected to the node.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected connection matrix with positive and negative weights

    Returns
    -------
    Spos : Nx1 np.ndarray
        nodal strength of positive weights
    Sneg : Nx1 np.ndarray
        nodal strength of negative weights
    vpos : float
        total positive weight
    vneg : float
        total negative weight
    '''
    W = W.copy()
    np.fill_diagonal(W, 0)              # clear diagonal
    Spos = np.sum(W * (W > 0), axis=0)  # positive strengths
    Sneg = np.sum(W * (W < 0), axis=0)  # negative strengths

    vpos = np.sum(W[W > 0])  # total positive weight
    vneg = np.sum(W[W < 0])  # total negative weight
    return Spos, Sneg, vpos, vneg
These constraints 12 | are soft in that they are satisfied on average for the full network 13 | ensemble but not, in general, for each individual network. 14 | 15 | Parameters 16 | ---------- 17 | W : np.ndarray 18 | NxN square directed weighted connectivity matrix. All inputs must be 19 | nonnegative integers. Real valued weights could be converted to 20 | integers through rescaling and rounding. 21 | ci : np.ndarray 22 | Nx1 module affiliation vector. Can be None if there are no module 23 | constraints. Must contain nonnegative integers. The default value 24 | is None. 25 | lo : np.ndarray 26 | Nx1 out strength constraing logical vector. This vector specifies 27 | out strength constraints for each node. Alternately, it can be 28 | True to constrain all out-strengths or None for no constraints. 29 | The default value is None. 30 | li : np.ndarray 31 | Nx1 in strength constraing logical vector. This vector specifies 32 | in strength constraints for each node. Alternately, it can be 33 | True to constrain all in-strengths or None for no constraints. 34 | The default value is None. 35 | lm : np.ndarray 36 | Mx1 module-weight constraint logical matrix where M is the number of 37 | modules. Specifies module-weight constraints for all pairs of modules. 38 | Can be True, 'all', or 2, to constrain all inter-module and 39 | intra-module weights, 'intra' or 1 to constrain all intra-module 40 | weights only, or None for no constraints. The default value is None. 41 | seed : hashable, optional 42 | If None (default), use the np.random's global random state to generate random numbers. 43 | Otherwise, use a new np.random.RandomState instance seeded with the given value. 
def density_dir(CIJ):
    '''
    Density is the fraction of present connections to possible connections.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        directed weighted/binary connection matrix

    Returns
    -------
    kden : float
        density
    N : int
        number of vertices
    k : int
        number of edges

    Notes
    -----
    Assumes CIJ is directed and has no self-connections.
    Weight information is discarded.
    Requires N > 1 (a 1-node graph has zero possible connections).
    '''
    n = len(CIJ)
    # count_nonzero counts present edges directly, without materializing
    # index arrays the way np.size(np.where(...)) does
    k = int(np.count_nonzero(CIJ))
    kden = k / (n * n - n)  # n*n - n possible directed edges (no self-loops)
    return kden, n, k


def density_und(CIJ):
    '''
    Density is the fraction of present connections to possible connections.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        undirected (weighted/binary) connection matrix

    Returns
    -------
    kden : float
        density
    N : int
        number of vertices
    k : int
        number of edges

    Notes
    -----
    Assumes CIJ is undirected and has no self-connections.
    Weight information is discarded.
    Requires N > 1 (a 1-node graph has zero possible connections).
    '''
    n = len(CIJ)
    # only the upper triangle is counted, so each undirected edge is
    # counted once
    k = int(np.count_nonzero(np.triu(CIJ)))
    kden = k / ((n * n - n) / 2)
    return kden, n, k
96 | seed : hashable, optional 97 | If None (default), use the np.random's global random state to generate random numbers. 98 | Otherwise, use a new np.random.RandomState instance seeded with the given value. 99 | 100 | Returns 101 | ------- 102 | N : Mx1 np.ndarray 103 | Number of nodes in each of the M partitions 104 | E : Mx1 np.ndarray 105 | 106 | Notes 107 | ----- 108 | Subsequent Analysis: 109 | Rentian scaling plots are then created by: figure; loglog(E,N,'*'); 110 | To determine the Rent's exponent, p, it is important not to use 111 | partitions which may 112 | be affected by boundary conditions. In Bassett et al. 2010 PLoS CB, only 113 | partitions with N randx[0] 148 | l2 = xyzn[:, 0] < randx[1] 149 | l3 = xyzn[:, 1] > randx[0] 150 | l4 = xyzn[:, 1] < randx[1] 151 | l5 = xyzn[:, 2] > randx[0] 152 | l6 = xyzn[:, 2] < randx[1] 153 | 154 | L, = np.where((l1 & l2 & l3 & l4 & l5 & l6).flatten()) 155 | if np.size(L): 156 | # count edges crossing at the boundary of the cube 157 | E[count] = np.sum(A[np.ix_(L, np.setdiff1d(range(m), L))]) 158 | # count nodes inside of the cube 159 | N[count] = np.size(L) 160 | count += 1 161 | 162 | return N, E 163 | -------------------------------------------------------------------------------- /bct/due.py: -------------------------------------------------------------------------------- 1 | # emacs: at the end of the file 2 | # ex: set sts=4 ts=4 sw=4 et: 3 | # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### # 4 | """ 5 | 6 | Stub file for a guaranteed safe import of duecredit constructs: if duecredit 7 | is not available. 8 | 9 | To use it, place it into your project codebase to be imported, e.g. copy as 10 | 11 | cp stub.py /path/tomodule/module/due.py 12 | 13 | Note that it might be better to avoid naming it duecredit.py to avoid shadowing 14 | installed duecredit. 
class InactiveDueCreditCollector(object):
    """No-op stand-in for duecredit's collector when duecredit is absent."""

    def _donothing(self, *args, **kwargs):
        """Accept any arguments and do nothing at all."""
        pass

    def dcite(self, *args, **kwargs):
        """Return a decorator that leaves the decorated function untouched."""
        def passthrough(func):
            return func
        return passthrough

    # The real collector exposes these; here they are all inert.
    active = False
    activate = add = cite = dump = load = _donothing

    def __repr__(self):
        return self.__class__.__name__ + '()'
@due.dcite(BibTeX(ZALESKY2010), description="Network-based statistic")
def nbs_bct(x, y, thresh, k=1000, tail='both', paired=False, verbose=False, seed=None):
    '''
    Performs the NBS for populations X and Y for a t-statistic threshold of
    alpha.

    Parameters
    ----------
    x : NxNxP np.ndarray
        matrix representing the first population with P subjects. must be
        symmetric.
    y : NxNxQ np.ndarray
        matrix representing the second population with Q subjects. Q need not
        equal P. must be symmetric.
    thresh : float
        minimum t-value used as threshold
    k : int
        number of permutations used to estimate the empirical null
        distribution
    tail : {'left', 'right', 'both'}
        enables specification of particular alternative hypothesis
        'left' : mean population of X < mean population of Y
        'right' : mean population of Y < mean population of X
        'both' : means are unequal (default)
    paired : bool
        use paired sample t-test instead of population t-test. requires both
        subject populations to have equal N. default value = False
    verbose : bool
        print some extra information each iteration. defaults value = False
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.

    Returns
    -------
    pval : Cx1 np.ndarray
        A vector of corrected p-values for each component of the networks
        identified. If at least one p-value is less than alpha, the omnibus
        null hypothesis can be rejected at alpha significance. The null
        hypothesis is that the value of the connectivity from each edge has
        equal mean across the two populations.
    adj : IxIxC np.ndarray
        an adjacency matrix identifying the edges comprising each component.
        edges are assigned indexed values.
    null : Kx1 np.ndarray
        A vector of K sampled from the null distribution of maximal component
        size.

    Notes
    -----
    ALGORITHM DESCRIPTION
    The NBS is a nonparametric statistical test used to isolate the
    components of an N x N undirected connectivity matrix that differ
    significantly between two distinct populations. Each element of the
    connectivity matrix stores a connectivity value and each member of
    the two populations possesses a distinct connectivity matrix. A
    component of a connectivity matrix is defined as a set of
    interconnected edges.

    The NBS is essentially a procedure to control the family-wise error
    rate, in the weak sense, when the null hypothesis is tested
    independently at each of the N(N-1)/2 edges comprising the undirected
    connectivity matrix. The NBS can provide greater statistical power
    than conventional procedures for controlling the family-wise error
    rate, such as the false discovery rate, if the set of edges at which
    the null hypothesis is rejected constitutes a large component or
    components.
    The NBS comprises four steps:
    1. Perform a two-sample T-test at each edge independently to test the
       hypothesis that the value of connectivity between the two
       populations come from distributions with equal means.
    2. Threshold the T-statistic available at each edge to form a set of
       suprathreshold edges.
    3. Identify any components in the adjacency matrix defined by the set
       of suprathreshold edges. These are referred to as observed
       components. Compute the size of each observed component
       identified; that is, the number of edges it comprises.
    4. Repeat K times steps 1-3, each time randomly permuting members of
       the two populations and storing the size of the largest component
       identified for each permutation. This yields an empirical estimate
       of the null distribution of maximal component size. A corrected
       p-value for each observed component is then calculated using this
       null distribution.

    [1] Zalesky A, Fornito A, Bullmore ET (2010) Network-based statistic:
        Identifying differences in brain networks. NeuroImage.
        10.1016/j.neuroimage.2010.06.041
    '''
    rng = get_rng(seed)

    def ttest2_stat_only(x, y, tail):
        # pooled-variance two-sample t statistic (statistic only, no p-value)
        t = np.mean(x) - np.mean(y)
        n1, n2 = len(x), len(y)
        s = np.sqrt(((n1 - 1) * np.var(x, ddof=1) + (n2 - 1)
                     * np.var(y, ddof=1)) / (n1 + n2 - 2))
        denom = s * np.sqrt(1 / n1 + 1 / n2)
        # guard against zero pooled variance (identical samples)
        if denom == 0:
            return 0
        if tail == 'both':
            return np.abs(t / denom)
        if tail == 'left':
            return -t / denom
        else:
            return t / denom

    def ttest_paired_stat_only(A, B, tail):
        # paired-sample t statistic on the element-wise differences A - B
        n = len(A - B)
        df = n - 1  # NOTE(review): computed but unused
        sample_ss = np.sum((A - B)**2) - np.sum(A - B)**2 / n
        unbiased_std = np.sqrt(sample_ss / (n - 1))
        z = np.mean(A - B) / unbiased_std
        t = z * np.sqrt(n)
        if tail == 'both':
            return np.abs(t)
        if tail == 'left':
            return -t
        else:
            return t

    if tail not in ('both', 'left', 'right'):
        raise BCTParamError('Tail must be both, left, right')

    ix, jx, nx = x.shape
    iy, jy, ny = y.shape

    if not ix == jx == iy == jy:
        raise BCTParamError('Population matrices are of inconsistent size')
    else:
        n = ix

    if paired and nx != ny:
        raise BCTParamError('Population matrices must be an equal size')

    # only consider upper triangular edges
    ixes = np.where(np.triu(np.ones((n, n)), 1))

    # number of edges
    m = np.size(ixes, axis=1)

    # vectorize connectivity matrices for speed
    # (one column per subject, one row per upper-triangular edge)
    xmat, ymat = np.zeros((m, nx)), np.zeros((m, ny))

    for i in range(nx):
        xmat[:, i] = x[:, :, i][ixes].squeeze()
    for i in range(ny):
        ymat[:, i] = y[:, :, i][ixes].squeeze()
    del x, y

    # perform t-test at each edge
    t_stat = np.zeros((m,))
    for i in range(m):
        if paired:
            t_stat[i] = ttest_paired_stat_only(xmat[i, :], ymat[i, :], tail)
        else:
            t_stat[i] = ttest2_stat_only(xmat[i, :], ymat[i, :], tail)

    # threshold
    ind_t, = np.where(t_stat > thresh)

    if len(ind_t) == 0:
        raise BCTParamError("Unsuitable threshold")

    # suprathreshold adjacency matrix
    adj = np.zeros((n, n))
    adj[(ixes[0][ind_t], ixes[1][ind_t])] = 1
    # adj[ixes][ind_t]=1
    adj = adj + adj.T

    a, sz = get_components(adj)

    # convert size from nodes to number of edges
    # only consider components comprising more than one node (e.g. a/l 1 edge)
    ind_sz, = np.where(sz > 1)
    ind_sz += 1
    nr_components = np.size(ind_sz)
    sz_links = np.zeros((nr_components,))
    for i in range(nr_components):
        nodes, = np.where(ind_sz[i] == a)
        sz_links[i] = np.sum(adj[np.ix_(nodes, nodes)]) / 2
        # relabel this component's edges with index i + 2 (shifted down below)
        adj[np.ix_(nodes, nodes)] *= (i + 2)

    # subtract 1 to delete any edges not comprising a component
    adj[np.where(adj)] -= 1

    if np.size(sz_links):
        max_sz = np.max(sz_links)
    else:
        # max_sz=0
        raise BCTParamError('True matrix is degenerate')
    print('max component size is %i' % max_sz)

    # estimate empirical null distribution of maximum component size by
    # generating k independent permutations
    print('estimating null distribution with %i permutations' % k)

    null = np.zeros((k,))
    hit = 0
    for u in range(k):
        # randomize
        if paired:
            # paired design: randomly flip the sign of each subject pair
            indperm = np.sign(0.5 - rng.rand(1, nx))
            d = np.hstack((xmat, ymat)) * np.hstack((indperm, indperm))
        else:
            # unpaired design: shuffle group membership across all subjects
            d = np.hstack((xmat, ymat))[:, rng.permutation(nx + ny)]

        t_stat_perm = np.zeros((m,))
        for i in range(m):
            if paired:
                t_stat_perm[i] = ttest_paired_stat_only(
                    d[i, :nx], d[i, -nx:], tail)
            else:
                t_stat_perm[i] = ttest2_stat_only(d[i, :nx], d[i, -ny:], tail)

        ind_t, = np.where(t_stat_perm > thresh)

        adj_perm = np.zeros((n, n))
        adj_perm[(ixes[0][ind_t], ixes[1][ind_t])] = 1
        adj_perm = adj_perm + adj_perm.T

        a, sz = get_components(adj_perm)

        ind_sz, = np.where(sz > 1)
        ind_sz += 1
        nr_components_perm = np.size(ind_sz)
        sz_links_perm = np.zeros((nr_components_perm))
        for i in range(nr_components_perm):
            nodes, = np.where(ind_sz[i] == a)
            sz_links_perm[i] = np.sum(adj_perm[np.ix_(nodes, nodes)]) / 2

        if np.size(sz_links_perm):
            null[u] = np.max(sz_links_perm)
        else:
            null[u] = 0

        # compare to the true dataset
        if null[u] >= max_sz:
            hit += 1

        if verbose:
            print(('permutation %i of %i. Permutation max is %s. Observed max'
                   ' is %s. P-val estimate is %.3f') % (
                u + 1, k, null[u], max_sz, hit / (u + 1)))
        elif (u % (k / 10) == 0 or u == k - 1):
            print('permutation %i of %i. p-value so far is %.3f' % (u + 1, k,
                                                                    hit / (u + 1)))

    pvals = np.zeros((nr_components,))
    print("nr_components: ", nr_components)
    # calculate p-vals
    for i in range(nr_components):
        pvals[i] = np.size(np.where(null >= sz_links[i])) / k

    return pvals, adj, null
def ttest_paired_stat_only(A, B, tail):
    """
    Paired-sample t statistic for the element-wise differences A - B.

    Returns only the statistic (no p-value). With tail 'both' the absolute
    value is returned; 'left' negates the statistic; 'right' returns it
    unchanged.
    """
    diff = A - B
    n_obs = len(diff)
    _df = n_obs - 1  # kept for parity with the serial implementation (unused)
    # sum of squares about the mean, via the computational formula
    ss = np.sum(diff ** 2) - np.sum(diff) ** 2 / n_obs
    std_unbiased = np.sqrt(ss / (n_obs - 1))
    t = (np.mean(diff) / std_unbiased) * np.sqrt(n_obs)
    if tail == 'both':
        return np.abs(t)
    return -t if tail == 'left' else t
def nbs_bct(x, y, thresh, k=1000, tail='both', paired=False, verbose=False, seed=None, workers=-1):
    '''
    Parallelized network-based statistic (NBS) for populations X and Y at
    t-statistic threshold `thresh`. Permutations are distributed across a
    multiprocessing pool; otherwise the procedure matches the serial
    implementation in bct.nbs.

    Parameters
    ----------
    x : NxNxP np.ndarray
        symmetric connectivity matrices for the first population (P subjects)
    y : NxNxQ np.ndarray
        symmetric connectivity matrices for the second population (Q subjects)
    thresh : float
        minimum t-value used as threshold
    k : int
        number of permutations used to estimate the empirical null
        distribution
    tail : {'left', 'right', 'both'}
        alternative hypothesis (see serial nbs_bct)
    paired : bool
        use a paired-sample t-test; requires P == Q. Default False.
    verbose : bool
        print extra information per permutation (in workers). Default False.
    seed : hashable, optional
        seed for the permutation RNGs; if None, each worker seeds itself with
        its permutation index
    workers : int
        number of worker processes; -1 (default) uses all available CPUs

    Returns
    -------
    pvals : Cx1 np.ndarray
        corrected p-value for each observed component
    adj : NxN np.ndarray
        adjacency matrix labeling the edges of each component
    null_dist : Kx1 np.ndarray
        sampled null distribution of maximal component size
    '''
    if tail not in ('both', 'left', 'right'):
        raise BCTParamError('Tail must be both, left, right')

    ix, jx, nx = x.shape
    iy, jy, ny = y.shape

    if not ix == jx == iy == jy:
        raise BCTParamError('Population matrices are of inconsistent size')
    else:
        n = ix

    if paired and nx != ny:
        raise BCTParamError('Population matrices must be an equal size')

    # only consider upper triangular edges
    ixes = np.where(np.triu(np.ones((n, n)), 1))

    # number of edges
    m = np.size(ixes, axis=1)

    # vectorize connectivity matrices for speed
    xmat, ymat = np.zeros((m, nx)), np.zeros((m, ny))

    for i in range(nx):
        xmat[:, i] = x[:, :, i][ixes].squeeze()
    for i in range(ny):
        ymat[:, i] = y[:, :, i][ixes].squeeze()
    del x, y

    # perform t-test at each edge
    t_stat = np.zeros((m,))
    for i in range(m):
        if paired:
            t_stat[i] = ttest_paired_stat_only(xmat[i, :], ymat[i, :], tail)
        else:
            t_stat[i] = ttest2_stat_only(xmat[i, :], ymat[i, :], tail)

    # threshold
    ind_t, = np.where(t_stat > thresh)

    if len(ind_t) == 0:
        raise BCTParamError("Unsuitable threshold")

    # suprathreshold adjacency matrix
    adj = np.zeros((n, n))
    adj[(ixes[0][ind_t], ixes[1][ind_t])] = 1
    adj = adj + adj.T

    a, sz = get_components(adj)

    # convert size from nodes to number of edges; only consider components
    # comprising more than one node (i.e. at least 1 edge)
    ind_sz, = np.where(sz > 1)
    ind_sz += 1
    nr_components = np.size(ind_sz)
    sz_links = np.zeros((nr_components,))
    for i in range(nr_components):
        nodes, = np.where(ind_sz[i] == a)
        sz_links[i] = np.sum(adj[np.ix_(nodes, nodes)]) / 2
        adj[np.ix_(nodes, nodes)] *= (i + 2)

    # subtract 1 to delete any edges not comprising a component
    adj[np.where(adj)] -= 1

    if np.size(sz_links):
        max_sz = np.max(sz_links)
    else:
        raise BCTParamError('True matrix is degenerate')
    print('max component size is %i' % max_sz)

    print('Estimating null distribution with %i permutations. P-values will be returned at the end of the test.' % k)

    # `null` and `hit` are placeholders passed to the workers for interface
    # parity with _permutation; each worker mutates its own copy, so they
    # carry no information back to this process.
    null = np.zeros((k,))
    hit = 0
    if workers == -1:
        workers = multiprocessing.cpu_count()

    pool = multiprocessing.Pool(workers)
    perm_args = [(seed, u, xmat, ymat, thresh, tail, paired, m, n, ixes, nx, ny, verbose, null, max_sz, hit, k) for u in range(k)]

    # Parallelize permutation
    null_dist = pool.map(_permutation, perm_args)

    pool.close()
    pool.join()

    # each worker returns a k-vector with only its own slot filled; collapse
    # the stacked results to one max-component-size sample per permutation
    null_dist = np.array(null_dist)
    null_dist = np.array([max(i) for i in null_dist.T])

    pvals = np.zeros((nr_components,))
    # calculate p-vals against the sampled null distribution.
    # BUG FIX: this previously compared against `null`, the zeros placeholder
    # above, which the worker processes never fill in (multiprocessing copies
    # it into each worker), so every p-value came out 0. Compare against the
    # collected `null_dist` instead, matching the serial implementation.
    for i in range(nr_components):
        pvals[i] = np.size(np.where(null_dist >= sz_links[i])) / k

    return pvals, adj, null_dist
class BCTParamError(RuntimeError):
    '''Raised when a BCT routine receives invalid parameters.'''
    pass


def teachers_round(x):
    '''
    Do rounding such that .5 always rounds to 1, and not bankers rounding.
    This is for compatibility with matlab functions, and ease of testing.

    Parameters
    ----------
    x : float
        value to round

    Returns
    -------
    int
        x rounded, with halves rounded away from zero
    '''
    if ((x > 0) and (x % 1 >= 0.5)) or ((x < 0) and (x % 1 > 0.5)):
        return int(np.ceil(x))
    else:
        return int(np.floor(x))


def pick_four_unique_nodes_quickly(n, seed=None):
    '''
    This is equivalent to np.random.choice(n, 4, replace=False)

    Another fellow suggested np.random.random_sample(n).argpartition(4) which
    is clever but still substantially slower.

    Parameters
    ----------
    n : int
        number of nodes to draw from
    seed : hashable or np.random.RandomState, optional
        seed for the random number generator

    Returns
    -------
    tuple of int
        four distinct node indices in [0, n)
    '''
    rng = get_rng(seed)
    # draw one integer and decode it base-n into four digits; each digit is
    # uniform on [0, n), which needs only a single RNG call
    k = rng.randint(n**4)
    a = k % n
    b = k // n % n
    c = k // n ** 2 % n
    d = k // n ** 3 % n
    if (a != b and a != c and a != d and b != c and b != d and c != d):
        return (a, b, c, d)
    else:
        # the probability of finding a wrong configuration is extremely low
        # unless for extremely small n. if n is extremely small the
        # computational demand is not a problem.

        # In my profiling it only took 0.4 seconds to include the uniqueness
        # check in 1 million runs of this function so I think it is OK.
        return pick_four_unique_nodes_quickly(n, rng)


def cuberoot(x):
    '''
    Correctly handle the cube root for negative weights, instead of uselessly
    crashing as in python or returning the wrong root as in matlab

    Parameters
    ----------
    x : float or np.ndarray

    Returns
    -------
    float or np.ndarray
        the real cube root of x, preserving sign
    '''
    return np.sign(x) * np.abs(x)**(1 / 3)


def dummyvar(cis, return_sparse=False):
    '''
    This is an efficient implementation of matlab's "dummyvar" command
    using sparse matrices.

    Parameters
    ----------
    cis : NxM np.ndarray
        array containing M partitions of N nodes into <=N distinct
        communities
    return_sparse : bool
        if True, return the scipy.sparse matrix directly instead of a dense
        ndarray. Default value = False.

    Returns
    -------
    dummyvar : NxR np.ndarray (or scipy sparse matrix if return_sparse)
        R column indicator variables with N entries, where R is the total
        number of communities summed across each of the M partitions.

        i.e. r = sum((len(unique(partitions[:, i])) for i in range(m)))
    '''
    # num_rows is not affected by partition indexes
    n = np.size(cis, axis=0)
    m = np.size(cis, axis=1)
    # FIX: previously np.sum was applied to a generator expression, which is
    # deprecated/unsupported in numpy; use the builtin sum (the np.max around
    # the scalar len() was a no-op and is dropped).
    r = sum(len(np.unique(cis[:, i])) for i in range(m))
    nnz = np.prod(cis.shape)

    ix = np.argsort(cis, axis=0)
    # s_cis=np.sort(cis,axis=0)
    # FIXME use the sorted indices to sort by row efficiently
    s_cis = cis[ix][:, range(m), range(m)]

    # mark the start of each run of equal community labels per column
    mask = np.hstack((((True,),) * m, (s_cis[:-1, :] != s_cis[1:, :]).T))
    indptr, = np.where(mask.flat)
    indptr = np.append(indptr, nnz)

    import scipy.sparse as sp
    dv = sp.csc_matrix((np.repeat((1,), nnz), ix.T.flat, indptr), shape=(n, r))
    # FIX: honor the previously-ignored return_sparse flag (the default
    # dense behavior is unchanged)
    if return_sparse:
        return dv
    return dv.toarray()


def get_rng(seed=None):
    """
    By default, or if `seed` is np.random, return the global RandomState
    instance used by np.random.
    If `seed` is a RandomState instance, return it unchanged.
    Otherwise, use the passed (hashable) argument to seed a new instance
    of RandomState and return it.

    Parameters
    ----------
    seed : hashable or np.random.RandomState or np.random, optional

    Returns
    -------
    np.random.RandomState
    """
    # FIX: use `is` rather than `==` for the module comparison -- identity is
    # what is meant, and `==` would broadcast (and raise) if seed were an
    # ndarray.
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    elif isinstance(seed, np.random.RandomState):
        return seed
    try:
        rstate = np.random.RandomState(seed)
    except ValueError:
        # non-integer hashable seeds: derive an integer via the stdlib Random,
        # which accepts any hashable object
        rstate = np.random.RandomState(random.Random(seed).randint(0, 2**32-1))
    return rstate
def threshold_proportional(W, p, copy=True):
    '''
    This function "thresholds" the connectivity matrix by preserving a
    proportion p (0 < p < 1) of the strongest weights. All other weights,
    and all weights on the main diagonal (self-self connections), are set
    to 0.

    If copy is not set, this function will *modify W in place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix
    p : float
        proportional weight threshold in the range [0, 1]
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the
        matrix in place. Default value=True.

    Returns
    -------
    W : np.ndarray
        thresholded connectivity matrix

    Notes
    -----
    The proportion p is taken over all possible off-diagonal entries,
    whether or not they are currently nonzero; re-thresholding an already
    sparse matrix at a larger p may therefore do nothing. This behavior is
    the same as in BCT. Be careful with matrices that are both signed and
    sparse.
    '''
    from .miscellaneous_utilities import teachers_round as round

    if p > 1 or p < 0:
        raise BCTParamError('Threshold must be in range [0,1]')
    if copy:
        W = W.copy()

    n = len(W)                    # number of nodes
    np.fill_diagonal(W, 0)        # clear diagonal

    # For a symmetric matrix work on one triangle only, so each undirected
    # link is counted (and removed) exactly once.
    if np.allclose(W, W.T):
        W[np.tril_indices(n)] = 0
        halve = 2
    else:
        halve = 1

    nz = np.where(W)                                # indices of all links
    order = np.argsort(W[nz])[::-1]                 # strongest first
    n_keep = int(round((n * n - n) * p / halve))    # links to preserve

    # zero out everything weaker than the strongest n_keep links
    W[(nz[0][order][n_keep:], nz[1][order][n_keep:])] = 0

    if halve == 2:
        W[:, :] = W + W.T   # reconstruct symmetry
    return W
weight_conversion(W, 'normalize'); 114 | L = weight_conversion(W, 'lengths'); 115 | 116 | This function may either binarize an input weighted connection matrix, 117 | normalize an input weighted connection matrix or convert an input 118 | weighted connection matrix to a weighted connection-length matrix. 119 | 120 | Binarization converts all present connection weights to 1. 121 | 122 | Normalization scales all weight magnitudes to the range [0,1] and 123 | should be done prior to computing some weighted measures, such as the 124 | weighted clustering coefficient. 125 | 126 | Conversion of connection weights to connection lengths is needed 127 | prior to computation of weighted distance-based measures, such as 128 | distance and betweenness centrality. In a weighted connection network, 129 | higher weights are naturally interpreted as shorter lengths. The 130 | connection-lengths matrix here is defined as the inverse of the 131 | connection-weights matrix. 132 | 133 | If copy is not set, this function will *modify W in place.* 134 | 135 | Parameters 136 | ---------- 137 | W : NxN np.ndarray 138 | weighted connectivity matrix 139 | wcm : str 140 | weight conversion command. 141 | 'binarize' : binarize weights 142 | 'normalize' : normalize weights 143 | 'lengths' : convert weights to lengths (invert matrix) 144 | copy : bool 145 | if True, returns a copy of the matrix. Otherwise, modifies the matrix 146 | in place. Default value=True. 147 | 148 | Returns 149 | ------- 150 | W : NxN np.ndarray 151 | connectivity matrix with specified changes 152 | 153 | Notes 154 | ----- 155 | This function is included for compatibility with BCT. But there are 156 | other functions binarize(), normalize() and invert() which are simpler to 157 | call directly. 
def binarize(W, copy=True):
    '''
    Binarizes an input weighted connection matrix. If copy is not set, this
    function will *modify W in place.*

    Parameters
    ----------
    W : NxN np.ndarray
        weighted connectivity matrix
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True.

    Returns
    -------
    W : NxN np.ndarray
        binary connectivity matrix
    '''
    out = W.copy() if copy else W
    # every nonzero weight becomes 1; zeros are untouched
    out[out != 0] = 1
    return out


def normalize(W, copy=True):
    '''
    Normalizes an input weighted connection matrix. If copy is not set, this
    function will *modify W in place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True.

    Returns
    -------
    W : np.ndarray
        normalized connectivity matrix
    '''
    out = W.copy() if copy else W
    # scale all weight magnitudes into [-1, 1] by the largest absolute weight
    out /= np.max(np.abs(out))
    return out
def logtransform(W, copy=True):
    '''
    Makes a log transformation of the weights of an input matrix, such that
    each value W[i,j] is replaced by -log(W[i,j]).

    If copy is not set, this function will *modify W in place.*

    Parameters
    ----------
    W : np.ndarray
        weighted connectivity matrix with all weights in the interval (0, 1]
    copy : bool
        if True, returns a copy of the matrix. Otherwise, modifies the matrix
        in place. Default value=True

    Returns
    -------
    W : np.ndarray
        Log transformed connectivity matrix

    Raises
    ------
    ValueError
        If any weight lies outside (0, 1], where -log is undefined or
        negative.
    '''
    if np.logical_or(W > 1, W <= 0).any():
        raise ValueError("Connection strengths must be in the interval (0,1] "
                         "to use the transform -log(w_ij)")
    if copy:
        return -np.log(W)
    # BUG FIX: `W = -np.log(W)` merely rebound the local name, so with
    # copy=False the caller's array was never modified despite the documented
    # in-place contract. Write through the array instead.
    W[...] = -np.log(W)
    return W
286 | 287 | Returns 288 | ------- 289 | W : np.ndarray 290 | connectivity matrix with fixes applied 291 | ''' 292 | if copy: 293 | W = W.copy() 294 | 295 | # zero diagonal 296 | np.fill_diagonal(W, 0) 297 | 298 | # remove np.inf and np.nan 299 | W[np.where(np.isinf(W))] = 0 300 | W[np.where(np.isnan(W))] = 0 301 | 302 | # ensure exact binarity 303 | u = np.unique(W) 304 | if np.all(np.logical_or(np.abs(u) < 1e-8, np.abs(u - 1) < 1e-8)): 305 | W = np.around(W, decimals=5) 306 | 307 | # ensure exact symmetry 308 | if np.allclose(W, W.T): 309 | W = np.around(W, decimals=5) 310 | 311 | return W 312 | -------------------------------------------------------------------------------- /bct/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.6.0' 2 | __version_info__ = tuple(int(i) for i in __version__.split('.')) 3 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/bct.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/bct.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/bct" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/bct" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 
157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/_build/doctrees/_templates/function.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/doctrees/_templates/function.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/bct.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/doctrees/bct.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/_build/doctrees/index.doctree: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/modules.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/doctrees/modules.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/stupid.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/doctrees/stupid.doctree -------------------------------------------------------------------------------- /docs/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 1dc8f73e7172509b9346ee792891040b 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/_templates/function.txt: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autofunction:: {{ objname }} 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/bct.txt: -------------------------------------------------------------------------------- 1 | Brain Connectivity Toolbox 2 | ========================== 3 | 4 | .. currentmodule:: bct 5 | 6 | Centrality 7 | ---------- 8 | 9 | .. 
autofunction:: betweenness_bin 10 | .. autofunction:: betweenness_wei 11 | .. autofunction:: diversity_coef_sign 12 | .. autofunction:: edge_betweenness_bin 13 | .. autofunction:: edge_betweenness_wei 14 | .. autofunction:: eigenvector_centrality_und 15 | .. autofunction:: flow_coef_bd 16 | .. autofunction:: kcoreness_centrality_bd 17 | .. autofunction:: kcoreness_centrality_bu 18 | .. autofunction:: module_degree_zscore 19 | .. autofunction:: pagerank_centrality 20 | .. autofunction:: participation_coef 21 | .. autofunction:: participation_coef_sign 22 | .. autofunction:: subgraph_centrality 23 | 24 | Clustering 25 | ---------- 26 | 27 | .. autofunction:: agreement 28 | .. autofunction:: agreement_weighted 29 | .. autofunction:: clustering_coef_bd 30 | .. autofunction:: clustering_coef_bu 31 | .. autofunction:: clustering_coef_wd 32 | .. autofunction:: clustering_coef_wu 33 | .. autofunction:: consensus_und 34 | .. autofunction:: get_components 35 | .. autofunction:: transitivity_bd 36 | .. autofunction:: transitivity_bu 37 | .. autofunction:: transitivity_wd 38 | .. autofunction:: transitivity_wu 39 | 40 | Core 41 | ---- 42 | 43 | .. autofunction:: assortativity_bin 44 | .. autofunction:: assortativity_wei 45 | .. autofunction:: kcore_bd 46 | .. autofunction:: kcore_bu 47 | .. autofunction:: rich_club_bd 48 | .. autofunction:: rich_club_bu 49 | .. autofunction:: rich_club_wd 50 | .. autofunction:: rich_club_wu 51 | .. autofunction:: score_wu 52 | 53 | Degree 54 | ------ 55 | 56 | .. autofunction:: degrees_dir 57 | .. autofunction:: degrees_und 58 | .. autofunction:: jdegree 59 | .. autofunction:: strengths_dir 60 | .. autofunction:: strengths_und 61 | .. autofunction:: strengths_und_sign 62 | 63 | Distance 64 | -------- 65 | 66 | .. autofunction:: breadthdist 67 | .. autofunction:: breadth 68 | .. autofunction:: charpath 69 | .. autofunction:: cycprob 70 | .. autofunction:: distance_bin 71 | .. autofunction:: distance_wei 72 | .. 
autofunction:: efficiency_bin 73 | .. autofunction:: efficiency_wei 74 | .. autofunction:: findpaths 75 | .. autofunction:: findwalks 76 | .. autofunction:: reachdist 77 | 78 | Modularity 79 | ---------- 80 | 81 | .. autofunction:: ci2ls 82 | .. autofunction:: ls2ci 83 | .. autofunction:: community_louvain 84 | .. autofunction:: link_communities 85 | .. autofunction:: modularity_dir 86 | .. autofunction:: modularity_und 87 | .. autofunction:: modularity_und_sig 88 | .. autofunction:: modularity_finetune_und 89 | .. autofunction:: modularity_finetune_und_sign 90 | .. autofunction:: modularity_louvain_dir 91 | .. autofunction:: modularity_louvain_und 92 | .. autofunction:: modularity_louvain_und_sign 93 | .. autofunction:: modularity_probtune_und_sign 94 | .. autofunction:: partition_distance 95 | 96 | Motif 97 | ----- 98 | 99 | .. autofunction:: find_motif34 100 | .. autofunction:: motif3funct_bin 101 | .. autofunction:: motif3funct_wei 102 | .. autofunction:: motif3struct_bin 103 | .. autofunction:: motif3struct_wei 104 | .. autofunction:: motif4funct_bin 105 | .. autofunction:: motif4funct_wei 106 | .. autofunction:: motif4struct_bin 107 | .. autofunction:: motif4struct_wei 108 | 109 | Miscellaneous 110 | ------------- 111 | 112 | .. autofunction:: threshold_absolute 113 | .. autofunction:: threshold_proportional 114 | .. autofunction:: weight_conversion 115 | .. autofunction:: binarize 116 | .. autofunction:: normalize 117 | .. autofunction:: invert 118 | .. autofunction:: autofix 119 | 120 | Physical Connectivity 121 | --------------------- 122 | 123 | .. autofunction:: density_dir 124 | .. autofunction:: density_und 125 | .. autofunction:: rentian_scaling 126 | 127 | Reference 128 | --------- 129 | .. autofunction:: latmio_dir_connected 130 | .. autofunction:: latmio_dir 131 | .. autofunction:: latmio_und_connected 132 | .. autofunction:: latmio_und 133 | .. autofunction:: makeevenCIJ 134 | .. autofunction:: makefractalCIJ 135 | .. 
autofunction:: makerandCIJdegreesfixed 136 | .. autofunction:: makerandCIJ_dir 137 | .. autofunction:: makerandCIJ_und 138 | .. autofunction:: makeringlatticeCIJ 139 | .. autofunction:: maketoeplitzCIJ 140 | .. autofunction:: null_model_dir_sign 141 | .. autofunction:: null_model_und_sign 142 | .. autofunction:: randmio_dir_connected 143 | .. autofunction:: randmio_dir 144 | .. autofunction:: randmio_und_connected 145 | .. autofunction:: randmio_und 146 | .. autofunction:: randmio_und_signed 147 | .. autofunction:: randomize_graph_partial_und 148 | .. autofunction:: randomizer_bin_und 149 | 150 | Similarity 151 | ---------- 152 | 153 | .. autofunction:: edge_nei_overlap_bd 154 | .. autofunction:: edge_nei_overlap_bu 155 | .. autofunction:: gtom 156 | .. autofunction:: matching_ind 157 | .. autofunction:: matching_ind_und 158 | .. autofunction:: dice_pairwise_und 159 | .. autofunction:: corr_flat_und 160 | .. autofunction:: corr_flat_dir 161 | 162 | Visualization 163 | ------------- 164 | 165 | .. autofunction:: adjacency_plot_und 166 | .. autofunction:: align_matrices 167 | .. autofunction:: backbone_wu 168 | .. autofunction:: grid_communities 169 | .. autofunction:: reorderMAT 170 | .. autofunction:: reorder_matrix 171 | .. autofunction:: reorder_mod 172 | .. autofunction:: writetoPAJ 173 | 174 | Network Based Statistic 175 | ======================= 176 | 177 | .. currentmodule:: nbs 178 | 179 | .. autofunction:: nbs_bct 180 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/index.txt: -------------------------------------------------------------------------------- 1 | .. bct documentation master file, created by 2 | sphinx-quickstart on Tue Jul 21 11:30:20 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to bct's documentation! 7 | =============================== 8 | 9 | Contents: 10 | 11 | .. 
toctree:: 12 | :maxdepth: 2 13 | 14 | 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | 23 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/modules.txt: -------------------------------------------------------------------------------- 1 | bct 2 | === 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | bct 8 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/stupid.txt: -------------------------------------------------------------------------------- 1 | bct package 2 | =========== 3 | 4 | Submodules 5 | ---------- 6 | 7 | bct.bct module 8 | -------------- 9 | 10 | .. automodule:: bct.bct 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | bct.nbs module 16 | -------------- 17 | 18 | .. automodule:: bct.nbs 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: bct 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/_build/html/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/ajax-loader.gif -------------------------------------------------------------------------------- /docs/_build/html/_static/basic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * basic.css 3 | * ~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- basic theme. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | /* -- main layout ----------------------------------------------------------- */ 13 | 14 | div.clearer { 15 | clear: both; 16 | } 17 | 18 | /* -- relbar ---------------------------------------------------------------- */ 19 | 20 | div.related { 21 | width: 100%; 22 | font-size: 90%; 23 | } 24 | 25 | div.related h3 { 26 | display: none; 27 | } 28 | 29 | div.related ul { 30 | margin: 0; 31 | padding: 0 0 0 10px; 32 | list-style: none; 33 | } 34 | 35 | div.related li { 36 | display: inline; 37 | } 38 | 39 | div.related li.right { 40 | float: right; 41 | margin-right: 5px; 42 | } 43 | 44 | /* -- sidebar --------------------------------------------------------------- */ 45 | 46 | div.sphinxsidebarwrapper { 47 | padding: 10px 5px 0 10px; 48 | } 49 | 50 | div.sphinxsidebar { 51 | float: left; 52 | width: 230px; 53 | margin-left: -100%; 54 | font-size: 90%; 55 | } 56 | 57 | div.sphinxsidebar ul { 58 | list-style: none; 59 | } 60 | 61 | div.sphinxsidebar ul ul, 62 | div.sphinxsidebar ul.want-points { 63 | margin-left: 20px; 64 | list-style: square; 65 | } 66 | 67 | div.sphinxsidebar ul ul { 68 | margin-top: 0; 69 | margin-bottom: 0; 70 | } 71 | 72 | div.sphinxsidebar form { 73 | margin-top: 10px; 74 | } 75 | 76 | div.sphinxsidebar input { 77 | border: 1px solid #98dbcc; 78 | font-family: sans-serif; 79 | font-size: 1em; 80 | } 81 | 82 | div.sphinxsidebar #searchbox input[type="text"] { 83 | width: 170px; 84 | } 85 | 86 | div.sphinxsidebar #searchbox input[type="submit"] { 87 | width: 30px; 88 | } 89 | 90 | img { 91 | border: 0; 92 | max-width: 100%; 93 | } 94 | 95 | /* -- search page ----------------------------------------------------------- */ 96 | 97 | ul.search { 98 | margin: 10px 0 0 20px; 99 | padding: 0; 100 | } 101 | 102 | ul.search li { 103 | padding: 5px 0 5px 20px; 104 | background-image: url(file.png); 105 | background-repeat: no-repeat; 106 | background-position: 0 7px; 107 | } 108 | 109 | ul.search li a { 110 | font-weight: bold; 111 | 
} 112 | 113 | ul.search li div.context { 114 | color: #888; 115 | margin: 2px 0 0 30px; 116 | text-align: left; 117 | } 118 | 119 | ul.keywordmatches li.goodmatch a { 120 | font-weight: bold; 121 | } 122 | 123 | /* -- index page ------------------------------------------------------------ */ 124 | 125 | table.contentstable { 126 | width: 90%; 127 | } 128 | 129 | table.contentstable p.biglink { 130 | line-height: 150%; 131 | } 132 | 133 | a.biglink { 134 | font-size: 1.3em; 135 | } 136 | 137 | span.linkdescr { 138 | font-style: italic; 139 | padding-top: 5px; 140 | font-size: 90%; 141 | } 142 | 143 | /* -- general index --------------------------------------------------------- */ 144 | 145 | table.indextable { 146 | width: 100%; 147 | } 148 | 149 | table.indextable td { 150 | text-align: left; 151 | vertical-align: top; 152 | } 153 | 154 | table.indextable dl, table.indextable dd { 155 | margin-top: 0; 156 | margin-bottom: 0; 157 | } 158 | 159 | table.indextable tr.pcap { 160 | height: 10px; 161 | } 162 | 163 | table.indextable tr.cap { 164 | margin-top: 10px; 165 | background-color: #f2f2f2; 166 | } 167 | 168 | img.toggler { 169 | margin-right: 3px; 170 | margin-top: 3px; 171 | cursor: pointer; 172 | } 173 | 174 | div.modindex-jumpbox { 175 | border-top: 1px solid #ddd; 176 | border-bottom: 1px solid #ddd; 177 | margin: 1em 0 1em 0; 178 | padding: 0.4em; 179 | } 180 | 181 | div.genindex-jumpbox { 182 | border-top: 1px solid #ddd; 183 | border-bottom: 1px solid #ddd; 184 | margin: 1em 0 1em 0; 185 | padding: 0.4em; 186 | } 187 | 188 | /* -- general body styles --------------------------------------------------- */ 189 | 190 | a.headerlink { 191 | visibility: hidden; 192 | } 193 | 194 | h1:hover > a.headerlink, 195 | h2:hover > a.headerlink, 196 | h3:hover > a.headerlink, 197 | h4:hover > a.headerlink, 198 | h5:hover > a.headerlink, 199 | h6:hover > a.headerlink, 200 | dt:hover > a.headerlink { 201 | visibility: visible; 202 | } 203 | 204 | div.body p.caption { 205 | 
text-align: inherit; 206 | } 207 | 208 | div.body td { 209 | text-align: left; 210 | } 211 | 212 | .field-list ul { 213 | padding-left: 1em; 214 | } 215 | 216 | .first { 217 | margin-top: 0 !important; 218 | } 219 | 220 | p.rubric { 221 | margin-top: 30px; 222 | font-weight: bold; 223 | } 224 | 225 | img.align-left, .figure.align-left, object.align-left { 226 | clear: left; 227 | float: left; 228 | margin-right: 1em; 229 | } 230 | 231 | img.align-right, .figure.align-right, object.align-right { 232 | clear: right; 233 | float: right; 234 | margin-left: 1em; 235 | } 236 | 237 | img.align-center, .figure.align-center, object.align-center { 238 | display: block; 239 | margin-left: auto; 240 | margin-right: auto; 241 | } 242 | 243 | .align-left { 244 | text-align: left; 245 | } 246 | 247 | .align-center { 248 | text-align: center; 249 | } 250 | 251 | .align-right { 252 | text-align: right; 253 | } 254 | 255 | /* -- sidebars -------------------------------------------------------------- */ 256 | 257 | div.sidebar { 258 | margin: 0 0 0.5em 1em; 259 | border: 1px solid #ddb; 260 | padding: 7px 7px 0 7px; 261 | background-color: #ffe; 262 | width: 40%; 263 | float: right; 264 | } 265 | 266 | p.sidebar-title { 267 | font-weight: bold; 268 | } 269 | 270 | /* -- topics ---------------------------------------------------------------- */ 271 | 272 | div.topic { 273 | border: 1px solid #ccc; 274 | padding: 7px 7px 0 7px; 275 | margin: 10px 0 10px 0; 276 | } 277 | 278 | p.topic-title { 279 | font-size: 1.1em; 280 | font-weight: bold; 281 | margin-top: 10px; 282 | } 283 | 284 | /* -- admonitions ----------------------------------------------------------- */ 285 | 286 | div.admonition { 287 | margin-top: 10px; 288 | margin-bottom: 10px; 289 | padding: 7px; 290 | } 291 | 292 | div.admonition dt { 293 | font-weight: bold; 294 | } 295 | 296 | div.admonition dl { 297 | margin-bottom: 0; 298 | } 299 | 300 | p.admonition-title { 301 | margin: 0px 10px 5px 0px; 302 | font-weight: bold; 
303 | } 304 | 305 | div.body p.centered { 306 | text-align: center; 307 | margin-top: 25px; 308 | } 309 | 310 | /* -- tables ---------------------------------------------------------------- */ 311 | 312 | table.docutils { 313 | border: 0; 314 | border-collapse: collapse; 315 | } 316 | 317 | table.docutils td, table.docutils th { 318 | padding: 1px 8px 1px 5px; 319 | border-top: 0; 320 | border-left: 0; 321 | border-right: 0; 322 | border-bottom: 1px solid #aaa; 323 | } 324 | 325 | table.field-list td, table.field-list th { 326 | border: 0 !important; 327 | } 328 | 329 | table.footnote td, table.footnote th { 330 | border: 0 !important; 331 | } 332 | 333 | th { 334 | text-align: left; 335 | padding-right: 5px; 336 | } 337 | 338 | table.citation { 339 | border-left: solid 1px gray; 340 | margin-left: 1px; 341 | } 342 | 343 | table.citation td { 344 | border-bottom: none; 345 | } 346 | 347 | /* -- other body styles ----------------------------------------------------- */ 348 | 349 | ol.arabic { 350 | list-style: decimal; 351 | } 352 | 353 | ol.loweralpha { 354 | list-style: lower-alpha; 355 | } 356 | 357 | ol.upperalpha { 358 | list-style: upper-alpha; 359 | } 360 | 361 | ol.lowerroman { 362 | list-style: lower-roman; 363 | } 364 | 365 | ol.upperroman { 366 | list-style: upper-roman; 367 | } 368 | 369 | dl { 370 | margin-bottom: 15px; 371 | } 372 | 373 | dd p { 374 | margin-top: 0px; 375 | } 376 | 377 | dd ul, dd table { 378 | margin-bottom: 10px; 379 | } 380 | 381 | dd { 382 | margin-top: 3px; 383 | margin-bottom: 10px; 384 | margin-left: 30px; 385 | } 386 | 387 | dt:target, .highlighted { 388 | background-color: #fbe54e; 389 | } 390 | 391 | dl.glossary dt { 392 | font-weight: bold; 393 | font-size: 1.1em; 394 | } 395 | 396 | .field-list ul { 397 | margin: 0; 398 | padding-left: 1em; 399 | } 400 | 401 | .field-list p { 402 | margin: 0; 403 | } 404 | 405 | .optional { 406 | font-size: 1.3em; 407 | } 408 | 409 | .versionmodified { 410 | font-style: italic; 411 | } 412 
| 413 | .system-message { 414 | background-color: #fda; 415 | padding: 5px; 416 | border: 3px solid red; 417 | } 418 | 419 | .footnote:target { 420 | background-color: #ffa; 421 | } 422 | 423 | .line-block { 424 | display: block; 425 | margin-top: 1em; 426 | margin-bottom: 1em; 427 | } 428 | 429 | .line-block .line-block { 430 | margin-top: 0; 431 | margin-bottom: 0; 432 | margin-left: 1.5em; 433 | } 434 | 435 | .guilabel, .menuselection { 436 | font-family: sans-serif; 437 | } 438 | 439 | .accelerator { 440 | text-decoration: underline; 441 | } 442 | 443 | .classifier { 444 | font-style: oblique; 445 | } 446 | 447 | abbr, acronym { 448 | border-bottom: dotted 1px; 449 | cursor: help; 450 | } 451 | 452 | /* -- code displays --------------------------------------------------------- */ 453 | 454 | pre { 455 | overflow: auto; 456 | overflow-y: hidden; /* fixes display issues on Chrome browsers */ 457 | } 458 | 459 | td.linenos pre { 460 | padding: 5px 0px; 461 | border: 0; 462 | background-color: transparent; 463 | color: #aaa; 464 | } 465 | 466 | table.highlighttable { 467 | margin-left: 0.5em; 468 | } 469 | 470 | table.highlighttable td { 471 | padding: 0 0.5em 0 0.5em; 472 | } 473 | 474 | tt.descname { 475 | background-color: transparent; 476 | font-weight: bold; 477 | font-size: 1.2em; 478 | } 479 | 480 | tt.descclassname { 481 | background-color: transparent; 482 | } 483 | 484 | tt.xref, a tt { 485 | background-color: transparent; 486 | font-weight: bold; 487 | } 488 | 489 | h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { 490 | background-color: transparent; 491 | } 492 | 493 | .viewcode-link { 494 | float: right; 495 | } 496 | 497 | .viewcode-back { 498 | float: right; 499 | font-family: sans-serif; 500 | } 501 | 502 | div.viewcode-block:target { 503 | margin: -1px -10px; 504 | padding: 0 10px; 505 | } 506 | 507 | /* -- math display ---------------------------------------------------------- */ 508 | 509 | img.math { 510 | vertical-align: middle; 511 | } 512 | 513 | 
div.body div.math p { 514 | text-align: center; 515 | } 516 | 517 | span.eqno { 518 | float: right; 519 | } 520 | 521 | /* -- printout stylesheet --------------------------------------------------- */ 522 | 523 | @media print { 524 | div.document, 525 | div.documentwrapper, 526 | div.bodywrapper { 527 | margin: 0 !important; 528 | width: 100%; 529 | } 530 | 531 | div.sphinxsidebar, 532 | div.related, 533 | div.footer, 534 | #top-link { 535 | display: none; 536 | } 537 | } -------------------------------------------------------------------------------- /docs/_build/html/_static/comment-bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/comment-bright.png -------------------------------------------------------------------------------- /docs/_build/html/_static/comment-close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/comment-close.png -------------------------------------------------------------------------------- /docs/_build/html/_static/comment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/comment.png -------------------------------------------------------------------------------- /docs/_build/html/_static/default.css: -------------------------------------------------------------------------------- 1 | /* 2 | * default.css_t 3 | * ~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- default theme. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: sans-serif; 18 | font-size: 100%; 19 | background-color: #11303d; 20 | color: #000; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.document { 26 | background-color: #1c4e63; 27 | } 28 | 29 | div.documentwrapper { 30 | float: left; 31 | width: 100%; 32 | } 33 | 34 | div.bodywrapper { 35 | margin: 0 0 0 230px; 36 | } 37 | 38 | div.body { 39 | background-color: #ffffff; 40 | color: #000000; 41 | padding: 0 20px 30px 20px; 42 | } 43 | 44 | div.footer { 45 | color: #ffffff; 46 | width: 100%; 47 | padding: 9px 0 9px 0; 48 | text-align: center; 49 | font-size: 75%; 50 | } 51 | 52 | div.footer a { 53 | color: #ffffff; 54 | text-decoration: underline; 55 | } 56 | 57 | div.related { 58 | background-color: #133f52; 59 | line-height: 30px; 60 | color: #ffffff; 61 | } 62 | 63 | div.related a { 64 | color: #ffffff; 65 | } 66 | 67 | div.sphinxsidebar { 68 | } 69 | 70 | div.sphinxsidebar h3 { 71 | font-family: 'Trebuchet MS', sans-serif; 72 | color: #ffffff; 73 | font-size: 1.4em; 74 | font-weight: normal; 75 | margin: 0; 76 | padding: 0; 77 | } 78 | 79 | div.sphinxsidebar h3 a { 80 | color: #ffffff; 81 | } 82 | 83 | div.sphinxsidebar h4 { 84 | font-family: 'Trebuchet MS', sans-serif; 85 | color: #ffffff; 86 | font-size: 1.3em; 87 | font-weight: normal; 88 | margin: 5px 0 0 0; 89 | padding: 0; 90 | } 91 | 92 | div.sphinxsidebar p { 93 | color: #ffffff; 94 | } 95 | 96 | div.sphinxsidebar p.topless { 97 | margin: 5px 10px 10px 10px; 98 | } 99 | 100 | div.sphinxsidebar ul { 101 | margin: 10px; 102 | padding: 0; 103 | color: #ffffff; 104 | } 105 | 106 | div.sphinxsidebar a { 107 | color: #98dbcc; 108 | } 109 | 110 | div.sphinxsidebar input { 111 | border: 1px solid #98dbcc; 112 | font-family: sans-serif; 113 | font-size: 1em; 114 | } 115 | 116 | 117 | 118 | /* -- hyperlink styles 
------------------------------------------------------ */ 119 | 120 | a { 121 | color: #355f7c; 122 | text-decoration: none; 123 | } 124 | 125 | a:visited { 126 | color: #355f7c; 127 | text-decoration: none; 128 | } 129 | 130 | a:hover { 131 | text-decoration: underline; 132 | } 133 | 134 | 135 | 136 | /* -- body styles ----------------------------------------------------------- */ 137 | 138 | div.body h1, 139 | div.body h2, 140 | div.body h3, 141 | div.body h4, 142 | div.body h5, 143 | div.body h6 { 144 | font-family: 'Trebuchet MS', sans-serif; 145 | background-color: #f2f2f2; 146 | font-weight: normal; 147 | color: #20435c; 148 | border-bottom: 1px solid #ccc; 149 | margin: 20px -20px 10px -20px; 150 | padding: 3px 0 3px 10px; 151 | } 152 | 153 | div.body h1 { margin-top: 0; font-size: 200%; } 154 | div.body h2 { font-size: 160%; } 155 | div.body h3 { font-size: 140%; } 156 | div.body h4 { font-size: 120%; } 157 | div.body h5 { font-size: 110%; } 158 | div.body h6 { font-size: 100%; } 159 | 160 | a.headerlink { 161 | color: #c60f0f; 162 | font-size: 0.8em; 163 | padding: 0 4px 0 4px; 164 | text-decoration: none; 165 | } 166 | 167 | a.headerlink:hover { 168 | background-color: #c60f0f; 169 | color: white; 170 | } 171 | 172 | div.body p, div.body dd, div.body li { 173 | text-align: justify; 174 | line-height: 130%; 175 | } 176 | 177 | div.admonition p.admonition-title + p { 178 | display: inline; 179 | } 180 | 181 | div.admonition p { 182 | margin-bottom: 5px; 183 | } 184 | 185 | div.admonition pre { 186 | margin-bottom: 5px; 187 | } 188 | 189 | div.admonition ul, div.admonition ol { 190 | margin-bottom: 5px; 191 | } 192 | 193 | div.note { 194 | background-color: #eee; 195 | border: 1px solid #ccc; 196 | } 197 | 198 | div.seealso { 199 | background-color: #ffc; 200 | border: 1px solid #ff6; 201 | } 202 | 203 | div.topic { 204 | background-color: #eee; 205 | } 206 | 207 | div.warning { 208 | background-color: #ffe4e4; 209 | border: 1px solid #f66; 210 | } 211 | 212 
| p.admonition-title { 213 | display: inline; 214 | } 215 | 216 | p.admonition-title:after { 217 | content: ":"; 218 | } 219 | 220 | pre { 221 | padding: 5px; 222 | background-color: #eeffcc; 223 | color: #333333; 224 | line-height: 120%; 225 | border: 1px solid #ac9; 226 | border-left: none; 227 | border-right: none; 228 | } 229 | 230 | tt { 231 | background-color: #ecf0f3; 232 | padding: 0 1px 0 1px; 233 | font-size: 0.95em; 234 | } 235 | 236 | th { 237 | background-color: #ede; 238 | } 239 | 240 | .warning tt { 241 | background: #efc2c2; 242 | } 243 | 244 | .note tt { 245 | background: #d6d6d6; 246 | } 247 | 248 | .viewcode-back { 249 | font-family: sans-serif; 250 | } 251 | 252 | div.viewcode-block:target { 253 | background-color: #f4debf; 254 | border-top: 1px solid #ac9; 255 | border-bottom: 1px solid #ac9; 256 | } -------------------------------------------------------------------------------- /docs/_build/html/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s == 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 
67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node) { 70 | if (node.nodeType == 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { 74 | var span = document.createElement("span"); 75 | span.className = className; 76 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 77 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 78 | document.createTextNode(val.substr(pos + text.length)), 79 | node.nextSibling)); 80 | node.nodeValue = val.substr(0, pos); 81 | } 82 | } 83 | else if (!jQuery(node).is("button, select, textarea")) { 84 | jQuery.each(node.childNodes, function() { 85 | highlight(this); 86 | }); 87 | } 88 | } 89 | return this.each(function() { 90 | highlight(this); 91 | }); 92 | }; 93 | 94 | /** 95 | * Small JavaScript module for the documentation. 96 | */ 97 | var Documentation = { 98 | 99 | init : function() { 100 | this.fixFirefoxAnchorBug(); 101 | this.highlightSearchWords(); 102 | this.initIndexTable(); 103 | }, 104 | 105 | /** 106 | * i18n support 107 | */ 108 | TRANSLATIONS : {}, 109 | PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, 110 | LOCALE : 'unknown', 111 | 112 | // gettext and ngettext don't access this so that the functions 113 | // can safely bound to a different name (_ = Documentation.gettext) 114 | gettext : function(string) { 115 | var translated = Documentation.TRANSLATIONS[string]; 116 | if (typeof translated == 'undefined') 117 | return string; 118 | return (typeof translated == 'string') ? translated : translated[0]; 119 | }, 120 | 121 | ngettext : function(singular, plural, n) { 122 | var translated = Documentation.TRANSLATIONS[singular]; 123 | if (typeof translated == 'undefined') 124 | return (n == 1) ? 
singular : plural; 125 | return translated[Documentation.PLURALEXPR(n)]; 126 | }, 127 | 128 | addTranslations : function(catalog) { 129 | for (var key in catalog.messages) 130 | this.TRANSLATIONS[key] = catalog.messages[key]; 131 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 132 | this.LOCALE = catalog.locale; 133 | }, 134 | 135 | /** 136 | * add context elements like header anchor links 137 | */ 138 | addContextElements : function() { 139 | $('div[id] > :header:first').each(function() { 140 | $('\u00B6'). 141 | attr('href', '#' + this.id). 142 | attr('title', _('Permalink to this headline')). 143 | appendTo(this); 144 | }); 145 | $('dt[id]').each(function() { 146 | $('\u00B6'). 147 | attr('href', '#' + this.id). 148 | attr('title', _('Permalink to this definition')). 149 | appendTo(this); 150 | }); 151 | }, 152 | 153 | /** 154 | * workaround a firefox stupidity 155 | */ 156 | fixFirefoxAnchorBug : function() { 157 | if (document.location.hash && $.browser.mozilla) 158 | window.setTimeout(function() { 159 | document.location.href += ''; 160 | }, 10); 161 | }, 162 | 163 | /** 164 | * highlight the search words provided in the url in the text 165 | */ 166 | highlightSearchWords : function() { 167 | var params = $.getQueryParameters(); 168 | var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; 169 | if (terms.length) { 170 | var body = $('div.body'); 171 | if (!body.length) { 172 | body = $('body'); 173 | } 174 | window.setTimeout(function() { 175 | $.each(terms, function() { 176 | body.highlightText(this.toLowerCase(), 'highlighted'); 177 | }); 178 | }, 10); 179 | $('') 181 | .appendTo($('#searchbox')); 182 | } 183 | }, 184 | 185 | /** 186 | * init the domain index toggle buttons 187 | */ 188 | initIndexTable : function() { 189 | var togglers = $('img.toggler').click(function() { 190 | var src = $(this).attr('src'); 191 | var idnum = $(this).attr('id').substr(7); 192 | $('tr.cg-' + idnum).toggle(); 193 | if (src.substr(-9) == 'minus.png') 194 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 195 | else 196 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 197 | }).css('display', ''); 198 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 199 | togglers.click(); 200 | } 201 | }, 202 | 203 | /** 204 | * helper function to hide the search marks again 205 | */ 206 | hideSearchWords : function() { 207 | $('#searchbox .highlight-link').fadeOut(300); 208 | $('span.highlighted').removeClass('highlighted'); 209 | }, 210 | 211 | /** 212 | * make the url absolute 213 | */ 214 | makeURL : function(relativeURL) { 215 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 216 | }, 217 | 218 | /** 219 | * get the current relative url 220 | */ 221 | getCurrentURL : function() { 222 | var path = document.location.pathname; 223 | var parts = path.split(/\//); 224 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 225 | if (this == '..') 226 | parts.pop(); 227 | }); 228 | var url = parts.join('/'); 229 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 230 | } 231 | }; 232 | 233 | // quick alias for translations 234 | _ = Documentation.gettext; 235 | 236 | $(document).ready(function() { 237 | Documentation.init(); 238 | }); 239 | 
-------------------------------------------------------------------------------- /docs/_build/html/_static/down-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/down-pressed.png -------------------------------------------------------------------------------- /docs/_build/html/_static/down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/down.png -------------------------------------------------------------------------------- /docs/_build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/file.png -------------------------------------------------------------------------------- /docs/_build/html/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/minus.png -------------------------------------------------------------------------------- /docs/_build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/_static/plus.png -------------------------------------------------------------------------------- /docs/_build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | 
.highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 8 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 9 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 10 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 11 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 12 | .highlight .ge { font-style: italic } /* Generic.Emph */ 13 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 14 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 15 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 16 | .highlight .go { color: #333333 } /* Generic.Output */ 17 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 18 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 19 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 20 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 21 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 22 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 23 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 24 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 25 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 26 | .highlight .kt { color: #902000 } /* Keyword.Type */ 27 | .highlight .m { color: #208050 } /* Literal.Number */ 28 | .highlight .s { color: #4070a0 } /* Literal.String */ 29 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 30 | .highlight .nb { color: #007020 } /* Name.Builtin */ 31 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 32 | .highlight .no { color: #60add5 } /* Name.Constant */ 33 | .highlight .nd { 
color: #555555; font-weight: bold } /* Name.Decorator */ 34 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 35 | .highlight .ne { color: #007020 } /* Name.Exception */ 36 | .highlight .nf { color: #06287e } /* Name.Function */ 37 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 38 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 39 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 40 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 41 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 42 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 43 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 44 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 45 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 46 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 47 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 48 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 49 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 50 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 51 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 52 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 53 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 54 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 55 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 56 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 57 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 58 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 59 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 60 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 61 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 62 
| .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/_build/html/_static/sidebar.js: -------------------------------------------------------------------------------- 1 | /* 2 | * sidebar.js 3 | * ~~~~~~~~~~ 4 | * 5 | * This script makes the Sphinx sidebar collapsible. 6 | * 7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds 8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton 9 | * used to collapse and expand the sidebar. 10 | * 11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden 12 | * and the width of the sidebar and the margin-left of the document 13 | * are decreased. When the sidebar is expanded the opposite happens. 14 | * This script saves a per-browser/per-session cookie used to 15 | * remember the position of the sidebar among the pages. 16 | * Once the browser is closed the cookie is deleted and the position 17 | * reset to the default (expanded). 18 | * 19 | * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. 20 | * :license: BSD, see LICENSE for details. 21 | * 22 | */ 23 | 24 | $(function() { 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | // global elements used by the functions. 
34 | // the 'sidebarbutton' element is defined as global after its 35 | // creation, in the add_sidebar_button function 36 | var bodywrapper = $('.bodywrapper'); 37 | var sidebar = $('.sphinxsidebar'); 38 | var sidebarwrapper = $('.sphinxsidebarwrapper'); 39 | 40 | // for some reason, the document has no sidebar; do not run into errors 41 | if (!sidebar.length) return; 42 | 43 | // original margin-left of the bodywrapper and width of the sidebar 44 | // with the sidebar expanded 45 | var bw_margin_expanded = bodywrapper.css('margin-left'); 46 | var ssb_width_expanded = sidebar.width(); 47 | 48 | // margin-left of the bodywrapper and width of the sidebar 49 | // with the sidebar collapsed 50 | var bw_margin_collapsed = '.8em'; 51 | var ssb_width_collapsed = '.8em'; 52 | 53 | // colors used by the current theme 54 | var dark_color = $('.related').css('background-color'); 55 | var light_color = $('.document').css('background-color'); 56 | 57 | function sidebar_is_collapsed() { 58 | return sidebarwrapper.is(':not(:visible)'); 59 | } 60 | 61 | function toggle_sidebar() { 62 | if (sidebar_is_collapsed()) 63 | expand_sidebar(); 64 | else 65 | collapse_sidebar(); 66 | } 67 | 68 | function collapse_sidebar() { 69 | sidebarwrapper.hide(); 70 | sidebar.css('width', ssb_width_collapsed); 71 | bodywrapper.css('margin-left', bw_margin_collapsed); 72 | sidebarbutton.css({ 73 | 'margin-left': '0', 74 | 'height': bodywrapper.height() 75 | }); 76 | sidebarbutton.find('span').text('»'); 77 | sidebarbutton.attr('title', _('Expand sidebar')); 78 | document.cookie = 'sidebar=collapsed'; 79 | } 80 | 81 | function expand_sidebar() { 82 | bodywrapper.css('margin-left', bw_margin_expanded); 83 | sidebar.css('width', ssb_width_expanded); 84 | sidebarwrapper.show(); 85 | sidebarbutton.css({ 86 | 'margin-left': ssb_width_expanded-12, 87 | 'height': bodywrapper.height() 88 | }); 89 | sidebarbutton.find('span').text('«'); 90 | sidebarbutton.attr('title', _('Collapse sidebar')); 91 | 
document.cookie = 'sidebar=expanded'; 92 | } 93 | 94 | function add_sidebar_button() { 95 | sidebarwrapper.css({ 96 | 'float': 'left', 97 | 'margin-right': '0', 98 | 'width': ssb_width_expanded - 28 99 | }); 100 | // create the button 101 | sidebar.append( 102 | '
«
' 103 | ); 104 | var sidebarbutton = $('#sidebarbutton'); 105 | light_color = sidebarbutton.css('background-color'); 106 | // find the height of the viewport to center the '<<' in the page 107 | var viewport_height; 108 | if (window.innerHeight) 109 | viewport_height = window.innerHeight; 110 | else 111 | viewport_height = $(window).height(); 112 | sidebarbutton.find('span').css({ 113 | 'display': 'block', 114 | 'margin-top': (viewport_height - sidebar.position().top - 20) / 2 115 | }); 116 | 117 | sidebarbutton.click(toggle_sidebar); 118 | sidebarbutton.attr('title', _('Collapse sidebar')); 119 | sidebarbutton.css({ 120 | 'color': '#FFFFFF', 121 | 'border-left': '1px solid ' + dark_color, 122 | 'font-size': '1.2em', 123 | 'cursor': 'pointer', 124 | 'height': bodywrapper.height(), 125 | 'padding-top': '1px', 126 | 'margin-left': ssb_width_expanded - 12 127 | }); 128 | 129 | sidebarbutton.hover( 130 | function () { 131 | $(this).css('background-color', dark_color); 132 | }, 133 | function () { 134 | $(this).css('background-color', light_color); 135 | } 136 | ); 137 | } 138 | 139 | function set_position_from_cookie() { 140 | if (!document.cookie) 141 | return; 142 | var items = document.cookie.split(';'); 143 | for(var k=0; k 3 | 4 | 5 | 6 | 7 | 8 | 9 | <no title> — bct 0.4 documentation 10 | 11 | 12 | 13 | 14 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 38 | 39 |
40 |
41 |
42 |
43 | 44 |

{{ fullname }} 45 | {{ underline }}

46 | 47 | 48 |
49 |
50 |
51 |
52 |
53 |

This Page

54 | 58 | 70 | 71 |
72 |
73 |
74 |
75 | 84 | 88 | 89 | -------------------------------------------------------------------------------- /docs/_build/html/index.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Welcome to bct’s documentation! — bct 0.4 documentation 10 | 11 | 12 | 13 | 14 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 38 | 39 |
40 |
41 |
42 |
43 | 44 |
45 |

Welcome to bct’s documentation!

46 |

Contents:

47 |
48 |
    49 |
50 |
51 |
52 |
53 |

Indices and tables

54 | 59 |
60 | 61 | 62 |
63 |
64 |
65 |
66 |
67 |

Table Of Contents

68 | 72 | 73 |

This Page

74 | 78 | 90 | 91 |
92 |
93 |
94 |
95 | 104 | 108 | 109 | -------------------------------------------------------------------------------- /docs/_build/html/modules.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | bct — bct 0.4 documentation 10 | 11 | 12 | 13 | 14 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 38 | 39 |
40 |
41 |
42 |
43 | 44 |
45 |

bct

46 | 66 |
67 | 68 | 69 |
70 |
71 |
72 |
73 |
74 |

This Page

75 | 79 | 91 | 92 |
93 |
94 |
95 |
96 | 105 | 109 | 110 | -------------------------------------------------------------------------------- /docs/_build/html/np-modindex.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Python Module Index — bct 0.4 documentation 10 | 11 | 12 | 13 | 14 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 32 | 33 | 34 | 35 | 36 | 51 | 52 |
53 |
54 |
55 |
56 | 57 | 58 |

Python Module Index

59 | 60 |
61 | n 62 |
63 | 64 | 65 | 66 | 68 | 69 | 70 | 73 |
 
67 | n
71 | nbs 72 |
74 | 75 | 76 |
77 |
78 |
79 |
80 |
81 | 93 | 94 |
95 |
96 |
97 |
98 | 113 | 117 | 118 | -------------------------------------------------------------------------------- /docs/_build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/_build/html/objects.inv -------------------------------------------------------------------------------- /docs/_build/html/py-modindex.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Python Module Index — bct 0.4 documentation 10 | 11 | 12 | 13 | 14 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 32 | 33 | 34 | 35 | 36 | 51 | 52 |
53 |
54 |
55 |
56 | 57 | 58 |

Python Module Index

59 | 60 |
61 | n 62 |
63 | 64 | 65 | 66 | 68 | 69 | 70 | 73 |
 
67 | n
71 | nbs 72 |
74 | 75 | 76 |
77 |
78 |
79 |
80 |
81 | 93 | 94 |
95 |
96 |
97 |
98 | 113 | 117 | 118 | -------------------------------------------------------------------------------- /docs/_build/html/search.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Search — bct 0.4 documentation 10 | 11 | 12 | 13 | 14 | 23 | 24 | 25 | 26 | 27 | 28 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 46 | 47 |
48 |
49 |
50 |
51 | 52 |

Search

53 |
54 | 55 |

56 | Please activate JavaScript to enable the search 57 | functionality. 58 |

59 |
60 |

61 | From here you can search these documents. Enter your search 62 | words into the box below and click "search". Note that the search 63 | function will automatically search for all of the words. Pages 64 | containing fewer words won't appear in the result list. 65 |

66 |
67 | 68 | 69 | 70 |
71 | 72 |
73 | 74 |
75 | 76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 | 94 | 98 | 99 | -------------------------------------------------------------------------------- /docs/_templates/function.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autofunction:: {{ objname }} 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/bct.rst: -------------------------------------------------------------------------------- 1 | Brain Connectivity Toolbox 2 | ========================== 3 | 4 | .. currentmodule:: bct 5 | 6 | Centrality 7 | ---------- 8 | 9 | .. autofunction:: betweenness_bin 10 | .. autofunction:: betweenness_wei 11 | .. autofunction:: diversity_coef_sign 12 | .. autofunction:: edge_betweenness_bin 13 | .. autofunction:: edge_betweenness_wei 14 | .. autofunction:: eigenvector_centrality_und 15 | .. autofunction:: flow_coef_bd 16 | .. autofunction:: kcoreness_centrality_bd 17 | .. autofunction:: kcoreness_centrality_bu 18 | .. autofunction:: module_degree_zscore 19 | .. autofunction:: pagerank_centrality 20 | .. autofunction:: participation_coef 21 | .. autofunction:: participation_coef_sign 22 | .. autofunction:: subgraph_centrality 23 | 24 | Clustering 25 | ---------- 26 | 27 | .. autofunction:: agreement 28 | .. autofunction:: agreement_weighted 29 | .. autofunction:: clustering_coef_bd 30 | .. autofunction:: clustering_coef_bu 31 | .. autofunction:: clustering_coef_wd 32 | .. autofunction:: clustering_coef_wu 33 | .. autofunction:: consensus_und 34 | .. autofunction:: get_components 35 | .. autofunction:: transitivity_bd 36 | .. autofunction:: transitivity_bu 37 | .. autofunction:: transitivity_wd 38 | .. autofunction:: transitivity_wu 39 | 40 | Core 41 | ---- 42 | 43 | .. autofunction:: assortativity_bin 44 | .. autofunction:: assortativity_wei 45 | .. autofunction:: kcore_bd 46 | .. autofunction:: kcore_bu 47 | .. autofunction:: rich_club_bd 48 | .. 
autofunction:: rich_club_bu 49 | .. autofunction:: rich_club_wd 50 | .. autofunction:: rich_club_wu 51 | .. autofunction:: score_wu 52 | 53 | Degree 54 | ------ 55 | 56 | .. autofunction:: degrees_dir 57 | .. autofunction:: degrees_und 58 | .. autofunction:: jdegree 59 | .. autofunction:: strengths_dir 60 | .. autofunction:: strengths_und 61 | .. autofunction:: strengths_und_sign 62 | 63 | Distance 64 | -------- 65 | 66 | .. autofunction:: breadthdist 67 | .. autofunction:: breadth 68 | .. autofunction:: charpath 69 | .. autofunction:: cycprob 70 | .. autofunction:: distance_bin 71 | .. autofunction:: distance_wei 72 | .. autofunction:: efficiency_bin 73 | .. autofunction:: efficiency_wei 74 | .. autofunction:: findpaths 75 | .. autofunction:: findwalks 76 | .. autofunction:: reachdist 77 | 78 | Modularity 79 | ---------- 80 | 81 | .. autofunction:: ci2ls 82 | .. autofunction:: ls2ci 83 | .. autofunction:: community_louvain 84 | .. autofunction:: link_communities 85 | .. autofunction:: modularity_dir 86 | .. autofunction:: modularity_und 87 | .. autofunction:: modularity_und_sig 88 | .. autofunction:: modularity_finetune_und 89 | .. autofunction:: modularity_finetune_und_sign 90 | .. autofunction:: modularity_louvain_dir 91 | .. autofunction:: modularity_louvain_und 92 | .. autofunction:: modularity_louvain_und_sign 93 | .. autofunction:: modularity_probtune_und_sign 94 | .. autofunction:: partition_distance 95 | 96 | Motif 97 | ----- 98 | 99 | .. autofunction:: find_motif34 100 | .. autofunction:: motif3funct_bin 101 | .. autofunction:: motif3funct_wei 102 | .. autofunction:: motif3struct_bin 103 | .. autofunction:: motif3struct_wei 104 | .. autofunction:: motif4funct_bin 105 | .. autofunction:: motif4funct_wei 106 | .. autofunction:: motif4struct_bin 107 | .. autofunction:: motif4struct_wei 108 | 109 | Miscellaneous 110 | ------------- 111 | 112 | .. autofunction:: threshold_absolute 113 | .. autofunction:: threshold_proportional 114 | .. 
autofunction:: weight_conversion 115 | .. autofunction:: binarize 116 | .. autofunction:: normalize 117 | .. autofunction:: invert 118 | .. autofunction:: autofix 119 | 120 | Physical Connectivity 121 | --------------------- 122 | 123 | .. autofunction:: density_dir 124 | .. autofunction:: density_und 125 | .. autofunction:: rentian_scaling 126 | 127 | Reference 128 | --------- 129 | .. autofunction:: latmio_dir_connected 130 | .. autofunction:: latmio_dir 131 | .. autofunction:: latmio_und_connected 132 | .. autofunction:: latmio_und 133 | .. autofunction:: makeevenCIJ 134 | .. autofunction:: makefractalCIJ 135 | .. autofunction:: makerandCIJdegreesfixed 136 | .. autofunction:: makerandCIJ_dir 137 | .. autofunction:: makerandCIJ_und 138 | .. autofunction:: makeringlatticeCIJ 139 | .. autofunction:: maketoeplitzCIJ 140 | .. autofunction:: null_model_dir_sign 141 | .. autofunction:: null_model_und_sign 142 | .. autofunction:: randmio_dir_connected 143 | .. autofunction:: randmio_dir 144 | .. autofunction:: randmio_und_connected 145 | .. autofunction:: randmio_und 146 | .. autofunction:: randmio_und_signed 147 | .. autofunction:: randomize_graph_partial_und 148 | .. autofunction:: randomizer_bin_und 149 | 150 | Similarity 151 | ---------- 152 | 153 | .. autofunction:: edge_nei_overlap_bd 154 | .. autofunction:: edge_nei_overlap_bu 155 | .. autofunction:: gtom 156 | .. autofunction:: matching_ind 157 | .. autofunction:: matching_ind_und 158 | .. autofunction:: dice_pairwise_und 159 | .. autofunction:: corr_flat_und 160 | .. autofunction:: corr_flat_dir 161 | 162 | Visualization 163 | ------------- 164 | 165 | .. autofunction:: adjacency_plot_und 166 | .. autofunction:: align_matrices 167 | .. autofunction:: backbone_wu 168 | .. autofunction:: grid_communities 169 | .. autofunction:: reorderMAT 170 | .. autofunction:: reorder_matrix 171 | .. autofunction:: reorder_mod 172 | .. 
autofunction:: writetoPAJ 173 | 174 | Network Based Statistic 175 | ======================= 176 | 177 | .. currentmodule:: nbs 178 | 179 | .. autofunction:: nbs_bct 180 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # bct documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Jul 21 11:30:20 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | #sys.path.insert(0, os.path.abspath('.')) 22 | 23 | curdir = os.path.dirname(__file__) 24 | sys.path.append(os.path.abspath(os.path.join( curdir, 'sphinxext' ))) 25 | 26 | # -- General configuration ------------------------------------------------ 27 | 28 | # If your documentation needs a minimal Sphinx version, state it here. 29 | #needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | 35 | import numpy_ext.numpydoc 36 | extensions = [ 37 | 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'numpy_ext.numpydoc' 38 | ] 39 | 40 | # Add any paths that contain templates here, relative to this directory. 41 | templates_path = ['_templates'] 42 | 43 | # The suffix of source filenames. 
44 | source_suffix = '.rst' 45 | 46 | # The encoding of source files. 47 | #source_encoding = 'utf-8-sig' 48 | 49 | # The master toctree document. 50 | master_doc = 'index' 51 | 52 | # General information about the project. 53 | project = u'bct' 54 | copyright = u'2015, rlaplant' 55 | 56 | # The version info for the project you're documenting, acts as replacement for 57 | # |version| and |release|, also used in various other places throughout the 58 | # built documents. 59 | # 60 | # The short X.Y version. 61 | version = '0.4' 62 | # The full version, including alpha/beta/rc tags. 63 | release = '0.4' 64 | 65 | # The language for content autogenerated by Sphinx. Refer to documentation 66 | # for a list of supported languages. 67 | #language = None 68 | 69 | # There are two options for replacing |today|: either, you set today to some 70 | # non-false value, then it is used: 71 | #today = '' 72 | # Else, today_fmt is used as the format for a strftime call. 73 | #today_fmt = '%B %d, %Y' 74 | 75 | # List of patterns, relative to source directory, that match files and 76 | # directories to ignore when looking for source files. 77 | exclude_patterns = ['_build'] 78 | 79 | # The reST default role (used for this markup: `text`) to use for all 80 | # documents. 81 | #default_role = None 82 | 83 | # If true, '()' will be appended to :func: etc. cross-reference text. 84 | #add_function_parentheses = True 85 | 86 | # If true, the current module name will be prepended to all description 87 | # unit titles (such as .. function::). 88 | #add_module_names = True 89 | 90 | # If true, sectionauthor and moduleauthor directives will be shown in the 91 | # output. They are ignored by default. 92 | #show_authors = False 93 | 94 | # The name of the Pygments (syntax highlighting) style to use. 95 | pygments_style = 'sphinx' 96 | 97 | # A list of ignored prefixes for module index sorting. 
98 | #modindex_common_prefix = [] 99 | 100 | # If true, keep warnings as "system message" paragraphs in the built documents. 101 | #keep_warnings = False 102 | 103 | 104 | # -- Options for HTML output ---------------------------------------------- 105 | 106 | # The theme to use for HTML and HTML Help pages. See the documentation for 107 | # a list of builtin themes. 108 | html_theme = 'default' 109 | 110 | # Theme options are theme-specific and customize the look and feel of a theme 111 | # further. For a list of options available for each theme, see the 112 | # documentation. 113 | #html_theme_options = {} 114 | 115 | # Add any paths that contain custom themes here, relative to this directory. 116 | #html_theme_path = [] 117 | 118 | # The name for this set of Sphinx documents. If None, it defaults to 119 | # " v documentation". 120 | #html_title = None 121 | 122 | # A shorter title for the navigation bar. Default is the same as html_title. 123 | #html_short_title = None 124 | 125 | # The name of an image file (relative to this directory) to place at the top 126 | # of the sidebar. 127 | #html_logo = None 128 | 129 | # The name of an image file (within the static path) to use as favicon of the 130 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 131 | # pixels large. 132 | #html_favicon = None 133 | 134 | # Add any paths that contain custom static files (such as style sheets) here, 135 | # relative to this directory. They are copied after the builtin static files, 136 | # so a file named "default.css" will overwrite the builtin "default.css". 137 | html_static_path = ['_static'] 138 | 139 | # Add any extra paths that contain custom files (such as robots.txt or 140 | # .htaccess) here, relative to this directory. These files are copied 141 | # directly to the root of the documentation. 
142 | #html_extra_path = [] 143 | 144 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 145 | # using the given strftime format. 146 | #html_last_updated_fmt = '%b %d, %Y' 147 | 148 | # If true, SmartyPants will be used to convert quotes and dashes to 149 | # typographically correct entities. 150 | #html_use_smartypants = True 151 | 152 | # Custom sidebar templates, maps document names to template names. 153 | #html_sidebars = {} 154 | 155 | # Additional templates that should be rendered to pages, maps page names to 156 | # template names. 157 | #html_additional_pages = {} 158 | 159 | # If false, no module index is generated. 160 | #html_domain_indices = True 161 | 162 | # If false, no index is generated. 163 | #html_use_index = True 164 | 165 | # If true, the index is split into individual pages for each letter. 166 | #html_split_index = False 167 | 168 | # If true, links to the reST sources are added to the pages. 169 | #html_show_sourcelink = True 170 | 171 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 172 | #html_show_sphinx = True 173 | 174 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 175 | #html_show_copyright = True 176 | 177 | # If true, an OpenSearch description file will be output, and all pages will 178 | # contain a tag referring to it. The value of this option must be the 179 | # base URL from which the finished HTML is served. 180 | #html_use_opensearch = '' 181 | 182 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 183 | #html_file_suffix = None 184 | 185 | # Output file base name for HTML help builder. 186 | htmlhelp_basename = 'bctdoc' 187 | 188 | 189 | # -- Options for LaTeX output --------------------------------------------- 190 | 191 | latex_elements = { 192 | # The paper size ('letterpaper' or 'a4paper'). 193 | #'papersize': 'letterpaper', 194 | 195 | # The font size ('10pt', '11pt' or '12pt'). 
196 | #'pointsize': '10pt', 197 | 198 | # Additional stuff for the LaTeX preamble. 199 | #'preamble': '', 200 | } 201 | 202 | # Grouping the document tree into LaTeX files. List of tuples 203 | # (source start file, target name, title, 204 | # author, documentclass [howto, manual, or own class]). 205 | latex_documents = [ 206 | ('index', 'bct.tex', u'bct Documentation', 207 | u'rlaplant', 'manual'), 208 | ] 209 | 210 | # The name of an image file (relative to this directory) to place at the top of 211 | # the title page. 212 | #latex_logo = None 213 | 214 | # For "manual" documents, if this is true, then toplevel headings are parts, 215 | # not chapters. 216 | #latex_use_parts = False 217 | 218 | # If true, show page references after internal links. 219 | #latex_show_pagerefs = False 220 | 221 | # If true, show URL addresses after external links. 222 | #latex_show_urls = False 223 | 224 | # Documents to append as an appendix to all manuals. 225 | #latex_appendices = [] 226 | 227 | # If false, no module index is generated. 228 | #latex_domain_indices = True 229 | 230 | 231 | # -- Options for manual page output --------------------------------------- 232 | 233 | # One entry per manual page. List of tuples 234 | # (source start file, name, description, authors, manual section). 235 | man_pages = [ 236 | ('index', 'bct', u'bct Documentation', 237 | [u'rlaplant'], 1) 238 | ] 239 | 240 | # If true, show URL addresses after external links. 241 | #man_show_urls = False 242 | 243 | 244 | # -- Options for Texinfo output ------------------------------------------- 245 | 246 | # Grouping the document tree into Texinfo files. List of tuples 247 | # (source start file, target name, title, author, 248 | # dir menu entry, description, category) 249 | texinfo_documents = [ 250 | ('index', 'bct', u'bct Documentation', 251 | u'rlaplant', 'bct', 'One line description of project.', 252 | 'Miscellaneous'), 253 | ] 254 | 255 | # Documents to append as an appendix to all manuals. 
256 | #texinfo_appendices = [] 257 | 258 | # If false, no module index is generated. 259 | #texinfo_domain_indices = True 260 | 261 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 262 | #texinfo_show_urls = 'footnote' 263 | 264 | # If true, do not generate a @detailmenu in the "Top" node's menu. 265 | #texinfo_no_detailmenu = False 266 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. bct documentation master file, created by 2 | sphinx-quickstart on Tue Jul 21 11:30:20 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to bct's documentation! 7 | =============================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | 23 | -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | bct 2 | === 3 | 4 | .. 
toctree:: 5 | :maxdepth: 4 6 | 7 | bct 8 | -------------------------------------------------------------------------------- /docs/sphinxext/numpy_ext/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/docs/sphinxext/numpy_ext/__init__.py -------------------------------------------------------------------------------- /docs/sphinxext/numpy_ext/docscrape_sphinx.py: -------------------------------------------------------------------------------- 1 | import re 2 | import inspect 3 | import textwrap 4 | import pydoc 5 | from .docscrape import NumpyDocString 6 | from .docscrape import FunctionDoc 7 | from .docscrape import ClassDoc 8 | 9 | 10 | class SphinxDocString(NumpyDocString): 11 | def __init__(self, docstring, config=None): 12 | config = {} if config is None else config 13 | self.use_plots = config.get('use_plots', False) 14 | NumpyDocString.__init__(self, docstring, config=config) 15 | 16 | # string conversion routines 17 | def _str_header(self, name, symbol='`'): 18 | return ['.. 
rubric:: ' + name, ''] 19 | 20 | def _str_field_list(self, name): 21 | return [':' + name + ':'] 22 | 23 | def _str_indent(self, doc, indent=4): 24 | out = [] 25 | for line in doc: 26 | out += [' ' * indent + line] 27 | return out 28 | 29 | def _str_signature(self): 30 | return [''] 31 | if self['Signature']: 32 | return ['``%s``' % self['Signature']] + [''] 33 | else: 34 | return [''] 35 | 36 | def _str_summary(self): 37 | return self['Summary'] + [''] 38 | 39 | def _str_extended_summary(self): 40 | return self['Extended Summary'] + [''] 41 | 42 | def _str_param_list(self, name): 43 | out = [] 44 | if self[name]: 45 | out += self._str_field_list(name) 46 | out += [''] 47 | for param, param_type, desc in self[name]: 48 | out += self._str_indent(['**%s** : %s' % (param.strip(), 49 | param_type)]) 50 | out += [''] 51 | out += self._str_indent(desc, 8) 52 | out += [''] 53 | return out 54 | 55 | @property 56 | def _obj(self): 57 | if hasattr(self, '_cls'): 58 | return self._cls 59 | elif hasattr(self, '_f'): 60 | return self._f 61 | return None 62 | 63 | def _str_member_list(self, name): 64 | """ 65 | Generate a member listing, autosummary:: table where possible, 66 | and a table where not. 67 | 68 | """ 69 | out = [] 70 | if self[name]: 71 | out += ['.. rubric:: %s' % name, ''] 72 | prefix = getattr(self, '_name', '') 73 | 74 | if prefix: 75 | prefix = '~%s.' % prefix 76 | 77 | autosum = [] 78 | others = [] 79 | for param, param_type, desc in self[name]: 80 | param = param.strip() 81 | if not self._obj or hasattr(self._obj, param): 82 | autosum += [" %s%s" % (prefix, param)] 83 | else: 84 | others.append((param, param_type, desc)) 85 | 86 | if autosum: 87 | # GAEL: Toctree commented out below because it creates 88 | # hundreds of sphinx warnings 89 | # out += ['.. autosummary::', ' :toctree:', ''] 90 | out += ['.. 
autosummary::', ''] 91 | out += autosum 92 | 93 | if others: 94 | maxlen_0 = max([len(x[0]) for x in others]) 95 | maxlen_1 = max([len(x[1]) for x in others]) 96 | hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 97 | fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) 98 | n_indent = maxlen_0 + maxlen_1 + 4 99 | out += [hdr] 100 | for param, param_type, desc in others: 101 | out += [fmt % (param.strip(), param_type)] 102 | out += self._str_indent(desc, n_indent) 103 | out += [hdr] 104 | out += [''] 105 | return out 106 | 107 | def _str_section(self, name): 108 | out = [] 109 | if self[name]: 110 | out += self._str_header(name) 111 | out += [''] 112 | content = textwrap.dedent("\n".join(self[name])).split("\n") 113 | out += content 114 | out += [''] 115 | return out 116 | 117 | def _str_see_also(self, func_role): 118 | out = [] 119 | if self['See Also']: 120 | see_also = super(SphinxDocString, self)._str_see_also(func_role) 121 | out = ['.. seealso::', ''] 122 | out += self._str_indent(see_also[2:]) 123 | return out 124 | 125 | def _str_warnings(self): 126 | out = [] 127 | if self['Warnings']: 128 | out = ['.. warning::', ''] 129 | out += self._str_indent(self['Warnings']) 130 | return out 131 | 132 | def _str_index(self): 133 | idx = self['index'] 134 | out = [] 135 | if len(idx) == 0: 136 | return out 137 | 138 | out += ['.. 
index:: %s' % idx.get('default', '')] 139 | for section, references in idx.iteritems(): 140 | if section == 'default': 141 | continue 142 | elif section == 'refguide': 143 | out += [' single: %s' % (', '.join(references))] 144 | else: 145 | out += [' %s: %s' % (section, ','.join(references))] 146 | return out 147 | 148 | def _str_references(self): 149 | out = [] 150 | if self['References']: 151 | out += self._str_header('References') 152 | if isinstance(self['References'], str): 153 | self['References'] = [self['References']] 154 | out.extend(self['References']) 155 | out += [''] 156 | # Latex collects all references to a separate bibliography, 157 | # so we need to insert links to it 158 | import sphinx # local import to avoid test dependency 159 | if sphinx.__version__ >= "0.6": 160 | out += ['.. only:: latex', ''] 161 | else: 162 | out += ['.. latexonly::', ''] 163 | items = [] 164 | for line in self['References']: 165 | m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) 166 | if m: 167 | items.append(m.group(1)) 168 | out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] 169 | return out 170 | 171 | def _str_examples(self): 172 | examples_str = "\n".join(self['Examples']) 173 | 174 | if (self.use_plots and 'import matplotlib' in examples_str 175 | and 'plot::' not in examples_str): 176 | out = [] 177 | out += self._str_header('Examples') 178 | out += ['.. 
plot::', ''] 179 | out += self._str_indent(self['Examples']) 180 | out += [''] 181 | return out 182 | else: 183 | return self._str_section('Examples') 184 | 185 | def __str__(self, indent=0, func_role="obj"): 186 | out = [] 187 | out += self._str_signature() 188 | out += self._str_index() + [''] 189 | out += self._str_summary() 190 | out += self._str_extended_summary() 191 | for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): 192 | out += self._str_param_list(param_list) 193 | out += self._str_warnings() 194 | out += self._str_see_also(func_role) 195 | out += self._str_section('Notes') 196 | out += self._str_references() 197 | out += self._str_examples() 198 | for param_list in ('Methods',): 199 | out += self._str_member_list(param_list) 200 | out = self._str_indent(out, indent) 201 | return '\n'.join(out) 202 | 203 | 204 | class SphinxFunctionDoc(SphinxDocString, FunctionDoc): 205 | def __init__(self, obj, doc=None, config={}): 206 | self.use_plots = config.get('use_plots', False) 207 | FunctionDoc.__init__(self, obj, doc=doc, config=config) 208 | 209 | 210 | class SphinxClassDoc(SphinxDocString, ClassDoc): 211 | def __init__(self, obj, doc=None, func_doc=None, config={}): 212 | self.use_plots = config.get('use_plots', False) 213 | ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) 214 | 215 | 216 | class SphinxObjDoc(SphinxDocString): 217 | def __init__(self, obj, doc=None, config=None): 218 | self._f = obj 219 | SphinxDocString.__init__(self, doc, config=config) 220 | 221 | 222 | def get_doc_object(obj, what=None, doc=None, config={}): 223 | if what is None: 224 | if inspect.isclass(obj): 225 | what = 'class' 226 | elif inspect.ismodule(obj): 227 | what = 'module' 228 | elif callable(obj): 229 | what = 'function' 230 | else: 231 | what = 'object' 232 | if what == 'class': 233 | return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, 234 | config=config) 235 | elif what in ('function', 'method'): 236 | return 
SphinxFunctionDoc(obj, doc=doc, config=config) 237 | else: 238 | if doc is None: 239 | doc = pydoc.getdoc(obj) 240 | return SphinxObjDoc(obj, doc, config=config) 241 | -------------------------------------------------------------------------------- /docs/sphinxext/numpy_ext/numpydoc.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======== 3 | numpydoc 4 | ======== 5 | 6 | Sphinx extension that handles docstrings in the Numpy standard format. [1] 7 | 8 | It will: 9 | 10 | - Convert Parameters etc. sections to field lists. 11 | - Convert See Also section to a See also entry. 12 | - Renumber references. 13 | - Extract the signature from the docstring, if it can't be determined 14 | otherwise. 15 | 16 | .. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard 17 | 18 | """ 19 | 20 | from __future__ import unicode_literals 21 | 22 | import sys # Only needed to check Python version 23 | import os 24 | import re 25 | import pydoc 26 | from .docscrape_sphinx import get_doc_object 27 | from .docscrape_sphinx import SphinxDocString 28 | import inspect 29 | 30 | 31 | def mangle_docstrings(app, what, name, obj, options, lines, 32 | reference_offset=[0]): 33 | 34 | cfg = dict(use_plots=app.config.numpydoc_use_plots, 35 | show_class_members=app.config.numpydoc_show_class_members) 36 | 37 | if what == 'module': 38 | # Strip top title 39 | title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', 40 | re.I | re.S) 41 | lines[:] = title_re.sub('', "\n".join(lines)).split("\n") 42 | else: 43 | doc = get_doc_object(obj, what, "\n".join(lines), config=cfg) 44 | if sys.version_info[0] < 3: 45 | lines[:] = unicode(doc).splitlines() 46 | else: 47 | lines[:] = str(doc).splitlines() 48 | 49 | if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ 50 | obj.__name__: 51 | if hasattr(obj, '__module__'): 52 | v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__)) 53 | else: 54 | v = 
dict(full_name=obj.__name__) 55 | lines += [u'', u'.. htmlonly::', ''] 56 | lines += [u' %s' % x for x in 57 | (app.config.numpydoc_edit_link % v).split("\n")] 58 | 59 | # replace reference numbers so that there are no duplicates 60 | references = [] 61 | for line in lines: 62 | line = line.strip() 63 | m = re.match(r'^.. \[([a-z0-9_.-])\]', line, re.I) 64 | if m: 65 | references.append(m.group(1)) 66 | 67 | # start renaming from the longest string, to avoid overwriting parts 68 | references.sort(key=lambda x: -len(x)) 69 | if references: 70 | for i, line in enumerate(lines): 71 | for r in references: 72 | if re.match(r'^\d+$', r): 73 | new_r = "R%d" % (reference_offset[0] + int(r)) 74 | else: 75 | new_r = u"%s%d" % (r, reference_offset[0]) 76 | lines[i] = lines[i].replace(u'[%s]_' % r, 77 | u'[%s]_' % new_r) 78 | lines[i] = lines[i].replace(u'.. [%s]' % r, 79 | u'.. [%s]' % new_r) 80 | 81 | reference_offset[0] += len(references) 82 | 83 | 84 | def mangle_signature(app, what, name, obj, 85 | options, sig, retann): 86 | # Do not try to inspect classes that don't define `__init__` 87 | if (inspect.isclass(obj) and 88 | (not hasattr(obj, '__init__') or 89 | 'initializes x; see ' in pydoc.getdoc(obj.__init__))): 90 | return '', '' 91 | 92 | if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): 93 | return 94 | if not hasattr(obj, '__doc__'): 95 | return 96 | 97 | doc = SphinxDocString(pydoc.getdoc(obj)) 98 | if doc['Signature']: 99 | sig = re.sub("^[^(]*", "", doc['Signature']) 100 | return sig, '' 101 | 102 | 103 | def setup(app, get_doc_object_=get_doc_object): 104 | global get_doc_object 105 | get_doc_object = get_doc_object_ 106 | 107 | if sys.version_info[0] < 3: 108 | app.connect(b'autodoc-process-docstring', mangle_docstrings) 109 | app.connect(b'autodoc-process-signature', mangle_signature) 110 | else: 111 | app.connect('autodoc-process-docstring', mangle_docstrings) 112 | app.connect('autodoc-process-signature', mangle_signature) 113 | 
app.add_config_value('numpydoc_edit_link', None, False) 114 | app.add_config_value('numpydoc_use_plots', None, False) 115 | app.add_config_value('numpydoc_show_class_members', True, True) 116 | 117 | # Extra mangling domains 118 | app.add_domain(NumpyPythonDomain) 119 | app.add_domain(NumpyCDomain) 120 | 121 | #----------------------------------------------------------------------------- 122 | # Docstring-mangling domains 123 | #----------------------------------------------------------------------------- 124 | 125 | try: 126 | import sphinx # lazy to avoid test dependency 127 | except ImportError: 128 | CDomain = PythonDomain = object 129 | else: 130 | from sphinx.domains.c import CDomain 131 | from sphinx.domains.python import PythonDomain 132 | 133 | 134 | class ManglingDomainBase(object): 135 | directive_mangling_map = {} 136 | 137 | def __init__(self, *a, **kw): 138 | super(ManglingDomainBase, self).__init__(*a, **kw) 139 | self.wrap_mangling_directives() 140 | 141 | def wrap_mangling_directives(self): 142 | for name, objtype in self.directive_mangling_map.items(): 143 | self.directives[name] = wrap_mangling_directive( 144 | self.directives[name], objtype) 145 | 146 | 147 | class NumpyPythonDomain(ManglingDomainBase, PythonDomain): 148 | name = 'np' 149 | directive_mangling_map = { 150 | 'function': 'function', 151 | 'class': 'class', 152 | 'exception': 'class', 153 | 'method': 'function', 154 | 'classmethod': 'function', 155 | 'staticmethod': 'function', 156 | 'attribute': 'attribute', 157 | } 158 | 159 | 160 | class NumpyCDomain(ManglingDomainBase, CDomain): 161 | name = 'np-c' 162 | directive_mangling_map = { 163 | 'function': 'function', 164 | 'member': 'attribute', 165 | 'macro': 'function', 166 | 'type': 'class', 167 | 'var': 'object', 168 | } 169 | 170 | 171 | def wrap_mangling_directive(base_directive, objtype): 172 | class directive(base_directive): 173 | def run(self): 174 | env = self.state.document.settings.env 175 | 176 | name = None 177 | if 
self.arguments: 178 | m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) 179 | name = m.group(2).strip() 180 | 181 | if not name: 182 | name = self.arguments[0] 183 | 184 | lines = list(self.content) 185 | mangle_docstrings(env.app, objtype, name, None, None, lines) 186 | # local import to avoid testing dependency 187 | from docutils.statemachine import ViewList 188 | self.content = ViewList(lines, self.content.parent) 189 | 190 | return base_directive.run(self) 191 | 192 | return directive 193 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | pytest 4 | setuptools 5 | tox 6 | duecredit 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import setuptools 3 | 4 | 5 | def read(fname): 6 | return open(os.path.join(os.path.dirname(__file__), fname)).read() 7 | 8 | setuptools.setup( 9 | name="bctpy", 10 | version="0.6.1", 11 | maintainer="Roan LaPlante", 12 | maintainer_email="rlaplant@nmr.mgh.harvard.edu", 13 | description=("Brain Connectivity Toolbox for Python"), 14 | license="Visuddhimagga Sutta; GPLv3+", 15 | long_description=read('README.md'), 16 | datafiles=[('', ['README.md', 'LICENSE'])], 17 | classifiers=[ 18 | "Development Status :: 4 - Beta", 19 | "Environment :: X11 Applications", 20 | "Intended Audience :: Science/Research", 21 | "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", 22 | "Natural Language :: English", 23 | "Programming Language :: Python :: 2.7", 24 | "Programming Language :: Python :: 3.5", 25 | "Programming Language :: Python :: 3.6", 26 | "Programming Language :: Python :: 3.7", 27 | "Topic :: Scientific/Engineering :: Information Analysis", 28 | ], 29 | url="https://github.com/aestrivex/bctpy", 30 | 
platforms=['any'], 31 | packages=['bct', 'bct.algorithms', 'bct.utils'], 32 | install_requires=["numpy", "scipy"] 33 | ) 34 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/__init__.py -------------------------------------------------------------------------------- /test/basic_test.py: -------------------------------------------------------------------------------- 1 | from .load_samples import load_sample, load_directed_sample 2 | import bct 3 | import numpy as np 4 | 5 | 6 | def test_threshold_proportional(): 7 | x = load_sample() 8 | x = bct.threshold_proportional(x, .5, copy=True) 9 | assert np.allclose(np.sum(x), 22548.51206965) 10 | 11 | 12 | def test_threshold_proportional_nocopy(): 13 | x = load_sample() 14 | bct.threshold_proportional(x, .3, copy=False) 15 | assert np.allclose(np.sum(x), 15253.75425406) 16 | 17 | 18 | def test_threshold_proportional_directed(): 19 | x = load_directed_sample() 20 | bct.threshold_proportional(x, .28, copy=False) 21 | assert np.sum(x) == 3410 22 | # assert np.allclose( np.sum(x), 32852.72485433 ) 23 | 24 | 25 | def test_threshold_absolute(): 26 | x = load_sample() 27 | x = bct.threshold_absolute(x, 2.1) 28 | assert np.allclose(np.sum(x), 13280.17768104) 29 | 30 | 31 | def test_strengths_und(): 32 | x = load_sample() 33 | s = bct.strengths_und(x) 34 | assert np.allclose(np.sum(x), 38967.38702018) 35 | 36 | 37 | def test_degrees_und(): 38 | x = load_sample() 39 | s = bct.degrees_und(bct.threshold_proportional(x, .26)) 40 | assert np.sum(s) == 4916 41 | 42 | 43 | def test_binarize(): 44 | x = load_sample() 45 | s = bct.binarize(bct.threshold_proportional(x, .41)) 46 | assert np.sum(s) == 7752 47 | 48 | def test_autofix(): 49 | x = load_sample() 50 | s = 
bct.autofix(bct.binarize(bct.threshold_proportional(x, .41))) 51 | assert np.sum(s) == 7752 52 | 53 | def test_normalize(): 54 | x = load_sample() 55 | s = bct.normalize(bct.threshold_proportional(x, .79)) 56 | assert np.allclose(np.sum(s), 3327.96285964) 57 | 58 | 59 | def test_invert(): 60 | x = load_sample() 61 | s = bct.invert(bct.threshold_proportional(x, .13)) 62 | assert np.allclose(np.sum(s), 790.43107587) 63 | -------------------------------------------------------------------------------- /test/centrality_test.py: -------------------------------------------------------------------------------- 1 | from .load_samples import ( 2 | load_sample, load_signed_sample, load_sparse_sample, 3 | load_directed_low_modularity_sample, load_binary_directed_low_modularity_sample 4 | ) 5 | import numpy as np 6 | import bct 7 | 8 | 9 | def test_gateway_coef(): 10 | x = load_sample(thres=.41) 11 | ci, _ = bct.modularity_und(x) 12 | gp, gn = bct.gateway_coef_sign(x, ci) 13 | gpb, gnb = bct.gateway_coef_sign(x, ci, centrality_type='betweenness') 14 | assert np.allclose(np.sum(gp), 87.0141) 15 | assert np.allclose(np.sum(gpb), 87.0742) 16 | assert np.all(gn == 0) 17 | assert np.all(gnb == 0) 18 | -------------------------------------------------------------------------------- /test/clustering_test.py: -------------------------------------------------------------------------------- 1 | from .load_samples import ( 2 | load_sample, load_signed_sample, load_sparse_sample, 3 | load_directed_low_modularity_sample, load_binary_directed_low_modularity_sample 4 | ) 5 | import numpy as np 6 | import bct 7 | 8 | 9 | def test_cluscoef_wu(): 10 | x = load_sample(thres=.23) 11 | cc = bct.clustering_coef_wu(x) 12 | print(np.sum(cc), 187.95878414) 13 | assert np.allclose(np.sum(cc), 187.95878414) 14 | 15 | 16 | def test_transitivity_wu(): 17 | x = load_sample(thres=.23) 18 | t = bct.transitivity_wu(x) 19 | print(t, 1.32927829) 20 | assert np.allclose(t, 1.32927829) 21 | 22 | # test signed 
clustering so that the cuberoot functionality is tested 23 | # there is no equivalent matlab functionality 24 | 25 | 26 | def test_cluscoef_signed(): 27 | x = load_signed_sample(thres=.85) 28 | cc = bct.clustering_coef_wu(x) 29 | print(np.imag(np.sum(cc)), 0) 30 | assert np.imag(np.sum(cc)) == 0 31 | 32 | 33 | def test_transitivity_signed(): 34 | x = load_signed_sample(thres=.85) 35 | t = bct.transitivity_wu(x) 36 | print(np.imag(t), 0) 37 | assert np.imag(t) == 0 38 | 39 | # test functions dealing with components on very sparse dataset 40 | 41 | 42 | def test_component(): 43 | from scipy import stats 44 | x = load_sparse_sample() 45 | c1, cs1 = bct.get_components(x) 46 | 47 | 48 | print(np.max(c1), 19) 49 | assert np.max(c1) == 19 50 | 51 | print(np.max(cs1), 72) 52 | assert np.max(cs1) == 72 53 | 54 | 55 | def test_consensus(): 56 | x = load_sample(thres=.38) 57 | ci = bct.consensus_und(x, .1, reps=50) 58 | print(np.max(ci), 4) 59 | assert np.max(ci) == 4 60 | _, q = bct.modularity_und(x, kci=ci) 61 | print(q, 0.27) 62 | assert np.allclose(q, 0.27, atol=.01) 63 | 64 | 65 | def test_cluscoef_wd(): 66 | x = load_directed_low_modularity_sample(thres=.45) 67 | cc = bct.clustering_coef_wd(x) 68 | print(np.sum(cc), 289.30817909) 69 | assert np.allclose(np.sum(cc), 289.30817909) 70 | 71 | 72 | def test_transitivity_wd(): 73 | x = load_directed_low_modularity_sample(thres=.45) 74 | t = bct.transitivity_wd(x) 75 | print(t, 1.30727748) 76 | assert np.allclose(t, 1.30727748) 77 | 78 | 79 | def test_cluscoef_bu(): 80 | x = bct.binarize(load_sample(thres=.17), copy=False) 81 | cc = bct.clustering_coef_bu(x) 82 | print(np.sum(cc), 60.1016) 83 | assert np.allclose(np.sum(cc), 60.10160458) 84 | 85 | 86 | def test_transitivity_bu(): 87 | x = bct.binarize(load_sample(thres=.17), copy=False) 88 | t = bct.transitivity_bu(x) 89 | print(t, 0.42763) 90 | assert np.allclose(t, 0.42763107) 91 | 92 | 93 | def test_cluscoef_bd(): 94 | x = 
load_binary_directed_low_modularity_sample(thres=.41) 95 | cc = bct.clustering_coef_bd(x) 96 | print(np.sum(cc), 113.31145) 97 | assert np.allclose(np.sum(cc), 113.31145155) 98 | 99 | 100 | def test_transitivity_bd(): 101 | x = load_binary_directed_low_modularity_sample(thres=.41) 102 | t = bct.transitivity_bd(x) 103 | print(t, 0.50919) 104 | assert np.allclose(t, 0.50919493) 105 | 106 | 107 | def test_agreement_weighted(): 108 | # this function is very hard to use or interpret results from 109 | pass 110 | 111 | def test_agreement(): 112 | # Case 1: nodes > partitions 113 | input_1 = np.array([[1, 1, 1], 114 | [1, 2, 2]]).T 115 | correct_1 = np.array([[0, 1, 1], 116 | [1, 0, 2], 117 | [1, 2, 0]]) 118 | 119 | # Case 2: partitions > nodes 120 | input_2 = np.array([[1, 1, 1], 121 | [1, 2, 2], 122 | [2, 2, 1], 123 | [1, 2, 2]]).T 124 | correct_2 = np.array([[0, 2, 1], 125 | [2, 0, 3], 126 | [1, 3, 0]]) 127 | 128 | print('correct:') 129 | print(correct_1) 130 | output_1 = bct.agreement(input_1) 131 | print('outputs:') 132 | print(output_1) 133 | assert (output_1 == correct_1).all() 134 | for buffsz in range(1, 3): 135 | output_1_buffered = bct.agreement(input_1, buffsz=buffsz) 136 | print(output_1_buffered) 137 | assert (output_1_buffered == correct_1).all() 138 | 139 | print('correct:') 140 | print(correct_2) 141 | output_2 = bct.agreement(input_2) 142 | print('outputs:') 143 | print(output_2) 144 | assert (output_2 == correct_2).all() 145 | for buffsz in range(1, 5): 146 | output_2_buffered = bct.agreement(input_2, buffsz=buffsz) 147 | print(output_2_buffered) 148 | assert (output_2_buffered == correct_2).all() 149 | 150 | def test_path_transitivity(): 151 | x = load_sample(thres=.31) 152 | xn = bct.normalize(x) 153 | Ti = bct.path_transitivity(x, transform='inv') 154 | Tl = bct.path_transitivity(xn, transform='log') 155 | 156 | print(np.sum(Ti), np.sum(Tl)) 157 | assert np.allclose(np.sum(Ti), 8340.8, atol=.01) 158 | assert np.allclose(np.sum(Tl), 8621.1, atol=.01) 
159 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | def pytest_configure(config): 5 | config.addinivalue_line( 6 | "markers", "long: mark test as long-running; skipped with --skiplong option" 7 | ) 8 | 9 | 10 | def pytest_addoption(parser): 11 | parser.addoption( 12 | "--skiplong", action="store_true", default=False, help="skip long-running tests" 13 | ) 14 | 15 | 16 | def pytest_collection_modifyitems(config, items): 17 | if not config.getoption('--skiplong'): 18 | return 19 | skip = pytest.mark.skip(reason="skipping long-running tests") 20 | for item in items: 21 | if "long" in item.keywords: 22 | item.add_marker(skip) 23 | -------------------------------------------------------------------------------- /test/core_test.py: -------------------------------------------------------------------------------- 1 | from .load_samples import load_sample 2 | import bct 3 | import numpy as np 4 | 5 | 6 | def test_assortativity_wu_sign(): 7 | x = load_sample(thres=.1) 8 | ass_pos, _ = bct.local_assortativity_wu_sign(x) 9 | 10 | print(ass_pos, .2939) 11 | assert np.allclose(np.sum(ass_pos), .2939, atol=.0001) 12 | 13 | 14 | def test_core_periphery_dir(): 15 | x = load_sample(thres=.1) 16 | c, q = bct.core_periphery_dir(x) 17 | assert np.sum(c) == 57 18 | assert np.sum(np.cumsum(c)) == 4170 19 | assert np.allclose(q, .3086, atol=.0001) 20 | 21 | def test_clique_communities(): 22 | x = load_sample(thres=.23) 23 | 24 | print(np.sum(bct.binarize(x))) 25 | 26 | cis = bct.clique_communities(x, 9) 27 | print(cis.shape, np.max(np.sum(cis, axis=0))) 28 | print(np.sum(cis, axis=1)) 29 | assert np.sum(cis) == 199 30 | -------------------------------------------------------------------------------- /test/distance_test.py: -------------------------------------------------------------------------------- 1 | from .load_samples 
import load_sample
import numpy as np
import bct


def test_breadthdist():
    # Reachability matrix and BFS distance matrix on the sparse sample;
    # infinite (unreachable) distances are zeroed before summing.
    x = load_sample(thres=.02)
    r, d = bct.breadthdist(x)
    d[np.where(np.isinf(d))] = 0
    print(np.sum(r), np.sum(d))
    assert np.sum(r) == 5804
    assert np.sum(d) == 30762


def test_reachdist():
    # reachdist must agree with breadthdist's sums on the same input.
    x = load_sample(thres=.02)
    r, d = bct.reachdist(x)
    d[np.where(np.isinf(d))] = 0
    print(np.sum(r), np.sum(d))
    assert np.sum(r) == 5804
    assert np.sum(d) == 30762

    # a binarized copy of the input must give identical results, since
    # reachability ignores weights
    bx = bct.binarize(x, copy=False)
    br, bd = bct.reachdist(bx)
    bd[np.where(np.isinf(bd))] = 0
    print(np.sum(br), np.sum(bd))
    assert np.sum(br) == 5804
    assert np.sum(bd) == 30762


def test_distance_bin():
    x = bct.binarize(load_sample(thres=.02), copy=False)
    d = bct.distance_bin(x)
    d[np.where(np.isinf(d))] = 0
    print(np.sum(d))
    assert np.sum(d) == 30506  # deals with diagonals differently


def test_distance_wei():
    # Weighted shortest-path distances and edge counts.
    x = load_sample(thres=.02)
    d, e = bct.distance_wei(x)
    d[np.where(np.isinf(d))] = 0
    print(np.sum(d), np.sum(e))

    assert np.allclose(np.sum(d), 155650.1, atol=.01)
    assert np.sum(e) == 30570


def test_charpath():
    # charpath must yield finite radius/diameter even when the distance
    # matrix contains infinities (disconnected node pairs).
    x = load_sample(thres=.02)
    d, e = bct.distance_wei(x)
    l, eff, ecc, radius, diameter = bct.charpath(d)

    assert np.any(np.isinf(d))
    assert not np.isnan(radius)
    assert not np.isnan(diameter)


def test_distance_floyd():
    # Floyd-Warshall shortest paths with the inverse edge transform.
    x = load_sample(thres=.31)
    spli, hopsi, pmati = bct.distance_wei_floyd(x, transform='inv')
    print(np.sum(spli))
    assert np.allclose(np.sum(spli), 11536.1, atol=.01)


def test_navigation_wu():
    # Smoke test: navigation on random node coordinates, with and without
    # a hop limit.  No exact expected values (coordinates are random).
    x = load_sample(thres=.24)
    x_len = bct.invert(x)

    # randomly generate distances for testing purposes
    n = len(x)
    while True:
        centroids = np.random.randint(512, size=(n, 3))
        # make sure every centroid is unique
        if
len(np.unique(centroids, axis=0)) == n: 74 | break 75 | 76 | d = np.zeros((n, n)) 77 | for i in range(n): 78 | for j in range(n): 79 | d[i, j] = np.linalg.norm(centroids[i, :] - centroids[j, :]) 80 | 81 | 82 | sr, plbin, plwei, pldis, paths = bct.navigation_wu(x_len, d, max_hops=14) 83 | 84 | sr2, plbin2, plwei2, pldis2, paths2 = bct.navigation_wu(x_len, d, max_hops=None) 85 | 86 | #TODO create the centroids for an actual bit of sample data and converge the matlab algorithm 87 | #this procedure of random centroid generation generates a random reachability which is usually around 45-60% 88 | #but not guaranteed 89 | 90 | -------------------------------------------------------------------------------- /test/duecredit_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import sys 4 | import subprocess as sp 5 | from datetime import datetime 6 | 7 | 8 | @pytest.fixture 9 | def script_path(): 10 | yield os.path.join( 11 | os.path.dirname(os.path.realpath(__file__)), 12 | "simple_script.py" 13 | ) 14 | 15 | 16 | @pytest.fixture 17 | def dc_context(monkeypatch): 18 | """Backup and delete duecredit cache, replacing it after the test, and enable duecredit""" 19 | original = os.path.join(os.getcwd(), ".duecredit.p") 20 | timestamp = datetime.utcnow().isoformat() 21 | if os.name == "nt": 22 | timestamp.replace(':', ';') 23 | backup = original + '.' 
+ timestamp 24 | if os.path.isfile(original): 25 | os.rename(original, backup) 26 | 27 | monkeypatch.setenv("DUECREDIT_ENABLE", "yes") 28 | 29 | yield 30 | 31 | if os.path.isfile(original): 32 | os.remove(original) 33 | if os.path.isfile(backup): 34 | os.rename(backup, original) 35 | 36 | 37 | def test_duecredit(script_path, dc_context): 38 | result = sp.Popen([sys.executable, script_path]) 39 | result.wait() 40 | assert result.returncode == 0 41 | 42 | result2 = sp.Popen(["duecredit", "summary", "--format", "bibtex"], stdout=sp.PIPE) 43 | result2.wait() 44 | assert result2.returncode == 0 45 | 46 | output = result2.stdout.read().decode("utf-8") 47 | for author in [ 48 | "LaPlante", 49 | "Latora", 50 | "Onnela", 51 | "Fagiolo", 52 | "Rubinov", 53 | "Leicht", 54 | "Reichardt", 55 | "Good", 56 | "Maslov", 57 | ]: 58 | assert author in output 59 | 60 | # fails due to bug 152 61 | # assert output.count('@') == 10 62 | 63 | headers = {line for line in output.split('\n') if line.startswith("@")} 64 | assert len(headers) == 10 65 | -------------------------------------------------------------------------------- /test/efficiency_test.py: -------------------------------------------------------------------------------- 1 | from .load_samples import load_sample, load_directed_sample 2 | import numpy as np 3 | import bct 4 | 5 | def test_diffusion_efficiency(): 6 | x = load_sample(thres=.23) 7 | gde, ed = bct.diffusion_efficiency(x) 8 | print(gde, np.sum(ed)) 9 | assert np.allclose(gde, .0069472) 10 | assert np.allclose(np.sum(ed), 131.34, atol=.01) 11 | 12 | def test_resource_efficiency(): 13 | x = load_sample(thres=.39) 14 | x = bct.binarize(x) 15 | 16 | eres, prob = bct.resource_efficiency_bin(x, .35) 17 | 18 | assert np.allclose(np.sum(eres), 323.5398, atol=.0001) 19 | assert np.allclose(np.sum(prob), 138.0000, atol=.0001) 20 | 21 | def test_rout_efficiency(): 22 | x = load_directed_sample(thres=1) 23 | GErout, Erout, Eloc = bct.rout_efficiency(x, 'inv') 24 | 25 | assert 
np.allclose(np.sum(Erout), 9515.25, atol=.01)
    assert np.allclose(GErout, 1.0655, atol=.0001)
    assert np.allclose(np.sum(Eloc), 2906.574, atol=.001)

-------------------------------------------------------------------------------- /test/load_samples.py: --------------------------------------------------------------------------------
import numpy as np
import bct
import os

# Directory holding this module, and the bundled sample matrices under mats/.
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
MAT_DIR = os.path.join(TEST_DIR, 'mats')


def mat_path(fname):
    # Absolute path to a sample matrix file inside test/mats.
    return os.path.join(MAT_DIR, fname)


def load_sample(thres=1.):
    # Undirected weighted sample, proportionally thresholded at `thres`.
    return bct.threshold_proportional(np.load(mat_path('sample_data.npy')),
                                      thres, copy=False)


def load_signed_sample(thres=1):
    # Signed sample; rounded to 8 decimals before thresholding.
    return bct.threshold_proportional(np.around(
        np.load(mat_path('sample_signed.npy')), 8), thres, copy=False)


def load_sparse_sample(thres=.02):
    # Same data as load_sample, just a sparser default threshold.
    return load_sample(thres=thres)


def load_binary_sample(thres=.35):
    return bct.binarize(load_sample(thres=thres), copy=False)


def load_directed_sample(thres=1.):
    return bct.threshold_proportional(np.load(mat_path('sample_directed.npy')),
                                      thres, copy=False)


def load_binary_directed_sample(thres=.35):
    return bct.binarize(load_directed_sample(thres=thres))


def load_directed_low_modularity_sample(thres=1.):
    return bct.threshold_proportional(np.load(
        mat_path('sample_directed_gc.npy')), thres, copy=False)


def load_binary_directed_low_modularity_sample(thres=.35):
    return bct.binarize(load_directed_low_modularity_sample(thres=thres))

# unimplemented samples


def load_binary_sparse_sample(thres=.35):
    raise NotImplementedError()


def load_binary_directed_sparse_sample(thres=.02):
    raise NotImplementedError()


def load_directed_sparse_sample(thres=.02):
    raise
NotImplementedError()


def load_directed_signed_sample(thres=.61):
    raise NotImplementedError()


def load_directed_signed_sparse_sample(thres=.03):
    raise NotImplementedError()


def load_signed_sparse_sample(thres=.06):
    raise NotImplementedError()

# NBS samples


def load_sample_group_qball():
    # Stack of per-subject normalized qball matrices, returned with
    # subjects on the last axis.
    q = np.load(mat_path('sample_group_qball.npy'))
    return np.transpose(
        list(map(bct.normalize, (q[:, :, i] for i in range(q.shape[2])))),
        (1, 2, 0))


def load_sample_group_dsi():
    # Stack of per-subject normalized dsi matrices, subjects last.
    d = np.load(mat_path('sample_group_dsi.npy'))
    return np.transpose(
        list(map(bct.normalize, (d[:, :, i] for i in range(d.shape[2])))),
        (1, 2, 0))


def load_sample_group_fmri():
    # Per-subject fmri matrices, thresholded at p=.5 then normalized.
    f = np.load(mat_path('sample_group_fmri.npy'))
    import functools

    def compose(*functions):
        # right-to-left function composition
        return functools.reduce(lambda f, g: lambda x: f(g(x)), functions)
    thresh_fun = functools.partial(bct.threshold_proportional, p=.5)
    return np.transpose(list(map(compose(bct.normalize, thresh_fun),
                                 (f[:, :, i] for i in range(f.shape[2])))),
                        (1, 2, 0))
-------------------------------------------------------------------------------- /test/mats/sample_data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_data.mat -------------------------------------------------------------------------------- /test/mats/sample_data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_data.npy -------------------------------------------------------------------------------- /test/mats/sample_directed.mat: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_directed.mat -------------------------------------------------------------------------------- /test/mats/sample_directed.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_directed.npy -------------------------------------------------------------------------------- /test/mats/sample_directed_gc.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_directed_gc.mat -------------------------------------------------------------------------------- /test/mats/sample_directed_gc.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_directed_gc.npy -------------------------------------------------------------------------------- /test/mats/sample_group_dsi.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_group_dsi.mat -------------------------------------------------------------------------------- /test/mats/sample_group_dsi.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_group_dsi.npy -------------------------------------------------------------------------------- /test/mats/sample_group_fmri.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_group_fmri.mat -------------------------------------------------------------------------------- /test/mats/sample_group_fmri.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_group_fmri.npy -------------------------------------------------------------------------------- /test/mats/sample_group_qball.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_group_qball.mat -------------------------------------------------------------------------------- /test/mats/sample_group_qball.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_group_qball.npy -------------------------------------------------------------------------------- /test/mats/sample_partition.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_partition.mat -------------------------------------------------------------------------------- /test/mats/sample_partition.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_partition.npy -------------------------------------------------------------------------------- /test/mats/sample_pc.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_pc.mat -------------------------------------------------------------------------------- /test/mats/sample_pc.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_pc.npy -------------------------------------------------------------------------------- /test/mats/sample_signed.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_signed.mat -------------------------------------------------------------------------------- /test/mats/sample_signed.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_signed.npy -------------------------------------------------------------------------------- /test/mats/sample_signed_partition.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_signed_partition.mat -------------------------------------------------------------------------------- /test/mats/sample_zi.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_zi.mat -------------------------------------------------------------------------------- /test/mats/sample_zi.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aestrivex/bctpy/1b40e281eda081060707e30b68106ac1ebf54130/test/mats/sample_zi.npy 
-------------------------------------------------------------------------------- /test/modularity_derived_metrics_test.py: --------------------------------------------------------------------------------
from .load_samples import load_sample, mat_path
import numpy as np
import bct


def test_pc():
    # Participation coefficient against a precomputed reference vector,
    # using a saved partition rather than recomputing modularity.
    x = load_sample(thres=.4)
    # ci,q = bct.modularity_und(x)
    ci = np.load(mat_path('sample_partition.npy'))

    pc = np.load(mat_path('sample_pc.npy'))

    pc_ = bct.participation_coef(x, ci)
    print(list(zip(pc, pc_)))

    assert np.allclose(pc, pc_, atol=0.02)


def test_participation():
    # Hand-constructed 3-node matrices with known participation values;
    # the signed variant must agree on non-negative inputs.
    W = np.eye(3)
    ci = np.array([1, 1, 2])

    assert np.allclose(bct.participation_coef(W, ci), [0, 0, 0])
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [0, 0, 0])

    W = np.ones((3, 3))
    assert np.allclose(bct.participation_coef(W, ci), [
        0.44444444, 0.44444444, 0.44444444])
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [
        0.44444444, 0.44444444, 0.44444444])

    W = np.eye(3)
    W[0, 1] = 1
    W[0, 2] = 1
    assert np.allclose(bct.participation_coef(W, ci), [0.44444444, 0, 0])
    assert np.allclose(bct.participation_coef_sign(W, ci)
                       [0], [0.44444444, 0, 0])

    # negative edges exercise the signed code path
    W = np.eye(3)
    W[0, 1] = -1
    W[0, 2] = -1
    W[1, 2] = 1
    assert np.allclose(bct.participation_coef_sign(W, ci)[0], [0., 0.5, 0.])


def test_gateway():
    # Gateway coefficient with the default and betweenness centrality types.
    x = load_sample(thres=.1)
    ci = np.load(mat_path('sample_partition.npy'))

    g_pos, _ = bct.gateway_coef_sign(x, ci)

    print(np.sum(g_pos), 43.4382)
    assert np.allclose(np.sum(g_pos), 43.4382, atol=.001)

    g_pos_bet, _ = bct.gateway_coef_sign(x, ci, centrality_type='betweenness')

    print(np.sum(g_pos_bet), 43.4026)
    assert np.allclose(np.sum(g_pos_bet), 43.4026, atol=.001)


def test_zi():
    x = load_sample(thres=.4)
    ci =
np.load(mat_path('sample_partition.npy'))

    zi = np.load(mat_path('sample_zi.npy'))

    zi_ = bct.module_degree_zscore(x, ci)
    print(list(zip(zi, zi_)))

    assert np.allclose(zi, zi_, atol=0.05)
    # this function does the same operations but varies by a modest quantity
    # because of the matlab and numpy differences in population versus
    # sample standard deviation. i tend to think that using the population
    # estimator is acceptable in this case so i will allow the higher
    # tolerance.

    # TODO this test does not give the same results, why not


def test_shannon_entropy():
    # Positive diversity coefficient (Shannon entropy based) sum.
    x = load_sample(thres=0.4)
    ci = np.load(mat_path('sample_partition.npy'))
    # ci, q = bct.modularity_und(x)
    hpos, _ = bct.diversity_coef_sign(x, ci)
    print(np.sum(hpos))
    print(hpos[-1])
    assert np.allclose(np.sum(hpos), 102.6402, atol=.01)
-------------------------------------------------------------------------------- /test/modularity_test.py: --------------------------------------------------------------------------------
import os
from .load_samples import (
    load_sample, load_directed_sample, load_signed_sample, load_directed_low_modularity_sample, TEST_DIR
)
import numpy as np
import bct


def test_modularity_und():
    # Deterministic undirected modularity against the reference Q.
    x = load_sample(thres=.4)
    _, q = bct.modularity_und(x)
    print(q)
    assert np.allclose(q, 0.24097717)
    # matlab and bctpy appear to return different results due to the cross-
    # package numerical instability of eigendecompositions


def test_modularity_louvain_und():
    x = load_sample(thres=.4)

    # a fixed seed pins the stochastic Louvain result exactly
    seed = 38429004
    _, q = bct.modularity_louvain_und(x, seed=seed)
    assert np.allclose(q, 0.25892588)

    # unseeded runs are stochastic; tolerate up to 5 outliers in 100 runs
    fails = 0
    for i in range(100):
        ci, q = bct.modularity_louvain_und(x)
        try:
            assert np.allclose(q, .25, atol=0.01)
        except AssertionError:
            if fails >= 5:
                raise
33 | else: 34 | fails += 1 35 | 36 | seed = 94885236 37 | _, q = bct.modularity_finetune_und(x, seed=seed) 38 | assert np.allclose(q, .25879794) 39 | 40 | 41 | def test_modularity_finetune_und(): 42 | x = load_sample(thres=.4) 43 | 44 | seed = 94885236 45 | _, q = bct.modularity_finetune_und(x, seed=seed) 46 | assert np.allclose(q, .25879794) 47 | 48 | fails = 0 49 | for i in range(100): 50 | _, q = bct.modularity_finetune_und(x) 51 | try: 52 | assert np.allclose(q, .25, atol=0.03) 53 | except AssertionError: 54 | if fails >= 5: 55 | raise 56 | else: 57 | fails += 1 58 | 59 | seed = 71040925 60 | ci, oq = bct.modularity_louvain_und(x, seed=seed) 61 | _, q = bct.modularity_finetune_und(x, ci=ci, seed=seed) 62 | print(q, oq) 63 | # assert np.allclose(q, .25892588) 64 | assert np.allclose(q, .25856714) 65 | assert q - oq >= -1e6 66 | 67 | ci, oq = bct.modularity_und(x) 68 | for i in range(100): 69 | _, q = bct.modularity_finetune_und(x, ci=ci) 70 | assert np.allclose(q, .25, atol=0.002) 71 | assert q - oq >= -1e6 72 | 73 | # modularity_finetune_und appears to be very stable when given a stable ci 74 | # in thousands of test runs on the sample data (using the deterministic 75 | # modularity maximization algorithm), only two states appeared; 76 | # both improved the optimal modularity. a basic increase -- modules that 77 | # always benefited from switching -- always occurred. on top of that, a 78 | # slightly larger increase dependent on order occurred in both matlab and 79 | # bctpy around ~0.6% of the time. Due to numerical instability arising 80 | # from something different between matlab and scipy, these values were not 81 | # the same across languages, but both languages showed bistable transitions 82 | # they were extremely stable. The values were about .0015 apart. 83 | 84 | # also the matlab and python versions of modularity_und return slightly 85 | # different modular structure, but the instability is present despite this 86 | #(i.e. 
it is unstable both when the modular structure is identical and not)


def test_modularity_louvain_und_sign_seed():
    # performance is same as matlab if randomness is quashed
    x = load_signed_sample()
    seed = 90772777
    _, q = bct.modularity_louvain_und_sign(x, seed=seed)
    print(q)
    assert np.allclose(q, .46605515)


def test_modularity_finetune_und_sign_actually_finetune():
    # Finetuning a seeded Louvain partition must never lower modularity.
    x = load_signed_sample()
    seed = 34908314
    ci, oq = bct.modularity_louvain_und_sign(x, seed=seed)
    _, q = bct.modularity_finetune_und_sign(x, seed=seed, ci=ci)
    print(q)
    assert np.allclose(q, .47282924)
    assert q >= oq

    # sparsify the sample with a seeded random symmetric mask, then repeat
    seed = 88215881
    np.random.seed(seed)
    randomized_sample = np.random.random_sample(size=(len(x), len(x)))
    randomized_sample = randomized_sample + randomized_sample.T
    x[np.where(bct.threshold_proportional(randomized_sample, .2))] = 0

    ci, oq = bct.modularity_louvain_und_sign(x, seed=seed)
    print(oq)
    assert np.allclose(oq, .45254522)
    for i in range(100):
        _, q = bct.modularity_finetune_und_sign(x, ci=ci)
        assert q >= oq


def test_modularity_probtune_und_sign():
    x = load_signed_sample()
    seed = 59468096
    ci, q = bct.modularity_probtune_und_sign(x, seed=seed)
    print(q)
    assert np.allclose(q, .07885327)

    seed = 1742447
    ci, _ = bct.modularity_louvain_und_sign(x, seed=seed)
    _, oq = bct.modularity_finetune_und_sign(x, seed=seed, ci=ci)

    # probtune should usually degrade the finetuned partition; tolerate up
    # to 5 exceptions per 100 runs at each perturbation probability
    for i in np.arange(.05, .5, .02):
        fails = 0
        for j in range(100):
            _, q = bct.modularity_probtune_und_sign(x, ci=ci, p=i)
            try:
                assert q < oq
            except AssertionError:
                if fails > 5:
                    raise
                else:
                    fails += 1


def test_modularity_dir_low_modularity():
    x = load_directed_low_modularity_sample(thres=.67)
    _, q = bct.modularity_dir(x)
    assert
np.allclose(q, .06450290)


def test_modularity_louvain_dir_low_modularity():
    # Seeded directed Louvain on the low-modularity sample.
    x = load_directed_low_modularity_sample(thres=.67)
    seed = 28917147
    _, q = bct.modularity_louvain_dir(x, seed=seed)
    assert np.allclose(q, .06934894)

# def test_modularity_finetune_dir_low_modularity():
#    x = load_directed_low_modularity_sample(thres=.67)
#    seed = 39602351
#    ci,oq = bct.modularity_louvain_dir(x, seed=seed)
#    _,q = bct.modularity_finetune_dir(x, ci=ci, seed=seed)
#    print q,oq
#    assert q >= oq
# this does not pass. the matlab code appears to have no idea what to do
# with
# the low modularity directed modules. this may be someone else's fault.


def test_modularity_dir():
    # Deterministic directed modularity against the reference Q.
    x = load_directed_sample()
    _, q = bct.modularity_dir(x)
    print(q, .32742787)
    assert np.allclose(q, .32742787)


def test_modularity_louvain_dir():
    x = load_directed_sample()
    seed = 43938304
    _, q = bct.modularity_louvain_dir(x, seed=seed)
    assert np.allclose(q, .32697921)

# def test_modularity_finetune_dir():
#    x = load_directed_sample()
#    seed = 26080
#    ci,oq = bct.modularity_louvain_dir(x, seed=seed)
#    for i in xrange(100):
#        _,q = bct.modularity_finetune_dir(x, ci=ci)
#        print q,oq
#        assert q >= oq
# this does not pass with similar behavior to low modularity.
# the code occasionally returns lower modularity (but very very similar,
# order .001) partitions despite returning
# higher modularity partitions a slight majority of the time.
i dont know
# what is wrong


def test_community_louvain():
    # Seeded community_louvain; loose tolerance since partitions can vary.
    x = load_sample(thres=0.4)
    seed = 39185
    ci, q = bct.community_louvain(x, seed=seed)
    print(q)
    assert np.allclose(q, 0.2583, atol=0.015)


def test_modularity_dir_bug71():
    """Regression test for bug described in issue #71"""
    fpath = os.path.join(TEST_DIR, "failing_cases", "modularity_dir_example.csv")
    x = np.loadtxt(fpath, int, delimiter=',')

    # must complete without raising
    bct.modularity_dir(x)
-------------------------------------------------------------------------------- /test/nbs_test.py: --------------------------------------------------------------------------------
from .load_samples import load_sample_group_dsi, load_sample_group_fmri, load_sample_group_qball
import numpy as np
import bct


def test_nbs_dsi_qbi():
    # NBS between qball and dsi groups; a weak effect (p around .5).
    q = load_sample_group_qball()
    d = load_sample_group_dsi()
    _nbs_helper(q, d, .5, atol=0.3)


def test_nbs_paired_dsi_qbi():
    pass


def test_nbs_dsi_fmri():
    # NBS between dsi and fmri groups; a strong effect (small p).
    d = load_sample_group_dsi()
    f = load_sample_group_fmri()
    assert f.shape == (219, 219, 8)
    _nbs_helper(d, f, .03, atol=0.03)


def test_nbs_paired_dsi_fmri():
    pass


def _nbs_helper(x, y, expected_pval, atol=.05, thresh=.1, ntrials=25,
                paired=False):
    # Run network-based statistic and compare the p-value to expectation.

    pval, _, _ = bct.nbs_bct(x, y, thresh, k=ntrials, paired=paired)
    print(pval, expected_pval)
    assert np.allclose(pval, expected_pval, atol=atol)
-------------------------------------------------------------------------------- /test/nodals_test.py: --------------------------------------------------------------------------------
from .load_samples import load_sample
import numpy as np
import bct


def test_glob_eff():
    # Global weighted efficiency against the reference value.
    x = load_sample(thres=.4)
    geff = bct.efficiency_wei(x)
    print(geff, 1.8784)
    assert np.allclose(geff, 1.8784,
atol=1e-4)


def test_loc_eff():
    # Local weighted efficiency: both the 'original' and corrected variants.
    x = load_sample(thres=.4)
    leff_o = bct.efficiency_wei(x, local='original')
    print(np.sum(leff_o), 315.6225)
    assert np.allclose(np.sum(leff_o), 315.6225, atol=0.1)

    leff = bct.efficiency_wei(x, local=True)
    print(np.sum(leff), 268.5581)
    assert np.allclose(np.sum(leff), 268.5581, atol=0.1)


def test_glob_eff_bin():
    # Binary efficiency must agree whether or not the input is pre-binarized.
    x = load_sample(thres=.4)
    geff = bct.efficiency_bin(x)

    y = bct.binarize(x)
    geff2 = bct.efficiency_bin(y)

    print(geff, geff2, 0.6999)
    assert np.allclose(geff, 0.6999, atol=1e-4)
    assert np.allclose(geff2, 0.6999, atol=1e-4)


def test_loc_eff_bin():
    # Same invariance check for the local binary efficiency.
    x = load_sample(thres=.4)
    leff = bct.efficiency_bin(x, local=True)

    y = bct.binarize(x)
    leff2 = bct.efficiency_bin(y, local=True)

    print(np.sum(leff), np.sum(leff2), 105.5111)
    assert np.allclose(np.sum(leff), 105.5111, atol=0.1)
    assert np.allclose(np.sum(leff2), 105.5111, atol=0.1)
-------------------------------------------------------------------------------- /test/partition_distance_test.py: --------------------------------------------------------------------------------
from .load_samples import load_sample_group_qball, load_sample_group_dsi
import numpy as np
import bct


def test_partition_distance():
    # Variation of information and mutual information between the modular
    # partitions of the group-averaged qball and dsi samples.
    q = load_sample_group_qball()
    d = load_sample_group_dsi()

    q = np.mean(q, axis=2)
    d = np.mean(d, axis=2)

    qi, _ = bct.modularity_und(q)
    di, _ = bct.modularity_und(d)

    vi, mi = bct.partition_distance(qi, di)

    print(vi, mi)
    assert np.allclose(vi, 0.1964, atol=0.01)
    assert np.allclose(mi, 0.6394, atol=0.01)
-------------------------------------------------------------------------------- /test/reference_test.py: --------------------------------------------------------------------------------
import pytest

from
.load_samples import *
import numpy as np
import bct


SEED = 1


@pytest.mark.xfail(reason="unfixed bug #68")
def test_null_model_und_sign():
    # Regression test for bug fixed in b02a306
    x = load_sample(thres=.4)

    bct.null_model_und_sign(x)


@pytest.mark.xfail(reason="unfixed bug #68")
def test_null_model_dir_sign():
    # Regression test for counterpart to the undirected bug
    x = load_directed_sample(thres=.4)
    bct.null_model_dir_sign(x)


def test_randmio_und_seed():
    # Same seed must reproduce the rewiring exactly; a different seed
    # must produce a different result.
    x = load_sample(thres=0.4)
    swaps = 5
    ref, _ = bct.randmio_und(x, swaps, seed=SEED)
    test_same, _ = bct.randmio_und(x, swaps, seed=SEED)
    test_diff, _ = bct.randmio_und(x, swaps, seed=SEED*2)

    assert np.allclose(ref, test_same)
    assert not np.allclose(ref, test_diff)
-------------------------------------------------------------------------------- /test/simple_script.py: --------------------------------------------------------------------------------
#!/usr/bin/env python
"""Exercise some simple functions to make sure their citations are picked up"""
import numpy as np
import bct

# Zachary karate club http://konect.cc/networks/ucidata-zachary/
s = """
1 2
1 3
2 3
1 4
2 4
3 4
1 5
1 6
1 7
5 7
6 7
1 8
2 8
3 8
4 8
1 9
3 9
3 10
1 11
5 11
6 11
1 12
1 13
4 13
1 14
2 14
3 14
4 14
6 17
7 17
1 18
2 18
1 20
2 20
1 22
2 22
24 26
25 26
3 28
24 28
25 28
3 29
24 30
27 30
2 31
9 31
1 32
25 32
26 32
29 32
3 33
9 33
15 33
16 33
19 33
21 33
23 33
24 33
30 33
31 33
32 33
9 34
10 34
14 34
15 34
16 34
19 34
20 34
21 34
23
34
24 34
27 34
28 34
29 34
30 34
31 34
32 34
33 34
""".strip()

# Build the 34-node adjacency matrix from the 1-based edge list above.
arr = np.zeros((34, 34), dtype=np.uint8)
for row in s.split('\n'):
    first, second = row.split(' ')
    arr[int(first)-1, int(second)-1] += 1

# symmetrize and binarize to get the undirected graph
arr = bct.binarize(arr + arr.T)

np.random.seed(1991)

# each call emits a duecredit citation checked by duecredit_test.py
eff = bct.efficiency_bin(arr)
mod = bct.modularity_und(arr)
rand = bct.randmio_und_connected(arr, 5)
-------------------------------------------------------------------------------- /test/very_long_test.py: --------------------------------------------------------------------------------
import pytest
import numpy as np

import bct

from .load_samples import load_sample


@pytest.mark.long
def test_link_communities():
    # Link-community detection on the thresholded sample.
    x = load_sample(thres=0.4)
    # NOTE(review): `seed` is assigned but never passed to link_communities;
    # presumably it was meant to seed the run -- confirm intent.
    seed = 949389104
    M = bct.link_communities(x)
    assert np.max(M) == 1
-------------------------------------------------------------------------------- /tox.ini: --------------------------------------------------------------------------------
[tox]
envlist = py{37,38,39,310}

[testenv]
deps = -rrequirements.txt
commands =
    pip install -U pip
    pip install .
    pytest --skiplong -v

[gh]
python =
    3.7 = py37
    3.8 = py38
    3.9 = py39
    3.10 = py310