├── .circleci └── config.yml ├── .coveragerc ├── .gitignore ├── .mailmap ├── .travis.yml ├── CHANGES ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── appveyor.yml ├── azure-pipelines.yml ├── bin └── pysurfer ├── codecov.yml ├── doc ├── Makefile ├── _static │ ├── background_options.png │ ├── banner.png │ ├── basic_session.png │ ├── cortex_options.png │ ├── favicon.ico │ ├── navy.css │ ├── pysurfer_logo_small.png │ └── split_view.png ├── _templates │ ├── class.rst │ ├── class_noinherited.rst │ ├── function.rst │ └── layout.html ├── changes.rst ├── conf.py ├── documentation │ ├── command_line.rst │ ├── custom_viz.rst │ ├── index.rst │ └── split_brain.rst ├── index.rst ├── install.rst ├── intro.rst ├── links_names.txt ├── logo_files │ ├── activation.png │ ├── annot.png │ ├── banner.pages │ ├── banner.pdf │ ├── banner.png │ ├── banner_serif.pdf │ ├── banner_serif.png │ ├── brain.png │ ├── contours.png │ ├── favicon.ico │ ├── green_activation.png │ ├── peaks.png │ ├── pysurfer_logo_small.png │ ├── pysurfer_logo_small.psd │ ├── pysurfer_logo_small_crop.png │ └── thickness.png ├── python_reference.rst └── surfer.cfg ├── examples ├── README.txt ├── example_data │ ├── README.rst │ ├── lh.alt_sig.nii.gz │ ├── lh.curv.fsaverage.mgz │ ├── lh.sig.nii.gz │ ├── mask.nii.gz │ ├── meg_source_estimate-lh.stc │ ├── meg_source_estimate-rh.stc │ ├── register.dat │ ├── resting_corr.nii.gz │ ├── rh.curv.fsaverage.mgz │ └── zstat.nii.gz ├── plot_basics.py ├── plot_custom_colors.py ├── plot_fmri_activation.py ├── plot_fmri_activation_volume.py ├── plot_fmri_conjunction.py ├── plot_foci.py ├── plot_freesurfer_normalization.py ├── plot_label.py ├── plot_label_foci.py ├── plot_meg_inverse_solution.py ├── plot_morphometry.py ├── plot_parc_values.py ├── plot_parcellation.py ├── plot_probabilistic_label.py ├── plot_resting_correlations.py ├── plot_topographic_contours.py ├── plot_transparent_brain.py ├── plot_vector_meg_inverse_solution.py ├── rotate_animation.py ├── save_movie.py ├── 
save_views.py └── show_views.py ├── make └── get_fsaverage.ps1 ├── setup.cfg ├── setup.py └── surfer ├── __init__.py ├── _commandline.py ├── cm.py ├── io.py ├── tests ├── __init__.py ├── test_utils.py └── test_viz.py ├── utils.py └── viz.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build_docs: 4 | docker: 5 | - image: circleci/python:3.7-stretch 6 | steps: 7 | - checkout 8 | - run: 9 | name: Set BASH_ENV 10 | command: | 11 | echo "set -e" >> $BASH_ENV; 12 | echo "export SUBJECTS_DIR=~/subjects" >> $BASH_ENV; 13 | echo "export DISPLAY=:99" >> $BASH_ENV; 14 | echo "export OPENBLAS_NUM_THREADS=4" >> $BASH_ENV; 15 | echo "export PATH=~/.local/bin:$PATH" >> $BASH_ENV; 16 | echo "export PATTERN=\"plot_\(?\!fmri_activation_volume\|resting_correlations\)\"" >> $BASH_ENV; 17 | - run: 18 | name: Merge with upstream 19 | command: | 20 | echo $(git log -1 --pretty=%B) | tee gitlog.txt 21 | echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt 22 | if [[ $(cat merge.txt) != "" ]]; then 23 | echo "Merging $(cat merge.txt)"; 24 | git remote add upstream git://github.com/nipy/PySurfer.git; 25 | git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge"; 26 | git fetch upstream master; 27 | fi 28 | 29 | - run: 30 | name: Spin up Xvfb 31 | command: | 32 | /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; 33 | 34 | # https://github.com/ContinuumIO/anaconda-issues/issues/9190#issuecomment-386508136 35 | # https://github.com/golemfactory/golem/issues/1019 36 | - run: 37 | name: Fix libgcc_s.so.1 pthread_cancel bug 38 | command: | 39 | sudo apt-get install qt5-default 40 | 41 | # Load our data 42 | - restore_cache: 43 | keys: 44 | - data-cache-0 45 | - pip-cache 46 | 47 | - run: 48 | name: Get Python running 49 | command: | 50 | python -m pip install --user -q 
--upgrade pip numpy 51 | python -m pip install --user -q --upgrade --progress-bar off scipy matplotlib vtk pyqt5 pyqt5-sip nibabel sphinx numpydoc pillow imageio imageio-ffmpeg sphinx-gallery 52 | python -m pip install --user -q --upgrade mayavi "https://github.com/mne-tools/mne-python/archive/master.zip" 53 | - save_cache: 54 | key: pip-cache 55 | paths: 56 | - ~/.cache/pip 57 | 58 | # Look at what we have and fail early if there is some library conflict 59 | - run: 60 | name: Check installation 61 | command: | 62 | LIBGL_DEBUG=verbose python -c "from mayavi import mlab; import matplotlib.pyplot as plt; mlab.figure(); plt.figure()" 63 | 64 | - run: 65 | name: Get data 66 | command: | 67 | echo $SUBJECTS_DIR 68 | mkdir -p $SUBJECTS_DIR 69 | python -c "import mne; mne.datasets.fetch_fsaverage(verbose=True)" 70 | ls $SUBJECTS_DIR 71 | - save_cache: 72 | key: data-cache-0 73 | paths: 74 | - "~/subjects" 75 | 76 | - run: 77 | name: Install PySurfer 78 | command: | 79 | python setup.py develop --user 80 | 81 | - run: 82 | name: Build docs 83 | command: | 84 | cd doc 85 | echo $PATTERN 86 | make html_dev-pattern 87 | 88 | - store_artifacts: 89 | path: doc/_build/html/ 90 | destination: html 91 | 92 | 93 | workflows: 94 | version: 2 95 | 96 | default: 97 | jobs: 98 | - build_docs 99 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = surfer 4 | include = */surfer/* 5 | omit = 6 | */setup.py 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.pyo 3 | *.log 4 | *~ 5 | .#* 6 | *.swp 7 | *.orig 8 | *.mov 9 | build 10 | .idea/ 11 | 12 | dist/ 13 | doc/_build/ 14 | doc/build/ 15 | doc/auto_examples/ 16 | doc/generated/ 17 | doc/example_data 18 | doc/modules/generated/ 19 | 
doc/documentation/pysurfer_usage.txt 20 | pip-log.txt 21 | .coverage 22 | tags 23 | doc/coverages 24 | doc/samples 25 | *.jpg 26 | pysurfer.egg-info 27 | *.avi 28 | .tmp/*.png 29 | examples/example_data/coord-lh.label 30 | .ipynb_checkpoints/ 31 | .cache/ 32 | .pytest_cache/ 33 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | Michael Waskom mwaskom Michael Waskom Michael Waskom 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | sudo: false 3 | dist: bionic 4 | services: 5 | - xvfb 6 | env: 7 | global: PYTHON_VERSION=3.8 8 | CONDA_DEPENDENCIES="numpy scipy matplotlib pyqt coverage pytest pytest-cov flake8 pygments traits traitsui pyface" 9 | PIP_DEPENDENCIES="codecov pytest-sugar pytest-faulthandler nibabel imageio imageio-ffmpeg" 10 | DISPLAY=:99.0 11 | SUBJECTS_DIR=~/subjects 12 | 13 | matrix: 14 | include: 15 | # Full 16 | - os: linux 17 | addons: 18 | apt: 19 | packages: 20 | - mencoder 21 | - libosmesa6 22 | - libglx-mesa0 23 | - libopengl0 24 | - libglx0 25 | - libdbus-1-3 26 | 27 | # 3.5, no mencoder 28 | - os: linux 29 | env: PYTHON_VERSION=3.6 30 | CONDA_DEPENDENCIES="numpy scipy matplotlib coverage pytest pytest-cov flake8" 31 | PIP_DEPENDENCIES="codecov pytest-sugar nibabel imageio imageio-ffmpeg" 32 | 33 | # OSX 34 | - os: osx 35 | 36 | # Setup anaconda 37 | before_install: 38 | # Rvm overrides cd with a function so that it can hook into it to run 39 | # some scripts, see https://github.com/travis-ci/travis-ci/issues/8703 40 | - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then 41 | unset -f cd; 42 | fi; 43 | - git clone https://github.com/astropy/ci-helpers.git 44 | - source ci-helpers/travis/setup_conda.sh 45 | - if [ "${PYTHON_VERSION}" == "3.6" ]; then 46 | pip install --only-binary 
":all:" "vtk<9"; 47 | pip install mayavi; 48 | else 49 | pip install --only-binary ":all:" -f "https://vtk.org/download" "vtk>=9"; 50 | pip install https://github.com/enthought/mayavi/zipball/master; 51 | fi; 52 | - mkdir -p $SUBJECTS_DIR 53 | - pip install "https://github.com/mne-tools/mne-python/archive/master.zip" 54 | - python -c "import mne; mne.datasets.fetch_fsaverage(verbose=True)" 55 | 56 | install: 57 | - python setup.py build 58 | - python setup.py install 59 | - SRC_DIR=$(pwd) 60 | 61 | script: 62 | - cd ${SRC_DIR} 63 | - pytest surfer --cov=surfer -v 64 | - make flake 65 | 66 | after_success: 67 | - codecov 68 | -------------------------------------------------------------------------------- /CHANGES: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | .. currentmodule:: surfer 5 | 6 | Version 0.11.0 7 | -------------- 8 | 9 | - Minimum Python version increased to 3.6 10 | - Add support to turn off full-screen antialiasing, which can be problematic on 11 | some drivers (e.g., MESA software rendering on Linux) 12 | - Simplification and refactoring of vector-valued data plotting 13 | - Removal of unnecessary ``info`` log messages about smoothing matrix and 14 | colormap generation (changed to ``debug`` level) 15 | - Clean up of exit functions like ``__del__`` to avoid segfaults 16 | 17 | 18 | Version 0.10.0 19 | -------------- 20 | 21 | - Added an option to smooth to nearest vertex in :meth:`Brain.add_data` using 22 | ``smoothing_steps='nearest'`` 23 | - Added options for using offscreen mode 24 | - Improved integration with Jupyter notebook 25 | - Avoided view changes when using :meth:`Brain.add_foci` 26 | 27 | Version 0.9 28 | ----------- 29 | 30 | - Fixed transparency issues with colormaps with 31 | :meth:`Brain.scale_data_colormap` 32 | - Added an example of using custom colors 33 | - Added options for choosing units for :class:`Brain` (``m`` or ``mm``) 34 | 35 | Version 0.8 36 | 
----------- 37 | 38 | - The surface geometry that is displayed can now be changed after initializing 39 | a ``Brain`` instance with e.g. ``brain.set_surf("smoothwm")``. 40 | - Allowed PySurfer to use custom matplotlib colormap objects or the names of 41 | custom colormaps that have been registered with matplotlib. 42 | - Added four new colormaps from ``seaborn``: ``rocket``, ``mako``, ``icefire``, 43 | and ``vlag``. 44 | - Terrain interaction is now possible via the ``interaction`` keyword argument. 45 | - An API reference page is now available. 46 | - Support is now provided for visualizing vector-valued (3 values per vertex) 47 | data. 48 | 49 | Version 0.7 50 | ----------- 51 | 52 | - Support for Python 3.3 and up. 53 | - A new ``alpha`` keyword to the ``Brain`` constructor now controls 54 | opacity of the rendered brain surface. 55 | - The ``curv`` keyword to the ``Brain`` constructor has been 56 | deprecated. To replicate previous behavior when ``curv`` was set to 57 | ``True`` simply omit the ``curv`` keyword. To replicate previous 58 | behavior when ``curv`` was set to ``False``, simply set the 59 | ``cortex`` keyword to None. To ease transition the ``curv`` argument 60 | will still be caught and processed, but it will be removed in a 61 | future release. 62 | - The ``cortex`` keyword to the ``Brain`` constructor now also accepts 63 | a valid color specification (such as a 3-tuple with RGB values or a 64 | color name) to render the cortical surface in that color without 65 | rendering binary curvature values. Additionally it now also accepts 66 | a dictionary with keyword arguments that are passed on to the call 67 | to ``mlab.pipeline.surface``. 68 | - ``Brain.save_movie`` now uses the ``imageio`` library, eliminating the need 69 | to manually install ``ffmpeg``. ``imageio`` has been added as an optional 70 | dependency which can be installed with 71 | ``$ pip install pysurfer[save_movie]``. 
72 | - ``Brain.save_image`` now has the option to save with alpha channel and 73 | antialiasing. 74 | 75 | Version 0.6 76 | ----------- 77 | 78 | - Values that were previously selected using a ``config_opts`` 79 | dictionary in the ``Brain`` constructor are now keyword arguments. 80 | This should make it easier to use tab-completion in IPython, and will 81 | generally simplify your scripts. To ease transition, a ``config_opts`` 82 | argument will still be caught, and its entries will be used (overriding 83 | the keyword arguments), but it will be removed in a future release. 84 | - The ability to set default values in a config file has been removed. 85 | While convenient, this approach encourages code that is not fully 86 | reproducible. While existing code that was written expecting a config 87 | file will still run, the visualization will fall back to default values. 88 | These should be updated directly in your plotting scripts. 89 | - Figure size is now specified only through the ``size`` keyword argument 90 | of :class:`Brain`. To make a rectangular window, pass a ``(width, height)`` 91 | tuple. Passing a single value to ``size`` will still make a square window. 92 | - The ``cortex`` keyword argument can now be a mayavi colormap name or 93 | a ``(colormap, min, max, reverse)`` tuple for full control. 94 | - Morphometry plotting was made more flexible with the ability to pass 95 | a specific colormap and anchor points for that colormap. Additionally, 96 | the default anchor points now use robust statistics to give better 97 | values. 98 | - Contour overlay plotting was made more flexible by adding keyword arguments 99 | to control whether a colorbar should be shown and whether existing contour 100 | overlays should be removed before plotting. 101 | 102 | Version 0.5 103 | ----------- 104 | 105 | - Added control over the width of outlines when plotting the borders of 106 | annotations or labels. 
107 | - The visual display of the surfaces was improved by using surface normals. 108 | - Made colormap specification in Brain.add_data and Brain.add_contour_overlay 109 | more flexible, with better defaults. 110 | - Brain.save_montage() can now produce 2d grids of views in addition to 111 | horizontal and vertical arrangements. 112 | - Fixed some installation issues and added explicit checks for dependencies 113 | on install. 114 | - Updated the installation docs with information about getting PySurfer 115 | running on OSX, and changed the official environment recommendation to 116 | Anaconda. 117 | 118 | Version 0.4 119 | ----------- 120 | 121 | Enhancements 122 | ~~~~~~~~~~~~ 123 | 124 | - Display data from both hemispheres simultaneously 125 | - Display multiple views simultaneously 126 | - Toggling Mayavi toolbars 127 | - Use nibabel for IO functions 128 | 129 | Version 0.3.1 130 | ------------- 131 | 132 | Fixes 133 | ~~~~~ 134 | 135 | - Reverted change in io.py that broke Python 2.6 compatibility 136 | - Updated traitsui import while keeping backgrounds compatibility 137 | 138 | Version 0.3 139 | ----------- 140 | 141 | Enhancements 142 | ~~~~~~~~~~~~ 143 | 144 | - TimeViewer GUI to control data exploration in the time dimension 145 | - Support for reading and visualizing MEG data from MNE 146 | - Function to project volume-based data using mri_vol2surf 147 | - Label routines can read scalar data and threshold visualization 148 | - Utility function to smooth overlay data along the cortical manifold 149 | - Example for displaying parcellation-based ROI analysis results 150 | - Example for displaying ROI labels 151 | - Example for plotting probabilistic labels 152 | - Improved color flexibility using matplotlib for many visualization modes 153 | - Exposed alpha channel for many visualization modes 154 | 155 | Fixes 156 | ~~~~~ 157 | 158 | - Big-endian overlay data should now display properly on 64-bit systems. 
159 | - Colorbar text displays properly on light backgrounds 160 | - Lights are oriented depending on hemisphere so surfaces are equally lit 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011, Neuroimaging in Python Team 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | * Redistributions of source code must retain the above copyright 7 | notice, this list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright 9 | notice, this list of conditions and the following disclaimer in the 10 | documentation and/or other materials provided with the distribution. 11 | * Neither the name of the nor the 12 | names of its contributors may be used to endorse or promote products 13 | derived from this software without specific prior written permission. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY 19 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # simple makefile to simplify repetetive build env management tasks under posix 2 | 3 | # caution: testing won't work on windows, see README 4 | 5 | PYTHON ?= python 6 | NOSETESTS ?= nosetests 7 | CTAGS ?= ctags 8 | 9 | all: clean inplace test 10 | 11 | clean-pyc: 12 | find . -name "*.pyc" | xargs rm -f 13 | 14 | clean-so: 15 | find . -name "*.so" | xargs rm -f 16 | find . -name "*.pyd" | xargs rm -f 17 | 18 | clean-build: 19 | rm -rf build 20 | 21 | clean-ctags: 22 | rm -f tags 23 | 24 | clean: clean-build clean-pyc clean-so clean-ctags 25 | 26 | flake: 27 | @if command -v flake8 > /dev/null; then \ 28 | flake8 --exclude surfer/cm.py --count surfer examples; \ 29 | fi 30 | 31 | in: inplace # just a shortcut 32 | inplace: 33 | $(PYTHON) setup.py build_ext -i 34 | 35 | pytest: 36 | rm -f .coverage 37 | pytest surfer 38 | 39 | test: clean pytest flake 40 | 41 | 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | PySurfer: Neuroimaging visualization in Python 2 | ============================================== 3 | 4 | 5 | 6 | PySurfer is a Python package for interacting with a cortical surface 7 | representations of neuroimaging data. It extends Mayavi's powerful 8 | visualization engine with a high-level interface for working with MRI and MEG 9 | data. 
10 | 11 | PySurfer offers both a command-line interface designed to broadly the 12 | Freesurfer Tksurfer program and a Python library for writing scripts to 13 | efficiently explore complex datasets and prepare publication-ready figures. 14 | 15 | To goal of the project is to facilitate the production of figures that are both 16 | beautiful and scientifically informative. 17 | 18 | Important Links 19 | --------------- 20 | 21 | - Official source code repository: https://github.com/nipy/PySurfer 22 | - Online documentation (stable): http://pysurfer.github.com/ 23 | - NITRC page: http://www.nitrc.org/projects/pysurfer 24 | - Freesurfer: http://surfer.nmr.mgh.harvard.edu/ 25 | - Mailing list: https://mail.python.org/mailman/listinfo/neuroimaging 26 | 27 | Install 28 | ------- 29 | 30 | This packages uses setuptools. To install it for all users, run: 31 | 32 | python setup.py build 33 | sudo python setup.py install 34 | 35 | If you do not have sudo privileges, you can install locally: 36 | 37 | python setup.py install --home 38 | 39 | For information about dependencies, please see the [online 40 | documentation](http://pysurfer.github.io/install.html) 41 | 42 | License 43 | ------- 44 | 45 | Available under the Revised BSD (3-clause) license. 46 | 47 | Testing 48 | ------- 49 | 50 | You can launch the test suite by running `nosetests` from the source folder. 51 | 52 | Another way to test is to build the documentation, which will run the example 53 | scripts and automatically generate static image output. From the source 54 | directory: 55 | 56 | cd doc/ 57 | make clean 58 | make html 59 | 60 | The resulting documentation will live at _build/html/index.html, which can be 61 | compared to the online docs. 62 | 63 | Either method will work only if you have Freesurfer installed on your machine 64 | with a valid SUBJECTS\_DIR folder. 
65 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | matrix: 3 | - PYTHON: "C:\\Python36-x64" 4 | PYTHON_VERSION: "3.6" 5 | PYTHON_ARCH: "64" 6 | install: 7 | - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" 8 | - "python --version" 9 | - "pip install numpy scipy matplotlib nose pillow pytest pytest-cov pytest-faulthandler coverage imageio imageio-ffmpeg codecov pyqt5==5.9" 10 | - "pip install traits traitsui pyface vtk https://github.com/enthought/mayavi/archive/master.zip nibabel" 11 | - "powershell make/get_fsaverage.ps1" 12 | - "python setup.py develop" 13 | - "SET SUBJECTS_DIR=%CD%\\subjects" 14 | - "ls %CD%\\subjects" 15 | 16 | build: false # Not a C# project, build stuff at the test step instead. 17 | 18 | test_script: 19 | # Run the project tests 20 | - "pytest surfer --cov=surfer -v" 21 | 22 | on_success: 23 | - "codecov" 24 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | # start a new build for every push 3 | batch: False 4 | branches: 5 | include: 6 | - master 7 | 8 | jobs: 9 | - job: Windows 10 | variables: 11 | PIP_CACHE_FOLDER: $(Pipeline.Workspace)/.cache/pip 12 | pool: 13 | vmIMage: 'VS2017-Win2016' 14 | strategy: 15 | maxParallel: 4 16 | matrix: 17 | Python37-64bit: 18 | PYTHON_VERSION: '3.7' 19 | PYTHON_ARCH: 'x64' 20 | steps: 21 | - task: UsePythonVersion@0 22 | inputs: 23 | versionSpec: $(PYTHON_VERSION) 24 | architecture: $(PYTHON_ARCH) 25 | addToPath: true 26 | - task: Cache@2 27 | inputs: 28 | key: 'pip' 29 | path: $(PIP_CACHE_FOLDER) 30 | displayName: Cache pip packages 31 | - powershell: | 32 | pip install numpy scipy matplotlib nose pillow pytest pytest-cov pytest-faulthandler coverage imageio imageio-ffmpeg codecov pyqt5==5.9 --cache-dir 
$(PIP_CACHE_FOLDER) 33 | pip install traits traitsui pyface vtk https://github.com/enthought/mayavi/archive/master.zip nibabel --cache-dir $(PIP_CACHE_FOLDER) 34 | displayName: 'Install pip dependencies' 35 | - powershell: | 36 | powershell make/get_fsaverage.ps1 37 | $env:SUBJECTS_DIR = '$(System.DefaultWorkingDirectory)' + '\subjects' 38 | Write-Host ("##vso[task.setvariable variable=SUBJECTS_DIR]" + $env:SUBJECTS_DIR) 39 | displayName: 'Get fsaverage' 40 | - powershell: | 41 | git clone --depth 1 git://github.com/pyvista/gl-ci-helpers.git 42 | powershell gl-ci-helpers/appveyor/install_opengl.ps1 43 | displayName: 'Get OpenGL' 44 | - script: python setup.py develop 45 | displayName: 'Install' 46 | - script: pytest surfer --cov=surfer -v 47 | displayName: 'Run tests' 48 | - script: codecov --root %BUILD_REPOSITORY_LOCALPATH% -t %CODECOV_TOKEN% 49 | displayName: 'Codecov' 50 | env: 51 | CODECOV_TOKEN: $(CODECOV_TOKEN) 52 | condition: always() 53 | - task: PublishTestResults@2 54 | inputs: 55 | testResultsFiles: 'junit-*.xml' 56 | testRunTitle: 'Publish test results for Python $(python.version)' 57 | condition: always() 58 | -------------------------------------------------------------------------------- /bin/pysurfer: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | """ 3 | This is the top-level command-line interface script for PySurfer. 4 | It accepts all of the relevant arguments, then turns around and calls 5 | IPython on itself to then drop the user into an IPython environment. 
6 | """ 7 | import os 8 | import sys 9 | from surfer._commandline import parser 10 | from distutils.version import LooseVersion 11 | import importlib 12 | 13 | 14 | if __name__ == '__main__': 15 | is_ipython = False 16 | try: 17 | get_ipython 18 | is_ipython = True 19 | except NameError: 20 | try: # for old iPython versions 21 | _ip 22 | is_ipython = True 23 | except NameError: 24 | pass 25 | 26 | # Make sure this is going to work before we have to 27 | # boot up mlab/IPython 28 | if len(sys.argv) > 3: 29 | subjects_dir = os.environ['SUBJECTS_DIR'] 30 | if sys.argv[2] in ['both', 'split']: 31 | hemi_checks = ['lh', 'rh'] 32 | else: 33 | hemi_checks = [sys.argv[2]] 34 | for h in hemi_checks: 35 | surf_file = os.path.join(subjects_dir, 36 | "%s/surf/%s.%s" % (sys.argv[1], h, 37 | sys.argv[3])) 38 | if not os.path.exists(surf_file): 39 | sys.exit("ERROR: Could not find %s" % surf_file) 40 | 41 | if not is_ipython: 42 | # Parse the args so that --help exits back to the shell 43 | # instead of into IPython (this would be cleaner if I 44 | # could figure out whether that is possible to do 45 | # from with a script IPython is executing on startup 46 | if len(sys.argv) < 4: 47 | parser.parse_args(["--help"]) 48 | else: 49 | args = parser.parse_args() 50 | 51 | # Start IPython and execute the load script 52 | path = os.path.split(__file__)[0] 53 | load_file = __file__ 54 | import IPython 55 | if LooseVersion(IPython.__version__) < '0.11': 56 | flag = '-nobanner ' 57 | flag += '-wthread ' 58 | else: 59 | flag = '--no-banner ' 60 | try: 61 | gui = 'wx' 62 | importlib.import_module(gui) 63 | except ImportError: 64 | pass 65 | gui = 'qt' 66 | flag += '--gui={gui} -i '.format(gui=gui) 67 | cmd = 'ipython %s ' % (flag + __file__ + 68 | ' "%s"' % ' '.join(sys.argv[1:])) 69 | os.system(cmd) 70 | else: 71 | args = parser.parse_args(sys.argv[1].split()) 72 | 73 | from surfer import Brain 74 | 75 | # Load up the figure and underlying brain object 76 | b = Brain(args.subject_id, 
args.hemi, args.surf, title=args.title, 77 | cortex=args.cortex, alpha=args.alpha, size=args.size, 78 | background=args.background, foreground=args.foreground, 79 | views=args.views) 80 | 81 | # Maybe load some morphometry 82 | if args.morphometry is not None: 83 | b.add_morphometry(args.morphometry) 84 | 85 | # Maybe load an overlay 86 | if args.overlay is not None: 87 | if args.range is not None: 88 | args.min, args.max = args.range 89 | b.add_overlay(args.overlay, args.min, args.max, args.sign) 90 | 91 | # Maybe load an annot 92 | if args.annotation is not None: 93 | if not args.borders: 94 | args.borders = any([args.overlay, args.morphometry]) 95 | b.add_annotation(args.annotation, borders=args.borders) 96 | 97 | # Maybe load a label 98 | if args.label is not None: 99 | if not args.borders: 100 | args.borders = any([args.overlay, args.morphometry]) 101 | b.add_label(args.label, borders=args.borders) 102 | 103 | # Also point brain at the Brain() object 104 | brain = b 105 | 106 | # It's nice to have mlab in the namespace, but we'll import it 107 | # after the other stuff so getting usage is not interminable 108 | from mayavi import mlab 109 | assert mlab # make pyflakes happy 110 | 111 | # Now clean up the namespace a bit 112 | del parser, args 113 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | precision: 2 3 | round: down 4 | range: "70...100" 5 | status: 6 | project: 7 | default: 8 | target: auto 9 | threshold: 0.01 10 | patch: false 11 | changes: false 12 | comment: 13 | layout: "header, diff, sunburst, uncovered" 14 | behavior: default 15 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the 
command line. 5 | SPHINXOPTS = -nWT --keep-going 6 | SPHINXBUILD = sphinx-build 7 | GPAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html_stable to make standalone HTML files (stable version)" 20 | @echo " html_dev to make standalone HTML files (dev version)" 21 | @echo " html_dev-pattern to make standalone HTML files for one example dir (dev version)" 22 | @echo " *-noplot to make standalone HTML files without plotting" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " changes to make an overview of all changed/added/deprecated items" 30 | @echo " linkcheck to check all external links for integrity" 31 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 32 | 33 | clean: 34 | -rm -rf $(BUILDDIR)/* 35 | -rm -rf auto_examples 36 | -rm -rf auto_tutorials 37 | -rm -rf generated 38 | -rm -rf *.stc 39 | -rm -rf *.fif 40 | -rm -rf *.nii.gz 41 | 42 | html_stable: 43 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html_stable 44 | @echo 45 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html_stable." 46 | 47 | html_dev: 48 | BUILD_DEV_HTML=1 $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 49 | @echo 50 | @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/html" 51 | 52 | html_dev-pattern: 53 | BUILD_DEV_HTML=1 $(SPHINXBUILD) -D plot_gallery=1 -D sphinx_gallery_conf.filename_pattern=$(PATTERN) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html" 56 | 57 | html_dev-noplot: 58 | BUILD_DEV_HTML=1 $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 61 | 62 | dirhtml: 63 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 64 | @echo 65 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/MNE.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/MNE.qhc" 91 | 92 | latex: 93 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 94 | @echo 95 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 96 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 97 | "run these through (pdf)latex." 
98 | 99 | changes: 100 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 101 | @echo 102 | @echo "The overview file is in $(BUILDDIR)/changes." 103 | 104 | linkcheck: 105 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 106 | @echo 107 | @echo "Link check complete; look for any errors in the above output " \ 108 | "or in $(BUILDDIR)/linkcheck/output.txt." 109 | 110 | doctest: 111 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 112 | @echo "Testing of doctests in the sources finished, look at the " \ 113 | "results in $(BUILDDIR)/doctest/output.txt." 114 | 115 | view: 116 | @python -c "import webbrowser; webbrowser.open_new_tab('file://$(PWD)/$(BUILDDIR)/html/index.html')" 117 | 118 | show: view 119 | -------------------------------------------------------------------------------- /doc/_static/background_options.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/background_options.png -------------------------------------------------------------------------------- /doc/_static/banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/banner.png -------------------------------------------------------------------------------- /doc/_static/basic_session.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/basic_session.png -------------------------------------------------------------------------------- /doc/_static/cortex_options.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/cortex_options.png -------------------------------------------------------------------------------- /doc/_static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/favicon.ico -------------------------------------------------------------------------------- /doc/_static/navy.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Alternate Sphinx design 3 | * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. 4 | */ 5 | 6 | body { 7 | font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; 8 | font-size: 14px; 9 | letter-spacing: -0.01em; 10 | line-height: 150%; 11 | text-align: center; 12 | /*background-color: #AFC1C4; */ 13 | /*background-color: -moz-linear-gradient(linear, left top, left bottom, from(#660000), to(#000000));*/ 14 | background-color: #151515; 15 | color: black; 16 | padding: 0; 17 | border: 1px solid #aaa; 18 | 19 | margin: 0px 80px 0px 80px; 20 | min-width: 740px; 21 | } 22 | 23 | a { 24 | color: #330033; 25 | text-decoration: none; 26 | } 27 | 28 | a:hover { 29 | color: #99CCFF; 30 | } 31 | 32 | pre { 33 | font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; 34 | font-size: 0.95em; 35 | letter-spacing: 0.015em; 36 | padding: 0.5em; 37 | border: 1px solid #ccc; 38 | background-color: #f8f8f8; 39 | } 40 | 41 | td.linenos pre { 42 | padding: 0.5em 0; 43 | border: 0; 44 | background-color: #000000; 45 | color: #aaa; 46 | } 47 | 48 | table.highlighttable { 49 | margin-left: 0.5em; 50 | } 51 | 52 | table.highlighttable td { 53 | padding: 0 0.5em 0 0.5em; 54 | } 55 | 56 | cite, code, tt { 57 | font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; 58 | 
font-size: 0.95em; 59 | letter-spacing: 0.01em; 60 | } 61 | 62 | hr { 63 | border: 1px solid #abc; 64 | margin: 2em; 65 | } 66 | 67 | tt { 68 | background-color: #f2f2f2; 69 | border-bottom: 1px solid #ddd; 70 | color: #333; 71 | } 72 | 73 | tt.descname { 74 | background-color: transparent; 75 | font-weight: bold; 76 | font-size: 1.2em; 77 | border: 0; 78 | } 79 | 80 | tt.descclassname { 81 | background-color: transparent; 82 | border: 0; 83 | } 84 | 85 | tt.xref { 86 | background-color: transparent; 87 | font-weight: bold; 88 | border: 0; 89 | } 90 | 91 | a tt { 92 | background-color: transparent; 93 | font-weight: bold; 94 | border: 0; 95 | color: #CA7900; 96 | } 97 | 98 | a tt:hover { 99 | color: #2491CF; 100 | } 101 | 102 | dl { 103 | margin-bottom: 15px; 104 | } 105 | 106 | dd p { 107 | margin-top: 0px; 108 | } 109 | 110 | dd ul, dd table { 111 | margin-bottom: 10px; 112 | } 113 | 114 | dd { 115 | margin-top: 3px; 116 | margin-bottom: 10px; 117 | margin-left: 30px; 118 | } 119 | 120 | .refcount { 121 | color: #060; 122 | } 123 | 124 | dt:target, 125 | .highlight { 126 | background-color: #fbe54e; 127 | } 128 | 129 | dl.class, dl.function { 130 | border-top: 2px solid #888; 131 | } 132 | 133 | dl.method, dl.attribute { 134 | border-top: 1px solid #aaa; 135 | } 136 | 137 | dl.glossary dt { 138 | font-weight: bold; 139 | font-size: 1.1em; 140 | } 141 | 142 | pre { 143 | line-height: 120%; 144 | } 145 | 146 | pre a { 147 | color: inherit; 148 | text-decoration: underline; 149 | } 150 | 151 | .first { 152 | margin-top: 0 !important; 153 | } 154 | 155 | div.document { 156 | background-color: white; 157 | text-align: left; 158 | background-image: url(contents.png); 159 | background-repeat: repeat-x; 160 | } 161 | 162 | /* 163 | div.documentwrapper { 164 | width: 100%; 165 | } 166 | */ 167 | 168 | div.clearer { 169 | clear: both; 170 | } 171 | 172 | div.related h3 { 173 | display: none; 174 | } 175 | 176 | div.related ul { 177 | background-image: url(navigation.png); 
178 | height: 2em; 179 | list-style: none; 180 | border-top: 1px solid #ddd; 181 | border-bottom: 1px solid #ddd; 182 | margin: 0; 183 | padding-left: 10px; 184 | } 185 | 186 | div.related ul li { 187 | margin: 0; 188 | padding: 0; 189 | height: 2em; 190 | float: left; 191 | } 192 | 193 | div.related ul li.right { 194 | float: right; 195 | margin-right: 5px; 196 | } 197 | 198 | div.related ul li a { 199 | margin: 0; 200 | padding: 0 5px 0 5px; 201 | line-height: 1.75em; 202 | color: #330033; 203 | } 204 | 205 | div.related ul li a:hover { 206 | color: #C0C0C0; 207 | } 208 | 209 | div.body { 210 | margin: 0; 211 | padding: 0.5em 20px 20px 20px; 212 | max-width: unset; 213 | } 214 | 215 | div.bodywrapper { 216 | margin: 0 240px 0 0; 217 | border-right: 1px solid #ccc; 218 | } 219 | 220 | div.body a { 221 | text-decoration: underline; 222 | } 223 | 224 | div.sphinxsidebar { 225 | margin: 0; 226 | padding: 0.5em 15px 15px 0; 227 | width: 210px; 228 | float: right; 229 | text-align: left; 230 | /* margin-left: -100%; */ 231 | } 232 | 233 | div.sphinxsidebar h4, div.sphinxsidebar h3 { 234 | margin: 1em 0 0.5em 0; 235 | font-size: 0.9em; 236 | padding: 0.1em 0 0.1em 0.5em; 237 | color: white; 238 | border: 1px solid #86989B; 239 | background-color: #C0C0C0; 240 | } 241 | 242 | div.sphinxsidebar ul { 243 | padding-left: 1.5em; 244 | margin-top: 7px; 245 | list-style: none; 246 | padding: 0; 247 | line-height: 130%; 248 | } 249 | 250 | div.sphinxsidebar ul ul { 251 | list-style: square; 252 | margin-left: 20px; 253 | } 254 | 255 | p { 256 | margin: 0.8em 0 0.5em 0; 257 | } 258 | 259 | p.rubric { 260 | font-weight: bold; 261 | } 262 | 263 | h1 { 264 | margin: 0; 265 | padding: 0.7em 0 0.3em 0; 266 | font-size: 1.5em; 267 | color: #11557C; 268 | } 269 | 270 | h2 { 271 | margin: 1.3em 0 0.2em 0; 272 | font-size: 1.35em; 273 | padding: 0; 274 | } 275 | 276 | h3 { 277 | margin: 1em 0 -0.3em 0; 278 | font-size: 1.2em; 279 | } 280 | 281 | h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { 282 | 
color: black!important; 283 | } 284 | 285 | h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { 286 | display: none; 287 | margin: 0 0 0 0.3em; 288 | padding: 0 0.2em 0 0.2em; 289 | color: #aaa!important; 290 | } 291 | 292 | h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, 293 | h5:hover a.anchor, h6:hover a.anchor { 294 | display: inline; 295 | } 296 | 297 | h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, 298 | h5 a.anchor:hover, h6 a.anchor:hover { 299 | color: #777; 300 | background-color: #eee; 301 | } 302 | 303 | table { 304 | border-collapse: collapse; 305 | margin: 0 -0.5em 0 -0.5em; 306 | } 307 | 308 | table td, table th { 309 | padding: 0.2em 0.5em 0.2em 0.5em; 310 | } 311 | 312 | div.footer { 313 | background-color: #C0C0C0; 314 | color: #000000; 315 | padding: 3px 8px 3px 0; 316 | clear: both; 317 | font-size: 0.8em; 318 | text-align: right; 319 | } 320 | 321 | div.footer a { 322 | color: #000000; 323 | text-decoration: underline; 324 | } 325 | 326 | div.pagination { 327 | margin-top: 2em; 328 | padding-top: 0.5em; 329 | border-top: 1px solid black; 330 | text-align: center; 331 | } 332 | 333 | div.sphinxsidebar ul.toc { 334 | margin: 1em 0 1em 0; 335 | padding: 0 0 0 0.5em; 336 | list-style: none; 337 | } 338 | 339 | div.sphinxsidebar ul.toc li { 340 | margin: 0.5em 0 0.5em 0; 341 | font-size: 0.9em; 342 | line-height: 130%; 343 | } 344 | 345 | div.sphinxsidebar ul.toc li p { 346 | margin: 0; 347 | padding: 0; 348 | } 349 | 350 | div.sphinxsidebar ul.toc ul { 351 | margin: 0.2em 0 0.2em 0; 352 | padding: 0 0 0 1.8em; 353 | } 354 | 355 | div.sphinxsidebar ul.toc ul li { 356 | padding: 0; 357 | } 358 | 359 | div.admonition, div.warning { 360 | font-size: 0.9em; 361 | margin: 1em 0 0 0; 362 | border: 1px solid #86989B; 363 | background-color: #f7f7f7; 364 | } 365 | 366 | div.admonition p, div.warning p { 367 | margin: 0.5em 1em 0.5em 1em; 368 | padding: 0; 369 | } 370 | 371 | 
div.admonition pre, div.warning pre { 372 | margin: 0.4em 1em 0.4em 1em; 373 | } 374 | 375 | div.admonition p.admonition-title, 376 | div.warning p.admonition-title { 377 | margin: 0; 378 | padding: 0.1em 0 0.1em 0.5em; 379 | color: white; 380 | border-bottom: 1px solid #86989B; 381 | font-weight: bold; 382 | background-color: #AFC1C4; 383 | } 384 | 385 | div.warning { 386 | border: 1px solid #000000; 387 | } 388 | 389 | div.warning p.admonition-title { 390 | background-color: #000000; 391 | border-bottom-color: #940000; 392 | } 393 | 394 | div.admonition ul, div.admonition ol, 395 | div.warning ul, div.warning ol { 396 | margin: 0.1em 0.5em 0.5em 3em; 397 | padding: 0; 398 | } 399 | 400 | div.versioninfo { 401 | margin: 1em 0 0 0; 402 | border: 1px solid #ccc; 403 | background-color: #DDEAF0; 404 | padding: 8px; 405 | line-height: 1.3em; 406 | font-size: 0.9em; 407 | } 408 | 409 | 410 | a.headerlink { 411 | color: #c60f0f!important; 412 | font-size: 1em; 413 | margin-left: 6px; 414 | padding: 0 4px 0 4px; 415 | text-decoration: none!important; 416 | visibility: hidden; 417 | } 418 | 419 | h1:hover > a.headerlink, 420 | h2:hover > a.headerlink, 421 | h3:hover > a.headerlink, 422 | h4:hover > a.headerlink, 423 | h5:hover > a.headerlink, 424 | h6:hover > a.headerlink, 425 | dt:hover > a.headerlink { 426 | visibility: visible; 427 | } 428 | 429 | a.headerlink:hover { 430 | background-color: #ccc; 431 | color: white!important; 432 | } 433 | 434 | table.indextable td { 435 | text-align: left; 436 | vertical-align: top; 437 | } 438 | 439 | table.indextable dl, table.indextable dd { 440 | margin-top: 0; 441 | margin-bottom: 0; 442 | } 443 | 444 | table.indextable tr.pcap { 445 | height: 10px; 446 | } 447 | 448 | table.indextable tr.cap { 449 | margin-top: 10px; 450 | background-color: #f2f2f2; 451 | } 452 | 453 | img.toggler { 454 | margin-right: 3px; 455 | margin-top: 3px; 456 | cursor: pointer; 457 | } 458 | 459 | img.inheritance { 460 | border: 0px 461 | } 462 | 463 | 
form.pfform { 464 | margin: 10px 0 20px 0; 465 | } 466 | 467 | table.contentstable { 468 | width: 90%; 469 | } 470 | 471 | table.contentstable p.biglink { 472 | line-height: 150%; 473 | } 474 | 475 | a.biglink { 476 | font-size: 1.3em; 477 | } 478 | 479 | span.linkdescr { 480 | font-style: italic; 481 | padding-top: 5px; 482 | font-size: 90%; 483 | } 484 | 485 | ul.search { 486 | margin: 10px 0 0 20px; 487 | padding: 0; 488 | } 489 | 490 | ul.search li { 491 | padding: 5px 0 5px 20px; 492 | background-image: url(file.png); 493 | background-repeat: no-repeat; 494 | background-position: 0 7px; 495 | } 496 | 497 | ul.search li a { 498 | font-weight: bold; 499 | } 500 | 501 | ul.search li div.context { 502 | color: #888; 503 | margin: 2px 0 0 30px; 504 | text-align: left; 505 | } 506 | 507 | ul.keywordmatches li.goodmatch a { 508 | font-weight: bold; 509 | } 510 | 511 | .sphx-glr-thumbcontainer { 512 | min-height: 320px !important; 513 | margin: 20px !important; 514 | } 515 | .sphx-glr-thumbcontainer .figure { 516 | width: 250px !important; 517 | max-width: 250px !important; 518 | } 519 | .sphx-glr-thumbcontainer img { 520 | max-height: 250px !important; 521 | max-width: 250px !important; 522 | width: 250px !important; 523 | } 524 | .sphx-glr-thumbcontainer a.internal { 525 | padding: 270px 10px 0 !important; 526 | } 527 | 528 | @media only screen and (min-width: 140ex) { 529 | div.sphx-glr-footer-example { 530 | position: unset; 531 | right: unset; 532 | top: unset; 533 | background: unset; 534 | padding: unset; 535 | border: unset; 536 | max-width: unset; 537 | width: unset; 538 | } 539 | div.sphx-glr-footer-example code { 540 | max-width: unset; 541 | overflow: unset; 542 | } 543 | div.sphx-glr-footer-example a { 544 | max-width: unset; 545 | overflow: unset; 546 | padding: 1ex; 547 | } 548 | div.sphx-glr-footer-example a code span:last-child { 549 | font-size: unset; 550 | } 551 | } 552 | 553 | table.longtable.align-default { 554 | width: 100%; 555 | } 556 | 
-------------------------------------------------------------------------------- /doc/_static/pysurfer_logo_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/pysurfer_logo_small.png -------------------------------------------------------------------------------- /doc/_static/split_view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/_static/split_view.png -------------------------------------------------------------------------------- /doc/_templates/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autoclass:: {{ objname }} 7 | :special-members: __contains__,__getitem__,__iter__,__len__,__add__,__sub__,__mul__,__div__,__neg__,__hash__ 8 | 9 | {% block methods %} 10 | {% endblock %} 11 | 12 | 13 | -------------------------------------------------------------------------------- /doc/_templates/class_noinherited.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autoclass:: {{ objname }} 7 | :no-inherited-members: 8 | 9 | {% block methods %} 10 | {% endblock %} 11 | -------------------------------------------------------------------------------- /doc/_templates/function.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autofunction:: {{ objname }} 7 | 8 | .. include:: {{module}}.{{objname}}.examples 9 | 10 | .. raw:: html 11 | 12 |
13 | -------------------------------------------------------------------------------- /doc/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "!layout.html" %} 2 | 3 | {% block extrahead %} 4 | 5 | {% endblock %} 6 | 7 | {% set title = 'PySurfer Python Neuroimaging Visualization' %} 8 | 9 | {% block header %} 10 | 14 | {% endblock %} 15 | -------------------------------------------------------------------------------- /doc/changes.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CHANGES -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # with '#' will be ignored, and an empty message aborts the commit. 3 | # 4 | # PySurfer documentation build configuration file, created by 5 | # sphinx-quickstart on Thu May 12 12:45:43 2011. 6 | # 7 | # This file is execfile()d with the current directory set to its containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import inspect 16 | import os 17 | from os.path import relpath, dirname 18 | import sys 19 | from datetime import date 20 | import sphinx_gallery # noqa 21 | from sphinx_gallery.sorting import FileNameSortKey 22 | from numpydoc import numpydoc, docscrape # noqa 23 | import surfer 24 | 25 | # If extensions (or modules to document with autodoc) are in another directory, 26 | # add these directories to sys.path here. If the directory is relative to the 27 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
28 | #sys.path.insert(0, os.path.abspath('.')) 29 | 30 | # -- General configuration ----------------------------------------------------- 31 | 32 | # If your documentation needs a minimal Sphinx version, state it here. 33 | #needs_sphinx = '1.0' 34 | 35 | # Add our own sphinx extensions directory to python path 36 | sys.path.append(os.path.abspath("sphinxext")) 37 | 38 | # Add any Sphinx extension module names here, as strings. They can be extensions 39 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 40 | extensions = [ 41 | 'sphinx.ext.autodoc', 42 | 'sphinx.ext.autosummary', 43 | 'sphinx.ext.coverage', 44 | 'sphinx.ext.doctest', 45 | 'sphinx.ext.intersphinx', 46 | 'sphinx.ext.linkcode', 47 | 'sphinx.ext.mathjax', 48 | 'sphinx.ext.todo', 49 | 'sphinx_gallery.gen_gallery', 50 | 'numpydoc', 51 | ] 52 | 53 | autosummary_generate = True 54 | autodoc_default_options = {'inherited-members': None} 55 | 56 | # Add any paths that contain templates here, relative to this directory. 57 | templates_path = ['_templates'] 58 | 59 | # The suffix of source filenames. 60 | source_suffix = '.rst' 61 | 62 | # The encoding of source files. 63 | #source_encoding = 'utf-8-sig' 64 | 65 | # The master toctree document. 66 | master_doc = 'index' 67 | 68 | # General information about the project. 69 | project = u'PySurfer' 70 | td = date.today() 71 | copyright = (u'2012-%s, Michael Waskom, Alexandre Gramfort, Scott Burns, ' 72 | 'Martin Luessi, Eric Larson' % (td.year,)) 73 | 74 | nitpicky = True 75 | needs_sphinx = '1.5' 76 | 77 | # The version info for the project you're documenting, acts as replacement for 78 | # |version| and |release|, also used in various other places throughout the 79 | # built documents. 80 | 81 | # The short X.Y version. 82 | version = surfer.__version__ 83 | # The full version, including alpha/beta/rc tags. 84 | release = surfer.__version__ 85 | 86 | # The language for content autogenerated by Sphinx. 
Refer to documentation 87 | # for a list of supported languages. 88 | #language = None 89 | 90 | # There are two options for replacing |today|: either, you set today to some 91 | # non-false value, then it is used: 92 | #today = '' 93 | # Else, today_fmt is used as the format for a strftime call. 94 | #today_fmt = '%B %d, %Y' 95 | 96 | # List of documents that shouldn't be included in the build. 97 | unused_docs = [] 98 | 99 | # List of directories, relative to source directory, that shouldn't be searched 100 | # for source files. 101 | exclude_trees = ['_build'] 102 | exclude_patterns = ['source/generated'] 103 | 104 | # The reST default role (used for this markup: `text`) to use for all 105 | # documents. 106 | #default_role = None 107 | 108 | # If true, '()' will be appended to :func: etc. cross-reference text. 109 | #add_function_parentheses = True 110 | 111 | # If true, the current module name will be prepended to all description 112 | # unit titles (such as .. function::). 113 | #add_module_names = True 114 | 115 | # If true, sectionauthor and moduleauthor directives will be shown in the 116 | # output. They are ignored by default. 117 | #show_authors = False 118 | 119 | # The name of the Pygments (syntax highlighting) style to use. 120 | pygments_style = 'sphinx' # friendly, manni, murphy, tango 121 | 122 | # A list of ignored prefixes for module index sorting. 123 | modindex_common_prefix = ['surfer.'] 124 | 125 | 126 | 127 | # -- Options for HTML output ---------------------------------------------- 128 | 129 | # The theme to use for HTML and HTML Help pages. See the documentation for 130 | # a list of builtin themes. 131 | # html_theme = 'bootstrap' 132 | html_theme = "sphinxdoc" 133 | 134 | # Add any paths that contain custom themes here, relative to this directory. 135 | html_theme_path = [] 136 | 137 | # The name for this set of Sphinx documents. If None, it defaults to 138 | # " v documentation". 
139 | #html_title = None 140 | 141 | # A shorter title for the navigation bar. Default is the same as html_title. 142 | #html_short_title = None 143 | 144 | # The name of an image file (relative to this directory) to place at the top 145 | # of the sidebar. 146 | #html_logo = None 147 | 148 | # The name of an image file (within the static path) to use as favicon of the 149 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 150 | # pixels large. 151 | html_favicon = "_static/favicon.ico" 152 | 153 | # Add any paths that contain custom static files (such as style sheets) here, 154 | # relative to this directory. They are copied after the builtin static files, 155 | # so a file named "default.css" will overwrite the builtin "default.css". 156 | html_static_path = ['_static'] 157 | 158 | # Add any extra paths that contain custom files (such as robots.txt or 159 | # .htaccess) here, relative to this directory. These files are copied 160 | # directly to the root of the documentation. 161 | #html_extra_path = [] 162 | 163 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 164 | # using the given strftime format. 165 | #html_last_updated_fmt = '%b %d, %Y' 166 | 167 | # If true, SmartyPants will be used to convert quotes and dashes to 168 | # typographically correct entities. 169 | #html_use_smartypants = True 170 | 171 | # Custom sidebar templates, maps document names to template names. 172 | #html_sidebars = {} 173 | 174 | # Additional templates that should be rendered to pages, maps page names to 175 | # template names. 176 | #html_additional_pages = {} 177 | 178 | # If false, no module index is generated. 179 | html_domain_indices = False 180 | 181 | # If false, no index is generated. 182 | #html_use_index = True 183 | 184 | # If true, the index is split into individual pages for each letter. 185 | #html_split_index = False 186 | 187 | # If true, links to the reST sources are added to the pages. 
188 | html_show_sourcelink = False 189 | 190 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 191 | #html_show_sphinx = True 192 | 193 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 194 | #html_show_copyright = True 195 | 196 | # If true, an OpenSearch description file will be output, and all pages will 197 | # contain a tag referring to it. The value of this option must be the 198 | # base URL from which the finished HTML is served. 199 | #html_use_opensearch = '' 200 | 201 | # variables to pass to HTML templating engine 202 | build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False))) 203 | 204 | html_context = {'build_dev_html': build_dev_html} 205 | 206 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 207 | #html_file_suffix = None 208 | 209 | # Output file base name for HTML help builder. 210 | htmlhelp_basename = 'PySurferdoc' 211 | 212 | 213 | # -- Options for LaTeX output --------------------------------------------- 214 | 215 | # The paper size ('letter' or 'a4'). 216 | # latex_paper_size = 'letter' 217 | 218 | # The font size ('10pt', '11pt' or '12pt'). 219 | # latex_font_size = '10pt' 220 | 221 | # Grouping the document tree into LaTeX files. List of tuples 222 | # (source start file, target name, title, author, documentclass 223 | # [howto/manual]). 224 | latex_documents = [ 225 | ('index', 'PySurfer.tex', u'PySurfer Documentation', 226 | u'Michael Waskom, Alexandre Gramfort, Scott Burns, Martin Luessi', 'manual'), 227 | ] 228 | 229 | # The name of an image file (relative to this directory) to place at the top of 230 | # the title page. 231 | #latex_logo = None 232 | 233 | # For "manual" documents, if this is true, then toplevel headings are parts, 234 | # not chapters. 235 | # latex_toplevel_sectioning = 'part' 236 | 237 | # Additional stuff for the LaTeX preamble. 238 | # latex_preamble = '' 239 | 240 | # Documents to append as an appendix to all manuals. 
241 | # latex_appendices = [] 242 | 243 | # If false, no module index is generated. 244 | # latex_domain_indices = True 245 | 246 | trim_doctests_flags = True 247 | 248 | # Example configuration for intersphinx: refer to the Python standard library. 249 | intersphinx_mapping = { 250 | # 'python': ('http://docs.python.org/', None), 251 | # 'numpy': ('http://docs.scipy.org/doc/numpy-dev/', None), 252 | # 'scipy': ('http://scipy.github.io/devdocs/', None), 253 | 'matplotlib': ('https://matplotlib.org', None), 254 | 'imageio': ('https://imageio.readthedocs.io/en/latest', None), 255 | 'mayavi': ('https://docs.enthought.com/mayavi/mayavi', None), 256 | 'nibabel': ('https://nipy.org/nibabel', None), 257 | } 258 | 259 | # One entry per manual page. List of tuples 260 | # (source start file, name, description, authors, manual section). 261 | man_pages = [ 262 | ('index', 'pysurfer', u'PySurfer Documentation', 263 | [u'PySurfer Contributors'], 1) 264 | ] 265 | 266 | examples_dirs = ['../examples'] 267 | gallery_dirs = ['auto_examples'] 268 | 269 | try: 270 | from mayavi import mlab 271 | # Do not pop up any mayavi windows while running the 272 | # examples. These are very annoying since they steal the focus. 
273 | mlab.options.offscreen = True 274 | scrapers = ('matplotlib', 'mayavi') 275 | except Exception: 276 | scrapers = ('matplotlib',) 277 | 278 | sphinx_gallery_conf = { 279 | 'doc_module': ('surfer',), 280 | 'reference_url': {'surfer': None}, 281 | 'examples_dirs': examples_dirs, 282 | 'gallery_dirs': gallery_dirs, 283 | 'within_subsection_order': FileNameSortKey, 284 | 'image_scrapers': scrapers, 285 | 'default_thumb_file': os.path.join('_static', 'pysurfer_logo_small.png'), 286 | 'backreferences_dir': 'generated', 287 | 'download_section_examples': False, 288 | 'thumbnail_size': (250, 250), 289 | } 290 | 291 | numpydoc_class_members_toctree = False 292 | numpydoc_show_inherited_class_members = False 293 | 294 | 295 | # ----------------------------------------------------------------------------- 296 | # Source code links (adapted from SciPy (doc/source/conf.py)) 297 | # ----------------------------------------------------------------------------- 298 | 299 | def linkcode_resolve(domain, info): 300 | """ 301 | Determine the URL corresponding to Python object 302 | """ 303 | if domain != 'py': 304 | return None 305 | 306 | modname = info['module'] 307 | fullname = info['fullname'] 308 | 309 | submod = sys.modules.get(modname) 310 | if submod is None: 311 | return None 312 | 313 | obj = submod 314 | for part in fullname.split('.'): 315 | try: 316 | obj = getattr(obj, part) 317 | except: 318 | return None 319 | 320 | try: 321 | fn = inspect.getsourcefile(obj) 322 | except: 323 | fn = None 324 | if not fn: 325 | try: 326 | fn = inspect.getsourcefile(sys.modules[obj.__module__]) 327 | except: 328 | fn = None 329 | if not fn: 330 | return None 331 | 332 | try: 333 | source, lineno = inspect.getsourcelines(obj) 334 | except: 335 | lineno = None 336 | 337 | if lineno: 338 | linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) 339 | else: 340 | linespec = "" 341 | 342 | fn = relpath(fn, start=dirname(surfer.__file__)) 343 | 344 | return 
"http://github.com/nipy/PySurfer/blob/master/surfer/%s%s" % ( # noqa 345 | fn, linespec) 346 | -------------------------------------------------------------------------------- /doc/documentation/command_line.rst: -------------------------------------------------------------------------------- 1 | .. _command_line: 2 | 3 | .. currentmodule:: surfer 4 | 5 | The command-line interface 6 | ========================== 7 | 8 | Introduction 9 | ------------ 10 | 11 | The command-line program ``pysurfer`` is designed to largely replicate 12 | Freesufer's tksurfer command-line interface in the format and style 13 | of arguments it accepts. Like tksurfer, invoking it will initialize 14 | a visualization in an external window and begin an IPython session in the 15 | terminal, through which the visualization can be manipulated. 16 | 17 | Initializing a simple visualization session is quite easy. Simply call 18 | ``pysurfer`` with three positional arguments: the subject_id, the 19 | hemisphere, and the surface geometry to visualize:: 20 | 21 | pysurfer fsaverage lh inflated 22 | 23 | which will open a viewing session that looks like this: 24 | 25 | .. image:: ../_static/basic_session.png 26 | 27 | Manipulating the visualization 28 | ------------------------------ 29 | 30 | Once the viewer window is open, there are two ways to manipulate the 31 | visualization. To see other angles, zoom in or out, and translate the 32 | brain, simply click and drag with your mouse. See the `Mayavi documentation 33 | `_ for more information about using the 35 | mouse and keyboard to interact with a Mayavi scence. 36 | 37 | When pysurfer finishes loading the visualization, it initializes an 38 | IPython session in terminal, which allows for a more comprehensive 39 | interaction with the scene. The IPython interactive namespace will 40 | include a ``brain`` variable, which is bound to the :class:`Brain` 41 | object underlying the visualization. 
For convenience, the ``b`` variable 42 | is also mapped to this object. (As a reminder, you can always type 43 | ``whos`` in an IPython prompt to see the contents of the interactive 44 | namespace.) See the :class:`Brain` documentation for full information 45 | about how to control the visualization in this way. 46 | 47 | Other command-line options 48 | -------------------------- 49 | 50 | As in tksurfer, most aspects of the visualization can be initialized 51 | from the command-line. To get a full documentation of the command-line 52 | interface, simply type ``pysurfer`` at a terminal prompt and hit enter. 53 | -------------------------------------------------------------------------------- /doc/documentation/custom_viz.rst: -------------------------------------------------------------------------------- 1 | .. _custom_viz: 2 | 3 | .. currentmodule:: surfer 4 | 5 | Customizing the Visualization 6 | ============================= 7 | 8 | One advantage to PySurfer over Tksurfer is that you are not 9 | limited to a single look for the visualization. Of course, being 10 | built on Mayavi, PySurfer is in theory completely customizable. 11 | However, we also offer a few preset options so that you do not 12 | have to delve into the underlying engine to get a different look. 13 | 14 | Changing the display background 15 | ------------------------------- 16 | 17 | The display background can take any valid matplotlib color (i.e., 18 | it can be a tuple of rgb values, an rgb hex string, or a named HTML 19 | color). 20 | 21 | Changing the display size 22 | ------------------------- 23 | 24 | The default display window is 800px by 800px, but this can be configured 25 | using the ``size`` keyword argument in the Brain constructor. ``size`` 26 | should either be a single number to make a square window, or a pair of 27 | values, ``(width, height)``, to make a rectangular window. 
28 | 29 | Changing the curvature color scheme 30 | ----------------------------------- 31 | 32 | By default, a new :class:`Brain` instance displays the binarized 33 | cortical curvature values, so you can see which patches of cortex 34 | are gyri and which are sulci (pass ``curv=False`` to the 35 | :class:`Brain` constructor, or use the ``-no-curv`` switch in the 36 | command-line interface to turn this off). There are four preset 37 | themes for the curvature color scheme, which you can pass to the 38 | ``cortex`` parameter in the :class:`Brain` constructor: ``classic``, 39 | ``bone``, ``high_contrast``, and ``low_contrast``: 40 | 41 | .. image:: ../_static/cortex_options.png 42 | 43 | Note that, in each theme, the darker color signifies sulci. 44 | 45 | It's also possible to customize this further by passing the name of 46 | a mayavi colormap or a colormap name along with the endpoints of the 47 | colormap and whether it should be reversed. 48 | 49 | Additionally, you can load a continuous curvature map with the 50 | :meth:`Brain.add_morphometry` method. 51 | 52 | How to use these themes 53 | ----------------------- 54 | 55 | These options can be selected either as keyword arguments to the 56 | :class:`Brain` constructor, 57 | 58 | .. code-block:: python 59 | 60 | >>> from surfer import Brain 61 | >>> b = Brain('fsaverage', 'lh', 'inflated', cortex='bone') 62 | 63 | or as options in the command-line interface:: 64 | 65 | .. code-block:: bash 66 | 67 | $ pysurfer fsaverage lh inflated -background slategray -size 400 68 | 69 | -------------------------------------------------------------------------------- /doc/documentation/index.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | Detailed Documentation 4 | ====================== 5 | 6 | .. _doc-index: 7 | 8 | .. 
toctree:: 9 | 10 | ./command_line 11 | ./custom_viz 12 | ./split_brain 13 | -------------------------------------------------------------------------------- /doc/documentation/split_brain.rst: -------------------------------------------------------------------------------- 1 | .. _split_brain: 2 | 3 | Working with a split-screen brain 4 | ================================= 5 | 6 | The split-screen view can be activated by using the argument ``hemi='split'``. 7 | Using this option will put views of the left hemisphere in consecutive 8 | vertical frames on the left, and views of the right hemisphere in 9 | consecutive vertical frames on the right. For example, running the following:: 10 | 11 | brain = Brain('fsaverage', 'split', 'inflated', views=['lat', 'med']) 12 | 13 | Will produce a window with two columns (hemispheres) and two rows (the 14 | lateral and medial views, respectively), shown below. 15 | 16 | .. image:: ../_static/split_view.png 17 | 18 | Adding and displaying data 19 | -------------------------- 20 | 21 | Data can be added to either hemisphere using the same functions that are 22 | normally used, e.g. ``add_data``, ``add_overlay``, ``add_morphometry``. 23 | The objects are automatically shown on all views of the brain. When 24 | calling these functions, the ``hemi`` keyword argument can be set to 25 | ``hemi='lh'`` or ``hemi='rh'`` to specify the hemisphere to plot to. 26 | In some instances (e.g., ``add_morphometry``), if no keyword argument 27 | is provided, PySurfer will attempt to load data or both hemispheres 28 | automtically. 29 | 30 | Note that the ``show_view`` method accepts arguments for the ``row`` and 31 | ``col`` values, which allow the user to control which ``Brain`` panel 32 | gets the updated view. 33 | 34 | Caveats 35 | ------- 36 | The multi-view support is available thanks to the capabilities of the 37 | TraitsUI framework. 
However, due to some limitations in the implementation 38 | of TraitsUI, there is no guarantee that a set of scripted commands will 39 | result in a painted window when the user may think it will. For 40 | example, making a series of calls to ``brain.add_label()`` followed by 41 | ``brain.save_image('out.png')`` may result in some or all of the labels 42 | being absent from the saved ``out.png``. While we have implemented some 43 | workarounds to help prevent this occurrance, we cannot guarantee it will 44 | work. Thus we recommend that for critical non-interactive plotting (e.g., 45 | if scripting figure generation for a paper) only a single view is used 46 | with ``hemi`` set to ``'lh'``, ``'rh'``, or ``'both'``. This will use a single, 47 | pure Mayavi window, thereby bypassing TraisUI entirely -- this helps 48 | guarantee that drawing commands result in updated visual display. 49 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | PySurfer is a Python library for visualizing cortical surface representations 5 | of neuroimaging data. The package is primarily intended for use with 6 | `Freesurfer `_, but it can plot data that 7 | are drawn from a variety of sources. PySurfer extends `Mayavi's 8 | `_ powerful rendering 9 | engine with a high-level interface for working with MRI and MEG data. 10 | 11 | More Information 12 | ---------------- 13 | 14 | .. 
toctree:: 15 | :maxdepth: 1 16 | 17 | intro 18 | install 19 | auto_examples/index.rst 20 | documentation/index.rst 21 | python_reference.rst 22 | changes.rst 23 | 24 | Authors 25 | ------- 26 | 27 | - Michael Waskom, New-York University (NYU) 28 | - Alexandre Gramfort, Inria, CEA - Neurospin 29 | - Scott Burns, Vanderbilt University 30 | - Martin Luessi, Harvard Medical School MGH Martinos Center 31 | - Eric Larson, University of Washington ILABS 32 | 33 | License 34 | ------- 35 | 36 | The PySurfer source code is available under the Revised BSD (3-Clause) license 37 | 38 | Support 39 | ------- 40 | 41 | If you have problems installing the software or questions about usage, 42 | documentation or something else related to PySurfer, you can post to the NiPy 43 | mailing list. Please preface the subject line with "[PySurfer]". 44 | 45 | :Mailing list: neuroimaging@python.org [subscription_, archive_] 46 | 47 | .. _subscription: https://mail.python.org/mailman/listinfo/neuroimaging 48 | .. _archive: http://mail.python.org/pipermail/neuroimaging 49 | 50 | Contribute 51 | ---------- 52 | 53 | https://github.com/nipy/PySurfer/ 54 | -------------------------------------------------------------------------------- /doc/install.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | .. _install: 4 | 5 | Installing and Getting Started 6 | ============================== 7 | 8 | PySurfer can be installed with pip_. Note that the package name on PyPi is different from the library name that you import:: 9 | 10 | pip install pysurfer 11 | 12 | If you already have PySurfer installed, you can also use pip to update it:: 13 | 14 | pip install -U --no-deps pysurfer 15 | 16 | If you would like to save movies of time course data, it is necessary to include the optional dependency ``imageio`` with:: 17 | 18 | pip install pysurfer[save_movie] 19 | 20 | If you'd like to install the development version, you have two options. 
You can 21 | install straight from github:: 22 | 23 | pip install https://api.github.com/repos/nipy/PySurfer/zipball/master 24 | 25 | Or you can clone the `git repository `_ and 26 | install from your local source directory:: 27 | 28 | pip install . 29 | 30 | Dependencies 31 | ~~~~~~~~~~~~ 32 | 33 | PySurfer works on Python 3.6+ and requires the following Python packages: 34 | 35 | * numpy_ 36 | * scipy_ 37 | * nibabel_ 38 | * mayavi_ 39 | * matplotlib_ 40 | 41 | Some input/output functions also make use of the Python Imaging Library (PIL_) 42 | and ``imageio``, although they are not mandatory. 43 | 44 | Getting started 45 | ~~~~~~~~~~~~~~~ 46 | 47 | Because PySurfer relies on some complicated dependencies (Mayavi, VTK and a GUI 48 | library), it can be more difficult to get started with than is the case with 49 | other Python libraries. Consider using the Anaconda_ distribution 50 | or Enthough Canopy_ environment. The difficulty on these 51 | platforms is generally getting Mayavi and VTK installed; see their 52 | installation instructions for information. 53 | 54 | PySurfer generally works out of the box on Linux systems. Getting started on 55 | OSX may be trickier. We have had success using the Anaconda distribution with 56 | the additional step of setting the environment variables ``QT_API`` and ``ETS_TOOLKIT``, e.g.:: 57 | 58 | export QT_API=pyqt 59 | export ETS_TOOLKIT=qt4 60 | 61 | The values you set should match the GUI library you are using. 62 | 63 | You may wish to consult the `Mayavi installation docs 64 | `_ if you are having 65 | trouble getting things working. 66 | 67 | If you are using PySurfer interactively in IPython_/Jupyter, you should 68 | activate one of the GUI event loops so that the Mayavi window runs in a 69 | separate process. 
After starting IPython (either in the terminal, qtconsole, or 70 | notebook), you have to activate the correct GUI backend, which is probably qt:: 71 | 72 | %gui qt 73 | 74 | This will allow you to have an open PySurfer window while still being able to 75 | execute code in the console/notebook. 76 | 77 | It is also possible to embed the PySurfer visualization into a Jupyter notebook. 78 | This is achieved by leveraging `Mayavi's notebook integration 79 | `_:: 80 | 81 | from mayavi import mlab 82 | mlab.init_notebook(backend='png') 83 | 84 | The ``backend`` parameter can either be ``'png'`` to render the visualization 85 | as a static PNG image, or ``'x3d'`` to render it using 86 | `X3D `_ (still experimental). 87 | 88 | If you are having trouble getting started using PySurfer, please describe the problem on the `nipy mailing list`_. 89 | 90 | .. include:: links_names.txt 91 | -------------------------------------------------------------------------------- /doc/intro.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | .. _intro: 4 | 5 | What is PySurfer? 6 | ================= 7 | 8 | PySurfer is a Python library and application for visualizing brain imaging 9 | data. It is specifically useful for plotting data on a three-dimensional mesh 10 | representing the cortical surface of the brain. If you have functional MRI, 11 | magnetoencephalography, or anatomical measurements from cortex, PySurfer can 12 | help you turn them into beautiful and reproducible graphics. 13 | 14 | PySurfer uses an explicit model of cortical geometry to generate 15 | highly-accurate images of neuroimaging data. This is preferable to other 16 | approaches that use simple 3D renderings of a brain volume because the 17 | underlying topology of the cortex is a two-dimensional sheet. 
PySurfer can 18 | read cortical models that have been processed using Freesurfer_ to "inflate" 19 | the cortical folds and reveal activations that are buried within deep sulci. 20 | This presentation is much closer to how cortical areas are laid out, and it can 21 | help you understand and communicate your efforts to map functional or 22 | morphometric organization. 23 | 24 | PySurfer and its dependencies are written in Python and released with a liberal 25 | open source license. PySurfer can be combined with other tools from the nipy_ 26 | ecosystem to manipulate and plot data in the same script or interactive 27 | session. The visualization is primarily controlled with a high-level API that 28 | allow you to draw a complex scene with just a few lines of code. This means 29 | that PySurfer is naturally scriptable. Once you have developed a basic 30 | visualization, it's easy to add a for-loop and generate the same image for 31 | every subject in your experiment. It also aids the reproducibility of graphics 32 | you create for conferences or papers, as each figure can be associated with a 33 | short script that shows exactly how the visualization was produced. 34 | 35 | To see a set of examples demonstrating some of PySurfer's capabilities, you can 36 | check out the :ref:`example gallery `. 37 | 38 | .. include:: links_names.txt 39 | 40 | -------------------------------------------------------------------------------- /doc/links_names.txt: -------------------------------------------------------------------------------- 1 | .. This (-*- rst -*-) format file contains commonly used link targets 2 | and name substitutions. It may be included in many files, 3 | therefore it should only contain link targets and name 4 | substitutions. Try grepping for "^\.\. _" to find plausible 5 | candidates for this list. 6 | 7 | .. NOTE: reST targets are 8 | __not_case_sensitive__, so only one target definition is needed for 9 | nipy, NIPY, Nipy, etc... 10 | 11 | .. 
_nipy: http://nipy.org 12 | .. _`NIPY developer resources`: http://nipy.org/devel 13 | .. _`Brain Imaging Center`: http://bic.berkeley.edu/ 14 | .. _nitime: http://nipy.sourceforge.net/nitime/ 15 | .. _nibabel: http://nipy.sourceforge.net/nibabel/ 16 | .. _ConnectomeViewer: http://www.connectomeviewer.org/viewer/ 17 | 18 | .. Documentation tools 19 | .. _graphviz: http://www.graphviz.org/ 20 | .. _Sphinx: http://sphinx.pocoo.org/ 21 | .. _`Sphinx reST`: http://sphinx.pocoo.org/rest.html 22 | .. _reST: http://docutils.sourceforge.net/rst.html 23 | .. _docutils: http://docutils.sourceforge.net 24 | 25 | .. Licenses 26 | .. _GPL: http://www.gnu.org/licenses/gpl.html 27 | .. _BSD: http://www.opensource.org/licenses/bsd-license.php 28 | .. _LGPL: http://www.gnu.org/copyleft/lesser.html 29 | 30 | .. Working process 31 | .. _pynifti: http://niftilib.sourceforge.net/pynifti/ 32 | .. _nifticlibs: http://nifti.nimh.nih.gov 33 | .. _nifti: http://nifti.nimh.nih.gov 34 | .. _`nipy sourceforge`: http://nipy.sourceforge.net/ 35 | .. _sourceforge: http://nipy.sourceforge.net/ 36 | .. _`nipy launchpad`: https://launchpad.net/nipy 37 | .. _launchpad: https://launchpad.net/ 38 | .. _`nipy trunk`: https://code.launchpad.net/~nipy-developers/nipy/trunk 39 | .. _`nipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging 40 | .. _`nipy bugs`: https://bugs.launchpad.net/nipy 41 | 42 | .. Code support stuff 43 | .. _pychecker: http://pychecker.sourceforge.net/ 44 | .. _pylint: http://www.logilab.org/project/pylint 45 | .. _pyflakes: http://divmod.org/trac/wiki/DivmodPyflakes 46 | .. _virtualenv: http://pypi.python.org/pypi/virtualenv 47 | .. _git: http://git.or.cz/ 48 | .. _flymake: http://flymake.sourceforge.net/ 49 | .. _rope: http://rope.sourceforge.net/ 50 | .. _pymacs: http://pymacs.progiciels-bpi.ca/pymacs.html 51 | .. _ropemacs: http://rope.sourceforge.net/ropemacs.html 52 | .. _ECB: http://ecb.sourceforge.net/ 53 | .. 
_emacs_python_mode: http://www.emacswiki.org/cgi-bin/wiki/PythonMode 54 | .. _doctest-mode: http://www.cis.upenn.edu/~edloper/projects/doctestmode/ 55 | .. _bazaar: http://bazaar-vcs.org/ 56 | .. _subversion: http://subversion.tigris.org/ 57 | .. _nose: http://somethingaboutorange.com/mrl/projects/nose 58 | .. _`python coverage tester`: http://nedbatchelder.com/code/modules/coverage.html 59 | .. _easy_install: http://peak.telecommunity.com/DevCenter/EasyInstall 60 | .. _pip: http://pypi.python.org/pypi/pip 61 | 62 | .. Other python projects 63 | .. _numpy: http://www.scipy.org/NumPy 64 | .. _scipy: http://www.scipy.org 65 | .. _ipython: http://ipython.scipy.org 66 | .. _`ipython manual`: http://ipython.scipy.org/doc/manual/html 67 | .. _matplotlib: http://matplotlib.sourceforge.net 68 | .. _ETS: http://code.enthought.com/projects/tool-suite.php 69 | .. _`Enthought Tool Suite`: http://code.enthought.com/projects/tool-suite.php 70 | .. _python: http://www.python.org 71 | .. _mayavi: http://mayavi.sourceforge.net/ 72 | .. _sympy: http://code.google.com/p/sympy/ 73 | .. _networkx: http://networkx.lanl.gov/ 74 | .. _pythonxy: http://www.pythonxy.com/ 75 | .. _EPD: http://www.enthought.com/products/epd.php 76 | .. _Traits: http://code.enthought.com/projects/traits/ 77 | .. _PIL: http://www.pythonware.com/products/pil/ 78 | .. _Anaconda: https://store.continuum.io/cshop/anaconda/ 79 | .. _Canopy: https://www.enthought.com/products/canopy/ 80 | 81 | .. Python imaging projects 82 | .. _PyMVPA: http://www.pymvpa.org 83 | .. _BrainVISA: http://brainvisa.info 84 | .. _anatomist: http://brainvisa.info 85 | 86 | .. Not so python imaging projects 87 | .. _matlab: http://www.mathworks.com 88 | .. _spm: http://www.fil.ion.ucl.ac.uk/spm 89 | .. _eeglab: http://sccn.ucsd.edu/eeglab 90 | .. _AFNI: http://afni.nimh.nih.gov/afni 91 | .. _FSL: http://www.fmrib.ox.ac.uk/fsl 92 | .. _FreeSurfer: http://surfer.nmr.mgh.harvard.edu 93 | .. _voxbo: http://www.voxbo.org 94 | .. 
_Slicer: http://slicer.org 95 | .. _Camino: http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php 96 | .. _Camino2Trackvis: http://camino-trackvis.sourceforge.net/ 97 | 98 | .. General software 99 | .. _gcc: http://gcc.gnu.org 100 | .. _xcode: http://developer.apple.com/TOOLS/xcode 101 | .. _mingw: http://www.mingw.org 102 | .. _macports: http://www.macports.org/ 103 | 104 | .. Functional imaging labs 105 | .. _`functional imaging laboratory`: http://www.fil.ion.ucl.ac.uk 106 | .. _FMRIB: http://www.fmrib.ox.ac.uk 107 | 108 | .. Other organizations 109 | .. _enthought: http://www.enthought.com 110 | .. _kitware: http://www.kitware.com 111 | 112 | .. General information links 113 | .. _`wikipedia FMRI`: http://en.wikipedia.org/wiki/Functional_magnetic_resonance_imaging 114 | .. _`wikipedia PET`: http://en.wikipedia.org/wiki/Positron_emission_tomography 115 | 116 | .. Mathematical methods 117 | .. _`wikipedia ICA`: http://en.wikipedia.org/wiki/Independent_component_analysis 118 | .. _`wikipedia PCA`: http://en.wikipedia.org/wiki/Principal_component_analysis 119 | 120 | .. Nipype Paper 121 | .. 
_paper: http://www.frontiersin.org/Neuroinformatics/10.3389/fninf.2011.00013/abstract 122 | -------------------------------------------------------------------------------- /doc/logo_files/activation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/activation.png -------------------------------------------------------------------------------- /doc/logo_files/annot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/annot.png -------------------------------------------------------------------------------- /doc/logo_files/banner.pages: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/banner.pages -------------------------------------------------------------------------------- /doc/logo_files/banner.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/banner.pdf -------------------------------------------------------------------------------- /doc/logo_files/banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/banner.png -------------------------------------------------------------------------------- /doc/logo_files/banner_serif.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/banner_serif.pdf 
-------------------------------------------------------------------------------- /doc/logo_files/banner_serif.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/banner_serif.png -------------------------------------------------------------------------------- /doc/logo_files/brain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/brain.png -------------------------------------------------------------------------------- /doc/logo_files/contours.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/contours.png -------------------------------------------------------------------------------- /doc/logo_files/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/favicon.ico -------------------------------------------------------------------------------- /doc/logo_files/green_activation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/green_activation.png -------------------------------------------------------------------------------- /doc/logo_files/peaks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/peaks.png -------------------------------------------------------------------------------- /doc/logo_files/pysurfer_logo_small.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/pysurfer_logo_small.png -------------------------------------------------------------------------------- /doc/logo_files/pysurfer_logo_small.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/pysurfer_logo_small.psd -------------------------------------------------------------------------------- /doc/logo_files/pysurfer_logo_small_crop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/pysurfer_logo_small_crop.png -------------------------------------------------------------------------------- /doc/logo_files/thickness.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/doc/logo_files/thickness.png -------------------------------------------------------------------------------- /doc/python_reference.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | .. _api_reference: 4 | 5 | ==================== 6 | Python API Reference 7 | ==================== 8 | 9 | This is the reference for classes (``CamelCase`` names) and functions 10 | (``underscore_case`` names) of PySurfer. 11 | 12 | :py:mod:`surfer`: 13 | 14 | .. currentmodule:: surfer 15 | 16 | .. automodule:: surfer 17 | :no-members: 18 | :no-inherited-members: 19 | 20 | .. autosummary:: 21 | :toctree: generated/ 22 | :template: class.rst 23 | 24 | Brain 25 | 26 | .. autosummary:: 27 | :toctree: generated/ 28 | :template: class_noinherited.rst 29 | 30 | TimeViewer 31 | 32 | .. 
autosummary:: 33 | :toctree: generated/ 34 | :template: function.rst 35 | 36 | project_volume_data 37 | 38 | :py:mod:`surfer.io`: 39 | 40 | .. currentmodule:: surfer.io 41 | 42 | .. automodule:: surfer.io 43 | :no-members: 44 | :no-inherited-members: 45 | 46 | .. autosummary:: 47 | :toctree: generated/ 48 | :template: function.rst 49 | 50 | read_scalar_data 51 | read_stc 52 | 53 | :py:mod:`surfer.utils`: 54 | 55 | .. currentmodule:: surfer.utils 56 | 57 | .. automodule:: surfer.utils 58 | :no-members: 59 | :no-inherited-members: 60 | 61 | .. autosummary:: 62 | :toctree: generated/ 63 | :template: function.rst 64 | 65 | coord_to_label 66 | -------------------------------------------------------------------------------- /doc/surfer.cfg: -------------------------------------------------------------------------------- 1 | [visual] 2 | width=600 3 | height=600 4 | background=black 5 | cortex=classic 6 | default_view=lateral 7 | -------------------------------------------------------------------------------- /examples/README.txt: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Example gallery 4 | =============== 5 | 6 | These example scripts demonstrate the functionality offered by PySurfer, 7 | from the most basic tasks to more complex visualizations. 8 | 9 | If you have used PySurfer to present neuroimaging data in a way that isn't 10 | covered here, please contribute_ it! 11 | 12 | .. contents:: Contents 13 | :local: 14 | :depth: 2 15 | 16 | .. _contribute: https://github.com/nipy/PySurfer 17 | -------------------------------------------------------------------------------- /examples/example_data/README.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | PySurfer Example Data 4 | ===================== 5 | 6 | This directory contains a few lightweight files that support the automatic 7 | examples for PySurfer. 
8 | -------------------------------------------------------------------------------- /examples/example_data/lh.alt_sig.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/lh.alt_sig.nii.gz -------------------------------------------------------------------------------- /examples/example_data/lh.curv.fsaverage.mgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/lh.curv.fsaverage.mgz -------------------------------------------------------------------------------- /examples/example_data/lh.sig.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/lh.sig.nii.gz -------------------------------------------------------------------------------- /examples/example_data/mask.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/mask.nii.gz -------------------------------------------------------------------------------- /examples/example_data/meg_source_estimate-lh.stc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/meg_source_estimate-lh.stc -------------------------------------------------------------------------------- /examples/example_data/meg_source_estimate-rh.stc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/meg_source_estimate-rh.stc -------------------------------------------------------------------------------- /examples/example_data/register.dat: -------------------------------------------------------------------------------- 1 | fsaverage 2 | 1.000000 3 | 1.000000 4 | 0.150000 5 | 9.975314e-01 -7.324822e-03 1.760415e-02 9.570923e-01 6 | -1.296475e-02 -9.262221e-03 9.970638e-01 -1.781596e+01 7 | -1.459537e-02 -1.000945e+00 2.444772e-03 -1.854964e+01 8 | 0 0 0 1 9 | tkregister 10 | -------------------------------------------------------------------------------- /examples/example_data/resting_corr.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/resting_corr.nii.gz -------------------------------------------------------------------------------- /examples/example_data/rh.curv.fsaverage.mgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/rh.curv.fsaverage.mgz -------------------------------------------------------------------------------- /examples/example_data/zstat.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/examples/example_data/zstat.nii.gz -------------------------------------------------------------------------------- /examples/plot_basics.py: -------------------------------------------------------------------------------- 1 | """ 2 | Basic Visualization 3 | =================== 4 | 5 | Initialize a basic visualization session. 
6 | 7 | """ 8 | from surfer import Brain 9 | 10 | print(__doc__) 11 | 12 | """ 13 | Define the three important variables. 14 | Note that these are the first three positional arguments 15 | in tksurfer (and pysurfer for that matter). 16 | """ 17 | subject_id = 'fsaverage' 18 | hemi = 'lh' 19 | surf = 'inflated' 20 | 21 | """ 22 | Call the Brain object constructor with these 23 | parameters to initialize the visualization session. 24 | """ 25 | brain = Brain(subject_id, hemi, surf) 26 | -------------------------------------------------------------------------------- /examples/plot_custom_colors.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================= 3 | Plot RGBA values on brain surface 4 | ================================= 5 | 6 | In this example, each vertex on a 3D brain is plotted with a different 7 | RGBA value. Hue varies along the x-axis (right/left direction) and 8 | alpha varies along the z-axis (up/down direction). However, this can be 9 | easily generalised to other use cases. 
10 | 11 | """ 12 | import os 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | from mayavi import mlab 16 | from tvtk.api import tvtk 17 | from tvtk.common import configure_input_data 18 | from surfer import Brain 19 | 20 | print(__doc__) 21 | 22 | # 1) define helper functions 23 | 24 | 25 | def norm(x): 26 | '''Normalise array betweeen 0-1''' 27 | return (x - np.min(x)) / (np.max(x) - np.min(x)) 28 | 29 | 30 | # 2) init brain and get spatial co-ordinates 31 | 32 | # params 33 | subjects_dir = os.environ['SUBJECTS_DIR'] 34 | hemi = 'lh' 35 | surf = 'white' 36 | 37 | # init figure 38 | fig = mlab.figure() 39 | b = Brain('fsaverage', hemi, surf, subjects_dir=subjects_dir, 40 | background='white', figure=fig) 41 | 42 | # co-ordinates 43 | x, y, z = b.geo[hemi].coords.T 44 | tris = b.geo[hemi].faces 45 | 46 | 47 | # 3) generate an rgba matrix, of shape n_vertices x 4 48 | 49 | # define color map 50 | cmap = plt.cm.viridis 51 | 52 | # change colour based on position on the x axis 53 | hue = norm(x) 54 | colors = cmap(hue)[:, :3] 55 | 56 | # change alpha based on position on the z axis 57 | alpha = norm(z) 58 | 59 | # combine hue and alpha into a Nx4 matrix 60 | rgba_vals = np.concatenate((colors, alpha[:, None]), axis=1) 61 | 62 | 63 | # 4) add data to plot 64 | 65 | # plot points in x,y,z 66 | mesh = mlab.pipeline.triangular_mesh_source( 67 | x, y, z, tris, figure=fig) 68 | mesh.data.point_data.scalars.number_of_components = 4 # r, g, b, a 69 | mesh.data.point_data.scalars = (rgba_vals * 255).astype('ubyte') 70 | 71 | # tvtk for vis 72 | mapper = tvtk.PolyDataMapper() 73 | configure_input_data(mapper, mesh.data) 74 | actor = tvtk.Actor() 75 | actor.mapper = mapper 76 | fig.scene.add_actor(actor) 77 | -------------------------------------------------------------------------------- /examples/plot_fmri_activation.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======================= 3 | Display fMRI 
Activation 4 | ======================= 5 | 6 | The most straightforward way to plot activations is when you already have 7 | a map of them defined on the Freesurfer surface. This map can be stored in any 8 | file format that Nibabel can understand. 9 | 10 | """ 11 | from surfer import Brain 12 | 13 | print(__doc__) 14 | 15 | """ 16 | Bring up the visualization window. 17 | """ 18 | brain = Brain("fsaverage", "lh", "inflated") 19 | 20 | """ 21 | Get a path to the overlay file. 22 | """ 23 | overlay_file = "example_data/lh.sig.nii.gz" 24 | 25 | """ 26 | Display the overlay on the surface using the defaults to control thresholding 27 | and colorbar saturation. These can be set through your config file. 28 | """ 29 | brain.add_overlay(overlay_file) 30 | 31 | """ 32 | You can then turn the overlay off. 33 | """ 34 | brain.overlays["sig"].remove() 35 | 36 | """ 37 | Now add the overlay again, but this time with set threshold and showing only 38 | the positive activations. 39 | """ 40 | brain.add_overlay(overlay_file, min=5, max=20, sign="pos") 41 | -------------------------------------------------------------------------------- /examples/plot_fmri_activation_volume.py: -------------------------------------------------------------------------------- 1 | """ 2 | ====================== 3 | Display an fMRI Volume 4 | ====================== 5 | 6 | To plot data on the surface that is stored as a volume image, it is only 7 | slightly more complicated. You'll have to use the function 8 | ``surfer.project_volume_data``, which makes an external call to the Freesurfer 9 | program ``mri_vol2surf``. 10 | 11 | Note: In PySurfer versions 0.4 and lower, the project_volume_data function must 12 | be imported from ``surfer.io``. 13 | 14 | """ 15 | import os 16 | from surfer import Brain, project_volume_data 17 | 18 | print(__doc__) 19 | 20 | """ 21 | Bring up the visualization window. 
22 | """ 23 | brain = Brain("fsaverage", "lh", "inflated") 24 | 25 | """ 26 | Get a path to the volume file. 27 | """ 28 | volume_file = "example_data/zstat.nii.gz" 29 | 30 | """ 31 | There are two options for specifying the registration between the volume and 32 | the surface you want to plot on. The first is to give a path to a 33 | Freesurfer-style linear transformation matrix that will align the statistical 34 | volume with the Freesurfer anatomy. 35 | 36 | Most of the time you will be plotting data that are in MNI152 space on the 37 | fsaverage brain. For this case, Freesurfer actually ships a registration matrix 38 | file to align your data with the surface. 39 | """ 40 | reg_file = os.path.join(os.environ["FREESURFER_HOME"], 41 | "average/mni152.register.dat") 42 | zstat = project_volume_data(volume_file, "lh", reg_file) 43 | 44 | """ 45 | Note that the contours of the fsaverage surface don't perfectly match the 46 | MNI brain, so this will only approximate the location of your activation 47 | (although it generally does a pretty good job). A more accurate way to 48 | visualize data would be to run the MNI152 brain through the recon-all pipeline. 49 | 50 | Alternatively, if your data are already in register with the Freesurfer 51 | anatomy, you can provide project_volume_data with the subject ID, avoiding the 52 | need to specify a registration file. 53 | 54 | By default, 3mm of smoothing is applied on the surface to clean up the overlay 55 | a bit, although the extent of smoothing can be controlled. 56 | """ 57 | zstat = project_volume_data(volume_file, "lh", 58 | subject_id="fsaverage", smooth_fwhm=0.5) 59 | 60 | """ 61 | Once you have the statistical data loaded into Python, you can simply pass it 62 | to the `add_overlay` method of the Brain object. 
63 | """ 64 | brain.add_overlay(zstat, min=2, max=12) 65 | 66 | """ 67 | It can also be a good idea to plot the inverse of the mask that was used in the 68 | analysis, so you can be clear about areas that were not included. 69 | 70 | It's good to change some parameters of the sampling to account for the fact 71 | that you are projecting binary (0, 1) data. 72 | """ 73 | mask_file = "example_data/mask.nii.gz" 74 | mask = project_volume_data(mask_file, "lh", subject_id="fsaverage", 75 | smooth_fwhm=0, projsum="max").astype(bool) 76 | mask = ~mask 77 | brain.add_data(mask, min=0, max=10, thresh=.5, 78 | colormap="bone", alpha=.6, colorbar=False) 79 | 80 | brain.show_view("medial") 81 | -------------------------------------------------------------------------------- /examples/plot_fmri_conjunction.py: -------------------------------------------------------------------------------- 1 | """ 2 | Display a Conjunction Map 3 | ========================= 4 | 5 | Show the overlap between two different activation maps. 6 | 7 | This is one of the most commonly asked questions about 8 | Tksurfer on the Freesurfer mailing list. Below, we show 9 | how it can be accomplished in just a few lines of code 10 | with PySurfer. 11 | 12 | """ 13 | import os.path as op 14 | import numpy as np 15 | from surfer import io 16 | from surfer import Brain 17 | 18 | print(__doc__) 19 | 20 | """ 21 | Initialize the visualization. 22 | """ 23 | brain = Brain("fsaverage", "lh", "inflated", background="white") 24 | 25 | """ 26 | Read both of the activation maps in using 27 | surfer's io functions. 28 | """ 29 | sig1 = io.read_scalar_data(op.join('example_data', "lh.sig.nii.gz")) 30 | sig2 = io.read_scalar_data(op.join('example_data', "lh.alt_sig.nii.gz")) 31 | 32 | """ 33 | Zero out the vertices that do not meet a threshold. 
34 | """ 35 | thresh = 4 36 | sig1[sig1 < thresh] = 0 37 | sig2[sig2 < thresh] = 0 38 | 39 | """ 40 | A conjunction is defined as the minimum significance 41 | value between the two maps at each vertex. 42 | """ 43 | conjunct = np.min(np.vstack((sig1, sig2)), axis=0) 44 | 45 | 46 | """ 47 | Now load the numpy array as an overlay. 48 | Use a high saturation point so that the 49 | blob will largely be colored with lower 50 | values from the lookup table. 51 | """ 52 | brain.add_overlay(sig1, 4, 30, name="sig1") 53 | 54 | """ 55 | A pointer to the overlay's color manager 56 | gets stored in the overlays dictionary. 57 | Change the lookup table to "Reds" and turn the 58 | color bar itself off, as otherwise the bars 59 | for the three maps will get confusingly stacked. 60 | """ 61 | brain.overlays["sig1"].pos_bar.lut_mode = "Reds" 62 | brain.overlays["sig1"].pos_bar.visible = False 63 | 64 | """ 65 | Now load the other two maps and again change 66 | the lookup table and turn off the color bar itself. 67 | """ 68 | brain.add_overlay(sig2, 4, 30, name="sig2") 69 | brain.overlays["sig2"].pos_bar.lut_mode = "Blues" 70 | brain.overlays["sig2"].pos_bar.visible = False 71 | 72 | """ 73 | Display the overlap as purple, which is what you 74 | get when you mix red and blue. 75 | """ 76 | brain.add_overlay(conjunct, 4, 30, name="conjunct") 77 | brain.overlays["conjunct"].pos_bar.lut_mode = "Purples" 78 | brain.overlays["conjunct"].pos_bar.visible = False 79 | -------------------------------------------------------------------------------- /examples/plot_foci.py: -------------------------------------------------------------------------------- 1 | """ 2 | ==================== 3 | Plot Activation Foci 4 | ==================== 5 | 6 | Plot spheroids at positions on the surface manifold 7 | according to coordinates or vertex ids. 
8 | 9 | """ 10 | import os 11 | import os.path as op 12 | from numpy import arange 13 | from numpy.random import permutation 14 | import nibabel as nib 15 | from surfer import Brain 16 | 17 | print(__doc__) 18 | 19 | subject_id = "fsaverage" 20 | subjects_dir = os.environ["SUBJECTS_DIR"] 21 | 22 | """ 23 | Bring up the visualization. 24 | """ 25 | brain = Brain(subject_id, "lh", "inflated") 26 | 27 | """ 28 | First we'll get a set of stereotaxic foci in the MNI 29 | coordinate system. These might be peak activations from 30 | a volume based analysis. 31 | """ 32 | coords = [[-36, 18, -3], 33 | [-43, 25, 24], 34 | [-48, 26, -2]] 35 | 36 | """ 37 | Now we plot the foci on the inflated surface. We will map 38 | the foci onto the surface by finding the vertex on the "white" 39 | mesh that is closest to the coordinate of each point we want 40 | to display. 41 | 42 | While this is not a perfect transformation, it can give you 43 | some idea of where peaks from a volume-based analysis would 44 | be located on the surface. 45 | 46 | You can use any valid matplotlib color for the foci; the 47 | default is white. 48 | """ 49 | brain.add_foci(coords, map_surface="white", color="gold") 50 | 51 | """ 52 | You can also plot foci with a set of surface vertex ids. 53 | For instance, you might want to plot the peak activation 54 | within an ROI for each of your indivdiual subjects over 55 | the group activation map. 56 | 57 | Here, we will just demonstrate with a set of randomly 58 | choosen vertices from within the superior temporal sulcus. 59 | 60 | First, we load in the Destrieux parcellation annotation file 61 | and find 10 random vertices within the STS. 62 | """ 63 | annot_path = op.join(subjects_dir, subject_id, "label/lh.aparc.a2009s.annot") 64 | ids, ctab, names = nib.freesurfer.read_annot(annot_path) 65 | verts = arange(0, len(ids)) 66 | coords = permutation(verts[ids == 74])[:10] 67 | 68 | """ 69 | You can also control the size of the focus glpyhs. 
70 | We'll make these a little bit smaller than our 71 | other foci. 72 | """ 73 | scale_factor = 0.7 74 | 75 | """ 76 | Finally, plot the foci using the coords_as_verts option to 77 | center each sphereoid at its vertex id. 78 | """ 79 | brain.add_foci(coords, coords_as_verts=True, 80 | scale_factor=scale_factor, color="#A52A2A") 81 | -------------------------------------------------------------------------------- /examples/plot_freesurfer_normalization.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot Freesurfer Normalization 3 | ============================= 4 | 5 | This example shows how PySurfer can be used to examine the quality of 6 | Freesurfer's curvature-driven normalization to a common template. 7 | 8 | We are going to plot the contour of the subject's curvature estimate after 9 | transforming that map into the common space (this step is performed outside 10 | of PySurfer using the Freesurfer program ``mri_surf2surf``). 11 | 12 | With a perfect transformation, the contour lines should follow the light/dark 13 | gray boundary on the fsaverage surface. Large deviations may reflect problems 14 | with the underlying data that you should investigate. 
15 | 16 | """ 17 | import nibabel as nib 18 | from surfer import Brain 19 | 20 | print(__doc__) 21 | 22 | brain = Brain("fsaverage", "both", "inflated") 23 | 24 | for hemi in ["lh", "rh"]: 25 | 26 | # This file was created with mri_surf2surf 27 | curv = nib.load("example_data/%s.curv.fsaverage.mgz" % hemi) 28 | 29 | # Binarize the curvature at 0 30 | curv_bin = (curv.get_data() > 0).squeeze() 31 | 32 | # Add the data as a contour overlay, but turn off the colorbar 33 | brain.add_contour_overlay(curv_bin, min=0, max=1.5, n_contours=2, 34 | line_width=3, hemi=hemi) 35 | brain.contour_list[-1]["colorbar"].visible = False 36 | 37 | brain.show_view("dorsal") 38 | -------------------------------------------------------------------------------- /examples/plot_label.py: -------------------------------------------------------------------------------- 1 | """ 2 | Display ROI Labels 3 | ================== 4 | 5 | Using PySurfer you can plot Freesurfer cortical labels on the surface 6 | with a large amount of control over the visual representation. 7 | 8 | """ 9 | import os 10 | from surfer import Brain 11 | 12 | print(__doc__) 13 | 14 | subject_id = "fsaverage" 15 | hemi = "lh" 16 | surf = "smoothwm" 17 | brain = Brain(subject_id, hemi, surf) 18 | 19 | # If the label lives in the normal place in the subjects directory, 20 | # you can plot it by just using the name 21 | brain.add_label("BA1") 22 | 23 | # Some labels have an associated scalar value at each ID in the label. 24 | # For example, they may be probabilistically defined. 
You can threshold 25 | # what vertices show up in the label using this scalar data 26 | brain.add_label("BA1", color="blue", scalar_thresh=.5) 27 | 28 | # Or you can give a path to a label in an arbitrary location 29 | subj_dir = brain.subjects_dir 30 | label_file = os.path.join(subj_dir, subject_id, 31 | "label", "%s.MT.label" % hemi) 32 | brain.add_label(label_file) 33 | 34 | # By default the label is 'filled-in', but you can 35 | # plot just the label boundaries 36 | brain.add_label("BA44", borders=True) 37 | 38 | # You can also control the opacity of the label color 39 | brain.add_label("BA6", alpha=.7) 40 | 41 | # Finally, you can plot the label in any color you want. 42 | brain.show_view(dict(azimuth=-42, elevation=105, distance=225, 43 | focalpoint=[-30, -20, 15])) 44 | 45 | # Use any valid matplotlib color. 46 | brain.add_label("V1", color="steelblue", alpha=.6) 47 | brain.add_label("V2", color="#FF6347", alpha=.6) 48 | brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6) 49 | -------------------------------------------------------------------------------- /examples/plot_label_foci.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======================= 3 | Generate Surface Labels 4 | ======================= 5 | 6 | Define a label that is centered on a specific vertex in the surface mesh. Plot 7 | that label and the focus that defines its center. 8 | 9 | """ 10 | from surfer import Brain, utils 11 | 12 | print(__doc__) 13 | 14 | subject_id = "fsaverage" 15 | 16 | """ 17 | Bring up the visualization. 18 | """ 19 | brain = Brain(subject_id, "lh", "inflated", cortex=("gray", -2, 7, True), 20 | units='m') 21 | 22 | """ 23 | First we'll identify a stereotaxic focus in the MNI coordinate system. This 24 | might be a peak activations from a volume based analysis. 
25 | """ 26 | coord = [-43, 25, 24] 27 | 28 | """ 29 | Next we grow a label along the surface around the neareset vertex to this 30 | coordinate in the white surface mesh. The `n_steps` argument controls the size 31 | of the resulting label. 32 | """ 33 | utils.coord_to_label(subject_id, coord, label='example_data/coord', 34 | hemi='lh', n_steps=25, map_surface="white") 35 | brain.add_label('example_data/coord-lh.label', color="darkseagreen", alpha=.8) 36 | 37 | """ 38 | Now we plot the focus on the inflated surface at the vertex identified in the 39 | previous step. 40 | """ 41 | brain.add_foci([coord], map_surface="white", color="mediumseagreen") 42 | 43 | """ 44 | We can also do this using a vertex index, perhaps defined as the peak 45 | activation in a surface analysis. This will be more accurate than using a 46 | volume-based focus. 47 | """ 48 | coord = 0 49 | 50 | utils.coord_to_label(subject_id, coord, label='example_data/coord', 51 | hemi='lh', n_steps=40, map_surface="white", 52 | coord_as_vert=True) 53 | brain.add_label('example_data/coord-lh.label', color='royalblue', alpha=.8) 54 | 55 | """ 56 | Now we plot the foci on the inflated surface. We will map the foci onto the 57 | surface by finding the vertex on the "white" mesh that is closest to the 58 | coordinate of the point we want to display. 59 | """ 60 | brain.add_foci([coord], map_surface="white", coords_as_verts=True, 61 | color="mediumblue") 62 | 63 | """ 64 | Set the camera position to show the extent of the labels. 
65 | """ 66 | brain.show_view(dict(elevation=40, distance=0.430)) 67 | -------------------------------------------------------------------------------- /examples/plot_meg_inverse_solution.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot MEG inverse solution 3 | ========================= 4 | 5 | Data were computed using mne-python (http://martinos.org/mne) 6 | 7 | """ 8 | import os 9 | import numpy as np 10 | 11 | from surfer import Brain 12 | from surfer.io import read_stc 13 | 14 | print(__doc__) 15 | 16 | ############################################################################### 17 | # Set up some useful variables and make the plot. 18 | 19 | # define subject, surface and hemisphere(s) to plot: 20 | 21 | subject_id, surf = 'fsaverage', 'inflated' 22 | hemi = 'lh' 23 | 24 | # create Brain object for visualization 25 | brain = Brain(subject_id, hemi, surf, size=(400, 400), background='w', 26 | interaction='terrain', cortex='bone', units='m') 27 | 28 | # label for time annotation in milliseconds 29 | 30 | 31 | def time_label(t): 32 | return 'time=%0.2f ms' % (t * 1e3) 33 | 34 | 35 | # Read MNE dSPM inverse solution and plot 36 | 37 | for hemi in ['lh']: # , 'rh']: 38 | stc_fname = os.path.join('example_data', 'meg_source_estimate-' + 39 | hemi + '.stc') 40 | stc = read_stc(stc_fname) 41 | 42 | # data and vertices for which the data is defined 43 | data = stc['data'] 44 | vertices = stc['vertices'] 45 | 46 | # time points (in seconds) 47 | time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], 48 | data.shape[1], endpoint=False) 49 | 50 | # colormap to use 51 | colormap = 'hot' 52 | 53 | # add data and set the initial time displayed to 100 ms, 54 | # plotted using the nearest relevant colors 55 | brain.add_data(data, colormap=colormap, vertices=vertices, 56 | smoothing_steps='nearest', time=time, time_label=time_label, 57 | hemi=hemi, initial_time=0.1, verbose=False) 58 | 59 | # scale 
colormap 60 | brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True, 61 | verbose=False) 62 | 63 | ############################################################################### 64 | # To change the time displayed to 80 ms uncomment this line: 65 | 66 | # brain.set_time(0.08) 67 | 68 | ############################################################################### 69 | # uncomment these lines to use the interactive TimeViewer GUI 70 | 71 | # from surfer import TimeViewer 72 | # viewer = TimeViewer(brain) 73 | -------------------------------------------------------------------------------- /examples/plot_morphometry.py: -------------------------------------------------------------------------------- 1 | """ 2 | Overlay Morphometry Data 3 | ======================== 4 | 5 | Display morphometry files generated during 6 | the cortical reconstruction process. 7 | 8 | """ 9 | from surfer import Brain 10 | 11 | print(__doc__) 12 | 13 | brain = Brain("fsaverage", "both", "pial", views="frontal", 14 | background="dimgray") 15 | 16 | """ 17 | Because the morphometry files generated by 18 | recon-all live in a predictable location, 19 | all you need to call the add_morphometry 20 | method with is the name of the measure you want. 21 | Here, we'll look at cortical curvature values, 22 | and plot them for both hemispheres. 23 | """ 24 | brain.add_morphometry("curv") 25 | 26 | """ 27 | Each of the possible values is displayed in an 28 | appropriate full-color map, but you can also 29 | display in grayscale. Here we only plot the 30 | left hemisphere. 31 | """ 32 | brain.add_morphometry("sulc", hemi='lh', grayscale=True) 33 | 34 | """ 35 | You can also use a custom colormap and tweak its range.
36 | """ 37 | brain.add_morphometry("thickness", 38 | colormap="PuBuGn", min=1, max=4) 39 | -------------------------------------------------------------------------------- /examples/plot_parc_values.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================== 3 | Display ROI Values 4 | ================== 5 | 6 | Here we demonstrate how to take the results of an ROI analysis performed within 7 | each region of some parcellation and display those values on the surface to 8 | quickly summarize the analysis. 9 | 10 | """ 11 | import os 12 | import numpy as np 13 | import nibabel as nib 14 | from surfer import Brain 15 | 16 | print(__doc__) 17 | 18 | subject_id = "fsaverage" 19 | hemi = "lh" 20 | surf = "inflated" 21 | 22 | """ 23 | Bring up the visualization. 24 | """ 25 | brain = Brain(subject_id, hemi, surf, background="white") 26 | 27 | """ 28 | Read in the automatic parcellation of sulci and gyri. 29 | """ 30 | aparc_file = os.path.join(os.environ["SUBJECTS_DIR"], 31 | subject_id, "label", 32 | hemi + ".aparc.a2009s.annot") 33 | labels, ctab, names = nib.freesurfer.read_annot(aparc_file) 34 | 35 | """ 36 | Make a random vector of scalar data corresponding to a value for each region in 37 | the parcellation. 38 | 39 | """ 40 | rs = np.random.RandomState(4) 41 | roi_data = rs.uniform(.5, .8, size=len(names)) 42 | 43 | """ 44 | Make a vector containing the data point at each vertex. 45 | """ 46 | vtx_data = roi_data[labels] 47 | 48 | """ 49 | Handle vertices that are not defined in the annotation. 50 | """ 51 | vtx_data[labels == -1] = -1 52 | 53 | """ 54 | Display these values on the brain. Use a sequential colormap (assuming 55 | these data move from low to high values), and add an alpha channel so the 56 | underlying anatomy is visible. 
57 | """ 58 | brain.add_data(vtx_data, .5, .75, thresh=0, colormap="rocket", alpha=.8) 59 | -------------------------------------------------------------------------------- /examples/plot_parcellation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Using Annotation Files 3 | ====================== 4 | 5 | Display a Freesurfer cortical parcellation from an annotation file. 6 | 7 | """ 8 | import os 9 | from os.path import join as pjoin 10 | from surfer import Brain 11 | 12 | print(__doc__) 13 | 14 | subject_id = 'fsaverage' 15 | hemi = 'both' 16 | surf = 'inflated' 17 | view = 'frontal' 18 | 19 | """ 20 | Bring up the visualization 21 | """ 22 | brain = Brain(subject_id, hemi, surf, views=view, 23 | cortex="bone", background="ivory") 24 | 25 | """ 26 | Display the 'aparc' parcellation borders. 27 | To use annotations that live in your subject's 28 | label directory, just use the annot name. 29 | """ 30 | brain.add_annotation("aparc") 31 | 32 | """ 33 | You can also display the regions with "filled in" colors 34 | """ 35 | brain.add_annotation("aparc", borders=False) 36 | 37 | """ 38 | You may also provide a full path to an annotation file 39 | at an arbitray location on the disc. You can also 40 | plot things separately for the left and right hemispheres. 
41 | """ 42 | subjects_dir = os.environ["SUBJECTS_DIR"] 43 | annot_path = pjoin(subjects_dir, subject_id, "label", "lh.aparc.annot") 44 | brain.add_annotation(annot_path, hemi='lh', borders=False, alpha=.75) 45 | annot_path = pjoin(subjects_dir, subject_id, "label", "rh.aparc.a2009s.annot") 46 | brain.add_annotation(annot_path, hemi='rh', borders=2, remove_existing=False) 47 | -------------------------------------------------------------------------------- /examples/plot_probabilistic_label.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================ 3 | Display Probabilistic Labels 4 | ============================ 5 | 6 | Freesurfer ships with some probabilistic labels of cytoarchitectonic 7 | and visual areas. Here we show several ways to visualize these labels 8 | to help characterize the location of your data. 9 | 10 | """ 11 | from os import environ 12 | from os.path import join 13 | import numpy as np 14 | from surfer import Brain 15 | from nibabel.freesurfer import read_label 16 | 17 | print(__doc__) 18 | 19 | brain = Brain("fsaverage", "lh", "inflated") 20 | 21 | """ 22 | Show the morphometry with a continuous grayscale colormap. 23 | """ 24 | brain.add_morphometry("curv", colormap="binary", 25 | min=-.8, max=.8, colorbar=False) 26 | 27 | """ 28 | The easiest way to label any vertex that could be in the region is with 29 | add_label. 30 | """ 31 | brain.add_label("BA1", color="#A6BDDB") 32 | 33 | """ 34 | You can also threshold based on the probability of that region being at each 35 | vertex. 36 | """ 37 | brain.add_label("BA1", color="#2B8CBE", scalar_thresh=.5) 38 | 39 | """ 40 | It's also possible to plot just the label boundary, in case you wanted to 41 | overlay the label on an activation plot to asses whether it falls within that 42 | region. 
43 | """ 44 | brain.add_label("BA45", color="#F0F8FF", borders=3, scalar_thresh=.5) 45 | brain.add_label("BA45", color="#F0F8FF", alpha=.3, scalar_thresh=.5) 46 | 47 | """ 48 | Finally, with a few tricks, you can display the whole probabilistic map. 49 | """ 50 | subjects_dir = environ["SUBJECTS_DIR"] 51 | label_file = join(subjects_dir, "fsaverage", "label", "lh.BA6.label") 52 | 53 | prob_field = np.zeros_like(brain.geo['lh'].x) 54 | ids, probs = read_label(label_file, read_scalars=True) 55 | prob_field[ids] = probs 56 | brain.add_data(prob_field, thresh=1e-5, colormap="RdPu") 57 | -------------------------------------------------------------------------------- /examples/plot_resting_correlations.py: -------------------------------------------------------------------------------- 1 | """ 2 | Display Resting-State Correlations 3 | ================================== 4 | 5 | In this example, we show how to build up a complex visualization of a 6 | volume-based image showing resting-state correlations across the whole brain 7 | from a seed in the angular gyrus. We'll plot several views of both hemispheres 8 | in a single window and manipulate the colormap to best represent the nature of 9 | the data. 10 | 11 | """ 12 | import os 13 | from surfer import Brain, project_volume_data 14 | 15 | print(__doc__) 16 | 17 | """Bring up the visualization""" 18 | brain = Brain("fsaverage", "split", "inflated", 19 | views=['lat', 'med'], background="white") 20 | 21 | """Project the volume file and return as an array""" 22 | mri_file = "example_data/resting_corr.nii.gz" 23 | reg_file = os.path.join(os.environ["FREESURFER_HOME"], 24 | "average/mni152.register.dat") 25 | surf_data_lh = project_volume_data(mri_file, "lh", reg_file) 26 | surf_data_rh = project_volume_data(mri_file, "rh", reg_file) 27 | 28 | """ 29 | You can pass this array to the add_overlay method for a typical activation 30 | overlay (with thresholding, etc.). 
31 | """ 32 | brain.add_overlay(surf_data_lh, min=.3, max=.7, name="ang_corr_lh", hemi='lh') 33 | brain.add_overlay(surf_data_rh, min=.3, max=.7, name="ang_corr_rh", hemi='rh') 34 | 35 | """ 36 | You can also pass it to add_data for more control 37 | over the visualization. Here we'll plot the whole 38 | range of correlations 39 | """ 40 | for overlay in brain.overlays_dict["ang_corr_lh"]: 41 | overlay.remove() 42 | for overlay in brain.overlays_dict["ang_corr_rh"]: 43 | overlay.remove() 44 | 45 | """ 46 | We want to use an appropriate color map for these data: a divergent map that 47 | is centered on 0, which is a meaningful transition-point as it marks the change 48 | from negative correlations to positive correlations. By providing the 'center' 49 | argument the add_data function automatically chooses a divergent colormap. 50 | """ 51 | brain.add_data(surf_data_lh, 0, .7, center=0, hemi='lh') 52 | brain.add_data(surf_data_rh, 0, .7, center=0, hemi='rh') 53 | 54 | """ 55 | You can tune the data display by shifting the colormap around interesting 56 | regions. For example, you can ignore small correlation up to a magnitude of 0.2 57 | and let colors become gradually less transparent from 0.2 to 0.5 by re-scaling 58 | the colormap as follows. For more information see the help string of this 59 | function. 60 | """ 61 | brain.scale_data_colormap(.2, .5, .7, transparent=True, center=0) 62 | 63 | """ 64 | You can also set the overall opacity of the displayed data while maintaining 65 | the transparency of the small values. 66 | """ 67 | brain.scale_data_colormap(0, .35, .7, transparent=True, center=0, 68 | alpha=0.75) 69 | 70 | """ 71 | This overlay represents resting-state correlations with a 72 | seed in left angular gyrus. Let's plot that seed. 
73 | """ 74 | seed_coords = (-45, -67, 36) 75 | brain.add_foci(seed_coords, map_surface="white", hemi='lh') 76 | -------------------------------------------------------------------------------- /examples/plot_topographic_contours.py: -------------------------------------------------------------------------------- 1 | """ 2 | Display Activation Contours 3 | =========================== 4 | 5 | Load a statistical overlay as a "topographic" contour map. 6 | 7 | """ 8 | import os.path as op 9 | from surfer import Brain 10 | 11 | print(__doc__) 12 | 13 | """ 14 | Bring up the visualization. 15 | Contour maps looks best with the "low_contrast" cortex 16 | colorscheme, so we will pass that as an option to the 17 | Brain constructor. 18 | """ 19 | brain = Brain("fsaverage", "lh", "inflated", 20 | cortex="low_contrast", background="#151540") 21 | 22 | """ 23 | Get a path to the overlay file 24 | """ 25 | overlay_file = op.join("example_data", "lh.sig.nii.gz") 26 | 27 | """ 28 | Add the contour overlay with the default display settings 29 | Contours overlays only ever use the positive components of 30 | your image, but they get threshold and colormap saturation 31 | from your configuration settings just as normal overlays do. 32 | """ 33 | brain.add_contour_overlay(overlay_file) 34 | 35 | """ 36 | The Brain object can only display one contour overlay at a time, 37 | So if we bring up another one, it will remove the original overlay 38 | behind the scenes for us. Here let's specify a different number of 39 | contours and use a different line width. 
40 | """ 41 | brain.add_contour_overlay(overlay_file, 42 | min=2, max=20, 43 | n_contours=10, 44 | line_width=2) 45 | -------------------------------------------------------------------------------- /examples/plot_transparent_brain.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======================= 3 | Plot Transparent Brain 4 | ======================= 5 | 6 | Plot a transparent brain to visualize foci below the surface. 7 | 8 | """ 9 | import os 10 | from surfer import Brain 11 | 12 | print(__doc__) 13 | 14 | subject_id = "fsaverage" 15 | subjects_dir = os.environ["SUBJECTS_DIR"] 16 | 17 | """To render a transparent brain, we are specifying an alpha < 18 | 1.0. This allows us to visualize foci that are not on the cortical 19 | surface. When the brain see-through, rendering of binary curvature is 20 | distracting, so we specify a color, rather than a color map as the 21 | argument for cortex: 22 | 23 | """ 24 | brain = Brain(subject_id, "lh", "pial", cortex='ivory', alpha=0.5) 25 | 26 | """Here's a set of stereotaxic foci in the MNI coordinate system that 27 | are not on the cortical surface which we want to display. 28 | 29 | """ 30 | 31 | coords = [[-20, 10, 10], 32 | [-25, 22, 15], 33 | [-18, 8, 20]] 34 | 35 | """Now we plot the foci in the brain. Because the foci are not on the 36 | cortical surface, they are only visible when alpha is set to < 1.0 in 37 | the call to Brain. 38 | 39 | """ 40 | brain.add_foci(coords, color="red") 41 | -------------------------------------------------------------------------------- /examples/plot_vector_meg_inverse_solution.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot vector-valued MEG inverse solution 3 | ======================================= 4 | 5 | Data were computed using mne-python (http://martinos.org/mne). 
6 | 7 | """ 8 | import os 9 | import numpy as np 10 | 11 | from surfer import Brain, TimeViewer # noqa, analysis:ignore 12 | from surfer.io import read_stc 13 | 14 | print(__doc__) 15 | 16 | # Do some basic things: define subject, surface and hemisphere(s) to plot, 17 | # and create the :class:`surfer.viz.Brain` object. 18 | 19 | subject_id, surf = 'fsaverage', 'white' 20 | hemi = 'lh' 21 | brain = Brain(subject_id, hemi, surf, size=(800, 800), interaction='terrain', 22 | cortex='0.5', alpha=0.5, show_toolbar=True, units='m') 23 | 24 | # Read the MNE dSPM inverse solution 25 | 26 | hemi = 'lh' 27 | stc_fname = os.path.join('example_data', 'meg_source_estimate-' + 28 | hemi + '.stc') 29 | stc = read_stc(stc_fname) 30 | 31 | # data and vertices for which the data is defined 32 | data = stc['data'] 33 | vertices = stc['vertices'] 34 | time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'], 35 | data.shape[1], endpoint=False) 36 | 37 | # MNE will soon add the option for a "full" inverse to be computed and stored. 
"""
============================
Animate brain and save movie
============================

"""
from surfer import Brain

print(__doc__)

subject_id = 'fsaverage'
hemisphere = 'lh'
surface = 'inflated'

brain = Brain(subject_id, hemisphere, surface)

# A simple lateral -> caudal sweep.
brain.animate(['l', 'c'])

# The number of interpolation steps can be controlled.
brain.animate(['l', 'm'], n_steps=30)

# Any path through the axial views works.
brain.animate(['l', 'c', 'm', 'r', 'c', 'r', 'l'], n_steps=45)

# Full turns.
brain.animate(["m"] * 3)

# The animation can also be written out as a movie file.
brain.animate(['l', 'l'], n_steps=10, fname='simple_animation.avi')

# However, rotating out of the axial plane is not allowed.
try:
    brain.animate(['l', 'd'])
except ValueError as e:
    print(e)
"""
Create movie from MEG inverse solution
=======================================

Data were computed using mne-python (http://martinos.org/mne)

"""
import os

import numpy as np

from surfer import Brain
from surfer.io import read_stc

print(__doc__)

# Create the Brain object (split layout, both hemispheres) for
# visualization.
brain = Brain('fsaverage', 'split', 'inflated', size=(800, 400))

# Read and display the MNE dSPM inverse solution for each hemisphere.
stc_fname = os.path.join('example_data', 'meg_source_estimate-%s.stc')
for hemi in ['lh', 'rh']:
    stc = read_stc(stc_fname % hemi)
    data = stc['data']
    times = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(data, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=times, hemi=hemi,
                   time_label=lambda t: '%s ms' % int(round(t * 1e3)))

# Scale the colormap.
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

# Save a movie.  Use a large value for time_dilation because the sample
# stc only covers 30 ms.
brain.save_movie('example_current.mov', time_dilation=30)

brain.close()
7 | 8 | """ 9 | from surfer import Brain 10 | 11 | print(__doc__) 12 | 13 | sub = 'fsaverage' 14 | hemi = 'lh' 15 | surf = 'inflated' 16 | 17 | brain = Brain(sub, hemi, surf) 18 | 19 | ############################################################################### 20 | # save 1 image 21 | brain.show_view('lat') 22 | brain.save_image("%s_lat.png" % sub) 23 | 24 | ############################################################################### 25 | # save some images 26 | brain.save_imageset(sub, ['med', 'lat', 'ros', 'caud'], 'jpg') 27 | -------------------------------------------------------------------------------- /examples/show_views.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================== 3 | Show the different brain views 4 | ============================== 5 | 6 | Among the views available are lateral, rostral, caudal, frontal etc. 7 | 8 | """ 9 | from surfer import Brain 10 | 11 | print(__doc__) 12 | 13 | sub = 'fsaverage' 14 | hemi = 'both' 15 | surf = 'inflated' 16 | 17 | brain = Brain(sub, hemi, surf) 18 | 19 | ############################################################################### 20 | # show all views 21 | brain.show_view('lateral') 22 | brain.show_view('m') 23 | brain.show_view('rostral') 24 | brain.show_view('caudal') 25 | brain.show_view('ve') 26 | brain.show_view('frontal') 27 | brain.show_view('par') 28 | brain.show_view('dor') 29 | 30 | ############################################################################### 31 | # More advanced parameters 32 | brain.show_view({'distance': 432}) 33 | # with great power comes great responsibility 34 | brain.show_view({'azimuth': 135, 'elevation': 79}, roll=107) 35 | -------------------------------------------------------------------------------- /make/get_fsaverage.ps1: -------------------------------------------------------------------------------- 1 | # Sample script to install Python and pip under Windows 2 | # Authors: Olivier 
# Download and extract a minimal fsaverage subject for testing PySurfer
# on Windows CI.  (The previous header described a Python/pip installer;
# it was copied from an unrelated script and did not match what this
# file actually does.)
# Authors: Olivier Grisel, Jonathan Helmus and Kyle Kastner
# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/

$FSAVERAGE_URL = "https://staff.washington.edu/larsoner/fsaverage_min.zip"

Add-Type -AssemblyName System.IO.Compression.FileSystem
function Unzip
{
    param([string]$zipfile, [string]$outpath)
    [System.IO.Compression.ZipFile]::ExtractToDirectory($zipfile, $outpath)
}

function DownloadExtractFsaverage () {
    $webclient = New-Object System.Net.WebClient
    # $basedir already ends with a path separator.
    $basedir = $pwd.Path + "\"
    $filepath = $basedir + "fsaverage_min.zip"
    # Download and retry up to 3 times in case of network transient errors.
    $url = $FSAVERAGE_URL
    Write-Host "Downloading" $url
    $retry_attempts = 2
    for ($i = 0; $i -lt $retry_attempts; $i++) {
        try {
            $webclient.DownloadFile($url, $filepath)
            break
        }
        Catch [Exception] {
            Start-Sleep 1
        }
    }
    if (Test-Path $filepath) {
        Write-Host "File saved at" $filepath
    } else {
        # Retry once to surface the error message, if any, at the last try
        $webclient.DownloadFile($url, $filepath)
    }
    # Extract into .\subjects (no leading "\": $basedir already ends
    # with one, so the old concatenation produced a double separator).
    $subjects_dir = $basedir + "subjects"
    Unzip $filepath $subjects_dir
}

function main () {
    DownloadExtractFsaverage
}

main
__package__, falling back on __name__ and __path__:ImportWarning 9 | ignore:The binary mode of fromstring is deprecated, as it behaves surprisingly on unicode inputs. Use frombuffer instead:DeprecationWarning 10 | ignore:elementwise == comparison failed:DeprecationWarning 11 | ignore:Importing from numpy:DeprecationWarning 12 | ignore:.*ufunc size changed.*:RuntimeWarning 13 | ignore:Using or importing the ABCs:DeprecationWarning 14 | ignore:the imp module is deprecated in favour of importlib:DeprecationWarning 15 | ignore:.*trait handler has been deprecated.*:DeprecationWarning 16 | ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning 17 | ignore:Matplotlib is building the font cache using fc-list. This may take a moment.:UserWarning 18 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # 3 | # Copyright (C) 2011-2014 Alexandre Gramfort 4 | # Michael Waskom 5 | # Scott Burns 6 | # Martin Luessi 7 | # Eric Larson 8 | 9 | import os 10 | from setuptools import setup 11 | 12 | descr = """PySurfer: cortical surface visualization using Python.""" 13 | 14 | # deal with MPL sandbox violations during easy_install 15 | os.environ['MPLCONFIGDIR'] = '.' 
#! /usr/bin/env python
#
# Copyright (C) 2011-2014 Alexandre Gramfort
#                         Michael Waskom
#                         Scott Burns
#                         Martin Luessi
#                         Eric Larson

import os

descr = """PySurfer: cortical surface visualization using Python."""

# deal with MPL sandbox violations during easy_install
os.environ['MPLCONFIGDIR'] = '.'


def get_version(init_path):
    """Parse ``__version__`` from *init_path* without importing the package.

    Importing ``surfer`` pulls in mayavi, which fails on headless build
    systems, so the version string is read textually instead.

    Parameters
    ----------
    init_path : str
        Path to the package ``__init__.py``.

    Returns
    -------
    str
        The version string.

    Raises
    ------
    RuntimeError
        If no ``__version__`` assignment is found in the file.
    """
    with open(init_path, 'r') as fid:
        for line in (line.strip() for line in fid):
            if line.startswith('__version__'):
                return line.split('=')[1].strip().strip('"')
    raise RuntimeError('Could not determine version')


DISTNAME = 'pysurfer'
DESCRIPTION = descr
LONG_DESCRIPTION = descr
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = 'mwaskom@stanford.edu'
URL = 'http://pysurfer.github.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/nipy/PySurfer'


if __name__ == "__main__":
    # Import setuptools and read the version only when actually building,
    # so importing this module has no packaging side effects.
    from setuptools import setup

    version = get_version(os.path.join('surfer', '__init__.py'))
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    setup(name=DISTNAME,
          maintainer=MAINTAINER,
          include_package_data=True,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=version,
          download_url=DOWNLOAD_URL,
          long_description=LONG_DESCRIPTION,
          zip_safe=False,  # the package can run out of an .egg file
          classifiers=['Intended Audience :: Science/Research',
                       'Intended Audience :: Developers',
                       # NOTE: the "Python :: 2.7" classifier was removed;
                       # it contradicted python_requires='>=3.6' below.
                       'Programming Language :: Python :: 3.6',
                       'Programming Language :: Python :: 3.7',
                       'License :: OSI Approved',
                       'Programming Language :: Python',
                       'Topic :: Software Development',
                       'Topic :: Scientific/Engineering',
                       'Operating System :: Microsoft :: Windows',
                       'Operating System :: POSIX',
                       'Operating System :: Unix',
                       'Operating System :: MacOS'],
          platforms='any',
          packages=['surfer', 'surfer.tests'],
          scripts=['bin/pysurfer'],
          python_requires='>=3.6',
          install_requires=['numpy', 'scipy', 'matplotlib',
                            'nibabel >= 1.2', 'mayavi'],
          extras_require={'save_movie': ['imageio >= 1.5']},
          )
| ) 74 | -------------------------------------------------------------------------------- /surfer/__init__.py: -------------------------------------------------------------------------------- 1 | from .viz import Brain, TimeViewer # noqa 2 | from .utils import Surface, verbose, set_log_level, set_log_file # noqa 3 | from .io import project_volume_data # noqa 4 | 5 | __version__ = "0.12.dev0" 6 | 7 | set_log_file() # initialize handlers 8 | set_log_level() # initialize logging level 9 | -------------------------------------------------------------------------------- /surfer/_commandline.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module defines the command-line interface for PySurfer. 3 | It is defined here instead of in either the top level or 4 | intermediate start-up scripts, as it is used in both. 5 | 6 | There should be no reason to import this module in an 7 | interpreter session. 8 | 9 | """ 10 | from argparse import ArgumentParser, RawDescriptionHelpFormatter 11 | 12 | help_text = """ 13 | PySurfer is a package for visualization and interaction with cortical 14 | surface representations of neuroimaging data from Freesurfer. 15 | 16 | The command-line program pysurfer is designed to largely replicate 17 | Freesufer's tksurfer command-line interface in the format and style 18 | of arguments it accepts, and, like tksurfer, invoking it will initialize 19 | a visualization in an external window and begin an IPython session in the 20 | terminal, through which the visualization can be manipulated. 21 | 22 | The visualization interface is exposed through methods on the `brain' 23 | variable that will exist in IPython namespace when the program finishes 24 | loading. Please see the PySurfer documentation for more information 25 | about how to interact with the Brain object. 
26 | 27 | """ 28 | 29 | parser = ArgumentParser(prog='pysurfer', 30 | usage='%(prog)s subject_id hemisphere surface ' 31 | '[options]', 32 | formatter_class=RawDescriptionHelpFormatter, 33 | description=help_text) 34 | parser.add_argument("subject_id", 35 | help="subject id as in subjects dir") 36 | parser.add_argument("hemi", metavar="hemi", choices=["lh", "rh", 37 | "both", "split"], 38 | help="hemisphere to load") 39 | parser.add_argument("surf", 40 | help="surface mesh (e.g. 'pial', 'inflated')") 41 | parser.add_argument("-morphometry", metavar="MEAS", 42 | help="load morphometry file (e.g. thickness, curvature)") 43 | parser.add_argument("-annotation", metavar="ANNOT", 44 | help="load annotation (by name or filepath)") 45 | parser.add_argument("-label", 46 | help="load label (by name or filepath") 47 | parser.add_argument("-borders", action="store_true", 48 | help="only show label/annot borders") 49 | parser.add_argument("-overlay", metavar="FILE", 50 | help="load scalar overlay file") 51 | parser.add_argument("-range", metavar=('MIN', 'MAX'), nargs=2, 52 | help="overlay threshold and saturation point") 53 | parser.add_argument("-min", type=float, 54 | help="overlay threshold") 55 | parser.add_argument("-max", type=float, 56 | help="overlay saturation point") 57 | parser.add_argument("-sign", default="abs", choices=["abs", "pos", "neg"], 58 | help="overlay sign") 59 | parser.add_argument("-name", 60 | help="name to use for the overlay") 61 | parser.add_argument("-size", default=800, nargs="?", 62 | help="size of the display window (in pixels)") 63 | parser.add_argument("-background", metavar="COLOR", default="black", 64 | help="background color for display") 65 | parser.add_argument("-foreground", metavar="COLOR", default="white", 66 | help="foreground color for display") 67 | parser.add_argument("-cortex", metavar="COLOR", default="classic", 68 | help="colormap or color for rendering the cortex") 69 | parser.add_argument("-alpha", metavar="COLOR", 
default="1.0", 70 | help="specifies opacity for the cortical surface") 71 | parser.add_argument("-title", 72 | help="title to use for the figure") 73 | parser.add_argument("-views", nargs="*", default=['lat'], 74 | help="view list (space-separated) to use") 75 | -------------------------------------------------------------------------------- /surfer/io.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | import sys 4 | from tempfile import mktemp 5 | 6 | from subprocess import Popen, PIPE 7 | import gzip 8 | import numpy as np 9 | import nibabel as nib 10 | try: 11 | from nibabel.spatialimages import ImageFileError # removed in nibabel 5.1 12 | except ImportError: 13 | from nibabel.filebasedimages import ImageFileError 14 | 15 | from .utils import verbose 16 | 17 | import logging 18 | logger = logging.getLogger('surfer') 19 | 20 | 21 | def read_scalar_data(filepath): 22 | """Load in scalar data from an image. 23 | 24 | Parameters 25 | ---------- 26 | filepath : str 27 | path to scalar data file 28 | 29 | Returns 30 | ------- 31 | scalar_data : numpy array 32 | flat numpy array of scalar data 33 | """ 34 | try: 35 | scalar_data = np.asanyarray(nib.load(filepath).dataobj) 36 | scalar_data = np.ravel(scalar_data, order="F") 37 | return scalar_data 38 | 39 | except ImageFileError: 40 | ext = os.path.splitext(filepath)[1] 41 | if ext == ".mgz": 42 | openfile = gzip.open 43 | elif ext == ".mgh": 44 | openfile = open 45 | else: 46 | raise ValueError("Scalar file format must be readable " 47 | "by Nibabel or .mg{hz} format") 48 | 49 | fobj = openfile(filepath, "rb") 50 | # We have to use np.fromstring here as gzip fileobjects don't work 51 | # with np.fromfile; same goes for try/finally instead of with statement 52 | try: 53 | v = np.fromstring(fobj.read(4), ">i4")[0] 54 | if v != 1: 55 | # I don't actually know what versions this code will read, so to be 56 | # on the safe side, let's only let version 1 in 
def read_stc(filepath):
    """Read an STC file from the MNE package.

    STC files contain activations or source reconstructions
    obtained from EEG and MEG data.

    Parameters
    ----------
    filepath : string
        Path to STC file

    Returns
    -------
    data : dict
        The STC structure. It has the following keys:
           tmin           The first time point of the data in seconds
           tstep          Time between frames in seconds
           vertices       vertex indices (0 based)
           data           The data matrix (nvert * ntime)
    """
    stc = dict()
    # ``with`` guarantees the handle is closed even if parsing fails
    # (the old code leaked the file descriptor on any exception).
    with open(filepath, 'rb') as fid:
        fid.seek(0, 2)  # go to end of file
        file_length = fid.tell()
        fid.seek(0, 0)  # go to beginning of file

        # tmin and the sampling period are stored in ms; convert to s.
        # Index with [0] rather than float(array): converting an ndim-1
        # array to a scalar is deprecated in NumPy >= 1.25.
        stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1)[0])
        stc['tmin'] /= 1000.0
        stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1)[0])
        stc['tstep'] /= 1000.0

        # read number of vertices/sources
        vertices_n = int(np.fromfile(fid, dtype=">u4", count=1)[0])

        # read the source vector
        stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n)

        # read the number of timepts
        data_n = int(np.fromfile(fid, dtype=">u4", count=1)[0])

        # sanity check: header (4 words) + vertices + k full data frames
        if ((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0:
            raise ValueError('incorrect stc file size')

        # read the data matrix and reshape to (nvert, ntime)
        stc['data'] = np.fromfile(fid, dtype=">f4",
                                  count=vertices_n * data_n)
        stc['data'] = stc['data'].reshape([data_n, vertices_n]).T

    return stc
@verbose
def project_volume_data(filepath, hemi, reg_file=None, subject_id=None,
                        projmeth="frac", projsum="avg", projarg=(0, 1, .1),
                        surf="white", smooth_fwhm=3, mask_label=None,
                        target_subject=None, verbose=None):
    """Sample MRI volume onto cortical manifold.

    Note: this requires Freesurfer to be installed with correct
    SUBJECTS_DIR definition (it uses mri_vol2surf internally).

    Parameters
    ----------
    filepath : string
        Volume file to resample (equivalent to --mov)
    hemi : [lh, rh]
        Hemisphere target
    reg_file : string
        Path to TKreg style affine matrix file
    subject_id : string
        Use if file is in register with subject's orig.mgz
    projmeth : [frac, dist]
        Projection arg should be understood as fraction of cortical
        thickness or as an absolute distance (in mm)
    projsum : [avg, max, point]
        Average over projection samples, take max, or take point sample
    projarg : single float or sequence of three floats
        Single float for point sample, sequence for avg/max specifying
        start, stop, and step
    surf : string
        Target surface
    smooth_fwhm : float
        FWHM of surface-based smoothing to apply; 0 skips smoothing
    mask_label : string
        Path to label file to constrain projection; otherwise uses cortex
    target_subject : string
        Subject to warp data to in surface space after projection
    verbose : bool, str, int, or None
        If not None, override default verbose level (see surfer.verbose).
    """
    # mkstemp (unlike the deprecated mktemp) actually creates the file,
    # closing the race between name generation and use.
    from tempfile import mkstemp

    fs_home = os.getenv('FREESURFER_HOME')
    if fs_home is None:
        raise RuntimeError('FreeSurfer environment not defined. Define the '
                           'FREESURFER_HOME environment variable.')
    # Require that the FreeSurfer setup script has been sourced so that
    # mri_vol2surf is actually on PATH.
    bin_path = os.path.join(fs_home, 'bin')
    if bin_path not in os.getenv('PATH', ''):
        raise RuntimeError('Freesurfer bin path "%s" not found, be sure to '
                           'source the Freesurfer setup script' % (bin_path))
    if sys.platform == 'darwin':
        # OSX does some ugly "protection" where it clears DYLD_LIBRARY_PATH
        # for subprocesses
        env = copy.deepcopy(os.environ)
        ld_path = os.path.join(fs_home, 'lib', 'gcc', 'lib')
        if 'DYLD_LIBRARY_PATH' not in env:
            env['DYLD_LIBRARY_PATH'] = ld_path
        else:
            env['DYLD_LIBRARY_PATH'] = \
                ld_path + ':' + env['DYLD_LIBRARY_PATH']
    else:
        env = os.environ

    # Set the basic commands
    cmd_list = ["mri_vol2surf",
                "--mov", os.path.abspath(filepath),
                "--hemi", hemi,
                "--surf", surf]

    # Specify the affine registration
    if reg_file is not None:
        cmd_list.extend(["--reg", reg_file])
    elif subject_id is not None:
        cmd_list.extend(["--regheader", subject_id])
    else:
        raise ValueError("Must specify reg_file or subject_id")

    # Specify the projection
    proj_flag = "--proj" + projmeth
    if projsum != "point":
        proj_flag += "-"
        proj_flag += projsum
    if hasattr(projarg, "__iter__"):
        proj_arg = list(map(str, projarg))
    else:
        proj_arg = [str(projarg)]
    cmd_list.extend([proj_flag] + proj_arg)

    # Set misc args
    if smooth_fwhm:
        cmd_list.extend(["--surf-fwhm", str(smooth_fwhm)])
    if mask_label is not None:
        cmd_list.extend(["--mask", mask_label])
    if target_subject is not None:
        cmd_list.extend(["--trgsubject", target_subject])

    # Execute the command
    fd, out_file = mkstemp(prefix="pysurfer-v2s", suffix='.mgz')
    os.close(fd)
    cmd_list.extend(["--o", out_file])
    logger.debug(" ".join(cmd_list))
    p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=env)
    stdout, stderr = p.communicate()
    if p.returncode:
        # decode so the error shows readable text, not a bytes repr
        raise RuntimeError("mri_vol2surf command failed "
                           "with output: \n\n{}"
                           .format(stderr.decode('utf-8', 'replace')))

    # Read in the data
    surf_data = read_scalar_data(out_file)
    os.remove(out_file)
    return surf_data
".join(cmd_list)) 236 | p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=env) 237 | stdout, stderr = p.communicate() 238 | out = p.returncode 239 | if out: 240 | raise RuntimeError(("mri_vol2surf command failed " 241 | "with output: \n\n{}".format(stderr))) 242 | 243 | # Read in the data 244 | surf_data = read_scalar_data(out_file) 245 | os.remove(out_file) 246 | return surf_data 247 | -------------------------------------------------------------------------------- /surfer/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nipy/PySurfer/46773eeca6f825c0ee501d6479df4c217ff395aa/surfer/tests/__init__.py -------------------------------------------------------------------------------- /surfer/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from distutils.version import LooseVersion 2 | import numpy as np 3 | import scipy 4 | from scipy import sparse 5 | import pytest 6 | import matplotlib as mpl 7 | from numpy.testing import assert_allclose, assert_array_equal 8 | 9 | from surfer import utils 10 | 11 | 12 | def _slow_compute_normals(rr, tris): 13 | """Efficiently compute vertex normals for triangulated surface""" 14 | # first, compute triangle normals 15 | r1 = rr[tris[:, 0], :] 16 | r2 = rr[tris[:, 1], :] 17 | r3 = rr[tris[:, 2], :] 18 | tri_nn = np.cross((r2 - r1), (r3 - r1)) 19 | 20 | # Triangle normals and areas 21 | size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1)) 22 | zidx = np.where(size == 0)[0] 23 | size[zidx] = 1.0 # prevent ugly divide-by-zero 24 | tri_nn /= size[:, np.newaxis] 25 | 26 | # accumulate the normals 27 | nn = np.zeros((len(rr), 3)) 28 | for p, verts in enumerate(tris): 29 | nn[verts] += tri_nn[p, :] 30 | size = np.sqrt(np.sum(nn * nn, axis=1)) 31 | size[size == 0] = 1.0 # prevent ugly divide-by-zero 32 | nn /= size[:, np.newaxis] 33 | return nn 34 | 35 | 36 | @utils.requires_fsaverage() 37 | def 
def test_huge_cross():
    """Test cross product with lots of elements."""
    lhs = np.random.rand(100000, 3)
    rhs = np.random.rand(1, 3)
    # The fast implementation must agree exactly with numpy's reference.
    expected = np.cross(lhs, rhs)
    got = utils._fast_cross_3d(lhs, rhs)
    assert_array_equal(expected, got)
def test_smooth():
    """Test smoothing support."""
    adj_mat = sparse.csc_matrix(np.repeat(np.repeat(np.eye(2), 2, 0), 2, 1))
    vertices = np.array([0, 2])
    expected = np.repeat(np.eye(2), 2, axis=0)
    got = utils.smoothing_matrix(vertices, adj_mat).toarray()
    assert_allclose(got, expected)
    # 'nearest' smoothing needs scipy >= 1.3; older versions must raise.
    if LooseVersion(scipy.__version__) < LooseVersion('1.3'):
        with pytest.raises(RuntimeError, match='nearest.*requires'):
            utils.smoothing_matrix(vertices, adj_mat, 'nearest')
    else:
        nearest = utils.smoothing_matrix(vertices, adj_mat, 'nearest')
        assert_allclose(nearest.toarray(), expected)
def _set_backend(backend=None):
    """Select a mayavi backend, forcing 'test' where rendering crashes."""
    # Windows, and Travis Linux under Python 3, can only run the
    # non-rendering 'test' backend.
    travis_linux = (os.getenv('TRAVIS', 'false') == 'true' and
                    sys.platform == 'linux')
    only_test = (sys.platform == 'win32' or
                 (travis_linux and sys.version[0] == '3'))
    if backend is None:
        backend = 'test' if only_test else 'auto'
    elif only_test and backend != 'test':
        raise SkipTest('non-testing backend crashes on Windows and '
                       'Travis Py3k')
    mlab.options.backend = backend
@requires_fsaverage()
def test_brain_separate():
    """Test that Brain does not reuse existing figures by default."""
    _set_backend('auto')
    first = Brain(*std_args)
    assert first.brain_matrix.size == 1
    # A second Brain gets its own figure...
    second = Brain(*std_args)
    assert second.brain_matrix.size == 1
    assert first._figures[0][0] is not second._figures[0][0]
    # ...unless one is passed in explicitly.
    shared = Brain(*std_args, figure=first._figures[0][0])
    assert first._figures[0][0] is shared._figures[0][0]
@requires_fsaverage()
def test_annot():
    """Test plotting of annot."""
    _set_backend()
    annots = ['aparc', 'aparc.a2005s']
    borders = [True, False, 2]
    alphas = [1, 0.5]
    brain = Brain(*std_args)
    view = get_view(brain)

    for a, b, p in zip(annots, borders, alphas):
        brain.add_annotation(a, b, p, opacity=0.8)
    check_view(brain, view)

    brain.set_surf('white')
    with pytest.raises(ValueError):
        brain.add_annotation('aparc', borders=-1)

    subj_dir = utils._get_subjects_dir()
    annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
    labels, ctab, names = nib.freesurfer.read_annot(annot_path)
    brain.add_annotation((labels, ctab))

    brain.add_annotation('aparc', color="red", remove_existing=True)
    surf = brain.annot["surface"]
    ctab = surf.module_manager.scalar_lut_manager.lut.table
    for color in ctab:
        # BUGFIX: comparing an array slice to a tuple with ``==`` yields an
        # elementwise boolean array, and ``assert`` on it raises "truth value
        # is ambiguous" instead of checking anything. Coerce to tuple first
        # (also correct if the LUT rows are already tuples).
        assert tuple(color[:3]) == (255, 0, 0)

    brain.close()


@requires_fsaverage()
def test_contour():
    """Test plotting of contour overlay."""
    _set_backend()
    brain = Brain(*std_args)
    view = get_view(brain)

    overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
    brain.add_contour_overlay(overlay_file)
    brain.add_contour_overlay(overlay_file, max=20, n_contours=9,
                              line_width=2)
    brain.contour['surface'].actor.property.line_width = 1
    brain.contour['surface'].contour.number_of_contours = 10

    check_view(brain, view)
    brain.close()


@requires_fsaverage()
@requires_fs()
def test_data():
    """Test plotting of data."""
    _set_backend()
    brain = Brain(*std_args)
    mri_file = pjoin(data_dir, 'resting_corr.nii.gz')
    reg_file = pjoin(data_dir, 'register.dat')
    surf_data = io.project_volume_data(mri_file, "lh", reg_file)
    brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7)
    brain.set_surf('white')
    # empty data with empty vertices must be accepted
    brain.add_data([], vertices=np.array([], int))
    brain.close()


@requires_fsaverage()
def test_close():
    """Test that close and del actually work."""
    _set_backend()
    brain = Brain('fsaverage', 'both', 'inflated')
    brain.close()
    brain.__del__()
    del brain
    gc.collect()


@requires_fsaverage()
def test_data_limits():
    """Test handling of data limits."""
    _set_backend()
    brain = Brain('fsaverage', 'both', 'inflated')
    surf_data = np.linspace(0, 1, 163842)
    # fmin == fmax is invalid
    pytest.raises(ValueError, brain.add_data, surf_data, 0, 0)
    brain.add_data(surf_data, 0, 1, hemi='lh')
    assert brain.data_dict['lh']['fmax'] == 1.
    brain.add_data(surf_data, 0, 0.5, hemi='rh')
    assert brain.data_dict['lh']['fmax'] == 1.  # unmodified
    assert brain.data_dict['rh']['fmax'] == 0.5
    brain.close()


@requires_fsaverage()
def test_foci():
    """Test plotting of foci."""
    _set_backend('test')
    brain = Brain(*std_args)
    coords = [[-36, 18, -3],
              [-43, 25, 24],
              [-48, 26, -2]]
    brain.add_foci(coords,
                   map_surface="white",
                   color="gold",
                   name='test1',
                   resolution=25)

    subj_dir = utils._get_subjects_dir()
    annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
    ids, ctab, names = nib.freesurfer.read_annot(annot_path)
    verts = np.arange(0, len(ids))
    coords = np.random.permutation(verts[ids == 74])[:10]
    scale_factor = 0.7
    brain.add_foci(coords, coords_as_verts=True, scale_factor=scale_factor,
                   color="#A52A2A", name='test2')
    with pytest.raises(ValueError):
        brain.remove_foci(['test4'])  # no such set
    brain.remove_foci('test1')
    brain.remove_foci()
    assert len(brain.foci_dict) == 0
    brain.close()


@requires_fsaverage()
def test_label():
    """Test plotting of label."""
    _set_backend()
    subject_id = "fsaverage"
    hemi = "lh"
    surf = "inflated"
    brain = Brain(subject_id, hemi, surf)
    view = get_view(brain)

    extra, subj_dir = _get_extra()
    brain.add_label("BA1" + extra)
    check_view(brain, view)
    brain.add_label("BA1" + extra, color="blue", scalar_thresh=.5)
    label_file = pjoin(subj_dir, subject_id,
                       "label", "%s.MT%s.label" % (hemi, extra))
    brain.add_label(label_file)
    brain.add_label("BA44" + extra, borders=True)
    brain.add_label("BA6" + extra, alpha=.7)
    brain.show_view("medial")
    brain.add_label("V1" + extra, color="steelblue", alpha=.6)
    brain.add_label("V2" + extra, color="#FF6347", alpha=.6)
    brain.add_label("entorhinal" + extra, color=(.2, 1, .5), alpha=.6)
    brain.set_surf('white')
    brain.show_view(dict(elevation=40, distance=430), distance=430)
    with pytest.raises(ValueError, match='!='):
        brain.show_view(dict(elevation=40, distance=430), distance=431)

    # remove labels
    brain.remove_labels('V1' + extra)
    assert 'V2' + extra in brain.labels_dict
    assert 'V1' + extra not in brain.labels_dict
    brain.remove_labels()
    assert 'V2' + extra not in brain.labels_dict

    brain.close()
@requires_fsaverage()
def test_meg_inverse():
    """Test plotting of MEG inverse solution."""
    _set_backend()
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    vertices = stc['vertices']
    colormap = 'hot'
    data = stc['data']
    # vector-valued variant: scale the normals by the scalar data
    data_full = (brain.geo['lh'].nn[vertices][..., np.newaxis] *
                 data[:, np.newaxis])
    time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'],
                       data.shape[1], endpoint=False)

    def time_label(t):
        return 'time=%0.2f ms' % (1e3 * t)

    for use_data in (data, data_full):
        brain.add_data(use_data, colormap=colormap, vertices=vertices,
                       smoothing_steps=1, time=time, time_label=time_label)

    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
    assert brain.data_dict['lh']['time_idx'] == 0

    brain.set_time(.1)
    assert brain.data_dict['lh']['time_idx'] == 2
    # viewer = TimeViewer(brain)

    # multiple data layers
    pytest.raises(ValueError, brain.add_data, data, vertices=vertices,
                  time=time[:-1])
    brain.add_data(data, colormap=colormap, vertices=vertices,
                   smoothing_steps=1, time=time, time_label=time_label,
                   initial_time=.09)
    assert brain.data_dict['lh']['time_idx'] == 1
    data_dicts = brain._data_dicts['lh']
    assert len(data_dicts) == 3
    assert data_dicts[0]['time_idx'] == 1
    assert data_dicts[1]['time_idx'] == 1

    # shift time in both layers
    brain.set_data_time_index(0)
    assert data_dicts[0]['time_idx'] == 0
    assert data_dicts[1]['time_idx'] == 0
    brain.set_data_smoothing_steps(2)

    # add second data-layer without time axis
    brain.add_data(data[:, 1], colormap=colormap, vertices=vertices,
                   smoothing_steps=2)
    brain.set_data_time_index(2)
    assert len(data_dicts) == 4

    # change surface
    brain.set_surf('white')

    # remove all layers
    brain.remove_data()
    assert brain._data_dicts['lh'] == []

    brain.close()


@requires_fsaverage()
def test_morphometry():
    """Test plotting of morphometry."""
    _set_backend()
    brain = Brain(*std_args)
    brain.add_morphometry("curv")
    brain.add_morphometry("sulc", grayscale=True)
    brain.add_morphometry("thickness")
    brain.close()


@requires_imageio()
@requires_fsaverage()
def test_movie(tmpdir):
    """Test saving a movie of an MEG inverse solution."""
    import imageio
    if sys.version_info < (3,):
        raise SkipTest('imageio ffmpeg requires Python 3')
    # create and setup the Brain instance
    _set_backend()
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    data = stc['data']
    time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(data, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=time, time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    # save movies with different options
    dst = str(tmpdir.join('test.mov'))
    # test the number of frames in the movie
    brain.save_movie(dst)
    frames = imageio.mimread(dst)
    assert len(frames) == 2
    brain.save_movie(dst, time_dilation=10)
    frames = imageio.mimread(dst)
    assert len(frames) == 7
    brain.save_movie(dst, tmin=0.081, tmax=0.102)
    frames = imageio.mimread(dst)
    assert len(frames) == 2
    brain.close()


@requires_fsaverage()
def test_overlay():
    """Test plotting of overlay."""
    _set_backend()
    # basic overlay support
    overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
    brain = Brain(*std_args)
    brain.add_overlay(overlay_file)
    brain.overlays["sig"].remove()
    brain.add_overlay(overlay_file, min=5, max=20, sign="pos", opacity=0.7)
    sig1 = io.read_scalar_data(pjoin(data_dir, "lh.sig.nii.gz"))
    sig2 = io.read_scalar_data(pjoin(data_dir, "lh.alt_sig.nii.gz"))

    # two-sided overlay
    brain.add_overlay(sig1, 4, 30, name="two-sided")
    overlay = brain.overlays_dict.pop('two-sided')[0]
    assert_array_equal(overlay.pos_bar.data_range, [4, 30])
    assert_array_equal(overlay.neg_bar.data_range, [-30, -4])
    assert overlay.pos_bar.reverse_lut
    assert not overlay.neg_bar.reverse_lut
    overlay.remove()

    thresh = 4
    sig1[sig1 < thresh] = 0
    sig2[sig2 < thresh] = 0

    conjunct = np.min(np.vstack((sig1, sig2)), axis=0)
    brain.add_overlay(sig1, 4, 30, name="sig1")
    brain.overlays["sig1"].pos_bar.lut_mode = "Reds"
    brain.overlays["sig1"].pos_bar.visible = False

    brain.add_overlay(sig2, 4, 30, name="sig2")
    brain.overlays["sig2"].pos_bar.lut_mode = "Blues"
    brain.overlays["sig2"].pos_bar.visible = False

    brain.add_overlay(conjunct, 4, 30, name="conjunct")
    brain.overlays["conjunct"].pos_bar.lut_mode = "Purples"
    brain.overlays["conjunct"].pos_bar.visible = False

    brain.set_surf('white')

    brain.close()


@requires_fsaverage()
def test_probabilistic_labels():
    """Test plotting of probabilistic labels."""
    _set_backend()
    brain = Brain("fsaverage", "lh", "inflated",
                  cortex="low_contrast")

    extra, subj_dir = _get_extra()
    brain.add_label("BA1" + extra, color="darkblue")
    brain.add_label("BA1" + extra, color="dodgerblue", scalar_thresh=.5)
    brain.add_label("BA45" + extra, color="firebrick", borders=True)
    brain.add_label("BA45" + extra, color="salmon", borders=True,
                    scalar_thresh=.5)

    label_file = pjoin(subj_dir, "fsaverage", "label",
                       "lh.BA6%s.label" % (extra,))
    prob_field = np.zeros_like(brain.geo['lh'].x)
    ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True)
    prob_field[ids] = probs
    brain.add_data(prob_field, thresh=1e-5)

    brain.data["colorbar"].number_of_colors = 10
    brain.data["colorbar"].number_of_labels = 11
    brain.close()
| brain = Brain("fsaverage", "lh", "inflated", 464 | cortex="low_contrast") 465 | 466 | extra, subj_dir = _get_extra() 467 | brain.add_label("BA1" + extra, color="darkblue") 468 | brain.add_label("BA1" + extra, color="dodgerblue", scalar_thresh=.5) 469 | brain.add_label("BA45" + extra, color="firebrick", borders=True) 470 | brain.add_label("BA45" + extra, color="salmon", borders=True, 471 | scalar_thresh=.5) 472 | 473 | label_file = pjoin(subj_dir, "fsaverage", "label", 474 | "lh.BA6%s.label" % (extra,)) 475 | prob_field = np.zeros_like(brain.geo['lh'].x) 476 | ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True) 477 | prob_field[ids] = probs 478 | brain.add_data(prob_field, thresh=1e-5) 479 | 480 | brain.data["colorbar"].number_of_colors = 10 481 | brain.data["colorbar"].number_of_labels = 11 482 | brain.close() 483 | 484 | 485 | @requires_fsaverage() 486 | def test_text(): 487 | """Test plotting of text.""" 488 | _set_backend('test') 489 | brain = Brain(*std_args) 490 | brain.add_text(0.1, 0.1, 'Hello', 'blah') 491 | brain.close() 492 | 493 | 494 | @requires_fsaverage() 495 | def test_animate(tmpdir): 496 | """Test animation.""" 497 | _set_backend('auto') 498 | brain = Brain(*std_args, size=100) 499 | brain.add_morphometry('curv') 500 | tmp_name = str(tmpdir.join('test.avi')) 501 | brain.animate(["m"] * 3, n_steps=2) 502 | brain.animate(['l', 'l'], n_steps=2, fname=tmp_name) 503 | # can't rotate in axial plane 504 | pytest.raises(ValueError, brain.animate, ['l', 'd']) 505 | brain.close() 506 | 507 | 508 | @requires_fsaverage() 509 | def test_views(): 510 | """Test showing different views.""" 511 | _set_backend('test') 512 | brain = Brain(*std_args) 513 | brain.show_view('lateral') 514 | brain.show_view('m') 515 | brain.show_view('rostral') 516 | brain.show_view('caudal') 517 | brain.show_view('ve') 518 | brain.show_view('frontal') 519 | brain.show_view('par') 520 | brain.show_view('dor') 521 | brain.show_view({'distance': 432}) 522 | 
brain.show_view({'azimuth': 135, 'elevation': 79}, roll=107) 523 | brain.close() 524 | -------------------------------------------------------------------------------- /surfer/utils.py: -------------------------------------------------------------------------------- 1 | try: 2 | from collections.abc import Sequence 3 | except ImportError: # Py 2.7 4 | from collections import Sequence 5 | from distutils.version import LooseVersion 6 | import logging 7 | import warnings 8 | import sys 9 | import os 10 | from os import path as op 11 | import inspect 12 | from functools import wraps 13 | 14 | import mayavi 15 | from mayavi import mlab 16 | from mayavi.filters.api import Threshold 17 | import numpy as np 18 | import nibabel as nib 19 | from scipy import sparse 20 | from scipy.spatial.distance import cdist 21 | import matplotlib as mpl 22 | from matplotlib import cm as mpl_cm 23 | from . import cm as surfer_cm 24 | 25 | logger = logging.getLogger('surfer') 26 | 27 | 28 | # Py3k compat 29 | if sys.version[0] == '2': 30 | string_types = basestring # noqa, analysis:ignore 31 | else: 32 | string_types = str 33 | 34 | 35 | if LooseVersion(mayavi.__version__) == LooseVersion('4.5.0'): 36 | # Monkey-patch Mayavi 4.5: 37 | # In Mayavi 4.5, filters seem to be missing a .point_data attribute that 38 | # Threshold accesses on initialization. 
39 | _orig_meth = Threshold._get_data_range 40 | 41 | def _patch_func(): 42 | return [] 43 | 44 | def _patch_meth(self): 45 | return [] 46 | 47 | class _MayaviThresholdPatch(object): 48 | 49 | def __enter__(self): 50 | Threshold._get_data_range = _patch_meth 51 | 52 | def __exit__(self, exc_type, exc_val, exc_tb): 53 | Threshold._get_data_range = _orig_meth 54 | 55 | _mayavi_threshold_patch = _MayaviThresholdPatch() 56 | 57 | def threshold_filter(*args, **kwargs): 58 | with _mayavi_threshold_patch: 59 | thresh = mlab.pipeline.threshold(*args, **kwargs) 60 | thresh._get_data_range = _patch_func 61 | return thresh 62 | else: 63 | threshold_filter = mlab.pipeline.threshold 64 | 65 | 66 | class Surface(object): 67 | """Container for surface object 68 | 69 | Attributes 70 | ---------- 71 | subject_id : string 72 | Name of subject 73 | hemi : {'lh', 'rh'} 74 | Which hemisphere to load 75 | surf : string 76 | Name of the surface to load (eg. inflated, orig ...) 77 | subjects_dir : str | None 78 | If not None, this directory will be used as the subjects directory 79 | instead of the value set using the SUBJECTS_DIR environment variable. 80 | offset : float | None 81 | If float, align inside edge of each hemisphere to center + offset. 82 | If None, do not change coordinates (default). 83 | units : str 84 | Can be 'm' or 'mm' (default). 85 | """ 86 | 87 | def __init__(self, subject_id, hemi, surf, subjects_dir=None, 88 | offset=None, units='mm'): 89 | """Surface 90 | 91 | Parameters 92 | ---------- 93 | subject_id : string 94 | Name of subject 95 | hemi : {'lh', 'rh'} 96 | Which hemisphere to load 97 | surf : string 98 | Name of the surface to load (eg. inflated, orig ...) 99 | offset : float | None 100 | If 0.0, the surface will be offset such that the medial 101 | wall is aligned with the origin. If None, no offset will 102 | be applied. If != 0.0, an additional offset will be used. 
103 | """ 104 | if hemi not in ['lh', 'rh']: 105 | raise ValueError('hemi must be "lh" or "rh') 106 | self.subject_id = subject_id 107 | self.hemi = hemi 108 | self.surf = surf 109 | self.offset = offset 110 | self.coords = None 111 | self.faces = None 112 | self.nn = None 113 | self.units = _check_units(units) 114 | 115 | subjects_dir = _get_subjects_dir(subjects_dir) 116 | self.data_path = op.join(subjects_dir, subject_id) 117 | 118 | def load_geometry(self): 119 | surf_path = op.join(self.data_path, "surf", 120 | "%s.%s" % (self.hemi, self.surf)) 121 | coords, faces = nib.freesurfer.read_geometry(surf_path) 122 | if self.units == 'm': 123 | coords /= 1000. 124 | if self.offset is not None: 125 | if self.hemi == 'lh': 126 | coords[:, 0] -= (np.max(coords[:, 0]) + self.offset) 127 | else: 128 | coords[:, 0] -= (np.min(coords[:, 0]) + self.offset) 129 | nn = _compute_normals(coords, faces) 130 | 131 | if self.coords is None: 132 | self.coords = coords 133 | self.faces = faces 134 | self.nn = nn 135 | else: 136 | self.coords[:] = coords 137 | self.faces[:] = faces 138 | self.nn[:] = nn 139 | 140 | @property 141 | def x(self): 142 | return self.coords[:, 0] 143 | 144 | @property 145 | def y(self): 146 | return self.coords[:, 1] 147 | 148 | @property 149 | def z(self): 150 | return self.coords[:, 2] 151 | 152 | def load_curvature(self): 153 | """Load in curvature values from the ?h.curv file.""" 154 | curv_path = op.join(self.data_path, "surf", "%s.curv" % self.hemi) 155 | self.curv = nib.freesurfer.read_morph_data(curv_path) 156 | self.bin_curv = np.array(self.curv > 0, int) 157 | 158 | def load_label(self, name): 159 | """Load in a Freesurfer .label file. 160 | 161 | Label files are just text files indicating the vertices included 162 | in the label. Each Surface instance has a dictionary of labels, keyed 163 | by the name (which is taken from the file name if not given as an 164 | argument. 
165 | 166 | """ 167 | label = nib.freesurfer.read_label(op.join(self.data_path, 'label', 168 | '%s.%s.label' % (self.hemi, name))) 169 | label_array = np.zeros(len(self.x), int) 170 | label_array[label] = 1 171 | try: 172 | self.labels[name] = label_array 173 | except AttributeError: 174 | self.labels = {name: label_array} 175 | 176 | def apply_xfm(self, mtx): 177 | """Apply an affine transformation matrix to the x,y,z vectors.""" 178 | self.coords = np.dot(np.c_[self.coords, np.ones(len(self.coords))], 179 | mtx.T)[:, :3] 180 | 181 | 182 | def _fast_cross_3d(x, y): 183 | """Compute cross product between list of 3D vectors 184 | 185 | Much faster than np.cross() when the number of cross products 186 | becomes large (>500). This is because np.cross() methods become 187 | less memory efficient at this stage. 188 | 189 | Parameters 190 | ---------- 191 | x : array 192 | Input array 1. 193 | y : array 194 | Input array 2. 195 | 196 | Returns 197 | ------- 198 | z : array 199 | Cross product of x and y. 200 | 201 | Notes 202 | ----- 203 | x and y must both be 2D row vectors. One must have length 1, or both 204 | lengths must match. 
205 | """ 206 | assert x.ndim == 2 207 | assert y.ndim == 2 208 | assert x.shape[1] == 3 209 | assert y.shape[1] == 3 210 | assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0] 211 | if max([x.shape[0], y.shape[0]]) >= 500: 212 | return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1], 213 | x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2], 214 | x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]] 215 | else: 216 | return np.cross(x, y) 217 | 218 | 219 | def _compute_normals(rr, tris): 220 | """Efficiently compute vertex normals for triangulated surface""" 221 | # first, compute triangle normals 222 | r1 = rr[tris[:, 0], :] 223 | r2 = rr[tris[:, 1], :] 224 | r3 = rr[tris[:, 2], :] 225 | tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1)) 226 | 227 | # Triangle normals and areas 228 | size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1)) 229 | zidx = np.where(size == 0)[0] 230 | size[zidx] = 1.0 # prevent ugly divide-by-zero 231 | tri_nn /= size[:, np.newaxis] 232 | 233 | npts = len(rr) 234 | 235 | # the following code replaces this, but is faster (vectorized): 236 | # 237 | # for p, verts in enumerate(tris): 238 | # nn[verts, :] += tri_nn[p, :] 239 | # 240 | nn = np.zeros((npts, 3)) 241 | for verts in tris.T: # note this only loops 3x (number of verts per tri) 242 | for idx in range(3): # x, y, z 243 | nn[:, idx] += np.bincount(verts, tri_nn[:, idx], minlength=npts) 244 | size = np.sqrt(np.sum(nn * nn, axis=1)) 245 | size[size == 0] = 1.0 # prevent ugly divide-by-zero 246 | nn /= size[:, np.newaxis] 247 | return nn 248 | 249 | 250 | ############################################################################### 251 | # LOGGING (courtesy of mne-python) 252 | 253 | def set_log_level(verbose=None, return_old_level=False): 254 | """Convenience function for setting the logging level 255 | 256 | Parameters 257 | ---------- 258 | verbose : bool, str, int, or None 259 | The verbosity of messages to print. If a str, it can be either DEBUG, 260 | INFO, WARNING, ERROR, or CRITICAL. 
Note that these are for 261 | convenience and are equivalent to passing in logging.DEBUG, etc. 262 | For bool, True is the same as 'INFO', False is the same as 'WARNING'. 263 | If None, the environment variable MNE_LOG_LEVEL is read, and if 264 | it doesn't exist, defaults to INFO. 265 | return_old_level : bool 266 | If True, return the old verbosity level. 267 | """ 268 | if verbose is None: 269 | verbose = "INFO" 270 | elif isinstance(verbose, bool): 271 | if verbose is True: 272 | verbose = 'INFO' 273 | else: 274 | verbose = 'WARNING' 275 | if isinstance(verbose, string_types): 276 | verbose = verbose.upper() 277 | logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO, 278 | WARNING=logging.WARNING, ERROR=logging.ERROR, 279 | CRITICAL=logging.CRITICAL) 280 | if verbose not in logging_types: 281 | raise ValueError('verbose must be of a valid type') 282 | verbose = logging_types[verbose] 283 | old_verbose = logger.level 284 | logger.setLevel(verbose) 285 | return (old_verbose if return_old_level else None) 286 | 287 | 288 | class WrapStdOut(object): 289 | """Ridiculous class to work around how doctest captures stdout""" 290 | def __getattr__(self, name): 291 | # Even more ridiculous than this class, this must be sys.stdout (not 292 | # just stdout) in order for this to work (tested on OSX and Linux) 293 | return getattr(sys.stdout, name) 294 | 295 | 296 | def set_log_file(fname=None, output_format='%(message)s', overwrite=None): 297 | """Convenience function for setting the log to print to a file 298 | 299 | Parameters 300 | ---------- 301 | fname : str, or None 302 | Filename of the log to print to. If None, stdout is used. 303 | To suppress log outputs, use set_log_level('WARN'). 304 | output_format : str 305 | Format of the output messages. See the following for examples: 306 | http://docs.python.org/dev/howto/logging.html 307 | e.g., "%(asctime)s - %(levelname)s - %(message)s". 308 | overwrite : bool, or None 309 | Overwrite the log file (if it exists). 
Otherwise, statements 310 | will be appended to the log (default). None is the same as False, 311 | but additionally raises a warning to notify the user that log 312 | entries will be appended. 313 | """ 314 | handlers = logger.handlers 315 | for h in handlers: 316 | if isinstance(h, logging.FileHandler): 317 | h.close() 318 | logger.removeHandler(h) 319 | if fname is not None: 320 | if op.isfile(fname) and overwrite is None: 321 | warnings.warn('Log entries will be appended to the file. Use ' 322 | 'overwrite=False to avoid this message in the ' 323 | 'future.') 324 | mode = 'w' if overwrite is True else 'a' 325 | lh = logging.FileHandler(fname, mode=mode) 326 | else: 327 | """ we should just be able to do: 328 | lh = logging.StreamHandler(sys.stdout) 329 | but because doctests uses some magic on stdout, we have to do this: 330 | """ 331 | lh = logging.StreamHandler(WrapStdOut()) 332 | 333 | lh.setFormatter(logging.Formatter(output_format)) 334 | # actually add the stream handler 335 | logger.addHandler(lh) 336 | 337 | 338 | if hasattr(inspect, 'signature'): # py35 339 | def _get_args(function, varargs=False): 340 | params = inspect.signature(function).parameters 341 | args = [key for key, param in params.items() 342 | if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)] 343 | if varargs: 344 | varargs = [param.name for param in params.values() 345 | if param.kind == param.VAR_POSITIONAL] 346 | if len(varargs) == 0: 347 | varargs = None 348 | return args, varargs 349 | else: 350 | return args 351 | else: 352 | def _get_args(function, varargs=False): 353 | out = inspect.getargspec(function) # args, varargs, keywords, defaults 354 | if varargs: 355 | return out[:2] 356 | else: 357 | return out[0] 358 | 359 | 360 | def verbose(function): 361 | """Decorator to allow functions to override default log level 362 | 363 | Do not call this function directly to set the global verbosity level, 364 | instead use set_log_level(). 
365 | 366 | Parameters (to decorated function) 367 | ---------------------------------- 368 | verbose : bool, str, int, or None 369 | The level of messages to print. If a str, it can be either DEBUG, 370 | INFO, WARNING, ERROR, or CRITICAL. Note that these are for 371 | convenience and are equivalent to passing in logging.DEBUG, etc. 372 | For bool, True is the same as 'INFO', False is the same as 'WARNING'. 373 | None defaults to using the current log level [e.g., set using 374 | mne.set_log_level()]. 375 | """ 376 | arg_names = _get_args(function) 377 | # this wrap allows decorated functions to be pickled (e.g., for parallel) 378 | 379 | @wraps(function) 380 | def dec(*args, **kwargs): 381 | # Check if the first arg is "self", if it has verbose, make it default 382 | if len(arg_names) > 0 and arg_names[0] == 'self': 383 | default_level = getattr(args[0], 'verbose', None) 384 | else: 385 | default_level = None 386 | verbose_level = kwargs.get('verbose', default_level) 387 | if verbose_level is not None: 388 | old_level = set_log_level(verbose_level, True) 389 | # set it back if we get an exception 390 | try: 391 | ret = function(*args, **kwargs) 392 | except Exception: 393 | set_log_level(old_level) 394 | raise 395 | set_log_level(old_level) 396 | return ret 397 | else: 398 | return function(*args, **kwargs) 399 | 400 | # set __wrapped__ attribute so ?? in IPython gets the right source 401 | dec.__wrapped__ = function 402 | 403 | return dec 404 | 405 | 406 | ############################################################################### 407 | # USEFUL FUNCTIONS 408 | 409 | def _check_units(units): 410 | if units not in ('m', 'mm'): 411 | raise ValueError('Units must be "m" or "mm", got %r' % (units,)) 412 | return units 413 | 414 | 415 | def find_closest_vertices(surface_coords, point_coords): 416 | """Return the vertices on a surface mesh closest to some given coordinates. 417 | 418 | The distance metric used is Euclidian distance. 
419 | 420 | Parameters 421 | ---------- 422 | surface_coords : numpy array 423 | Array of coordinates on a surface mesh 424 | point_coords : numpy array 425 | Array of coordinates to map to vertices 426 | 427 | Returns 428 | ------- 429 | closest_vertices : numpy array 430 | Array of mesh vertex ids 431 | 432 | """ 433 | point_coords = np.atleast_2d(point_coords) 434 | return np.argmin(cdist(surface_coords, point_coords), axis=0) 435 | 436 | 437 | def tal_to_mni(coords, units='mm'): 438 | """Convert Talairach coords to MNI using the Lancaster transform. 439 | 440 | Parameters 441 | ---------- 442 | coords : n x 3 numpy array 443 | Array of Talairach coordinates 444 | units : str 445 | Can be 'm' or 'mm' (default). 446 | 447 | Returns 448 | ------- 449 | mni_coords : n x 3 numpy array 450 | Array of coordinates converted to MNI space. 451 | """ 452 | coords = np.atleast_2d(coords) 453 | xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816], 454 | [0.00640, 1.05741, 0.08566, 1.16824], 455 | [-0.01281, -0.08863, 1.10792, -4.17805], 456 | [0.00000, 0.00000, 0.00000, 1.00000]]) 457 | units = _check_units(units) 458 | if units == 'm': 459 | xfm[:3, 3] /= 1000. 
460 | mni_coords = np.dot(np.c_[coords, np.ones(coords.shape[0])], xfm.T)[:, :3] 461 | return mni_coords 462 | 463 | 464 | def mesh_edges(faces): 465 | """Returns sparse matrix with edges as an adjacency matrix 466 | 467 | Parameters 468 | ---------- 469 | faces : array of shape [n_triangles x 3] 470 | The mesh faces 471 | 472 | Returns 473 | ------- 474 | edges : sparse matrix 475 | The adjacency matrix 476 | """ 477 | npoints = np.max(faces) + 1 478 | nfaces = len(faces) 479 | a, b, c = faces.T 480 | edges = sparse.coo_matrix((np.ones(nfaces), (a, b)), 481 | shape=(npoints, npoints)) 482 | edges = edges + sparse.coo_matrix((np.ones(nfaces), (b, c)), 483 | shape=(npoints, npoints)) 484 | edges = edges + sparse.coo_matrix((np.ones(nfaces), (c, a)), 485 | shape=(npoints, npoints)) 486 | edges = edges + edges.T 487 | edges = edges.tocoo() 488 | return edges 489 | 490 | 491 | def create_color_lut(cmap, n_colors=256, center=None): 492 | """Return a colormap suitable for setting as a Mayavi LUT. 493 | 494 | Parameters 495 | ---------- 496 | cmap : string, list of colors, n x 3 or n x 4 array 497 | Input colormap definition. This can be the name of a matplotlib 498 | colormap, a list of valid matplotlib colors, or a suitable 499 | mayavi LUT (possibly missing the alpha channel). 500 | 501 | if value is "auto", a default sequential or divergent colormap is 502 | returned 503 | n_colors : int, optional 504 | Number of colors in the resulting LUT. This is ignored if cmap 505 | is a 2d array. 
506 | center : double, optional 507 | indicates whether desired colormap should be for divergent values, 508 | currently only used to select default colormap for cmap='auto' 509 | 510 | Returns 511 | ------- 512 | lut : n_colors x 4 integer array 513 | Color LUT suitable for passing to mayavi 514 | """ 515 | if isinstance(cmap, np.ndarray): 516 | if np.ndim(cmap) == 2: 517 | if cmap.shape[1] == 4: 518 | # This looks likes a LUT that's ready to go 519 | lut = cmap.astype(int) 520 | elif cmap.shape[1] == 3: 521 | # This looks like a LUT, but it's missing the alpha channel 522 | alpha = np.ones(len(cmap), int) * 255 523 | lut = np.c_[cmap, alpha] 524 | 525 | return lut 526 | 527 | # choose default colormaps (REMEMBER to change doc, e.g., in 528 | # Brain.add_data, when changing these defaults) 529 | if isinstance(cmap, string_types) and cmap == "auto": 530 | if center is None: 531 | cmap = "rocket" 532 | else: 533 | cmap = "icefire" 534 | 535 | surfer_cmaps = ["rocket", "mako", "icefire", "vlag"] 536 | surfer_cmaps += [name + "_r" for name in surfer_cmaps] 537 | 538 | if not isinstance(cmap, string_types) and isinstance(cmap, Sequence): 539 | colors = list(map(mpl.colors.colorConverter.to_rgba, cmap)) 540 | cmap = mpl.colors.ListedColormap(colors) 541 | elif cmap in surfer_cmaps: 542 | cmap = getattr(surfer_cm, cmap) 543 | else: 544 | try: 545 | # Try to get a named matplotlib colormap 546 | # This will also pass Colormap object back out 547 | cmap = mpl_cm.get_cmap(cmap) 548 | except (TypeError, ValueError): 549 | # If we get here, it's a bad input 550 | # but don't raise the matplotlib error as it is less accurate 551 | raise ValueError("Input %r was not valid for making a lut" % cmap) 552 | 553 | # Convert from a matplotlib colormap to a lut array 554 | lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(int) 555 | 556 | return lut 557 | 558 | 559 | @verbose 560 | def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None): 561 | """Create a 
smoothing matrix which can be used to interpolate data defined 562 | for a subset of vertices onto mesh with an adjancency matrix given by 563 | adj_mat. 564 | 565 | If smoothing_steps is None, as many smoothing steps are applied until 566 | the whole mesh is filled with with non-zeros. Only use this option if 567 | the vertices correspond to a subsampled version of the mesh. 568 | 569 | Parameters 570 | ---------- 571 | vertices : 1d array 572 | vertex indices 573 | adj_mat : sparse matrix 574 | N x N adjacency matrix of the full mesh 575 | smoothing_steps : int or None 576 | number of smoothing steps (Default: 20) 577 | verbose : bool, str, int, or None 578 | If not None, override default verbose level (see surfer.verbose). 579 | 580 | Returns 581 | ------- 582 | smooth_mat : sparse matrix 583 | smoothing matrix with size N x len(vertices) 584 | """ 585 | if smoothing_steps == 'nearest': 586 | mat = _nearest(vertices, adj_mat) 587 | else: 588 | mat = _smooth(vertices, adj_mat, smoothing_steps) 589 | return mat 590 | 591 | 592 | def _nearest(vertices, adj_mat): 593 | import scipy 594 | from scipy.sparse.csgraph import dijkstra 595 | if LooseVersion(scipy.__version__) < LooseVersion('1.3'): 596 | raise RuntimeError('smoothing_steps="nearest" requires SciPy >= 1.3') 597 | # Vertices can be out of order, so sort them to start ... 598 | order = np.argsort(vertices) 599 | vertices = vertices[order] 600 | _, _, sources = dijkstra(adj_mat, False, indices=vertices, min_only=True, 601 | return_predecessors=True) 602 | col = np.searchsorted(vertices, sources) 603 | # ... then get things back to the correct configuration. 
604 | col = order[col] 605 | row = np.arange(len(col)) 606 | data = np.ones(len(col)) 607 | mat = sparse.coo_matrix((data, (row, col))) 608 | assert mat.shape == (adj_mat.shape[0], len(vertices)), mat.shape 609 | return mat 610 | 611 | 612 | def _smooth(vertices, adj_mat, smoothing_steps): 613 | from scipy import sparse 614 | logger.debug("Updating smoothing matrix, be patient..") 615 | e = adj_mat.copy() 616 | e.data[e.data == 2] = 1 617 | n_vertices = e.shape[0] 618 | e = e + sparse.eye(n_vertices, n_vertices) 619 | idx_use = vertices 620 | smooth_mat = 1.0 621 | n_iter = smoothing_steps if smoothing_steps is not None else 1000 622 | for k in range(n_iter): 623 | e_use = e[:, idx_use] 624 | 625 | data1 = e_use * np.ones(len(idx_use)) 626 | idx_use = np.where(data1)[0] 627 | scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0), 628 | shape=(len(idx_use), len(idx_use))) 629 | 630 | smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat 631 | 632 | logger.debug("Smoothing matrix creation, step %d" % (k + 1)) 633 | if smoothing_steps is None and len(idx_use) >= n_vertices: 634 | break 635 | 636 | # Make sure the smoothing matrix has the right number of rows 637 | # and is in COO format 638 | smooth_mat = smooth_mat.tocoo() 639 | smooth_mat = sparse.coo_matrix((smooth_mat.data, 640 | (idx_use[smooth_mat.row], 641 | smooth_mat.col)), 642 | shape=(n_vertices, 643 | len(vertices))) 644 | 645 | return smooth_mat 646 | 647 | 648 | @verbose 649 | def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30, 650 | map_surface='white', coord_as_vert=False, units='mm', 651 | verbose=None): 652 | """Create label from MNI coordinate 653 | 654 | Parameters 655 | ---------- 656 | subject_id : string 657 | Use if file is in register with subject's orig.mgz 658 | coord : numpy array of size 3 | int 659 | One coordinate in MNI space or the vertex index. 
660 | label : str 661 | Label name 662 | hemi : [lh, rh] 663 | Hemisphere target 664 | n_steps : int 665 | Number of dilation iterations 666 | map_surface : str 667 | The surface name used to find the closest point 668 | coord_as_vert : bool 669 | whether the coords parameter should be interpreted as vertex ids 670 | units : str 671 | Can be 'm' or 'mm' (default). 672 | verbose : bool, str, int, or None 673 | If not None, override default verbose level (see surfer.verbose). 674 | """ 675 | geo = Surface(subject_id, hemi, map_surface, units=units) 676 | geo.load_geometry() 677 | 678 | coords = geo.coords 679 | # work in mm from here on 680 | if geo.units == 'm': 681 | coords = coords * 1000 682 | if coord_as_vert: 683 | coord = coords[coord] 684 | 685 | n_vertices = len(coords) 686 | adj_mat = mesh_edges(geo.faces) 687 | foci_vtxs = find_closest_vertices(coords, [coord]) 688 | data = np.zeros(n_vertices) 689 | data[foci_vtxs] = 1. 690 | smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1) 691 | for _ in range(n_steps): 692 | data = smooth_mat * data 693 | idx = np.where(data.ravel() > 0)[0] 694 | # Write label 695 | label_fname = label + '-' + hemi + '.label' 696 | logger.debug("Saving label : %s" % label_fname) 697 | f = open(label_fname, 'w') 698 | f.write('#label at %s from subject %s\n' % (coord, subject_id)) 699 | f.write('%d\n' % len(idx)) 700 | for i in idx: 701 | x, y, z = coords[i] 702 | f.write('%d %f %f %f 0.000000\n' % (i, x, y, z)) 703 | 704 | 705 | def _get_subjects_dir(subjects_dir=None, raise_error=True): 706 | """Get the subjects directory from parameter or environment variable 707 | 708 | Parameters 709 | ---------- 710 | subjects_dir : str | None 711 | The subjects directory. 712 | raise_error : bool 713 | If True, raise a ValueError if no value for SUBJECTS_DIR can be found 714 | or the corresponding directory does not exist. 715 | 716 | Returns 717 | ------- 718 | subjects_dir : str 719 | The subjects directory. 
If the subjects_dir input parameter is not 720 | None, its value will be returned, otherwise it will be obtained from 721 | the SUBJECTS_DIR environment variable. 722 | """ 723 | if subjects_dir is None: 724 | subjects_dir = os.environ.get("SUBJECTS_DIR", "") 725 | if not subjects_dir and raise_error: 726 | raise ValueError('The subjects directory has to be specified ' 727 | 'using the subjects_dir parameter or the ' 728 | 'SUBJECTS_DIR environment variable.') 729 | 730 | if raise_error and not os.path.exists(subjects_dir): 731 | raise ValueError('The subjects directory %s does not exist.' 732 | % subjects_dir) 733 | 734 | return subjects_dir 735 | 736 | 737 | def has_fsaverage(subjects_dir=None, raise_error=True, return_why=False): 738 | """Determine whether the user has a usable fsaverage""" 739 | subjects_dir = _get_subjects_dir(subjects_dir, raise_error=raise_error) 740 | out = '' 741 | if not op.isdir(subjects_dir): 742 | out = 'SUBJECTS_DIR not found: %s' % (subjects_dir,) 743 | else: 744 | fs_dir = op.join(_get_subjects_dir(subjects_dir, False), 'fsaverage') 745 | surf_dir = op.join(fs_dir, 'surf') 746 | if not op.isdir(fs_dir): 747 | out = 'fsaverage not found in SUBJECTS_DIR: %s' % (fs_dir,) 748 | elif not op.isdir(surf_dir): 749 | out = 'fsaverage has no "surf" directory: %s' % (surf_dir,) 750 | out = (out == '', out) if return_why else (out == '') 751 | return out 752 | 753 | 754 | def requires_fsaverage(): 755 | import pytest 756 | has, why = has_fsaverage(raise_error=False, return_why=True) 757 | return pytest.mark.skipif( 758 | not has, reason='Requires fsaverage subject data (%s)' % why) 759 | 760 | 761 | def requires_imageio(): 762 | import pytest 763 | try: 764 | from imageio.plugins.ffmpeg import get_exe # noqa, analysis:ignore 765 | except ImportError: 766 | has = False 767 | else: 768 | has = True 769 | return pytest.mark.skipif(not has, reason="Requires imageio with ffmpeg") 770 | 771 | 772 | def requires_fs(): 773 | import pytest 774 | has = 
('FREESURFER_HOME' in os.environ) 775 | return pytest.mark.skipif( 776 | not has, reason='Requires FreeSurfer command line tools') 777 | 778 | 779 | def _get_extra(): 780 | # Get extra label for newer freesurfer 781 | subj_dir = _get_subjects_dir() 782 | fname = op.join(subj_dir, 'fsaverage', 'label', 'lh.BA1.label') 783 | return '_exvivo' if not op.isfile(fname) else '', subj_dir 784 | --------------------------------------------------------------------------------