├── .circleci └── config.yml ├── .gitignore ├── .travis.yml ├── Makefile ├── README.rst ├── continuous_integration ├── clean_output.sh ├── install.sh ├── install_spm.sh ├── install_spm12.sh └── setup_spm.sh ├── examples ├── README.md ├── easy_start │ ├── nipype_preproc_spm_auditory.py │ ├── nipype_preproc_spm_haxby.py │ ├── segmentation_demos.py │ └── spm_auditory_preproc.ini ├── pipelining │ ├── multimodal_faces_preproc.ini │ ├── nipype_preproc_spm_multimodal_faces.py │ ├── nistats_glm_fsl_feeds_fmri.py │ └── openfmri_preproc.py └── pure_python │ ├── coreg_demos.py │ ├── pure_python_preproc_demo.py │ ├── realign_demos.py │ └── slice_timing_demos.py ├── pypreprocess.py ├── pypreprocess ├── __init__.py ├── affine_transformations.py ├── cluster_level_analysis.py ├── conf_parser.py ├── configure_spm.py ├── coreg.py ├── datasets.py ├── external │ ├── __init__.py │ ├── nistats │ │ ├── __init__.py │ │ ├── design_matrix.py │ │ ├── experimental_paradigm.py │ │ ├── glm.py │ │ ├── hemodynamic_models.py │ │ ├── model.py │ │ ├── regression.py │ │ └── utils.py │ └── tempita │ │ ├── __init__.py │ │ ├── _looper.py │ │ ├── _tempita.py │ │ └── compat3.py ├── fsl_to_nistats.py ├── histograms.py ├── io_utils.py ├── kernel_smooth.py ├── nipype_preproc_fsl_utils.py ├── nipype_preproc_spm_utils.py ├── openfmri.py ├── purepython_preproc_utils.py ├── realign.py ├── reporting │ ├── README.md │ ├── __init__.py │ ├── check_preprocessing.py │ ├── pypreproc_reporter.py │ └── template_reports │ │ ├── log_link_template.html │ │ ├── log_sub_template.html │ │ ├── log_template.html │ │ ├── report_sub_template.html │ │ └── report_template.html ├── reslice.py ├── slice_timing.py ├── spm_loader │ ├── __init__.py │ ├── spm.py │ └── utils.py ├── subject_data.py ├── tests │ ├── __init__.py │ ├── _test_utils.py │ ├── test_affine_transformations.py │ ├── test_cluster_level_analysis.py │ ├── test_conf_parser.py │ ├── test_configure_spm.py │ ├── test_coreg.py │ ├── test_data │ │ ├── some_anat.mat │ │ ├── spm_hist2_args_1.mat │ │ ├── spm_hist2_args_2.mat │ │ └── spmmmfmri.mat │ ├── test_histograms.py │ ├── test_io_utils.py │ ├── test_kernel_smooth.py │ ├── test_realign.py │ ├── test_reslice.py │ ├── test_slice_timing.py │ ├── test_subject_data.py │ ├── test_tsdiffana.py │ └── test_version.py ├── time_diff.py └── version.py ├── scripts ├── HCP.ini ├── HCP_tfMRI_MOTOR_preproc.ini ├── abide_preproc.py └── hcp_preproc_and_analysis.py ├── setup.cfg ├── setup.py └── spike ├── glm_utils.py └── sprint.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | build: 5 | docker: 6 | - image: circleci/buildpack-deps:16.04 7 | parallelism: 1 8 | environment: 9 | DISTRIB: "conda" 10 | PYTHON_VERSION: "*" 11 | NUMPY_VERSION: "*" 12 | SCIPY_VERSION: "*" 13 | SCIKIT_LEARN_VERSION: "*" 14 | MATPLOTLIB_VERSION: "*" 15 | NIPYPE_VERSION: "*" 16 | 17 | steps: 18 | - checkout 19 | # Get rid of existing virtualenvs on circle ci as they conflict with conda. 20 | # Trick found here: 21 | # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 22 | - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs 23 | # We need to remove conflicting texlive packages. 
24 | - run: sudo -E apt-get -yq remove texlive-binaries --purge 25 | - restore_cache: 26 | key: spm12+datasets+pypi-{{ .Branch }} 27 | 28 | - run: 29 | name: Download & install conda if absent 30 | command: | 31 | if 32 | ls $HOME/miniconda3/bin | grep conda -q 33 | then 34 | echo "(Mini)Conda already present from the cache." 35 | else 36 | wget https://repo.continuum.io/miniconda/Miniconda3-4.6.14-Linux-x86_64.sh -O ~/miniconda.sh 37 | chmod +x ~/miniconda.sh && ~/miniconda.sh -b 38 | fi 39 | - run: 40 | name: Setup conda path in env variables 41 | command: | 42 | echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV 43 | - run: 44 | name: Create new conda env 45 | command: | 46 | if 47 | conda env list | grep testenv 48 | then 49 | echo "Conda env testenv already exists courtesy of the cache." 50 | else 51 | conda create -n testenv -yq 52 | fi 53 | 54 | - run: 55 | name: Installing SPM & exporting its paths + other path 56 | command: | 57 | source activate testenv 58 | sudo apt-get update 59 | sudo apt-get install bc 60 | source continuous_integration/setup_spm.sh 61 | echo 'export SPM_ROOT_DIR="$HOME/opt/spm12"' >> $BASH_ENV 62 | echo 'export SPM_DIR="$SPM_ROOT_DIR/spm12/"' >> $BASH_ENV 63 | echo 'export SPM_MCR="$SPM_ROOT_DIR/spm12.sh"' >> $BASH_ENV 64 | echo 'export PATH="/home/circleci/.local/bin:$PATH"' >> $BASH_ENV 65 | pip install --upgrade pip 66 | pip install scipy sklearn nibabel nilearn configobj coverage pytest -q 67 | pip install matplotlib pandas nipype --ignore-installed -q 68 | python setup.py install --user 69 | python -c "from pypreprocess import datasets; datasets.fetch_spm_auditory(); datasets.fetch_spm_multimodal_fmri(); datasets.fetch_fsl_feeds()" 70 | sudo chown -R $USER:$USER /home/circleci/ 71 | sudo chmod 755 /home/circleci/opt/spm12/spm12/spm12 72 | sudo chmod 777 /home/circleci/.matlab/mcr_v97/ 73 | python examples/easy_start/nipype_preproc_spm_auditory.py 74 | python examples/pipelining/nistats_glm_fsl_feeds_fmri.py 75 | python examples/pipelining/nipype_preproc_spm_multimodal_faces.py 76 | sh continuous_integration/clean_output.sh 77 | - save_cache: 78 | key: spm12+datasets+pypi-{{ .Branch }} 79 | paths: 80 | - $SPM_ROOT_DIR 81 | - $HOME/nilearn_data 82 | - $HOME/.local/bin 83 | 84 | - store_artifacts: 85 | path: $HOME/nilearn_data/spm_auditory/pypreprocess_output/ 86 | - store_artifacts: 87 | path: $HOME/nilearn_data/spm_multimodal_fmri/pypreprocess_output/ 88 | - store_artifacts: 89 | path: $HOME/nilearn_data/fsl_feeds/pypreprocess_output/ 90 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #* 2 | *~ 3 | *.pyc 4 | *.nii* 5 | *.img 6 | *.hdr 7 | *.exe 8 | *.exe 9 | *.o 10 | *.so 11 | *.a 12 | *.swp 13 | *.log 14 | *.m 15 | build/ 16 | examples/pure_python/demos_cache/ 17 | .idea/ 18 | .idea/workspace.xml 19 | .coverage 20 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: xenial 3 | 4 | language: python 5 | 6 | matrix: 7 | # Do not wait for the allowed_failures entry to finish before 8 | # setting the status 9 | fast_finish: true 10 | 11 | allow_failures: 12 | # allow_failures keyed to python 3.5 & skipping tests. 
13 | - python: "3.5" 14 | env: DISTRIB="travisci" PYTHON_VERSION="3.5" FLAKE8_VERSION="*" SKIP_TESTS="true" 15 | include: 16 | - name: "Python 3.5 latest package versions" 17 | python: "3.5" 18 | env: DISTRIB="travisci" PYTHON_VERSION="3.5" 19 | NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" 20 | SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" 21 | JOBLIB_VERSION="*" NIPYPE_VERSION="*" NILEARN_VERSION="*" 22 | NETWORKX_VERSION="*" CONFIGOBJ_VERSION="*" PYTEST_VERSION="4.1.0" 23 | 24 | - name: "Python 3.6 latest package versions" 25 | python: "3.6" 26 | env: DISTRIB="travisci" PYTHON_VERSION="3.6" 27 | NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" 28 | SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" 29 | JOBLIB_VERSION="0.12" NIPYPE_VERSION="*" PYTEST_VERSION="4.1.0" 30 | NILEARN_VERSION="*" NETWORKX_VERSION="*" CONFIGOBJ_VERSION="*" 31 | # joblib.Memory switches from keyword cachedir to location in version 0.12 32 | # Making sure we get the deprecation warning. 33 | 34 | - name: "Python 3.7 latest package versions" 35 | python: "3.7" 36 | env: DISTRIB="travisci" PYTHON_VERSION="3.7" 37 | NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" 38 | SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" 39 | JOBLIB_VERSION="*" NIPYPE_VERSION="*" PYTEST_VERSION="4.3.0" 40 | NILEARN_VERSION="*" NETWORKX_VERSION="*" CONFIGOBJ_VERSION="*" 41 | 42 | - name: "Python 3.7 pre-release checks" 43 | python: "3.7" 44 | env: DISTRIB="travisci" PIP_FLAGS="--pre" COVERAGE="true" 45 | NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" 46 | SCIKIT_LEARN_VERSION="*" JOBLIB_VERSION="*" 47 | NIPYPE_VERSION="*" NILEARN_VERSION="*" NETWORKX_VERSION="*" 48 | MATPLOTLIB_VERSION="*" CONFIGOBJ_VERSION="*" PYTEST_VERSION="4.3.0" 49 | 50 | install: 51 | - pip install --upgrade pytest 52 | - pip install --upgrade numpy 53 | - source continuous_integration/install.sh 54 | 55 | script: 56 | - make clean 57 | - make test-code 58 | 59 | after_success: 60 | - | 61 | if [[ "$COVERAGE" == "true" ]]; then 62 | coveralls || echo "coverage upload failed" 63 | fi 64 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # simple makefile to simplify repetetive build env management tasks under posix 2 | 3 | # caution: testing won't work on windows, see README 4 | 5 | PYTHON ?= python 6 | CYTHON ?= cython 7 | CTAGS ?= ctags 8 | 9 | all: clean test 10 | 11 | clean-pyc: 12 | find . -name "*.pyc" | xargs rm -f 13 | 14 | clean-so: 15 | find . -name "*.so" | xargs rm -f 16 | find . -name "*.pyd" | xargs rm -f 17 | 18 | clean-build: 19 | rm -rf build 20 | 21 | clean-ctags: 22 | rm -f tags 23 | 24 | clean: clean-build clean-pyc clean-so clean-ctags 25 | 26 | in: inplace # just a shortcut 27 | inplace: 28 | $(PYTHON) setup.py build_ext -i 29 | 30 | test-code: 31 | python -m pytest --pyargs pypreprocess 32 | 33 | test-coverage: 34 | rm -rf coverage .coverage 35 | pytest --pyargs pypreprocess --showlocals 36 | 37 | test: test-code 38 | 39 | trailing-spaces: 40 | find . 
-name "*.py" | xargs perl -pi -e 's/[ \t]*$$//' 41 | 42 | cython: 43 | find -name "*.pyx" | xargs $(CYTHON) 44 | 45 | ctags: 46 | # make tags for symbol based navigation in emacs and vim 47 | # Install with: sudo apt-get install exuberant-ctags 48 | $(CTAGS) -R * 49 | -------------------------------------------------------------------------------- /continuous_integration/clean_output.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # A script to delete cache, temporary and heavy files 5 | # for a given pypreprocess output 6 | 7 | 8 | remove_files(){ 9 | exts="*.nii *.nii.gz *.img *.hdr *.txt" 10 | for ext in $exts 11 | do 12 | echo $ext $1 13 | find $1 -name $ext -exec rm -rf {} \; 14 | done 15 | } 16 | 17 | remove_dirs(){ 18 | dirs="variance_maps effects_maps t_maps z_maps tmp Session1 Session2 QA cache_dir" 19 | for dir in $dirs 20 | do 21 | p="$(find $1 -name $dir -type d)" 22 | if [ ! -z "$p" ] 23 | then 24 | rm -rf $p 25 | echo $p" deleted" 26 | fi 27 | done 28 | find $1 -type d -empty -delete 29 | } 30 | 31 | # Main 32 | ## Directories to clean 33 | paths="/home/ubuntu/nilearn_data/spm_auditory/pypreprocess_output/" 34 | paths="$paths /home/ubuntu/nilearn_data/spm_multimodal_fmri/pypreprocess_output/" 35 | paths="$paths /home/ubuntu/nilearn_data/fsl_feeds/pypreprocess_output/" 36 | 37 | ## Main loop 38 | for path in $paths 39 | do 40 | if [ -d "$path" ] 41 | then 42 | echo "Cleaning "$path 43 | remove_files $path 44 | remove_dirs $path 45 | else 46 | echo $path" not found" 47 | fi 48 | done 49 | 50 | -------------------------------------------------------------------------------- /continuous_integration/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is meant to be called by the "install" step defined in 3 | # .travis.yml. See http://docs.travis-ci.com/ for more details. 4 | # The behavior of the script is controlled by environment variabled defined 5 | # in the .travis.yml in the top level folder of the project. 6 | # 7 | # This script is adapted from a similar script from the scikit-learn repository. 8 | # 9 | # License: 3-clause BSD 10 | 11 | set -e 12 | 13 | # Fix the compilers to workaround avoid having the Python 3.4 build 14 | # lookup for g++44 unexpectedly. 15 | export CC=gcc 16 | export CXX=g++ 17 | 18 | create_new_venv() { 19 | # At the time of writing numpy 1.9.1 is included in the travis 20 | # virtualenv but we want to be in control of the numpy version 21 | # we are using for example through apt-get install 22 | deactivate 23 | virtualenv --system-site-packages testvenv 24 | source testvenv/bin/activate 25 | pip install --upgrade pytest 26 | } 27 | 28 | echo_requirements_string() { 29 | # Echo a requirement string for example 30 | # "pip nose python='2.7.3 scikit-learn=*". It has a hardcoded 31 | # list of possible packages to install and looks at _VERSION 32 | # environment variables to know whether to install a given package and 33 | # if yes which version to install. 
For example: 34 | # - for numpy, NUMPY_VERSION is used 35 | # - for scikit-learn, SCIKIT_LEARN_VERSION is used 36 | TO_INSTALL_ALWAYS="pip pytest" 37 | REQUIREMENTS="$TO_INSTALL_ALWAYS" 38 | TO_INSTALL_MAYBE="numpy scipy matplotlib scikit-learn nilearn nipype pandas flake8 joblib networkx configobj" 39 | for PACKAGE in $TO_INSTALL_MAYBE; do 40 | # Capitalize package name and add _VERSION 41 | PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" 42 | # replace - by _, needed for scikit-learn for example 43 | PACKAGE_VERSION_VARNAME="${PACKAGE_VERSION_VARNAME//-/_}" 44 | # dereference $PACKAGE_VERSION_VARNAME to figure out the 45 | # version to install 46 | PACKAGE_VERSION="${!PACKAGE_VERSION_VARNAME}" 47 | if [[ -n "$PACKAGE_VERSION" ]]; then 48 | if [[ "$PACKAGE_VERSION" == "*" ]]; then 49 | REQUIREMENTS="$REQUIREMENTS $PACKAGE" 50 | else 51 | REQUIREMENTS="$REQUIREMENTS $PACKAGE==$PACKAGE_VERSION" 52 | fi 53 | fi 54 | 55 | done 56 | echo $REQUIREMENTS 57 | } 58 | 59 | 60 | create_new_travisci_env() { 61 | REQUIREMENTS=$(echo_requirements_string) 62 | pip install $PIP_FLAGS ${REQUIREMENTS} 63 | pip install pytest pytest-cov 64 | 65 | if [[ "$INSTALL_MKL" == "true" ]]; then 66 | # Make sure that MKL is used 67 | pip install mkl 68 | fi 69 | } 70 | 71 | if [[ "$DISTRIB" == "neurodebian" ]]; then 72 | create_new_venv 73 | pip install nose-timer pytest 74 | bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh) 75 | sudo apt-get install -qq python-scipy python-nose python-pytest python-nibabel python-sklearn python-joblib 76 | 77 | elif [[ "$DISTRIB" == "travisci" ]]; then 78 | create_new_travisci_env 79 | # Note: nibabel is in setup.py install_requires so nibabel will 80 | # always be installed eventually. Defining NIBABEL_VERSION is only 81 | # useful if you happen to want a specific nibabel version rather 82 | # than the latest available one. 83 | if [[ -n "$NIBABEL_VERSION" ]]; then 84 | pip install nibabel=="$NIBABEL_VERSION" 85 | fi 86 | 87 | else 88 | echo "Unrecognized distribution ($DISTRIB); cannot setup CI environment." 
89 | exit 1 90 | fi 91 | 92 | pip install psutil memory_profiler 93 | 94 | if [[ "$COVERAGE" == "true" ]]; then 95 | pip install codecov 96 | fi 97 | 98 | # numpy not installed when skipping the tests so we do not want to run 99 | # setup.py install 100 | if [[ "$SKIP_TESTS" != "true" ]]; then 101 | python setup.py install 102 | fi 103 | -------------------------------------------------------------------------------- /continuous_integration/install_spm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | SPM_ROOT_DIR=~/opt/spm8 5 | 6 | if [ -d "$SPM_ROOT_DIR" ] 7 | then 8 | echo "spm already installed" 9 | else 10 | echo "Creating directory : " $SPM_ROOT_DIR 11 | mkdir -p $SPM_ROOT_DIR && cd $SPM_ROOT_DIR 12 | echo "Downloading spm8 : " 13 | wget http://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/spm8/spm8_r5236.zip 14 | echo "Unzipping : " 15 | unzip -q spm8_r5236.zip 16 | echo "Chmoding : " 17 | chmod 755 spm8/run_spm8.sh 18 | echo "Downloading MCR : " 19 | wget http://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/MCR/glnxa64/MCRInstaller.bin 20 | echo "Chmoding : " 21 | chmod 755 MCRInstaller.bin 22 | echo "Installing MCR : " 23 | ./MCRInstaller.bin -P bean421.installLocation="mcr" -silent 24 | fi 25 | 26 | echo "Writing spm8.sh : " 27 | cat <<EOF > $SPM_ROOT_DIR/spm8.sh 28 | #!/bin/bash 29 | SPM8_STANDALONE_HOME=$SPM_ROOT_DIR/spm8 30 | exec "\${SPM8_STANDALONE_HOME}/run_spm8.sh" "\${SPM8_STANDALONE_HOME}/../mcr/v713" \${1+"\$@"} 31 | EOF 32 | echo "Chmoding : " 33 | chmod 755 $SPM_ROOT_DIR/spm8.sh 34 | echo "Quitting : " 35 | # Create CTF 36 | $SPM_ROOT_DIR/spm8.sh quit 37 | echo "Export SPM_DIR and SPM_MCR by running the following commands:" 38 | echo 39 | cmds="export SPM_DIR=$SPM_ROOT_DIR/spm8/spm8_mcr/spm8; export SPM_MCR=$SPM_ROOT_DIR/spm8.sh" 40 | echo ${cmds} 41 | echo 42 | echo "N.B.: You may want to add the above commands (the exports) to your ~/.bashrc file once and for all." 43 | ${cmds} 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /continuous_integration/install_spm12.sh: -------------------------------------------------------------------------------- 1 | #!
/bin/bash 2 | # Time-stamp: <2020-03-13 11:26:59 cp983411> 3 | 4 | # Download and install SPM12 standalone (no Matlab license required) 5 | 6 | set -e 7 | # set -x # echo on for debugging 8 | 9 | # Installation directory can be specified as first argument on the command line 10 | # Warning: use a fully qualified path (from root) to correctly set up env variables 11 | 12 | if [ $# -eq 0 ] 13 | then 14 | SPM_ROOT_DIR=$HOME/opt/spm12 # default 15 | else 16 | SPM_ROOT_DIR=$1 17 | fi 18 | 19 | mkdir -p $SPM_ROOT_DIR 20 | mkdir -p $SPM_ROOT_DIR/mcr 21 | 22 | # Download 23 | SPM_SRC=spm12_r7771_Linux_R2019b.zip 24 | MCRINST=MATLAB*.zip 25 | 26 | wget -N -r -l1 --no-parent -nd -P $SPM_ROOT_DIR https://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/dev/$SPM_SRC --no-check-certificate 27 | wget -N -r -l1 --no-parent -nd -P $SPM_ROOT_DIR/mcr https://ssd.mathworks.com/supportfiles/downloads/R2019b/Release/4/deployment_files/installer/complete/glnxa64/MATLAB_Runtime_R2019b_Update_4_glnxa64.zip --no-check-certificate 28 | 29 | # Install SPM 30 | cd $SPM_ROOT_DIR 31 | unzip -q -u ${SPM_SRC} 32 | chmod 755 spm12/run_spm12.sh 33 | 34 | # Install Matlab runtime compiler 35 | cd $SPM_ROOT_DIR/mcr 36 | unzip ${MCRINST} 37 | ./install -mode silent -agreeToLicense yes -destinationFolder $SPM_ROOT_DIR/mcr -outputFile $SPM_ROOT_DIR/mcr 38 | 39 | # create start-up script 40 | cd $SPM_ROOT_DIR 41 | cat <<EOF > spm12.sh 42 | #!/bin/bash 43 | SPM12_STANDALONE_HOME=$SPM_ROOT_DIR/spm12 44 | exec "\${SPM12_STANDALONE_HOME}/run_spm12.sh" "\${SPM12_STANDALONE_HOME}/../mcr/v97" \${1+"\$@"} 45 | EOF 46 | 47 | chmod 755 spm12.sh 48 | 49 | if [ ! -f /usr/lib/x86_64-linux-gnu/libXp.so.6 ]; then 50 | echo "WARNING!!!" 51 | echo "/usr/lib/x86_64-linux-gnu/libXp.so.6 is missing" 52 | echo 53 | echo To install it: 54 | echo 'sudo add-apt-repository "deb http://security.ubuntu.com/ubuntu precise-security main"' 55 | echo 'sudo apt update' 56 | echo 'sudo apt install libxp6' 57 | echo 'sudo add-apt-repository -r "deb http://security.ubuntu.com/ubuntu precise-security main"' 58 | fi 59 | 60 | # Create CTF 61 | ${SPM_ROOT_DIR}/spm12.sh quit 62 | cmds="export SPM_DIR=$SPM_ROOT_DIR/spm12/; export SPM_MCR=$SPM_ROOT_DIR/spm12.sh" 63 | ${cmds} 64 | echo 65 | echo ${cmds} 66 | echo "IMPORTANT: you should now execute the following line: " 67 | echo "echo ${cmds} >> $HOME/.bashrc" 68 | -------------------------------------------------------------------------------- /continuous_integration/setup_spm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | SPM_INSTALL_SCRIPT=continuous_integration/install_spm12.sh 5 | echo "" 6 | echo "SPM_INSTALL_SCRIPT: $SPM_INSTALL_SCRIPT" 7 | sudo bash $SPM_INSTALL_SCRIPT 8 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | This directory contains example scripts.
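For instance, a minimal end-to-end run of the `easy_start` pipeline could look like the sketch below. It simply mirrors `easy_start/nipype_preproc_spm_auditory.py` and assumes SPM (via nipype) is installed and that the bundled `spm_auditory_preproc.ini` job file is used unchanged:

```python
# Sketch only -- mirrors easy_start/nipype_preproc_spm_auditory.py.
# Assumes a working SPM + nipype setup and the shipped .ini job file.
import os
from pypreprocess.datasets import fetch_spm_auditory
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc

jobfile = os.path.join("easy_start", "spm_auditory_preproc.ini")
sd = fetch_spm_auditory()  # download the single-subject SPM auditory dataset
dataset_dir = os.path.dirname(os.path.dirname(os.path.dirname(sd.anat)))
subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0]
print("Preprocessed output written to %s" % subject_data.output_dir)
```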
2 | -------------------------------------------------------------------------------- /examples/easy_start/nipype_preproc_spm_auditory.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: dohmatob elvis dopgima elvis[dot]dohmatob[at]inria[dot]fr 3 | Synopsis: Minimal script for preprocessing single-subject data 4 | """ 5 | 6 | import os 7 | import time 8 | import numpy as np 9 | import nibabel 10 | from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc 11 | from pypreprocess.datasets import fetch_spm_auditory 12 | import pandas as pd 13 | from nilearn.glm.first_level.design_matrix import (make_first_level_design_matrix, 14 | check_design_matrix) 15 | 16 | from nilearn.plotting.matrix_plotting import plot_design_matrix 17 | from nilearn.glm.first_level import FirstLevelModel 18 | import matplotlib.pyplot as plt 19 | 20 | # file containing configuration for preprocessing the data 21 | this_dir = os.path.dirname(os.path.abspath(__file__)) 22 | jobfile = os.path.join(this_dir, "spm_auditory_preproc.ini") 23 | 24 | # fetch spm auditory data 25 | sd = fetch_spm_auditory() 26 | dataset_dir = os.path.dirname(os.path.dirname(os.path.dirname(sd.anat))) 27 | 28 | # construct experimental paradigm 29 | stats_start_time = time.ctime() 30 | tr = 7. 31 | n_scans = 96 32 | _duration = 6 33 | n_conditions = 2 34 | epoch_duration = _duration * tr 35 | conditions = ['rest', 'active'] * 8 36 | duration = epoch_duration * np.ones(len(conditions)) 37 | onset = np.linspace(0, (len(conditions) - 1) * epoch_duration, 38 | len(conditions)) 39 | paradigm = pd.DataFrame( 40 | {'onset': onset, 'duration': duration, 'trial_type': conditions}) 41 | 42 | hfcut = 2 * 2 * epoch_duration 43 | hfcut = 1./hfcut 44 | 45 | fd = open(sd.func[0].split(".")[0] + "_onset.txt", "w") 46 | for c, o, d in zip(conditions, onset, duration): 47 | fd.write("%s %s %s\r\n" % (c, o, d)) 48 | fd.close() 49 | 50 | # preprocess the data 51 | subject_data = do_subjects_preproc(jobfile, dataset_dir=dataset_dir)[0] 52 | 53 | # construct design matrix 54 | nscans = len(subject_data.func[0]) 55 | frametimes = np.linspace(0, (nscans - 1) * tr, nscans) 56 | drift_model = 'Cosine' 57 | hrf_model = 'spm + derivative' 58 | design_matrix = make_first_level_design_matrix( 59 | frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, 60 | high_pass=hfcut) 61 | 62 | # plot and save design matrix 63 | ax = plot_design_matrix(design_matrix) 64 | ax.set_position([.05, .25, .9, .65]) 65 | ax.set_title('Design matrix') 66 | dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png') 67 | plt.savefig(dmat_outfile, bbox_inches="tight", dpi=200) 68 | 69 | # specify contrasts 70 | contrasts = {} 71 | _, matrix, names = check_design_matrix(design_matrix) 72 | contrast_matrix = np.eye(len(names)) 73 | for i in range(len(names)): 74 | contrasts[names[i]] = contrast_matrix[i] 75 | 76 | # more interesting contrasts""" 77 | contrasts = {'active-rest': contrasts['active'] - contrasts['rest']} 78 | 79 | # fit GLM 80 | print('\r\nFitting a GLM (this takes time) ..') 81 | fmri_glm = FirstLevelModel(noise_model='ar1', standardize=False, t_r=tr).fit( 82 | [nibabel.concat_images(subject_data.func[0])], design_matrices=design_matrix) 83 | 84 | 85 | # save computed mask 86 | mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") 87 | print("Saving mask image %s" % mask_path) 88 | nibabel.save(fmri_glm.masker_.mask_img_, mask_path) 89 | 90 | # compute bg unto which activation will be 
projected 91 | anat_img = nibabel.load(subject_data.anat) 92 | 93 | print("Computing contrasts ..") 94 | z_maps = {} 95 | effects_maps = {} 96 | for contrast_id, contrast_val in contrasts.items(): 97 | print("\tcontrast id: %s" % contrast_id) 98 | z_map = fmri_glm.compute_contrast( 99 | contrasts[contrast_id], output_type='z_score') 100 | 101 | z_maps[contrast_id] = z_map 102 | -------------------------------------------------------------------------------- /examples/easy_start/nipype_preproc_spm_haxby.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr 3 | Synopsis: Preprocessing of HAXBY 2001 dataset 4 | """ 5 | 6 | import os 7 | from nilearn.datasets import fetch_haxby 8 | from pypreprocess.nipype_preproc_spm_utils import (do_subjects_preproc, 9 | SubjectData) 10 | 11 | # DARTEL ? 12 | DARTEL = False 13 | 14 | DATASET_DESCRIPTION = """\ 15 | This is a block-design fMRI dataset from a study on face and object\ 16 | representation in human ventral temporal cortex. It consists of 6 subjects\ 17 | with 12 runs per subject. In each run, the subjects passively viewed \ 18 | greyscale images of eight object categories, grouped in 24s blocks separated\ 19 | by rest periods. Each image was shown for 500ms and was followed by a 1500ms\ 20 | inter-stimulus interval. Full-brain fMRI data were recorded with a volume \ 21 | repetition time of 2.5s, thus, a stimulus block was covered by roughly 9 \ 22 | volumes. 23 | 24 | Get full description \ 25 | here.\ 26 | """ 27 | 28 | # fetch HAXBY dataset 29 | N_SUBJECTS = 2 30 | haxby_data = fetch_haxby(subjects=N_SUBJECTS) 31 | 32 | # set output dir 33 | OUTPUT_DIR = os.path.join(os.path.dirname(haxby_data.mask), 34 | "haxby_runs") 35 | if not os.path.exists(OUTPUT_DIR): 36 | os.makedirs(OUTPUT_DIR) 37 | 38 | # get subject data 39 | subjects = [] 40 | for subject_id in set([os.path.basename(os.path.dirname(x)) 41 | for x in haxby_data.func]): 42 | # instantiate subject_data object 43 | subject_data = SubjectData() 44 | subject_data.subject_id = subject_id 45 | subject_data.session_id = "haxby2001" 46 | 47 | # set func 48 | subject_data.func = [x for x in haxby_data.func if subject_id in x] 49 | 50 | assert len(subject_data.func) == 1 51 | subject_data.func = subject_data.func[0] 52 | 53 | # set anat 54 | subject_data.anat = [x for x in haxby_data.anat if subject_id in x] 55 | assert len(subject_data.anat) == 1 56 | subject_data.anat = subject_data.anat[0] 57 | 58 | # set subject output directory 59 | subject_data.output_dir = os.path.join(OUTPUT_DIR, 60 | subject_data.subject_id) 61 | 62 | # add this subject to list 63 | subjects.append(subject_data) 64 | 65 | # do preprocessing proper 66 | results = do_subjects_preproc(subjects, output_dir=OUTPUT_DIR, 67 | dataset_id="HAXBY 2001", realign=False, 68 | coregister=False, dartel=DARTEL, 69 | tsdiffana=False, 70 | dataset_description=DATASET_DESCRIPTION) 71 | -------------------------------------------------------------------------------- /examples/easy_start/segmentation_demos.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Demo script for anatomical MRI segmentation with SPM. 4 | 5 | It demos segmentations on SPM single-subject auditory. 
6 | 7 | @author: Mehdi RAHIM 8 | """ 9 | 10 | import os 11 | from pypreprocess.subject_data import SubjectData 12 | from pypreprocess.datasets import fetch_spm_auditory 13 | from pypreprocess.nipype_preproc_spm_utils import _do_subject_segment 14 | from pypreprocess.reporting.check_preprocessing import plot_segmentation 15 | import matplotlib.pyplot as plt 16 | 17 | 18 | OUTPUT_DIR = 'segmentation_demos_output' 19 | def _spm_auditory_subject_data(): 20 | """ Fetching auditory example into SubjectData Structure 21 | """ 22 | subject_data = fetch_spm_auditory() 23 | subject_data['func'] = None 24 | base_dir = os.path.dirname(subject_data['anat']) 25 | subject_data.output_dir = os.path.join(base_dir, OUTPUT_DIR) 26 | return SubjectData(**subject_data) 27 | 28 | 29 | # Fetch and generate the subject_data structure 30 | print('Fetching Auditory Dataset') 31 | subject_data = _spm_auditory_subject_data() 32 | 33 | # Segment the GM, WM and the CSF 34 | print('Segmentation with SPM') 35 | subject_data = _do_subject_segment(subject_data, caching=True, report=False, 36 | hardlink_output=False) 37 | print('Segmentation saved in : %s' % subject_data.output_dir) 38 | 39 | # Overlay the tissues with T1 acquisition 40 | plot_segmentation(img=subject_data['anat'], 41 | gm_filename=subject_data['gm'], 42 | wm_filename=subject_data['wm'], 43 | csf_filename=subject_data['csf']) 44 | plt.show() 45 | -------------------------------------------------------------------------------- /examples/easy_start/spm_auditory_preproc.ini: -------------------------------------------------------------------------------- 1 | ###################################################################################### 2 | # 3 | # pypreprocess configuration. 4 | # 5 | # Copy this file to the acquisition directory containing the data you wish to 6 | # preprocess. Then, manually edit the values to customize the pipeline to suite your 7 | # needs. 8 | # 9 | # Disable a preprocessing step by setting 'disable = True' under the corresponding 10 | # section, or simply comment the section altogether. 11 | # 12 | # IMPORTANT NOTES 13 | # =============== 14 | # - indexing begins from 1 (matlab style) 15 | # - you can explicitly specifiy the software to be used for a specific stage of the 16 | # preprocessing by accordingly setting the 'software' field under the 17 | # corresponding section (e.g like so: software = spm) 18 | # - A value of 'auto', 'unspecified', 'none', etc. for a parameter means it should 19 | # be specified or inferred at run-time 20 | # 21 | # Authored by DOHMATOB Elvis Dopgima 22 | # 23 | ###################################################################################### 24 | 25 | [config] # DON'T TOUCH THIS LINE ! 26 | 27 | ########## 28 | # INPUT 29 | ########## 30 | 31 | # Path (relative or full) of directory containing data (if different from directory 32 | # containing this configuration file). 33 | dataset_dir = "pycmd: from pypreprocess.datasets import fetch_spm_auditory; print(fetch_spm_auditory().data_dir)" 34 | 35 | # Brief description of dataset (you can use html formatting) 36 | dataset_description = """SPM Auditory 37 | (single-subject) 38 | """ 39 | 40 | # The name of the dataset as will be shown in the report pages. Must be an integer 41 | # or auto 42 | dataset_id = auto 43 | 44 | # The number of subjects to include; by default all subjects are included. 45 | nsubjects = 1 46 | 47 | # List of (or wildcard for) subject id's to be ignored / excluded; must be space- 48 | # separated list of subject ids. 
49 | exclude_these_subject_ids = None 50 | 51 | # List of (or wildcard for) the only subjects to be included; must be space 52 | # separated list of subject ids. 53 | include_only_these_subject_ids = None 54 | 55 | # Wildcard for, or space-separated list of, subject directories relative to the 56 | # acquisition directory 57 | subject_dirs = sub* 58 | 59 | # Path of session-wise functional images, relative to the subject data dir. 60 | # Wildcards are allowed. Each session must be specified in the form 61 | session_1_func = fM00223/fM00223_*.img 62 | 63 | # Path of T1 (anat) image relative to the subject data dir 64 | anat = sM00223/sM00223_002.img 65 | 66 | # Should caching (nipype, joblib, etc.) be used to safe ages of hard-earned computation ? 67 | caching = True 68 | 69 | # Number of jobs to be spawn altogether. 70 | n_jobs = 1 71 | 72 | # Should orientation meta-date be stripped-off image headers ? 73 | deleteorient = False 74 | 75 | 76 | ############################ 77 | # Slice-Timing Correction 78 | ############################ 79 | 80 | # Don't you want us to do Slice-Timing Correction (STC) ? 81 | disable_slice_timing = False 82 | 83 | # Repetition Time 84 | TR = 7. 85 | 86 | # Formula for Acquisition Time for single brain volume. 87 | TA = TR * (1 - 1 / nslices) 88 | 89 | # Can be ascending, descending, or an explicitly specified sequence. 90 | slice_order = ascending 91 | 92 | # Were the EPI slices interleaved ? 93 | interleaved = False 94 | 95 | # Reference slice (indexing begins from 1) 96 | refslice = 1 97 | 98 | # software to use for Slice-Timing Correction 99 | slice_timing_software = spm 100 | 101 | 102 | #################################### 103 | # Realignment (Motion Correction) 104 | #################################### 105 | 106 | # Don't do realignment / motion correction ? 107 | disable_realign = False 108 | 109 | # Register all volumes to the mean thereof ? 110 | register_to_mean = True 111 | 112 | # Reslice volumes ? 113 | realign_reslice = False 114 | 115 | # Software to use realignment / motion correction. Can be spm or fsl 116 | realign_software = spm 117 | 118 | 119 | ################### 120 | # Coregistration 121 | ################### 122 | 123 | # Don't you want us to do coregistration of T1 (anat) and fMRI (func) ? 124 | disable_coregister = False 125 | 126 | # During coregistration, do you want us to register func -> anat or anat -> func ? 127 | coreg_func_to_anat = True 128 | 129 | # Should we reslice files during coregistration ? 130 | coregister_reslice = False 131 | 132 | # Software to use for coregistration 133 | coregister_software = spm 134 | 135 | 136 | ######################## 137 | # Tissue Segmentation 138 | ######################## 139 | 140 | # Don't you want us to segment the brain (into gray-matter, white matter, csf, etc.) ? 141 | disable_segment = False 142 | 143 | # Software to use for tissue segmentation. 144 | segment_software = spm 145 | 146 | # Output modulates TPMs ? 147 | output_modulated_tpms = True 148 | 149 | # Use spm's NewSegment ? 150 | newsegment = False 151 | 152 | ################## 153 | # Normalization 154 | ################## 155 | 156 | # Don't you want us to normalize each subject's brain unto a template (MNI 157 | # for example) ? 158 | disable_normalize = True 159 | 160 | # Path to your template image. 
161 | template = "MNI" 162 | 163 | # Voxel sizes of final func images 164 | func_write_voxel_sizes = [3, 3, 3] 165 | 166 | # Voxel sizes of final anat images 167 | anat_write_voxel_size = [1, 1, 1] 168 | 169 | # Use dartel for normalization ? 170 | dartel = False 171 | 172 | # Software to use for normalization. 173 | normalize_software = spm 174 | 175 | 176 | ############## 177 | # Smoothing 178 | ############## 179 | 180 | # Software to use for smoothing. 181 | smooth_software = spm 182 | 183 | # FWHM (in mm) of smoothing kernel. 184 | fwhm = [5, 5, 5] 185 | 186 | 187 | ########### 188 | # Output 189 | ########### 190 | 191 | # Root directory (full path or relative to the directory containing this file) for 192 | # all output files and reports 193 | output_dir = ./pypreprocess_output 194 | scratch = ./pypreprocess_scratch # this is where intermediate cached results will be stored. Can be on a separate device (e.g external disk) 195 | 196 | # Generate html reports ? 197 | report = True 198 | 199 | # Plot coefficient of variation post-preprocessing ? 200 | tsdiffana = True 201 | 202 | 203 | ######### 204 | # MISC 205 | ######### 206 | 207 | # Path to MATLAB executable 208 | matlab_exec = 209 | 210 | # Path to SPM 211 | spm_mcr = 212 | -------------------------------------------------------------------------------- /examples/pipelining/multimodal_faces_preproc.ini: -------------------------------------------------------------------------------- 1 | ###################################################################################### 2 | # 3 | # pypreprocess configuration. 4 | # 5 | # Copy this file to the acquisition directory containing the data you wish to 6 | # preprocess. Then, manually edit the values to customize the pipeline to suite your 7 | # needs. 8 | # 9 | # Disable a preprocessing step by setting 'disable = True' under the corresponding 10 | # section, or simply comment the section altogether. 11 | # 12 | # IMPORTANT NOTES 13 | # =============== 14 | # - indexing begins from 1 (matlab style) 15 | # - you can explicitly specify the software to be used for a specific stage of the 16 | # preprocessing by accordingly setting the 'software' field under the 17 | # corresponding section (e.g like so: software = spm) 18 | # - A value of 'auto', 'unspecified', 'none', etc. for a parameter means it should 19 | # be specified or inferred at run-time 20 | # 21 | # Authored by DOHMATOB Elvis Dopgima 22 | # 23 | ###################################################################################### 24 | 25 | [config] # DON'T TOUCH THIS LINE! 26 | 27 | ########## 28 | # INPUT 29 | ########## 30 | 31 | # Path (relative or full) of directory containing data (if different from directory 32 | # containing this configuration file). 33 | dataset_dir = unspecified 34 | 35 | # Brief description of dataset (you can use html formatting) 36 | dataset_description = Multimodal Faces fMRI dataset 37 | 38 | 39 | # The name of the dataset as will be shown in the report pages. Must be an integer 40 | # or auto 41 | dataset_id = auto 42 | 43 | # The number of subjects to include; by default all subjects are included. 44 | nsubjects = 1 45 | 46 | # List of (or wildcard for) subject id's to be ignored / excluded; must be space- 47 | # separated list of subject ids. 48 | exclude_these_subject_ids = None 49 | 50 | # List of (or wildcard for) the only subjects to be included; must be space 51 | # separated list of subject ids. 
52 | include_only_these_subject_ids = None 53 | 54 | # Wildcard for, or space-separated list of, subject directories relative to the 55 | # acquisition directory 56 | subject_dirs = sub* 57 | 58 | # Path of session-wise functional images, relative to the subject data dir. 59 | # Wildcards are allowed. Each session must be specified in the form 60 | session_1_func = fMRI/Session1/fMETHODS-*.img 61 | session_2_func = fMRI/Session2/fMETHODS-*.img 62 | 63 | 64 | # Path of T1 (anat) image relative to the subject data dir 65 | anat = sMRI/smri.img 66 | 67 | # Should caching (nipype, joblib, etc.) be used to safe ages of hard-earned computation? 68 | caching = True 69 | 70 | # Number of jobs to be spawn altogether. 71 | n_jobs = 1 72 | 73 | # Should orientation meta-date be stripped-off image headers? 74 | deleteorient = False 75 | 76 | 77 | ############################ 78 | # Slice-Timing Correction 79 | ############################ 80 | 81 | # Don't you want us to do Slice-Timing Correction (STC)? 82 | disable_slice_timing = False 83 | 84 | # Repetition Time 85 | TR = 7. 86 | 87 | # Formula for Acquisition Time for single brain volume. 88 | TA = TR * (1 - 1 / nslices) 89 | 90 | # Can be ascending, descending, or an explicitly specified sequence. 91 | slice_order = ascending 92 | 93 | # Were the EPI slices interleaved? 94 | interleaved = False 95 | 96 | # Reference slice (indexing begins from 1) 97 | refslice = 1 98 | 99 | # software to use for Slice-Timing Correction 100 | slice_timing_software = spm 101 | 102 | 103 | #################################### 104 | # Realignment (Motion Correction) 105 | #################################### 106 | 107 | # Don't do realignment / motion correction? 108 | disable_realign = False 109 | 110 | # Register all volumes to the mean thereof? 111 | register_to_mean = True 112 | 113 | # Reslice volumes ? 114 | realign_reslice = False 115 | 116 | # Software to use realignment / motion correction. Can be spm or fsl 117 | realign_software = spm 118 | 119 | 120 | ################### 121 | # Coregistration 122 | ################### 123 | 124 | # Don't you want us to do coregistration of T1 (anat) and fMRI (func)? 125 | disable_coregister = False 126 | 127 | # During coregistration, do you want us to register func -> anat or anat -> func? 128 | coreg_func_to_anat = True 129 | 130 | # Should we reslice files during coregistration? 131 | coregister_reslice = False 132 | 133 | # Software to use for coregistration 134 | coregister_software = spm 135 | 136 | 137 | ######################## 138 | # Tissue Segmentation 139 | ######################## 140 | 141 | # Don't you want us to segment the brain (into gray-matter, white matter, csf, etc.)? 142 | disable_segment = False 143 | 144 | # Software to use for tissue segmentation. 145 | segment_software = spm 146 | 147 | # Use spm's NewSegment? 148 | newsegment = True 149 | 150 | ################## 151 | # Normalization 152 | ################## 153 | 154 | # Don't you want want us to normalize each subject's brain unto a template (MNI 155 | # for example)? 156 | disable_normalize = False 157 | 158 | # Path to your template image. 159 | template = "MNI" 160 | 161 | # Voxel sizes of final func images 162 | func_write_voxel_sizes = [3, 3, 3] 163 | 164 | # Voxel sizes of final anat images 165 | anat_write_voxel_size = [1, 1, 1] 166 | 167 | # Use dartel for normalization? 168 | dartel = False 169 | 170 | # Software to use for normalization. 
171 | normalize_software = spm 172 | 173 | 174 | ############## 175 | # Smoothing 176 | ############## 177 | 178 | # FWHM (in mm) of smoothing kernel. 179 | fwhm = [5, 5, 5] 180 | 181 | 182 | ########### 183 | # Output 184 | ########### 185 | 186 | # Root directory (full path or relative to the directory containing this file) for 187 | # all output files and reports 188 | output_dir = ./pypreprocess_output 189 | 190 | # Generate html reports? 191 | report = True 192 | 193 | # Plot coefficient of variation post-preprocessing ? 194 | plot_tsdiffana = True 195 | 196 | ######### 197 | # MISC 198 | ######### 199 | 200 | # Path to MATLAB executable 201 | matlab_exec = /usr/local/MATLAB/R2012b/bin/matlab 202 | 203 | # Path to SPM 204 | spm_dir = ~/CODE/spm8 205 | 206 | -------------------------------------------------------------------------------- /examples/pipelining/nipype_preproc_spm_multimodal_faces.py: -------------------------------------------------------------------------------- 1 | """ 2 | :Author: yannick schwartz, dohmatob elvis dopgima 3 | :Synopsis: Minimal script for preprocessing single-subject data 4 | + GLM with nistats 5 | """ 6 | 7 | # standard imports 8 | import sys 9 | import os 10 | import time 11 | import nibabel 12 | import numpy as np 13 | import scipy.io 14 | 15 | # imports for GLM business 16 | from nilearn.glm.first_level.design_matrix import (make_first_level_design_matrix, 17 | check_design_matrix) 18 | from nilearn.glm.first_level import FirstLevelModel 19 | import pandas as pd 20 | 21 | # pypreprocess imports 22 | from pypreprocess.datasets import fetch_spm_multimodal_fmri 23 | from pypreprocess.nipype_preproc_spm_utils import do_subject_preproc 24 | from pypreprocess.subject_data import SubjectData 25 | 26 | # file containing configuration for preprocessing the data 27 | this_dir = os.path.abspath(os.path.dirname(sys.argv[0])) 28 | jobfile = os.path.join(this_dir, "multimodal_faces_preproc.ini") 29 | 30 | # set dataset dir 31 | if len(sys.argv) > 1: 32 | dataset_dir = sys.argv[1] 33 | else: 34 | dataset_dir = os.path.join(this_dir, "spm_multimodal_faces") 35 | 36 | # fetch spm multimodal_faces data 37 | subject_data = fetch_spm_multimodal_fmri() 38 | dataset_dir = os.path.dirname(os.path.dirname(os.path.dirname( 39 | subject_data.anat))) 40 | 41 | # preprocess the data 42 | subject_id = "sub001" 43 | subject_data = SubjectData( 44 | output_dir=os.path.join(dataset_dir, "pypreprocess_output", subject_id), 45 | subject_id=subject_id, func=[subject_data.func1, subject_data.func2], 46 | anat=subject_data.anat, trials_ses1=subject_data.trials_ses1, 47 | trials_ses2=subject_data.trials_ses2, session_ids=["Session1", "Session2"]) 48 | subject_data = do_subject_preproc(subject_data, realign=True, coregister=True, 49 | segment=True, normalize=True) 50 | 51 | # experimental paradigm meta-params 52 | stats_start_time = time.ctime() 53 | tr = 2. 54 | drift_model = 'Cosine' 55 | hrf_model = 'spm + derivative' 56 | hfcut = 1. 
/ 128 57 | 58 | # make design matrices 59 | first_level_effects_maps = [] 60 | mask_images = [] 61 | design_matrices = [] 62 | for x in range(2): 63 | if not os.path.exists(subject_data.output_dir): 64 | os.makedirs(subject_data.output_dir) 65 | 66 | # build paradigm 67 | n_scans = len(subject_data.func[x]) 68 | timing = scipy.io.loadmat(getattr(subject_data, "trials_ses%i" % (x + 1)), 69 | squeeze_me=True, struct_as_record=False) 70 | 71 | faces_onsets = timing['onsets'][0].ravel() 72 | scrambled_onsets = timing['onsets'][1].ravel() 73 | onsets = np.hstack((faces_onsets, scrambled_onsets)) 74 | onsets *= tr # because onsets were reporting in 'scans' units 75 | conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len( 76 | scrambled_onsets) 77 | 78 | _duration = 0.6 79 | duration = _duration * np.ones(len(conditions)) 80 | 81 | # build design matrix 82 | frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) 83 | paradigm = pd.DataFrame({'trial_type': conditions, 'duration': duration, 'onset': onsets}) 84 | design_matrix = make_first_level_design_matrix(frametimes, paradigm, 85 | hrf_model=hrf_model, 86 | drift_model=drift_model, 87 | high_pass=hfcut) 88 | design_matrices.append(design_matrix) 89 | 90 | # specify contrasts 91 | _, matrix, names = check_design_matrix(design_matrix) 92 | contrasts = {} 93 | n_columns = len(names) 94 | contrast_matrix = np.eye(n_columns) 95 | for i in range(2): 96 | contrasts[names[2 * i]] = contrast_matrix[2 * i] 97 | 98 | # more interesting contrasts 99 | contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled'] 100 | contrasts['scrambled-faces'] = -contrasts['faces-scrambled'] 101 | contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled'] 102 | 103 | # fit GLM 104 | print('Fitting a GLM (this takes time)...') 105 | fmri_glm = FirstLevelModel().fit( 106 | [nibabel.concat_images(x) for x in subject_data.func], 107 | design_matrices=design_matrices) 108 | 109 | # save computed mask 110 | mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") 111 | print("Saving mask image %s" % mask_path) 112 | nibabel.save(fmri_glm.masker_.mask_img_, mask_path) 113 | mask_images.append(mask_path) 114 | 115 | # compute contrast maps 116 | z_maps = {} 117 | effects_maps = {} 118 | for contrast_id, contrast_val in contrasts.items(): 119 | print("\tcontrast id: %s" % contrast_id) 120 | z_map = fmri_glm.compute_contrast( 121 | [contrast_val] * 2, output_type='z_score') 122 | 123 | z_maps[contrast_id] = z_map 124 | -------------------------------------------------------------------------------- /examples/pipelining/nistats_glm_fsl_feeds_fmri.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr 3 | Synopsis: Demo script for nipy's GLM and inference + reporting 4 | on FSL's FEEDS fMRI single-subject example data 5 | """ 6 | 7 | import os 8 | import numpy as np 9 | import pandas as pd 10 | from nilearn.glm.first_level.design_matrix import (make_first_level_design_matrix, 11 | check_design_matrix) 12 | from nilearn.glm.first_level import FirstLevelModel 13 | import nibabel 14 | import time 15 | from pypreprocess.nipype_preproc_spm_utils import (SubjectData, 16 | do_subjects_preproc) 17 | from pypreprocess.datasets import fetch_fsl_feeds 18 | from pypreprocess.io_utils import compute_mean_3D_image 19 | 20 | """MISC""" 21 | DATASET_DESCRIPTION = "FSL FEEDS example data (single-subject)" 22 | 23 | """experimental setup""" 
24 | stats_start_time = time.ctime() 25 | n_scans = 180 26 | TR = 3. 27 | EV1_epoch_duration = 2 * 30 28 | EV2_epoch_duration = 2 * 45 29 | TA = TR * n_scans 30 | EV1_epochs = TA / EV1_epoch_duration 31 | EV1_epochs = int(TA / EV1_epoch_duration) 32 | EV2_epochs = int(TA / EV2_epoch_duration) 33 | EV1_onset = np.linspace(0, EV1_epoch_duration * (EV1_epochs - 1), EV1_epochs) 34 | EV2_onset = np.linspace(0, EV2_epoch_duration * (EV2_epochs - 1), EV2_epochs) 35 | EV1_on = 30 36 | EV2_on = 45 37 | conditions = ['EV1'] * EV1_epochs + ['EV2'] * EV2_epochs 38 | onset = list(EV1_onset) + list(EV2_onset) 39 | duration = [EV1_on] * EV1_epochs + [EV2_on] * EV2_epochs 40 | paradigm = pd.DataFrame({'trial_type': conditions, 'onset': onset, 41 | 'duration': duration}) 42 | frametimes = np.linspace(0, (n_scans - 1) * TR, n_scans) 43 | maximum_epoch_duration = max(EV1_epoch_duration, EV2_epoch_duration) 44 | hfcut = 1.5 * maximum_epoch_duration # why ? 45 | hfcut = 1./hfcut 46 | 47 | """construct design matrix""" 48 | drift_model = 'Cosine' 49 | hrf_model = 'spm + derivative' 50 | design_matrix = make_first_level_design_matrix(frame_times=frametimes, 51 | events=paradigm, 52 | hrf_model=hrf_model, 53 | drift_model=drift_model, 54 | high_pass=hfcut) 55 | 56 | """fetch input data""" 57 | _subject_data = fetch_fsl_feeds() 58 | subject_data = SubjectData() 59 | subject_data.subject_id = "sub001" 60 | subject_data.func = _subject_data.func 61 | subject_data.anat = _subject_data.anat 62 | 63 | output_dir = os.path.join(_subject_data.data_dir, "pypreprocess_output") 64 | if not os.path.exists(output_dir): 65 | os.makedirs(output_dir) 66 | subject_data.output_dir = os.path.join( 67 | output_dir, subject_data.subject_id) 68 | 69 | 70 | 71 | """preprocess the data""" 72 | results = do_subjects_preproc( 73 | [subject_data], 74 | output_dir=output_dir, 75 | dataset_id="FSL FEEDS single-subject", 76 | dataset_description=DATASET_DESCRIPTION, 77 | do_shutdown_reloaders=False, 78 | ) 79 | 80 | """collect preprocessed data""" 81 | fmri_files = results[0]['func'] 82 | anat_file = results[0]['anat'] 83 | 84 | """specify contrasts""" 85 | _, matrix, names = check_design_matrix(design_matrix) 86 | contrasts = {} 87 | n_columns = len(names) 88 | I = np.eye(len(names)) 89 | for i in range(2): 90 | contrasts['%s' % names[2 * i]] = I[2 * i] 91 | 92 | """more interesting contrasts""" 93 | contrasts['EV1>EV2'] = contrasts['EV1'] - contrasts['EV2'] 94 | contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1'] 95 | contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2'] 96 | 97 | """fit GLM""" 98 | print('\r\nFitting a GLM (this takes time) ..') 99 | fmri_glm = FirstLevelModel() 100 | fmri_glm.fit(fmri_files, design_matrices=design_matrix) 101 | 102 | """save computed mask""" 103 | mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") 104 | print("Saving mask image %s" % mask_path) 105 | nibabel.save(fmri_glm.masker_.mask_img_, mask_path) 106 | 107 | # compute bg unto which activation will be projected 108 | mean_fmri_files = compute_mean_3D_image(fmri_files) 109 | print("Computing contrasts ..") 110 | z_maps = {} 111 | for contrast_id, contrast_val in contrasts.items(): 112 | print("\tcontrast id: %s" % contrast_id) 113 | z_map = fmri_glm.compute_contrast( 114 | contrasts[contrast_id], output_type='z_score') 115 | 116 | z_maps[contrast_id] = z_map 117 | -------------------------------------------------------------------------------- /examples/pipelining/openfmri_preproc.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Author: yannick schwartz, DOHMATOB Elvis Dopgima 3 | Synopsis: Command line interface to preprocess OpenfMRI 4 | """ 5 | 6 | import os 7 | import sys 8 | 9 | from optparse import OptionParser 10 | 11 | from pypreprocess.openfmri import preproc_dataset 12 | 13 | 14 | parser = OptionParser(usage=( 15 | '%prog [input_dir] [output_dir]\n\n' 16 | 'Examples:\n\r' 17 | 'Assuming you current directory is .../pypreprocess/examples' 18 | 'python openfmri_preproc.py /tmp/ds001 /tmp/ds001_preproc' 19 | 'python openfmri_preproc.py /tmp/ds001 /tmp/ds001_preproc -s sub001 -O\n' 20 | 'python openfmri_preproc.py /tmp/ds001 /tmp/ds001_preproc -O -D -n 6')) 21 | 22 | parser.description = ( 23 | '`input_dir` is the path to an existing ' 24 | 'OpenfMRI dataset or where to download it. ' 25 | 'The directory name must match a valid OpenfMRI dataset id, ' 26 | 'and therefore look like /path/to/dir/{dataset_id}. OpenfMRI datasets ' 27 | 'identifiers may be found here: https://openfmri.org/data-sets but ' 28 | 'contain only 3 digits. e.g., the valid id for ds000001 is ds001.') 29 | 30 | parser.add_option( 31 | '-s', '--subjects', dest='subjects', 32 | help=('Process a single subject matching the given id. ' 33 | 'A file path may be given, and must contain ' 34 | 'a subject_id per line.')) 35 | 36 | parser.add_option( 37 | '-O', '--delete-orient', dest='delete_orient', 38 | default=False, action="store_true", 39 | help=('Delete orientation information in nifti files.')) 40 | 41 | parser.add_option( 42 | '-D', '--dartel', dest='dartel', 43 | default=False, action="store_true", 44 | help=('Use dartel.')) 45 | 46 | parser.add_option( 47 | '-n', '--n-jobs', dest='n_jobs', type='int', 48 | default=os.environ.get('N_JOBS', '1'), 49 | help='Number of parallel jobs.') 50 | 51 | options, args = parser.parse_args(sys.argv) 52 | if len(args) < 3: 53 | options, args = parser.parse_args(sys.argv + ['-h']) 54 | input_dir, output_dir = args[1:] 55 | input_dir = input_dir.rstrip('/') 56 | output_dir = output_dir.rstrip('/') 57 | _, dataset_id = os.path.split(input_dir) 58 | 59 | if not dataset_id.startswith('ds') and not os.path.exists(input_dir): 60 | parser.error("The directory does not exist and " 61 | "does not seem to be an OpenfMRI dataset.") 62 | 63 | if options.subjects is not None and os.path.exists(options.subjects): 64 | with open(options.subjects, 'rb') as f: 65 | restrict = f.read().split() 66 | else: 67 | restrict = None if options.subjects is None else [options.subjects] 68 | 69 | preproc_dataset(data_dir=input_dir, 70 | output_dir=output_dir, 71 | restrict_subjects=restrict, 72 | dartel=options.dartel, 73 | delete_orient=options.delete_orient, 74 | n_jobs=options.n_jobs) 75 | 76 | print("\r\nAll output written to %s" % output_dir) 77 | -------------------------------------------------------------------------------- /examples/pure_python/coreg_demos.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr 3 | Synopsis: Demo for coregistration in pure python 4 | 5 | It demos coregistration on a variety of datasets including: 6 | SPM single-subject auditory, NYU rest, ABIDE, etc. 
7 | """ 8 | 9 | import os 10 | import glob 11 | import matplotlib.pyplot as plt 12 | from pypreprocess.datasets import fetch_spm_auditory 13 | from nilearn.datasets import fetch_nyu_rest 14 | from pypreprocess.reporting.check_preprocessing import plot_registration 15 | from pypreprocess.coreg import Coregister 16 | from joblib import Memory 17 | 18 | # misc 19 | mem = Memory("demos_cache") 20 | 21 | 22 | def _run_demo(func, anat): 23 | # fit 24 | coreg = Coregister().fit(anat, func) 25 | 26 | # apply coreg 27 | VFk = coreg.transform(func) 28 | 29 | # QA 30 | plot_registration(anat, VFk, title="before coreg") 31 | plot_registration(VFk, anat, title="after coreg") 32 | plt.show() 33 | 34 | 35 | def _spm_auditory_factory(): 36 | sd = fetch_spm_auditory() 37 | return sd.func[0], sd.anat 38 | 39 | def _nyu_rest_factory(session=1): 40 | from pypreprocess.nipype_preproc_spm_utils import SubjectData 41 | 42 | nyu_data = fetch_nyu_rest(sessions=[session], n_subjects=7) 43 | 44 | session_func = [x for x in nyu_data.func if "session%i" % session in x] 45 | session_anat = [ 46 | x for x in nyu_data.anat_skull if "session%i" % session in x] 47 | 48 | for subject_id in set([os.path.basename(os.path.dirname 49 | (os.path.dirname(x))) 50 | for x in session_func]): 51 | # instantiate subject_data object 52 | subject_data = SubjectData() 53 | subject_data.subject_id = subject_id 54 | subject_data.session_id = session 55 | 56 | # set func 57 | subject_data.func = [x for x in session_func if subject_id in x] 58 | assert len(subject_data.func) == 1 59 | subject_data.func = subject_data.func[0] 60 | 61 | # set anat 62 | subject_data.anat = [x for x in session_anat if subject_id in x] 63 | assert len(subject_data.anat) == 1 64 | subject_data.anat = subject_data.anat[0] 65 | 66 | # set subject output directory 67 | subject_data.output_dir = "/tmp/%s" % subject_id 68 | 69 | subject_data.sanitize(deleteorient=False, niigz2nii=False) 70 | 71 | yield (subject_data.subject_id, subject_data.func[0], 72 | subject_data.anat) 73 | 74 | # spm auditory demo 75 | mem.cache(_run_demo)(*_spm_auditory_factory()) 76 | 77 | # NYU rest demo 78 | for subject_id, func, anat in _nyu_rest_factory(): 79 | print("%s +++NYU rest %s+++\r\n" % ("\t" * 5, subject_id)) 80 | mem.cache(_run_demo)(func, anat) 81 | -------------------------------------------------------------------------------- /examples/pure_python/pure_python_preproc_demo.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr 3 | Synopsis: single_subject_pipeline.py demo 4 | """ 5 | 6 | from pypreprocess.datasets import fetch_spm_multimodal_fmri 7 | from pypreprocess.purepython_preproc_utils import do_subject_preproc 8 | 9 | # fetch data 10 | sd = fetch_spm_multimodal_fmri() 11 | sd.output_dir = "/tmp/sub001" 12 | sd.func = [sd.func1, sd.func2] 13 | sd.session_output_dirs = ["/tmp/sub001/session1", "/tmp/sub001/session2"] 14 | 15 | # preproc data 16 | do_subject_preproc(sd, concat=False, coregister=True, stc=True, 17 | tsdiffana=True, realign=True, report=False, reslice=True) 18 | -------------------------------------------------------------------------------- /examples/pure_python/realign_demos.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: DOHMATOB Elvis Dopgima elvis[dot]dohmatob[at]inria[dot]fr 3 | Synopsis: single_subject_pipeline.py demo 4 | """ 5 | 6 | import os 7 | import sys 8 | from tempfile import 
mkdtemp 9 | from collections import namedtuple 10 | import matplotlib.pyplot as plt 11 | from joblib import Memory 12 | from pypreprocess.realign import MRIMotionCorrection 13 | from pypreprocess.reporting.check_preprocessing import ( 14 | plot_spm_motion_parameters) 15 | from pypreprocess.datasets import ( 16 | fetch_fsl_feeds, fetch_spm_multimodal_fmri, 17 | fetch_spm_auditory) 18 | from nilearn.datasets import fetch_nyu_rest 19 | 20 | # data structure for subject data 21 | SubjectData = namedtuple('SubjectData', 'subject_id func output_dir') 22 | mem = Memory("demos_cache") 23 | 24 | 25 | def _demo_runner(subjects, dataset_id, **spm_realign_kwargs): 26 | """Demo runner. 27 | 28 | Parameters 29 | ---------- 30 | subjects: iterable for subject data 31 | each subject data can be anything, with a func (string or list 32 | of strings; existing file path(s)) and an output_dir (string, 33 | existing dir path) field 34 | dataset_id: string 35 | a short string describing the data being processed (e.g. "HAXBY!") 36 | 37 | Notes 38 | ----- 39 | Don't invoke this directly! 40 | 41 | """ 42 | 43 | # loop over subjects 44 | for subject_data in subjects: 45 | print("%sMotion correction for %s (%s)" % ('\t' * 2, 46 | subject_data.subject_id, 47 | dataset_id)) 48 | 49 | # instantiate realigner 50 | mrimc = MRIMotionCorrection(**spm_realign_kwargs) 51 | 52 | # fit realigner 53 | mrimc = mem.cache(mrimc.fit)(subject_data.func) 54 | 55 | # write realigned files to disk 56 | mem.cache(mrimc.transform)(subject_data.output_dir, reslice=False, 57 | concat=False) 58 | 59 | # plot results 60 | for sess, rp_filename in zip( 61 | range(len(mrimc.realignment_parameters_)), 62 | mrimc.realignment_parameters_): 63 | plot_spm_motion_parameters( 64 | rp_filename, 65 | title="Estimated motion for %s (session %i) of '%s'" % ( 66 | subject_data.subject_id, sess, dataset_id)) 67 | 68 | 69 | def demo_nyu_rest(output_dir): 70 | """Demo for FSL Feeds data. 71 | 72 | Parameters 73 | ---------- 74 | output_dir: string 75 | where output will be written to 76 | 77 | """ 78 | 79 | output_dir = os.path.join(output_dir, 'nyu_mrimc_output') 80 | # fetch data 81 | nyu_data = fetch_nyu_rest() 82 | 83 | # subject data factory 84 | subjects = [] 85 | session = 1 86 | session_func = [x for x in nyu_data.func if "session%i" % session in x] 87 | for subject_id in set([os.path.basename( 88 | os.path.dirname 89 | (os.path.dirname(x))) for x in session_func]): 90 | # set func 91 | func = [ 92 | x for x in session_func if subject_id in x] 93 | assert len(func) == 1 94 | func = func[0] 95 | subjects.append(SubjectData( 96 | subject_id=subject_id, func=func, output_dir=os.path.join( 97 | output_dir, "session%i" % session, subject_id))) 98 | 99 | # invoke demon to run de demo 100 | _demo_runner(subjects, "NYU resting state") 101 | 102 | 103 | def demo_fsl_feeds(output_dir): 104 | """Demo for FSL Feeds data. 
105 | 106 | Parameters 107 | ---------- 108 | output_dir: string 109 | where output will be written to 110 | 111 | """ 112 | output_dir = os.path.join(output_dir, "fsl_feeds_mrimc_output") 113 | fsl_feeds = fetch_fsl_feeds() 114 | subject_id = "sub001" 115 | subjects = [SubjectData(subject_id=subject_id, 116 | func=fsl_feeds.func, 117 | output_dir=os.path.join(output_dir, subject_id))] 118 | _demo_runner(subjects, "FSL FEEDS") 119 | 120 | 121 | def demo_spm_multimodal_fmri(output_dir): 122 | """Demo for SPM multimodal fmri (faces vs scrambled) 123 | 124 | Parameters 125 | ---------- 126 | output_dir: string 127 | where output will be written to 128 | 129 | """ 130 | output_dir = os.path.join(output_dir, "spm_multimodal_fmri_output") 131 | spm_multimodal_fmri = fetch_spm_multimodal_fmri() 132 | subject_id = "sub001" 133 | subjects = [SubjectData(subject_id=subject_id, 134 | func=[spm_multimodal_fmri.func1, 135 | spm_multimodal_fmri.func2], 136 | output_dir=os.path.join(output_dir, subject_id))] 137 | _demo_runner(subjects, "SPM Multimodal fMRI faces vs scrambled", 138 | n_sessions=2) 139 | 140 | 141 | def demo_spm_auditory(output_dir): 142 | """Demo for SPM single-subject Auditory 143 | 144 | Parameters 145 | ---------- 146 | output_dir: string 147 | where output will be written to 148 | 149 | """ 150 | output_dir = os.path.join(output_dir, "spm_auditory_output") 151 | spm_auditory = fetch_spm_auditory() 152 | subject_id = "sub001" 153 | subjects = [SubjectData(subject_id=subject_id, 154 | func=[spm_auditory.func], 155 | output_dir=os.path.join(output_dir, subject_id))] 156 | _demo_runner(subjects, "SPM single-subject Auditory") 157 | 158 | if __name__ == '__main__': 159 | 160 | output_root_dir = None 161 | if (len(sys.argv) > 1): 162 | output_root_dir = os.path.abspath(os.path.expanduser(sys.argv[1])) 163 | 164 | if (output_root_dir is None or not os.path.isdir(output_root_dir)): 165 | output_root_dir = mkdtemp() 166 | 167 | # run spm multimodal demo 168 | demo_spm_auditory(output_root_dir) 169 | 170 | # # run spm multimodal demo 171 | # demo_spm_multimodal_fmri(output_root_dir) 172 | 173 | # # run fsl feeds demo 174 | # demo_fsl_feeds(output_root_dir) 175 | 176 | # # run nyu_rest demo 177 | # demo_nyu_rest(output_root_dir) 178 | print("output written in {0}".format(output_root_dir)) 179 | 180 | plt.show() 181 | -------------------------------------------------------------------------------- /pypreprocess.py: -------------------------------------------------------------------------------- 1 | """ 2 | Synopsis: Run pypreprocess using dataset-specific configuration file given 3 | at command line. 
4 | Author: DOHMATOB Elvis Dopgima 5 | 6 | """ 7 | 8 | import sys 9 | import matplotlib 10 | matplotlib.use('Agg') 11 | from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc 12 | 13 | if __name__ == "__main__": 14 | # sanitize command-line usage 15 | if len(sys.argv) < 2: 16 | print("\r\nUsage: python %s \r\n" % 17 | sys.argv[0]) 18 | print("Example:\r\npython %s scripts/HCP_tfMRI_MOTOR_preproc" 19 | ".ini\r\n" % sys.argv[0]) 20 | sys.exit(1) 21 | 22 | # consume config file and run pypreprocess back-end 23 | subjects = do_subjects_preproc(sys.argv[1]) 24 | -------------------------------------------------------------------------------- /pypreprocess/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | 3 | import matplotlib 4 | matplotlib.use('Agg') 5 | -------------------------------------------------------------------------------- /pypreprocess/cluster_level_analysis.py: -------------------------------------------------------------------------------- 1 | """ Utilities to describe the result of cluster-level analysis of statistical 2 | maps. 3 | 4 | Author: Bertrand Thirion, 2015 5 | """ 6 | import numpy as np 7 | from scipy.ndimage import label, maximum_filter 8 | from scipy.stats import norm 9 | from nilearn.image.resampling import coord_transform 10 | from nilearn._utils.niimg_conversions import check_niimg, _check_same_fov 11 | 12 | 13 | def fdr_threshold(z_vals, alpha): 14 | """ return the BH fdr for the input z_vals""" 15 | z_vals_ = - np.sort(- z_vals) 16 | p_vals = norm.sf(z_vals_) 17 | n_samples = len(p_vals) 18 | pos = p_vals < alpha * np.linspace( 19 | .5 / n_samples, 1 - .5 / n_samples, n_samples) 20 | if pos.any(): 21 | return (z_vals_[pos][-1] - 1.e-8) 22 | else: 23 | return np.infty 24 | 25 | 26 | def fdr_p_values(z_vals): 27 | """ return the fdr p_values for the z-variate""" 28 | order = np.argsort(- z_vals) 29 | p_vals = norm.sf(z_vals[order]) 30 | n_samples = len(z_vals) 31 | fdr = np.minimum(1, p_vals / np.linspace(1. / n_samples, 1., n_samples)) 32 | for i in range(n_samples - 1, 0, -1): 33 | fdr[i - 1] = min(fdr[i - 1], fdr[i]) 34 | 35 | inv_order = np.empty(n_samples, 'int') 36 | inv_order[order] = np.arange(n_samples) 37 | return fdr[inv_order] 38 | 39 | 40 | def empirical_p_value(z_score, ref): 41 | """ retrun the percentile """ 42 | ranks = np.searchsorted(np.sort(ref), z_score) 43 | return 1 - ranks * 1. / ref.size 44 | 45 | 46 | def cluster_stats(stat_img, mask_img, threshold, height_control='fpr', 47 | cluster_th=0, nulls=None): 48 | """ 49 | Return a list of clusters, each cluster being represented by a 50 | dictionary. Clusters are sorted by descending size order. 
Within 51 | each cluster, local maxima are sorted by descending statical value 52 | 53 | Parameters 54 | ---------- 55 | stat_img: Niimg-like object, 56 | statsitical image (presumably in z scale) 57 | mask_img: Niimg-like object, 58 | mask image 59 | threshold: float, 60 | cluster forming threshold (either a p-value or z-scale value) 61 | height_control: string 62 | false positive control meaning of cluster forming 63 | threshold: 'fpr'|'fdr'|'bonferroni'|'none' 64 | cluster_th: int or float, 65 | cluster size threshold 66 | nulls: dictionary, 67 | statistics of the null distribution 68 | 69 | Notes 70 | ----- 71 | If there is no cluster, an empty list is returned 72 | """ 73 | if nulls is None: nulls = {} 74 | 75 | # Masking 76 | mask_img, stat_img = check_niimg(mask_img), check_niimg(stat_img) 77 | if not _check_same_fov(mask_img, stat_img): 78 | raise ValueError('mask_img and stat_img do not have the same fov') 79 | mask = mask_img.get_fdata().astype(np.bool) 80 | affine = mask_img.affine 81 | stat_map = stat_img.get_fdata() * mask 82 | n_voxels = mask.sum() 83 | 84 | # Thresholding 85 | if height_control == 'fpr': 86 | z_th = norm.isf(threshold) 87 | elif height_control == 'fdr': 88 | z_th = fdr_threshold(stat_map[mask], threshold) 89 | elif height_control == 'bonferroni': 90 | z_th = norm.isf(threshold / n_voxels) 91 | else: # Brute-force thresholding 92 | z_th = threshold 93 | 94 | p_th = norm.sf(z_th) 95 | # General info 96 | info = {'n_voxels': n_voxels, 97 | 'threshold_z': z_th, 98 | 'threshold_p': p_th, 99 | 'threshold_pcorr': np.minimum(1, p_th * n_voxels)} 100 | 101 | above_th = stat_map > z_th 102 | above_values = stat_map * above_th 103 | if (above_th == 0).all(): 104 | return [], info 105 | 106 | # Extract connected components above threshold 107 | labels, n_labels = label(above_th) 108 | 109 | # Extract the local maxima anove the threshold 110 | maxima_mask = (above_values == 111 | np.maximum(z_th, maximum_filter(above_values, 3))) 112 | x, y, z = np.array(np.where(maxima_mask)) 113 | maxima_coords = np.array(coord_transform(x, y, z, affine)).T 114 | maxima_labels = labels[maxima_mask] 115 | maxima_values = above_values[maxima_mask] 116 | 117 | # FDR-corrected p-values 118 | max_fdr_p_values = fdr_p_values(stat_map[mask])[maxima_mask[mask]] 119 | 120 | # Default "nulls" 121 | if not 'zmax' in nulls: 122 | nulls['zmax'] = 'bonferroni' 123 | if not 'smax' in nulls: 124 | nulls['smax'] = None 125 | if not 's' in nulls: 126 | nulls['s'] = None 127 | 128 | # Make list of clusters, each cluster being a dictionary 129 | clusters = [] 130 | for k in range(n_labels): 131 | cluster_size = np.sum(labels == k + 1) 132 | if cluster_size >= cluster_th: 133 | 134 | # get the position of the maxima that belong to that cluster 135 | in_cluster = maxima_labels == k + 1 136 | 137 | # sort the maxima by decreasing statistical value 138 | max_vals = maxima_values[in_cluster] 139 | sorted_ = max_vals.argsort()[::-1] 140 | 141 | # Report significance levels in each cluster 142 | z_score = max_vals[sorted_] 143 | p_values = norm.sf(z_score) 144 | 145 | # Voxel-level corrected p-values 146 | fwer_p_value = None 147 | if nulls['zmax'] == 'bonferroni': 148 | fwer_p_value = np.minimum(1, p_values * n_voxels) 149 | elif isinstance(nulls['zmax'], np.ndarray): 150 | fwer_p_value = empirical_p_value( 151 | clusters['z_score'], nulls['zmax']) 152 | 153 | # Cluster-level p-values (corrected) 154 | cluster_fwer_p_value = None 155 | if isinstance(nulls['smax'], np.ndarray): 156 | cluster_fwer_p_value = 
empirical_p_value( 157 | cluster_size, nulls['smax']) 158 | 159 | # Cluster-level p-values (uncorrected) 160 | cluster_p_value = None 161 | if isinstance(nulls['s'], np.ndarray): 162 | cluster_p_value = empirical_p_value( 163 | cluster_size, nulls['s']) 164 | 165 | # write all this into the cluster structure 166 | clusters.append({ 167 | 'size': cluster_size, 168 | 'maxima': maxima_coords[in_cluster][sorted_], 169 | 'z_score': z_score, 170 | 'fdr_p_value': max_fdr_p_values[in_cluster][sorted_], 171 | 'p_value': p_values, 172 | 'fwer_p_value': fwer_p_value, 173 | 'cluster_fwer_p_value': cluster_fwer_p_value, 174 | 'cluster_p_value': cluster_p_value 175 | }) 176 | 177 | # Sort clusters by descending size order 178 | order = np.argsort(- np.array([cluster['size'] for cluster in clusters])) 179 | clusters = [clusters[i] for i in order] 180 | 181 | return clusters, info 182 | -------------------------------------------------------------------------------- /pypreprocess/external/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/external/__init__.py -------------------------------------------------------------------------------- /pypreprocess/external/nistats/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/external/nistats/__init__.py -------------------------------------------------------------------------------- /pypreprocess/external/nistats/experimental_paradigm.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # vi: set ft=python sts=4 ts=4 sw=4 et: 3 | from __future__ import with_statement 4 | """ 5 | An experimental protocol is handled as a pandas DataFrame 6 | that includes an 'onset' field. 7 | This yields the onset time of the events in the paradigm. It can also contain: 8 | * a 'name' field that yields the condition identifier. 9 | * a 'duration' field that yields event duration (for so-called block 10 | paradigms). 11 | * a 'modulation' field that associated a scalar value to each event. 12 | 13 | Author: Bertrand Thirion, 2015 14 | """ 15 | 16 | import numpy as np 17 | 18 | 19 | def check_paradigm(paradigm): 20 | """Test that the DataFrame is describes a valid experimental paradigm 21 | 22 | A DataFrame is considered as valid whenever it has an 'onset' key. 23 | 24 | Parameters 25 | ---------- 26 | paradigm : pandas DataFrame 27 | Describes a functional paradigm. 28 | 29 | Returns 30 | ------- 31 | name : array of shape (n_events,), dtype='s' 32 | Per-event experimental conditions identifier. 33 | Defaults to np.repeat('dummy', len(onsets)). 
34 | 35 | onset : array of shape (n_events,), dtype='f' 36 | Per-event onset time (in seconds) 37 | 38 | duration : array of shape (n_events,), dtype='f' 39 | Per-event durantion, (in seconds) 40 | defaults to zeros(n_events) when no duration is provided 41 | 42 | modulation : array of shape (n_events,), dtype='f' 43 | Per-event modulation, (in seconds) 44 | defaults to ones(n_events) when no duration is provided 45 | """ 46 | if 'onset' not in paradigm.keys(): 47 | raise ValueError('The provided paradigm has no onset key') 48 | 49 | onset = np.array(paradigm['onset']) 50 | n_events = len(onset) 51 | name = np.repeat('dummy', n_events) 52 | duration = np.zeros(n_events) 53 | modulation = np.ones(n_events) 54 | if 'name' in paradigm.keys(): 55 | name = np.array(paradigm['name']) 56 | if 'duration' in paradigm.keys(): 57 | duration = np.array(paradigm['duration']).astype(np.float) 58 | if 'modulation' in paradigm.keys(): 59 | modulation = np.array(paradigm['modulation']).astype(np.float) 60 | return name, onset, duration, modulation 61 | 62 | 63 | def paradigm_from_csv(csv_file): 64 | """Utility function to directly read the paradigm from a csv file 65 | 66 | This is simply meant to avoid explicitly import pandas everywhere. 67 | 68 | Parameters 69 | ---------- 70 | csv_file : string, 71 | Path to a csv file. 72 | 73 | Returns 74 | ------- 75 | paradigm : pandas DataFrame, 76 | Holding the paradigm information. 77 | """ 78 | import pandas 79 | return pandas.read_csv(csv_file) 80 | -------------------------------------------------------------------------------- /pypreprocess/external/nistats/utils.py: -------------------------------------------------------------------------------- 1 | """ Misc utilities for the library 2 | 3 | Authors: Bertrand Thirion, Matthew Brett, 2015 4 | """ 5 | import sys 6 | import scipy.linalg as spl 7 | import numpy as np 8 | from scipy.stats import norm 9 | from warnings import warn 10 | 11 | py3 = sys.version_info[0] >= 3 12 | 13 | 14 | def z_score(pvalue): 15 | """ Return the z-score corresponding to a given p-value. 16 | """ 17 | pvalue = np.minimum(np.maximum(pvalue, 1.e-300), 1. - 1.e-16) 18 | return norm.isf(pvalue) 19 | 20 | 21 | def multiple_fast_inv(a): 22 | """Compute the inverse of a set of arrays. 23 | 24 | Parameters 25 | ---------- 26 | a: array_like of shape (n_samples, n_dim, n_dim) 27 | Set of square matrices to be inverted. A is changed in place. 28 | 29 | Returns 30 | ------- 31 | a: ndarray 32 | yielding the inverse of the inputs 33 | 34 | Raises 35 | ------ 36 | LinAlgError : 37 | If `a` is singular. 38 | ValueError : 39 | If `a` is not square, or not 2-dimensional. 40 | 41 | Notes 42 | ----- 43 | This function is borrowed from scipy.linalg.inv, 44 | but with some customizations for speed-up. 45 | """ 46 | if a.shape[1] != a.shape[2]: 47 | raise ValueError('a must have shape (n_samples, n_dim, n_dim)') 48 | from scipy.linalg import calc_lwork 49 | from scipy.linalg.lapack import get_lapack_funcs 50 | a1, n = a[0], a.shape[0] 51 | getrf, getri = get_lapack_funcs(('getrf', 'getri'), (a1,)) 52 | for i in range(n): 53 | if (getrf.module_name[:7] == 'clapack' 54 | and getri.module_name[:7] != 'clapack'): 55 | # ATLAS 3.2.1 has getrf but not getri. 
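            # work around the clapack/flapack mismatch: pass the transposed
            # array (with rowmajor=0) and transpose the LU factors back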
56 | lu, piv, info = getrf(np.transpose(a[i]), rowmajor=0, 57 | overwrite_a=True) 58 | a[i] = np.transpose(lu) 59 | else: 60 | a[i], piv, info = getrf(a[i], overwrite_a=True) 61 | if info == 0: 62 | if getri.module_name[:7] == 'flapack': 63 | lwork = calc_lwork.getri(getri.prefix, a1.shape[0]) 64 | lwork = lwork[1] 65 | # XXX: the following line fixes curious SEGFAULT when 66 | # benchmarking 500x500 matrix inverse. This seems to 67 | # be a bug in LAPACK ?getri routine because if lwork is 68 | # minimal (when using lwork[0] instead of lwork[1]) then 69 | # all tests pass. Further investigation is required if 70 | # more such SEGFAULTs occur. 71 | lwork = int(1.01 * lwork) 72 | a[i], _ = getri(a[i], piv, lwork=lwork, overwrite_lu=1) 73 | else: # clapack 74 | a[i], _ = getri(a[i], piv, overwrite_lu=1) 75 | else: 76 | raise ValueError('Matrix LU decomposition failed') 77 | return a 78 | 79 | 80 | def multiple_mahalanobis(effect, covariance): 81 | """Returns the squared Mahalanobis distance for a given set of samples 82 | 83 | Parameters 84 | ---------- 85 | effect: array of shape (n_features, n_samples), 86 | Each column represents a vector to be evaluated 87 | 88 | covariance: array of shape (n_features, n_features, n_samples), 89 | Corresponding covariance models stacked along the last axis 90 | 91 | Returns 92 | ------- 93 | sqd: array of shape (n_samples,) 94 | the squared distances (one per sample) 95 | """ 96 | # check size 97 | if effect.ndim == 1: 98 | effect = effect[:, np.newaxis] 99 | if covariance.ndim == 2: 100 | covariance = covariance[:, :, np.newaxis] 101 | if effect.shape[0] != covariance.shape[0]: 102 | raise ValueError('Inconsistant shape for effect and covariance') 103 | if covariance.shape[0] != covariance.shape[1]: 104 | raise ValueError('Inconsistant shape for covariance') 105 | 106 | # transpose and make contuguous for the sake of speed 107 | Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T) 108 | 109 | # compute the inverse of the covariances 110 | Kt = multiple_fast_inv(Kt) 111 | 112 | # derive the squared Mahalanobis distances 113 | sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1) 114 | return sqd 115 | 116 | 117 | def full_rank(X, cmax=1e15): 118 | """ Computes the condition number of X and if it is larger than cmax, 119 | returns a matrix with a condition number smaller than cmax. 120 | 121 | Parameters 122 | ---------- 123 | X : array of shape (nrows, ncols) 124 | input array 125 | 126 | cmax : float, optional (default:1.e15), 127 | tolerance for condition number 128 | 129 | Returns 130 | ------- 131 | X : array of shape (nrows, ncols) 132 | output array 133 | 134 | cond : float, 135 | actual condition number 136 | """ 137 | U, s, V = spl.svd(X, full_matrices=False) 138 | smax, smin = s.max(), s.min() 139 | cond = smax / smin 140 | if cond < cmax: 141 | return X, cond 142 | 143 | warn('Matrix is singular at working precision, regularizing...') 144 | lda = (smax - cmax * smin) / (cmax - 1) 145 | X = np.dot(U, np.dot(np.diag(s + lda), V)) 146 | return X, cmax 147 | 148 | 149 | def pos_recipr(X): 150 | """ Return element-wise reciprocal of array, setting `X`>=0 to 0 151 | 152 | Return the reciprocal of an array, setting all entries less than or 153 | equal to 0 to 0. Therefore, it presumes that X should be positive in 154 | general. 
155 | 156 | Parameters 157 | ---------- 158 | X : array-like 159 | 160 | Returns 161 | ------- 162 | rX : array 163 | array of same shape as `X`, dtype np.float, with values set to 164 | 1/X where X > 0, 0 otherwise 165 | """ 166 | X = np.asarray(X) 167 | return np.where(X <= 0, 0, 1. / X) 168 | 169 | # _basestring = str if py3 else basestring 170 | -------------------------------------------------------------------------------- /pypreprocess/external/tempita/__init__.py: -------------------------------------------------------------------------------- 1 | # The original Tempita implements all of its templating code here. 2 | # Moved it to _tempita.py to make the compilation portable. 3 | 4 | from ._tempita import * 5 | -------------------------------------------------------------------------------- /pypreprocess/external/tempita/_looper.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper for looping over sequences, particular in templates. 3 | 4 | Often in a loop in a template it's handy to know what's next up, 5 | previously up, if this is the first or last item in the sequence, etc. 6 | These can be awkward to manage in a normal Python loop, but using the 7 | looper you can get a better sense of the context. Use like:: 8 | 9 | >>> for loop, item in looper(['a', 'b', 'c']): # doctest: +SKIP 10 | ... print loop.number, item 11 | ... if not loop.last: 12 | ... print '---' 13 | 1 a 14 | --- 15 | 2 b 16 | --- 17 | 3 c 18 | 19 | """ 20 | 21 | import sys 22 | from .compat3 import basestring_ 23 | 24 | __all__ = ['looper'] 25 | 26 | 27 | class looper(object): 28 | """ 29 | Helper for looping (particularly in templates) 30 | 31 | Use this like:: 32 | 33 | for loop, item in looper(seq): 34 | if loop.first: 35 | ... 
36 | """ 37 | 38 | def __init__(self, seq): 39 | self.seq = seq 40 | 41 | def __iter__(self): 42 | return looper_iter(self.seq) 43 | 44 | def __repr__(self): 45 | return '<%s for %r>' % ( 46 | self.__class__.__name__, self.seq) 47 | 48 | 49 | class looper_iter(object): 50 | 51 | def __init__(self, seq): 52 | self.seq = list(seq) 53 | self.pos = 0 54 | 55 | def __iter__(self): 56 | return self 57 | 58 | def __next__(self): 59 | if self.pos >= len(self.seq): 60 | raise StopIteration 61 | result = loop_pos(self.seq, self.pos), self.seq[self.pos] 62 | self.pos += 1 63 | return result 64 | 65 | if sys.version < "3": 66 | next = __next__ 67 | 68 | 69 | class loop_pos(object): 70 | 71 | def __init__(self, seq, pos): 72 | self.seq = seq 73 | self.pos = pos 74 | 75 | def __repr__(self): 76 | return '' % ( 77 | self.seq[self.pos], self.pos) 78 | 79 | def index(self): 80 | return self.pos 81 | index = property(index) 82 | 83 | def number(self): 84 | return self.pos + 1 85 | number = property(number) 86 | 87 | def item(self): 88 | return self.seq[self.pos] 89 | item = property(item) 90 | 91 | def __next__(self): 92 | try: 93 | return self.seq[self.pos + 1] 94 | except IndexError: 95 | return None 96 | __next__ = property(__next__) 97 | 98 | if sys.version < "3": 99 | next = __next__ 100 | 101 | def previous(self): 102 | if self.pos == 0: 103 | return None 104 | return self.seq[self.pos - 1] 105 | previous = property(previous) 106 | 107 | def odd(self): 108 | return not self.pos % 2 109 | odd = property(odd) 110 | 111 | def even(self): 112 | return self.pos % 2 113 | even = property(even) 114 | 115 | def first(self): 116 | return self.pos == 0 117 | first = property(first) 118 | 119 | def last(self): 120 | return self.pos == len(self.seq) - 1 121 | last = property(last) 122 | 123 | def length(self): 124 | return len(self.seq) 125 | length = property(length) 126 | 127 | def first_group(self, getter=None): 128 | """ 129 | Returns true if this item is the start of a new group, 130 | where groups mean that some attribute has changed. The getter 131 | can be None (the item itself changes), an attribute name like 132 | ``'.attr'``, a function, or a dict key or list index. 133 | """ 134 | if self.first: 135 | return True 136 | return self._compare_group(self.item, self.previous, getter) 137 | 138 | def last_group(self, getter=None): 139 | """ 140 | Returns true if this item is the end of a new group, 141 | where groups mean that some attribute has changed. The getter 142 | can be None (the item itself changes), an attribute name like 143 | ``'.attr'``, a function, or a dict key or list index. 
144 | """ 145 | if self.last: 146 | return True 147 | return self._compare_group(self.item, self.__next__, getter) 148 | 149 | def _compare_group(self, item, other, getter): 150 | if getter is None: 151 | return item != other 152 | elif (isinstance(getter, basestring_) 153 | and getter.startswith('.')): 154 | getter = getter[1:] 155 | if getter.endswith('()'): 156 | getter = getter[:-2] 157 | return getattr(item, getter)() != getattr(other, getter)() 158 | else: 159 | return getattr(item, getter) != getattr(other, getter) 160 | elif hasattr(getter, '__call__'): 161 | return getter(item) != getter(other) 162 | else: 163 | return item[getter] != other[getter] 164 | -------------------------------------------------------------------------------- /pypreprocess/external/tempita/compat3.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | __all__ = ['b', 'basestring_', 'bytes', 'unicode_', 'next', 'is_unicode'] 4 | 5 | if sys.version < "3": 6 | b = bytes = str 7 | basestring_ = basestring 8 | unicode_ = unicode 9 | else: 10 | 11 | def b(s): 12 | if isinstance(s, str): 13 | return s.encode('latin1') 14 | return bytes(s) 15 | basestring_ = (bytes, str) 16 | bytes = bytes 17 | unicode_ = str 18 | text = str 19 | 20 | if sys.version < "3": 21 | 22 | def next(obj): 23 | return obj.next() 24 | else: 25 | next = next 26 | 27 | if sys.version < "3": 28 | 29 | def is_unicode(obj): 30 | return isinstance(obj, unicode) 31 | else: 32 | 33 | def is_unicode(obj): 34 | return isinstance(obj, str) 35 | 36 | 37 | def coerce_text(v): 38 | if not isinstance(v, basestring_): 39 | if sys.version < "3": 40 | attr = '__unicode__' 41 | else: 42 | attr = '__str__' 43 | if hasattr(v, attr): 44 | return unicode(v) 45 | else: 46 | return bytes(v) 47 | return v 48 | -------------------------------------------------------------------------------- /pypreprocess/fsl_to_nistats.py: -------------------------------------------------------------------------------- 1 | """ 2 | :Module: fsl_to_nistats 3 | :Synopsis: Utility script for converting FSL configuration (design, etc.) files 4 | into Dataframe format. 
5 | :Author: DOHMATOB Elvis Dopgima 6 | 7 | """ 8 | 9 | import os 10 | import re 11 | import numpy as np 12 | from pypreprocess.external.nistats.design_matrix import make_design_matrix 13 | import pandas as pd 14 | 15 | # regex for contrasts 16 | CON_REAL_REGX = ("set fmri\(con_real(?P\d+?)\.(?P\d+?)\)" 17 | " (?P\S+)") 18 | 19 | # regex for "Number of EVs" 20 | NUM_EV_REGX = """set fmri\(evs_orig\) (?P\d+) 21 | set fmri\(evs_real\) (?P\d+) 22 | set fmri\(evs_vox\) (?P\d+)""" 23 | 24 | # regex for "Number of contrasts" 25 | NUM_CON_REGX = """set fmri\(ncon_orig\) (?P\d+) 26 | set fmri\(ncon_real\) (?P\d+)""" 27 | 28 | # regex for "# EV %i title" 29 | EV_TITLE_REGX = """set fmri\(evtitle\d+?\) \"(?P.+)\"""" 30 | 31 | # regex for "Title for contrast_real %i" 32 | CON_TITLE_REGX = """set fmri\(conname_real\.\d+?\) \"(?P.+)\"""" 33 | 34 | # regex for "Basic waveform shape (EV %i)" 35 | # 0 : Square 36 | # 1 : Sinusoid 37 | # 2 : Custom (1 entry per volume) 38 | # 3 : Custom (3 column format) 39 | # 4 : Interaction 40 | # 10 : Empty (all zeros) 41 | EV_SHAPE_REGX = """set fmri\(shape\d+\) (?P[0|1|3])""" 42 | 43 | # regex for "Custom EV file (EV %i)" 44 | EV_CUSTOM_FILE_REGX = """set fmri\(custom\d+?\) \"(?P.+)\"""" 45 | 46 | 47 | def _get_abspath_relative_to_file(filename, ref_filename): 48 | """ 49 | Returns the absolute path of a given filename relative to a reference 50 | filename (ref_filename). 51 | 52 | """ 53 | 54 | # we only handle files 55 | assert os.path.isfile(ref_filename) 56 | 57 | old_cwd = os.getcwd() # save CWD 58 | os.chdir(os.path.dirname(ref_filename)) # we're in context now 59 | abspath = os.path.abspath(filename) # bing0! 60 | os.chdir(old_cwd) # restore CWD 61 | 62 | return abspath 63 | 64 | 65 | def _insert_directory_in_file_name(filename, directory, level): 66 | if not isinstance(filename, str): 67 | return [_insert_directory_in_file_name(x, directory, level) 68 | for x in filename] 69 | 70 | filename = os.path.abspath(filename) 71 | parts = filename.split("/")[1:] 72 | assert level < len(parts) 73 | 74 | head = parts[:-1 - level] 75 | tail = parts[len(parts) - level - 1:-1] 76 | return os.path.join("/", *tuple(head + [directory] + tail + [ 77 | os.path.basename(filename)])) 78 | 79 | 80 | def read_fsl_design_file(design_filename): 81 | """ 82 | Scrapes an FSL design file for the list of contrasts. 
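
    Parameters
    ----------
    design_filename: string
        path to the FSL design file (e.g. a design.fsf) to be parsed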
83 | 84 | Returns 85 | ------- 86 | conditions: list of n_conditions strings 87 | condition (EV) titles 88 | 89 | timing_files: list of n_condtions strings 90 | absolute paths of files containing timing info for each condition_id 91 | 92 | contrast_ids: list of n_contrasts strings 93 | contrast titles 94 | 95 | contrasts: 2D array of shape (n_contrasts, n_conditions) 96 | array of contrasts, one line per contrast_id; one column per 97 | condition_id 98 | 99 | Raises 100 | ------ 101 | AssertionError or IndexError if design_filename is corrupt (not in 102 | official FSL format) 103 | 104 | """ 105 | 106 | # read design file 107 | design_conf = open(design_filename, 'r').read() 108 | 109 | # scrape n_conditions and n_contrasts 110 | n_conditions_orig = int(re.search(NUM_EV_REGX, 111 | design_conf).group("evs_orig")) 112 | n_conditions = int(re.search(NUM_EV_REGX, design_conf).group("evs_real")) 113 | n_contrasts = int(re.search(NUM_CON_REGX, design_conf).group("ncon_real")) 114 | 115 | # initialize 2D array of contrasts 116 | contrasts = np.zeros((n_contrasts, n_conditions)) 117 | 118 | # lookup EV titles 119 | conditions = [item.group("evtitle") for item in re.finditer( 120 | EV_TITLE_REGX, design_conf)] 121 | assert len(conditions) == n_conditions_orig 122 | 123 | # lookup contrast titles 124 | contrast_ids = [item.group("conname_real")for item in re.finditer( 125 | CON_TITLE_REGX, design_conf)] 126 | assert len(contrast_ids) == n_contrasts 127 | 128 | # # lookup EV (condition) shapes 129 | # condition_shapes = [int(item.group("shape")) for item in re.finditer( 130 | # EV_SHAPE_REGX, design_conf)] 131 | # print(condition_shapes) 132 | 133 | # lookup EV (condition) custom files 134 | timing_files = [_get_abspath_relative_to_file(item.group("custom"), 135 | design_filename) 136 | for item in re.finditer(EV_CUSTOM_FILE_REGX, design_conf)] 137 | 138 | # lookup the contrast values 139 | count = 0 140 | for item in re.finditer(CON_REAL_REGX, design_conf): 141 | count += 1 142 | value = float(item.group('con_val')) 143 | 144 | i = int(item.group('con_num')) - 1 145 | j = int(item.group('ev_num')) - 1 146 | 147 | # roll-call 148 | assert 0 <= i < n_contrasts, item.group() 149 | assert 0 <= j < n_conditions, item.group() 150 | 151 | contrasts[i, j] = value 152 | 153 | # roll-call 154 | assert count == n_contrasts * n_conditions, count 155 | 156 | return conditions, timing_files, contrast_ids, contrasts 157 | 158 | 159 | def make_paradigm_from_timing_files(timing_files, condition_ids=None): 160 | if not condition_ids is None: 161 | assert len(condition_ids) == len(timing_files) 162 | 163 | onsets = [] 164 | durations = [] 165 | amplitudes = [] 166 | _condition_ids = [] 167 | count = 0 168 | for timing_file in timing_files: 169 | timing = np.loadtxt(timing_file) 170 | if timing.ndim == 1: 171 | timing = timing[np.newaxis, :] 172 | 173 | if condition_ids is None: 174 | condition_id = os.path.basename(timing_file).lower( 175 | ).split('.')[0] 176 | else: 177 | condition_id = condition_ids[count] 178 | _condition_ids = _condition_ids + [condition_id 179 | ] * timing.shape[0] 180 | 181 | count += 1 182 | 183 | if timing.shape[1] == 3: 184 | onsets = onsets + list(timing[..., 0]) 185 | durations = durations + list(timing[..., 1]) 186 | amplitudes = amplitudes + list(timing[..., 2]) 187 | elif timing.shape[1] == 2: 188 | onsets = onsets + list(timing[..., 0]) 189 | durations = durations + list(timing[..., 1]) 190 | amplitudes = durations + list(np.ones(len(timing))) 191 | elif timing.shape[1] == 1: 192 | 
onsets = onsets + list(timing[..., 0]) 193 | durations = durations + list(np.zeros(len(timing))) 194 | amplitudes = durations + list(np.ones(len(timing))) 195 | else: 196 | raise TypeError( 197 | "Timing info must either be 1D array of onsets of 2D " 198 | "array with 2 or 3 columns: the first column is for " 199 | "the onsets, the second for the durations, and the " 200 | "third --if present-- if for the amplitudes; got %s" % timing) 201 | 202 | return pd.DataFrame({'name': condition_ids, 203 | 'onset': onsets, 204 | 'duration': durations, 205 | 'modulation': amplitudes}) 206 | 207 | 208 | def make_dmtx_from_timing_files(timing_files, condition_ids=None, 209 | frametimes=None, n_scans=None, tr=None, 210 | add_regs_file=None, 211 | add_reg_names=None, 212 | **make_dmtx_kwargs): 213 | # make paradigm 214 | paradigm = make_paradigm_from_timing_files(timing_files, 215 | condition_ids=condition_ids) 216 | 217 | # make frametimes 218 | if frametimes is None: 219 | assert not n_scans is None, ("frametimes not specified, especting a " 220 | "value for n_scans") 221 | assert not tr is None, ("frametimes not specified, especting a " 222 | "value for tr") 223 | frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) 224 | else: 225 | assert n_scans is None, ("frametimes specified, not especting a " 226 | "value for n_scans") 227 | assert tr is None, ("frametimes specified, not especting a " 228 | "value for tr") 229 | 230 | # load addition regressors from file 231 | if not add_regs_file is None: 232 | if isinstance(add_regs_file, np.ndarray): 233 | add_regs = add_regs_file 234 | else: 235 | assert os.path.isfile(add_regs_file), ( 236 | "add_regs_file %s doesn't exist") 237 | add_regs = np.loadtxt(add_regs_file) 238 | assert add_regs.ndim == 2, ( 239 | "Bad add_regs_file: %s (must contain a 2D array, each column " 240 | "representing the values of a single regressor)" % add_regs_file) 241 | if add_reg_names is None: 242 | add_reg_names = ["R%i" % (col + 1) for col in range( 243 | add_regs.shape[-1])] 244 | else: 245 | assert len(add_reg_names) == add_regs.shape[1], ( 246 | "Expecting %i regressor names, got %i" % ( 247 | add_regs.shape[1], len(add_reg_names))) 248 | 249 | make_dmtx_kwargs["add_reg_names"] = add_reg_names 250 | make_dmtx_kwargs["add_regs"] = add_regs 251 | 252 | # make design matrix 253 | design_matrix = make_design_matrix(frame_times=frametimes, 254 | paradigm=paradigm, 255 | **make_dmtx_kwargs) 256 | 257 | # return output 258 | return design_matrix, paradigm, frametimes 259 | -------------------------------------------------------------------------------- /pypreprocess/nipype_preproc_fsl_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Bertrand Thirion, Alexandre Abraham, DOHMATOB Elvis Dopgima 3 | 4 | """ 5 | 6 | import os 7 | import subprocess 8 | import nipype.interfaces.fsl as fsl 9 | from nipype.caching import Memory as NipypeMemory 10 | from joblib import Memory as JoblibMemory 11 | 12 | fsl.FSLCommand.set_default_output_type('NIFTI_GZ') 13 | FSL_T1_TEMPLATE = "/usr/share/fsl/data/standard/MNI152_T1_1mm_brain.nii.gz" 14 | 15 | 16 | def _get_file_ext(filename): 17 | parts = filename.split('.') 18 | 19 | return parts[0], ".".join(parts[1:]) 20 | 21 | 22 | def _get_output_filename(input_filename, output_dir, output_prefix='', 23 | ext=None): 24 | if isinstance(input_filename, str): 25 | if not ext is None: 26 | ext = "." 
+ ext if not ext.startswith('.') else ext 27 | input_filename = _get_file_ext(input_filename)[0] + ext 28 | 29 | return os.path.join(output_dir, 30 | output_prefix + os.path.basename(input_filename)) 31 | else: 32 | return [_get_output_filename(x, output_dir, 33 | output_prefix=output_prefix) 34 | for x in input_filename] 35 | 36 | 37 | def do_fsl_merge(in_files, output_dir, output_prefix='merged_', 38 | cmd_prefix="fsl5.0-" 39 | ): 40 | output_filename = _get_output_filename(in_files[0], output_dir, 41 | output_prefix=output_prefix, 42 | ext='.nii.gz') 43 | 44 | cmdline = "%sfslmerge -t %s %s" % (cmd_prefix, output_filename, 45 | " ".join(in_files)) 46 | print(cmdline) 47 | print(subprocess.check_output(cmdline)) 48 | 49 | return output_filename 50 | 51 | 52 | def do_subject_preproc(subject_id, 53 | output_dir, 54 | func, 55 | anat, 56 | do_bet=True, 57 | do_mc=True, 58 | do_coreg=True, 59 | do_normalize=True, 60 | cmd_prefix="fsl5.0-", 61 | **kwargs 62 | ): 63 | """ 64 | Preprocesses subject data using FSL. 65 | 66 | Parameters 67 | ---------- 68 | 69 | """ 70 | 71 | output = {'func': func, 72 | 'anat': anat 73 | } 74 | 75 | # output dir 76 | subject_output_dir = os.path.join(output_dir, subject_id) 77 | if not os.path.exists(subject_output_dir): 78 | os.makedirs(subject_output_dir) 79 | 80 | # prepare for smart-caching 81 | cache_dir = os.path.join(output_dir, "cache_dir") 82 | if not os.path.exists(cache_dir): 83 | os.makedirs(cache_dir) 84 | 85 | nipype_mem = NipypeMemory(base_dir=cache_dir) 86 | joblib_mem = JoblibMemory(cache_dir, verbose=100) 87 | 88 | # sanitize input files 89 | if not isinstance(output['func'], str): 90 | output['func'] = joblib_mem.cache(do_fsl_merge)( 91 | func, subject_output_dir, output_prefix='Merged', 92 | cmd_prefix=cmd_prefix) 93 | 94 | ###################### 95 | # Brain Extraction 96 | ###################### 97 | if do_bet: 98 | if not fsl.BET._cmd.startswith("fsl"): 99 | fsl.BET._cmd = cmd_prefix + fsl.BET._cmd 100 | 101 | bet = nipype_mem.cache(fsl.BET) 102 | bet_results = bet(in_file=output['anat'], 103 | ) 104 | 105 | output['anat'] = bet_results.outputs.out_file 106 | 107 | ####################### 108 | # Motion correction 109 | ####################### 110 | if do_mc: 111 | if not fsl.MCFLIRT._cmd.startswith("fsl"): 112 | fsl.MCFLIRT._cmd = cmd_prefix + fsl.MCFLIRT._cmd 113 | 114 | mcflirt = nipype_mem.cache(fsl.MCFLIRT) 115 | mcflirt_results = mcflirt(in_file=output['func'], 116 | cost='mutualinfo', 117 | save_mats=True, # save mc matrices 118 | save_plots=True # save mc params 119 | ) 120 | 121 | output['motion_parameters'] = mcflirt_results.outputs.par_file 122 | output['motion_matrices'] = mcflirt_results.outputs.mat_file 123 | output['func'] = mcflirt_results.outputs.out_file 124 | 125 | ################### 126 | # Coregistration 127 | ################### 128 | if do_coreg: 129 | if not fsl.FLIRT._cmd.startswith("fsl"): 130 | fsl.FLIRT._cmd = cmd_prefix + fsl.FLIRT._cmd 131 | 132 | flirt1 = nipype_mem.cache(fsl.FLIRT) 133 | flirt1_results = flirt1(in_file=output['func'], 134 | reference=output['anat'] 135 | ) 136 | 137 | if not do_normalize: 138 | output['func'] = flirt1_results.outputs.out_file 139 | 140 | ########################## 141 | # Spatial normalization 142 | ########################## 143 | if do_normalize: 144 | if not fsl.FLIRT._cmd.startswith("fsl"): 145 | fsl.FLIRT._cmd = cmd_prefix + fsl.FLIRT._cmd 146 | 147 | # T1 normalization 148 | flirt2 = nipype_mem.cache(fsl.FLIRT) 149 | flirt2_results = flirt2(in_file=output['anat'], 150 
| reference=FSL_T1_TEMPLATE) 151 | 152 | output['anat'] = flirt2_results.outputs.out_file 153 | 154 | # concatenate 'func -> anat' and 'anat -> standard space' 155 | # transformation matrices to obtaun 'func -> standard space' 156 | # transformation matrix 157 | if do_coreg: 158 | if not fsl.ConvertXFM._cmd.startswith("fsl"): 159 | fsl.ConvertXFM._cmd = cmd_prefix + fsl.ConvertXFM._cmd 160 | 161 | convertxfm = nipype_mem.cache(fsl.ConvertXFM) 162 | convertxfm_results = convertxfm( 163 | in_file=flirt1_results.outputs.out_matrix_file, 164 | in_file2=flirt2_results.outputs.out_matrix_file, 165 | concat_xfm=True 166 | ) 167 | 168 | # warp func data into standard space by applying 169 | # 'func -> standard space' transformation matrix 170 | if not fsl.ApplyXfm._cmd.startswith("fsl"): 171 | fsl.ApplyXfm._cmd = cmd_prefix + fsl.ApplyXfm._cmd 172 | 173 | applyxfm = nipype_mem.cache(fsl.ApplyXfm) 174 | applyxfm_results = applyxfm( 175 | in_file=output['func'], 176 | in_matrix_file=convertxfm_results.outputs.out_file, 177 | reference=FSL_T1_TEMPLATE 178 | ) 179 | 180 | output['func'] = applyxfm_results.outputs.out_file 181 | 182 | return output 183 | -------------------------------------------------------------------------------- /pypreprocess/openfmri.py: -------------------------------------------------------------------------------- 1 | # standard imports 2 | import os 3 | import glob 4 | import warnings 5 | 6 | # import spm preproc utilities 7 | from .nipype_preproc_spm_utils import (do_subjects_preproc, SubjectData) 8 | from .datasets import fetch_openfmri 9 | 10 | DATASET_DESCRIPTION = """\ 11 |

openfmri.org datasets.
12 | """ 13 | 14 | 15 | def preproc_dataset(data_dir, output_dir, 16 | ignore_subjects=None, restrict_subjects=None, 17 | delete_orient=False, dartel=False, 18 | n_jobs=-1): 19 | """Main function for preprocessing a dataset with the OpenfMRI layout. 20 | 21 | Parameters 22 | ---------- 23 | data_dir: str 24 | Path of input directory. If does not exist and finishes 25 | by a valid OpenfMRI dataset id, it will be downloaded, 26 | i.e., /path/to/dir/{dataset_id}. 27 | output_dir: str 28 | Path of output directory. 29 | ignore_subjects: list or None 30 | List of subject identifiers not to process. 31 | restrict_subjects: list or None 32 | List of subject identifiers to process. 33 | delete_orient: bool 34 | Delete orientation information in nifti files. 35 | dartel: bool 36 | Use dartel. 37 | n_jobs: int 38 | Number of parallel jobs. 39 | 40 | Examples 41 | -------- 42 | preproc_dataset('/tmp/ds105', '/tmp/ds105_preproc ', 43 | ignore_subjects=['sub002', 'sub003'], 44 | delete_orient=True, 45 | n_jobs=3) 46 | 47 | Warning 48 | ------- 49 | Subjects may be excluded if some data is missing. 50 | 51 | Returns list of Bunch objects with fields anat, func, and subject_id 52 | for each preprocessed subject 53 | """ 54 | parent_dir, dataset_id = os.path.split(data_dir) 55 | 56 | if not os.path.exists(data_dir): 57 | fetch_openfmri(parent_dir, dataset_id) 58 | 59 | ignore_subjects = [] if ignore_subjects is None else ignore_subjects 60 | 61 | # glob for subjects and their imaging sessions identifiers 62 | if restrict_subjects is None: 63 | subjects = [os.path.basename(x) 64 | for x in glob.glob(os.path.join(data_dir, 'sub???'))] 65 | else: 66 | subjects = restrict_subjects 67 | 68 | subjects = sorted(subjects) 69 | 70 | # producer subject data 71 | def subject_factory(): 72 | for subject_id in subjects: 73 | if subject_id in ignore_subjects: 74 | continue 75 | 76 | sessions = set() 77 | subject_dir = os.path.join(data_dir, subject_id) 78 | for session_dir in glob.glob(os.path.join( 79 | subject_dir, 'BOLD', '*')): 80 | sessions.add(os.path.split(session_dir)[1]) 81 | sessions = sorted(sessions) 82 | # construct subject data structure 83 | subject_data = SubjectData() 84 | subject_data.session_id = sessions 85 | subject_data.subject_id = subject_id 86 | subject_data.func = [] 87 | 88 | # glob for BOLD data 89 | has_bad_sessions = False 90 | for session_id in subject_data.session_id: 91 | bold_dir = os.path.join( 92 | data_dir, subject_id, 'BOLD', session_id) 93 | 94 | # glob BOLD data for this session 95 | func = glob.glob(os.path.join(bold_dir, "bold.nii.gz")) 96 | # check that this session is OK (has BOLD data, etc.) 97 | if not func: 98 | warnings.warn( 99 | 'Subject %s is missing data for session %s.' % ( 100 | subject_id, session_id)) 101 | has_bad_sessions = True 102 | break 103 | 104 | subject_data.func.append(func[0]) 105 | 106 | # exclude subject if necessary 107 | if has_bad_sessions: 108 | warnings.warn('Excluding subject %s' % subject_id) 109 | continue 110 | 111 | # anatomical data 112 | subject_data.anat = os.path.join( 113 | data_dir, subject_id, 'anatomy', 'highres001.nii.gz') 114 | # pypreprocess is setup to work with non-skull stripped brain and 115 | # is likely to crash otherwise. 
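            # fall back to the skull-stripped image (highres001_brain.nii.gz)
            # when the raw anatomical is missing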
116 | if not os.path.exists(subject_data.anat): 117 | subject_data.anat = os.path.join( 118 | data_dir, subject_id, 'anatomy', 'highres001_brain.nii.gz') 119 | 120 | # subject output_dir 121 | subject_data.output_dir = os.path.join(output_dir, subject_id) 122 | yield subject_data 123 | 124 | return do_subjects_preproc( 125 | subject_factory(), 126 | n_jobs=n_jobs, 127 | dataset_id=dataset_id, 128 | output_dir=output_dir, 129 | deleteorient=delete_orient, 130 | dartel=dartel, 131 | dataset_description=DATASET_DESCRIPTION, 132 | # caching=False, 133 | ) 134 | -------------------------------------------------------------------------------- /pypreprocess/reporting/README.md: -------------------------------------------------------------------------------- 1 | ========= 2 | REPORTING 3 | ========= 4 | 5 | pypreprocess' post-preprocessing and post-analysis (first level analysis, etc.) tools for QA and html reporting. 6 | -------------------------------------------------------------------------------- /pypreprocess/reporting/__init__.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | matplotlib.use('Agg') 3 | -------------------------------------------------------------------------------- /pypreprocess/reporting/check_preprocessing.py: -------------------------------------------------------------------------------- 1 | """ 2 | :Module: check_preprocessing 3 | :Synopsis: module for generating post-preproc plots (registration, 4 | segmentation, etc.) 5 | :Author: bertrand thirion, dohmatob elvis dopgima 6 | 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import nibabel 12 | from nilearn.plotting import plot_img 13 | from nilearn.image import reorder_img, mean_img 14 | from ..io_utils import load_vols 15 | EPS = np.finfo(float).eps 16 | 17 | import io 18 | import base64 19 | import urllib.parse 20 | 21 | def _plot_to_svg(fig, dpi=300): 22 | """ 23 | Converts matplotlib figure instance to an SVG url 24 | that can be loaded in a browser. 25 | 26 | Parameters 27 | ---------- 28 | fig: `matplotlib.figure.Figure` instance 29 | consisting of the plot to be converted to SVG 30 | url and then enbedded in an HTML report. 31 | 32 | dpi: float, optional (default 300) 33 | Dots per inch. Resolution of the SVG plot generated 34 | """ 35 | with io.BytesIO() as io_buffer: 36 | fig.tight_layout(pad=0.4) 37 | fig.savefig( 38 | io_buffer, format="svg", facecolor="white", 39 | edgecolor="white", dpi=dpi) 40 | return urllib.parse.quote(io_buffer.getvalue().decode("utf-8")) 41 | 42 | 43 | def plot_spm_motion_parameters(parameter_file, lengths, 44 | title=None, output_filename=None, 45 | close=False, report_path=None): 46 | """ Plot motion parameters obtained with SPM software 47 | 48 | Parameters 49 | ---------- 50 | parameter_file: string 51 | path of file containing the motion parameters 52 | subject_id: string (optional) 53 | subject id 54 | titile: string (optional) 55 | title to attribute to plotted figure 56 | output_filename: string 57 | output filename for storing the plotted figure 58 | 59 | """ 60 | # load parameters 61 | motion = np.loadtxt(parameter_file) if isinstance( 62 | parameter_file, str) else parameter_file[..., :6] 63 | 64 | motion[:, 3:] *= (180. / np.pi) 65 | 66 | # do plotting 67 | plt.figure() 68 | plt.plot(motion) 69 | 70 | aux = 0. 
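    # draw a dashed vertical line at each session boundary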
71 | for l in lengths[:-1]: 72 | plt.axvline(aux + l, linestyle="--", c="k") 73 | aux += l 74 | 75 | if not title is None: 76 | plt.title(title) 77 | plt.legend(('TransX', 'TransY', 'TransZ', 'RotX', 'RotY', 'RotZ'), 78 | loc="upper left", ncol=2) 79 | plt.xlabel('time(scans)') 80 | plt.ylabel('Estimated motion (mm/degrees)') 81 | 82 | if report_path not in [False, None]: 83 | fig = plt.gcf() 84 | svg_plot = _plot_to_svg(fig) 85 | else: 86 | svg_plot = None 87 | 88 | if not output_filename is None: 89 | plt.savefig(output_filename, bbox_inches="tight", dpi=200) 90 | if close: 91 | plt.close() 92 | 93 | return svg_plot 94 | 95 | 96 | def compute_cv(data, mask_array=None): 97 | if mask_array is not None: 98 | cv = .0 * mask_array 99 | cv[mask_array > 0] = data[mask_array > 0].std(-1) /\ 100 | (data[mask_array > 0].mean(-1) + EPS) 101 | else: 102 | cv = data.std(-1) / (data.mean(-1) + EPS) 103 | 104 | return cv 105 | 106 | 107 | def plot_registration(reference_img, coregistered_img, 108 | title="untitled coregistration!", 109 | cut_coords=None, 110 | display_mode='ortho', 111 | cmap=None, close=False, 112 | output_filename=None, 113 | report_path=None): 114 | """Plots a coregistered source as bg/contrast for the reference image 115 | 116 | Parameters 117 | ---------- 118 | reference_img: string 119 | path to reference (background) image 120 | 121 | coregistered_img: string 122 | path to other image (to be compared with reference) 123 | 124 | display_mode: string (optional, defaults to 'ortho') 125 | display_mode param 126 | 127 | cmap: matplotlib colormap object (optional, defaults to spectral) 128 | colormap to user for plots 129 | 130 | output_filename: string (optional) 131 | path where plot will be stored 132 | 133 | """ 134 | # sanity 135 | if cmap is None: 136 | cmap = plt.cm.gray # registration QA always gray cmap! 137 | 138 | reference_img = mean_img(reference_img) 139 | coregistered_img = mean_img(coregistered_img) 140 | 141 | if cut_coords is None: 142 | cut_coords = (-10, -28, 17) 143 | 144 | if display_mode in ['x', 'y', 'z']: 145 | cut_coords = (cut_coords['xyz'.index(display_mode)],) 146 | 147 | # XXX nilearn complains about rotations in affine, etc. 148 | coregistered_img = reorder_img(coregistered_img, resample="continuous") 149 | 150 | _slicer = plot_img(coregistered_img, cmap=cmap, cut_coords=cut_coords, 151 | display_mode=display_mode, black_bg=True) 152 | 153 | # XXX nilearn complains about rotations in affine, etc. 154 | reference_img = reorder_img(reference_img, resample="continuous") 155 | 156 | _slicer.add_edges(reference_img) 157 | # misc 158 | _slicer.title(title, size=12, color='w', alpha=0) 159 | 160 | if report_path not in [False, None]: 161 | fig = plt.gcf() 162 | svg_plot = _plot_to_svg(fig) 163 | else: 164 | svg_plot = None 165 | 166 | if not output_filename is None: 167 | try: 168 | plt.savefig(output_filename, dpi=200, bbox_inches='tight', 169 | facecolor="k", edgecolor="k") 170 | if close: 171 | plt.close() 172 | except AttributeError: 173 | # XXX TODO: handle this case!! 174 | pass 175 | 176 | return svg_plot 177 | 178 | def plot_segmentation( 179 | img, gm_filename, wm_filename=None, csf_filename=None, 180 | output_filename=None, cut_coords=None, display_mode='ortho', 181 | cmap=None, title='GM + WM + CSF segmentation', close=False, 182 | report_path=None): 183 | """ 184 | Plot a contour mapping of the GM, WM, and CSF of a subject's anatomical. 
185 | 186 | Parameters 187 | ---------- 188 | img_filename: string or image object 189 | path of file containing image data, or image object simply 190 | 191 | gm_filename: string 192 | path of file containing Grey Matter template 193 | 194 | wm_filename: string (optional) 195 | path of file containing White Matter template 196 | 197 | csf_filename: string (optional) 198 | path of file containing Cerebro-Spinal Fluid template 199 | 200 | """ 201 | # misc 202 | if cmap is None: 203 | cmap = plt.cm.gray 204 | if cut_coords is None: 205 | cut_coords = (-10, -28, 17) 206 | if display_mode in ['x', 'y', 'z']: 207 | cut_coords = (cut_coords['xyz'.index(display_mode)],) 208 | 209 | # plot img 210 | img = mean_img(img) 211 | img = reorder_img(img, resample="continuous") 212 | _slicer = plot_img(img, cut_coords=cut_coords, display_mode=display_mode, 213 | cmap=cmap, black_bg=True) 214 | 215 | # add TPM contours 216 | gm = nibabel.load(gm_filename) 217 | _slicer.add_contours(gm, levels=[.51], colors=["r"]) 218 | if not wm_filename is None: 219 | _slicer.add_contours(wm_filename, levels=[.51], colors=["g"]) 220 | if not csf_filename is None: 221 | _slicer.add_contours(csf_filename, levels=[.51], colors=['b']) 222 | 223 | # misc 224 | _slicer.title(title, size=12, color='w', alpha=0) 225 | 226 | if report_path not in [False, None]: 227 | fig = plt.gcf() 228 | svg_plot = _plot_to_svg(fig) 229 | else: 230 | svg_plot = None 231 | 232 | if not output_filename is None: 233 | plt.savefig(output_filename, bbox_inches='tight', dpi=200, 234 | facecolor="k", edgecolor="k") 235 | if close: 236 | plt.close() 237 | 238 | return svg_plot 239 | -------------------------------------------------------------------------------- /pypreprocess/reporting/template_reports/log_link_template.html: -------------------------------------------------------------------------------- 1 | (see log) -------------------------------------------------------------------------------- /pypreprocess/reporting/template_reports/log_sub_template.html: -------------------------------------------------------------------------------- 1 |

${heading} 2 | ${log}
-------------------------------------------------------------------------------- /pypreprocess/reporting/template_reports/log_template.html: -------------------------------------------------------------------------------- 1 | ${log}
-------------------------------------------------------------------------------- /pypreprocess/reporting/template_reports/report_sub_template.html: -------------------------------------------------------------------------------- 1 | ${heading} 2 | ${tooltip} 3 | 4 |
-------------------------------------------------------------------------------- /pypreprocess/reporting/template_reports/report_template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ${subject_name} 5 | 6 | 7 | 8 | Pypreprocess report for ${subject_name} 9 | Start time: ${start_time} 10 | ${end_time} 11 | 12 | Preprocessing steps: 13 | ${preproc_undergone} 14 | References 15 | 16 | Russel A. Poldrack et al. Handbook of Functional MRI Data Analysis 17 | 18 | 19 | Visualisation for each step:
-------------------------------------------------------------------------------- /pypreprocess/reslice.py: -------------------------------------------------------------------------------- 1 | """ 2 | :Module: spm_reslice 3 | :Synopsis: Routine functions for reslicing volumes after affine registration 4 | (as in motion correction, coregistration, etc) 5 | :Author: DOHMATOB Elvis Dopgima 6 | 7 | """ 8 | 9 | import numpy as np 10 | import scipy.ndimage 11 | import scipy.linalg 12 | import nibabel 13 | from nilearn.image.image import check_niimg 14 | from .affine_transformations import get_physical_coords 15 | 16 | 17 | def _get_mask(M, coords, dim, wrp=[1, 1, 0], tiny=5e-2): 18 | """ 19 | Wrapper for get_physical_coords(...) with optional wrapping of dimensions. 20 | 21 | Parameters 22 | ---------- 23 | M: 2D array of shape (4, 4) 24 | affine transformation describing voxel-to-world mapping 25 | coords: array_like of shape (3, n_voxels) 26 | voxel(s) under consideration 27 | dim: list of 3 ints 28 | dimensions (nx, ny, nz) of the voxel space (for example [64, 56, 21]) 29 | wrp: list of 3 bools, optional (default [1, 1, 0]) 30 | each coordinate value indicates whether wrapping should be done in the 31 | corresponding dimension or not. Possible values are: 32 | [0, 0, 0]: no wrapping; use this value for PET data 33 | [1, 1, 0]: wrap all except z (slice-wise) dimension; use this value for 34 | fMRI data 35 | tiny: float, optional (default 5e-2) 36 | threshold for filtering voxels that have fallen out of the FOV 37 | 38 | Returns 39 | ------- 40 | Tuple (fov_mask, physical_coords), where: 41 | fov_mask: 1D array_like of len voxel.shape[1] 42 | mask for filtering voxels that are still in the FOV. 1 means 'OK', 43 | 0 means 'fell out of FOV' 44 | physical_coords: array of same shape as input coords 45 | transformed coords 46 | 47 | """ 48 | 49 | physical_coords = get_physical_coords(M, coords) 50 | fov_mask = np.ones(physical_coords.shape[-1]).astype('bool') 51 | 52 | for j in range(3): 53 | if not wrp[j]: 54 | fov_mask = fov_mask & (physical_coords[j] >= -tiny 55 | ) & (physical_coords[j] < dim[j] + tiny) 56 | 57 | return fov_mask, physical_coords 58 | 59 | 60 | def reslice_vols(vols, target_affine=None, interp_order=3, 61 | interp_mode='constant', mask=True, wrp=None, log=None): 62 | """ 63 | Uses B-spline interpolation to reslice (i.e resample) all other 64 | volumes to have thesame affine header matrix as the first (0th) volume. 65 | 66 | Parameters 67 | ---------- 68 | vols: list of `nibabel.Nifti1Image` objects 69 | vols[0] is the reference volume. All other volumes will be resliced 70 | so that the end up with the same header affine matrix as vol[0]. 71 | 72 | target_affine: 2D array of shape (4, 4), optional (default None) 73 | Target affine matrix to which the vols will be resliced. If not 74 | specified, vols will be resliced to match the first vol's affine. 75 | 76 | interp_order: int, optional (default 3) 77 | Degree of B-spline interpolation used for resampling the volumes. 78 | 79 | interp_mode: string, optional (default "wrap") 80 | Mode param to be passed to `scipy.ndimage.map_coordinates`. 81 | 82 | mask: boolean, optional (default True) 83 | If set, vols will be masked before reslicing. This masking will 84 | help eliminate artefactual motion across volumes due to on-off 85 | voxels. 86 | 87 | wrp: list_like of 3 booleans, optional (default None) 88 | Option passed to _get_mask function. For each axis, it specifies 89 | if or not wrapping is to be done along that axis. 
90 | 91 | log: function(basestring), optional (default None) 92 | function for logging messages. 93 | 94 | Returns 95 | ------- 96 | vols: generator object on `nibabel.Nifti1Image` objects 97 | resliced volumes. 98 | 99 | Raises 100 | ------ 101 | RuntimeError in case dimensions are inconsistent across volumes. 102 | 103 | """ 104 | 105 | wrp = [1, 1, 0] if wrp is None else wrp 106 | vols = list(vols) 107 | 108 | def _log(msg): 109 | if log: 110 | log(msg) 111 | else: 112 | print(msg) 113 | 114 | # load first vol 115 | vol_0 = check_niimg(vols[0]) 116 | 117 | # sanitize target_affine 118 | reslice_first_vol = True 119 | if target_affine is None: 120 | reslice_first_vol = False 121 | target_affine = vol_0.affine 122 | 123 | # build working grid 124 | dim = vol_0.shape 125 | n_scans = len(vols) 126 | grid = np.mgrid[0:dim[0], 0:dim[1], 0:dim[2]].reshape((3, -1)) 127 | 128 | # compute global mask for all vols, to mask out voxels that show 129 | # artefactual movement across volumes 130 | msk = np.ones(grid.shape[1]).astype('bool') 131 | if mask: 132 | for t in range(len(vols)): 133 | # load vol 134 | vol = check_niimg(vols[t]) 135 | 136 | # saniiy check on dimensions 137 | if vol.shape != dim: 138 | raise RuntimeError( 139 | ("All source volumes must have the same dimensions as the " 140 | "reference. Volume %i has dim %s instead of %s.") % ( 141 | t, vol.shape, dim)) 142 | 143 | # affine matrix for passing from vol's space to the ref vol's 144 | M = scipy.linalg.inv(scipy.linalg.lstsq( 145 | target_affine, vol.affine)[0]) 146 | fov_msk, _ = _get_mask(M, grid, dim, wrp=wrp) 147 | msk = msk & fov_msk 148 | 149 | # loop on all vols, reslicing them one-by-one 150 | rvols = [] 151 | for t in range(n_scans): 152 | _log('\tReslicing volume %i/%i...' % (t + 1, len(vols))) 153 | vol = check_niimg(vols[t]) 154 | 155 | # reslice vol 156 | if t > 0 or reslice_first_vol: 157 | # affine matrix for passing from vol's space to the ref vol's 158 | M = scipy.linalg.inv(scipy.linalg.lstsq(target_affine, 159 | vol.affine)[0]) 160 | 161 | # transform vol's grid according to M 162 | _, new_grid = _get_mask(M, grid, dim, wrp=wrp) 163 | 164 | # resample vol on new grid 165 | rdata = scipy.ndimage.map_coordinates( 166 | vol.get_fdata(), new_grid, order=interp_order, mode=interp_mode) 167 | else: # don't reslice first vol 168 | rdata = vol.get_fdata().ravel() 169 | rdata[~msk] = 0 170 | 171 | # replace vols's affine with ref vol's (this has been the ultimate 172 | # goal all along) 173 | rvols.append(nibabel.Nifti1Image(rdata.reshape(dim), target_affine)) 174 | 175 | return rvols 176 | -------------------------------------------------------------------------------- /pypreprocess/spm_loader/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | __version__ = '0.1' 3 | -------------------------------------------------------------------------------- /pypreprocess/spm_loader/spm.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import os.path as pt 3 | 4 | import scipy.io as sio 5 | 6 | 7 | _call_options = [] 8 | 9 | 10 | def _load_mat(location): 11 | if location.endswith('.gz'): 12 | return sio.loadmat( 13 | gzip.open(location, 'rb'), 14 | squeeze_me=True, 15 | struct_as_record=False 16 | )['SPM'] 17 | 18 | return sio.loadmat( 19 | location, squeeze_me=True, struct_as_record=False)['SPM'] 20 | 21 | 22 | def _wdir(wd): 23 | def func(path): 24 | return pt.join(str(wd), pt.split(str(path))[1]) 25 | return func 26 | 27 | 28 
| def _find_data_dir(wd, fpath): 29 | 30 | def right_splits(p): 31 | while p not in ['', None]: 32 | p = p.rsplit(pt.sep, 1)[0] 33 | yield p 34 | 35 | def left_splits(p): 36 | while len(p.split(pt.sep, 1)) > 1: 37 | p = p.split(pt.sep, 1)[1] 38 | yield p 39 | 40 | if not pt.isfile(fpath): 41 | for rs in right_splits(wd): 42 | for ls in left_splits(fpath): 43 | p = pt.join(rs, *ls.split(pt.sep)) 44 | if pt.isfile(p): 45 | return pt.dirname(p) 46 | else: 47 | return pt.dirname(fpath) 48 | 49 | 50 | def _prefix_filename(path, prefix): 51 | path, filename = pt.split(str(path)) 52 | return pt.join(path, '%s%s' % (prefix, filename)) 53 | 54 | 55 | def load_intra(spmdotmat_path, inputs=True, outputs=True, **options): 56 | """Function to load SPM.mat data structure, fixing file paths 57 | (of images, etc.) as necessary 58 | 59 | Parameters 60 | ---------- 61 | 62 | spmdotmat_path: string 63 | exisiting filename, path to SPM.mat matrix to be loaded 64 | 65 | Returns 66 | ------- 67 | A dict of params of the analysis specified in the SPM.mat file 68 | 69 | XXX Document the other args for this function!!! 70 | 71 | """ 72 | 73 | spmmat = _load_mat(spmdotmat_path) 74 | 75 | wd, _ = pt.split(pt.realpath(spmdotmat_path)) # work dir 76 | bd = _wdir(wd) # beta directory 77 | 78 | analysis = {} # this will contain our final stuff to return 79 | 80 | for opt in options: 81 | if opt not in _call_options: 82 | if isinstance(options[opt], int): 83 | analysis[opt] = wd.split(pt.sep)[options[opt]] 84 | else: 85 | analysis[opt] = options[opt] 86 | 87 | # parse loaded mat structure for fields we need 88 | analysis['design_matrix'] = spmmat.xX.X.tolist() # xX: design 89 | analysis['conditions'] = [str(i) for i in spmmat.xX.name] # xX: design 90 | analysis['n_scans'] = spmmat.nscan.tolist() \ 91 | if isinstance(spmmat.nscan.tolist(), list) else [spmmat.nscan.tolist()] 92 | analysis['n_sessions'] = spmmat.nscan.size 93 | analysis['TR'] = float(spmmat.xY.RT) # xY: data 94 | analysis['mask'] = bd(spmmat.VM.fname) # VM: mask 95 | 96 | if outputs: 97 | analysis['b_maps'] = [] 98 | analysis['c_maps'] = {} 99 | analysis['c_maps_smoothed'] = {} 100 | analysis['t_maps'] = {} 101 | analysis['contrasts'] = {} 102 | 103 | for c in spmmat.xCon: 104 | name = str(c.name) 105 | scon = _prefix_filename(c.Vcon.fname, 's') 106 | 107 | analysis['c_maps'][name] = bd(c.Vcon.fname) 108 | analysis['c_maps_smoothed'][name] = bd(scon) 109 | analysis['t_maps'][name] = bd(c.Vspm.fname) 110 | analysis['contrasts'][name] = c.c.tolist() 111 | 112 | for i, b in enumerate(spmmat.Vbeta): 113 | analysis['b_maps'].append(bd(b.fname)) 114 | 115 | if inputs: 116 | analysis['raw_data'] = [] 117 | analysis['data'] = [] 118 | for Y in spmmat.xY.P: 119 | Y = str(Y).strip() 120 | data_dir = _find_data_dir(wd, Y) 121 | if data_dir is not None: 122 | analysis['data'].append(pt.join(data_dir, pt.split(Y)[1])) 123 | analysis['raw_data'].append( 124 | pt.join(data_dir, pt.split(Y)[1].strip('swa'))) 125 | else: 126 | analysis['data'].append(pt.split(Y)[1]) 127 | analysis['raw_data'].append(pt.split(Y)[1].strip('swa')) 128 | 129 | return analysis 130 | -------------------------------------------------------------------------------- /pypreprocess/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/tests/__init__.py -------------------------------------------------------------------------------- 
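A minimal usage sketch for the two modules above, pypreprocess/reslice.py and pypreprocess/spm_loader/spm.py. This is illustrative only and not part of the package sources: the NIfTI filenames and the SPM.mat path are hypothetical, and the keyword values simply restate the defaults documented in the docstrings.

    import nibabel

    from pypreprocess.reslice import reslice_vols
    from pypreprocess.spm_loader.spm import load_intra

    # Reslice a short fMRI series onto the grid of its first volume.
    # wrp=[1, 1, 0] wraps the x and y axes only, the value recommended for fMRI data.
    vols = [nibabel.load("vol_%02i.nii.gz" % t) for t in range(5)]  # hypothetical files
    resliced = reslice_vols(vols, interp_order=3, wrp=[1, 1, 0])
    for t, rvol in enumerate(resliced):
        nibabel.save(rvol, "rvol_%02i.nii.gz" % t)

    # Load an existing first-level SPM analysis (SPM.mat) as a plain dict.
    analysis = load_intra("/path/to/SPM.mat", inputs=True, outputs=True)  # hypothetical path
    print(analysis["TR"], analysis["n_sessions"])
    print(sorted(analysis["contrasts"]))  # contrast names parsed from SPM.xCon
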
/pypreprocess/tests/_test_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import nibabel 3 | import numpy as np 4 | from pypreprocess.subject_data import SubjectData 5 | 6 | DATA_DIR = "test_tmp_data" 7 | if not os.path.exists(DATA_DIR): 8 | os.makedirs(DATA_DIR) 9 | 10 | 11 | def create_random_image(shape=None, 12 | ndim=3, 13 | n_scans=None, 14 | affine=np.eye(4), 15 | parent_class=nibabel.Nifti1Image): 16 | """ 17 | Creates a random image of prescribed shape 18 | 19 | """ 20 | 21 | rng = np.random.RandomState(0) 22 | 23 | if shape is None: 24 | shape = np.random.random_integers(20, size=ndim) 25 | 26 | ndim = len(shape) 27 | 28 | ndim = len(shape) 29 | if not n_scans is None and ndim == 4: 30 | shape[-1] = n_scans 31 | 32 | return parent_class(np.random.randn(*shape), affine) 33 | 34 | 35 | def make_dataset(n_subjects=1, n_scans=10, n_sessions=1, 36 | threeD_filenames=False, dataset_name="test_dataset", 37 | output_dir=DATA_DIR, ext="nii.gz"): 38 | 39 | output_dir = os.path.join(output_dir, dataset_name) 40 | if not os.path.exists(output_dir): 41 | os.makedirs(output_dir) 42 | 43 | dataset = [] 44 | for i in range(n_subjects): 45 | subject_data = {"subject_id": "sub%03i" % (i + 1), "func": [], 46 | 'anat': '%s/anat.nii.gz' % DATA_DIR} 47 | nibabel.save(create_random_image(ndim=3), 48 | subject_data['anat']) 49 | subject_data_dir = os.path.join(output_dir, subject_data["subject_id"]) 50 | if not os.path.exists(subject_data_dir): 51 | os.makedirs(subject_data_dir) 52 | 53 | for j in range(n_sessions): 54 | session_dir = os.path.join(subject_data_dir, 55 | "session%03i" % (j + 1)) 56 | if not os.path.exists(session_dir): 57 | os.makedirs(session_dir) 58 | sfunc = [] 59 | if threeD_filenames: 60 | for k in range(n_scans): 61 | func_filename = os.path.join(session_dir, 62 | "func%03i.%s" % (j + 1, ext)) 63 | nibabel.save(create_random_image(ndim=3), 64 | func_filename) 65 | sfunc.append(func_filename) 66 | subject_data['func'].append(sfunc) 67 | else: 68 | func_filename = os.path.join(session_dir, "func.%s" % ext) 69 | nibabel.save(create_random_image(ndim=4, n_scans=n_scans), 70 | func_filename) 71 | subject_data['func'].append(func_filename) 72 | 73 | dataset.append(subject_data) 74 | 75 | return dataset 76 | 77 | 78 | def _save_img(img, filename): 79 | dirname = os.path.dirname(filename) 80 | if not os.path.exists(dirname): 81 | os.makedirs(dirname) 82 | 83 | nibabel.save(img, filename) 84 | 85 | 86 | def _make_sd(func_filenames=None, anat_filename=None, ext=".nii.gz", 87 | n_sessions=1, make_sess_dirs=False, func_ndim=4, 88 | unique_func_names=False, output_dir="/tmp/titi"): 89 | if not func_filenames is None: 90 | n_sessions = len(func_filenames) 91 | func = [create_random_image(ndim=func_ndim) for _ in range(n_sessions)] 92 | anat = create_random_image(ndim=3) 93 | if anat_filename is None: 94 | anat_filename = '%s/anat%s' % (DATA_DIR, ext) 95 | _save_img(anat, anat_filename) 96 | if not func_filenames is None: 97 | for sess_func, filename in zip(func, func_filenames): 98 | if isinstance(filename, str): 99 | _save_img(sess_func, filename) 100 | else: 101 | vols = nibabel.four_to_three(sess_func) 102 | for x, y in zip(vols, filename): 103 | assert isinstance(y, str), type(y) 104 | _save_img(x, y) 105 | else: 106 | func_filenames = [] 107 | for sess in range(n_sessions): 108 | sess_dir = DATA_DIR if not make_sess_dirs else os.path.join( 109 | DATA_DIR, "session%i" % sess) 110 | if not os.path.exists(sess_dir): 111 | 
os.makedirs(sess_dir) 112 | func_filename = '%s/func%s%s' % ( 113 | sess_dir, "_sess_%i_" % sess if ( 114 | n_sessions > 1 and unique_func_names) else "", ext) 115 | _save_img(func[sess], func_filename) 116 | func_filenames.append(func_filename) 117 | 118 | sd = SubjectData(anat=anat_filename, 119 | func=func_filenames, 120 | output_dir=output_dir) 121 | return sd 122 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_affine_transformations.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import nibabel 3 | from ..affine_transformations import ( 4 | get_initial_motion_params, spm_matrix, spm_imatrix, transform_coords, 5 | apply_realignment, nibabel2spm_affine, get_physical_coords, 6 | extract_realignment_params, extract_realignment_matrix) 7 | from ._test_utils import create_random_image 8 | 9 | 10 | def test_get_initial_motion_params(): 11 | # params for zero motion 12 | p = get_initial_motion_params() 13 | np.testing.assert_array_equal(p, [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]) 14 | 15 | 16 | def test_spm_matrix(): 17 | p = get_initial_motion_params() 18 | 19 | # identity 20 | np.testing.assert_array_equal(spm_matrix(p), 21 | np.eye(4)) 22 | 23 | # induce translations 24 | p[:3] += [.2, .3, .4] 25 | 26 | M = np.array([[1., 0., 0., .2], 27 | [0., 1., 0., .3], 28 | [0., 0., 1., .4], 29 | [0., 0., 0., 1.]]) 30 | 31 | np.testing.assert_array_equal(M, spm_matrix(p)) 32 | 33 | 34 | def test_spm_imatrix(): 35 | p = get_initial_motion_params() 36 | 37 | # spm_matrix and spm_imatrix should be inverses of one another 38 | np.testing.assert_array_equal(spm_imatrix(spm_matrix(p)), p) 39 | 40 | 41 | def test_transform_coords(): 42 | p = get_initial_motion_params() 43 | M1 = np.eye(4) 44 | M2 = np.eye(4) 45 | coords = (0., 0., 1.) 46 | 47 | # rigidly move the voxel 48 | new_coords = transform_coords(p, M1, M2, coords) 49 | 50 | # coords shouldn't change 51 | assert new_coords.shape == (3, 1) 52 | np.testing.assert_array_equal(new_coords.ravel(), coords) 53 | 54 | 55 | def test_apply_realignment_3D_niimg(): 56 | # create 3D niimg 57 | vol = create_random_image(shape=(7, 11, 13)) 58 | 59 | # apply realignment to vol 60 | apply_realignment(vol, [1, 2, 3, 4, 5, 6]) 61 | 62 | 63 | def test_nibabel2spm_affine(): 64 | affine = np.eye(4) 65 | np.testing.assert_array_equal(nibabel2spm_affine(affine)[:, -1], 66 | [-1, -1, -1, 1]) 67 | 68 | 69 | def test_physical_coords(): 70 | affine = np.eye(4) 71 | affine[:-1, -1] = [1., -1., 1.] 72 | coords = get_physical_coords(affine, [1, 2, 3]) 73 | assert coords.ndim == 2 74 | assert coords.shape[1] == 1 75 | assert coords.shape[0] == 3 76 | np.testing.assert_array_equal(coords.ravel(), [2., 1., 4.]) 77 | 78 | 79 | def test_extract_realigment_params(): 80 | affine1 = np.eye(4) 81 | affine2 = np.eye(4) 82 | affine2[-2, -1] += 2. # translation along +z 83 | vol1 = nibabel.Nifti1Image(np.zeros((2, 2, 2)), affine1) 84 | vol2 = nibabel.Nifti1Image(np.zeros((2, 2, 2)), affine2) 85 | np.testing.assert_array_equal(extract_realignment_params(vol1, vol2), 86 | [0, 0, 2, 0, 0, 0, 1, 1, 1, 0, 0, 0]) 87 | for inverse in [True, False]: 88 | salt = -2 if inverse else 2 89 | np.testing.assert_array_equal( 90 | extract_realignment_params(vol1, vol2, inverse=inverse), 91 | [0, 0, salt, 0, 0, 0, 1, 1, 1, 0, 0, 0]) 92 | 93 | 94 | def test_extract_realigment_matrix(): 95 | affine1 = np.eye(4) 96 | affine2 = np.eye(4) 97 | affine2[-2, -1] += -7. 
# translation in direction -z 98 | vol1 = nibabel.Nifti1Image(np.zeros((2, 2, 2)), affine1) 99 | vol2 = nibabel.Nifti1Image(np.zeros((2, 2, 2)), affine2) 100 | np.testing.assert_array_equal( 101 | extract_realignment_matrix(vol1, vol2), 102 | [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, -7], [0, 0, 0, 1]]) 103 | for inverse in [True, False]: 104 | salt = 7 if inverse else -7 105 | np.testing.assert_array_equal( 106 | extract_realignment_matrix(vol1, vol2, inverse=inverse), 107 | [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, salt], [0, 0, 0, 1]]) 108 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_cluster_level_analysis.py: -------------------------------------------------------------------------------- 1 | """ Test the cluster level thresholding utilities 2 | """ 3 | import numpy as np 4 | from scipy.stats import norm 5 | from numpy.testing import assert_array_almost_equal, assert_almost_equal 6 | import nibabel as nib 7 | from ..cluster_level_analysis import (empirical_p_value, fdr_threshold, 8 | fdr_p_values, cluster_stats) 9 | 10 | 11 | def test_empirical_p_value(): 12 | ref = np.arange(100) 13 | np.random.shuffle(ref) 14 | z_score = np.array([-1, 18.6, 98.9, 100]) 15 | pvals_ = empirical_p_value(z_score, ref) 16 | pvals = np.array([1., .81, .01, .0]) 17 | assert_array_almost_equal(pvals, pvals_) 18 | 19 | 20 | def test_fdr(): 21 | n = 100 22 | x = np.linspace(.5 / n, 1. - .5 / n, n) 23 | x[:10] = .0005 24 | x = norm.isf(x) 25 | np.random.shuffle(x) 26 | assert_almost_equal(fdr_threshold(x, .1), norm.isf(.0005)) 27 | assert fdr_threshold(x, .001) == np.infty 28 | 29 | 30 | def test_fdr_p_values(): 31 | n = 100 32 | x = np.linspace(.5 / n, 1. - .5 / n, n) 33 | x[:10] = .0005 34 | x = norm.isf(x) 35 | fdr = fdr_p_values(x) 36 | assert_array_almost_equal(fdr[:10], .005) 37 | assert np.all(fdr[10:] > .95) 38 | assert fdr.max() <= 1 39 | 40 | 41 | def test_cluster_stats(): 42 | shape = (9, 10, 11) 43 | data = np.random.randn(*shape) 44 | threshold = norm.sf(data.max() + 1) 45 | data[2:4, 5:7, 6:8] = np.maximum(10, data.max() + 2) 46 | stat_img = nib.Nifti1Image(data, np.eye(4)) 47 | mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4)) 48 | 49 | # test 1 50 | clusters, _ = cluster_stats( 51 | stat_img, mask_img, threshold, height_control='fpr', 52 | cluster_th=0) 53 | assert len(clusters) == 1 54 | cluster = clusters[0] 55 | assert cluster['size'] == 8 56 | assert_array_almost_equal(cluster['z_score'], 10 * np.ones(8)) 57 | assert cluster['maxima'].shape == (8, 3) 58 | 59 | # test 2:excessive size threshold 60 | clusters, _ = cluster_stats( 61 | stat_img, mask_img, threshold, height_control='fpr', 62 | cluster_th=10) 63 | assert clusters == [] 64 | 65 | # test 3: excessive cluster forming threshold 66 | clusters, _ = cluster_stats( 67 | stat_img, mask_img, 100, height_control='fpr', 68 | cluster_th=0) 69 | assert clusters == [] 70 | 71 | # test 4: fdr threshold 72 | clusters, _ = cluster_stats( 73 | stat_img, mask_img, .05, height_control='fdr', 74 | cluster_th=5) 75 | assert len(clusters) == 1 76 | cluster_ = clusters[0] 77 | assert_array_almost_equal(cluster['maxima'], cluster_['maxima']) 78 | 79 | # test 5: fdr threshold 80 | clusters, _ = cluster_stats( 81 | stat_img, mask_img, .05, height_control='bonferroni', 82 | cluster_th=5) 83 | assert len(clusters) == 1 84 | cluster_ = clusters[0] 85 | assert_array_almost_equal(cluster['maxima'], cluster_['maxima']) 86 | 87 | # test 5: direct threshold 88 | clusters, _ = cluster_stats( 89 | stat_img, 
mask_img, 5., height_control=None, 90 | cluster_th=5) 91 | assert len(clusters) == 1 92 | cluster_ = clusters[0] 93 | assert_array_almost_equal(cluster['maxima'], cluster_['maxima']) 94 | 95 | 96 | def test_multi_cluster_stats(): 97 | shape = (9, 10, 11) 98 | data = np.random.randn(*shape) 99 | threshold = norm.sf(data.max() + 1) 100 | data[2:4, 5:7, 6:8] = np.maximum(10, data.max() + 2) 101 | data[6:7, 8:9, 9:10] = np.maximum(11, data.max() + 1) 102 | stat_img = nib.Nifti1Image(data, np.eye(4)) 103 | mask_img = nib.Nifti1Image(np.ones(shape), np.eye(4)) 104 | 105 | # test 1 106 | clusters, _ = cluster_stats( 107 | stat_img, mask_img, threshold, height_control='fpr', 108 | cluster_th=0) 109 | assert len(clusters) == 2 110 | cluster = clusters[1] 111 | assert cluster['size'] == 1 112 | assert_array_almost_equal(cluster['z_score'], 11) 113 | assert_array_almost_equal(cluster['maxima'], np.array([[6, 8, 9]])) 114 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_conf_parser.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from ..conf_parser import _generate_preproc_pipeline 4 | from ._test_utils import _make_sd 5 | 6 | 7 | def _make_config(out_file, **kwargs): 8 | config = """ 9 | [config] 10 | """ 11 | for k, v in kwargs.items(): 12 | if isinstance(v, list): 13 | v = ", ".join(v) 14 | config += "%s=%s\r\n" % (k, v) 15 | fd = open(out_file, "w") 16 | fd.write(config) 17 | return config 18 | 19 | 20 | def test_obligatory_params_config(): 21 | dataset_dir = "/tmp/data" 22 | output_dir = "/tmp/output" 23 | if not os.path.exists(dataset_dir): 24 | os.makedirs(dataset_dir) 25 | config_file = os.path.join(dataset_dir, "empty.ini") 26 | _make_config(config_file) 27 | with pytest.raises(ValueError): 28 | assert "dataset_dir not specified" in _generate_preproc_pipeline(config_file) 29 | 30 | _make_config(config_file, dataset_dir=dataset_dir) 31 | with pytest.raises(ValueError): 32 | assert "output_dir not specified" in _generate_preproc_pipeline(config_file) 33 | 34 | # this should not give any errors 35 | _make_config(config_file, dataset_dir=dataset_dir, output_dir=output_dir, 36 | session_1_func="fM00223/fM00223_*.img") 37 | _generate_preproc_pipeline(config_file) 38 | 39 | 40 | def test_issue110(): 41 | dataset_dir = "/tmp/data" 42 | output_dir = "/tmp/output" 43 | if not os.path.exists(dataset_dir): 44 | os.makedirs(dataset_dir) 45 | config_file = os.path.join(dataset_dir, "empty.ini") 46 | _make_config(config_file, newsegment=True, dataset_dir=dataset_dir, 47 | output_dir=output_dir) 48 | _, options = _generate_preproc_pipeline(config_file) 49 | assert options["newsegment"] 50 | 51 | 52 | def test_empty_params_default_to_none(): 53 | output_dir = "/tmp/output" 54 | config_file = "/tmp/empty.ini" 55 | for i in range(3): 56 | dataset_dir = " " * i 57 | _make_config(config_file, dataset_dir=dataset_dir, 58 | output_dir=output_dir) 59 | _make_config(config_file) 60 | with pytest.raises(ValueError): 61 | assert "dataset_dir not specified" in _generate_preproc_pipeline(config_file) 62 | 63 | def test_bf_issue_62(): 64 | dataset_dir = "/tmp/dataset" 65 | output_dir = "/tmp/output" 66 | config_file = os.path.join(dataset_dir, "conf.ini") 67 | _make_sd(func_filenames=[os.path.join(dataset_dir, 68 | "sub001/session1/func.nii"), 69 | os.path.join(dataset_dir, 70 | "sub001/session2/func.nii"), 71 | os.path.join(dataset_dir, 72 | "sub001/session3/func.nii")], 73 | 
output_dir=os.path.join(output_dir, "sub001")) 74 | _make_sd(func_filenames=[os.path.join(dataset_dir, 75 | "sub002/session1/func.nii"), 76 | os.path.join(dataset_dir, 77 | "sub002/session2/func.nii")], 78 | output_dir=os.path.join(output_dir, "sub002")) 79 | _make_config(config_file, dataset_dir=dataset_dir, output_dir=output_dir, 80 | session_1_func="session1/func.nii", 81 | session_2_func="session2/func.nii", 82 | session_3_func="session3/func.nii") 83 | subjects, _ = _generate_preproc_pipeline(config_file) 84 | assert len(subjects[0]['func']) == 3 85 | assert len(subjects[1]['func']) == 2 86 | 87 | 88 | def test_newsegment_if_dartel(): 89 | dataset_dir = "/tmp/dataset" 90 | output_dir = "/tmp/output" 91 | config_file = os.path.join(dataset_dir, "conf.ini") 92 | for kwargs in [{}, dict(newsegment=True), dict(newsegment=False)]: 93 | _make_config(config_file, dataset_dir=dataset_dir, 94 | output_dir=output_dir, dartel=True, **kwargs) 95 | _, params = _generate_preproc_pipeline(config_file) 96 | assert params["dartel"] 97 | assert params["newsegment"] 98 | 99 | 100 | def test_bf_issue_122(): 101 | dataset_dir = "/tmp/dataset" 102 | output_dir = "/tmp/output" 103 | config_file = os.path.join(dataset_dir, "conf.ini") 104 | _make_config(config_file, dataset_dir=dataset_dir, output_dir=output_dir, 105 | session_1_func="session1/func_3D.nii") 106 | _make_sd(func_filenames=[os.path.join(dataset_dir, 107 | "sub001/session1/func_3D.nii")], 108 | output_dir=os.path.join(output_dir, "sub001"), 109 | func_ndim=3) 110 | subjects, _ = _generate_preproc_pipeline(config_file) 111 | subjects[0].sanitize() # 122 reports a bug here 112 | 113 | 114 | def test_env_vars(): 115 | os.environ["DATASET_DIR"] = "/tmp/dataset/" 116 | os.environ["OUTPUT_DIR"] = "/tmp/output/" 117 | config_file = os.path.join(os.environ["DATASET_DIR"], "conf.ini") 118 | _make_config(config_file, session_1_func="session1/func.nii") 119 | _generate_preproc_pipeline(config_file) # this call shouldn't crash 120 | 121 | 122 | def test_explicit_list_subdirs(): 123 | dataset_dir = "/tmp/dataset" 124 | output_dir = "/tmp/output" 125 | if not os.path.exists(dataset_dir): 126 | os.makedirs(dataset_dir) 127 | config_file = os.path.join(dataset_dir, "empty.ini") 128 | _make_config(config_file, subject_dirs=["sub01", "sub02"], 129 | session_1_func="session1/func_3D.nii") 130 | for subject_id in ["sub01", "sub02"]: 131 | _make_sd(func_filenames=[os.path.join( 132 | dataset_dir, "%s/session1/func_3D.nii" % subject_id)], 133 | output_dir=os.path.join(output_dir, subject_id)) 134 | subjects, _ = _generate_preproc_pipeline(config_file) 135 | assert len(subjects) == 2 136 | 137 | 138 | def test_list_of_subj_wildcards(): 139 | dataset_dir = "/tmp/dataset" 140 | output_dir = "/tmp/output" 141 | if not os.path.exists(dataset_dir): 142 | os.makedirs(dataset_dir) 143 | for subject_id in ["subx01", "subx02", "suby01", "suby02"]: 144 | _make_sd(func_filenames=[os.path.join( 145 | dataset_dir, "%s/session1/func_3D.nii" % subject_id)], 146 | output_dir=os.path.join(output_dir, subject_id)) 147 | config_file = os.path.join(dataset_dir, "empty.ini") 148 | _make_config(config_file, subject_dirs=["subx*", "suby*"], 149 | session_1_func="session1/func_3D.nii") 150 | subjects, _ = _generate_preproc_pipeline(config_file) 151 | assert len(subjects) == 4 152 | 153 | 154 | def test_user_specified_scratch(): 155 | dataset_dir = "/tmp/dataset" 156 | output_dir = "/tmp/output" 157 | scratch = "/tmp/scratch" 158 | config_file = os.path.join(dataset_dir, "conf.ini") 159 | 
_make_config(config_file, dataset_dir=dataset_dir, output_dir=output_dir, 160 | scratch=scratch, session_1_func="session1/func_3D.nii") 161 | _make_sd(func_filenames=[os.path.join(dataset_dir, 162 | "sub001/session1/func_3D.nii")], 163 | output_dir=os.path.join(output_dir, "sub001"), 164 | func_ndim=3) 165 | subjects, _ = _generate_preproc_pipeline(config_file) 166 | assert subjects[0].scratch == "/tmp/scratch/sub001" 167 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_coreg.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pytest 4 | import numpy.testing 5 | import nibabel 6 | import scipy.io 7 | from ..coreg import compute_similarity_from_jhist, Coregister 8 | from ..affine_transformations import apply_realignment_to_vol 9 | from .test_histograms import test_joint_histogram 10 | 11 | # global setup 12 | THIS_FILE = os.path.abspath(__file__).split('.')[0] 13 | THIS_DIR = os.path.dirname(THIS_FILE) 14 | OUTPUT_DIR = "/tmp/%s" % os.path.basename(THIS_FILE) 15 | 16 | 17 | def test_compute_similarity_from_jhist(): 18 | jh = test_joint_histogram() 19 | 20 | for cost_fun in ['mi', 'nmi', 'ecc']: 21 | s = compute_similarity_from_jhist(jh, cost_fun=cost_fun) 22 | assert not (s > 1) 23 | 24 | @pytest.mark.skip() 25 | def test_coregister_on_toy_data(): 26 | shape = (23, 29, 31) 27 | ref = nibabel.Nifti1Image(np.arange(np.prod(shape)).reshape(shape), 28 | np.eye(4) 29 | ) 30 | 31 | # rigidly move reference vol to get a new volume: the source vol 32 | src = apply_realignment_to_vol(ref, [1, 1, 1, # translations 33 | 0, .01, 0, # rotations 34 | ]) 35 | 36 | # learn realignment params for coregistration: src -> ref 37 | c = Coregister(sep=[4, 2, 1]).fit(ref, src) 38 | 39 | # compare estimated realigment parameters with ground-truth 40 | numpy.testing.assert_almost_equal(-c.params_[4], .01, decimal=2) 41 | numpy.testing.assert_array_almost_equal(-c.params_[[3, 5]], 42 | [0, 0], decimal=2) 43 | numpy.testing.assert_array_equal(np.round(-c.params_)[[0, 1, 2]], 44 | [1., 1., 1.]) 45 | 46 | 47 | @pytest.mark.skip() 48 | def test_coregister_on_real_data(): 49 | # load data 50 | _tmp = scipy.io.loadmat( 51 | os.path.join(THIS_DIR, "test_data/some_anat.mat"), 52 | squeeze_me=True, struct_as_record=False) 53 | ref = nibabel.Nifti1Image(_tmp['data'], _tmp['affine']) 54 | 55 | # rigidly move reference vol to get a new volume: the source vol 56 | src = apply_realignment_to_vol(ref, [1, 2, 3, # translations 57 | 0, .01, 0, # rotations 58 | ]) 59 | 60 | # learn realignment params for coregistration: src -> ref 61 | c = Coregister().fit(ref, src) 62 | 63 | # compare estimated realigment parameters with ground-truth 64 | numpy.testing.assert_almost_equal(-c.params_[4], .01, decimal=4) 65 | numpy.testing.assert_array_almost_equal(-c.params_[[3, 5]], 66 | [0, 0], decimal=4) 67 | numpy.testing.assert_array_equal(np.round(-c.params_)[[0, 1, 2]], 68 | [1., 2., 3.]) 69 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_data/some_anat.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/tests/test_data/some_anat.mat -------------------------------------------------------------------------------- /pypreprocess/tests/test_data/spm_hist2_args_1.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/tests/test_data/spm_hist2_args_1.mat -------------------------------------------------------------------------------- /pypreprocess/tests/test_data/spm_hist2_args_2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/tests/test_data/spm_hist2_args_2.mat -------------------------------------------------------------------------------- /pypreprocess/tests/test_data/spmmmfmri.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neurospin/pypreprocess/17d2e198119f3638903a1894fbd8f698043062ee/pypreprocess/tests/test_data/spmmmfmri.mat -------------------------------------------------------------------------------- /pypreprocess/tests/test_histograms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import nibabel 3 | import numpy.testing 4 | from ..histograms import(_correct_voxel_samp, 5 | make_sampled_grid, 6 | trilinear_interp, 7 | joint_histogram 8 | ) 9 | 10 | 11 | def test_correct_voxel_samp(): 12 | numpy.testing.assert_array_equal( 13 | _correct_voxel_samp(np.eye(4), 2), [2., 2., 2.]) 14 | 15 | numpy.testing.assert_array_equal( 16 | _correct_voxel_samp(np.eye(4), [3, 2, 1]), [3., 2., 1.]) 17 | 18 | numpy.testing.assert_array_equal(_correct_voxel_samp( 19 | np.array([[-1., 0., 0., 128.], 20 | [0., 1., 0., -168.], 21 | [0., 0., 3., -75.], 22 | [0., 0., 0., 1.]]), 23 | 4), [4., 4., 4. / 3]) 24 | 25 | 26 | def test_make_sampled_grid_without_spm_magic(): 27 | for samp in [1., [1.], [1.] * 3]: 28 | numpy.testing.assert_array_equal(make_sampled_grid([3, 5, 7], 29 | samp=samp, 30 | magic=False), 31 | np.array( 32 | [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], 33 | [1., 1., 1., 1., 1., 2., 2., 2., 2., 2., 3., 3., 3., 3., 3.], 34 | [1., 2., 3., 4., 5., 1., 2., 3., 4., 5., 1., 2., 3., 4., 5.] 35 | ])) 36 | 37 | 38 | def test_trilinear_interp(): 39 | shape = (23, 29, 31) 40 | f = np.arange(np.prod(shape)) 41 | 42 | assert trilinear_interp(f, shape, 1, 1, 1) == 0. 43 | assert trilinear_interp(f, shape, 2, 1, 1) == 1. 44 | assert trilinear_interp(f, shape, 1 + shape[0], 1, 1) == shape[0] 45 | assert 0. < trilinear_interp(f, shape, 1.5, 1., 1.) < 1. 46 | 47 | 48 | def test_joint_histogram(): 49 | ref_shape = (23, 29, 61) 50 | src_shape = (13, 51, 19) 51 | ref = np.arange(np.prod(ref_shape)).reshape(ref_shape) 52 | src = np.arange(np.prod(src_shape)).reshape(src_shape) 53 | 54 | # pre-sampled ref 55 | grid = make_sampled_grid(ref_shape, samp=2.) 
56 | sampled_ref = trilinear_interp(ref.ravel(order='F'), ref_shape, *grid) 57 | jh = joint_histogram(sampled_ref, src, grid=grid, M=np.eye(4)) 58 | assert jh.shape == (256, 256) 59 | assert np.all(jh >= 0) 60 | 61 | # ref not presampled 62 | jh = joint_histogram(nibabel.Nifti1Image(ref, np.eye(4)), 63 | src, samp=np.pi, M=np.eye(4)) 64 | assert jh.shape == (256, 256) 65 | assert np.all(jh >= 0) 66 | 67 | return jh 68 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_kernel_smooth.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import nibabel 3 | import numpy.testing 4 | from ..kernel_smooth import fwhm2sigma, sigma2fwhm, smooth_image 5 | 6 | 7 | def create_random_image(shape=None, 8 | ndim=3, 9 | n_scans=None, 10 | affine=np.eye(4), 11 | parent_class=nibabel.Nifti1Image): 12 | """ 13 | Creates a random image of prescribed shape 14 | 15 | """ 16 | rng = np.random.RandomState(0) 17 | if shape is None: 18 | shape = rng.random_integers(20, size=ndim) 19 | ndim = len(shape) 20 | ndim = len(shape) 21 | if not n_scans is None and ndim == 4: 22 | shape[-1] = n_scans 23 | return parent_class(np.random.randn(*shape), affine) 24 | 25 | 26 | def test_fwhm2sigma(): 27 | fwhm = [1, 2, 3] 28 | for _fwhm in fwhm: 29 | numpy.testing.assert_array_equal( 30 | fwhm2sigma(_fwhm), np.array(_fwhm) / np.sqrt(8. * np.log(2))) 31 | for j in range(3): 32 | _fwhm = fwhm[j:] 33 | numpy.testing.assert_array_equal( 34 | fwhm2sigma(_fwhm), np.array(_fwhm) / np.sqrt(8. * np.log(2))) 35 | 36 | 37 | def test_sigma2sigma(): 38 | sigma = [7, 2, 3] 39 | for _sigma in sigma: 40 | numpy.testing.assert_array_equal(sigma2fwhm(_sigma), 41 | np.array( 42 | _sigma) * np.sqrt(8. * np.log(2))) 43 | for j in range(3): 44 | _sigma = sigma[j:] 45 | numpy.testing.assert_array_equal(sigma2fwhm(_sigma), 46 | np.array( 47 | _sigma) * np.sqrt(8. * np.log(2))) 48 | 49 | 50 | def test_fwhm2sigma_and_sigma2fwhm_are_inverses(): 51 | toto = [5, 7, 11.] 
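    # For a Gaussian kernel, FWHM = sigma * sqrt(8 * ln 2) ~= 2.355 * sigma,
    # so fwhm2sigma and sigma2fwhm are exact inverses of one another; the
    # assertions below check this round-trip in both directions.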
52 | numpy.testing.assert_array_equal(toto, sigma2fwhm(fwhm2sigma(toto))) 53 | numpy.testing.assert_array_almost_equal(toto, fwhm2sigma(sigma2fwhm(toto))) 54 | 55 | 56 | def test_smooth_image_for_3D_vol(): 57 | vol = create_random_image() 58 | svol = smooth_image(vol, [5, 7, 11.]) 59 | assert svol.shape == vol.shape 60 | numpy.testing.assert_array_equal(svol.affine, vol.affine) 61 | 62 | 63 | def test_smooth_image_for_4D_film(): 64 | film = create_random_image(ndim=4) 65 | sfilm = smooth_image(film, [5, 7., 11]) 66 | assert sfilm.shape == film.shape 67 | numpy.testing.assert_array_equal(sfilm.affine, film.affine) 68 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_realign.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import numpy as np 4 | import scipy.io 5 | import pytest 6 | import nibabel 7 | from nibabel.processing import smooth_image as nibabel_smoothing 8 | from nilearn.image import index_img 9 | from ..realign import _compute_rate_of_change_of_chisq, MRIMotionCorrection 10 | from ..affine_transformations import ( 11 | apply_realignment, extract_realignment_params, get_initial_motion_params) 12 | from ._test_utils import create_random_image 13 | 14 | # global setup 15 | THIS_FILE = os.path.abspath(__file__).split('.')[0] 16 | THIS_DIR = os.path.dirname(THIS_FILE) 17 | OUTPUT_DIR = "/tmp/%s" % os.path.basename(THIS_FILE) 18 | 19 | 20 | def _make_vol_specific_translation(translation, n_scans, t): 21 | """ 22 | translation: constant part plus vol-dependent part 23 | 24 | """ 25 | 26 | return (t > 0) * (translation + 1. * t / n_scans) 27 | 28 | 29 | def _make_vol_specific_rotation(rotation, n_scans, t): 30 | """ 31 | rotation: constant part plus vol-dependent part 32 | 33 | """ 34 | 35 | return (t > 0) * (rotation + 1. * t / n_scans 36 | ) * np.pi / 180. 37 | 38 | 39 | def test_compute_rate_of_change_of_chisq(): 40 | # XXX please document strange variables !!! 
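    # Notes on the variables below (inferred from this test and its inline
    # comments; the SPM MATLAB implementation is treated as ground truth):
    #   true_A : expected output of _compute_rate_of_change_of_chisq, i.e. the
    #            rate of change of chi-squared w.r.t. each of the 6 rigid-body
    #            motion parameters, precomputed with SPM (one row per grid voxel).
    #   lkp    : indices of the motion parameters being estimated
    #            (0-2: translations, 3-5: rotations).
    #   grid   : voxel coordinates of a 3 x 4 x 5 grid, flattened in Fortran
    #            order to match the MATLAB-generated true_A.
    #   gradG  : finite-difference approximation of the image gradient on grid.
    #   M      : affine transformation applied to the grid (identity here).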
41 | # setup 42 | true_A = np.array( 43 | [[-0., -0., -0., -0., -0., -0.], 44 | [1., -0., -0., -0., 1.000001, 1.000001], 45 | [1., -0., -0., -0., 1.0000015, 1.0000015], 46 | [-2., 1., -0., 1.000001, -2.000001, -5.], 47 | [1., -0., -0., -0., 1.000001, 2.000001], 48 | [1., -0., -0., -0., 1.0000015, 2.0000015], 49 | [-2., 1., -0., 1.0000015, -2.000001, -6.9999995], 50 | [1., -0., -0., -0., 1.000001, 3.000001], 51 | [1., -0., -0., -0., 1.0000015, 3.0000015], 52 | [-2., 1., -0., 1.000002, -2.000001, -8.999999], 53 | [1., -0., -0., -0., 1.000001, 4.000001], 54 | [1., -0., -0., -0., 1.0000015, 4.0000015], 55 | [-2., -3., 1., -7.0000005, -5., 0.9999975], 56 | [1., -0., -0., -0., 2.000001, 1.000001], 57 | [1., -0., -0., -0., 2.0000015, 1.0000015], 58 | [-2., 1., -0., 2.000001, -4.000001, -5.], 59 | [1., -0., -0., -0., 2.000001, 2.000001], 60 | [1., -0., -0., -0., 2.0000015, 2.0000015], 61 | [-2., 1., -0., 2.0000015, -4.000001, -6.9999995], 62 | [1., -0., -0., -0., 2.000001, 3.000001], 63 | [1., -0., -0., -0., 2.0000015, 3.0000015], 64 | [-2., 1., -0., 2.000002, -4.000001, -8.999999], 65 | [1., -0., -0., -0., 2.000001, 4.000001], 66 | [1., -0., -0., -0., 2.0000015, 4.0000015], 67 | [-2., -3., 1., -10., -6.9999995, 0.9999975], 68 | [1., -0., -0., -0., 3.000001, 1.000001], 69 | [1., -0., -0., -0., 3.0000015, 1.0000015], 70 | [-2., 1., -0., 3.000001, -6.000001, -5.], 71 | [1., -0., -0., -0., 3.000001, 2.000001], 72 | [1., -0., -0., -0., 3.0000015, 2.0000015], 73 | [-2., 1., -0., 3.0000015, -6.000001, -6.9999995], 74 | [1., -0., -0., -0., 3.000001, 3.000001], 75 | [1., -0., -0., -0., 3.0000015, 3.0000015], 76 | [-2., 1., -0., 3.000002, -6.000001, -8.999999], 77 | [1., -0., -0., -0., 3.000001, 4.000001], 78 | [1., -0., -0., -0., 3.0000015, 4.0000015], 79 | [-2., -3., 1., -12.9999995, -8.999999, 0.9999975], 80 | [1., -0., -0., -0., 4.000001, 1.000001], 81 | [1., -0., -0., -0., 4.0000015, 1.0000015], 82 | [-2., 1., -0., 4.000001, -8.000001, -5.], 83 | [1., -0., -0., -0., 4.000001, 2.000001], 84 | [1., -0., -0., -0., 4.0000015, 2.0000015], 85 | [-2., 1., -0., 4.0000015, -8.000001, -6.9999995], 86 | [1., -0., -0., -0., 4.000001, 3.000001], 87 | [1., -0., -0., -0., 4.0000015, 3.0000015], 88 | [-2., 1., -0., 4.000002, -8.000001, -8.999999], 89 | [1., -0., -0., -0., 4.000001, 4.000001], 90 | [1., -0., -0., -0., 4.0000015, 4.0000015], 91 | [-2., -3., 1., -15.999999, -10.9999985, 0.9999975], 92 | [1., -0., -0., -0., 5.000001, 1.000001], 93 | [1., -0., -0., -0., 5.0000015, 1.0000015], 94 | [-2., 1., -0., 5.000001, -10.000001, -5.], 95 | [1., -0., -0., -0., 5.000001, 2.000001], 96 | [1., -0., -0., -0., 5.0000015, 2.0000015], 97 | [-2., 1., -0., 5.0000015, -10.000001, -6.9999995], 98 | [1., -0., -0., -0., 5.000001, 3.000001], 99 | [1., -0., -0., -0., 5.0000015, 3.0000015], 100 | [-2., 1., -0., 5.000002, -10.000001, -8.999999], 101 | [1., -0., -0., -0., 5.000001, 4.000001], 102 | [1., -0., -0., -0., 5.0000015, 4.0000015]]) 103 | decimal_precision = 8 # precision for array comparison (SPM is grnd-truth) 104 | lkp = [0, 1, 2, 3, 4, 5] # translations + rotations model 105 | grid = np.mgrid[1:4:, 1:5:, 1:6:].reshape((3, -1), 106 | # true_A came from matlab 107 | order='F') 108 | gradG = np.vstack(([0, 0, 0], np.diff(grid, axis=1).T)).T # image gradient 109 | M = np.eye(4) # grid transformation matrix 110 | 111 | # compute A 112 | A = _compute_rate_of_change_of_chisq(M, grid, gradG, lkp=lkp) 113 | 114 | # compare A with true_A (from spm implementation) 115 | np.testing.assert_array_almost_equal(A, true_A, 116 | 
decimal=decimal_precision) 117 | 118 | 119 | def test_appy_realigment_and_extract_realignment_params_APIs(): 120 | # setu 121 | n_scans = 10 122 | translation = np.array([1, 2, 3]) # mm 123 | rotation = np.array([3, 2, 1]) # degrees 124 | 125 | # create data 126 | affine = np.array([[-3., 0., 0., 96.], 127 | [0., 3., 0., -96.], 128 | [0., 0., 3., -69.], 129 | [0., 0., 0., 1.]]) 130 | film = create_random_image(shape=[16, 16, 16, n_scans], affine=affine) 131 | 132 | # there should be no motion 133 | for t in range(n_scans): 134 | np.testing.assert_array_equal( 135 | extract_realignment_params(index_img(film, t), index_img(film, 0)), 136 | get_initial_motion_params()) 137 | 138 | # now introduce motion into other vols relative to the first vol 139 | rp = np.ndarray((n_scans, 12)) 140 | for t in range(n_scans): 141 | rp[t, ...] = get_initial_motion_params() 142 | rp[t, :3] += _make_vol_specific_translation(translation, n_scans, t) 143 | rp[t, 3:6] += _make_vol_specific_rotation(rotation, n_scans, t) 144 | 145 | # apply motion (noise) 146 | film = apply_realignment(film, rp) 147 | 148 | # check that motion has been induced 149 | for t in range(n_scans): 150 | _tmp = get_initial_motion_params() 151 | _tmp[:3] += _make_vol_specific_translation(translation, n_scans, t) 152 | _tmp[3:6] += _make_vol_specific_rotation(rotation, n_scans, t) 153 | 154 | np.testing.assert_array_almost_equal( 155 | extract_realignment_params(film[t], film[0]), _tmp) 156 | 157 | 158 | def test_MRIMotionCorrection_fit(): 159 | # setup 160 | output_dir = os.path.join(OUTPUT_DIR, inspect.stack()[0][3]) 161 | if not os.path.exists(output_dir): os.makedirs(output_dir) 162 | n_scans = 2 163 | lkp = np.arange(6) 164 | translation = np.array([1, 3, 2]) # mm 165 | rotation = np.array([1, 2, .5]) # degrees 166 | MAX_RE = .12 # we'll test for this max relative error in estimating motion 167 | 168 | # create data 169 | vol = scipy.io.loadmat(os.path.join(THIS_DIR, 170 | "test_data/spmmmfmri.mat"), 171 | squeeze_me=True, struct_as_record=False) 172 | data = np.ndarray(list(vol['data'].shape) + [n_scans]) 173 | for t in range(n_scans): data[..., t] = vol['data'] 174 | film = nibabel.Nifti1Image(data, vol['affine']) 175 | 176 | # rigidly move other volumes w.r.t. 
the first 177 | rp = np.array([get_initial_motion_params() for _ in range(n_scans)]) 178 | for t in range(n_scans): 179 | rp[t, ...][:3] += _make_vol_specific_translation( 180 | translation, n_scans, t) 181 | rp[t, ...][3:6] += _make_vol_specific_rotation(rotation, n_scans, t) 182 | 183 | film = apply_realignment(film, rp) 184 | 185 | _kwargs = {'quality': 1., 'lkp': lkp} 186 | for n_jobs in [1, 2]: 187 | for smooth_func in [None, nibabel_smoothing]: 188 | kwargs = _kwargs.copy() 189 | if smooth_func is not None: 190 | kwargs['smooth_func'] = smooth_func 191 | # instantiate object 192 | mrimc = MRIMotionCorrection(**kwargs).fit([film], n_jobs=n_jobs) 193 | 194 | # check shape of realignment params 195 | np.testing.assert_array_equal(np.array( 196 | mrimc.realignment_parameters_).shape, [1] + [n_scans, 6]) 197 | 198 | # check that we estimated the correct motion params 199 | # XXX refine the notion of "closeness" below 200 | for t in range(n_scans): 201 | _tmp = get_initial_motion_params()[:6] 202 | 203 | # check the estimated motion is well within our MAX_RE limit 204 | _tmp[:3] += _make_vol_specific_translation( 205 | translation, n_scans, t) 206 | _tmp[3:6] += _make_vol_specific_rotation(rotation, n_scans, t) 207 | if t > 0: np.testing.assert_allclose( 208 | mrimc.realignment_parameters_[0][t][lkp], 209 | _tmp[lkp], rtol=MAX_RE) 210 | else: np.testing.assert_array_equal( 211 | mrimc.realignment_parameters_[0][t], 212 | get_initial_motion_params()[:6]) 213 | 214 | #################### 215 | # check transform 216 | #################### 217 | mrimc_output = mrimc.transform(output_dir) 218 | assert len(mrimc_output['realigned_images']) == 1 219 | assert len(set(mrimc_output['realigned_images'][0])) == n_scans 220 | assert len(set(mrimc_output['realigned_images'][0])) == n_scans 221 | 222 | @pytest.mark.skip() 223 | def test_bug_fix_issue_36_on_realign(): 224 | from pypreprocess.datasets import fetch_spm_auditory 225 | sd = fetch_spm_auditory("/tmp/spm_auditory/") 226 | 227 | # shouldn't throw an IndexError 228 | MRIMotionCorrection(n_sessions=8, quality=1.).fit( 229 | [sd.func[:2], sd.func[:3]] * 4).transform("/tmp") 230 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_reslice.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import nibabel 3 | from nilearn.image import iter_img 4 | from ..reslice import reslice_vols 5 | from ..affine_transformations import ( 6 | get_initial_motion_params, apply_realignment) 7 | 8 | 9 | def test_reslice_vols(): 10 | # create basic L pattern 11 | n_scans = 3 12 | film = np.zeros((10, 10, 10, n_scans)) 13 | film[-3:, 5:-1, :, :] = 1 14 | film[:, 2:5, :, :] = 1 15 | affine = np.array( 16 | [[-2.99256921e+00, -1.12436414e-01, -2.23214120e-01, 17 | 1.01544670e+02], 18 | [-5.69147766e-02, 2.87465930e+00, -1.07026458e+00, 19 | -8.77408752e+01], 20 | [-2.03200281e-01, 8.50703299e-01, 3.58708930e+00, 21 | -7.10269012e+01], 22 | [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 23 | 1.00000000e+00]]) 24 | film = nibabel.Nifti1Image(film, affine) 25 | 26 | # rigidly move other volumes w.r.t. 
the first 27 | rp = np.array([get_initial_motion_params() 28 | for _ in range(n_scans)]) 29 | for t in range(film.shape[-1]): 30 | rp[t, ...][:3] += t / n_scans 31 | rp[t, ...][3:6] += np.pi * t 32 | 33 | film = apply_realignment(film, rp) 34 | 35 | # affines are not the same 36 | assert not np.all(film[1].affine == film[0].affine) 37 | 38 | # reslice vols 39 | film = list(reslice_vols(film)) 40 | 41 | # affines are now the same 42 | np.testing.assert_array_equal(film[1].affine, 43 | film[0].affine) 44 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_slice_timing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import inspect 3 | import pytest 4 | import numpy as np 5 | import nibabel 6 | from ..slice_timing import STC, fMRISTC, get_slice_indices 7 | from ..io_utils import save_vols 8 | 9 | # global setup 10 | this_file = os.path.basename(os.path.abspath(__file__)).split('.')[0] 11 | OUTPUT_DIR = "/tmp/%s" % this_file 12 | 13 | 14 | def test_get_slice_indices_ascending(): 15 | np.testing.assert_array_equal( 16 | get_slice_indices(5, slice_order="ascending", return_final=True), 17 | [0, 1, 2, 3, 4]) 18 | 19 | 20 | def test_get_slice_indices_ascending_interleaved(): 21 | np.testing.assert_array_equal( 22 | get_slice_indices(5, slice_order="ascending", interleaved=True, 23 | return_final=True), [0, 3, 1, 4, 2]) 24 | 25 | 26 | def test_get_slice_indices_descending(): 27 | # descending 28 | np.testing.assert_array_equal( 29 | get_slice_indices(5, slice_order="descending", return_final=True), 30 | [4, 3, 2, 1, 0]) 31 | 32 | 33 | def test_get_slice_indices_descending_interleaved(): 34 | # descending and interleaved 35 | np.testing.assert_array_equal( 36 | get_slice_indices(5, slice_order="descending", interleaved=True, 37 | return_final=True), [4, 1, 3, 0, 2]) 38 | 39 | 40 | def test_get_slice_indices_explicit(): 41 | slice_order = [1, 4, 3, 2, 0] 42 | np.testing.assert_array_equal( 43 | get_slice_indices(5, slice_order=slice_order, return_final=True), 44 | [4, 0, 3, 2, 1]) 45 | 46 | 47 | def test_get_slice_indices_explicit_interleaved(): 48 | slice_order = [1, 4, 3, 2, 0] 49 | with pytest.raises(ValueError): 50 | np.testing.assert_array_equal( 51 | get_slice_indices(5, slice_order=slice_order, 52 | interleaved=True), [2, 0, 4, 1, 3]) 53 | 54 | 55 | def test_STC_constructor(): 56 | stc = STC() 57 | assert stc.ref_slice == 0 58 | assert stc.interleaved == False 59 | assert stc.verbose == 1 60 | 61 | 62 | def test_fMRISTC_constructor(): 63 | fmristc = fMRISTC() 64 | assert fmristc.ref_slice == 0 65 | assert fmristc.interleaved == False 66 | assert fmristc.verbose == 1 67 | 68 | 69 | def check_STC(true_signal, corrected_signal, ref_slice=0, 70 | rtol=None, atol=None): 71 | n_slices = true_signal.shape[2] 72 | np.testing.assert_array_almost_equal( 73 | corrected_signal[..., ref_slice, :], 74 | true_signal[..., ref_slice, :]) 75 | for _ in range(1, n_slices): 76 | # relative closeness 77 | if rtol is not None: 78 | np.testing.assert_allclose(true_signal[..., 1:-1], 79 | corrected_signal[..., 1:-1], 80 | rtol=rtol) 81 | 82 | # relative closeness 83 | if atol is not None: 84 | np.testing.assert_allclose(true_signal[..., 1:-1], 85 | corrected_signal[..., 1:-1], 86 | atol=atol) 87 | 88 | 89 | def test_STC_for_sinusoidal_mixture(): 90 | # setup 91 | n_slices = 10 92 | n_rows = 3 93 | n_columns = 2 94 | slice_indices = np.arange(n_slices, dtype=int) 95 | timescale = .01 96 | sine_freq = [.5, .8, 
.11, .7] 97 | 98 | def my_sinusoid(t): 99 | """Creates mixture of sinusoids with different frequencies 100 | 101 | """ 102 | 103 | res = t * 0 104 | 105 | for f in sine_freq: 106 | res += np.sin(2 * np.pi * t * f) 107 | 108 | return res 109 | 110 | time = np.arange(0, 24 + timescale, timescale) 111 | # signal = my_sinusoid(time) 112 | 113 | # define timing vars 114 | freq = 10 115 | TR = freq * timescale 116 | 117 | # sample the time 118 | acquisition_time = time[::freq] 119 | 120 | # corrupt the sampled time by shifting it to the right 121 | slice_TR = 1. * TR / n_slices 122 | time_shift = slice_indices * slice_TR 123 | shifted_acquisition_time = np.array([tau + acquisition_time 124 | for tau in time_shift]) 125 | 126 | # acquire the signal at the corrupt sampled time points 127 | acquired_signal = np.array([ 128 | [[my_sinusoid(shifted_acquisition_time[j]) 129 | for j in range(n_slices)] 130 | for _ in range(n_columns)] for _ in range(n_rows)]) 131 | 132 | n_scans = len(acquisition_time) 133 | 134 | # do STC 135 | stc = STC() 136 | stc.fit(n_slices=n_slices, n_scans=n_scans) 137 | stc.transform(acquired_signal) 138 | 139 | # truth 140 | true_signal = np.array([ 141 | [[my_sinusoid(acquisition_time) 142 | for j in range(n_slices)] for _ in range(n_columns)] 143 | for _ in range(n_rows)]) 144 | 145 | # check 146 | check_STC(true_signal, stc.output_data_, rtol=1.) 147 | check_STC(true_signal, stc.output_data_, atol=.13) 148 | 149 | 150 | def test_STC_for_HRF(): 151 | # setup 152 | import math 153 | n_slices = 10 154 | n_rows = 2 155 | n_columns = 3 156 | slice_indices = np.arange(n_slices, dtype=int) 157 | 158 | # create time values scaled at 1% 159 | timescale = .01 160 | n_timepoints = 24 161 | time = np.linspace(0, n_timepoints, num=int(1 + (n_timepoints - 0) / timescale)) 162 | 163 | # create gamma functions 164 | n1 = 4 165 | lambda1 = 2 166 | n2 = 7 167 | lambda2 = 2 168 | a = .3 169 | c1 = 1 170 | c2 = .5 171 | 172 | def _compute_hrf(t): 173 | """Auxiliary function to compute HRF at given times (t) 174 | 175 | """ 176 | 177 | hx = (t ** (n1 - 1)) * np.exp( 178 | -t / lambda1) / ((lambda1 ** n1) * math.factorial(n1 - 1)) 179 | hy = (t ** (n2 - 1)) * np.exp( 180 | -t / lambda2) / ((lambda2 ** n2) * math.factorial(n2 - 1)) 181 | 182 | # create hrf = weighted difference of two gammas 183 | hrf = a * (c1 * hx - c2 * hy) 184 | 185 | return hrf 186 | 187 | # sample the time and the signal 188 | freq = 100 189 | TR = 3. 190 | acquisition_time = time[::int(TR * freq)] 191 | n_scans = len(acquisition_time) 192 | 193 | # corrupt the sampled time by shifting it to the right 194 | slice_TR = 1. 
* TR / n_slices 195 | time_shift = slice_indices * slice_TR 196 | shifted_acquisition_time = np.array([tau + acquisition_time 197 | for tau in time_shift]) 198 | 199 | # acquire the signal at the corrupt sampled time points 200 | acquired_sample = np.array([_compute_hrf( 201 | shifted_acquisition_time[j]) 202 | for j in range(n_slices)]) 203 | acquired_sample = np.array([acquired_sample, ] * n_columns) 204 | acquired_sample = np.array([acquired_sample, ] * n_rows) 205 | 206 | # do STC 207 | stc = STC() 208 | stc.fit(n_scans=n_scans, n_slices=n_slices) 209 | stc.transform(acquired_sample) 210 | 211 | # truth 212 | true_signal = np.array([ 213 | [[_compute_hrf(acquisition_time) 214 | for j in range(n_slices)] 215 | for _ in range(n_columns)] for _ in range(n_rows)]) 216 | 217 | # check 218 | check_STC(true_signal, stc.output_data_, atol=.005) 219 | 220 | 221 | def test_transform(): 222 | # setup 223 | output_dir = os.path.join(OUTPUT_DIR, inspect.stack()[0][3]) 224 | if not os.path.exists(output_dir): 225 | os.makedirs(output_dir) 226 | film = nibabel.Nifti1Image(np.random.rand(11, 13, 17, 19), 227 | np.eye(4)) 228 | threeD_vols = nibabel.four_to_three(film) 229 | 230 | # filenames 231 | film_filename = os.path.join(output_dir, 'film.nii.gz') 232 | threeD_vols_filenames = [os.path.join(output_dir, 'fMETHODS-%06i.nii' % i) 233 | for i in range(len(threeD_vols))] 234 | 235 | for stuff in [film, threeD_vols]: 236 | for as_files in [False, True]: 237 | if as_files: 238 | if isinstance(stuff, list): 239 | basenames = [os.path.basename(x) 240 | for x in threeD_vols_filenames] 241 | else: 242 | basenames = os.path.basename(film_filename) 243 | stuff = save_vols(stuff, output_dir, basenames=basenames) 244 | fmristc = fMRISTC().fit(raw_data=stuff) 245 | output = fmristc.transform(output_dir=output_dir) 246 | 247 | # test output type, shape, etc. 
248 | if isinstance(stuff, list): 249 | assert isinstance(output, list) 250 | assert len(output) == film.shape[-1] 251 | if as_files: 252 | assert os.path.basename(output[7]) == 'afMETHODS-000007.nii' 253 | else: 254 | if as_files: 255 | assert os.path.basename(output) == 'afilm.nii.gz' 256 | 257 | 258 | def test_get_slice_indices_not_final(): 259 | # regression test for issue #232: by default, let backend software SPM, 260 | # decide the final order of indices of slices 261 | np.testing.assert_array_equal( 262 | get_slice_indices(5, slice_order="ascending", interleaved=True), 263 | [0, 2, 4, 1, 3]) 264 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_subject_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | from nose.tools import assert_equal, assert_true 3 | from ._test_utils import _make_sd 4 | from pypreprocess.subject_data import SubjectData 5 | 6 | DATA_DIR = "test_tmp_data" 7 | if not os.path.exists(DATA_DIR): 8 | os.makedirs(DATA_DIR) 9 | 10 | 11 | def test_init(): 12 | sd = SubjectData(anat='/tmp/anat.nii.gz', func='/tmp/func.nii.gz') 13 | assert_equal(sd.anat, "/tmp/anat.nii.gz") 14 | assert_equal(sd.func, "/tmp/func.nii.gz") 15 | 16 | 17 | def test_sanitize(): 18 | sd = _make_sd(ext=".nii.gz") 19 | sd.sanitize() 20 | assert_equal(os.path.basename(sd.func[0]), "func.nii.gz") 21 | assert_equal(os.path.basename(sd.anat), "anat.nii.gz") 22 | 23 | sd = _make_sd(ext=".nii.gz") 24 | sd.sanitize(niigz2nii=True) 25 | assert_equal(os.path.basename(sd.func[0]), "func.nii") 26 | assert_equal(os.path.basename(sd.anat), "anat.nii") 27 | 28 | sd = _make_sd(ext=".nii") 29 | sd.sanitize() 30 | assert_equal(os.path.basename(sd.func[0]), "func.nii") 31 | assert_equal(os.path.basename(sd.anat), "anat.nii") 32 | 33 | sd = _make_sd(ext=".nii") 34 | sd.sanitize(niigz2nii=True) 35 | assert_equal(os.path.basename(sd.func[0]), "func.nii") 36 | assert_equal(os.path.basename(sd.anat), "anat.nii") 37 | 38 | 39 | def test_unique_func_filenames(): 40 | # XXX against issue 40 41 | for ext in [".nii", ".nii.gz"]: 42 | for make_sess_dirs in [False, True]: 43 | for n_sessions in [1, 2]: 44 | for niigz2nii in [False, True]: 45 | sd = _make_sd(ext=ext, n_sessions=n_sessions, 46 | make_sess_dirs=make_sess_dirs, 47 | unique_func_names=not make_sess_dirs) 48 | sd.sanitize(niigz2nii=niigz2nii) 49 | 50 | assert_equal(len(sd.func), len(set(sd.func))) 51 | 52 | return sd 53 | 54 | 55 | def test_not_unique_func_filenames_exception_thrown(): 56 | sd = _make_sd(func_filenames=["/tmp/titi/func1.nii", 57 | "/tmp/titi/func2.nii"], 58 | output_dir="/tmp") 59 | try: 60 | sd.sanitize() 61 | raise RuntimeError("Check failed!") 62 | except RuntimeError: 63 | pass 64 | 65 | sd = _make_sd(func_filenames=["/tmp/titi/session1/func.nii", 66 | "/tmp/titi/session1/func.nii"], 67 | output_dir="/tmp") 68 | try: 69 | sd.sanitize() 70 | raise RuntimeError("Check failed!") 71 | except RuntimeError: 72 | pass 73 | 74 | sd = _make_sd( 75 | func_filenames=[["/tmp/titi/func/1.img", "/tmp/titi/func/2.img"], 76 | ["/tmp/titi/func/1.img", "/tmp/titi/func/3.img"]], 77 | output_dir="/tmp") 78 | try: 79 | sd.sanitize() 80 | raise RuntimeError("Check failed!") 81 | except RuntimeError: 82 | pass 83 | 84 | sd = _make_sd( 85 | func_filenames=["/tmp/titi/func/1.img", 86 | ["/tmp/titi/func/1.img", "/tmp/titi/func/3.img"]], 87 | output_dir="/tmp") 88 | try: 89 | sd.sanitize() 90 | raise RuntimeError("Check failed!") 91 | except RuntimeError: 92 
| pass 93 | 94 | sd = _make_sd( 95 | func_filenames=[["/tmp/titi/func/1.img", "/tmp/titi/func/2.img"], 96 | ["/tmp/titi/func/3.img", "/tmp/titi/func/4.img"]], 97 | output_dir="/tmp") 98 | sd.sanitize() 99 | 100 | # abspaths of func images should be different with a session 101 | sd = _make_sd( 102 | func_filenames=[["/tmp/titi/func/1.img", "/tmp/titi/func/1.img"], 103 | ["/tmp/titi/func/3.img", "/tmp/titi/func/4.img"]], 104 | output_dir="/tmp") 105 | try: 106 | sd.sanitize() 107 | raise RuntimeError("Check failed!") 108 | except RuntimeError: 109 | pass 110 | 111 | 112 | def test_issue_40(): 113 | sd = _make_sd( 114 | func_filenames=[[('/tmp/rob/ds005/pypreprocess_output/sub001/' 115 | 'task001_run001/deleteorient_1_bold.nii'), 116 | ('/tmp/rob/ds005/pypreprocess_output/sub001/' 117 | 'task001_run001/deleteorient_1_bold.nii'), 118 | ('/tmp/rob/ds005/pypreprocess_output/sub001/' 119 | 'task001_run001/deleteorient_1_bold.nii')]], 120 | output_dir="/tmp") 121 | try: 122 | sd.sanitize() 123 | raise RuntimeError("Check failed!") 124 | except RuntimeError: 125 | pass 126 | 127 | 128 | def test_opt_params(): 129 | # adression issue #104 130 | subject_data = SubjectData() 131 | for deleteorient in [True, False]: 132 | for niigz2nii in [True, False]: 133 | # this shouldn't crash 134 | subject_data.sanitize(deleteorient=deleteorient, 135 | niigz2nii=niigz2nii) 136 | subject_data.output_dir = "/tmp/toto" 137 | subject_data.sanitize() 138 | assert_true(os.path.isdir(subject_data.output_dir)) 139 | subject_data._delete_orientation() 140 | 141 | 142 | def test_single_vol_timeseries_ok(): 143 | sd = _make_sd(func_filenames=["/tmp/titi/func1.nii"], func_ndim=3, 144 | output_dir="/tmp") 145 | 146 | # this shouldn't error 147 | sd.sanitize() 148 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_tsdiffana.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | import numpy as np 6 | import nibabel 7 | from numpy.testing import assert_array_almost_equal 8 | from ..time_diff import (time_slice_diffs, multi_session_time_slice_diffs, 9 | plot_tsdiffs) 10 | from ._test_utils import _make_sd 11 | 12 | def make_test_data(n_scans=3): 13 | shape = (7, 8, 9, n_scans) 14 | film = np.zeros(shape) 15 | film[-3:, 5:-1, :, :] = 1 16 | film[:, 2:5, :, :] = 1 17 | scal = np.sum(film) * 1. 
/ film.size 18 | affine = np.eye(4) 19 | return nibabel.Nifti1Image(film, affine), scal 20 | 21 | 22 | def test_ts_diff_ana_null(): 23 | """ Run ts_diff_ana on constant image sequence """ 24 | # create basic L pattern 25 | n_scans = 2 26 | film, scal = make_test_data(n_scans=n_scans) 27 | film.to_filename('/tmp/plop.nii') 28 | shape = film.shape 29 | report = time_slice_diffs(film) 30 | 31 | assert_array_almost_equal(report['volume_means'], 32 | scal * np.ones(n_scans)) 33 | assert_array_almost_equal(report['volume_mean_diff2'], 34 | np.zeros(n_scans - 1)) 35 | assert_array_almost_equal(report['slice_mean_diff2'], 36 | np.zeros((n_scans - 1, shape[2]))) 37 | assert_array_almost_equal(report['diff2_mean_vol'].get_fdata(), 38 | np.zeros(shape[:3])) 39 | assert_array_almost_equal(report['slice_diff2_max_vol'].get_fdata(), 40 | np.zeros(shape[:3])) 41 | 42 | 43 | def test_ts_diff_ana(): 44 | """ Run ts_diff_ana on changing image sequence """ 45 | # create basic L pattern 46 | n_scans = 2 47 | shape = (7, 8, 9, n_scans) 48 | film = np.zeros(shape) 49 | film[-3:, 5:-1, :, :] = 1 50 | film[:, 2:5, :, :] = 1 51 | ref = film.copy() 52 | scal = np.sum(ref) * 1. / ref.size 53 | film = np.dot(film, np.diag(np.arange(n_scans))) 54 | 55 | affine = np.eye(4) 56 | film = nibabel.Nifti1Image(film, affine) 57 | 58 | report = time_slice_diffs(film) 59 | 60 | assert_array_almost_equal(report['volume_means'], 61 | scal * np.arange(n_scans)) 62 | assert_array_almost_equal(report['volume_mean_diff2'], 63 | scal * np.ones(n_scans - 1)) 64 | assert_array_almost_equal(report['slice_mean_diff2'], 65 | scal * np.ones((n_scans - 1, shape[2])) 66 | ) 67 | assert_array_almost_equal(report['diff2_mean_vol'].get_fdata(), 68 | ref[..., 0]) 69 | assert_array_almost_equal(report['slice_diff2_max_vol'].get_fdata(), 70 | ref[..., 0]) 71 | 72 | 73 | def test_ts_diff_ana_two_session(): 74 | """ Run ts_diff_ana on two image sequences """ 75 | # create basic L pattern 76 | n_scans = 2 77 | film, _ = make_test_data(n_scans=n_scans) 78 | film, affine = film.get_fdata(), film.affine 79 | shape = film.shape 80 | ref = film.copy() 81 | scal = np.sum(ref) * 1. 
/ ref.size 82 | film = np.dot(film, np.diag(np.arange(n_scans))) 83 | 84 | affine = np.eye(4) 85 | films = [] 86 | for _ in range(2): 87 | films.append(nibabel.Nifti1Image(film, affine)) 88 | 89 | report = multi_session_time_slice_diffs(films) 90 | 91 | assert_array_almost_equal(report['volume_means'], 92 | np.tile(scal * np.arange(n_scans), 2)) 93 | assert_array_almost_equal(report['volume_mean_diff2'], 94 | np.tile(scal * np.ones(n_scans - 1), 2)) 95 | assert_array_almost_equal( 96 | report['slice_mean_diff2'], 97 | np.tile(scal * np.ones((n_scans - 1, shape[2])), (2, 1))) 98 | assert_array_almost_equal(report['diff2_mean_vol'].get_fdata(), 99 | ref[..., 0]) 100 | assert_array_almost_equal(report['slice_diff2_max_vol'].get_fdata(), 101 | ref[..., 0]) 102 | 103 | 104 | def test_plot_tsdiffs_no_crash(): 105 | n_scans, n_sessions = 2, 2 106 | rng = np.random.RandomState(42) 107 | shape = (7, 8, 9, n_scans) 108 | films = [] 109 | affine = np.eye(4) 110 | for _ in range(n_sessions): 111 | film = np.zeros(shape) 112 | msk = (rng.randn(*shape) > .7) 113 | film[msk] = rng.randn(msk.sum()) 114 | films.append(nibabel.Nifti1Image(film, affine)) 115 | 116 | results = multi_session_time_slice_diffs(films) 117 | for use_same_figure in [True, False]: 118 | plot_tsdiffs(results, use_same_figure=use_same_figure) 119 | 120 | 121 | def test_ts_diff_ana_nan(): 122 | """ Check that Nans are well handled by tsdiffana """ 123 | # create basic L pattern 124 | n_scans = 20 125 | shape = (4, 5, 6, n_scans) 126 | film = np.random.randn(*shape) 127 | film[0] = np.nan 128 | film[film < - 2.] = np.nan 129 | affine = np.eye(4) 130 | film = nibabel.Nifti1Image(film, affine) 131 | 132 | report = time_slice_diffs(film) 133 | 134 | assert not np.any(np.isnan(report['volume_means'])) 135 | assert not np.any(np.isnan(report['volume_mean_diff2'])) 136 | assert not np.any(np.isnan(report['slice_mean_diff2'])) 137 | assert not np.any(np.isnan(report['diff2_mean_vol'].get_fdata())) 138 | assert not np.any(np.isnan(report['slice_diff2_max_vol'].get_fdata())) 139 | 140 | 141 | def test_issue_144(): 142 | # tsdiffana should error on 3D images 143 | sd = _make_sd(func_filenames=["/tmp/titi/func1.nii"], func_ndim=3, 144 | output_dir="/tmp") 145 | sd.sanitize() 146 | try: 147 | time_slice_diffs(sd.func[0]) 148 | assert False 149 | except Exception: 150 | pass 151 | -------------------------------------------------------------------------------- /pypreprocess/tests/test_version.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple test script for existance of pypreprocess version 3 | """ 4 | 5 | import pypreprocess 6 | from ..version import __version__ 7 | 8 | 9 | def test_version(): 10 | version = pypreprocess.__version__ 11 | assert isinstance(version, str) 12 | -------------------------------------------------------------------------------- /pypreprocess/version.py: -------------------------------------------------------------------------------- 1 | """ 2 | pypreprocess version 3 | """ 4 | 5 | __version__ = '0.0.1.dev0' 6 | -------------------------------------------------------------------------------- /scripts/HCP.ini: -------------------------------------------------------------------------------- 1 | ###################################################################################### 2 | # 3 | # pypreprocess configuration. 4 | # 5 | # Copy this file to the acquisition directory containing the data you wish to 6 | # preprocess. 
Then, manually edit the values to customize the pipeline to suite your 7 | # needs. 8 | # 9 | # Disable a preprocessing step by setting 'disable = True' under the corresponding 10 | # section, or simply comment the section altogether. 11 | # 12 | # IMPORTANT NOTES 13 | # =============== 14 | # - indexing begins from 1 (matlab style) 15 | # - you can explicitly specifiy the software to be used for a specific stage of the 16 | # preprocessing by accordingly setting the 'software' field under the 17 | # corresponding section (e.g like so: software = spm) 18 | # - A value of 'auto', 'unspecified', 'none', etc. for a parameter means it should 19 | # be specified or inferred at run-time 20 | # 21 | # Authored by DOHMATOB Elvis Dopgima 22 | # 23 | ###################################################################################### 24 | 25 | [config] # DON'T TOUCH THIS LINE ! 26 | 27 | ########## 28 | # INPUT 29 | ########## 30 | 31 | protocol = unspecified # at runtime, we'll iterate over MOTOR, LANGUAGE, SOCIAL, etc. 32 | 33 | # Path (relative or full) of directory containing data (if different from directory 34 | # containing this configuration file). 35 | dataset_dir = /storage/data/HCP/S500-1,/storage/data/HCP/S500-2,/storage/data/HCP/S500-3,/storage/data/HCP/S500-4,/storage/data/HCP/S500-5 36 | 37 | # Brief description of dataset (you can use html formatting) 38 | dataset_description = """HCP experiments""" 39 | 40 | # The number of subjects to include; by default all subjects are included. 41 | nsubjects = auto 42 | 43 | # Wildcard for, or space-separated list of, subject directories relative to the 44 | # acquisition directory 45 | subject_dirs = * 46 | 47 | # Path of session-wise functional images, relative to the subject data dir. 48 | # Wildcards are allowed. Each session must be specified in the form 49 | session_1_func = MNINonLinear/Results/tfMRI_%protocol%_LR/tfMRI_%protocol%_LR.nii.gz 50 | session_2_func = MNINonLinear/Results/tfMRI_%protocol%_RL/tfMRI_%protocol%_RL.nii.gz 51 | 52 | # Path of T1 (anat) image relative to the subject data dir 53 | anat = MNINonLinear/T1w_restore_brain.nii.gz 54 | 55 | # Should caching (nipype, joblib, etc.) be used to safe ages of hard-earned computation ? 56 | caching = True 57 | 58 | # Should orientation meta-date be stripped-off image headers ? 59 | deleteorient = False 60 | 61 | # distortion correction ? 62 | disable_distortion_correction = True 63 | 64 | ############################ 65 | # Slice-Timing Correction 66 | ############################ 67 | 68 | # Don't you want us to do Slice-Timing Correction (STC) ? 69 | disable_slice_timing = True 70 | 71 | # Repetition Time 72 | TR = 2.0 73 | 74 | # Formula for Acquisition Time for single brain volume. 75 | TA = TR * (1 - 1 / nslices) 76 | 77 | # Can be ascending, descending, or an explicitly specified sequence. 78 | slice_order = ascending 79 | 80 | # Were the EPI slices interleaved ? 81 | interleaved = False 82 | 83 | # Reference slice (indexing begins from 1) 84 | refslice = 1 85 | 86 | # software to use for Slice-Timing Correction 87 | slice_timing_software = spm 88 | 89 | 90 | #################################### 91 | # Realignment (Motion Correction) 92 | #################################### 93 | 94 | # Don't do realignment / motion correction ? 95 | disable_realign = True 96 | 97 | # Register all volumes to the mean thereof ? 98 | register_to_mean = True 99 | 100 | # Reslice volumes ? 101 | realign_reslice = False 102 | 103 | # Software to use realignment / motion correction. 
Can be spm or fsl 104 | realign_software = spm 105 | 106 | 107 | ################### 108 | # Coregistration 109 | ################### 110 | 111 | # Don't you want us to do coregistration of T1 (anat) and fMRI (func) ? 112 | disable_coregister = True 113 | 114 | # During coregistration, do you want us to register func -> anat or anat -> func ? 115 | coreg_func_to_anat = True 116 | 117 | # Should we reslice files during coregistration ? 118 | coregister_reslice = False 119 | 120 | # Software to use for coregistration 121 | coregister_software = spm 122 | 123 | 124 | ######################## 125 | # Tissue Segmentation 126 | ######################## 127 | 128 | # Don't you want us to segment the brain (into gray-matter, white matter, csf, etc.) ? 129 | disable_segment = True 130 | 131 | # Software to use for tissue segmentation. 132 | segment_software = spm 133 | 134 | # Use spm's NewSegment ? 135 | newsegment = True 136 | 137 | ################## 138 | # Normalization 139 | ################## 140 | 141 | # Don't you want want us to normalize each subject's brain unto a template (MNI 142 | # for example) ? 143 | disable_normalize = True 144 | 145 | # Path to your template image. 146 | template = "MNI" 147 | 148 | # Voxel sizes of final func images 149 | func_write_voxel_sizes = [3, 3, 3] 150 | 151 | # Voxel sizes of final anat images 152 | anat_write_voxel_size = [1, 1, 1] 153 | 154 | # Use dartel for normalization ? 155 | dartel = False 156 | 157 | # Software to use for normalization. 158 | normalize_software = spm 159 | 160 | 161 | ############## 162 | # Smoothing 163 | ############## 164 | 165 | # FWHM (in mm) of smoothing kernel. 166 | fwhm = 0 167 | 168 | 169 | ########### 170 | # Output 171 | ########### 172 | 173 | # Root directory (full path or relative to the directory containing this file) for 174 | # all output files and reports 175 | output_dir = /storage/workspace/elvis/HCP500_GLM/%protocol% 176 | 177 | # Generate html reports ? 178 | report = False 179 | 180 | # Plot coefficient of variation post-preprocessing ? 181 | plot_tsdiffana = True 182 | 183 | -------------------------------------------------------------------------------- /scripts/HCP_tfMRI_MOTOR_preproc.ini: -------------------------------------------------------------------------------- 1 | ###################################################################################### 2 | # 3 | # pypreprocess configuration. 4 | # 5 | # Copy this file to the acquisition directory containing the data you wish to 6 | # preprocess. Then, manually edit the values to customize the pipeline to suite your 7 | # needs. 8 | # 9 | # Disable a preprocessing step by setting 'disable = True' under the corresponding 10 | # section, or simply comment the section altogether. 11 | # 12 | # IMPORTANT NOTES 13 | # =============== 14 | # - indexing begins from 1 (matlab style) 15 | # - you can explicitly specifiy the software to be used for a specific stage of the 16 | # preprocessing by accordingly setting the 'software' field under the 17 | # corresponding section (e.g like so: software = spm) 18 | # - A value of 'auto', 'unspecified', 'none', etc. for a parameter means it should 19 | # be specified or inferred at run-time 20 | # 21 | # Authored by DOHMATOB Elvis Dopgima 22 | # 23 | ###################################################################################### 24 | 25 | [config] # DON'T TOUCH THIS LINE ! 
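# Example (for illustration only, not a recommended setting): to actually run slice-timing
# correction with an explicitly specified slice order, the corresponding section further
# below would be edited along these lines:
#     disable_slice_timing = False
#     TR = 2.0
#     slice_order = [1, 3, 5, 2, 4]    # hypothetical explicit sequence
#     slice_timing_software = spm
# All keys follow the conventions recalled in the header above: indexing starts at 1, and
# 'auto' / 'unspecified' / 'none' means the value is inferred at run-time.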
26 | 27 | ########## 28 | # INPUT 29 | ########## 30 | 31 | protocol = unspecified # at runtime, we'll iterate over MOTOR, LANGUAGE, SOCIAL, etc. 32 | 33 | # Path (relative or full) of directory containing data (if different from directory 34 | # containing this configuration file). 35 | dataset_dir = /storage/data/HCP/Q1/,/storage/data/HCP/Q2 36 | 37 | # Brief description of dataset (you can use html formatting) 38 | dataset_description = """HCP experiments""" 39 | 40 | # The name of the dataset as will be shown in the report pages. Must be an integer 41 | # or auto 42 | dataset_id = auto 43 | 44 | # The number of subjects to include; by default all subjects are included. 45 | nsubjects = auto 46 | 47 | # List of (or wildcard for) subject id's to be ignored / excluded; must be space- 48 | # separated list of subject ids. 49 | exclude_these_subject_ids = 117122 50 | 51 | # List of (or wildcard for) the only subjects to be included; must be space 52 | # separated list of subject ids. 53 | include_only_these_subject_ids = auto 54 | 55 | # Wildcard for, or space-separated list of, subject directories relative to the 56 | # acquisition directory 57 | subject_dirs = ****** 58 | 59 | # Path of session-wise functional images, relative to the subject data dir. 60 | # Wildcards are allowed. Each session must be specified in the form 61 | session_1_func = MNINonLinear/Results/tfMRI_%protocol%_LR/tfMRI_%protocol%_LR.nii.gz 62 | session_2_func = MNINonLinear/Results/tfMRI_%protocol%_RL/tfMRI_%protocol%_RL.nii.gz 63 | 64 | # Path of T1 (anat) image relative to the subject data dir 65 | anat = MNINonLinear/T1w_restore_brain.nii.gz 66 | 67 | # Should caching (nipype, joblib, etc.) be used to safe ages of hard-earned computation ? 68 | caching = True 69 | 70 | # Number of jobs to be spawn altogether. 71 | n_jobs = 32 72 | 73 | # Should orientation meta-date be stripped-off image headers ? 74 | deleteorient = False 75 | 76 | 77 | # distortion correction ? 78 | disable_distortion_correction = True 79 | 80 | ############################ 81 | # Slice-Timing Correction 82 | ############################ 83 | 84 | # Don't you want us to do Slice-Timing Correction (STC) ? 85 | disable_slice_timing = True 86 | 87 | # Repetition Time 88 | TR = 2.0 89 | 90 | # Formula for Acquisition Time for single brain volume. 91 | TA = TR * (1 - 1 / nslices) 92 | 93 | # Can be ascending, descending, or an explicitly specified sequence. 94 | slice_order = ascending 95 | 96 | # Were the EPI slices interleaved ? 97 | interleaved = False 98 | 99 | # Reference slice (indexing begins from 1) 100 | refslice = 1 101 | 102 | # software to use for Slice-Timing Correction 103 | slice_timing_software = spm 104 | 105 | 106 | #################################### 107 | # Realignment (Motion Correction) 108 | #################################### 109 | 110 | # Don't do realignment / motion correction ? 111 | disable_realign = True 112 | 113 | # Register all volumes to the mean thereof ? 114 | register_to_mean = True 115 | 116 | # Reslice volumes ? 117 | realign_reslice = False 118 | 119 | # Software to use realignment / motion correction. Can be spm or fsl 120 | realign_software = spm 121 | 122 | 123 | ################### 124 | # Coregistration 125 | ################### 126 | 127 | # Don't you want us to do coregistration of T1 (anat) and fMRI (func) ? 128 | disable_coregister = True 129 | 130 | # During coregistration, do you want us to register func -> anat or anat -> func ? 
131 | coreg_func_to_anat = True 132 | 133 | # Should we reslice files during coregistration ? 134 | coregister_reslice = False 135 | 136 | # Software to use for coregistration 137 | coregister_software = spm 138 | 139 | 140 | ######################## 141 | # Tissue Segmentation 142 | ######################## 143 | 144 | # Don't you want us to segment the brain (into gray-matter, white matter, csf, etc.) ? 145 | disable_segment = True 146 | 147 | # Software to use for tissue segmentation. 148 | segment_software = spm 149 | 150 | # Use spm's NewSegment ? 151 | newsegment = True 152 | 153 | ################## 154 | # Normalization 155 | ################## 156 | 157 | # Don't you want want us to normalize each subject's brain unto a template (MNI 158 | # for example) ? 159 | disable_normalize = True 160 | 161 | # Path to your template image. 162 | template = "MNI" 163 | 164 | # Voxel sizes of final func images 165 | func_write_voxel_sizes = [3, 3, 3] 166 | 167 | # Voxel sizes of final anat images 168 | anat_write_voxel_size = [1, 1, 1] 169 | 170 | # Use dartel for normalization ? 171 | dartel = False 172 | 173 | # Software to use for normalization. 174 | normalize_software = spm 175 | 176 | 177 | ############## 178 | # Smoothing 179 | ############## 180 | 181 | # FWHM (in mm) of smoothing kernel. 182 | fwhm = [5, 5, 5] 183 | 184 | 185 | ########### 186 | # Output 187 | ########### 188 | 189 | # Root directory (full path or relative to the directory containing this file) for 190 | # all output files and reports 191 | output_dir = /storage/workspace/elvis/HCP_GLM/%protocol% 192 | 193 | # Generate html reports ? 194 | report = False 195 | 196 | # Plot coefficient of variation post-preprocessing ? 197 | plot_tsdiffana = True 198 | 199 | -------------------------------------------------------------------------------- /scripts/abide_preproc.py: -------------------------------------------------------------------------------- 1 | """ 2 | :Module: nipype_preproc_spm_abide 3 | :Synopsis: SPM use-case for preprocessing ABIDE rest dataset 4 | :Author: dohmatob elvis dopgima 5 | 6 | """ 7 | 8 | """standard imports""" 9 | import os 10 | import glob 11 | import sys 12 | from pypreprocess.nipype_preproc_spm_utils import (do_subjects_preproc, 13 | SubjectData 14 | ) 15 | 16 | # brief description of ABIDE 17 | DATASET_DESCRIPTION = """\ 18 |
Institute %s, \ 19 | ABIDE rest auditory dataset. \
20 | """ 21 | 22 | """DARTEL ?""" 23 | DO_DARTEL = True 24 | 25 | """institutes we're interested in""" 26 | INSTITUTES = [ 27 | 'CMU', 28 | 'Caltech', 29 | 'KKI', 30 | 'Leuven', 31 | 'MaxMun', 32 | 'NYU', 33 | 'OHSU', 34 | 'Olin', 35 | 'Pitt', 36 | 'SBL', 37 | 'SDSU', 38 | 'Stanford', 39 | 'Trinity', 40 | 'UCLA', 41 | 'UM', 42 | 'USM', 43 | 'Yale'] 44 | 45 | 46 | def preproc_abide_institute(institute_id, abide_data_dir, abide_output_dir, 47 | do_dartel=True, 48 | do_report=True, 49 | n_jobs=-1, 50 | ): 51 | """Preprocesses a given ABIDE institute 52 | 53 | """ 54 | 55 | # set institute output dir 56 | institute_output_dir = os.path.join(abide_output_dir, institute_id) 57 | if not os.path.exists(institute_output_dir): 58 | os.makedirs(institute_output_dir) 59 | 60 | # set subject id wildcard for globbing institute subjects 61 | subject_id_wildcard = "%s_*/%s_*" % (institute_id, institute_id) 62 | 63 | # glob for subject ids 64 | subject_ids = [os.path.basename(x) 65 | for x in glob.glob(os.path.join(abide_data_dir, 66 | subject_id_wildcard))] 67 | 68 | # sort the ids 69 | subject_ids.sort() 70 | 71 | ignored_subject_ids = [] 72 | 73 | # produce subject data 74 | def subject_factory(): 75 | for subject_id in subject_ids: 76 | subject_data = SubjectData() 77 | subject_data.subject_id = subject_id 78 | 79 | try: 80 | subject_data.func = glob.glob( 81 | os.path.join( 82 | abide_data_dir, 83 | "%s/%s/scans/rest*/resources/NIfTI/files/rest.nii" % ( 84 | subject_id, subject_id)))[0] 85 | except IndexError: 86 | ignored_because = "no rest data found" 87 | print("Ignoring subject %s (%s)" % (subject_id, 88 | ignored_because)) 89 | ignored_subject_ids.append((subject_id, ignored_because)) 90 | continue 91 | 92 | try: 93 | subject_data.anat = glob.glob( 94 | os.path.join( 95 | abide_data_dir, 96 | "%s/%s/scans/anat/resources/NIfTI/files/mprage.nii" % ( 97 | subject_id, subject_id)))[0] 98 | except IndexError: 99 | if do_dartel: 100 | # can't do DARTEL under such conditions 101 | continue 102 | 103 | try: 104 | subject_data.hires = glob.glob( 105 | os.path.join( 106 | abide_data_dir, 107 | ("%s/%s/scans/hires/resources/NIfTI/" 108 | "files/hires.nii") % (subject_id, subject_id)))[0] 109 | except IndexError: 110 | ignored_because = "no anat/hires data found" 111 | print("Ignoring subject %s (%s)" % (subject_id, 112 | ignored_because)) 113 | ignored_subject_ids.append((subject_id, ignored_because)) 114 | continue 115 | 116 | subject_data.output_dir = os.path.join( 117 | os.path.join( 118 | institute_output_dir, subject_id)) 119 | 120 | # yield data for this subject 121 | yield subject_data 122 | 123 | # do preprocessing proper 124 | report_filename = os.path.join(institute_output_dir, 125 | "_report.html") 126 | do_subjects_preproc( 127 | subject_factory(), 128 | dataset_id=institute_id, 129 | output_dir=institute_output_dir, 130 | do_report=do_report, 131 | do_dartel=do_dartel, 132 | dataset_description="%s" % DATASET_DESCRIPTION.replace( 133 | "%s", 134 | institute_id), 135 | report_filename=report_filename, 136 | do_shutdown_reloaders=True,) 137 | 138 | for subject_id, ignored_because in ignored_subject_ids: 139 | print("Ignored %s because %s" % (subject_id, ignored_because)) 140 | 141 | """sanitize cmd-line input""" 142 | if len(sys.argv) < 3: 143 | print("\r\nUsage: source /etc/fsl/4.1/fsl.sh; python %s " 144 | "<abide_dir> <output_dir> [comma-separated institute" 145 | " ids]\r\n" % sys.argv[0]) 146 | print("Examples:\r\nsource /etc/fsl/4.1/fsl.sh; python %s " 147 | "/volatile/home/aa013911/ABIDE " 148 |
"/volatile/home/aa013911/DED/ABIDE_runs" % sys.argv[0]) 149 | print("source /etc/fsl/4.1/fsl.sh; python %s " 150 | "/volatile/home/aa013911/ABIDE " 151 | "/volatile/home/aa013911/DED/ABIDE_runs Leuven,KKI,NYU" 152 | % sys.argv[0]) 153 | sys.exit(1) 154 | 155 | ABIDE_DIR = os.path.abspath(sys.argv[1]) 156 | 157 | OUTPUT_DIR = os.path.abspath(sys.argv[2]) 158 | if not os.path.isdir(OUTPUT_DIR): 159 | os.makedirs(OUTPUT_DIR) 160 | 161 | if len(sys.argv) > 3: 162 | INSTITUTES = sys.argv[3].split(",") 163 | 164 | if DO_DARTEL: 165 | import joblib 166 | joblib.Parallel(n_jobs=1, verbose=100)( 167 | joblib.delayed(preproc_abide_institute)( 168 | institute_id, 169 | ABIDE_DIR, 170 | OUTPUT_DIR, 171 | do_dartel=True, 172 | # do_report=False, 173 | ) 174 | for institute_id in INSTITUTES) 175 | else: 176 | for institute_id in INSTITUTES: 177 | preproc_abide_institute(institute_id, ABIDE_DIR, OUTPUT_DIR, 178 | do_dartel=False, 179 | do_report=False, 180 | ) 181 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | 3 | [bdist_rpm] 4 | doc-files = doc 5 | 6 | [flake8] 7 | # For PEP8 error codes see 8 | # http://pep8.readthedocs.org/en/latest/intro.html#error-codes 9 | # E402: module level import not at top of file 10 | # W503: line break before binary operator 11 | # W504: line break after binary operator 12 | ignore=E402, W503, W504, W605 13 | 14 | [tool:pytest] 15 | doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS 16 | junit_family = xunit2 17 | addopts = 18 | --doctest-modules 19 | -s 20 | -vv 21 | --durations=0 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env python 2 | 3 | descr = """A set of python modules for neuroimaging...""" 4 | 5 | import sys 6 | import os 7 | import io 8 | import shutil 9 | 10 | DISTNAME = 'pypreprocess' 11 | DESCRIPTION = 'Statistical learning for neuroimaging in Python' 12 | LONG_DESCRIPTION = io.open('README.rst', encoding="utf8").read() 13 | MAINTAINER = 'Gael Varoquaux' 14 | MAINTAINER_EMAIL = 'gael.varoquaux@normalesup.org' 15 | URL = 'http://pypreprocess.github.com' 16 | LICENSE = 'new BSD' 17 | DOWNLOAD_URL = 'http://pypreprocess.github.com' 18 | VERSION = '0.1' 19 | 20 | from numpy.distutils.core import setup 21 | 22 | 23 | # For some commands, use setuptools 24 | if len(set(('develop', 'sdist', 'release', 'bdist_egg', 'bdist_rpm', 25 | 'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info', 26 | 'build_sphinx', 'egg_info', 'easy_install', 'upload', 27 | )).intersection(sys.argv)) > 0: 28 | from setuptools import setup 29 | 30 | 31 | def configuration(parent_package='', top_path=None): 32 | if os.path.exists('MANIFEST'): 33 | os.remove('MANIFEST') 34 | 35 | from numpy.distutils.misc_util import Configuration 36 | config = Configuration(None, parent_package, top_path) 37 | 38 | # main modules 39 | config.add_subpackage('pypreprocess') 40 | 41 | # spm loader 42 | config.add_subpackage('pypreprocess/spm_loader') 43 | 44 | # extrenal dependecies 45 | config.add_subpackage('pypreprocess/external') 46 | config.add_subpackage('pypreprocess/external/tempita') 47 | config.add_subpackage('pypreprocess/external/nistats') 48 | 49 | # plugin for generating reports 50 | config.add_subpackage('pypreprocess/reporting') 51 | config.add_data_dir("pypreprocess/reporting/template_reports") 52 | config.add_data_dir("pypreprocess/reporting/css") 53 | config.add_data_dir("pypreprocess/reporting/js") 54 | config.add_data_dir("pypreprocess/reporting/icons") 55 | config.add_data_dir("pypreprocess/reporting/images") 56 | 57 | return config 58 | 59 | 60 | if __name__ == "__main__": 61 | 62 | old_path = os.getcwd() 63 | local_path = os.path.dirname(os.path.abspath(sys.argv[0])) 64 | # python 3 compatibility stuff. 65 | # Simplified version of scipy strategy: copy files into 66 | # build/py3k, and patch them using lib2to3. 
67 | if sys.version_info[0] == 3: 68 | try: 69 | import lib2to3cache 70 | except ImportError: 71 | pass 72 | local_path = os.path.join(local_path, 'build', 'py3k') 73 | if os.path.exists(local_path): 74 | shutil.rmtree(local_path) 75 | print("Copying source tree into build/py3k for 2to3 transformation" 76 | "...") 77 | shutil.copytree(os.path.join(old_path, 'pypreprocess'), 78 | os.path.join(local_path, 'pypreprocess')) 79 | import lib2to3.main 80 | from io import StringIO 81 | print("Converting to Python3 via 2to3...") 82 | _old_stdout = sys.stdout 83 | try: 84 | sys.stdout = StringIO() # supress noisy output 85 | res = lib2to3.main.main("lib2to3.fixes", 86 | ['-x', 'import', '-w', local_path]) 87 | finally: 88 | sys.stdout = _old_stdout 89 | 90 | if res != 0: 91 | raise Exception('2to3 failed, exiting ...') 92 | 93 | os.chdir(local_path) 94 | sys.path.insert(0, local_path) 95 | 96 | setup(configuration=configuration, 97 | name=DISTNAME, 98 | maintainer=MAINTAINER, 99 | include_package_data=True, 100 | maintainer_email=MAINTAINER_EMAIL, 101 | description=DESCRIPTION, 102 | license=LICENSE, 103 | url=URL, 104 | version=VERSION, 105 | download_url=DOWNLOAD_URL, 106 | long_description=LONG_DESCRIPTION, 107 | zip_safe=False, # the package can run out of an .egg file 108 | classifiers=[ 109 | 'Intended Audience :: Science/Research', 110 | 'Intended Audience :: Developers', 111 | 'License :: OSI Approved', 112 | 'Programming Language :: C', 113 | 'Programming Language :: Python', 114 | 'Topic :: Software Development', 115 | 'Topic :: Scientific/Engineering', 116 | 'Operating System :: Microsoft :: Windows', 117 | 'Operating System :: POSIX', 118 | 'Operating System :: Unix' 119 | ] 120 | ) 121 | -------------------------------------------------------------------------------- /spike/sprint.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import numpy as np 4 | import scipy.io 5 | import nibabel 6 | from nipy.modalities.fmri.design_matrix import make_dmtx 7 | from nipy.modalities.fmri.experimental_paradigm import BlockParadigm 8 | from nipy.modalities.fmri.glm import FMRILinearModel 9 | from pypreprocess.datasets import fetch_spm_multimodal_fmri 10 | from pypreprocess.purepython_preproc_utils import do_subject_preproc 11 | from joblib import Memory 12 | from pypreprocess.reslice import reslice_vols 13 | 14 | 15 | def parse_onset_file(onset_file): 16 | conditions = [] 17 | onsets = [] 18 | durations = [] 19 | amplitudes = [] 20 | line_cnt = 0 21 | fd = open(onset_file, 'r') 22 | while True: 23 | line = fd.readline() 24 | if not line: 25 | break 26 | line = line.rstrip("\r\n") 27 | line = line.split(" ") 28 | line_cnt += 1 29 | if len(line) not in [2, 3, 4]: 30 | raise ValueError("Mal-formed line %i: %s" % ( 31 | line_cnt, " ".join(line))) 32 | if len(line) == 2: 33 | line.append(0.) 34 | if len(line) == 3: 35 | line.append(1.) 36 | condition, onset, duration, amplitude = line 37 | conditions.append(condition) 38 | onsets.append(float(onset)) 39 | durations.append(float(duration)) 40 | amplitudes.append(float(amplitude)) 41 | 42 | fd.close() 43 | if not line_cnt > 0: 44 | raise ValueError( 45 | "Couldn't read any data from onset file: %s" % onset_file) 46 | return map(np.array, [conditions, onsets, durations, amplitudes]) 47 | 48 | # fetch data 49 | data_dir = "examples/spm_multimodal/" 50 | subject_data = fetch_spm_multimodal_fmri(data_dir) 51 | 52 | # XXX to be verified 53 | tr = 2. 
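# NB: parse_onset_file (defined above) expects one trial per line, made of 2 to 4
# space-separated fields,
#     <condition> <onset> [<duration> [<amplitude>]]
# e.g. a hypothetical line "faces 12.5 0.5 1.0"; a missing duration defaults to 0.
# and a missing amplitude to 1. The re-writing loop below converts the SPM multimodal
# .mat timing files into exactly this 4-field format.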
54 | drift_model = 'Cosine' 55 | hrf_model = 'spm + derivative' 56 | hfcut = 128. 57 | time_units = "tr" # default if 1 58 | if time_units == "tr": 59 | time_units = tr 60 | 61 | # re-write onset files into compatible format 62 | for sess in range(2): 63 | trials = getattr(subject_data, "trials_ses%i" % (sess + 1)) 64 | fd = open(trials.split(".")[0] + ".txt", 'w') 65 | timing = scipy.io.loadmat(trials, squeeze_me=True, struct_as_record=False) 66 | onsets = np.hstack(timing['onsets']) 67 | durations = np.hstack(timing['durations']) 68 | amplitudes = np.ones_like(onsets) 69 | conditions = [list(timing['names'][i:i + 1]) * len(timing['onsets'][i]) 70 | for i in range(len(timing['names']))] 71 | conditions = np.hstack(conditions) 72 | assert len(amplitudes) == len(onsets) == len(durations) == len(conditions) 73 | for condition, onset, duration, amplitude in zip(conditions, onsets, 74 | durations, amplitudes): 75 | fd.write("%s %s %s %s\r\n" % (condition, onset, duration, amplitude)) 76 | fd.close() 77 | 78 | output_dir = "" 79 | anat_wildcard = 'sMRI/smri.img' 80 | session_1_onset = "fMRI/trials_ses1.txt" 81 | session_1_func = "fMRI/Session1/fMETHODS-0005-00*.img" 82 | session_2_onset = "fMRI/trials_ses2.txt" 83 | session_2_func = "fMRI/Session2/fMETHODS-0006-00*.img" 84 | 85 | subject_dirs = sorted(glob.glob("%s/sub*" % data_dir)) 86 | session_onset_wildcards = [session_1_onset, session_2_onset] 87 | session_func_wildcards = [session_1_func, session_2_func] 88 | 89 | 90 | def do_subject_glm(subject_data): 91 | """FE analysis for a single subject.""" 92 | subject_id = subject_data['subject_id'] 93 | output_dir = subject_data["output_dir"] 94 | func_files = subject_data['func'] 95 | anat = subject_data['anat'] 96 | onset_files = subject_data['onset'] 97 | # subject_id = os.path.basename(subject_dir) 98 | # subject_output_dir = os.path.join(output_dir, subject_id) 99 | mem = Memory(os.path.join(output_dir, "cache")) 100 | if not os.path.exists(output_dir): 101 | os.makedirs(output_dir) 102 | 103 | # glob files: anat, session func files, session onset files 104 | # anat = glob.glob(os.path.join(subject_dir, anat_wildcard)) 105 | # assert len(anat) == 1 106 | # anat = anat[0] 107 | # onset_files = sorted([glob.glob(os.path.join(subject_dir, session))[0] 108 | # for session in session_onset_wildcards]) 109 | # func_files = sorted([sorted(glob.glob(os.path.join(subject_dir, session))) 110 | # for session in session_func_wildcards]) 111 | 112 | ### Preprocess data ####################################################### 113 | if 0: 114 | subject_data = mem.cache(do_subject_preproc)( 115 | dict(func=func_files, anat=anat, output_dir=output_dir)) 116 | func_files = subject_data['func'] 117 | anat = subject_data['anat'] 118 | 119 | # reslice func images 120 | func_files = [mem.cache(reslice_vols)( 121 | sess_func, 122 | target_affine=nibabel.load(sess_func[0]).affine) 123 | for sess_func in func_files] 124 | 125 | ### GLM: loop on (session_bold, onse_file) pairs over the various sessions 126 | design_matrices = [] 127 | for session, (func_file, onset_file) in enumerate(zip(func_files, 128 | onset_files)): 129 | if isinstance(func_file, str): 130 | bold = nibabel.load(func_file) 131 | else: 132 | if len(func_file) == 1: 133 | func_file = func_file[0] 134 | bold = nibabel.load(func_file) 135 | assert len(bold.shape) == 4 136 | n_scans = bold.shape[-1] 137 | del bold 138 | else: 139 | n_scans = len(func_file) 140 | frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) 141 | conditions, onsets, 
durations, amplitudes = parse_onset_file( 142 | onset_file) 143 | onsets *= tr 144 | durations *= tr 145 | paradigm = BlockParadigm(con_id=conditions, onset=onsets, 146 | duration=durations, amplitude=amplitudes) 147 | design_matrices.append(make_dmtx(frametimes, 148 | paradigm, hrf_model=hrf_model, 149 | drift_model=drift_model, 150 | hfcut=hfcut)) 151 | 152 | # specify contrasts 153 | n_columns = len(design_matrices[0].names) 154 | contrasts = {} 155 | for i in range(paradigm.n_conditions): 156 | contrasts['%s' % design_matrices[0].names[2 * i] 157 | ] = np.eye(n_columns)[2 * i] 158 | 159 | # more interesting contrasts 160 | contrasts['faces-scrambled'] = contrasts['faces' 161 | ] - contrasts['scrambled'] 162 | contrasts['scrambled-faces'] = -contrasts['faces-scrambled'] 163 | contrasts['effects_of_interest'] = contrasts['faces' 164 | ] + contrasts['scrambled'] 165 | 166 | # effects of interest F-test 167 | diff_contrasts = [] 168 | for i in range(paradigm.n_conditions - 1): 169 | a = contrasts[design_matrices[0].names[2 * i]] 170 | b = contrasts[design_matrices[0].names[2 * (i + 1)]] 171 | diff_contrasts.append(a - b) 172 | contrasts["diff"] = diff_contrasts 173 | 174 | # fit GLM 175 | print('Fitting a GLM (this takes time)...') 176 | fmri_glm = FMRILinearModel([nibabel.concat_images(sess_func, 177 | check_affines=False) 178 | for sess_func in func_files], 179 | [design_matrix.matrix 180 | for design_matrix in design_matrices], 181 | mask='compute' 182 | ) 183 | fmri_glm.fit(do_scaling=True, model='ar1') 184 | 185 | # save computed mask 186 | mask_path = os.path.join(output_dir, "mask.nii.gz") 187 | 188 | print("Saving mask image %s" % mask_path) 189 | nibabel.save(fmri_glm.mask, mask_path) 190 | 191 | # compute contrasts 192 | z_maps = {} 193 | effects_maps = {} 194 | for contrast_id, contrast_val in contrasts.items(): 195 | print("\tcontrast id: %s" % contrast_id) 196 | if np.ndim(contrast_val) > 1: # 2D contrast (several rows) => F-test 197 | contrast_type = "F" 198 | else: # 1D contrast vector => t-test 199 | contrast_type = "t" 200 | z_map, t_map, effects_map, var_map = fmri_glm.contrast( 201 | [contrast_val] * 2, 202 | con_id=contrast_id, 203 | contrast_type=contrast_type, 204 | output_z=True, 205 | output_stat=True, 206 | output_effects=True, 207 | output_variance=True 208 | ) 209 | 210 | # store stat maps to disk 211 | for map_type, out_map in zip(['z', 't', 'effects', 'variance'], 212 | [z_map, t_map, effects_map, var_map]): 213 | map_dir = os.path.join( 214 | output_dir, '%s_maps' % map_type) 215 | if not os.path.exists(map_dir): 216 | os.makedirs(map_dir) 217 | map_path = os.path.join( 218 | map_dir, '%s.nii.gz' % contrast_id) 219 | print("\t\tWriting %s ..."
% map_path) 220 | nibabel.save(out_map, map_path) 221 | 222 | # collect zmaps for contrasts we're interested in 223 | if map_type == 'z': 224 | z_maps[contrast_id] = map_path 225 | if map_type == 'effects': 226 | effects_maps[contrast_id] = map_path 227 | 228 | return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask 229 | 230 | 231 | if __name__ == "__main__": 232 | mem = Memory(os.path.join(output_dir, "cache")) 233 | first_level_glms = map(mem.cache(do_subject_glm), subject_dirs) 234 | 235 | # plot stats (per subject) 236 | import matplotlib.pyplot as plt 237 | from nilearn.plotting import plot_stat_map 238 | all_masks = [] 239 | all_effects_maps = [] 240 | for (subject_id, anat, effects_maps, z_maps, 241 | contrasts, mask) in first_level_glms: 242 | all_masks.append(mask) 243 | anat_img = nibabel.load(anat) 244 | z_map = nibabel.load(z_maps.values()[0]) 245 | all_effects_maps.append(effects_maps) 246 | for contrast_id, z_map in z_maps.items(): 247 | plot_stat_map(z_map, black_bg=True, threshold=2.3, 248 | title="%s: %s" % (subject_id, contrast_id)) 249 | plt.savefig("%s_%s.png" % (subject_id, contrast_id)) 250 | --------------------------------------------------------------------------------