├── doc
│   └── MA-MAGeTBrain-Schematic.png
├── bin
│   ├── mb_vote.sh
│   ├── mb_multiatlas_vote.sh
│   ├── validation
│   │   ├── multiatlas_cv_collect.sh
│   │   ├── multiatlas_subject_cv_collect.sh
│   │   ├── nfold_cv_collect.sh
│   │   ├── nfold_subject_cv_collect.sh
│   │   ├── multiatlas_cv_setup.sh
│   │   ├── nfold_cv_setup.sh
│   │   ├── multiatlas_subject_cv_setup.sh
│   │   ├── loo_cv_setup.sh
│   │   ├── stratified_nfold_cv_setup.py
│   │   └── nfold_subject_cv_setup.sh
│   ├── mb_multiatlas_resample.sh
│   ├── mb_antsRegistrationSyN.sh
│   ├── mb_antsRegistrationBSplineSyN.sh
│   ├── activate
│   ├── mb_resample.sh
│   ├── mb_register.sh
│   ├── mb_qc.sh
│   ├── simpleitk_weighted_vote.py
│   ├── mb.sh
│   ├── ants_generate_iterations.py
│   ├── mb_header.sh
│   └── mb_stages.sh
├── LICENSE
└── README.md

/doc/MA-MAGeTBrain-Schematic.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/CoBrALab/antsRegistration-MAGeT/HEAD/doc/MA-MAGeTBrain-Schematic.png
--------------------------------------------------------------------------------
/bin/mb_vote.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #mb_vote.sh labelname subject [candidate label files...]
3 | if [[ -n ${__mb_debug:-} ]]; then
4 | set -x
5 | fi
6 | set -euo pipefail
7 | 
8 | export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${THREADS_PER_COMMAND:-$(nproc)}
9 | 
10 | labelname=$1
11 | subject=$2
12 | 
13 | shift 2
14 | 
15 | subjectname=$(basename ${subject})
16 | subjectext=$(echo ${subjectname} | grep -i -o -E '(\.mnc|\.nii|\.nii\.gz|\.nrrd)')
17 | 
18 | labelname=$(echo ${labelname} | sed -r 's/(\.mnc|\.nii|\.nii\.gz|\.nrrd)//g')
19 | 
20 | ImageMath 3 /tmp/${subjectname}_${labelname}${subjectext} MajorityVoting "$@"
21 | ConvertImage 3 /tmp/${subjectname}_${labelname}${subjectext} output/labels/majorityvote/${subjectname}_${labelname}${subjectext} 1
22 | 
23 | rm -f /tmp/${subjectname}_${labelname}${subjectext}
24 | 
--------------------------------------------------------------------------------
/bin/mb_multiatlas_vote.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #mb_multiatlas_vote.sh labelname subject [candidate label files...]
3 | if [[ -n ${__mb_debug:-} ]]; then
4 | set -x
5 | fi
6 | set -euo pipefail
7 | 
8 | export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${THREADS_PER_COMMAND:-$(nproc)}
9 | 
10 | labelname=$1
11 | subject=$2
12 | 
13 | shift 2
14 | 
15 | subjectname=$(basename ${subject})
16 | subjectext=$(echo ${subjectname} | grep -i -o -E '(\.mnc|\.nii|\.nii\.gz|\.nrrd)')
17 | 
18 | labelname=$(echo ${labelname} | sed -r 's/(\.mnc|\.nii|\.nii\.gz|\.nrrd)//g')
19 | 
20 | ImageMath 3 /tmp/${subjectname}_${labelname}${subjectext} MajorityVoting "$@"
21 | ConvertImage 3 /tmp/${subjectname}_${labelname}${subjectext} output/multiatlas/labels/majorityvote/${subjectname}_${labelname}${subjectext} 1
22 | 
23 | rm -f /tmp/${subjectname}_${labelname}${subjectext}
24 | 
--------------------------------------------------------------------------------
/bin/validation/multiatlas_cv_collect.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #Script to generate csv file of LabelOverlapMeasures from a multiatlas n-fold run
3 | #Run with multiatlas_cv_collect.sh [output.csv] [fold-prefix] [targetdir]
4 | set -euo pipefail
5 | 
6 | if [[ ${3:-} ]]
7 | then
8 | targetdir=$3
9 | else
10 | targetdir=.
11 | fi
12 | 
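# Illustrative sketch of the per-label command the loop below emits for GNU
# parallel (filenames hypothetical; the ground-truth label path is an
# assumption, not taken from this script):
#   LabelOverlapMeasures 3 input/atlas/sub01_label.mnc \
#     ${dir}/output/multiatlas/labels/majorityvote/sub01_t1.mnc_label.mnc \
#     | tail -n +2 | awk -vT="sub01_t1.mnc_label.mnc,9,0,1," '{ print T $0 }' >> ${output}
13 | if [[ !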
-n $1 ]] 14 | then 15 | echo "Output file not specified" 16 | exit 1 17 | fi 18 | 19 | output=$1 20 | echo "file,atlases,templates,fold,Label,Total/Target,Jaccard,Dice,VolumeSimilarity,FalseNegative,FalsePositive" > $output 21 | for dir in $targetdir/NFOLD_multiatlas/${2}* 22 | do 23 | atlases=$(basename $dir | grep -Eho '[0-9]+atlases' | sed 's/atlases//g') 24 | fold=$(basename $dir | grep -Eho 'fold[0-9]+' | sed 's/fold//g') 25 | for label in $dir/output/multiatlas/labels/majorityvote/*.mnc 26 | do 27 | cat <(tail -n +2) | awk -vT="$(basename $label),$atlases,0,$fold," '{ print T \$0 }' >> $output 29 | EOT 30 | done | parallel -v 31 | done 32 | -------------------------------------------------------------------------------- /bin/validation/multiatlas_subject_cv_collect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Script to generate csv file of LabelOverlapMeasures from a multiatlas_subject_cv run 3 | #Run with multiatlas_subject_cv_collect.sh 4 | set -euo pipefail 5 | 6 | if [[ $3 ]] 7 | then 8 | targetdir=$3 9 | else 10 | targetdir=. 11 | fi 12 | 13 | if [[ ! -n $1 ]] 14 | then 15 | echo "Output file not specified" 16 | exit 1 17 | fi 18 | 19 | output=$1 20 | echo "file,atlases,templates,fold,Label,Total/Target,Jaccard,Dice,VolumeSimilarity,FalseNegative,FalsePositive" > $output 21 | for dir in $targetdir/NFOLD_multiatlas_subject/${2}* 22 | do 23 | atlases=$(basename $dir | grep -Eho '[0-9]+atlases' | sed 's/atlases//g') 24 | fold=$(basename $dir | grep -Eho 'fold[0-9]+' | sed 's/fold//g') 25 | for label in $dir/*/output/multiatlas/labels/majorityvote/*.mnc 26 | do 27 | cat <(tail -n +2) | awk -vT="$(basename $label),$atlases,0,$fold," '{ print T \$0 }' >> $output 29 | EOT 30 | done | parallel -v 31 | done 32 | -------------------------------------------------------------------------------- /bin/validation/nfold_cv_collect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Script to generate csv file of LabelOverlapMeasures from a n-fold CV run 3 | #Run with nfold_cv_collect.sh 4 | set -euo pipefail 5 | 6 | if [[ $3 ]] 7 | then 8 | targetdir=$3 9 | else 10 | targetdir=. 11 | fi 12 | 13 | if [[ ! -n $1 ]] 14 | then 15 | echo "Output file not specified" 16 | exit 1 17 | fi 18 | 19 | output=$1 20 | echo "file,atlases,templates,fold,Label,Total/Target,Jaccard,Dice,VolumeSimilarity,FalseNegative,FalsePositive" > $output 21 | for dir in ${targetdir}/NFOLDCV/${2}* 22 | do 23 | atlases=$(basename $dir | grep -Eho '[0-9]+atlases' | sed 's/atlases//g') 24 | templates=$(basename $dir | grep -Eho '[0-9]+templates' | sed 's/templates//g') 25 | fold=$(basename $dir | grep -Eho 'fold[0-9]+' | sed 's/fold//g') 26 | for label in $dir/output/labels/majorityvote/*.mnc 27 | do 28 | cat <(tail -n +2) | awk -vT="$(basename $label),$atlases,$templates,$fold," '{ print T \$0 }' >> $output 30 | EOT 31 | done | parallel -v 32 | done 33 | -------------------------------------------------------------------------------- /bin/validation/nfold_subject_cv_collect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Script to generate csv file of LabelOverlapMeasures from a n-fold CV run 3 | #Run with nfold_subject_cv_collect.sh 4 | set -euo pipefail 5 | 6 | if [[ $3 ]] 7 | then 8 | targetdir=$3 9 | else 10 | targetdir=. 11 | fi 12 | 13 | if [[ ! 
-n $1 ]] 14 | then 15 | echo "Output file not specified" 16 | exit 1 17 | fi 18 | 19 | output=$1 20 | echo "file,atlases,templates,fold,Label,Total/Target,Jaccard,Dice,VolumeSimilarity,FalseNegative,FalsePositive" > $output 21 | for dir in ${targetdir}/NFOLDCV_subject/${2}* 22 | do 23 | atlases=$(basename $dir | grep -Eho '[0-9]+atlases' | sed 's/atlases//g') 24 | templates=$(basename $dir | grep -Eho '[0-9]+templates' | sed 's/templates//g') 25 | fold=$(basename $dir | grep -Eho 'fold[0-9]+' | sed 's/fold//g') 26 | for label in $dir/*/output/labels/majorityvote/*.mnc 27 | do 28 | cat <(tail -n +2) | awk -vT="$(basename $label),$atlases,$templates,$fold," '{ print T \$0 }' >> $output 30 | EOT 31 | done | parallel -v 32 | done 33 | -------------------------------------------------------------------------------- /bin/mb_multiatlas_resample.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #mb_resample.sh labelname atlasname templatename subjectname 3 | if [[ -n ${__mb_debug:-} ]]; then 4 | set -x 5 | fi 6 | set -euo pipefail 7 | 8 | export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${THREADS_PER_COMMAND:-$(nproc)} 9 | 10 | labelname=$1 11 | atlas=$2 12 | template=$3 13 | 14 | if [[ -n ${__mb_fast:-} ]]; then 15 | __mb_float="--float 1" 16 | else 17 | __mb_float="--float 0" 18 | fi 19 | 20 | atlasname=$(basename $atlas) 21 | templatename=$(basename $template) 22 | 23 | #Transforms are applied like matrix algebra, last transform on the command line is applied first 24 | antsApplyTransforms -d 3 ${__mb_float} ${MB_VERBOSE:-} --interpolation GenericLabel -r ${template} \ 25 | -i $(echo $atlas | sed -r 's/(t1|T1w|t2|T2w).*//g')${labelname} \ 26 | -o /tmp/${atlasname}-${templatename}-${labelname} \ 27 | -t output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}1_NL.xfm \ 28 | -t output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}0_GenericAffine.xfm 29 | 30 | ConvertImage 3 /tmp/${atlasname}-${templatename}-${labelname} output/multiatlas/labels/candidates/${templatename}/${atlasname}-${templatename}-${labelname} 1 31 | rm -f /tmp/${atlasname}-${templatename}-${labelname} 32 | -------------------------------------------------------------------------------- /bin/mb_antsRegistrationSyN.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Registration method based on the defaults of the antsRegistrationSyN.sh script from the main distro 3 | if [[ -n ${__mb_debug:-} ]]; then 4 | set -x 5 | fi 6 | set -euo pipefail 7 | 8 | movingfile=$1 9 | fixedfile=$2 10 | outputdir=$3 11 | 12 | if [[ -n ${__mb_fast:-} ]]; then 13 | __mb_float="--float 1" 14 | __mb_syn_metric="--metric Mattes[${fixedfile},${movingfile},1,256,None,1]" 15 | else 16 | __mb_syn_metric="--metric CC[${fixedfile},${movingfile},1,4]" 17 | __mb_float="--float 0" 18 | fi 19 | 20 | antsRegistration --dimensionality 3 ${__mb_float} ${MB_VERBOSE:-} --minc \ 21 | --output [${outputdir}/$(basename $movingfile)-$(basename $fixedfile)] \ 22 | --winsorize-image-intensities [0.005,0.995] --use-histogram-matching 0 \ 23 | --initial-moving-transform [${fixedfile},${movingfile},1] \ 24 | --transform Rigid[0.1] --metric Mattes[${fixedfile},${movingfile},1,32,Regular,0.25] --convergence [1000x500x250x100,1e-6,10] --shrink-factors 8x4x2x1 --smoothing-sigmas 3x2x1x0vox \ 25 | --transform Affine[0.1] --metric Mattes[${fixedfile},${movingfile},1,32,Regular,0.25] --convergence [1000x500x250x100,1e-6,10] 
--shrink-factors 8x4x2x1 --smoothing-sigmas 3x2x1x0vox \
26 | --transform SyN[0.1,3,0] ${__mb_syn_metric} --convergence [100x70x50x20,1e-6,10] --shrink-factors 8x4x2x1 --smoothing-sigmas 3x2x1x0vox
27 | 
--------------------------------------------------------------------------------
/bin/mb_antsRegistrationBSplineSyN.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #Registration method based on the defaults of the antsRegistrationSyN.sh script with BSplineSyN from the main distro
3 | if [[ -n ${__mb_debug:-} ]]; then
4 | set -x
5 | fi
6 | set -euo pipefail
7 | 
8 | movingfile=$1
9 | fixedfile=$2
10 | outputdir=$3
11 | 
12 | if [[ -n ${__mb_fast:-} ]]; then
13 | __mb_float="--float 1"
14 | __mb_syn_metric="--metric Mattes[${fixedfile},${movingfile},1,256,None,1]"
15 | else
16 | __mb_syn_metric="--metric CC[${fixedfile},${movingfile},1,4]"
17 | __mb_float="--float 0"
18 | fi
19 | 
20 | antsRegistration --dimensionality 3 ${__mb_float} ${MB_VERBOSE:-} --minc \
21 | --output [$outputdir/$(basename $movingfile)-$(basename $fixedfile)] \
22 | --winsorize-image-intensities [0.005,0.995] --use-histogram-matching 0 \
23 | --initial-moving-transform [$fixedfile,$movingfile,1] \
24 | --transform Rigid[0.1] --metric Mattes[$fixedfile,$movingfile,1,32,Regular,0.25] --convergence [1000x500x250x100,1e-6,10] --shrink-factors 8x4x2x1 --smoothing-sigmas 3x2x1x0vox \
25 | --transform Affine[0.1] --metric Mattes[$fixedfile,$movingfile,1,32,Regular,0.25] --convergence [1000x500x250x100,1e-6,10] --shrink-factors 8x4x2x1 --smoothing-sigmas 3x2x1x0vox \
26 | --transform BSplineSyN[0.1,26,0,3] ${__mb_syn_metric} --convergence [100x70x50x20,1e-6,10] --shrink-factors 8x4x2x1 --smoothing-sigmas 3x2x1x0vox
27 | 
--------------------------------------------------------------------------------
/bin/activate:
--------------------------------------------------------------------------------
1 | #
2 | # This file must be used with "source bin/activate" *from bash*
3 | # you cannot run it directly
4 | #
5 | # Shamelessly taken from the python virtual-env package
6 | 
7 | 
8 | _script_home() {
9 | (
10 | cd "$(dirname "${BASH_SOURCE[0]}")"/..; pwd
11 | )
12 | }
13 | 
14 | 
15 | unload_mb () {
16 | if [ -n "$_OLD_VIRTUAL_PATH" ] ; then
17 | PATH="$_OLD_VIRTUAL_PATH"
18 | export PATH
19 | unset _OLD_VIRTUAL_PATH
20 | fi
21 | 
22 | if [ -n "$_OLD_VIRTUAL_PYTHONPATH" ] ; then
23 | PYTHONPATH="$_OLD_VIRTUAL_PYTHONPATH"
24 | export PYTHONPATH
25 | unset _OLD_VIRTUAL_PYTHONPATH
26 | fi
27 | 
28 | # This should detect bash and zsh, which have a hash command that must
29 | # be called to get it to forget past commands. Without forgetting
30 | # past commands the $PATH changes we made may not be respected
31 | if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
32 | hash -r
33 | fi
34 | 
35 | unset MB_ENV
36 | if [ ! "$1" = "nondestructive" ] ; then
37 | # Self destruct!
38 | unset -f unload_mb
39 | fi
40 | }
41 | 
42 | # unset irrelevant variables
43 | unload_mb nondestructive
44 | 
45 | MB_ENV=$(_script_home)
46 | export MB_ENV
47 | 
48 | _OLD_VIRTUAL_PATH="$PATH"
49 | PATH="$MB_ENV/bin:$PATH"
50 | export PATH
51 | 
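# Usage sketch (install path hypothetical):
#   source /opt/antsRegistration-MAGeT/bin/activate   # prepends this repo's bin/ to PATH, exports MB_ENV
#   mb.sh -- run
#   unload_mb                                         # restores the previous PATH
52 | # This should detect bash and zsh, which have a hash command that must
53 | # be called to get it to forget past commands.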
Without forgetting 54 | # past commands the $PATH changes we made may not be respected 55 | if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then 56 | hash -r 57 | fi 58 | -------------------------------------------------------------------------------- /bin/validation/multiatlas_cv_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Generator for multi-atlas validation of MaGeT, shamelessly stolen from ealier script 3 | # Start by running a mb run with all inputs as atlases, templates and subjects, skip voting 4 | # This primes the pipeline with all the possible registrations 5 | # Then, run multiatlas_setup.sh 6 | # This shuffles the list of inputs and creates random samples satisftying 7 | # 8 | # Afterwards links into the directory the already processed transforms and candidate labels 9 | # Then all that is left is to run mb-multiatlas.sh in each directory to complete the voting stage 10 | set -euo pipefail 11 | 12 | nfolds=$1 13 | natlases=$2 14 | pool=(input/atlas/*t1.mnc) 15 | 16 | if [[ $3 ]] 17 | then 18 | targetdir=$3 19 | else 20 | targetdir=. 21 | fi 22 | 23 | for fold in $(seq ${nfolds}) 24 | do 25 | #Shuffle inputs in a random list using sort 26 | pool=($(printf "%s\n" "${pool[@]}" | sort -R)) 27 | #Since list is now random, slice array according to numbers provided before 28 | atlases=("${pool[@]:0:${natlases}}") 29 | templates=("${pool[@]:${natlases}}") 30 | 31 | #Setup folders for random run 32 | folddir=${targetdir}/NFOLD_multiatlas/${natlases}atlases_fold${fold} 33 | mkdir -p ${folddir}/input/{atlas,template} 34 | mkdir -p ${folddir}/output/multiatlas/labels/majorityvote 35 | 36 | #Link in precomputed transforms and candidate labels 37 | ln -s "$(readlink -f output/transforms)" ${folddir}/output/transforms 38 | ln -s "$(readlink -f output/multiatlas/labels/candidates)" ${folddir}/output/multiatlas/labels/candidates 39 | 40 | #Do a trick of replacing _t1.mnc with * to allow bash expansion to include all label files 41 | tmp=("${atlases[@]/_t1.mnc/*}") 42 | ln -s ${tmp[@]} ${folddir}/input/atlas 43 | ln -s "${templates[@]}" ${folddir}/input/template 44 | (cd ${folddir}; mb.sh -- multiatlas-vote) 45 | done 46 | 47 | multiatlas_cv_collect.sh ${natlases}atlases_0templates.csv ${natlases}atlases ${targetdir} && rm -rf ${targetdir}/NFOLD_multiatlas/${natlases}atlases_fold* 48 | -------------------------------------------------------------------------------- /bin/mb_resample.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #mb_resample.sh labelname atlasname templatename subjectname 3 | if [[ -n ${__mb_debug:-} ]]; then 4 | set -x 5 | fi 6 | set -euo pipefail 7 | 8 | export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${THREADS_PER_COMMAND:-$(nproc)} 9 | 10 | labelname=$1 11 | atlas=$2 12 | template=$3 13 | subject=$4 14 | subjectname=$(basename ${subject}) 15 | atlasname=$(basename ${atlas}) 16 | templatename=$(basename ${template}) 17 | 18 | if [[ -n ${__mb_fast:-} ]]; then 19 | __mb_float="--float 1" 20 | else 21 | __mb_float="--float 0" 22 | fi 23 | 24 | #Check for subjectname == $templatename, if so, we skipped that registration, so don't apply those transforms 25 | if [[ ${subjectname} == "${templatename}" ]] 26 | then 27 | antsApplyTransforms -d 3 ${__mb_float} ${MB_VERBOSE:-} --interpolation GenericLabel -r ${subject} \ 28 | -i $(echo ${atlas} | sed -r 's/(t1|T1w|t2|T2w).*//g')${labelname} \ 29 | -o /tmp/${atlasname}-${templatename}-${subjectname}-${labelname} \ 30 | -t 
output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}1_NL.xfm \ 31 | -t output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}0_GenericAffine.xfm 32 | else 33 | #Transforms are applied like matrix algebra, last transform on the command line is applied first 34 | antsApplyTransforms -d 3 ${__mb_float} ${MB_VERBOSE:-} --interpolation GenericLabel -r ${subject} \ 35 | -i $(echo ${atlas} | sed -r 's/(t1|T1w|t2|T2w).*//g')${labelname} \ 36 | -o /tmp/${atlasname}-${templatename}-${subjectname}-${labelname} \ 37 | -t output/transforms/template-subject/${subjectname}/${templatename}-${subjectname}1_NL.xfm \ 38 | -t output/transforms/template-subject/${subjectname}/${templatename}-${subjectname}0_GenericAffine.xfm \ 39 | -t output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}1_NL.xfm \ 40 | -t output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}0_GenericAffine.xfm 41 | fi 42 | 43 | ConvertImage 3 /tmp/${atlasname}-${templatename}-${subjectname}-$labelname output/labels/candidates/${subjectname}/${atlasname}-${templatename}-${subjectname}-$labelname 1 44 | rm -f /tmp/${atlasname}-${templatename}-${subjectname}-${labelname} 45 | -------------------------------------------------------------------------------- /bin/validation/nfold_cv_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Cross validator for MaGeT, shamelessly stolen from ealier script 3 | # Start by running a ``mb.sh -- atlas template resample`` run with all inputs 4 | # as atlases, templates and subjects. This primes the pipeline with all the 5 | # possible candidate labels. 6 | # Then, run or submit a job of nfold_cv_setup.sh 7 | # This shuffles the list of inputs and creates random samples satisftying 8 | # and and links into the directory the already 9 | # processed transforms and candidate labels 10 | # Then ``mb.sh -- vote`` is run for each fold, producing final labels 11 | # Finally, the collection script is done and the folds are cleaned up 12 | set -euo pipefail 13 | 14 | nfolds=$1 15 | natlases=$2 16 | ntemplates=$3 17 | pool=(input/atlas/*t1.mnc) 18 | 19 | if [[ $4 ]] 20 | then 21 | targetdir=$4 22 | else 23 | targetdir=. 
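# Example invocation (fold counts hypothetical): after priming the pipeline
# with ``mb.sh -- atlas template resample`` as described in the header above,
# generate 10 folds of 9 atlases and 21 templates each:
#   nfold_cv_setup.sh 10 9 21 /scratch/validation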
24 | fi 25 | 26 | for fold in $(seq ${nfolds}) 27 | do 28 | #Shuffle inputs in a random list using sort 29 | pool=($(printf "%s\n" "${pool[@]}" | sort -R)) 30 | #Since list is now random, slice array according to numbers provided before 31 | atlases=("${pool[@]:0:${natlases}}") 32 | subjects=("${pool[@]:${natlases}}") 33 | templates=("${subjects[@]:0:${ntemplates}}") 34 | 35 | #Setup folders for random run 36 | folddir=${targetdir}/NFOLDCV/${natlases}atlases_${ntemplates}templates_fold${fold} 37 | mkdir -p ${folddir}/input/{atlas,template,subject} 38 | mkdir -p ${folddir}/output/labels/majorityvote 39 | 40 | #Link in precomputed transforms and candidate labels 41 | ln -s "$(readlink -f output/transforms)" ${folddir}/output/transforms 42 | ln -s "$(readlink -f output/labels/candidates)" ${folddir}/output/labels/candidates 43 | 44 | #Do a trick of replacing _t1.mnc with * to allow bash expansion to include all label files 45 | tmp=("${atlases[@]/_t1.mnc/*}") 46 | cp -l ${tmp[@]} ${folddir}/input/atlas 47 | cp -l "${templates[@]}" ${folddir}/input/template 48 | cp -l "${subjects[@]}" ${folddir}/input/subject 49 | (cd ${folddir}; mb.sh -- vote) 50 | done 51 | 52 | nfold_cv_collect.sh ${natlases}atlases_${ntemplates}templates.csv ${natlases}atlases_${ntemplates}templates ${targetdir} && rm -rf ${targetdir}/NFOLDCV/${natlases}atlases_${ntemplates}templates_fold* 53 | -------------------------------------------------------------------------------- /bin/validation/multiatlas_subject_cv_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Generator for multi-atlas validation of MaGeT, shamelessly stolen from ealier script 3 | # Start by running a mb run with all inputs as atlases, templates and subjects, skip voting 4 | # This primes the pipeline with all the possible registrations 5 | # Then, run multiatlas_setup.sh 6 | # This shuffles the list of inputs and creates random samples satisftying 7 | # 8 | # Afterwards links into the directory the already processed transforms and candidate labels 9 | # Then all that is left is to run mb-multiatlas.sh in each directory to complete the voting stage 10 | set -euo pipefail 11 | 12 | nfolds=$1 13 | natlases=$2 14 | origpool=(input/atlas/*t1.mnc) 15 | 16 | if [[ $3 ]] 17 | then 18 | targetdir=$3 19 | else 20 | targetdir=. 
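# Example invocation (numbers hypothetical): 5 folds of 9 atlases each,
# written under the current directory:
#   multiatlas_subject_cv_setup.sh 5 9 .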
21 | fi 22 | 23 | i=0 24 | for subject in "${origpool[@]}" 25 | do 26 | subjectname=$(basename $subject) 27 | echo ${subjectname} 28 | 29 | 30 | pool=( "${origpool[@]::$i}" "${origpool[@]:$((i+1))}" ) 31 | 32 | for fold in $(seq ${nfolds}) 33 | do 34 | #Shuffle inputs in a random list using sort 35 | pool=($(printf "%s\n" "${pool[@]}" | sort -R)) 36 | #Since list is now random, slice array according to numbers provided before 37 | atlases=("${pool[@]:0:${natlases}}") 38 | #templates=("${pool[@]:$natlases}") 39 | 40 | #Setup folders for random run 41 | folddir=${targetdir}/NFOLD_multiatlas_subject/${natlases}atlases_fold${fold}/${subjectname} 42 | mkdir -p ${folddir}/input/{atlas,template} 43 | mkdir -p ${folddir}/output/multiatlas/labels/majorityvote 44 | 45 | #Link in precomputed transforms and candidate labels 46 | ln -s "$(readlink -f output/transforms)" ${folddir}/output/transforms 47 | ln -s "$(readlink -f output/multiatlas/labels/candidates)" ${folddir}/output/multiatlas/labels/candidates 48 | 49 | #Do a trick of replacing _t1.mnc with * to allow bash expansion to include all label files 50 | tmp=("${atlases[@]/_t1.mnc/*}") 51 | ln -s ${tmp[@]} ${folddir}/input/atlas 52 | ln -s ${subject} ${folddir}/input/template 53 | (cd ${folddir}; mb.sh -- multiatlas-vote) 54 | done 55 | ((i++)) 56 | 57 | done 58 | multiatlas_subject_cv_collect.sh ${natlases}atlases_0templates.csv ${natlases}atlases ${targetdir} && rm -rf ${targetdir}/NFOLD_multiatlas_subject/${natlases}atlases_fold* 59 | -------------------------------------------------------------------------------- /bin/validation/loo_cv_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Generator for leave-one-out cross validation of MaGeT, shamelessly stolen from ealier script 3 | # Start by running a ``mb.sh -- template subject resample`` with 4 | # - high res (original atlases) as atlases with labels 5 | # - subsampled atlases as templates (plus possibly other similar templates from a control set) 6 | # - subsampled atlases as subjects (must have same name as atlas files) 7 | # This primes the pipeline with all the possible registrations 8 | # Then, run LOOCV_setup.sh 9 | # This shuffles the atlases to create magetbrain runs with an odd number of atlases with a given atlas left out 10 | # Afterwards links into the directory the already processed transforms and candidate labels 11 | # Then all that is left is to run mb.sh in each directory to complete the voting stage 12 | set -euo pipefail 13 | 14 | pool=(input/atlas/*t1.mnc) 15 | templates=(input/template/*t1.mnc) 16 | 17 | if [[ $4 ]] 18 | then 19 | targetdir=$4 20 | else 21 | targetdir=. 
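# Bash array-slicing sketch, the trick used throughout this script to drop
# element i from an array:
#   pool=(a b c d); i=1
#   rest=( "${pool[@]::$i}" "${pool[@]:$((i+1))}" )   # -> (a c d)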
22 | fi
23 | 
24 | i=0
25 | for leaveout in "${pool[@]}"
26 | do
27 | #Create a slice of array missing item i
28 | remaining_atlases=( "${pool[@]::$i}" "${pool[@]:$((i+1))}" )
29 | 
30 | #Loop through remaining atlases, create all combinations nCm for odd m
31 | for j in $(seq 0 $((${#remaining_atlases[@]} - 1)))
32 | do
33 | #Generate array with missing oddmaker item using slicing again
34 | oddarray=( "${remaining_atlases[@]::$j}" "${remaining_atlases[@]:$((j+1))}" )
35 | #Call this a fold and create directory for it
36 | folddir=${targetdir}/LOOCV/$(basename $leaveout)/fold$j
37 | mkdir -p ${folddir}/input/{atlas,template,subject}
38 | mkdir -p ${folddir}/output/labels/majorityvote
39 | #Link in precomputed transforms and candidate labels
40 | ln -s "$(readlink -f output/transforms)" ${folddir}/output/transforms
41 | ln -s "$(readlink -f output/labels/candidates)" ${folddir}/output/labels/candidates
42 | #Do a trick of replacing _t1.mnc with * to allow bash expansion to include all label files
43 | tmp=("${oddarray[@]/_t1.mnc/*}")
44 | cp -l ${tmp[@]} ${folddir}/input/atlas
45 | #Link in all templates
46 | cp -l "${templates[@]}" ${folddir}/input/template
47 | #Do a quick clever rewrite of the atlas file left out to its corresponding subject file named identically
48 | cp -l ${leaveout/atlas/subject} ${folddir}/input/subject
49 | (cd ${folddir}; mb.sh -- vote)
50 | done
51 | i=$((i+1))
52 | done
53 | 
--------------------------------------------------------------------------------
/bin/validation/stratified_nfold_cv_setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from sklearn.cross_validation import StratifiedShuffleSplit,ShuffleSplit
3 | import numpy as np
4 | import pandas
5 | import os
6 | 
7 | folds=5
8 | num_atlases=2
9 | num_templates=2
10 | inputs = pandas.read_csv("list.csv")
11 | groups = inputs.iloc[:,1]
12 | 
13 | try:
14 |     sss_atlas = StratifiedShuffleSplit(y = groups, n_iter=folds, train_size=num_atlases, test_size=len(inputs)-num_atlases)
15 | except ValueError:
16 |     sss_atlas = ShuffleSplit(len(groups), n_iter=folds, train_size=num_atlases, test_size=len(inputs)-num_atlases)
17 | 
18 | for atlas_index, test_index in sss_atlas:
19 |     print("Atlases:", len(atlas_index))
20 |     atlases = inputs.iloc[atlas_index,0]
21 |     test = inputs.iloc[test_index,0]
22 |     test_types = inputs.iloc[test_index,1]
23 |     try:
24 |         sss_templates = StratifiedShuffleSplit(y = test_types, n_iter=1, train_size=num_templates, test_size=len(test_types)-num_templates)
25 |     except ValueError:
26 |         sss_templates = ShuffleSplit(len(test_types), n_iter=1, train_size=num_templates, test_size=len(test_types)-num_templates)
27 |     for template_index, subject_index in sss_templates:
28 |         print("Templates:", len(template_index), "Subjects:", len(subject_index))
29 |         templates = test.iloc[template_index]
30 |         subjects = test.iloc[subject_index]
31 |         #This code guarantees that the number of atlases and templates requested is given
32 |         #As a result it slightly breaks the stratification, so we do it in a random fashion
33 |         while len(atlases) < num_atlases:
34 |             randid = np.random.randint(len(subjects))
35 |             atlases = atlases.append(subjects[randid:randid+1])
36 |             subjects.drop(subjects.index[randid], inplace=True)
37 |         while len(atlases) > num_atlases:
38 |             randid = np.random.randint(len(atlases))
39 |             subjects = subjects.append(atlases[randid:randid+1])
40 |             atlases.drop(atlases.index[randid], inplace=True)
41 |         while len(templates) < num_templates:
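#         Split sketch (pre-0.18 sklearn API, as imported at the top of this
#         file): the splitter objects are iterable and yield paired index
#         arrays, e.g.
#             sss = StratifiedShuffleSplit(y=groups, n_iter=1, train_size=2,
#                                          test_size=len(groups) - 2)
#             for train_idx, test_idx in sss:  # train -> atlases, test -> the rest
#                 ...
42 |             randid =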
np.random.randint(len(subjects)) 43 | templates = templates.append(subjects[randid:randid+1]) 44 | subjects.drop(subjects.index[randid], inplace=True) 45 | while len(templates) > num_templates: 46 | randid = np.random.randint(len(templates)) 47 | subjects = subjects.append(templates[randid:randid+1]) 48 | templates.drop(templates.index[randid], inplace=True) 49 | print len(atlases) 50 | print len(templates) 51 | print len(subjects) 52 | print "Atlases:" 53 | for atlas in atlases: 54 | print atlas 55 | # print "Templates:" 56 | # for template in templates: 57 | # print template 58 | # print "Subjects:" 59 | # for subject in subjects: 60 | # print subject 61 | -------------------------------------------------------------------------------- /bin/validation/nfold_subject_cv_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Cross validator for MaGeT, shamelessly stolen from ealier script 3 | # Compared to nfold_cv_setup.sh, this script does subject-wise n-fold validation 4 | # each subject gets n-folds of atlas/template populations 5 | # Start by running a ``mb.sh -- atlas template resample`` run with all inputs 6 | # as atlases, templates and subjects. This primes the pipeline with all the 7 | # possible candidate labels 8 | # Then, run or submit a job of 9 | # nfold_subject_cv_setup.sh 10 | # This shuffles the list of inputs and creates random samples satisftying 11 | # and and links into the directory the already 12 | # processed transforms and candidate labels 13 | # Then ``mb.sh -- vote`` is run for each fold, producing final labels 14 | # Finally, the collection script is done and the folds are cleaned up 15 | set -euo pipefail 16 | 17 | nfolds=$1 18 | natlases=$2 19 | ntemplates=$3 20 | origpool=(input/atlas/*t1.mnc) 21 | 22 | if [[ $4 ]] 23 | then 24 | targetdir=$4 25 | else 26 | targetdir=. 
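# Example invocation (numbers hypothetical): 10 folds of 9 atlases and
# 21 templates for each left-out subject:
#   nfold_subject_cv_setup.sh 10 9 21 .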
27 | fi 28 | 29 | i=0 30 | for subject in "${origpool[@]}" 31 | do 32 | echo ${subject} 33 | 34 | subjectname=$(basename ${subject}) 35 | if [[ -d ${targetdir}/NFOLDCV_subject/${natlases}atlases_${ntemplates}templates_fold/${subjectname} ]] 36 | then 37 | ((i++)) 38 | continue 39 | fi 40 | 41 | pool=( "${origpool[@]::$i}" "${origpool[@]:$((i+1))}" ) 42 | 43 | for fold in $(seq ${nfolds}) 44 | do 45 | #Shuffle inputs in a random list using sort 46 | pool=($(printf "%s\n" "${pool[@]}" | sort -R)) 47 | #Since list is now random, slice array according to numbers provided before 48 | atlases=("${pool[@]:0:${natlases}}") 49 | #subjects=("${pool[@]:$natlases}") 50 | templates=("${pool[@]:${natlases}:${ntemplates}}") 51 | 52 | #Setup folders for random run 53 | folddir=${targetdir}/NFOLDCV_subject/${natlases}atlases_${ntemplates}templates_fold${fold}/${subjectname} 54 | mkdir -p ${folddir}/input/{atlas,template,subject} 55 | mkdir -p ${folddir}/output/labels/majorityvote 56 | 57 | #Link in precomputed transforms and candidate labels 58 | ln -s "$(readlink -f output/transforms)" ${folddir}/output/transforms 59 | ln -s "$(readlink -f output/labels/candidates)" ${folddir}/output/labels/candidates 60 | 61 | #Do a trick of replacing _t1.mnc with * to allow bash expansion to include all label files 62 | tmp=("${atlases[@]/_t1.mnc/*}") 63 | ln -s ${tmp[@]} ${folddir}/input/atlas 64 | ln -s "${templates[@]}" ${folddir}/input/template 65 | ln -s ${subject} ${folddir}/input/subject 66 | (cd ${folddir}; mb.sh -- vote) 67 | done 68 | ((i++)) 69 | 70 | done 71 | 72 | nfold_subject_cv_collect.sh ${natlases}atlases_${ntemplates}templates_subject.csv ${natlases}atlases_${ntemplates}templates ${targetdir} && rm -rf ${targetdir}/NFOLDCV_subject/${natlases}atlases_${ntemplates}templates_fold* 73 | -------------------------------------------------------------------------------- /bin/mb_register.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [[ -n ${__mb_debug:-} ]]; then 3 | set -x 4 | fi 5 | set -euo pipefail 6 | export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=${THREADS_PER_COMMAND:-$(nproc)} 7 | export ITK_USE_THREADPOOL=1 8 | export ITK_GLOBAL_DEFAULT_THREADER=Pool 9 | 10 | tmpdir=$(mktemp -d) 11 | 12 | movingfile1=$1 13 | fixedfile1=$2 14 | outputdir=$3 15 | shift 3 16 | labelfiles=( "$@" ) 17 | 18 | movingmask="NOMASK" 19 | fixedmask="NOMASK" 20 | 21 | ext=$(basename ${movingfile1} | grep -o -E '(.mnc|.nii|.nii.gz|.nrrd|.hdr)') 22 | fixed_minimum_resolution=$(python -c "print(min([abs(x) for x in [float(x) for x in \"$(PrintHeader ${fixedfile1} 1)\".split(\"x\")]]))") 23 | moving_minimum_resolution=$(python -c "print(min([abs(x) for x in [float(x) for x in \"$(PrintHeader ${movingfile1} 1)\".split(\"x\")]]))") 24 | fixed_maximum_resolution=$(python -c "print(max([ a*b for a,b in zip([abs(x) for x in [float(x) for x in \"$(PrintHeader ${fixedfile1} 1)\".split(\"x\")]],[abs(x) for x in [float(x) for x in \"$(PrintHeader ${fixedfile1} 2)\".split(\"x\")]])]))") 25 | 26 | if [[ -n ${__mb_fast:-} ]]; then 27 | __mb_float="--float 1" 28 | __mb_syn_metric="--metric Mattes[${fixedfile1},${movingfile1},1,32,None]" 29 | else 30 | __mb_syn_metric="--metric CC[${fixedfile1},${movingfile1},1,4,None]" 31 | __mb_float="--float 0" 32 | fi 33 | 34 | if ((${#labelfiles[@]} > 0)); then 35 | #Flatten all the label files into a mask 36 | cp ${labelfiles[0]} ${tmpdir}/mergedmask${ext} 37 | if ((${#labelfiles[@]} > 1)); then 38 | for label in "${labelfiles[@]}"; do 39 | ImageMath 
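# (merge step continues on the next line: my reading of ImageMath's addtozero
#  op is that it adds the new label volume only where the accumulating mask is
#  still zero, so earlier labels win on overlap)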
3 ${tmpdir}/mergedmask${ext} addtozero ${tmpdir}/mergedmask${ext} ${label} 40 | done 41 | fi 42 | #Binarize the labels 43 | ThresholdImage 3 ${tmpdir}/mergedmask${ext} ${tmpdir}/mergedmask${ext} 0.5 inf 1 0 44 | #Morphologically pad the labelmask 3mm radius 45 | iMath 3 ${tmpdir}/movinglabelmask${ext} MD ${tmpdir}/mergedmask${ext} $(python -c "print(3.0/${moving_minimum_resolution})") 1 ball 1 46 | movingmask=${tmpdir}/movinglabelmask${ext} 47 | fi 48 | 49 | if [[ ! -s ${outputdir}/$(basename ${movingfile1})-$(basename ${fixedfile1})0_GenericAffine.xfm ]] 50 | then 51 | antsRegistration --dimensionality 3 ${__mb_float} ${MB_VERBOSE:-} --minc \ 52 | --output [${outputdir}/$(basename ${movingfile1})-$(basename ${fixedfile1})] \ 53 | --initial-moving-transform [${fixedfile1},${movingfile1},1] \ 54 | $(eval echo $(ants_generate_iterations.py --min ${fixed_minimum_resolution} --max ${fixed_maximum_resolution} --output multilevel-halving --convergence 1e-7)) 55 | fi 56 | 57 | if [[ (! -s ${outputdir}/$(basename ${movingfile1})_labelmask${ext} ) && ( ${movingmask} != "NOMASK" ) ]]; then 58 | antsApplyTransforms -d 3 ${__mb_float} -i ${movingmask} -o ${outputdir}/$(basename ${movingfile1})_labelmask${ext} \ 59 | -t ${outputdir}/$(basename ${movingfile1})-$(basename ${fixedfile1})0_GenericAffine.xfm -r ${fixedfile1} \ 60 | -n GenericLabel 61 | fixedmask=${outputdir}/$(basename ${movingfile1})_labelmask${ext} 62 | fi 63 | 64 | nonlinear_steps=$(ants_generate_iterations.py --min ${fixed_minimum_resolution} --max ${fixed_maximum_resolution}) 65 | antsRegistration --dimensionality 3 ${__mb_float} ${MB_VERBOSE:-} --minc \ 66 | --output [${outputdir}/$(basename ${movingfile1})-$(basename ${fixedfile1})] \ 67 | --initial-moving-transform ${outputdir}/$(basename ${movingfile1})-$(basename ${fixedfile1})0_GenericAffine.xfm \ 68 | --transform "SyN[0.25,3,0]" \ 69 | ${__mb_syn_metric} \ 70 | $(eval echo ${nonlinear_steps}) \ 71 | --masks [${fixedmask},${movingmask}] 72 | 73 | rm -rf ${tmpdir} 74 | -------------------------------------------------------------------------------- /bin/mb_qc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #mb_qc.sh imagefile labelfile outputdir 3 | if [[ -n ${__mb_debug:-} ]]; then 4 | set -x 5 | fi 6 | set -euo pipefail 7 | 8 | imagefile=$1 9 | labelfile=$2 10 | outputdir=$3 11 | tmpdir=$(mktemp -d) 12 | 13 | LUT=$(cat < 0: 56 | if len(opt.weights) != len(opt.input_labels): 57 | sys.exit("Weights provided not equal to number of input labels") 58 | 59 | # load volumes from input files 60 | labelimg_list = [] # list of candidate segmentation images 61 | 62 | # use this to verify if the voxel-wise computations make sense 63 | def check_metadata(img, metadata, filename): 64 | if img.GetSize() != metadata["size"]: 65 | sys.exit( 66 | "Size of {0} not the same as {1}".format(filename, opt.input_labels[0]) 67 | ) 68 | elif img.GetOrigin() != metadata["origin"]: 69 | sys.exit( 70 | "Origin of {0} not the same as {1}".format( 71 | filename, opt.input_labels[0] 72 | ) 73 | ) 74 | elif img.GetSpacing() != metadata["spacing"]: 75 | sys.exit( 76 | "Spacing of {0} not the same as {1}".format( 77 | filename, opt.input_labels[0] 78 | ) 79 | ) 80 | elif img.GetDirection() != metadata["direction"]: 81 | sys.exit( 82 | "Direction of {0} not the same as {1}".format( 83 | filename, opt.input_labels[0] 84 | ) 85 | ) 86 | 87 | for filename in opt.input_labels: 88 | if opt.verbose: 89 | print("Reading labels from {}...".format(filename)) 90 | 91 
| # get all the candidate segmentations
92 |     labelimg = sitk.ReadImage(filename, sitk.sitkUInt32)
93 | 
94 |     structure = labelimg > 0  # find the structural voxels
95 |     label_shape_analysis = sitk.LabelShapeStatisticsImageFilter()
96 |     label_shape_analysis.SetBackgroundValue(0)
97 |     label_shape_analysis.Execute(structure)
98 |     b = label_shape_analysis.GetBoundingBox(1)  # get the bounding box
99 | 
100 |     if len(labelimg_list) == 0:
101 |         metadata = {}  # get the metadata of the first image
102 |         metadata["size"] = labelimg.GetSize()
103 |         metadata["origin"] = labelimg.GetOrigin()
104 |         metadata["spacing"] = labelimg.GetSpacing()
105 |         metadata["direction"] = labelimg.GetDirection()
106 | 
107 |         # get the first bounding box
108 |         bbox = [b[0], b[1], b[2], b[0] + b[3], b[1] + b[4], b[2] + b[5]]
109 | 
110 |     else:  # check that the metadata is the same for each other image
111 |         check_metadata(labelimg, metadata, filename)
112 | 
113 |         new_bbox = (b[0], b[1], b[2], b[0] + b[3], b[1] + b[4], b[2] + b[5])
114 |         for i in range(0, 3):  # for each minimum bounding box index
115 |             if new_bbox[i] < bbox[i]:
116 |                 bbox[i] = new_bbox[i]  # keep the new minimum
117 |         for i in range(3, 6):  # for each maximum bounding box index
118 |             if new_bbox[i] > bbox[i]:
119 |                 bbox[i] = new_bbox[i]  # keep the new maximum
120 | 
121 |     labelimg_list.append(labelimg)
122 | 
123 | if opt.verbose:
124 |     print("Computing weighted votes...")
125 | 
126 | nimg = len(labelimg_list)
127 | for n, img in enumerate(labelimg_list):
128 |     if opt.verbose:
129 |         print("Processing image {}, {} of {}".format(opt.input_labels[n], n+1, nimg))
130 | 
131 |     label_array = sitk.GetArrayFromImage(img)[
132 |         bbox[2] : bbox[5], bbox[1] : bbox[4], bbox[0] : bbox[3]
133 |     ]
134 | 
135 |     if n == 0:
136 |         label_values = np.unique(label_array)  # obtain the list of labels
137 |         votes = np.zeros(
138 |             (
139 |                 label_values.shape[0],
140 |                 label_array.shape[0],
141 |                 label_array.shape[1],
142 |                 label_array.shape[2],
143 |             ),
144 |             dtype=np.float64)
145 |     # make sure that they are the same in each image
146 |     elif np.asarray(np.unique(label_array) != label_values).any():
147 |         warn(
148 |             "Labels in image {0} not the same as in image {1}.".format(
149 |                 opt.input_labels[n], opt.input_labels[0]
150 |             )
151 |         )
152 | 
153 |     for i, value in enumerate(label_values):
154 |         # count the votes for each label
155 |         votes[i][np.where(label_array == value)] += opt.weights[n]
156 | 
157 | mode = np.argmax(votes, axis=0)  # find the majority votes (largest accumulated weight wins)
158 | if opt.probabilities:
159 |     probability = votes / np.sum(opt.weights)  # Find probability maps
160 | labels = np.zeros(votes[0].shape, dtype=np.uint32)  # array of labels
161 | 
162 | for i, value in enumerate(label_values.tolist()):
163 |     # assign the majority vote to all voxels
164 |     labels[np.where(mode == i)] = value
165 | 
166 | labels = np.pad(
167 |     labels,
168 |     (
169 |         (bbox[2], labelimg.GetDepth() - bbox[5]),
170 |         (bbox[1], labelimg.GetHeight() - bbox[4]),
171 |         (bbox[0], labelimg.GetWidth() - bbox[3]),
172 |     ),
173 |     "constant",
174 |     constant_values=0,
175 | )
176 | 
177 | if opt.verbose:
178 |     print("Writing output labels to {}...".format(opt.output))
179 | 
180 | output_image = sitk.GetImageFromArray(labels)
181 | output_image.CopyInformation(labelimg)  # copy the metadata
182 | 
183 | # save the result to the output file
184 | sitk.WriteImage(output_image, opt.output, True)
185 | 
186 | if opt.probabilities:
187 |     for i, value in enumerate(label_values.tolist()):
188 |         if value != 0:
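# Self-contained weighted-vote sketch (toy arrays, not pipeline data): with
# argmax, the label with the largest accumulated weight wins at each voxel:
#   import numpy as np
#   raters  = np.array([[1, 2], [1, 1], [2, 2]])   # 3 candidate maps, 2 voxels
#   weights = [0.5, 0.3, 0.2]
#   values  = np.unique(raters)
#   votes   = np.array([sum(w * (r == v) for w, r in zip(weights, raters))
#                       for v in values])
#   out     = values[np.argmax(votes, axis=0)]     # -> array([1, 2])
189 |             # pad this label's probability map back out to all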
voxels 190 | probability_map = np.pad( 191 | probability[i], 192 | ( 193 | (bbox[2], labelimg.GetDepth() - bbox[5]), 194 | (bbox[1], labelimg.GetHeight() - bbox[4]), 195 | (bbox[0], labelimg.GetWidth() - bbox[3]), 196 | ), 197 | "constant", 198 | constant_values=0, 199 | ) 200 | output_image = sitk.GetImageFromArray(probability_map) 201 | output_image.CopyInformation(labelimg) # copy the metadata 202 | if opt.verbose: 203 | print( 204 | "Writing probability map to {}...".format( 205 | opt.output.rsplit(".nii")[0].rsplit(".mnc")[0] 206 | + "_" 207 | + str(value) 208 | + ".mnc" 209 | ) 210 | ) 211 | # save the result to the output file 212 | sitk.WriteImage( 213 | output_image, 214 | opt.output.rsplit(".nii")[0].rsplit(".mnc")[0] 215 | + "_" 216 | + str(value) 217 | + ".mnc", 218 | True, 219 | ) 220 | -------------------------------------------------------------------------------- /bin/mb.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | shopt -s extglob 3 | shopt -s nullglob 4 | 5 | export LANG=C 6 | export LANGUAGE=C 7 | export LC_CTYPE=C 8 | export LC_NUMERIC=C 9 | export LC_TIME=C 10 | export LC_COLLATE=C 11 | export LC_MONETARY=C 12 | export LC_MESSAGES=C 13 | export LC_PAPER=C 14 | export LC_NAME=C 15 | export LC_ADDRESS=C 16 | export LC_TELEPHONE=C 17 | export LC_MEASUREMENT=C 18 | export LC_IDENTIFICATION=C 19 | export LC_ALL=C 20 | 21 | #Setup some extra environment settings 22 | export QBATCH_SCRIPT_FOLDER="output/.qbatch/" 23 | 24 | read -r -d '' __usage <<-'EOF' || true # exits non-zero when EOF encountered 25 | -s --subject [arg] Specific subject files to process. 26 | -t --template [arg] Specific template files to process. 27 | -v --verbose Enable verbose mode for all scripts. 28 | -d --debug Enables debug mode. 29 | -h --help This help page. 30 | -n --dry-run Don't submit any jobs. Useful with debug above. 31 | -r --reg-command [arg] Provide an alternative registration command. Default="mb_register.sh" 32 | -m --mem-factor [arg] Scaling factor for memory estimates. Default="1.10" 33 | -w --walltime-factor [arg] Scaling factor for time estimates. Default="1.10" 34 | -l --label-masking Use atlas labels to focus registration. 35 | -f --fast Use float for math and Mattes for SyN reg. 36 | EOF 37 | 38 | read -r -d '' __helptext <<-'EOF' || true # exits non-zero when EOF encountered 39 | MAGeTBrain implementation using ANTs 40 | Supports MINC and NIFTI input files (ANTs must be built with MINC support) 41 | 42 | Invocation: mb.sh [options] -- [stage 1] [stage 2] ... [stage N] 43 | 44 | Standard stages: template, subject, resample, vote, run (template, subject, resample, vote, qc) 45 | Multiatlas stages: multiatlas-resample, multiatlas-vote, multiatlas (template, multiatlas-resample, multiatlas-vote) 46 | Other stages: init, status, cleanup 47 | Multiple commands will run multiple stages. Order is not checked. 
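
Example: mb.sh -- run (same as running with no stages given)
Example: mb.sh -f -s input/subject/sub01_t1.mnc -- resample vote (filenames hypothetical)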
48 | EOF 49 | 50 | # shellcheck source=mb_header.sh 51 | source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/mb_header.sh" 52 | # shellcheck source=mb_stages.sh 53 | source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/mb_stages.sh" 54 | 55 | ### Command-line argument switches (like -d for debugmode, -h for showing help) 56 | ############################################################################## 57 | 58 | # debug mode 59 | if [ "${arg_d:?}" = "1" ]; then 60 | set -o xtrace 61 | LOG_LEVEL="7" 62 | export __mb_debug=1 63 | fi 64 | 65 | # verbose mode 66 | if [[ "${arg_v:?}" = "1" ]]; then 67 | #set -o verbose 68 | export MB_VERBOSE='--verbose' 69 | else 70 | export MB_VERBOSE='' 71 | fi 72 | 73 | # dry-run mode 74 | if [[ "${arg_n:?}" = "1" ]]; then 75 | __mb_dryrun='-n' 76 | else 77 | __mb_dryrun='' 78 | fi 79 | 80 | # label masking 81 | if [[ "${arg_l:?}" = "1" ]]; then 82 | __mb_label_masking='1' 83 | else 84 | __mb_label_masking='' 85 | fi 86 | 87 | # fast mode 88 | if [[ "${arg_f:?}" = "1" ]]; then 89 | export __mb_fast='1' 90 | else 91 | export __mb_fast='' 92 | fi 93 | 94 | # help mode 95 | if [[ "${arg_h:?}" = "1" ]]; then 96 | # Help exists with code 1 97 | help "Help using ${0}" 98 | fi 99 | 100 | __memory_scaling_factor=${arg_m} 101 | __walltime_scaling_factor=${arg_w} 102 | 103 | ### Runtime 104 | ############################################################################## 105 | 106 | function cleanup_before_exit () { 107 | info "Cleaning up. Done" 108 | } 109 | trap cleanup_before_exit EXIT 110 | 111 | #All jobs are prefixed with a date-time in ISO format(to the minute) so you can submit multiple jobs at once 112 | __datetime=T$(date -u +%F_%H-%M-%S) 113 | 114 | #If the commandlist is empty, assume the command is "run" 115 | if [[ $# -lt 1 ]] 116 | then 117 | commandlist="run" 118 | else 119 | commandlist=( "$@" ) 120 | fi 121 | 122 | if [[ ${commandlist[*]} =~ "init" ]] 123 | then 124 | stage_init && exit 0 125 | elif [[ ! 
( -d input/atlas && -d input/template && -d input/subject ) ]]
126 | then
127 | error "Error, input directories not found, run mb.sh -- init" && exit 1
128 | fi
129 | 
130 | #Collect a list of atlas/template/subject files, must be named _t1.(nii,nii.gz,mnc, hdr/img)
131 | atlases=( input/atlas/*_@(t1|T1w|t1w).@(nii|mnc|nii.gz|hdr) )
132 | 
133 | if [[ -n "${arg_s:-}" ]]
134 | then
135 | subjects=( ${arg_s} )
136 | info "Specific subject(s) specified ${subjects[*]}"
137 | else
138 | subjects=( input/subject/*_@(t1|T1w|t1w).@(nii|mnc|nii.gz|hdr) )
139 | fi
140 | 
141 | if [[ -n "${arg_t:-}" ]]
142 | then
143 | templates=( ${arg_t} )
144 | info "Specific template(s) specified ${templates[*]}"
145 | else
146 | templates=( input/template/*_@(t1|T1w|t1w).@(nii|mnc|nii.gz|hdr) )
147 | fi
148 | 
149 | models=( input/model/*_@(t1|T1w|t1w).@(nii|mnc|nii.gz|hdr) )
150 | 
151 | #Labels are figured out by looking at only the first atlas, and substituting t1 for label*
152 | labels=( $(echo ${atlases[0]} | sed -r 's/_(t1|T1w|t1w).*/_label\*/g' ) )
153 | labels=( $( for item in ${labels[@]} ; do echo $item ; done | sed 's/input.*label/label/g' || true ) )
154 | 
155 | #Sanity Check on inputs
156 | if (( ${#atlases[@]} == 0 ))
157 | then
158 | error "Zero atlases found, please check input/atlas/*_t1.[mnc, nii, nii.gz]" && exit 1
159 | fi
160 | 
161 | if (( ${#templates[@]} == 0 ))
162 | then
163 | error "Zero templates found, please check input/template/*_t1.[mnc, nii, nii.gz]" && exit 1
164 | fi
165 | 
166 | if (( ${#subjects[@]} == 0 ))
167 | then
168 | warning "Zero subjects found, please check input/subject/*_t1.[mnc, nii, nii.gz], this is okay if performing multiatlas"
169 | fi
170 | 
171 | if (( ${#atlases[@]} % 2 == 0 ))
172 | then
173 | warning "Even number of atlases detected, use an odd number to avoid tie label votes"
174 | fi
175 | 
176 | if (( ${#templates[@]} % 2 == 0 ))
177 | then
178 | warning "Even number of templates detected, use an odd number to avoid tie label votes"
179 | fi
180 | 
181 | 
182 | if (( $(find input/atlas -maxdepth 1 -name '*label*' | wc -l) % ${#atlases[@]} != 0 ))
183 | then
184 | error "Unbalanced number of label files vs atlases, please ensure one label per type per atlas" && exit 1
185 | fi
186 | 
187 | #Sanity check on Analyze files, check that a matching img file exists
188 | if [[ "${atlases[*]}" =~ "hdr" ]]
189 | then
190 | for atlas in "${atlases[@]}"
191 | do
192 | if [[ ! -s input/atlas/$(basename ${atlas} .hdr).img ]]
193 | then
194 | error "atlas ${atlas} is missing corresponding input/atlas/$(basename ${atlas} .hdr).img file"
195 | fi
196 | done
197 | fi
198 | 
199 | if [[ "${templates[*]}" =~ "hdr" ]]
200 | then
201 | for template in "${templates[@]}"
202 | do
203 | if [[ ! -s input/template/$(basename ${template} .hdr).img ]]
204 | then
205 | error "template ${template} is missing corresponding input/template/$(basename ${template} .hdr).img file"
206 | fi
207 | done
208 | fi
209 | 
210 | if [[ "${subjects[*]}" =~ "hdr" ]]
211 | then
212 | for subject in "${subjects[@]}"
213 | do
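# Filename sketch (names hypothetical): with extglob enabled at the top of
# this script, the globs above match e.g. input/atlas/sub01_t1.mnc or
# input/subject/sub02_T1w.nii.gz, with labels like input/atlas/sub01_label.mnc.
214 | if [[ !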
-s input/subject/$(basename ${subject} .hdr).img ]] 215 | then 216 | error "subject ${subject} is missing corresponding input/subject/$(basename ${subject} .hdr).img file" 217 | fi 218 | done 219 | fi 220 | 221 | #Alternative registration commands can be specified 222 | #Must accept $movingfile $fixedfile $outputprefix [labels] 223 | regcommand=${arg_r} 224 | 225 | #Create directories 226 | debug "Creating output directories" 227 | debug " output/transforms/atlas-template" 228 | debug " output/transforms/template-subject" 229 | debug " output/labels/candidates" 230 | debug " output/labels/majorityvote" 231 | debug " output/jobscripts" 232 | mkdir -p output/transforms/atlas-template 233 | mkdir -p output/transforms/template-subject 234 | mkdir -p output/labels/candidates 235 | mkdir -p output/labels/majorityvote 236 | mkdir -p output/jobscripts 237 | 238 | for subject in "${subjects[@]}" 239 | do 240 | debug "Creating output/labels/candidates/$(basename ${subject}) output/transforms/template-subject/$(basename ${subject})" 241 | mkdir -p output/labels/candidates/$(basename ${subject}) 242 | mkdir -p output/transforms/template-subject/$(basename ${subject}) 243 | done 244 | 245 | for template in "${templates[@]}" 246 | do 247 | debug "Creating output/transforms/atlas-template/$(basename ${template})" 248 | mkdir -p output/transforms/atlas-template/$(basename ${template}) 249 | done 250 | 251 | #Exit if status exists in command list, doesn't matter if other commands were listed 252 | [[ ${commandlist[*]} =~ "status" ]] && stage_status && exit 0 253 | 254 | echo ${__invocation} > output/jobscripts/${__datetime}-mb_run_command 255 | 256 | for stage in "${commandlist[@]}" 257 | do 258 | case ${stage} in 259 | *) 260 | stage_status 261 | ;;& 262 | template|subject|multiatlas|run) 263 | stage_estimate 264 | ;;& 265 | template|multiatlas|run) 266 | stage_register_atlas_template 267 | ;;& 268 | multiatlas|multiatlas-resample) 269 | stage_multiatlas_resample 270 | ;;& 271 | multiatlas|multiatlas-vote) 272 | stage_multiatlas_vote 273 | exit 0 274 | ;; 275 | subject|run) 276 | stage_register_template_subject 277 | ;;& 278 | resample|run) 279 | stage_resample 280 | ;;& 281 | vote|run) 282 | stage_vote 283 | ;;& 284 | qc|run) 285 | stage_qc 286 | exit 0 287 | ;; 288 | cleanup) 289 | stage_cleanup 290 | exit 0 291 | ;; 292 | template|multiatlas|multiatlas-resample|multiatlas-vote|subject|resample|vote|cleanup|run) 293 | #Catch the fall-through of case matching before erroring 294 | ;; 295 | *) 296 | error "Stage ${stage} not recognized" && help 297 | esac 298 | done 299 | -------------------------------------------------------------------------------- /bin/ants_generate_iterations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # This file generates steps of registration between two images and attempts to compensate 4 | # For ANTs' dependency on the resolution of the file 5 | 6 | # We do this by defining two scales to step over 7 | # blur_scale, which is the real-space steps in blurring we will do 8 | # shrink_scale, which is the subsampling scale that is 1/2 the fwhm blur scale, adjusted for file minimum resolution and max size 9 | 10 | from __future__ import division, print_function 11 | 12 | import argparse 13 | import math 14 | import sys 15 | 16 | def RepresentsInt(s): 17 | try: 18 | int(s) 19 | return True 20 | except ValueError: 21 | return False 22 | 23 | 24 | parser = argparse.ArgumentParser( 25 | 
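# Example (values hypothetical): for a 1.0 mm isotropic brain image ~200 mm
# across,
#   ants_generate_iterations.py --min 1.0 --max 200 --output multilevel-halving
# prints antsRegistration stage arguments (--transform/--metric/--convergence/
# --shrink-factors/--smoothing-sigmas) that mb_register.sh splices into its
# command line via $(eval echo ...).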
formatter_class=argparse.ArgumentDefaultsHelpFormatter) 26 | 27 | 28 | parser.add_argument( 29 | '--min', help='minimum resolution of fixed file (mm)', type=float, required=True) 30 | parser.add_argument( 31 | '--max', help='max size of fixed file (mm)', type=float, required=True) 32 | parser.add_argument( 33 | '--start-scale', help='set starting scale (mm), default calculated from max size', type=float) 34 | parser.add_argument( 35 | '--final-iterations', help='total number of iterations at lowest scale', type=int, default=25) 36 | parser.add_argument( 37 | '--output', help='type of output to generate', default='generic', 38 | choices=['generic', 'affine', 'modelbuild', 'twolevel_dbm', 'multilevel-halving', 'exhaustive-affine', 39 | 'lsq6', 'lsq9', 'lsq12', 'rigid', 'similarity']) 40 | parser.add_argument('--step-size', help='step mode for generation', default=1) 41 | parser.add_argument( 42 | '--convergence', help='set convergence for generated stages', default='1e-6') 43 | parser.add_argument( 44 | '--close', help='images are already close, skip large scales of pyramid for affine', action='store_true') 45 | parser.add_argument('--reg-pairs', help='number of pairs of input scans for affine output', default=1, type=int) 46 | 47 | args = parser.parse_args() 48 | 49 | # Setup inital inputs 50 | min_resolution = args.min 51 | max_size = args.max 52 | 53 | if RepresentsInt(args.step_size): 54 | step_size = int(args.step_size) 55 | elif args.step_size == "power2": 56 | step_size = args.step_size 57 | else: 58 | sys.exit("Unrecognized step size") 59 | 60 | # Make empty arrays 61 | shrinks = [] 62 | blurs = [] 63 | iterations = [] 64 | 65 | if args.output == "affine" or args.output == "multilevel-halving" and args.final_iterations == 25: 66 | args.final_iterations = 50 67 | 68 | # Converter 69 | fwhm_to_sigma = 2 * math.sqrt(2 * math.log(2)) 70 | 71 | # Inital resolution scaling 72 | if args.start_scale: 73 | start_shrink = args.start_scale / min_resolution 74 | else: 75 | start_shrink = max_size / 28 / min_resolution * 2 76 | 77 | max_shrink = max_size / min_resolution / 32 78 | 79 | if isinstance(step_size, int): 80 | for shrink_scale in range(int(round(start_shrink)), 0, -1 * step_size): 81 | shrinks.append( 82 | str(int(min(max_shrink , max(1.0, round(shrink_scale)))))) 83 | blurs.append(str(math.sqrt(((shrink_scale*min_resolution)**2.0 - min_resolution**2.0)/(2.0*math.sqrt(2*math.log(2.0)))**2))) 84 | iterations.append(str(min(500, int(args.final_iterations * 3**(max(0,shrink_scale - 1)))))) 85 | else: 86 | blur_scale = start_shrink * 2 * min_resolution 87 | shrink_scale = start_shrink 88 | while (blur_scale > 0.5 * min_resolution): 89 | shrinks.append( 90 | str(int(min(max_size / 32 / min_resolution, max(1.0, round(shrink_scale)))))) 91 | blurs.append(str(blur_scale / fwhm_to_sigma)) 92 | iterations.append(str(min(500, int(args.final_iterations * 3**(max(0,shrink_scale-1)))))) 93 | blur_scale = blur_scale / 2 94 | shrink_scale = shrink_scale / 2 95 | 96 | if args.output == 'exhaustive-affine': 97 | transforms = ["--transform Translation[ ", 98 | "--transform Rigid[ ", 99 | "--transform Similarity[ ", 100 | "--transform Affine[ "] 101 | gradient_steps = [ 0.5, 0.33437015, 0.2236068, 0.1 ] 102 | gradient_steps_repeat = [ 0.5, 0.33437015, 0.14953488, 0.1 ] 103 | masks = ["--masks [ NOMASK,NOMASK ]", 104 | "--masks [ NOMASK,NOMASK ]", 105 | "--masks [ NOMASK,NOMASK ]", 106 | "--masks [ ${fixedmask},${movingmask} ]" ] 107 | repeatmask = [ False, 108 | False, 109 | "--masks [ 
${fixedmask},${movingmask} ]", 110 | False ] 111 | 112 | for i, transform in enumerate(transforms): 113 | if args.close and i < 2: 114 | pass 115 | else: 116 | if i == len(transforms) - 1: 117 | print(transform + str(gradient_steps[i]) + " ]", end=' \\\n') 118 | for j in range(1, args.reg_pairs+1): 119 | print("\t--metric Mattes[ ${{fixedfile{j}}},${{movingfile{j}}},1,32,None ]".format(j=j), end=' \\\n') 120 | print("\t--convergence [ {},{},10 ]".format("x".join(iterations), args.convergence), end=' \\\n') 121 | print("\t--shrink-factors {}".format("x".join(shrinks)), end=' \\\n') 122 | print("\t--smoothing-sigmas {}mm".format("x".join(blurs)), end=' \\\n') 123 | print("\t" + masks[i], end=' ') 124 | else: 125 | print(transform + str(gradient_steps[i]) + " ]", end=' \\\n') 126 | for j in range(1, args.reg_pairs+1): 127 | print("\t--metric Mattes[ ${{fixedfile{j}}},${{movingfile{j}}},1,32,None ]".format(j=j), end=' \\\n') 128 | print("\t--convergence [ {},{},10 ]".format("x".join(iterations), args.convergence), end=' \\\n') 129 | print("\t--shrink-factors {}".format("x".join(shrinks)), end=' \\\n') 130 | print("\t--smoothing-sigmas {}mm".format("x".join(blurs)), end=' \\\n') 131 | print("\t" + masks[i], end=' \\\n') 132 | if repeatmask[i]: 133 | print(transform + str(gradient_steps[i]) + " ]", end=' \\\n') 134 | for j in range(1, args.reg_pairs+1): 135 | print("\t--metric Mattes[ ${{fixedfile{j}}},${{movingfile{j}}},1,32,None ]".format(j=j), end=' \\\n') 136 | print("\t--convergence [ {},{},10 ]".format("x".join(iterations), args.convergence), end=' \\\n') 137 | print("\t--shrink-factors {}".format("x".join(shrinks)), end=' \\\n') 138 | print("\t--smoothing-sigmas {}mm".format("x".join(blurs)), end=' \\\n') 139 | print("\t" + repeatmask[i], end=' \\\n') 140 | 141 | elif args.output == 'twolevel_dbm': 142 | print("--reg-iterations {}".format("x".join(iterations)), end=' \\\n') 143 | print("--reg-shrinks {}".format("x".join(shrinks)), end=' \\\n') 144 | print("--reg-smoothing {}mm".format("x".join(blurs)), end=' ') 145 | 146 | elif args.output == 'modelbuild': 147 | print("-q {}".format("x".join(iterations)), end=' \\\n') 148 | print("-f {}".format("x".join(shrinks)), end=' \\\n') 149 | print("-s {}mm".format("x".join(blurs)), end=' ') 150 | 151 | elif args.output == 'generic': 152 | print("--convergence [ {},{},10 ]".format("x".join(iterations), args.convergence), end=' \\\n') 153 | print("--shrink-factors {}".format("x".join(shrinks)), end=' \\\n') 154 | print("--smoothing-sigmas {}mm".format("x".join(blurs)), end=' ') 155 | 156 | else: 157 | if args.output in ["multilevel-halving", "affine", "lsq12"]: 158 | transforms = ["--transform Translation[ ", 159 | "--transform Rigid[ ", 160 | "--transform Similarity[ ", 161 | "--transform Affine[ "] 162 | elif args.output in ["lsq9","similarity"]: 163 | transforms = ["--transform Translation[ ", 164 | "--transform Rigid[ ", 165 | "--transform Similarity[ ", 166 | "--transform Similarity[ "] 167 | elif args.output in ["lsq6","rigid"]: 168 | transforms = ["--transform Translation[ ", 169 | "--transform Rigid[ ", 170 | "--transform Rigid[ ", 171 | "--transform Rigid[ "] 172 | gradient_steps = [ 0.5, 0.33437015, 0.2236068, 0.1 ] 173 | gradient_steps_repeat = [ 0.5, 0.33437015, 0.14953488, 0.1 ] 174 | repeatmask = [ False, 175 | False, 176 | "--masks [ ${fixedmask},${movingmask} ]", 177 | False ] 178 | masks = ["--masks [ NOMASK,NOMASK ]", 179 | "--masks [ NOMASK,NOMASK ]", 180 | "--masks [ NOMASK,NOMASK ]", 181 | "--masks [ ${fixedmask},${movingmask} 
]" ] 182 | slicestart = [ 0, 183 | int(round(0.25*len(blurs))), 184 | int(round(0.50*len(blurs))), 185 | int(round(0.75*len(blurs)))] 186 | sliceend = [ int(round(0.50*len(blurs))), 187 | int(round(0.75*len(blurs))), 188 | int(round(0.95*len(blurs))), 189 | -1] 190 | 191 | for i, transform in enumerate(transforms): 192 | if args.close and i < 2: 193 | pass 194 | else: 195 | if i == len(transforms) - 1: 196 | print(transform + str(gradient_steps[i]) + " ]", end=' \\\n') 197 | for j in range(1, args.reg_pairs+1): 198 | print("\t--metric Mattes[ ${{fixedfile{j}}},${{movingfile{j}}},1,64,None ]".format(j=j), end=' \\\n') 199 | print("\t--convergence [ {},{},10 ]".format("x".join(iterations[slicestart[i]:]), args.convergence), end=' \\\n') 200 | print("\t--shrink-factors {}".format("x".join(shrinks[slicestart[i]:])), end=' \\\n') 201 | print("\t--smoothing-sigmas {}mm".format("x".join(blurs[slicestart[i]:])), end=' \\\n') 202 | print("\t" + masks[i], end=' ') 203 | else: 204 | print(transform + str(gradient_steps[i]) + " ]", end=' \\\n') 205 | for j in range(1, args.reg_pairs+1): 206 | print("\t--metric Mattes[ ${{fixedfile{j}}},${{movingfile{j}}},1,32,None ]".format(j=j), end=' \\\n') 207 | print("\t--convergence [ {},{},10 ]".format("x".join(iterations[slicestart[i]:sliceend[i]]), args.convergence), end=' \\\n') 208 | print("\t--shrink-factors {}".format("x".join(shrinks[slicestart[i]:sliceend[i]])), end=' \\\n') 209 | print("\t--smoothing-sigmas {}mm".format("x".join(blurs[slicestart[i]:sliceend[i]])), end=' \\\n') 210 | print("\t" + masks[i], end=' \\\n') 211 | if repeatmask[i]: 212 | print(transform + str(gradient_steps_repeat[i]) + " ]", end=' \\\n') 213 | for j in range(1, args.reg_pairs+1): 214 | print("\t--metric Mattes[ ${{fixedfile{j}}},${{movingfile{j}}},1,32,None ]".format(j=j), end=' \\\n') 215 | print("\t--convergence [ {},{},10 ]".format("x".join(iterations[slicestart[i]:sliceend[i]]), args.convergence), end=' \\\n') 216 | print("\t--shrink-factors {}".format("x".join(shrinks[slicestart[i]:sliceend[i]])), end=' \\\n') 217 | print("\t--smoothing-sigmas {}mm".format("x".join(blurs[slicestart[i]:sliceend[i]])), end=' \\\n') 218 | print("\t" + repeatmask[i], end=' \\\n') 219 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | License 2 | 3 | The following license governs the use of this software in academic and educational environments. Commercial use requires a commercial license from CoBrALab , http://cobralab.ca 4 | 5 | ACADEMIC PUBLIC LICENSE 6 | 7 | Copyright (C) 2018 CoBrALab and Gabriel A. Devenyi 8 | 9 | Preamble 10 | 11 | This license contains the terms and conditions of using this software in noncommercial settings: at academic institutions for teaching and research use, and at non-profit research organizations. You will find that this license provides noncommercial users of this software with rights that are similar to the well-known GNU General Public License, yet it retains the possibility for this software authors to financially support the development by selling commercial licenses. 
In fact, if you intend to use this software in a "for-profit" environment, where research is conducted to develop or enhance a product, is used in a commercial service offering, or when a commercial company uses this software to participate in a research project (for example government-funded or EU-funded research projects), then you must obtain a commercial license for this software. In that case, please contact the Authors to inquire about commercial licenses. 12 | 13 | What are the rights given to noncommercial users? Similarly to the GPL, you have the right to use the software, to distribute copies, to receive source code, to change the software and distribute your modifications or the modified software. Also similarly to the GPL, if you distribute verbatim or modified copies of this software, they must be distributed under this license. 14 | 15 | By modelling the GPL, this license guarantees that you're safe when using this software in your work, for teaching or research. This license guarantees that this software will remain available free of charge for nonprofit use. You can modify this software to your purposes, and you can also share your modifications. Even in the unlikely case of the authors abandoning this software entirely, this license permits anyone to continue developing it from the last release, and to create further releases under this license. 16 | 17 | We believe that the combination of noncommercial open-source and commercial licensing will be beneficial for the whole user community, because income from commercial licenses will enable faster development and a higher level of software quality, while further enjoying the informal, open communication and collaboration channels of open source development. 18 | 19 | The precise terms and conditions for using, copying, distribution and modification follow. 20 | 21 | ACADEMIC PUBLIC LICENSE 22 | 23 | TERMS AND CONDITIONS FOR USE, COPYING, DISTRIBUTION AND MODIFICATION 24 | 25 | 0. Definitions 26 | 27 | "Program" means a copy of the code accompanying this licence, which is said to be distributed under this Academic Public License. 28 | 29 | "Work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) 30 | 31 | "Using the Program" means any act of creating executables that contain or directly use libraries that are part of the Program, running any of the tools that are part of the Program, or creating works based on the Program. 32 | 33 | Each licensee is addressed as "you". 34 | 35 | 1. Permission is hereby granted to use the Program free of charge for any noncommercial purpose, including teaching and research at universities, colleges and other educational institutions, research at non-profit research institutions, and personal non-profit purposes. For using the Program for commercial purposes, including but not restricted to consulting activities, design of commercial hardware or software networking products, and a commercial entity participating in research projects, you must contact the Author for an appropriate license. Permission is also granted to use the Program for a reasonably limited period of time for the purpose of evaluating its usefulness for a particular purpose. 36 | 37 | 2. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 38 | 39 | 3. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 2 above, provided that you also meet all of these conditions: 40 | 41 | a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 42 | b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 43 | These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose regulations for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. (If the same, independent sections are distributed as part of a package that is otherwise reliant on, or is based on the Program, then the distribution of the whole package, including but not restricted to the independent section, must be on the unmodified terms of this License, regardless of who the author of the included sections was.) 44 | 45 | Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based or reliant on the Program. 46 | 47 | In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of storage or distribution medium does not bring the other work under the scope of this License. 48 | 49 | 4. You may copy and distribute the Program (or a work based on it, under Section 3) in object code or executable form under the terms of Sections 2 and 3 above provided that you also do one of the following: 50 | 51 | a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 2 and 3 above on a medium customarily used for software interchange; or, 52 | b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 2 and 3 above on a medium customarily used for software interchange; or, 53 | c) Accompany it with the information you received as to the offer to distribute corresponding source code. 
(This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b) above.) 54 | The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 55 | 56 | If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 57 | 58 | 5. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 59 | 60 | 6. You are not required to accept this License, since you have not signed it. Nothing else grants you permission to modify or distribute the Program or its derivative works; law prohibits these actions if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License and all its terms and conditions for copying, distributing or modifying the Program or works based on it, to do so. 61 | 62 | 7. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 63 | 64 | 8. If, as a consequence of a court judgement or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. 
65 | 66 | If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 67 | 68 | 9. If the distribution and/or use of the Program are restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 69 | 70 | NO WARRANTY 71 | 72 | 10. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 73 | 74 | 11. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED ON IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 75 | 76 | END OF TERMS AND CONDITIONS 77 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MAGeTbrain Implementation using antsRegistration 2 | 3 | Implementation of Multiple Automatically Generated Templates brain segmentation 4 | algorithm [MAGeTbrain](https://github.com/CobraLab/MAGeTbrain) built upon 5 | [antsRegistration](https://github.com/stnava/ANTs) and the 6 | [qbatch](https://github.com/pipitone/qbatch) generic cluster submission system. 7 | 8 | ![Multi-atlas and MAGeT brain operation schematic](doc/MA-MAGeTBrain-Schematic.png "Schematic") 9 | 10 | ## Requirements 11 | 12 | - [bash](https://www.gnu.org/software/bash) version 3.0 or later 13 | 14 | - [python](https://www.python.org) version 2.7.x or later 15 | 16 | - [qbatch](https://github.com/pipitone/qbatch) git version 17 | 18 | - [ANTs](https://github.com/stnava/ANTs) with ITK_BUILD_MINC_SUPPORT 19 | 20 | 21 | MAGeTbrain is very computationally expensive, performing 22 | ``atlases*templates + templates*subjects`` linear and non-linear registrations. 23 | This produces large number of files of moderate file size. Typical subject 24 | pools produce outputs on the scale of 100's of GB. 25 | 26 | ## Principles of MAGeTbrain 27 | 28 | MAGeTbrain was developed to produce high-quality segmentations (labels) of 29 | anatomical areas in structural MRI volumes. 
It operates on the principle of
starting with a small number of high-quality, expertly segmented atlases and
transforming them onto a subject pool through an intermediate registration to a
"representative" subset of subjects called the template pool. Through
pair-wise atlas-template and template-subject registrations, the number of
candidate segmentations is increased while the template pool absorbs the
large-scale methodological and structural variation of the subject pool. After
all candidates have been produced, the resulting segmentations are fused via a
majority vote scheme.

## Best practices

MAGeTbrain accepts any expertly segmented MRI volume/label pairs as atlas
inputs (such as those available at for the
hippocampus, subcortical and cerebellar structures), or you may
provide your own for other structures. An odd number of atlases is strongly
recommended to avoid tie votes during the label fusion process.

Templates, which are a duplicated subset of the subject pool, should be
chosen to span the anatomical variability of the subject pool. Typical template
pool size is 21 subjects. Based on simulations of MAGeTbrain, additional
subjects beyond 21 provide minimal improvement in the final outputs. The size
of the template pool should again be an odd number to avoid voting ties.
For subject pools smaller than 21, include all subjects in the template
pool as well, maintaining an odd number if need be. File names for a subject
included in both the subject and template pool should be the same so that
MAGeTbrain can skip registering identical images together.

Typical application of MAGeTbrain is to preprocess input MRI volumes before
starting, to correct for bias fields and to crop excess non-head features.
One such recommended pipeline based on the MINC tools is available at
. Subject input volumes
should be corrected but otherwise remain in native (or otherwise
non-volumetrically deformed) space, in order to ensure label volumes provide
real-world measures.
Subject scans must all be oriented in the same manner for affine registration to
succeed. MAGeTbrain was tested and optimized on 1x1x1 mm isotropic MPRAGE
subject data. It has been used very successfully on higher-resolution data and
on other contrast types, but may require tweaking of the time/memory estimates.
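As a back-of-the-envelope illustration of the registration counts involved,
the following sketch uses hypothetical pool sizes; adjust them to your own
study:

```sh
atlases=5; templates=21; subjects=200; labels=1
echo "$(( atlases * templates )) atlas-template registrations"          # 105
echo "$(( templates * subjects )) template-subject registrations"       # 4200
echo "$(( atlases * templates * subjects * labels )) candidate labels"  # 21000
echo "$(( subjects * labels )) majority-vote fusions"                   # 200
# Subjects reused as templates are skipped as identity registrations,
# slightly reducing the template-subject count.
```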
## How to run antsRegistration-MAGeT on Niagara

```sh
> git clone https://github.com/CobraLab/antsRegistration-MAGeT.git
> module load cobralab/2019b
> source antsRegistration-MAGeT/bin/activate
> cd /path/to/my/working/Directory
> mb.sh -- init
2016-06-21 20:46:49 UTC [ info] Creating input/atlas input/template input/subject
2016-06-21 20:46:49 UTC [ info] Cleaning up. Done
# Copy atlas/template/subject files into input directories
> mb.sh -- run
2016-06-21 14:32:53 UTC [ info] Found:
2016-06-21 14:32:53 UTC [ info]   5 atlases in input/atlas
2016-06-21 14:32:53 UTC [ info]   1 labels per atlas in input/atlas
2016-06-21 14:32:53 UTC [ info]   13 templates in input/template
2016-06-21 14:32:53 UTC [ info]   13 subjects in input/subject
2016-06-21 14:32:53 UTC [ info]   0 models in input/models
2016-06-21 14:32:53 UTC [ info] Progress:
2016-06-21 14:32:53 UTC [ info]   0 of 65 atlas-template registrations completed
2016-06-21 14:32:53 UTC [ info]   0 of 156 template-subject registrations completed
2016-06-21 14:32:53 UTC [ info]   0 of 845 resample labels completed
2016-06-21 14:32:53 UTC [ info]   0 of 13 voted labels completed
2016-06-21 14:32:53 UTC [ info] Computing Atlas to Template Registrations
36648384[].gpc-sched-ib0
...
2016-06-21 14:32:55 UTC [ info] Computing Template to Subject Registrations
36648397[].gpc-sched-ib0
...
2016-06-21 14:32:58 UTC [ info] Computing Label Resamples
36648410.gpc-sched-ib0
...
2016-06-21 14:33:03 UTC [ info] Computing Votes
36648424.gpc-sched-ib0
...
2016-06-21 14:33:07 UTC [ info] Cleaning up. Done

# For options and individual stage control, see mb.sh --help
```

## MAGeTbrain Stages

MAGeTbrain runs in a number of stages, some of which can run in parallel and
others which must run after prior stages are completed. In addition, this
pipeline provides a number of utility stages to assist in running
MAGeTbrain. All of these stages can be run by invoking them after ``--`` in
your ``mb.sh`` call.

### Utility stages

- ``init`` - set up the input directory structure for MAGeTbrain

- ``status`` - display a status check counting work completed and work still
  to be done, then exit

- ``cleanup`` - create and submit a job to tar, compress and delete all
  intermediate files, for use after a successful run

### Standard stages

- ``template`` - register atlases to templates

- ``subject`` - register templates to subjects

- ``resample`` - transform candidate label files through the
  atlas-template-subject chain. Depends on completion of the ``template`` and
  ``subject`` stages

- ``vote`` - perform majority vote label fusion on candidate labels

- ``run`` - calculate and submit all standard stages

Stages manually specified on the command line do not check whether their
antecedent stages have completed successfully; this can result in undefined
behavior. If you specify stages manually, please ensure that antecedent stages
are complete.

Commands in a given stage of MAGeTbrain are deemed complete if their output
files exist; this means that if a pipeline was stopped at some point, it can
resume by examining the existing files. If input files are changed, be careful
to clean up old intermediate files.

### Multi-atlas stages

Multi-atlas mode in MAGeTbrain disables the "template" concept, resulting in
operation like a classic multi-atlas segmentation tool.
All subjects are ignored in this mode; instead, templates are treated as
subjects.

- ``multiatlas-resample`` - transform candidate label files through the
  atlas-template chain, treating templates as if they were subjects. Depends
  upon completion of the ``template`` stage

- ``multiatlas-vote`` - perform majority vote label fusion on template
  candidate labels

- ``multiatlas`` - perform the ``template``, ``multiatlas-resample`` and
  ``multiatlas-vote`` stages

Typical use of this mode is for verification against MAGeTbrain mode and for
manual "best" template selection. For template selection, include all your
subjects as templates, run ``multiatlas`` mode, then QC the resulting labels.
Choose the best-quality labels from the template pool and use those subjects
as your templates. Run MAGeTbrain as normal from there.
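A sketch of that template-selection workflow follows; the copy step and the
``.mnc`` extension are illustrative, only the ``mb.sh`` calls are actual entry
points:

```sh
# Offer every subject as a candidate template
cp input/subject/*_t1.mnc input/template/
mb.sh -- multiatlas
# QC the fused labels in output/multiatlas/labels/majorityvote/, then
# repopulate input/template with only the best-quality subjects
mb.sh -- run
```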
## Complex MAGeTbrain Runs

### Resolution

MAGeTbrain was originally designed and optimized for the case of CoBrALab's
0.3 mm isotropic atlases and 1 mm isotropic template/subject whole-brain
MRIs with T1 or T2 contrasts.

This new version of MAGeTbrain will attempt to compute walltime and memory
requirements for a given input resolution based on some empirical research,
plus a 15% safety factor for errors in estimates.

### Multi-spectral

Currently needs reimplementation

### ROI (masked) based registrations

Using a brain or ROI mask provides a number of potential benefits, including
improved registrations and reduced memory requirements, although these
benefits have not been thoroughly examined empirically.

Testing thus far has indicated masking is most effective when used to remove
non-brain tissues or areas outside the region of interest, rather than to
narrowly focus registrations. As such, it is recommended to skull-strip input
scans. Particular attention should be paid to cerebellar regions so as not to
lose any cerebellum volume after cropping.

If enabled with the "label masking" option ``-l``, atlas-to-template
registrations will use the input labels to focus the registration, reducing
runtime and possibly improving registration quality, depending upon anatomical
differences.

### Slabs

MAGeTbrain should successfully operate using slabs rather than whole brains as
inputs. It may benefit from an ROI mask defining the boundary of the slab, in
order to prevent non-linear registrations from wasting cycles and to avoid
possible sharp-edge effects on the smooth registration fields.

### Pathological populations

Subject populations with significant pathology or structural abnormalities
pose particular problems for nonlinear registrations, as structural
correspondence is no longer guaranteed. In these cases, there are a few
suggested methods to improve results. First, choose as templates subjects that
have a "reasonable" level of pathology, rather than extreme examples. These
templates can help to "bridge the gap" between atlases and subjects where
large deformations are needed.

Secondly, one can attempt to find the "best" templates via application of the
``multiatlas`` mode. These may in fact correspond with the same subjects as
suggested above.

Finally, special populations have been segmented with MAGeTbrain by
(semi)manual segmentation of some subjects and their use as atlases. This has
been particularly successful with neonates.

## How to install/configure MAGeTbrain to run elsewhere

MAGeTbrain was designed and tested to run on Compute Canada's SciNet
supercomputing cluster located in Toronto, Canada. The cluster consists of
3000+ 8-CPU (16-core) compute nodes with 16 GB of RAM each. MAGeTbrain handles
job creation and submission via the ``qbatch`` job creation tool, supporting
PBS, SGE and LSF (soon) clusters. ``qbatch`` is configured via a number of
environment variables, see . ``qbatch``
will split up MAGeTbrain jobs according to its configuration to honour
walltime and memory specifications.

To run MAGeTbrain locally, install ``qbatch`` and define
``QBATCH_SYSTEM="local"``; ``qbatch`` will then run commands locally using GNU
parallel. Note that running MAGeTbrain on a single computer is a very slow
process. We estimate a processing time of approximately 350 hours for a
5-atlas, 21-template, 1-subject run, and 60 hours per additional subject when
running on a single CPU. Processing time decreases linearly with more
processors, but eventually becomes memory-limited on most desktop computers.
We strongly recommend installing MAGeTbrain on a cluster.
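A minimal sketch of such a local configuration (the processor count here is
illustrative):

```sh
# Run all MAGeTbrain jobs on the local machine via GNU parallel
export QBATCH_SYSTEM="local"
# Optionally bound the processors qbatch may use per job
export QBATCH_PPJ=8
mb.sh -- run
```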
## Input File/Directory Structure

The following is an example data structure for a single atlas/template/subject.
The names ``atlas1``, ``template1``, and ``subject1`` are arbitrary.
``ext`` can be any image format ANTs/ITK supports, currently MINC2 (``.mnc``),
NIFTI1/2 (``.nii`` or ``.nii.gz``) and Analyze (``.hdr`` and ``.img``).

```sh
input/
  atlas/
    atlas1_t1.ext - mandatory MRI volume
    [ atlas1_[t2, pd, fa, md].ext ] - co-registered to t1
    atlas1_label_name.ext - mandatory label file, basename must match t1
    [ atlas1_label_name2.ext ] - additional labels
    [ atlas1_label_nameN.ext ] - arbitrary numbers of labels
    [ atlas1_mask.ext ] - mask used to focus registration
    ### additional atlas/label pairs as desired
  template/
    subject1_t1.ext - filename should match subject with same MRI
    [ subject1_[t2, pd, fa, md].ext ] - co-registered to t1, requires atlas to also have this contrast
    [ subject1_mask.ext ] - mask used to focus registration
    ### additional templates as desired
  subject/
    subject1_t1.ext - filename should match subject with same MRI
    [ subject1_[t2, pd, fa, md].ext ] - co-registered to t1, requires template to also have this contrast
    [ subject1_mask.ext ] - mask used to focus registration
    ### additional subjects as desired
```

## Output File/Directory Structure

The following describes the standard set of outputs for the input structure
above.

```sh
output/
  transforms/
    atlas-template/
      template1_t1.ext/
        atlas1_t1.ext-template1_t1.ext0_GenericAffine.xfm - MINC format affine transform
        atlas1_t1.ext-template1_t1.ext1_NL.xfm - MINC format nonlinear transform
        atlas1_t1.ext-template1_t1.ext1_NL_grid_0.mnc - MINC format nonlinear grid
        ### additional atlas to template1 registrations
      ### additional directories per template
    template-subject/
      subject1_t1.ext/
        template1_t1.ext-subject1_t1.ext0_GenericAffine.xfm - MINC format affine transform
        template1_t1.ext-subject1_t1.ext1_NL.xfm - MINC format nonlinear transform
template1_t1.ext-subject1_t1.ext1_NL_grid_0.mnc -- MINC format nonlinear grid 298 | ### additional template to subject1 registrations 299 | ### additional directories per subject 300 | labels/ 301 | candidates/ 302 | subject1_t1.ext/ 303 | atlas1_t1.ext-template1_t1.ext-subject1_t1.ext-atlas1_label_name.ext - resampled candidate label 304 | ### additional candidate labels for each atlas-template-subject path 305 | ### additional candidate labels for each input label 306 | majorityvote/ 307 | subject1_label_name.ext - final majority vote label 308 | ### additional labels for each input label 309 | ### additional labels for each subject 310 | ### Optional outputs for multiatlas mode 311 | multiatlas/ 312 | labels/ 313 | candidates/ 314 | template1_t1/ 315 | atlas1_t1.ext-template1.ext-atlas1_label_name.ext - resampled candidate label 316 | ### additional labels for each atlas-template path 317 | ### additional directories for each template 318 | majorityvote/ 319 | template1_label_name.ext - final majority vote label 320 | ### additional labels for each template 321 | ``` 322 | -------------------------------------------------------------------------------- /bin/mb_header.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This file: 3 | # 4 | # - Demos BASH3 Boilerplate (change this for your script) 5 | # 6 | # Usage: 7 | # 8 | # LOG_LEVEL=7 ./main.sh -f /tmp/x -d (change this for your script) 9 | # 10 | # Based on a template by BASH3 Boilerplate v2.3.0 11 | # http://bash3boilerplate.sh/#authors 12 | # 13 | # The MIT License (MIT) 14 | # Copyright (c) 2013 Kevin van Zonneveld and contributors 15 | # You are not obligated to bundle the LICENSE file with your b3bp projects as long 16 | # as you leave these references intact in the header comments of your source files. 17 | 18 | # Exit on error. Append "|| true" if you expect an error. 19 | set -o errexit 20 | # Exit on error inside any functions or subshells. 21 | set -o errtrace 22 | # Do not allow use of undefined vars. Use ${VAR:-} to use an undefined VAR 23 | set -o nounset 24 | # Catch the error in case mysqldump fails (but gzip succeeds) in `mysqldump |gzip` 25 | set -o pipefail 26 | # Turn on traces, useful while debugging but commented out by default 27 | # set -o xtrace 28 | 29 | if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then 30 | __i_am_main_script="0" # false 31 | 32 | if [[ "${__usage+x}" ]]; then 33 | if [[ "${BASH_SOURCE[1]}" = "${0}" ]]; then 34 | __i_am_main_script="1" # true 35 | fi 36 | 37 | __b3bp_external_usage="true" 38 | __b3bp_tmp_source_idx=1 39 | fi 40 | else 41 | __i_am_main_script="1" # true 42 | [[ "${__usage+x}" ]] && unset -v __usage 43 | [[ "${__helptext+x}" ]] && unset -v __helptext 44 | fi 45 | 46 | # Set magic variables for current file, directory, os, etc. 47 | __dir="$(cd "$(dirname "${BASH_SOURCE[${__b3bp_tmp_source_idx:-0}]}")" && pwd)" 48 | __file="${__dir}/$(basename "${BASH_SOURCE[${__b3bp_tmp_source_idx:-0}]}")" 49 | __base="$(basename "${__file}" .sh)" 50 | __invocation="$(printf %q "${__file}")$((($#)) && printf ' %q' "$@" || true)" 51 | 52 | # Define the environment variables (and their defaults) that this script depends on 53 | LOG_LEVEL="${LOG_LEVEL:-6}" # 7 = debug -> 0 = emergency 54 | NO_COLOR="${NO_COLOR:-}" # true = disable color. 
otherwise autodetected 55 | 56 | 57 | ### Functions 58 | ############################################################################## 59 | 60 | function __b3bp_log () { 61 | local log_level="${1}" 62 | shift 63 | 64 | # shellcheck disable=SC2034 65 | local color_debug="\\x1b[35m" 66 | # shellcheck disable=SC2034 67 | local color_info="\\x1b[32m" 68 | # shellcheck disable=SC2034 69 | local color_notice="\\x1b[34m" 70 | # shellcheck disable=SC2034 71 | local color_warning="\\x1b[33m" 72 | # shellcheck disable=SC2034 73 | local color_error="\\x1b[31m" 74 | # shellcheck disable=SC2034 75 | local color_critical="\\x1b[1;31m" 76 | # shellcheck disable=SC2034 77 | local color_alert="\\x1b[1;33;41m" 78 | # shellcheck disable=SC2034 79 | local color_emergency="\\x1b[1;4;5;33;41m" 80 | 81 | local colorvar="color_${log_level}" 82 | 83 | local color="${!colorvar:-${color_error}}" 84 | local color_reset="\\x1b[0m" 85 | 86 | if [[ "${NO_COLOR:-}" = "true" ]] || ( [[ "${TERM:-}" != "xterm"* ]] && [[ "${TERM:-}" != "screen"* ]] ) || [[ ! -t 2 ]]; then 87 | if [[ "${NO_COLOR:-}" != "false" ]]; then 88 | # Don't use colors on pipes or non-recognized terminals 89 | color=""; color_reset="" 90 | fi 91 | fi 92 | 93 | # all remaining arguments are to be printed 94 | local log_line="" 95 | 96 | while IFS=$'\n' read -r log_line; do 97 | echo -e "$(date -u +"%Y-%m-%d %H:%M:%S UTC") ${color}$(printf "[%9s]" "${log_level}")${color_reset} ${log_line}" 1>&2 98 | done <<< "${@:-}" 99 | } 100 | 101 | function emergency () { __b3bp_log emergency "${@}"; exit 1; } 102 | function alert () { [[ "${LOG_LEVEL:-0}" -ge 1 ]] && __b3bp_log alert "${@}"; true; } 103 | function critical () { [[ "${LOG_LEVEL:-0}" -ge 2 ]] && __b3bp_log critical "${@}"; true; } 104 | function error () { [[ "${LOG_LEVEL:-0}" -ge 3 ]] && __b3bp_log error "${@}"; true; } 105 | function warning () { [[ "${LOG_LEVEL:-0}" -ge 4 ]] && __b3bp_log warning "${@}"; true; } 106 | function notice () { [[ "${LOG_LEVEL:-0}" -ge 5 ]] && __b3bp_log notice "${@}"; true; } 107 | function info () { [[ "${LOG_LEVEL:-0}" -ge 6 ]] && __b3bp_log info "${@}"; true; } 108 | function debug () { [[ "${LOG_LEVEL:-0}" -ge 7 ]] && __b3bp_log debug "${@}"; true; } 109 | 110 | function help () { 111 | echo "" 1>&2 112 | echo " ${*}" 1>&2 113 | echo "" 1>&2 114 | echo " ${__usage:-No usage available}" 1>&2 115 | echo "" 1>&2 116 | 117 | if [[ "${__helptext:-}" ]]; then 118 | echo " ${__helptext}" 1>&2 119 | echo "" 1>&2 120 | fi 121 | 122 | exit 1 123 | } 124 | 125 | 126 | ### Parse commandline options 127 | ############################################################################## 128 | 129 | # Commandline options. This defines the usage page, and is used to parse cli 130 | # opts & defaults from. The parsing is unforgiving so be precise in your syntax 131 | # - A short option must be preset for every long option; but every short option 132 | # need not have a long option 133 | # - `--` is respected as the separator between options and arguments 134 | # - We do not bash-expand defaults, so setting '~/app' as a default will not resolve to ${HOME}. 135 | # you can use bash variables to work around this (so use ${HOME} instead) 136 | 137 | # shellcheck disable=SC2015 138 | [[ "${__usage+x}" ]] || read -r -d '' __usage <<-'EOF' || true # exits non-zero when EOF encountered 139 | -f --file [arg] Filename to process. Required. 140 | -t --temp [arg] Location of tempfile. 
Default="/tmp/bar" 141 | -v Enable verbose mode, print script as it is executed 142 | -d --debug Enables debug mode 143 | -h --help This page 144 | -n --no-color Disable color output 145 | -1 --one Do just one thing 146 | EOF 147 | 148 | # shellcheck disable=SC2015 149 | [[ "${__helptext+x}" ]] || read -r -d '' __helptext <<-'EOF' || true # exits non-zero when EOF encountered 150 | This is Bash3 Boilerplate's help text. Feel free to add any description of your 151 | program or elaborate more on command-line arguments. This section is not 152 | parsed and will be added as-is to the help. 153 | EOF 154 | 155 | # Translate usage string -> getopts arguments, and set $arg_ defaults 156 | while read -r __b3bp_tmp_line; do 157 | if [[ "${__b3bp_tmp_line}" =~ ^- ]]; then 158 | # fetch single character version of option string 159 | __b3bp_tmp_opt="${__b3bp_tmp_line%% *}" 160 | __b3bp_tmp_opt="${__b3bp_tmp_opt:1}" 161 | 162 | # fetch long version if present 163 | __b3bp_tmp_long_opt="" 164 | 165 | if [[ "${__b3bp_tmp_line}" = *"--"* ]]; then 166 | __b3bp_tmp_long_opt="${__b3bp_tmp_line#*--}" 167 | __b3bp_tmp_long_opt="${__b3bp_tmp_long_opt%% *}" 168 | fi 169 | 170 | # map opt long name to+from opt short name 171 | printf -v "__b3bp_tmp_opt_long2short_${__b3bp_tmp_long_opt//-/_}" '%s' "${__b3bp_tmp_opt}" 172 | printf -v "__b3bp_tmp_opt_short2long_${__b3bp_tmp_opt}" '%s' "${__b3bp_tmp_long_opt//-/_}" 173 | 174 | # check if option takes an argument 175 | if [[ "${__b3bp_tmp_line}" =~ \[.*\] ]]; then 176 | __b3bp_tmp_opt="${__b3bp_tmp_opt}:" # add : if opt has arg 177 | __b3bp_tmp_init="" # it has an arg. init with "" 178 | printf -v "__b3bp_tmp_has_arg_${__b3bp_tmp_opt:0:1}" '%s' "1" 179 | elif [[ "${__b3bp_tmp_line}" =~ \{.*\} ]]; then 180 | __b3bp_tmp_opt="${__b3bp_tmp_opt}:" # add : if opt has arg 181 | __b3bp_tmp_init="" # it has an arg. init with "" 182 | # remember that this option requires an argument 183 | printf -v "__b3bp_tmp_has_arg_${__b3bp_tmp_opt:0:1}" '%s' "2" 184 | else 185 | __b3bp_tmp_init="0" # it's a flag. init with 0 186 | printf -v "__b3bp_tmp_has_arg_${__b3bp_tmp_opt:0:1}" '%s' "0" 187 | fi 188 | __b3bp_tmp_opts="${__b3bp_tmp_opts:-}${__b3bp_tmp_opt}" 189 | fi 190 | 191 | [[ "${__b3bp_tmp_opt:-}" ]] || continue 192 | 193 | if [[ "${__b3bp_tmp_line}" =~ ^Default= ]] || [[ "${__b3bp_tmp_line}" =~ \.\ *Default= ]]; then 194 | # ignore default value if option does not have an argument 195 | __b3bp_tmp_varname="__b3bp_tmp_has_arg_${__b3bp_tmp_opt:0:1}" 196 | 197 | if [[ "${!__b3bp_tmp_varname}" != "0" ]]; then 198 | __b3bp_tmp_init="${__b3bp_tmp_line##*Default=}" 199 | __b3bp_tmp_re='^"(.*)"$' 200 | if [[ "${__b3bp_tmp_init}" =~ ${__b3bp_tmp_re} ]]; then 201 | __b3bp_tmp_init="${BASH_REMATCH[1]}" 202 | else 203 | __b3bp_tmp_re="^'(.*)'$" 204 | if [[ "${__b3bp_tmp_init}" =~ ${__b3bp_tmp_re} ]]; then 205 | __b3bp_tmp_init="${BASH_REMATCH[1]}" 206 | fi 207 | fi 208 | fi 209 | fi 210 | 211 | if [[ "${__b3bp_tmp_line}" =~ ^Required\. ]] || [[ "${__b3bp_tmp_line}" =~ \.\ *Required\. 
]]; then 212 | # remember that this option requires an argument 213 | printf -v "__b3bp_tmp_has_arg_${__b3bp_tmp_opt:0:1}" '%s' "2" 214 | fi 215 | 216 | printf -v "arg_${__b3bp_tmp_opt:0:1}" '%s' "${__b3bp_tmp_init}" 217 | done <<< "${__usage:-}" 218 | 219 | # run getopts only if options were specified in __usage 220 | if [[ "${__b3bp_tmp_opts:-}" ]]; then 221 | # Allow long options like --this 222 | __b3bp_tmp_opts="${__b3bp_tmp_opts}-:" 223 | 224 | # Reset in case getopts has been used previously in the shell. 225 | OPTIND=1 226 | 227 | # start parsing command line 228 | set +o nounset # unexpected arguments will cause unbound variables 229 | # to be dereferenced 230 | # Overwrite $arg_ defaults with the actual CLI options 231 | while getopts "${__b3bp_tmp_opts}" __b3bp_tmp_opt; do 232 | [[ "${__b3bp_tmp_opt}" = "?" ]] && help "Invalid use of script: ${*} " 233 | 234 | if [[ "${__b3bp_tmp_opt}" = "-" ]]; then 235 | # OPTARG is long-option-name or long-option=value 236 | if [[ "${OPTARG}" =~ .*=.* ]]; then 237 | # --key=value format 238 | __b3bp_tmp_long_opt=${OPTARG/=*/} 239 | # Set opt to the short option corresponding to the long option 240 | __b3bp_tmp_varname="__b3bp_tmp_opt_long2short_${__b3bp_tmp_long_opt//-/_}" 241 | printf -v "__b3bp_tmp_opt" '%s' "${!__b3bp_tmp_varname}" 242 | OPTARG=${OPTARG#*=} 243 | else 244 | # --key value format 245 | # Map long name to short version of option 246 | __b3bp_tmp_varname="__b3bp_tmp_opt_long2short_${OPTARG//-/_}" 247 | printf -v "__b3bp_tmp_opt" '%s' "${!__b3bp_tmp_varname}" 248 | # Only assign OPTARG if option takes an argument 249 | __b3bp_tmp_varname="__b3bp_tmp_has_arg_${__b3bp_tmp_opt}" 250 | printf -v "OPTARG" '%s' "${@:OPTIND:${!__b3bp_tmp_varname}}" 251 | # shift over the argument if argument is expected 252 | ((OPTIND+=__b3bp_tmp_has_arg_${__b3bp_tmp_opt})) 253 | fi 254 | # we have set opt/OPTARG to the short value and the argument as OPTARG if it exists 255 | fi 256 | __b3bp_tmp_varname="arg_${__b3bp_tmp_opt:0:1}" 257 | __b3bp_tmp_default="${!__b3bp_tmp_varname}" 258 | 259 | __b3bp_tmp_value="${OPTARG}" 260 | if [[ -z "${OPTARG}" ]]; then 261 | __b3bp_tmp_value=$((__b3bp_tmp_default + 1)) 262 | fi 263 | 264 | printf -v "${__b3bp_tmp_varname}" '%s' "${__b3bp_tmp_value}" 265 | debug "cli arg ${__b3bp_tmp_varname} = (${__b3bp_tmp_default}) -> ${!__b3bp_tmp_varname}" 266 | done 267 | set -o nounset # no more unbound variable references expected 268 | 269 | shift $((OPTIND-1)) 270 | 271 | if [[ "${1:-}" = "--" ]] ; then 272 | shift 273 | fi 274 | fi 275 | 276 | 277 | ### Automatic validation of required option arguments 278 | ############################################################################## 279 | 280 | for __b3bp_tmp_varname in ${!__b3bp_tmp_has_arg_*}; do 281 | # validate only options which required an argument 282 | [[ "${!__b3bp_tmp_varname}" = "2" ]] || continue 283 | 284 | __b3bp_tmp_opt_short="${__b3bp_tmp_varname##*_}" 285 | __b3bp_tmp_varname="arg_${__b3bp_tmp_opt_short}" 286 | [[ "${!__b3bp_tmp_varname}" ]] && continue 287 | 288 | __b3bp_tmp_varname="__b3bp_tmp_opt_short2long_${__b3bp_tmp_opt_short}" 289 | printf -v "__b3bp_tmp_opt_long" '%s' "${!__b3bp_tmp_varname}" 290 | [[ "${__b3bp_tmp_opt_long:-}" ]] && __b3bp_tmp_opt_long=" (--${__b3bp_tmp_opt_long//_/-})" 291 | 292 | help "Option -${__b3bp_tmp_opt_short}${__b3bp_tmp_opt_long:-} requires an argument" 293 | done 294 | 295 | 296 | ### Cleanup Environment variables 297 | ############################################################################## 298 | 299 | for 
__tmp_varname in ${!__b3bp_tmp_*}; do 300 | unset -v "${__tmp_varname}" 301 | done 302 | 303 | unset -v __tmp_varname 304 | 305 | 306 | ### Externally supplied __usage. Nothing else to do here 307 | ############################################################################## 308 | 309 | if [[ "${__b3bp_external_usage:-}" = "true" ]]; then 310 | unset -v __b3bp_external_usage 311 | return 312 | fi 313 | 314 | 315 | ### Signal trapping and backtracing 316 | ############################################################################## 317 | 318 | function __b3bp_cleanup_before_exit () { 319 | info "Cleaning up. Done" 320 | } 321 | trap __b3bp_cleanup_before_exit EXIT 322 | 323 | # requires `set -o errtrace` 324 | __b3bp_err_report() { 325 | local error_code 326 | error_code=${?} 327 | error "Error in ${__file} in function ${1} on line ${2}" 328 | exit ${error_code} 329 | } 330 | # Uncomment the following line for always providing an error backtrace 331 | # trap '__b3bp_err_report "${FUNCNAME:-.}" ${LINENO}' ERR 332 | 333 | 334 | ### Command-line argument switches (like -d for debugmode, -h for showing helppage) 335 | ############################################################################## 336 | 337 | # debug mode 338 | if [[ "${arg_d:?}" = "1" ]]; then 339 | set -o xtrace 340 | LOG_LEVEL="7" 341 | # Enable error backtracing 342 | trap '__b3bp_err_report "${FUNCNAME:-.}" ${LINENO}' ERR 343 | fi 344 | 345 | # verbose mode 346 | if [[ "${arg_v:?}" = "1" ]]; then 347 | set -o verbose 348 | fi 349 | 350 | # no color mode 351 | if [[ "${arg_n:?}" = "1" ]]; then 352 | NO_COLOR="true" 353 | fi 354 | 355 | # help mode 356 | if [[ "${arg_h:?}" = "1" ]]; then 357 | # Help exists with code 1 358 | help "Help using ${0}" 359 | fi 360 | 361 | 362 | ### Validation. Error out if the things required for your script are not present 363 | ############################################################################## 364 | 365 | [[ "${arg_f:-}" ]] || help "Setting a filename with -f or --file is required" 366 | [[ "${LOG_LEVEL:-}" ]] || emergency "Cannot continue without LOG_LEVEL. " 367 | 368 | 369 | ### Runtime 370 | ############################################################################## 371 | 372 | info "__i_am_main_script: ${__i_am_main_script}" 373 | info "__file: ${__file}" 374 | info "__dir: ${__dir}" 375 | info "__base: ${__base}" 376 | info "OSTYPE: ${OSTYPE}" 377 | 378 | info "arg_f: ${arg_f}" 379 | info "arg_d: ${arg_d}" 380 | info "arg_v: ${arg_v}" 381 | info "arg_h: ${arg_h}" 382 | 383 | info "$(echo -e "multiple lines example - line #1\\nmultiple lines example - line #2\\nimagine logging the output of 'ls -al /path/'")" 384 | 385 | # All of these go to STDERR, so you can use STDOUT for piping machine readable information to other software 386 | debug "Info useful to developers for debugging the application, not useful during operations." 387 | info "Normal operational messages - may be harvested for reporting, measuring throughput, etc. - no action required." 388 | notice "Events that are unusual but not error conditions - might be summarized in an email to developers or admins to spot potential problems - no immediate action required." 389 | warning "Warning messages, not an error, but indication that an error will occur if action is not taken, e.g. file system 85% full - each item must be resolved within a given time. This is a debug message" 390 | error "Non-urgent failures, these should be relayed to developers or admins; each item must be resolved within a given time." 
critical "Should be corrected immediately, but indicates failure in a primary system, an example is a loss of a backup ISP connection."
alert "Should be corrected immediately, therefore notify staff who can fix the problem. An example would be the loss of a primary ISP connection."
emergency "A \"panic\" condition usually affecting multiple apps/servers/sites. At this level it would usually notify all tech staff on call."
--------------------------------------------------------------------------------
/bin/mb_stages.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#Functions for various stages of mb.sh

stage_init () {
  info "Creating input/atlas input/template input/subject"
  mkdir -p input/atlas input/template input/subject input/model
}

stage_status () {
  #Status printout
  info "Found:"
  info "  ${#atlases[@]} atlases in input/atlas"
  info "  ${#labels[@]} labels per atlas in input/atlas"
  info "  ${#templates[@]} templates in input/template"
  info "  ${#subjects[@]} subjects in input/subject"
  info "  ${#models[@]} models in input/models"

  info "Progress:"
  info "  $(find output/transforms/atlas-template -name '*1_NL.xfm' | wc -l) of $(( ${#atlases[@]} * ${#templates[@]} )) atlas-template registrations completed"
  info "  $(find output/transforms/template-subject -name '*1_NL.xfm' | wc -l) of $(( ${#templates[@]} * ${#subjects[@]} - ${#templates[@]} )) template-subject registrations completed"
  info "  $(find output/labels/candidates -type f | wc -l) of $(( ${#atlases[@]} * ${#templates[@]} * ${#subjects[@]} * ${#labels[@]} )) resample labels completed"
  info "  $(find output/labels/majorityvote -type f | wc -l) of $(( ${#subjects[@]} * ${#labels[@]} )) voted labels completed"
  if [[ -d output/multiatlas ]]
  then
    info "  $(find output/multiatlas/labels/candidates -type f | wc -l) of $(( ${#atlases[@]} * ${#templates[@]} * ${#labels[@]} )) multiatlas resample labels completed"
    info "  $(find output/multiatlas/labels/majorityvote -type f | wc -l) of $(( ${#templates[@]} * ${#labels[@]} )) multiatlas voted labels completed"
  fi
}

stage_estimate () {
  if [[ ${QBATCH_SYSTEM} == "local" ]]; then
    __qbatch_atlas_template_opts=""
    __qbatch_template_subject_opts=""
    return 0
  fi
  #Function estimates the memory requirements for doing registrations based on
  #empirically fit equation memoryGB = a * fixed_voxels + b * moving_voxels + c
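  # Illustrative worked example (not computed by the pipeline itself): for two
  # 256x256x256 (~1.7e7 voxel) 1 mm isotropic volumes, the fit gives
  # memoryGB = 5.45e-07 * 1.7e7 + 6.46e-08 * 1.7e7 + 0.13, roughly 10 GB
  # per registration, before the scaling factors applied below.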
  local a=5.454998e-07
  local b=6.458353e-08
  local c=1.305710e-01
  local atlas_voxels
  local template_voxels
  local subject_voxels
  local atlas_template_memory
  local template_subject_memory
  local atlas_template_walltime_seconds
  local template_subject_walltime_seconds

  info "Checking Resolution of First Atlas"
  atlas_voxels=$(( $(PrintHeader $(ls -LS "${atlases[@]}" | head -1) 2 | sed 's/x/\*/g') ))
  info "  Found ${atlas_voxels} voxels"
  info "Checking Resolution of First Template"
  template_voxels=$(( $(PrintHeader $(ls -LS "${templates[@]}" | head -1) 2 | sed 's/x/\*/g') ))
  info "  Found ${template_voxels} voxels"
  info "Checking Resolution of First Subject"
  subject_voxels=$(( $(PrintHeader $(ls -LS "${subjects[@]}" | head -1) 2 | sed 's/x/\*/g') ))
  info "  Found ${subject_voxels} voxels"

  notice "MAGeTbrain estimates walltime and memory based on files with the
  largest file size; if some files are uncompressed, this estimate may be incorrect"

  atlas_template_memory=$(python -c "import math; print(max(1, int(math.ceil((${a} * ${template_voxels} + ${b} * ${atlas_voxels} + ${c}) * ${__memory_scaling_factor}))))")
  template_subject_memory=$(python -c "import math; print(max(1, int(math.ceil((${a} * ${subject_voxels} + ${b} * ${template_voxels} + ${c}) * ${__memory_scaling_factor}))))")

  #Estimate walltime from empirically fit equation: seconds = d * fixed_voxels + e * moving_voxels + f
  local d=3.763172e-04
  local e=3.871282e-06
  local f=4.223281e+03

  atlas_template_walltime_seconds=$(python -c "import math; print(int(math.ceil((${d} * ${template_voxels} + ${e} * ${atlas_voxels} + ${f}) * ${__walltime_scaling_factor})))")
  template_subject_walltime_seconds=$(python -c "import math; print(int(math.ceil((${d} * ${subject_voxels} + ${e} * ${template_voxels} + ${f}) * ${__walltime_scaling_factor})))")

  #A little bit of special casing for Niagara
  if [[ $(printenv) =~ niagara ]]
  then
    __qbatch_atlas_template_opts="--walltime $(( atlas_template_walltime_seconds * 8 * ${QBATCH_CORES:-${QBATCH_PPJ:-1}} * ${QBATCH_CHUNKSIZE:-${QBATCH_PPJ:-1}} / (40 * 60) ))"
    __qbatch_template_subject_opts="--walltime $(( template_subject_walltime_seconds * 8 * ${QBATCH_CORES:-${QBATCH_PPJ:-1}} * ${QBATCH_CHUNKSIZE:-${QBATCH_PPJ:-1}} / (40 * 60) ))"
  else
    # Assume QBATCH variables are set properly, scale memory and walltime according to QBATCH specifications
    __qbatch_atlas_template_opts="--mem $(( atlas_template_memory * ${QBATCH_CORES:-${QBATCH_PPJ:-1}} ))G --walltime $(( atlas_template_walltime_seconds * 8 * ${QBATCH_CHUNKSIZE:-${QBATCH_PPJ:-1}} * ${QBATCH_CORES:-${QBATCH_PPJ:-1}} / ${QBATCH_PPJ:-1} ))"
    __qbatch_template_subject_opts="--mem $(( template_subject_memory * ${QBATCH_CORES:-${QBATCH_PPJ:-1}} ))G --walltime $(( template_subject_walltime_seconds * 8 * ${QBATCH_CHUNKSIZE:-${QBATCH_PPJ:-1}} * ${QBATCH_CORES:-${QBATCH_PPJ:-1}} / ${QBATCH_PPJ:-1} ))"
  fi
}

stage_register_atlas_template () {
  #Atlas to template registration
  local atlasname
  local templatename
  info "Computing Atlas to Template Registrations"
  for template in "${templates[@]}"
  do
    templatename=$(basename ${template})
    for atlas in "${atlases[@]}"
    do
      atlasname=$(basename ${atlas})
      if [[ ! 
-s output/transforms/atlas-template/${templatename}/${atlasname}-${templatename}1_NL.xfm ]] 97 | then 98 | if [[ -n ${__mb_label_masking} ]] 99 | then 100 | debug ${regcommand} ${atlas} ${template} output/transforms/atlas-template/${templatename} "$(echo ${atlas} | sed -r 's/(.mnc|.nii|.nii.gz|.nrrd)//g' | sed 's/_t1//g')_label*" 101 | echo ${regcommand} ${atlas} ${template} output/transforms/atlas-template/${templatename} "$(echo ${atlas} | sed -r 's/(.mnc|.nii|.nii.gz|.nrrd)//g' | sed 's/_t1//g')_label*" 102 | else 103 | debug ${regcommand} ${atlas} ${template} output/transforms/atlas-template/${templatename} 104 | echo ${regcommand} ${atlas} ${template} output/transforms/atlas-template/${templatename} 105 | fi 106 | fi 107 | done > output/jobscripts/${__datetime}-mb_register_atlas_template-${templatename} 108 | debug qbatch ${__mb_dryrun} --logdir 'output/logs' ${__qbatch_atlas_template_opts} output/jobscripts/${__datetime}-mb_register_atlas_template-${templatename} 109 | qbatch ${__mb_dryrun} --logdir 'output/logs' ${__qbatch_atlas_template_opts} output/jobscripts/${__datetime}-mb_register_atlas_template-${templatename} 110 | done 111 | } 112 | 113 | stage_multiatlas_resample () { 114 | debug "Setting up Multiatlas/Template Output Directories" 115 | local templatename 116 | local atlasname 117 | local labelname 118 | mkdir -p output/multiatlas/labels/candidates 119 | for template in "${templates[@]}" 120 | do 121 | mkdir -p output/multiatlas/labels/candidates/$(basename ${template}) 122 | done 123 | info "Computing Multiatlas/Template Label Resamples" 124 | for template in "${templates[@]}" 125 | do 126 | templatename=$(basename ${template}) 127 | for atlas in "${atlases[@]}" 128 | do 129 | atlasname=$(basename ${atlas}) 130 | for label in "${labels[@]}" 131 | do 132 | labelname=$(basename ${label}) 133 | if [[ ! -s output/multiatlas/labels/candidates/${templatename}/${atlasname}-${templatename}-${labelname} ]] 134 | then 135 | debug mb_multiatlas_resample.sh ${labelname} ${atlas} ${template} 136 | echo mb_multiatlas_resample.sh ${labelname} ${atlas} ${template} 137 | fi 138 | done 139 | done 140 | done > output/jobscripts/${__datetime}-mb-multiatlas_resample 141 | debug qbatch ${__mb_dryrun} --logdir 'output/logs' -j 4 -c 1000 --depend "${__datetime}-mb_register_atlas_template*" --walltime 4:00:00 output/jobscripts/${__datetime}-mb-multiatlas_resample 142 | qbatch ${__mb_dryrun} --logdir 'output/logs' -j 4 -c 1000 --depend "${__datetime}-mb_register_atlas_template*" --walltime 4:00:00 output/jobscripts/${__datetime}-mb-multiatlas_resample 143 | } 144 | 145 | stage_multiatlas_vote () { 146 | info "Computing Multiatlas/Template Votes" 147 | local templatename 148 | local atlasname 149 | local labelname 150 | local majorityvotingcmd 151 | mkdir -p output/multiatlas/labels/majorityvote 152 | for template in "${templates[@]}" 153 | do 154 | templatename=$(basename ${template}) 155 | for label in "${labels[@]}" 156 | do 157 | labelname=$(basename ${label}) 158 | if [[ ! 
-s output/multiatlas/labels/majorityvote/${templatename}_$(echo ${labelname} | sed -r 's/(.mnc|.nii|.nii.gz|.nrrd)//g')$(echo ${templatename} | grep -i -o -E '(.mnc|.nii|.nii.gz|.nrrd)') ]] 159 | then 160 | majorityvotingcmd="mb_multiatlas_vote.sh ${labelname} ${template}" 161 | for atlas in "${atlases[@]}" 162 | do 163 | atlasname=$(basename ${atlas}) 164 | majorityvotingcmd+=" output/multiatlas/labels/candidates/${templatename}/${atlasname}-${templatename}-${labelname}" 165 | done 166 | echo """${majorityvotingcmd} && \ 167 | ConvertImage 3 output/multiatlas/labels/majorityvote/${templatename}_${label} /tmp/${templatename}_${label} 1 && \ 168 | mv /tmp/${templatename}_${label} output/multiatlas/labels/majorityvote/${templatename}_${label}""" 169 | fi 170 | done 171 | done > output/jobscripts/${__datetime}-mb-multiatlas_vote 172 | debug qbatch ${__mb_dryrun} --logdir 'output/logs' -j 2 -c 100 --depend "${__datetime}-mb-multiatlas_resample*" --walltime 4:00:00 output/jobscripts/${__datetime}-mb-multiatlas_vote 173 | qbatch ${__mb_dryrun} --logdir 'output/logs' -j 2 -c 100 --depend "${__datetime}-mb-multiatlas_resample*" --walltime 4:00:00 output/jobscripts/${__datetime}-mb-multiatlas_vote 174 | } 175 | 176 | 177 | stage_register_template_subject () { 178 | #Template to subject registration 179 | info "Computing Template to Subject Registrations" 180 | local subjectname 181 | local templatename 182 | for subject in "${subjects[@]}" 183 | do 184 | subjectname=$(basename ${subject}) 185 | for template in "${templates[@]}" 186 | do 187 | templatename=$(basename ${template}) 188 | #If subject and template name are the same, skip the registration step since it should be identity 189 | if [[ (! -s output/transforms/template-subject/${subjectname}/${templatename}-${subjectname}1_NL.xfm) && (${subjectname} != "${templatename}") ]] 190 | then 191 | if [[ -n ${__mb_label_masking} ]]; then 192 | debug ${regcommand} ${template} ${subject} output/transforms/template-subject/${subjectname} output/transforms/atlas-template/${templatename}/*_labelmask* 193 | echo ${regcommand} ${template} ${subject} output/transforms/template-subject/${subjectname} output/transforms/atlas-template/${templatename}/*_labelmask* 194 | else 195 | debug ${regcommand} ${template} ${subject} output/transforms/template-subject/${subjectname} 196 | echo ${regcommand} ${template} ${subject} output/transforms/template-subject/${subjectname} 197 | fi 198 | fi 199 | done > output/jobscripts/${__datetime}-mb_register_template_subject-${subjectname} 200 | if [[ -n ${__mb_label_masking} ]]; then 201 | debug qbatch ${__mb_dryrun} --logdir 'output/logs' --depend "${__datetime}-mb_register_atlas_template*" ${__qbatch_template_subject_opts} output/jobscripts/${__datetime}-mb_register_template_subject-${subjectname} 202 | qbatch ${__mb_dryrun} --logdir 'output/logs' --depend "${__datetime}-mb_register_atlas_template*" ${__qbatch_template_subject_opts} output/jobscripts/${__datetime}-mb_register_template_subject-${subjectname} 203 | else 204 | debug qbatch ${__mb_dryrun} --logdir 'output/logs' ${__qbatch_template_subject_opts} output/jobscripts/${__datetime}-mb_register_template_subject-${subjectname} 205 | qbatch ${__mb_dryrun} --logdir 'output/logs' ${__qbatch_template_subject_opts} output/jobscripts/${__datetime}-mb_register_template_subject-${subjectname} 206 | fi 207 | done 208 | } 209 | 210 | stage_resample () { 211 | #Resample candidate labels 212 | info "Computing Label Resamples" 213 | local subjectname 214 | local templatename 215 | 
210 | stage_resample () {
211 |   #Resample candidate labels
212 |   info "Computing Label Resamples"
213 |   local subjectname
214 |   local templatename
215 |   local atlasname
216 |   local labelname
217 |   for subject in "${subjects[@]}"
218 |   do
219 |     subjectname=$(basename ${subject})
220 |     for template in "${templates[@]}"
221 |     do
222 |       templatename=$(basename ${template})
223 |       for atlas in "${atlases[@]}"
224 |       do
225 |         atlasname=$(basename ${atlas})
226 |         for label in "${labels[@]}"
227 |         do
228 |           labelname=$(basename ${label})
229 |           if [[ ! -s output/labels/candidates/${subjectname}/${atlasname}-${templatename}-${subjectname}-${labelname} ]]
230 |           then
231 |             debug mb_resample.sh ${labelname} ${atlas} ${template} ${subject}
232 |             echo mb_resample.sh ${labelname} ${atlas} ${template} ${subject}
233 |           fi
234 |         done
235 |       done
236 |     done > output/jobscripts/${__datetime}-mb_resample-${subjectname}
237 |     debug qbatch ${__mb_dryrun} --logdir 'output/logs' -j 2 -c 1000 --depend "${__datetime}-mb_register_atlas_template*" --depend "${__datetime}-mb_register_template_subject-${subjectname}*" --walltime 6:00:00 output/jobscripts/${__datetime}-mb_resample-${subjectname}
238 |     qbatch ${__mb_dryrun} --logdir 'output/logs' -j 2 -c 1000 --depend "${__datetime}-mb_register_atlas_template*" --depend "${__datetime}-mb_register_template_subject-${subjectname}*" --walltime 6:00:00 output/jobscripts/${__datetime}-mb_resample-${subjectname}
239 |   done
240 | }
241 | 
242 | stage_vote () {
243 |   #Voting
244 |   info "Computing Votes"
245 |   local subjectname
246 |   local templatename
247 |   local atlasname
248 |   local labelname
249 |   local majorityvotingcmd
250 |   for subject in "${subjects[@]}"
251 |   do
252 |     subjectname=$(basename ${subject})
253 |     for label in "${labels[@]}"
254 |     do
255 |       labelname=$(basename ${label})
256 |       if [[ ! -s output/labels/majorityvote/${subjectname}_$(echo ${labelname} | sed -r 's/(.mnc|.nii|.nii.gz|.nrrd)//g')$(echo ${subjectname} | grep -i -o -E '(.mnc|.nii|.nii.gz|.nrrd)') ]]
257 |       then
258 |         majorityvotingcmd="mb_vote.sh ${labelname} ${subject}"
259 |         for atlas in "${atlases[@]}"
260 |         do
261 |           atlasname=$(basename ${atlas})
262 |           for template in "${templates[@]}"
263 |           do
264 |             templatename=$(basename ${template})
265 |             majorityvotingcmd+=" output/labels/candidates/${subjectname}/${atlasname}-${templatename}-${subjectname}-${labelname}"
266 |           done
267 |         done
268 |         debug ${majorityvotingcmd}
269 |         echo ${majorityvotingcmd}
270 |       fi
271 |     done > output/jobscripts/${__datetime}-mb_vote-${subjectname}
272 |     debug qbatch ${__mb_dryrun} --logdir 'output/logs' -j 2 -c 1000 --depend "${__datetime}-mb_resample-${subjectname}*" --walltime 0:30:00 output/jobscripts/${__datetime}-mb_vote-${subjectname}
273 |     qbatch ${__mb_dryrun} --logdir 'output/logs' -j 2 -c 1000 --depend "${__datetime}-mb_resample-${subjectname}*" --walltime 0:30:00 output/jobscripts/${__datetime}-mb_vote-${subjectname}
274 |   done
275 | }
276 | 
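# Editor's note (not part of the original script): with 2 atlases and 3 templates,
# the majorityvotingcmd assembled above expands to mb_vote.sh plus 2x3=6 candidate
# labels, which mb_vote.sh forwards to ImageMath's MajorityVoting operation.
# A hedged illustration with hypothetical filenames:
#
#   mb_vote.sh label.mnc input/subjects/subject1_t1.mnc \
#     output/labels/candidates/subject1_t1.mnc/atlas1_t1.mnc-template1_t1.mnc-subject1_t1.mnc-label.mnc \
#     output/labels/candidates/subject1_t1.mnc/atlas1_t1.mnc-template2_t1.mnc-subject1_t1.mnc-label.mnc \
#     ...and so on for the remaining atlas-template pairs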
277 | stage_qc () {
278 |   #QC images of the voted labels
279 |   info "Computing QC Images"
280 |   local subjectname
281 |   local labelname
282 |   mkdir -p output/labels/QC
283 |   for subject in "${subjects[@]}"
284 |   do
285 |     subjectname=$(basename ${subject})
286 |     for label in "${labels[@]}"
287 |     do
288 |       labelname=$(basename ${label})
289 |       if [[ ! -s output/labels/QC/${subjectname}_${labelname}.jpg ]]
290 |       then
291 |         debug mb_qc.sh ${subject} output/labels/majorityvote/${subjectname}_$(echo ${labelname} | sed -r 's/(.mnc|.nii|.nii.gz|.nrrd)//g')$(echo ${subjectname} | grep -i -o -E '(.mnc|.nii|.nii.gz|.nrrd)') \
292 |           output/labels/QC
293 |         echo mb_qc.sh ${subject} output/labels/majorityvote/${subjectname}_$(echo ${labelname} | sed -r 's/(.mnc|.nii|.nii.gz|.nrrd)//g')$(echo ${subjectname} | grep -i -o -E '(.mnc|.nii|.nii.gz|.nrrd)') \
294 |           output/labels/QC
295 |       fi
296 |     done > output/jobscripts/${__datetime}-mb_qc-${subjectname}
297 |     debug qbatch ${__mb_dryrun} --logdir 'output/logs' -j 1 -c 1000 --depend "${__datetime}-mb_vote-${subjectname}*" --walltime 0:30:00 output/jobscripts/${__datetime}-mb_qc-${subjectname}
298 |     qbatch ${__mb_dryrun} --logdir 'output/logs' -j 1 -c 1000 --depend "${__datetime}-mb_vote-${subjectname}*" --walltime 0:30:00 output/jobscripts/${__datetime}-mb_qc-${subjectname}
299 |   done
300 | }
301 | 
302 | stage_cleanup () {
303 |   #Tar and delete intermediate files
304 |   info "Creating tar-and-delete cleanup jobs"
305 | 
306 |   cat <<- EOF > output/jobscripts/${__datetime}-mb_cleanup
307 | tar -czvf output/transforms/atlas-template.tar.gz output/transforms/atlas-template && rm -rf output/transforms/atlas-template
308 | tar -czvf output/transforms/template-subject.tar.gz output/transforms/template-subject && rm -rf output/transforms/template-subject
309 | tar -czvf output/labels/candidates.tar.gz output/labels/candidates && rm -rf output/labels/candidates
310 | if [[ -d output/multiatlas/labels/candidates ]]; then tar -czvf output/multiatlas/labels/candidates.tar.gz output/multiatlas/labels/candidates && rm -rf output/multiatlas/labels/candidates; fi
311 | EOF
312 |   debug qbatch ${__mb_dryrun} --logdir 'output/logs' --walltime 8:00:00 --depend "${__datetime}-mb*" output/jobscripts/${__datetime}-mb_cleanup
313 |   qbatch ${__mb_dryrun} --logdir 'output/logs' --walltime 8:00:00 --depend "${__datetime}-mb*" output/jobscripts/${__datetime}-mb_cleanup
314 | }
315 | 
--------------------------------------------------------------------------------
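A usage note on stage_cleanup (editorial addition): the cleanup job replaces each
intermediate directory with a gzipped tarball. Assuming the archive names above, an
archived set of transforms can be recovered later with, for example:

    tar -xzf output/transforms/atlas-template.tar.gz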