├── .github
└── workflows
│ └── testing.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── casts
├── cast_ipython.rc
├── cast_live_python
└── nipype_tutorial_showcase.sh
├── docs
├── index.html
├── notebooks
│ ├── advanced_aws.html
│ ├── advanced_command_line_interface.html
│ ├── advanced_create_interfaces.html
│ ├── advanced_interfaces_caching.html
│ ├── advanced_mipav.html
│ ├── advanced_sphinx_ext.html
│ ├── advanced_spmmcr.html
│ ├── basic_data_input.html
│ ├── basic_data_input_bids.html
│ ├── basic_data_output.html
│ ├── basic_debug.html
│ ├── basic_error_and_crashes.html
│ ├── basic_execution_configuration.html
│ ├── basic_function_interface.html
│ ├── basic_graph_visualization.html
│ ├── basic_import_workflows.html
│ ├── basic_interfaces.html
│ ├── basic_iteration.html
│ ├── basic_joinnodes.html
│ ├── basic_mapnodes.html
│ ├── basic_model_specification_fmri.html
│ ├── basic_nodes.html
│ ├── basic_plugins.html
│ ├── basic_workflow.html
│ ├── example_1stlevel.html
│ ├── example_2ndlevel.html
│ ├── example_normalize.html
│ ├── example_preprocessing.html
│ ├── handson_analysis.html
│ ├── handson_preprocessing.html
│ ├── introduction_dataset.html
│ ├── introduction_docker.html
│ ├── introduction_jupyter-notebook.html
│ ├── introduction_neurodocker.html
│ ├── introduction_nipype.html
│ ├── introduction_python.html
│ ├── introduction_quickstart.html
│ ├── introduction_quickstart_non-neuroimaging.html
│ ├── introduction_showcase.html
│ ├── remark-latest.min.js
│ ├── resources_help.html
│ ├── resources_installation.html
│ ├── resources_python_cheat_sheet.html
│ ├── resources_resources.html
│ ├── wip_nipype_cmd.html
│ ├── wip_resource_sched_profiler.html
│ └── wip_saving_workflows.html
└── static
│ ├── css
│ ├── homepage.css
│ └── mobile.css
│ ├── images
│ ├── bids.png
│ ├── datasink_flow.png
│ ├── example_FSL.png
│ ├── example_Freesurfer.png
│ ├── example_SPM12.png
│ ├── gantt_chart.png
│ ├── iterables.png
│ ├── itersource_1.png
│ ├── itersource_2.png
│ ├── joinnode.png
│ ├── jupyter_function-completion.png
│ ├── jupyter_tab-4-times.png
│ ├── jupyter_tab-once.png
│ ├── jupyter_tab-twice.png
│ ├── logoDocker.png
│ ├── logoNipype_text.png
│ ├── logoNipype_tutorial.png
│ ├── mapnode.png
│ ├── nipype_architecture.png
│ ├── nipype_example_graph.png
│ ├── node_sinlge_node.png
│ ├── node_two_nodes.png
│ ├── python.png
│ ├── sphinx_ext.svg
│ ├── synchronize_1.png
│ └── synchronize_2.png
│ └── template_google_analytics.rst
├── generate.sh
├── index.ipynb
├── notebooks
├── advanced_aws.ipynb
├── advanced_create_interfaces.ipynb
├── advanced_interfaces_caching.ipynb
├── advanced_mipav.ipynb
├── advanced_nipypecli.ipynb
├── advanced_sphinx_ext.ipynb
├── advanced_spmmcr.ipynb
├── basic_data_input.ipynb
├── basic_data_input_bids.ipynb
├── basic_data_output.ipynb
├── basic_debug.ipynb
├── basic_error_and_crashes.ipynb
├── basic_execution_configuration.ipynb
├── basic_function_interface.ipynb
├── basic_graph_visualization.ipynb
├── basic_import_workflows.ipynb
├── basic_interfaces.ipynb
├── basic_iteration.ipynb
├── basic_joinnodes.ipynb
├── basic_mapnodes.ipynb
├── basic_model_specification_fmri.ipynb
├── basic_nodes.ipynb
├── basic_plugins.ipynb
├── basic_workflow.ipynb
├── example_1stlevel.ipynb
├── example_2ndlevel.ipynb
├── example_normalize.ipynb
├── example_preprocessing.ipynb
├── handson_analysis.ipynb
├── handson_preprocessing.ipynb
├── introduction_dataset.ipynb
├── introduction_docker.ipynb
├── introduction_jupyter-notebook.ipynb
├── introduction_neurodocker.ipynb
├── introduction_nipype.html
├── introduction_python.ipynb
├── introduction_quickstart.ipynb
├── introduction_quickstart_non-neuroimaging.ipynb
├── introduction_showcase.ipynb
├── remark-latest.min.js
├── resources_help.ipynb
├── resources_installation.ipynb
├── resources_python_cheat_sheet.ipynb
├── resources_resources.ipynb
├── scripts
│ ├── ANTS_registration.py
│ ├── brainvolume.m
│ └── transform.tfm
├── wip_nipype_cmd.ipynb
├── wip_resource_sched_profiler.ipynb
└── wip_saving_workflows.ipynb
├── static
├── css
│ ├── homepage.css
│ └── mobile.css
├── images
│ ├── bids.png
│ ├── datasink_flow.png
│ ├── example_FSL.png
│ ├── example_Freesurfer.png
│ ├── example_SPM12.png
│ ├── gantt_chart.png
│ ├── iterables.png
│ ├── itersource_1.png
│ ├── itersource_2.png
│ ├── joinnode.png
│ ├── jupyter_function-completion.png
│ ├── jupyter_tab-4-times.png
│ ├── jupyter_tab-once.png
│ ├── jupyter_tab-twice.png
│ ├── logoDocker.png
│ ├── logoNipype_text.png
│ ├── logoNipype_tutorial.png
│ ├── mapnode.png
│ ├── nipype_architecture.png
│ ├── nipype_example_graph.png
│ ├── node_sinlge_node.png
│ ├── node_two_nodes.png
│ ├── python.png
│ ├── sphinx_ext.svg
│ ├── synchronize_1.png
│ └── synchronize_2.png
└── template_google_analytics.rst
├── test_notebooks.py
└── update_pages.sh
/.github/workflows/testing.yml:
--------------------------------------------------------------------------------
1 | name: Build & run notebooks
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 | workflow_dispatch:
9 | inputs:
10 | nipype_branch:
11 | description: 'Build specific Nipype branch'
12 | required: true
13 | default: 'master'
14 |
15 |
16 | jobs:
17 | build:
18 | runs-on: ubuntu-latest
19 |
20 | steps:
21 | - uses: actions/checkout@v2
22 | - name: generate the Dockerfile from generate.sh
23 | run: |
24 | BRANCH=${{ github.event.inputs.nipype_branch }}
25 | BRANCH=${BRANCH:-"master"}
26 | bash generate.sh $BRANCH
27 | # In this step, this action saves a list of existing images,
28 | # the cache is created without them in the post run.
29 | # It also restores the cache if it exists.
30 | - uses: satackey/action-docker-layer-caching@v0.0.11
31 | with:
32 | key: tutorial-docker-cache-{hash}
33 | restore-keys: |
34 | tutorial-docker-cache-
35 | layer-tutorial-docker-cache-
36 | - name: build the image
37 | run: docker build . --file Dockerfile -t nipype_tutorial:latest
38 |
39 | test_1:
40 | needs: build
41 | runs-on: ubuntu-latest
42 | steps:
43 | - uses: satackey/action-docker-layer-caching@v0.0.11
44 | with:
45 | key: tutorial-docker-cache-{hash}
46 | restore-keys: |
47 | tutorial-docker-cache-
48 | layer-tutorial-docker-cache-
49 | - name: run test 1
50 | run: docker run --rm nipype_tutorial:latest python /home/neuro/nipype_tutorial/test_notebooks.py 1
51 |
52 | test_2:
53 | needs: build
54 | runs-on: ubuntu-latest
55 | steps:
56 | - uses: satackey/action-docker-layer-caching@v0.0.11
57 | with:
58 | key: tutorial-docker-cache-{hash}
59 | restore-keys: |
60 | tutorial-docker-cache-
61 | layer-tutorial-docker-cache-
62 | - name: run test 2
63 | run: docker run --rm nipype_tutorial:latest python /home/neuro/nipype_tutorial/test_notebooks.py 2
64 |
65 | test_3:
66 | needs: build
67 | runs-on: ubuntu-latest
68 | steps:
69 | - uses: satackey/action-docker-layer-caching@v0.0.11
70 | with:
71 | key: tutorial-docker-cache-{hash}
72 | restore-keys: |
73 | tutorial-docker-cache-
74 | layer-tutorial-docker-cache-
75 | - name: run test 3
76 | run: docker run --rm nipype_tutorial:latest python /home/neuro/nipype_tutorial/test_notebooks.py 3
77 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *~
3 | .ipynb_checkpoints/
4 | crash*.pklz
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
6 |
7 | ## Our Standards
8 |
9 | Examples of behavior that contributes to creating a positive environment include:
10 |
11 | * Using welcoming and inclusive language
12 | * Being respectful of differing viewpoints and experiences
13 | * Gracefully accepting constructive criticism
14 | * Focusing on what is best for the community
15 | * Showing empathy towards other community members
16 |
17 | Examples of unacceptable behavior by participants include:
18 |
19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances
20 | * Trolling, insulting/derogatory comments, and personal or political attacks
21 | * Public or private harassment
22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission
23 | * Other conduct which could reasonably be considered inappropriate in a professional setting
24 |
25 | ## Our Responsibilities
26 |
27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
28 |
29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
30 |
31 | ## Scope
32 |
33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
34 |
35 | ## Enforcement
36 |
37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at michaelnotter@hotmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
38 |
39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
40 |
41 | ## Attribution
42 |
43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
44 |
45 | [homepage]: http://contributor-covenant.org
46 | [version]: http://contributor-covenant.org/version/1/4/
47 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to `nipype_tutorial`
2 |
3 | Welcome to the `nipype_tutorial` repository! We're excited you're here and want to contribute.
4 |
5 | These guidelines are designed to make it as easy as possible to get involved.
6 | If you have any questions that aren't discussed below, please let us know by opening an [issue][link_issues]!
7 |
8 | Before you start you'll need to set up a free [GitHub][link_github] account and sign in.
9 | Here are some [instructions][link_signupinstructions] on how to do just that!
10 |
11 | ### Labels
12 |
13 | The current list of labels are [here][link_labels] and include:
14 |
 15 | * [help wanted][link_helpwanted]
16 | *These issues contain a task that a member of the team has determined we need additional help with.*
17 |
18 | If you feel that you can contribute to one of these issues, we especially encourage you to do so!
19 |
 20 | * [bug][link_bugs]
21 | *These issues point to problems in the project.*
22 |
 23 | If you find a new bug, please give as much detail as possible in your issue, including steps to recreate the error.
24 | If you experience the same bug as one already listed, please add any additional information that you have as a comment.
25 |
 26 | * [enhancement][link_feature]
27 | *These issues are asking for enhancements to be added to the project.*
28 |
29 | Please try to make sure that your requested feature is distinct from any others that have already been requested or implemented.
30 | If you find one that's similar but there are subtle differences please reference the other request in your issue.
31 |
32 | ## Making a change
33 |
34 | We appreciate all contributions to `nipype_tutorial`, but those accepted fastest will follow a workflow similar to the following:
35 |
36 | **1. Comment on an existing issue or open a new issue referencing your addition.**
37 |
38 | This allows other members of the `nipype_tutorial` development team to confirm that you aren't overlapping with work that's currently underway and that everyone is on the same page with the goal of the work you're going to carry out.
39 |
40 | [This blog][link_pushpullblog] is a nice explanation of why putting this work in up front is so useful to everyone involved.
41 |
42 | **2. [Fork][link_fork] the [`nipype_tutorial` repository][link_nipype_tutorial] to your profile.**
43 |
44 | This is now your own unique copy of `nipype_tutorial`.
 45 | Changes here won't affect anyone else's work, so it's a safe space to explore edits to the code!
46 |
47 | Make sure to [keep your fork up to date][link_updateupstreamwiki] with the original repository.
48 |
49 | **3. Make the changes you've discussed.**
50 |
51 | Try to keep the changes focused.
52 | If you feel tempted to "branch out" then please make a [new branch][link_branches].
53 |
54 | **4. Submit a [pull request][link_pullrequest].**
55 |
56 | A member of the development team will review your changes to confirm that they can be merged into the main codebase.
57 |
58 | ## Recognizing contributions
59 |
60 | We welcome and recognize all contributions from documentation to testing to code development.
61 | You can see a list of our current contributors in the [contributors tab][link_contributors].
62 |
63 | ## Thank you!
64 |
65 | You're awesome. :wave::smiley:
66 |
67 |
68 |
69 | *— Based on contributing guidelines from the [STEMMRoleModels][link_stemmrolemodels] project.*
70 |
71 | [link_github]: https://github.com/
72 | [link_nipype_tutorial]: https://github.com/rmarkello/nipype_tutorial
73 | [link_signupinstructions]: https://help.github.com/articles/signing-up-for-a-new-github-account
74 | [link_react]: https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comments
75 | [link_issues]: https://github.com/rmarkello/nipype_tutorial/issues
76 | [link_labels]: https://github.com/rmarkello/nipype_tutorial/labels
77 | [link_discussingissues]: https://help.github.com/articles/discussing-projects-in-issues-and-pull-requests
78 |
79 | [link_bugs]: https://github.com/rmarkello/nipype_tutorial/labels/bug
80 | [link_helpwanted]: https://github.com/rmarkello/nipype_tutorial/labels/help%20wanted
81 | [link_feature]: https://github.com/rmarkello/nipype_tutorial/labels/enhancement
82 |
83 | [link_pullrequest]: https://help.github.com/articles/creating-a-pull-request/
84 | [link_fork]: https://help.github.com/articles/fork-a-repo/
85 | [link_pushpullblog]: https://www.igvita.com/2011/12/19/dont-push-your-pull-requests/
86 | [link_branches]: https://help.github.com/articles/creating-and-deleting-branches-within-your-repository/
87 | [link_updateupstreamwiki]: https://help.github.com/articles/syncing-a-fork/
88 | [link_contributors]: https://github.com/rmarkello/nipype_tutorial/graphs/contributors
89 | [link_stemmrolemodels]: https://github.com/KirstieJane/STEMMRoleModels
90 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, Michael Notter and the nipype_tutorial developers
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Nipype Tutorial Notebooks
2 | [](https://github.com/miykael/nipype_tutorial/actions?query=workflow%3ACI)
3 | [](https://github.com/miykael/nipype_tutorial/issues/)
4 | [](https://github.com/miykael/nipype_tutorial/pulls/)
5 | [](https://GitHub.com/miykael/nipype_tutorial/graphs/contributors/)
6 | [](https://github.com/miykael/nipype_tutorial/commits/master)
7 | [](https://github.com/miykael/nipype_tutorial/archive/master.zip)
8 | [](https://hub.docker.com/r/miykael/nipype_tutorial/)
9 | [](http://hits.dwyl.io/miykael/nipype_tutorial)
10 |
11 | This is the Nipype Tutorial in Jupyter Notebook format. You can access the tutorial in two ways:
12 |
13 | 1. [Nipype Tutorial Homepage](https://miykael.github.io/nipype_tutorial/): This website contains a static, read-only version of all the notebooks.
14 | 2. [Nipype Tutorial Docker Image](https://miykael.github.io/nipype_tutorial/notebooks/introduction_docker.html): This guide explains how to use Docker to run the notebooks interactively on your own computer. The nipype tutorial docker image is the best interactive way to learn Nipype.
15 |
16 |
17 | # Feedback, Help & Support
18 |
19 | If you want to help with this tutorial or have any questions, feel free to fork the repo of the [Notebooks](https://github.com/miykael/nipype_tutorial) or interact with other contributors on the slack channel [brainhack.slack.com/messages/nipype/](https://brainhack.slack.com/messages/nipype/). If you have any questions or found a problem, open a new [issue on github](https://github.com/miykael/nipype_tutorial/issues).
20 |
21 |
22 | # Thanks and Acknowledgment
23 |
24 | A huge thanks to [Michael Waskom](https://github.com/mwaskom), [Oscar Esteban](https://github.com/oesteban), [Chris Gorgolewski](https://github.com/chrisfilo) and [Satrajit Ghosh](https://github.com/satra) for their input to this tutorial! And a huge thanks to [Dorota Jarecka](https://github.com/djarecka/) who updated this tutorial to Python 3 and is helping me with keeping this tutorial updated and running!
25 |
--------------------------------------------------------------------------------
/casts/cast_ipython.rc:
--------------------------------------------------------------------------------
1 | # This file contains ipython configuration variables to be used for generating
2 | # asciinema demos to guarantee consistent appearance.
3 |
4 | # make a fake temporary home dir and go into it
5 | SCREENCAST_HOME=~/demo
6 | if [ ! -e "$SCREENCAST_HOME" ]; then
7 | mkdir -p ${SCREENCAST_HOME} || {
8 | echo "FAILED to create $SCREENCAST_HOME" >&2
9 | exit 1; # we need demo directory!
10 | }
11 | fi
12 | cd $SCREENCAST_HOME
13 | ipython
14 |
15 | # cleanup at the end
16 | trap "cd ; rm -rf ~/demo > /dev/null 2>&1" EXIT
17 |
--------------------------------------------------------------------------------
/casts/cast_live_python:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | set -u -e
4 |
5 | test ! -e $1 && echo "input file does not exist" && exit 1
6 | title="$(echo $(basename $1) | sed -e 's/.sh$//')"
7 | bashrc_file="$(dirname $0)/cast_ipython.rc"
8 |
9 | # shortcut for making xdotool use the right window
10 | function xdt() {
11 | winid=$1
12 | shift
13 | xdotool windowactivate --sync $winid
14 | if [ "$#" -gt 0 ]; then
15 | xdotool "$@"
16 | fi
17 | }
18 |
19 | # make sure the target xterm is up and running
20 | width=106
21 | height=29
22 | fs=15
23 | text_width=$(($width - 8))
24 |
25 | geometry=${width}x${height}
26 | this_window=$(xdotool getwindowfocus)
27 |
28 | # For consistent appearance
29 | xterm +sb -fa Hermit -fs $fs -bg white -fg black -geometry $geometry -title Screencast-xterm -e "bash --rcfile cast_ipython.rc" &
30 | xterm_pid=$!
31 | sleep 2
32 |
33 | xterm_window=$(xdotool search --pid $xterm_pid)
34 |
35 | # By default should stay in the xterm window, so when we need to deal with
36 | # current one (waiting etc), then switch
37 | function wait () {
38 | xdt $this_window
39 | read -p "$@" in
40 | echo "$in"
41 | xdt $xterm_window
42 | }
43 | function instruct () {
44 | xdt $this_window
45 | wait "$@"
46 | }
47 | function type () {
48 | xdt $xterm_window type --clearmodifiers --delay 40 "$1"
49 | }
50 | function key () {
51 | xdt $xterm_window key --clearmodifiers $*
52 | }
53 | function sleep () {
54 | xdotool sleep $1
55 | }
56 | function execute () {
57 | xdt $xterm_window sleep 0.5 key Return
58 | sleep 0.2
59 | }
60 | function say()
61 | {
62 | ac=$(instruct "SAY: $1")
63 | if [ "$ac" != "s" ] ; then
64 | echo "skipping"
65 | return
66 | fi
67 | type "$(printf "#\n# $1" | fmt -w ${text_width} --prefix '# ')"
68 | key Return
69 | }
70 | function show () {
71 | xdt $xterm_window type --clearmodifiers --delay 10 "$(printf "\n$1" | sed -e 's/^/# /g')"
72 | sleep 0.1
73 | key Return
74 | }
75 | function run () {
76 | help="Press Enter to type, s to skip this action"
77 | ac=$(instruct "EXEC: $1. $help")
78 | if [ "$ac" = "s" ]; then
79 | echo "skipping"
80 | return
81 | fi
82 | type "$1"
83 | ac=$(instruct "EXEC: $1. $help")
84 | if [ "$ac" = "s" ]; then
85 | echo "skipping"
86 | return
87 | fi
88 | execute
89 | }
90 | function run_expfail () {
91 | # TODO we could announce or visualize the expected failure
92 | run "$1"
93 | }
94 |
95 | xdt $xterm_window sleep 0.1
96 |
97 | echo "xterm PID $xterm_pid (window $xterm_window) this window $this_window"
98 |
99 | # now get the process tree attached to the terminal so we can
100 | # figure out when it is idle, and when it is not
101 | # XXX must happen after asciinema is running
102 | xterm_pstree="$(pstree -p -A $xterm_pid)"
103 |
104 | . $1
105 |
106 | sleep 1
107 |
108 | show "$(cowsay "Demo was using $(datalad --version 2>&1 | head -n1). Discover more at http://datalad.org")"
109 |
110 | # key Control_L+d
111 |
112 | echo "INSTRUCTION: Press Ctrl-D or run exit to close the terminal"
113 |
--------------------------------------------------------------------------------
/casts/nipype_tutorial_showcase.sh:
--------------------------------------------------------------------------------
1 | say "Nipype Showcase"
2 | show "Import nipype building blocks"
3 | run "from nipype import Node, Workflow"
4 |
5 | say "Import relevant interfaces"
6 | show "Import relevant interfaces"
7 | run "from nipype.interfaces.fsl import SliceTimer, MCFLIRT, Smooth"
8 |
9 | say "Create SliceTime correction node"
10 | show "Create SliceTime correction node"
11 | run "slicetimer = Node(SliceTimer(index_dir=False,
12 | interleaved=True,
13 | time_repetition=2.5),
14 | name='slicetimer')
15 | "
16 |
17 | say "Create Motion correction node"
18 | show "Create Motion correction node"
19 | run "mcflirt = Node(MCFLIRT(mean_vol=True,
20 | save_plots=True),
21 | name='mcflirt')
22 | "
23 |
24 | say "Create Smoothing node"
25 | show "Create Smoothing node"
26 | run "smooth = Node(Smooth(fwhm=4), name='smooth')"
27 |
28 | say "Create Workflow"
29 | show "Create Workflow"
30 | run "preproc01 = Workflow(name='preproc_flow', base_dir='.')"
31 |
32 | say "Connect nodes within the workflow"
33 | show "Connect nodes within the workflow"
34 | run "preproc01.connect([(slicetimer, mcflirt, [('slice_time_corrected_file', 'in_file')]),
35 | (mcflirt, smooth, [('out_file', 'in_file')])
36 | ])
37 | "
38 |
39 | say "Create a visualization of the workflow"
40 | show "Create a visualization of the workflow"
41 | run "preproc01.write_graph(graph2use='orig')"
42 |
43 | say "Visualize the figure"
44 | show "Visualize the figure"
45 | run "!eog preproc_flow/graph_detailed.png
46 | "
47 |
48 | say "Feed some input to the workflow"
49 | show "Feed some input to the workflow"
50 | run "slicetimer.inputs.in_file = 'path/to/your/func.nii.gz'"
51 |
52 | say "Run the Workflow and stop the time"
53 | show "Run the Workflow and stop the time"
54 | run "%time preproc01.run('MultiProc', plugin_args={'n_procs': 5})"
55 |
56 | say "Investigate the output"
57 | show "Investigate the output"
58 | run "!tree preproc_flow -I '*js|*json|*pklz|_report|*.dot|*html'"
59 |
60 | say "Change the size of the smoothing kernel"
61 | show "Change the size of the smoothing kernel"
62 | run "smooth.inputs.fwhm = 2"
63 |
64 | say "Rerun the workflow"
65 | show "Rerun the workflow"
66 | run "%time preproc01.run('MultiProc', plugin_args={'n_procs': 5})"
67 |
68 | say "Create 4 additional copies of the workflow"
69 | show "Create 4 additional copies of the workflow"
70 | run "preproc02 = preproc01.clone('preproc02')
71 | preproc03 = preproc01.clone('preproc03')
72 | preproc04 = preproc01.clone('preproc04')
73 | preproc05 = preproc01.clone('preproc05')
74 | "
75 |
76 | say "Create a new workflow - metaflow"
77 | show "Create a new workflow - metaflow"
78 | run "metaflow = Workflow(name='metaflow', base_dir='.')"
79 |
80 | say "Add the 5 workflows to this metaflow"
81 | show "Add the 5 workflows to this metaflow"
82 | run "metaflow.add_nodes([preproc01, preproc02, preproc03,
83 | preproc04, preproc05])
84 | "
85 |
86 | say "Visualize the workflow"
87 | show "Visualize the workflow"
88 | run "metaflow.write_graph(graph2use='flat')
89 | !eog metaflow/graph_detailed.png
90 | "
91 |
92 | say "Run this metaflow in parallel"
93 | show "Run this metaflow in parallel"
94 | run "%time metaflow.run('MultiProc', plugin_args={'n_procs': 5})"
95 |
96 | say "Investigate the output"
97 | show "Investigate the output"
98 | run "!tree metaflow -I '*js|*json|*pklz|_report|*.dot|*html'"
99 |
100 | say "The End."
101 | show "The End."
102 |
--------------------------------------------------------------------------------
/docs/static/css/homepage.css:
--------------------------------------------------------------------------------
1 | html,body {
2 | height: 100%;
3 | }
4 |
5 | body {
6 | overflow-y: scroll;
7 | }
8 |
9 | body {
10 | font-family: 'Helvetica','Corbel',sans-serif;
11 | font-size: 14px;
12 | margin: 0;
13 | color: #444;
14 | line-height: 1.4;
15 | min-width: 1000px;
16 | }
17 |
18 | article {
19 | padding: 0 20px;
20 | position: relative;
21 | }
22 |
23 | h1,h2,h3,h4,h5 {
24 | color: #111;
25 | font-family: inherit;
26 | font-weight: bold;
27 | }
28 |
29 | h1 {
30 | font-size: 25px;
31 | margin-bottom: 22px;
32 | }
33 |
34 | h2 {
35 | font-size: 20px;
36 | margin-bottom: 22px;
37 | }
38 |
39 | h3 {
40 | font-size: 18px;
41 | margin-bottom: 22px;
42 | }
43 |
44 | h4 {
45 | font-size: 15px;
46 | margin-bottom: 22px;
47 | }
48 |
49 | h5 {
50 | font-size: 15px;
51 | margin-bottom: -22px;
52 | }
53 |
54 | p {
55 | font-size: 14px;
56 | margin: 22px 0;
57 | }
58 |
59 | b,strong {
60 | font-weight: bold;
61 | }
62 |
63 | em,cite {
64 | font-style: italic;
65 | }
66 |
67 | hr {
68 | background: #CCC;
69 | border: 0;
70 | box-shadow: 0 2px 2px rgba(0,0,0,0.075);
71 | clear: both;
72 | color: #CCC;
73 | display: block;
74 | height: 1px;
75 | margin: 18px 0 36px 0;
76 | padding: 0;
77 | width: 100%;
78 | }
79 |
80 | hr.thin {
81 | margin-bottom: 18px;
82 | margin-left: auto;
83 | margin-right: auto;
84 | opacity: .40;
85 | filter: alpha(opacity=40);
86 | width: 50%;
87 | }
88 |
89 | a:link,a:visited,header a:visited,footer a:visited,.visited-no-recolor a:visited,a.visited-no-recolor:visited {
90 | color: #005987;
91 | }
92 |
93 | a:link.no-underline,a:visited.no-underline,header a:visited.no-underline,footer a:visited.no-underline,.visited-no-recolor a:visited.no-underline,a.visited-no-recolor:visited.no-underline {
94 | text-decoration: none;
95 | }
96 |
97 | a:visited {
98 | color: #7d00ad;
99 | }
100 |
101 | a:link:hover,a:link:focus,a:visited:hover,a:visited:focus {
102 | color: #707070;
103 | }
104 |
105 | a:link:hover.no-underline,a:link:focus.no-underline,a:visited:hover.no-underline,a:visited:focus.no-underline {
106 | text-decoration: none;
107 | }
108 |
109 | form {
110 | display: inline;
111 | }
112 |
113 | .fixed-width {
114 | font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif;
115 | }
116 |
117 | ul.styled-list {
118 | margin-left: 16px;
119 | }
120 |
/* Re-enable list markers inside content areas (a global reset presumably
   removes them — confirm against the base stylesheet). */
ul.styled-list {
  list-style: disc;
}

ol.styled-list {
  list-style: decimal;
}

/* Slightly soften body text on the homepage. */
#homepage p {
  opacity: .8;
}

/* "Section separator": a centered heading visually overlapping an <hr>. */
.section-separator {
  margin: 50px auto;
  position: relative;
}

.section-separator h2 {
  color: #898989;
  text-align: center;
  width: auto;
  font-size: 140%;
  margin: 0;
  position: relative;
  z-index: 1; /* keep the heading above the rule line */
}

#homepage .section-separator hr {
  position: relative;
  margin: 0;
  top: -1em; /* pull the rule up so it sits behind the heading */
}

/* Tighter separators and list spacing inside library sections. */
.library-section .section-separator {
  margin: 30px auto;
}

.library-section .library-section-separator {
  margin-bottom: 5px;
}

.library-section li {
  margin-bottom: -0.5em; /* compensate paragraph margin inside list items */
}

/* Domain tables: blocks of subject links under a colored domain header. */
.domain-table-container .subject-link {
  background: #fdfdfd;
  border-bottom: 1px solid #ddd;
  border-right: 1px solid #ddd;
  box-sizing: border-box;
  display: block;
  font-family: inherit;
  padding: 5px 10px;
  text-decoration: none;
}

.domain-table-container .subject-link:hover {
  background: #314453;
  color: #fff;
  text-decoration: none;
}

/* Default (uncolored) domain header/table chrome. */
.domain-header {
  color: #314453;
  margin-bottom: 0;
  padding-left: 10px;
  padding-top: 25px;
}

.domain-table-container {
  border-top: 2px solid #314453;
  border-left: 1px solid #ddd;
  margin-top: 3px;
}

/* Per-domain color variants color01–color07. Each variant repeats the same
   hsl() hue in three places — the domain title, the container's top border,
   and the subject-link hover background. When editing a variant, keep all
   three hue values in sync. */
.domain-header.color01 .domain-title {
  color: hsl(0, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color01 {
  border-top: 2px solid hsl(0, 60%, 50%);
}

.domain-table-container.color01 .subject-link:hover {
  background: hsl(0, 60%, 50%);
}

.domain-header.color02 .domain-title {
  color: hsl(20, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color02 {
  border-top: 2px solid hsl(20, 60%, 50%);
}

.domain-table-container.color02 .subject-link:hover {
  background: hsl(20, 60%, 50%);
}

.domain-header.color03 .domain-title {
  color: hsl(50, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color03 {
  border-top: 2px solid hsl(50, 60%, 50%);
}

.domain-table-container.color03 .subject-link:hover {
  background: hsl(50, 60%, 50%);
}

.domain-header.color04 .domain-title {
  color: hsl(150, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color04 {
  border-top: 2px solid hsl(150, 60%, 50%);
}

.domain-table-container.color04 .subject-link:hover {
  background: hsl(150, 60%, 50%);
}

.domain-header.color05 .domain-title {
  color: hsl(205, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color05 {
  border-top: 2px solid hsl(205, 60%, 50%);
}

.domain-table-container.color05 .subject-link:hover {
  background: hsl(205, 60%, 50%);
}

.domain-header.color06 .domain-title {
  color: hsl(240, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color06 {
  border-top: 2px solid hsl(240, 60%, 50%);
}

.domain-table-container.color06 .subject-link:hover {
  background: hsl(240, 60%, 50%);
}

.domain-header.color07 .domain-title {
  color: hsl(280, 60%, 50%);
  margin-left: -10px;
  padding: 3px 10px;
}

.domain-table-container.color07 {
  border-top: 2px solid hsl(280, 60%, 50%);
}

.domain-table-container.color07 .subject-link:hover {
  background: hsl(280, 60%, 50%);
}
293 |
--------------------------------------------------------------------------------
/docs/static/images/bids.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/bids.png
--------------------------------------------------------------------------------
/docs/static/images/datasink_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/datasink_flow.png
--------------------------------------------------------------------------------
/docs/static/images/example_FSL.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/example_FSL.png
--------------------------------------------------------------------------------
/docs/static/images/example_Freesurfer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/example_Freesurfer.png
--------------------------------------------------------------------------------
/docs/static/images/example_SPM12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/example_SPM12.png
--------------------------------------------------------------------------------
/docs/static/images/gantt_chart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/gantt_chart.png
--------------------------------------------------------------------------------
/docs/static/images/iterables.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/iterables.png
--------------------------------------------------------------------------------
/docs/static/images/itersource_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/itersource_1.png
--------------------------------------------------------------------------------
/docs/static/images/itersource_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/itersource_2.png
--------------------------------------------------------------------------------
/docs/static/images/joinnode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/joinnode.png
--------------------------------------------------------------------------------
/docs/static/images/jupyter_function-completion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/jupyter_function-completion.png
--------------------------------------------------------------------------------
/docs/static/images/jupyter_tab-4-times.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/jupyter_tab-4-times.png
--------------------------------------------------------------------------------
/docs/static/images/jupyter_tab-once.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/jupyter_tab-once.png
--------------------------------------------------------------------------------
/docs/static/images/jupyter_tab-twice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/jupyter_tab-twice.png
--------------------------------------------------------------------------------
/docs/static/images/logoDocker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/logoDocker.png
--------------------------------------------------------------------------------
/docs/static/images/logoNipype_text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/logoNipype_text.png
--------------------------------------------------------------------------------
/docs/static/images/logoNipype_tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/logoNipype_tutorial.png
--------------------------------------------------------------------------------
/docs/static/images/mapnode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/mapnode.png
--------------------------------------------------------------------------------
/docs/static/images/nipype_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/nipype_architecture.png
--------------------------------------------------------------------------------
/docs/static/images/nipype_example_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/nipype_example_graph.png
--------------------------------------------------------------------------------
/docs/static/images/node_sinlge_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/node_sinlge_node.png
--------------------------------------------------------------------------------
/docs/static/images/node_two_nodes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/node_two_nodes.png
--------------------------------------------------------------------------------
/docs/static/images/python.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/python.png
--------------------------------------------------------------------------------
/docs/static/images/synchronize_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/synchronize_1.png
--------------------------------------------------------------------------------
/docs/static/images/synchronize_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/docs/static/images/synchronize_2.png
--------------------------------------------------------------------------------
/docs/static/template_google_analytics.rst:
--------------------------------------------------------------------------------
1 |
11 |
12 |
--------------------------------------------------------------------------------
/generate.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Generate the tutorial's Dockerfile and Singularity recipe via neurodocker.
#
# Usage: ./generate.sh [NIPYPE_BRANCH]
#   NIPYPE_BRANCH - nipype git ref to install in the image (default: master)

# Exit on error; fail a pipeline if any stage fails.
# (set -u is intentionally omitted: later code expands LD_LIBRARY_PATH,
# which may legitimately be unset in the calling environment.)
set -eo pipefail

NIPYPE_BRANCH=${1:-master}
readonly NIPYPE_BRANCH

# "master" installs from the GitHub tarball (cheaper than a git clone);
# any other ref is installed through pip's git+ URL support.
case "$NIPYPE_BRANCH" in
  master)
    NIPYPE_URL="https://github.com/nipy/nipype/tarball/master"
    ;;
  *)
    NIPYPE_URL="git+https://github.com/nipy/nipype.git@${NIPYPE_BRANCH}"
    ;;
esac
readonly NIPYPE_URL
14 |
# The Docker and Singularity recipes share every neurodocker argument; only
# the output format and the trailing `--cmd` (Docker only) differ. The shared
# invocation lives in generate_recipe() so the two stay in sync.
#
# Arguments:
#   $1 - neurodocker output format: "docker" or "singularity"
#   $@ - extra neurodocker arguments appended after the common ones
# Outputs: the generated recipe on stdout.
generate_recipe() {
  local format=$1
  shift
  docker run --rm kaczmarj/neurodocker:master generate "$format" \
  --base neurodebian:stretch-non-free \
  --pkg-manager apt \
  --install convert3d ants fsl gcc g++ graphviz tree \
            git-annex-standalone vim emacs-nox nano less ncdu \
            tig git-annex-remote-rclone octave netbase \
  --add-to-entrypoint "source /etc/fsl/fsl.sh" \
  --spm12 version=r7219 \
  --user=neuro \
  --workdir /home/neuro \
  --miniconda \
    conda_install="python=3.8 pytest jupyter jupyterlab jupyter_contrib_nbextensions
                   traits pandas matplotlib scikit-learn scikit-image seaborn nbformat nb_conda" \
    pip_install="$NIPYPE_URL
                 pybids==0.13.1
                 nilearn datalad[full] nipy duecredit nbval niflow-nipype1-workflows" \
    create_env="neuro" \
    activate=True \
  --env LD_LIBRARY_PATH="/opt/miniconda-latest/envs/neuro:${LD_LIBRARY_PATH:-}" \
  --run-bash "source activate neuro && jupyter nbextension enable exercise2/main && jupyter nbextension enable spellchecker/main" \
  --user=root \
  --run 'mkdir /data && chmod 777 /data && chmod a+s /data' \
  --run 'mkdir /output && chmod 777 /output && chmod a+s /output' \
  --user=neuro \
  --run 'printf "[user]\n\tname = miykael\n\temail = michaelnotter@hotmail.com\n" > ~/.gitconfig' \
  --run-bash 'source activate neuro && cd /data && datalad install -r ///workshops/nih-2017/ds000114 && cd ds000114 && datalad update -r && datalad get -r sub-01/ses-test/anat sub-01/ses-test/func/*fingerfootlips*' \
  --run 'curl -L https://files.osf.io/v1/resources/fvuh8/providers/osfstorage/580705089ad5a101f17944a9 -o /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c.tar.gz && tar xf /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c.tar.gz -C /data/ds000114/derivatives/fmriprep/. && rm /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c.tar.gz && find /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c -type f -not -name ?mm_T1.nii.gz -not -name ?mm_brainmask.nii.gz -not -name ?mm_tpm*.nii.gz -delete' \
  --copy . "/home/neuro/nipype_tutorial" \
  --user=root \
  --run 'chown -R neuro /home/neuro/nipype_tutorial' \
  --run 'rm -rf /opt/conda/pkgs/*' \
  --user=neuro \
  --run 'mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py' \
  --workdir /home/neuro/nipype_tutorial \
  "$@"
}
# NOTE(review): the cleanup step removes /opt/conda/pkgs/*, but miniconda is
# installed under /opt/miniconda-latest elsewhere in this recipe — verify the
# cache path actually matches the install location.

# Generate Dockerfile (Docker images get a default command starting Jupyter).
generate_docker() {
  generate_recipe docker --cmd jupyter-notebook
}

# Generate Singularity recipe (neurodocker's singularity output takes no --cmd).
generate_singularity() {
  generate_recipe singularity
}

generate_docker > Dockerfile
generate_singularity > Singularity
94 |
--------------------------------------------------------------------------------
/notebooks/advanced_aws.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Using Nipype with Amazon Web Services (AWS)\n",
8 | "\n",
9 | "Several groups have been successfully using Nipype on AWS. This procedure\n",
10 | "involves setting a temporary cluster using StarCluster and potentially\n",
11 | "transferring files to/from S3. The latter is supported by Nipype through\n",
12 | "`DataSink` and `S3DataGrabber`."
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "metadata": {},
18 | "source": [
19 | "## Using DataSink with S3\n",
20 | "\n",
21 | "The `DataSink` class now supports sending output data directly to an AWS S3\n",
22 | "bucket. It does this through the introduction of several input attributes to the\n",
23 | "`DataSink` interface and by parsing the `base_directory` attribute. This class\n",
24 | "uses the [boto3](https://boto3.readthedocs.org/en/latest/) and\n",
25 | "[botocore](https://botocore.readthedocs.org/en/latest/) Python packages to\n",
26 | "interact with AWS. To configure the `DataSink` to write data to S3, the user must\n",
27 | "set the ``base_directory`` property to an S3-style filepath.\n",
28 | "\n",
29 | "For example:"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": null,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "from nipype.interfaces.io import DataSink\n",
39 | "ds = DataSink()\n",
40 | "ds.inputs.base_directory = 's3://mybucket/path/to/output/dir'"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "With the `\"s3://\"` prefix in the path, the `DataSink` knows that the output\n",
48 | "directory to send files is on S3 in the bucket `\"mybucket\"`. `\"path/to/output/dir\"`\n",
49 | "is the relative directory path within the bucket `\"mybucket\"` where output data\n",
50 | "will be uploaded to (***Note***: if the relative path specified contains folders that\n",
51 | "don’t exist in the bucket, the `DataSink` will create them). The `DataSink` treats\n",
52 | "the S3 base directory exactly as it would a local directory, maintaining support\n",
53 | "for containers, substitutions, subfolders, `\".\"` notation, etc. to route output\n",
54 | "data appropriately.\n",
55 | "\n",
56 | "There are four new attributes introduced with S3-compatibility: ``creds_path``,\n",
57 | "``encrypt_bucket_keys``, ``local_copy``, and ``bucket``."
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "metadata": {},
64 | "outputs": [],
65 | "source": [
66 | "ds.inputs.creds_path = '/home/neuro/aws_creds/credentials.csv'\n",
67 | "ds.inputs.encrypt_bucket_keys = True\n",
68 | "ds.local_copy = '/home/neuro/workflow_outputs/local_backup'"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "``creds_path`` is a file path where the user's AWS credentials file (typically\n",
76 | "a csv) is stored. This credentials file should contain the AWS access key id and\n",
77 | "secret access key and should be formatted as one of the following (these formats\n",
78 | "are how Amazon provides the credentials file by default when first downloaded).\n",
79 | "\n",
80 | "Root-account user:\n",
81 | "\n",
82 | "\tAWSAccessKeyID=ABCDEFGHIJKLMNOP\n",
83 | "\tAWSSecretKey=zyx123wvu456/ABC890+gHiJk\n",
84 | "\n",
85 | "IAM-user:\n",
86 | "\n",
87 | "\tUser Name,Access Key Id,Secret Access Key\n",
88 | "\t\"username\",ABCDEFGHIJKLMNOP,zyx123wvu456/ABC890+gHiJk\n",
89 | "\n",
90 | "The ``creds_path`` is necessary when writing files to a bucket that has\n",
91 | "restricted access (almost no buckets are publicly writable). If ``creds_path``\n",
92 | "is not specified, the DataSink will check the ``AWS_ACCESS_KEY_ID`` and\n",
93 | "``AWS_SECRET_ACCESS_KEY`` environment variables and use those values for bucket\n",
94 | "access.\n",
95 | "\n",
96 | "``encrypt_bucket_keys`` is a boolean flag that indicates whether to encrypt the\n",
97 | "output data on S3, using server-side AES-256 encryption. This is useful if the\n",
98 | "data being output is sensitive and one desires an extra layer of security on the\n",
99 | "data. By default, this is turned off.\n",
100 | "\n",
101 | "``local_copy`` is a string of the filepath where local copies of the output data\n",
102 | "are stored in addition to those sent to S3. This is useful if one wants to keep\n",
103 | "a backup version of the data stored on their local computer. By default, this is\n",
104 | "turned off.\n",
105 | "\n",
106 | "``bucket`` is a boto3 Bucket object that the user can use to overwrite the\n",
107 | "bucket specified in their ``base_directory``. This can be useful if one has to\n",
108 | "manually create a bucket instance on their own using special credentials (or\n",
109 | "using a mock server like [fakes3](https://github.com/jubos/fake-s3)). This is\n",
110 | "typically used for developers unit-testing the DataSink class. Most users do not\n",
111 | "need to use this attribute for actual workflows. This is an optional argument.\n",
112 | "\n",
113 | "Finally, the user needs only to specify the input attributes for any incoming\n",
114 | "data to the node, and the outputs will be written to their S3 bucket."
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "```python\n",
122 | "workflow.connect(inputnode, 'subject_id', ds, 'container')\n",
123 | "workflow.connect(realigner, 'realigned_files', ds, 'motion')\n",
124 | "```"
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {},
130 | "source": [
131 | "So, for example, outputs for `sub001`’s `realigned_file1.nii.gz` will be in:\n",
132 | "\n",
133 | " s3://mybucket/path/to/output/dir/sub001/motion/realigned_file1.nii.gz"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "## Using S3DataGrabber\n",
141 | "Coming soon..."
142 | ]
143 | }
144 | ],
145 | "metadata": {
146 | "kernelspec": {
147 | "display_name": "Python [default]",
148 | "language": "python",
149 | "name": "python3"
150 | },
151 | "language_info": {
152 | "codemirror_mode": {
153 | "name": "ipython",
154 | "version": 3
155 | },
156 | "file_extension": ".py",
157 | "mimetype": "text/x-python",
158 | "name": "python",
159 | "nbconvert_exporter": "python",
160 | "pygments_lexer": "ipython3",
161 | "version": "3.6.5"
162 | }
163 | },
164 | "nbformat": 4,
165 | "nbformat_minor": 2
166 | }
167 |
--------------------------------------------------------------------------------
/notebooks/advanced_interfaces_caching.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Interface caching\n",
8 | "\n",
9 | "This section details the interface-caching mechanism, exposed in the `nipype.caching` module."
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "## Interface caching: why and how\n",
17 | "\n",
18 | "* `Pipelines` (also called `workflows`) specify processing by an execution graph. This is useful because it opens the door to dependency checking and enables\n",
19 | " - to minimize recomputations, \n",
20 | " - to have the execution engine transparently deal with intermediate file manipulations.\n",
21 | "\n",
22 | " They, however, do not blend in well with arbitrary Python code, as they must rely on their own execution engine.\n",
23 | "\n",
24 | "\n",
25 | "* `Interfaces` give fine control of the execution of each step with a thin wrapper on the underlying software. As a result that can easily be inserted in Python code. \n",
26 | "\n",
27 | " However, they force the user to specify explicit input and output file names and cannot do any caching.\n",
28 | "\n",
29 | "This is why nipype exposes an intermediate mechanism, `caching` that provides transparent output file management and caching within imperative Python code rather than a workflow."
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "## A big picture view: using the [`Memory`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#memory) object\n",
37 | "\n",
38 | "nipype caching relies on the [`Memory`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#memory) class: it creates an\n",
39 | "execution context that is bound to a disk cache:"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "from nipype.caching import Memory\n",
49 | "mem = Memory(base_dir='.')"
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {},
55 | "source": [
    56 |     "Note that the caching directory is a subdirectory called `nipype_mem` of the given `base_dir`. This is done to avoid polluting the base directory.\n",
57 | "\n",
    58 |     "In the corresponding execution context, nipype interfaces can be turned into callables that can be used as functions using the [`Memory.cache`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#nipype.caching.memory.Memory.cache) method. For instance, if we want to run the fslmerge command on a set of files:"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "from nipype.interfaces import fsl\n",
68 | "fsl_merge = mem.cache(fsl.Merge)"
69 | ]
70 | },
71 | {
72 | "cell_type": "markdown",
73 | "metadata": {},
74 | "source": [
75 | "Note that the [`Memory.cache`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#nipype.caching.memory.Memory.cache) method takes interfaces **classes**, and not instances.\n",
76 | "\n",
77 | "The resulting `fsl_merge` object can be applied as a function to parameters, that will form the inputs of the `merge` fsl commands. Those inputs are given as keyword arguments, bearing the same name as the name in the inputs specs of the interface. In IPython, you can also get the argument list by using the `fsl_merge?` syntax to inspect the docs:"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "metadata": {},
83 | "source": [
84 | "```python\n",
85 | "In [3]: fsl_merge?\n",
86 | "String Form:PipeFunc(nipype.interfaces.fsl.utils.Merge,\n",
87 | " base_dir=/home/varoquau/dev/nipype/nipype/caching/nipype_mem)\n",
88 | "Namespace: Interactive\n",
89 | "File: /home/varoquau/dev/nipype/nipype/caching/memory.py\n",
90 | "Definition: fsl_merge(self, **kwargs)\n",
91 | "Docstring: Use fslmerge to concatenate images\n",
92 | "\n",
93 | "Inputs\n",
94 | "------\n",
95 | "\n",
96 | "Mandatory:\n",
97 | "dimension: dimension along which the file will be merged\n",
98 | "in_files: None\n",
99 | "\n",
100 | "Optional:\n",
101 | "args: Additional parameters to the command\n",
102 | "environ: Environment variables (default={})\n",
103 | "ignore_exception: Print an error message instead of throwing an exception in case the interface fails to run (default=False)\n",
104 | "merged_file: None\n",
105 | "output_type: FSL output type\n",
106 | "\n",
107 | "Outputs\n",
108 | "-------\n",
109 | "merged_file: None\n",
110 | "Class Docstring:\n",
111 | "...\n",
112 | "```"
113 | ]
114 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "Thus `fsl_merge` is applied to parameters as such:"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": null,
125 | "metadata": {},
126 | "outputs": [],
127 | "source": [
128 | "filepath = '/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz'\n",
129 | "\n",
130 | "results = fsl_merge(dimension='t', in_files=[filepath, filepath])"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | "The results are standard nipype nodes results. In particular, they expose an `outputs` attribute that carries all the outputs of the process, as specified by the docs."
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "metadata": {},
144 | "outputs": [],
145 | "source": [
146 | "results.outputs.merged_file"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "Finally, and most important, if the node is applied to the same input parameters, it is not computed, and the results are reloaded from the disk:"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "results = fsl_merge(dimension='t', in_files=[filepath, filepath])"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "metadata": {},
168 | "source": [
169 | "Once the [`Memory`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#memory) is set up and you are applying it to data, an important thing to keep in mind is that you are using up disk cache. It might be useful to clean it using the methods that [`Memory`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#memory) provides for this: [`Memory.clear_previous_runs`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#nipype.caching.memory.Memory.clear_previous_runs), [`Memory.clear_runs_since`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#nipype.caching.memory.Memory.clear_runs_since)."
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "metadata": {},
175 | "source": [
176 | "### Example\n",
177 | "\n",
178 | "A full-blown example showing how to stage multiple operations can be found in the [`caching_example.py`](http://nipype.readthedocs.io/en/latest/_downloads/howto_caching_example.py) file."
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "metadata": {},
184 | "source": [
185 | "## Usage patterns: working efficiently with caching\n",
186 | "\n",
187 | "The goal of the `caching` module is to enable writing plain Python code rather than workflows. Use it: instead of data grabber nodes, use for instance the `glob` module. To vary parameters, use `for` loops. To make reusable code, write Python functions.\n",
188 | "\n",
    189 |     "One good rule of thumb to respect is to avoid the usage of explicit filenames apart from the outermost inputs and outputs of your processing. The reason is that the caching mechanism of `nipype.caching` takes care of generating the unique hashes, ensuring that, when you vary parameters, files are not overridden by the output of different computations.\n",
190 | "\n",
191 | "
\n",
192 | "**Debugging**: \n",
193 | "If you need to inspect the running environment of the nodes, it may be useful to know where they were executed. With `nipype.caching`, you do not control this location as it is encoded by hashes. \n",
    194 |     "To find out where an operation has been persisted, simply look in its output variable: \n",
195 | "```out.runtime.cwd```\n",
196 | "
\n",
197 | "\n",
198 | "Finally, the more you explore different parameters, the more you risk creating cached results that will never be reused. Keep in mind that it may be useful to flush the cache using [`Memory.clear_previous_runs`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#nipype.caching.memory.Memory.clear_previous_runs) or [`Memory.clear_runs_since`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html#nipype.caching.memory.Memory.clear_runs_since)."
199 | ]
200 | },
201 | {
202 | "cell_type": "markdown",
203 | "metadata": {},
204 | "source": [
205 | "## API reference\n",
206 | "\n",
207 | "For more info about the API, go to [`caching.memory`](http://nipype.readthedocs.io/en/latest/api/generated/nipype.caching.memory.html)."
208 | ]
209 | }
210 | ],
211 | "metadata": {
212 | "kernelspec": {
213 | "display_name": "Python [default]",
214 | "language": "python",
215 | "name": "python3"
216 | },
217 | "language_info": {
218 | "codemirror_mode": {
219 | "name": "ipython",
220 | "version": 3
221 | },
222 | "file_extension": ".py",
223 | "mimetype": "text/x-python",
224 | "name": "python",
225 | "nbconvert_exporter": "python",
226 | "pygments_lexer": "ipython3",
227 | "version": "3.6.5"
228 | }
229 | },
230 | "nbformat": 4,
231 | "nbformat_minor": 2
232 | }
233 |
--------------------------------------------------------------------------------
/notebooks/advanced_mipav.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Using MIPAV, JIST, and CBS Tools\n",
8 | "\n",
9 | "If you are trying to use MIPAV, JIST or CBS Tools interfaces you need to configure CLASSPATH environmental variable correctly. It needs to include extensions shipped with MIPAV, MIPAV itself and MIPAV plugins.\n",
10 | "\n",
11 | "For example, in order to use the standalone MCR version of spm, you need to ensure that the following commands are executed at the beginning of your script:"
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {},
17 | "source": [
18 | "```\n",
19 | "# location of additional JAVA libraries to use\n",
20 | "JAVALIB=/Applications/mipav/jre/Contents/Home/lib/ext/\n",
21 | "\n",
22 | "# location of the MIPAV installation to use\n",
23 | "MIPAV=/Applications/mipav\n",
24 | "# location of the plugin installation to use\n",
25 | "# please replace 'ThisUser' by your user name\n",
26 | "PLUGINS=/Users/ThisUser/mipav/plugins\n",
27 | "\n",
28 | "export CLASSPATH=$JAVALIB/*:$MIPAV:$MIPAV/lib/*:$PLUGINS\n",
29 | "```"
30 | ]
31 | }
32 | ],
33 | "metadata": {
34 | "kernelspec": {
35 | "display_name": "Python [default]",
36 | "language": "python",
37 | "name": "python3"
38 | },
39 | "language_info": {
40 | "codemirror_mode": {
41 | "name": "ipython",
42 | "version": 3
43 | },
44 | "file_extension": ".py",
45 | "mimetype": "text/x-python",
46 | "name": "python",
47 | "nbconvert_exporter": "python",
48 | "pygments_lexer": "ipython3",
49 | "version": "3.6.5"
50 | }
51 | },
52 | "nbformat": 4,
53 | "nbformat_minor": 2
54 | }
55 |
--------------------------------------------------------------------------------
/notebooks/advanced_nipypecli.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Nipype Command Line Interface\n",
8 | "\n",
9 | "The Nipype Command Line Interface allows a variety of operations:"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "%%bash\n",
19 | "nipypecli"
20 | ]
21 | },
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {},
25 | "source": [
26 | "\n",
27 | "**Note**: These have replaced previous nipype command line tools such as `nipype_display_crash`, `nipype_crash_search`, `nipype2boutiques`, `nipype_cmd` and `nipype_display_pklz`.\n",
28 | "</div>"
29 | ]
30 | }
31 | ],
32 | "metadata": {
33 | "kernelspec": {
34 | "display_name": "Python [default]",
35 | "language": "python",
36 | "name": "python3"
37 | },
38 | "language_info": {
39 | "codemirror_mode": {
40 | "name": "ipython",
41 | "version": 3
42 | },
43 | "file_extension": ".py",
44 | "mimetype": "text/x-python",
45 | "name": "python",
46 | "nbconvert_exporter": "python",
47 | "pygments_lexer": "ipython3",
48 | "version": "3.6.5"
49 | }
50 | },
51 | "nbformat": 4,
52 | "nbformat_minor": 2
53 | }
54 |
--------------------------------------------------------------------------------
/notebooks/advanced_sphinx_ext.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Sphinx extensions\n",
8 | "\n",
9 | "To help users document their **Nipype**-based code, the software is shipped\n",
10 | "with a set of extensions (currently only one) to customize the appearance\n",
11 | "and simplify the generation process."
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {},
17 | "source": [
18 | "# `nipype.sphinxext.plot_workflow` - Workflow plotting extension\n",
19 | "\n",
20 | "A directive for including a nipype workflow graph in a Sphinx document.\n",
21 | "\n",
22 | "This code is forked from the plot_figure sphinx extension of matplotlib.\n",
23 | "\n",
24 | "By default, in HTML output, `workflow` will include a .png file with a link to a high-res .png. In LaTeX output, it will include a .pdf. The source code for the workflow may be included as **inline content** to the directive `workflow`:\n",
25 | "\n",
26 | " .. workflow ::\n",
27 | " :graph2use: flat\n",
28 | " :simple_form: no\n",
29 | "\n",
30 | " from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline\n",
31 | " wf = create_connectivity_pipeline()\n",
32 | " \n",
33 | "For example, the following graph has been generated inserting the previous code block in this documentation:\n",
34 | "\n",
35 | ""
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "### Options\n",
43 | "\n",
44 | "The ``workflow`` directive supports the following options:\n",
45 | "\n",
46 | "- `graph2use`: {`'hierarchical'`, `'colored'`, `'flat'`, `'orig'`, `'exec'`} \n",
47 | " Specify the type of graph to be generated.\n",
48 | "\n",
49 | "\n",
50 | "- `simple_form`: `bool` \n",
51 | " Whether the graph will be in detailed or simple form.\n",
52 | "\n",
53 | "\n",
54 | "- `format`: {`'python'`, `'doctest'`} \n",
55 | " Specify the format of the input\n",
56 | "\n",
57 | "\n",
58 | "- `include-source`: `bool` \n",
59 | " Whether to display the source code. The default can be changed using the `workflow_include_source` variable in conf.py\n",
60 | "\n",
61 | "\n",
62 | "- `encoding`: `str` \n",
63 | " If this source file is in a non-UTF8 or non-ASCII encoding, the encoding must be specified using the `:encoding:` option. The encoding will not be inferred using the ``-*- coding -*-`` metacomment.\n",
64 | "\n",
65 | "Additionally, this directive supports all of the options of the `image` directive, except for `target` (since workflow will add its own target). These include `alt`, `height`, `width`, `scale`, `align` and `class`."
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "### Configuration options\n",
73 | "\n",
74 | "The workflow directive has the following configuration options:\n",
75 | "\n",
76 | "- `graph2use` \n",
77 | " Select a graph type to use\n",
78 | "\n",
79 | "\n",
80 | "- `simple_form` \n",
81 | " determines if the node name shown in the visualization is either of the form nodename (package) when set to True or nodename.Class.package when set to False.\n",
82 | "\n",
83 | "\n",
84 | "- `wf_include_source` \n",
85 | " Default value for the include-source option\n",
86 | "\n",
87 | "\n",
88 | "- `wf_html_show_source_link` \n",
89 | " Whether to show a link to the source in HTML.\n",
90 | "\n",
91 | "\n",
92 | "- `wf_pre_code` \n",
93 | " Code that should be executed before each workflow.\n",
94 | "\n",
95 | "\n",
96 | "- `wf_basedir` \n",
97 | " Base directory, to which ``workflow::`` file names are relative to. (If None or empty, file names are relative to the directory where the file containing the directive is.)\n",
98 | "\n",
99 | "\n",
100 | "- `wf_formats` \n",
101 | " File formats to generate. List of tuples or strings: \n",
102 | " [(suffix, dpi), suffix, ...] \n",
103 | " that determine the file format and the DPI. For entries whose DPI was omitted, sensible defaults are chosen. When passing from the command line through sphinx_build the list should be passed as suffix:dpi,suffix:dpi, ....\n",
104 | "\n",
105 | "\n",
106 | "- `wf_html_show_formats` \n",
107 | " Whether to show links to the files in HTML.\n",
108 | "\n",
109 | "\n",
110 | "- `wf_rcparams` \n",
111 | " A dictionary containing any non-standard rcParams that should be applied before each workflow.\n",
112 | "\n",
113 | "\n",
114 | "- `wf_apply_rcparams` \n",
115 | " By default, rcParams are applied when `context` option is not used in a workflow directive. This configuration option overrides this behavior and applies rcParams before each workflow.\n",
116 | "\n",
117 | "\n",
118 | "- `wf_working_directory` \n",
119 | " By default, the working directory will be changed to the directory of the example, so the code can get at its data files, if any. Also, its path will be added to `sys.path` so it can import any helper modules sitting beside it. This configuration option can be used to specify a central directory (also added to `sys.path`) where data files and helper modules for all code are located.\n",
120 | "\n",
121 | "\n",
122 | "- `wf_template` \n",
123 | " Provide a customized template for preparing restructured text."
124 | ]
125 | }
126 | ],
127 | "metadata": {
128 | "kernelspec": {
129 | "display_name": "Python [default]",
130 | "language": "python",
131 | "name": "python3"
132 | },
133 | "language_info": {
134 | "codemirror_mode": {
135 | "name": "ipython",
136 | "version": 3
137 | },
138 | "file_extension": ".py",
139 | "mimetype": "text/x-python",
140 | "name": "python",
141 | "nbconvert_exporter": "python",
142 | "pygments_lexer": "ipython3",
143 | "version": "3.6.5"
144 | }
145 | },
146 | "nbformat": 4,
147 | "nbformat_minor": 2
148 | }
149 |
--------------------------------------------------------------------------------
/notebooks/advanced_spmmcr.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Using SPM with MATLAB Common Runtime (MCR)\n",
8 | "\n",
9 | "In order to use the standalone MCR version of spm, you need to ensure that the following commands are executed at the beginning of your script:"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "from nipype.interfaces import spm\n",
19 | "matlab_cmd = '/opt/spm12-r7219/run_spm12.sh /opt/matlabmcr-2010a/v713/ script'\n",
20 | "spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)"
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {},
26 | "source": [
27 | "You can test it by calling:"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "spm.SPMCommand().version"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {},
42 | "source": [
43 | "If you want to enforce the standalone MCR version of spm for nipype globally, you can do so by setting the following environment variables:\n",
44 | "\n",
45 | "- *`SPMMCRCMD`* \n",
46 | " Specifies the command to use to run the spm standalone MCR version. You may still override the command as described above.\n",
47 | "\n",
48 | "\n",
49 | "- *`FORCE_SPMMCR`* \n",
50 | " Set this to any value in order to enforce the use of spm standalone MCR version in nipype globally. Technically, this sets the `use_mcr` flag of the spm interface to True.\n",
51 | "\n",
52 | "Information about the MCR version of SPM8 can be found at: http://en.wikibooks.org/wiki/SPM/Standalone"
53 | ]
54 | }
55 | ],
56 | "metadata": {
57 | "kernelspec": {
58 | "display_name": "Python [default]",
59 | "language": "python",
60 | "name": "python3"
61 | },
62 | "language_info": {
63 | "codemirror_mode": {
64 | "name": "ipython",
65 | "version": 3
66 | },
67 | "file_extension": ".py",
68 | "mimetype": "text/x-python",
69 | "name": "python",
70 | "nbconvert_exporter": "python",
71 | "pygments_lexer": "ipython3",
72 | "version": "3.6.5"
73 | }
74 | },
75 | "nbformat": 4,
76 | "nbformat_minor": 2
77 | }
78 |
--------------------------------------------------------------------------------
/notebooks/basic_debug.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Debugging Nipype Workflows\n",
8 | "\n",
9 | "Throughout [Nipype](http://nipy.org/nipype/) we try to provide meaningful error messages. If you run into an error that does not have a meaningful error message please let us know so that we can improve error reporting.\n",
10 | "\n",
11 | "Here are some notes that may help to debug workflows or understanding performance issues.\n",
12 | "\n",
13 | "1. Always run your workflow first on a single iterable (e.g. subject) and\n",
14 | " gradually increase the execution distribution complexity (Linear->MultiProc-> \n",
15 | " SGE).\n",
16 | "\n",
17 | "- Use the debug config mode. This can be done by setting:\n",
18 | "\n",
19 | " ```python\n",
20 | " from nipype import config\n",
21 | " config.enable_debug_mode()\n",
22 | " ```\n",
23 | "\n",
24 | " as the first import of your nipype script.\n",
25 | " \n",
26 | " **Note:**\n",
27 | " - Turning on debug will rerun your workflows and will rerun them after debugging is turned off.\n",
28 | " - Turning on debug mode will also override log levels specified elsewhere, such as in the nipype configuration. \n",
29 | " - `workflow`, `interface` and `utils` loggers will all be set to level `DEBUG`.\n",
30 | " \n",
31 | "\n",
32 | "- There are several configuration options that can help with debugging.\n",
33 | " See [Configuration File](config_file.ipynb) for more details:\n",
34 | "\n",
35 | " keep_inputs\n",
36 | " remove_unnecessary_outputs\n",
37 | " stop_on_first_crash\n",
38 | " stop_on_first_rerun\n",
39 | "\n",
40 | "- When running in distributed mode on cluster engines, it is possible for a\n",
41 | " node to fail without generating a crash file in the crashdump directory. In\n",
42 | " such cases, it will store a crash file in the `batch` directory.\n",
43 | "\n",
44 | "- All Nipype crashfiles can be inspected with the `nipypecli crash`\n",
45 | " utility.\n",
46 | "\n",
47 | "- The `nipypecli search` command allows you to search for regular expressions\n",
48 | " in the tracebacks of the Nipype crashfiles within a log folder.\n",
49 | "\n",
50 | "- Nipype determines the hash of the input state of a node. If any input\n",
51 | " contains strings that represent files on the system path, the hash evaluation\n",
52 | " mechanism will determine the timestamp or content hash of each of those\n",
53 | " files. Thus any node with an input containing huge dictionaries (or lists) of\n",
54 | " file names can cause serious performance penalties.\n",
55 | "\n",
56 | "- For HUGE data processing, `stop_on_first_crash: False`, is needed to get the\n",
57 | " bulk of processing done, and then `stop_on_first_crash: True`, is needed for\n",
58 | " debugging and finding failing cases. Setting `stop_on_first_crash: False`\n",
59 | " is a reasonable option when you would expect 90% of the data to execute\n",
60 | " properly.\n",
61 | "\n",
62 | "- Sometimes nipype will hang as if nothing is going on and if you hit `Ctrl+C`\n",
63 | " you will get a `ConcurrentLogHandler` error. Simply remove the pypeline.lock\n",
64 | " file in your home directory and continue.\n",
65 | "\n",
66 | "- On many clusters with shared NFS mounts synchronization of files across\n",
67 | " clusters may not happen before the typical NFS cache timeouts. When using\n",
68 | " PBS/LSF/SGE/Condor plugins in such cases the workflow may crash because it\n",
69 | " cannot retrieve the node result. Setting the `job_finished_timeout` can help:\n",
70 | "\n",
71 | " ```python\n",
72 | " workflow.config['execution']['job_finished_timeout'] = 65\n",
73 | " ```"
74 | ]
75 | }
76 | ],
77 | "metadata": {
78 | "kernelspec": {
79 | "display_name": "Python [default]",
80 | "language": "python",
81 | "name": "python3"
82 | },
83 | "language_info": {
84 | "codemirror_mode": {
85 | "name": "ipython",
86 | "version": 3
87 | },
88 | "file_extension": ".py",
89 | "mimetype": "text/x-python",
90 | "name": "python",
91 | "nbconvert_exporter": "python",
92 | "pygments_lexer": "ipython3",
93 | "version": "3.6.5"
94 | }
95 | },
96 | "nbformat": 4,
97 | "nbformat_minor": 2
98 | }
99 |
--------------------------------------------------------------------------------
/notebooks/basic_function_interface.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Function Interface\n",
8 | "\n",
9 | "Satra once called the `Function` module, the \"do anything you want card\". Which is a perfect description. Because it allows you to put any code you want into an empty node, which you then can put in your workflow exactly where it needs to be.\n",
10 | "\n",
11 | "## A Simple Function Interface\n",
12 | "\n",
13 | "You might have already seen the `Function` module in the [example section in the Node tutorial](basic_nodes.ipynb#Example-of-a-simple-node). Let's take a closer look at it again."
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "The most important component of a working `Function` interface is a Python function. There are several ways to associate a function with a `Function` interface, but the most common way will involve functions you code yourself as part of your Nipype scripts. Consider the following function:"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "# Create a small example function\n",
30 | "def add_two(x_input):\n",
31 | " return x_input + 2"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "metadata": {},
37 | "source": [
38 | "This simple function takes a value, adds 2 to it, and returns that new value.\n",
39 | "\n",
40 | "Just as Nipype interfaces have inputs and outputs, Python functions have inputs, in the form of parameters or arguments, and outputs, in the form of their return values. When you define a Function interface object with an existing function, as in the case of ``add_two()`` above, you must pass the constructor information about the function's inputs, its outputs, and the function itself. For example,"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "# Import Node and Function module\n",
50 | "from nipype import Node, Function\n",
51 | "\n",
52 | "# Create Node\n",
53 | "addtwo = Node(Function(input_names=[\"x_input\"],\n",
54 | " output_names=[\"val_output\"],\n",
55 | " function=add_two),\n",
56 | " name='add_node')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "Then you can set the inputs and run just as you would with any other interface:"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": null,
69 | "metadata": {},
70 | "outputs": [],
71 | "source": [
72 | "addtwo.inputs.x_input = 4\n",
73 | "addtwo.run()"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "addtwo.result.outputs"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "You need to be careful that the name of the input parameter to the node is the same name as the input parameter to the function, i.e. `x_input`. But you don't have to specify `input_names` or `output_names`. You can also just use:"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "addtwo = Node(Function(function=add_two), name='add_node')\n",
99 | "addtwo.inputs.x_input = 8\n",
100 | "addtwo.run()"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "addtwo.result.outputs"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "## Using External Packages\n",
117 | "\n",
118 | "Chances are, you will want to write functions that do more complicated processing, particularly using the growing stack of Python packages geared towards neuroimaging, such as [Nibabel](http://nipy.org/nibabel/), [Nipy](http://nipy.org/), or [PyMVPA](http://www.pymvpa.org/).\n",
119 | "\n",
120 | "While this is completely possible (and, indeed, an intended use of the Function interface), it does come with one important constraint. The function code you write is executed in a standalone environment, which means that any external functions or classes you use have to be imported within the function itself:"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "def get_n_trs(in_file):\n",
130 | " import nibabel\n",
131 | " f = nibabel.load(in_file)\n",
132 | " return f.shape[-1]"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "Without explicitly importing Nibabel in the body of the function, this would fail.\n",
140 | "\n",
141 | "Alternatively, it is possible to provide a list of strings corresponding to the imports needed to execute a function as a parameter of the `Function` constructor. This allows for the use of external functions that do not import all external definitions inside the function body."
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "metadata": {},
147 | "source": [
148 | "## Advanced Use\n",
149 | "\n",
150 | "To use an existing function object (as we have been doing so far) with a Function interface, it must be passed to the constructor. However, it is also possible to dynamically set how a Function interface will process its inputs using the special ``function_str`` input.\n",
151 | "\n",
152 | "This input takes not a function object, but actually a single string that can be parsed to define a function. In the equivalent case to our example above, the string would be"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {},
159 | "outputs": [],
160 | "source": [
161 | "add_two_str = \"def add_two(val):\\n return val + 2\\n\""
162 | ]
163 | },
164 | {
165 | "cell_type": "markdown",
166 | "metadata": {},
167 | "source": [
168 | "Unlike when using a function object, this input can be set like any other, meaning that you could write a function that outputs different function strings depending on some run-time contingencies, and connect that output the ``function_str`` input of a downstream Function interface."
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {},
174 | "source": [
175 | "# Important - Function Nodes are closed environments\n",
176 | "\n",
177 | "There's only one trap that you should be aware of when using the `Function` module.\n",
178 | "\n",
179 | "If you want to use another module inside a function, you have to import it again inside the function. Let's take a look at the following example:"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {},
186 | "outputs": [],
187 | "source": [
188 | "from nipype import Node, Function\n",
189 | "\n",
190 | "# Create the Function object\n",
191 | "def get_random_array(array_shape):\n",
192 | "\n",
193 | " # Import random function\n",
194 | " from numpy.random import random\n",
195 | " \n",
196 | " return random(array_shape)\n",
197 | "\n",
198 | "# Create Function Node that executes get_random_array\n",
199 | "rndArray = Node(Function(input_names=[\"array_shape\"],\n",
200 | " output_names=[\"random_array\"],\n",
201 | " function=get_random_array),\n",
202 | " name='rndArray_node')\n",
203 | "\n",
204 | "# Specify the array_shape of the random array\n",
205 | "rndArray.inputs.array_shape = (3, 3)\n",
206 | "\n",
207 | "# Run node\n",
208 | "rndArray.run()\n",
209 | "\n",
210 | "# Print output\n",
211 | "print(rndArray.result.outputs)"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "Now, let's see what happens if we move the import of `random` outside the scope of `get_random_array`:"
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": null,
224 | "metadata": {},
225 | "outputs": [],
226 | "source": [
227 | "from nipype import Node, Function\n",
228 | "\n",
229 | "# Import random function\n",
230 | "from numpy.random import random\n",
231 | "\n",
232 | "\n",
233 | "# Create the Function object\n",
234 | "def get_random_array(array_shape):\n",
235 | " \n",
236 | " return random(array_shape)\n",
237 | "\n",
238 | "# Create Function Node that executes get_random_array\n",
239 | "rndArray = Node(Function(input_names=[\"array_shape\"],\n",
240 | " output_names=[\"random_array\"],\n",
241 | " function=get_random_array),\n",
242 | " name='rndArray_node')\n",
243 | "\n",
244 | "# Specify the array_shape of the random array\n",
245 | "rndArray.inputs.array_shape = (3, 3)\n",
246 | "\n",
247 | "# Run node\n",
248 | "try:\n",
249 | " rndArray.run()\n",
250 | "except Exception as err:\n",
251 | " print(err)\n",
252 | "else:\n",
253 | " raise"
254 | ]
255 | },
256 | {
257 | "cell_type": "markdown",
258 | "metadata": {},
259 | "source": [
260 | "As you can see, if we don't import `random` inside the scope of the function, we receive the following error:\n",
261 | "\n",
262 | " Exception raised while executing Node rndArray_node.\n",
263 | "\n",
264 | " Traceback (most recent call last):\n",
265 | " [...]\n",
266 | " File \"\", line 3, in get_random_array\n",
267 | " NameError: name 'random' is not defined"
268 | ]
269 | }
270 | ],
271 | "metadata": {
272 | "anaconda-cloud": {},
273 | "kernelspec": {
274 | "display_name": "Python [default]",
275 | "language": "python",
276 | "name": "python3"
277 | },
278 | "language_info": {
279 | "codemirror_mode": {
280 | "name": "ipython",
281 | "version": 3
282 | },
283 | "file_extension": ".py",
284 | "mimetype": "text/x-python",
285 | "name": "python",
286 | "nbconvert_exporter": "python",
287 | "pygments_lexer": "ipython3",
288 | "version": "3.6.5"
289 | }
290 | },
291 | "nbformat": 4,
292 | "nbformat_minor": 2
293 | }
294 |
--------------------------------------------------------------------------------
/notebooks/basic_graph_visualization.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Graph Visualization\n",
8 | "\n",
9 | "We've learned from the [Workflow](./basic_workflow.ipynb) tutorial that every Nipype workflow is a directed acyclic graph. Some workflow structures are easy to understand directly from the script and some others are too complex for that. Luckily, there is the ``write_graph`` method!\n",
10 | "\n",
11 | "## ``write_graph``\n",
12 | "\n",
13 | "**``write_graph``** allows us to visualize any workflow in five different ways:\n",
14 | "\n",
15 | "- **``orig``** - creates a top-level graph without expanding internal workflow nodes\n",
16 | "- **``flat``** - expands workflow nodes recursively\n",
17 | "- **``hierarchical``** - expands workflow nodes recursively with a notion on the hierarchy\n",
18 | "- **``colored``** - expands workflow nodes recursively with a notion on hierarchy in color\n",
19 | "- **``exec``** - expands workflows to depict iterables\n",
20 | "\n",
21 | "Which graph visualization should be used is chosen by the **``graph2use``** parameter.\n",
22 | "\n",
23 | "Additionally, we can also choose the format of the output file (png or svg) with the **``format``** parameter.\n",
24 | "\n",
25 | "A third parameter, called **``simple_form``** can be used to specify if the node names used in the graph should be of the form ***``nodename (package)``*** or ***``nodename.Class.package``***."
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "## Preparation\n",
33 | "\n",
34 | "Instead of creating a new workflow from scratch, let's just import one from the Nipype workflow library."
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "# Import the function to create an spm fmri preprocessing workflow\n",
44 | "from niflow.nipype1.workflows.fmri.spm import create_spm_preproc\n",
45 | "\n",
46 | "# Create the workflow object\n",
47 | "spmflow = create_spm_preproc()"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "metadata": {},
53 | "source": [
54 | "For a reason that will become clearer under the ``exec`` visualization, let's add an iternode at the beginning of the ``spmflow`` and connect them together under a new workflow, called ``metaflow``. The iternode will cause the workflow to be executed three times, once with the ``fwhm`` value set to 4, once set to 6 and once set to 8. For more about this see the [Iteration](./basic_iteration.ipynb) tutorial."
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "# Import relevant modules\n",
64 | "from nipype import IdentityInterface, Node, Workflow\n",
65 | "\n",
66 | "# Create an iternode that iterates over three different fwhm values\n",
67 | "inputNode = Node(IdentityInterface(fields=['fwhm']), name='iternode')\n",
68 | "inputNode.iterables = ('fwhm', [4, 6, 8])\n",
69 | "\n",
70 | "# Connect inputNode and spmflow in a workflow\n",
71 | "metaflow = Workflow(name='metaflow')\n",
72 | "metaflow.connect(inputNode, \"fwhm\", spmflow, \"inputspec.fwhm\")"
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {},
78 | "source": [
79 | "# ``orig`` graph\n",
80 | "\n",
81 | "This visualization gives us a basic overview of all the nodes and internal workflows in a workflow and shows in a simple way the dependencies between them."
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "# Write graph of type orig\n",
91 | "spmflow.write_graph(graph2use='orig', dotfilename='./graph_orig.dot')\n",
92 | "\n",
93 | "# Visualize graph\n",
94 | "from IPython.display import Image\n",
95 | "Image(filename=\"graph_orig.png\")"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "# ``flat`` graph\n",
103 | "\n",
104 | "This visualization gives us already more information about the internal structure of the ``spmflow`` workflow. As we can see, the internal workflow ``getmask`` from the ``orig`` visualization above was replaced by the individual nodes contained in this internal workflow."
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {},
111 | "outputs": [],
112 | "source": [
113 | "# Write graph of type flat\n",
114 | "spmflow.write_graph(graph2use='flat', dotfilename='./graph_flat.dot')\n",
115 | "\n",
116 | "# Visualize graph\n",
117 | "from IPython.display import Image\n",
118 | "Image(filename=\"graph_flat.png\")"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "# ``hierarchical`` graph\n",
126 | "\n",
127 | "To better appreciate this visualization, let's look at the ``metaflow`` workflow that has one hierarchical level more than the ``spmflow``.\n",
128 | "\n",
129 | "As you can see, this visualization makes it much clearer which elements of a workflow are nodes and which ones are internal workflows. Also, each connection is shown as an individual arrow, and not just represented by one single arrow between two nodes. Additionally, iternodes and mapnodes are visualized differently than normal nodes to make them pop out more."
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "# Write graph of type hierarchical\n",
139 | "metaflow.write_graph(graph2use='hierarchical', dotfilename='./graph_hierarchical.dot')\n",
140 | "\n",
141 | "# Visualize graph\n",
142 | "from IPython.display import Image\n",
143 | "Image(filename=\"graph_hierarchical.png\")"
144 | ]
145 | },
146 | {
147 | "cell_type": "markdown",
148 | "metadata": {},
149 | "source": [
150 | "# ``colored`` graph\n",
151 | "\n",
152 | "This visualization is almost the same as the ``hierarchical`` above. The only difference is that individual nodes and different hierarchy levels are color-coded differently."
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {},
159 | "outputs": [],
160 | "source": [
161 | "# Write graph of type colored\n",
162 | "metaflow.write_graph(graph2use='colored', dotfilename='./graph_colored.dot')\n",
163 | "\n",
164 | "# Visualize graph\n",
165 | "from IPython.display import Image\n",
166 | "Image(filename=\"graph_colored.png\")"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "# ``exec`` graph\n",
174 | "\n",
175 | "This visualization is the most different from the rest. Like the ``flat`` visualization, it depicts all individual nodes. But additionally, it drops the ``utility`` nodes from the workflow and expands workflows to depict iterables (can be seen in the ``detailed_graph`` visualization further down below)."
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "# Write graph of type exec\n",
185 | "metaflow.write_graph(graph2use='exec', dotfilename='./graph_exec.dot')\n",
186 | "\n",
187 | "# Visualize graph\n",
188 | "from IPython.display import Image\n",
189 | "Image(filename=\"graph_exec.png\")"
190 | ]
191 | },
192 | {
193 | "cell_type": "markdown",
194 | "metadata": {},
195 | "source": [
196 | "# Detailed graphs\n",
197 | "\n",
198 | "The ``orig``, ``flat`` and ``exec`` visualization also create a **detailed graph** whenever ``write_graph`` is executed. A detailed graph shows a node with not just the node name, but also with all its input and output parameters.\n",
199 | "\n",
200 | "## detailed ``flat`` graph\n",
201 | "\n",
202 | "For example, the detailed graph of the ``flat`` graph looks as follows:"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "from IPython.display import Image\n",
212 | "Image(filename=\"graph_flat_detailed.png\")"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "Such a visualization might be more complicated to read, but it gives you a complete overview of a workflow and all its components."
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {},
225 | "source": [
226 | "## detailed ``exec`` graph\n",
227 | "\n",
228 | "Now, if we look at the detailed graph of the ``exec`` visualization, we can see where the iteration takes place:"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": null,
234 | "metadata": {},
235 | "outputs": [],
236 | "source": [
237 | "from IPython.display import Image\n",
238 | "Image(filename=\"graph_exec_detailed.png\")"
239 | ]
240 | },
241 | {
242 | "cell_type": "markdown",
243 | "metadata": {},
244 | "source": [
245 | "In the middle left of the figure, we have three ``preproc.smooth`` nodes of the ``spm`` interface with the names \"a0\", \"a1\" and \"a2\". Those represent the three smoothing nodes with the ``fwhm`` parameter set to 4, 6 and 8. Now if those nodes would be connected to another workflow, this would mean that the workflow that follows would be depicted three times, each time for another input coming from the ``preproc.smooth`` node.\n",
246 | "\n",
247 | "Therefore, the **detailed ``exec``** visualization makes all individual execution elements very clear and allows us to see which elements can be executed in parallel."
248 | ]
249 | },
250 | {
251 | "cell_type": "markdown",
252 | "metadata": {},
253 | "source": [
254 | "# ``simple_form``\n",
255 | "\n",
256 | "Last but not least is the third ``write_graph`` argument, ``simple_form``. If this parameter is set to ``False``, this means that the node names in the visualization will be written in the form of ***``nodename.Class.package``***, instead of ***``nodename (package)``***. For example, let's look at the ``orig`` visualization with ``simple_form`` set to ``False``."
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "# Write graph of type orig\n",
266 | "spmflow.write_graph(graph2use='orig', dotfilename='./graph_orig_notSimple.dot', simple_form=False)\n",
267 | "\n",
268 | "# Visualize graph\n",
269 | "from IPython.display import Image\n",
270 | "Image(filename=\"graph_orig_notSimple.png\")"
271 | ]
272 | }
273 | ],
274 | "metadata": {
275 | "anaconda-cloud": {},
276 | "kernelspec": {
277 | "display_name": "Python 3",
278 | "language": "python",
279 | "name": "python3"
280 | },
281 | "language_info": {
282 | "codemirror_mode": {
283 | "name": "ipython",
284 | "version": 3
285 | },
286 | "file_extension": ".py",
287 | "mimetype": "text/x-python",
288 | "name": "python",
289 | "nbconvert_exporter": "python",
290 | "pygments_lexer": "ipython3",
291 | "version": "3.6.11"
292 | }
293 | },
294 | "nbformat": 4,
295 | "nbformat_minor": 2
296 | }
297 |
--------------------------------------------------------------------------------
/notebooks/basic_import_workflows.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Reusable workflows\n",
8 | "\n",
9 | "Nipype doesn't just allow you to create your own workflows. It also already comes with predefined workflows, developed by the community, for the community. For a full list of all workflows, look under the [Workflows](http://nipype.readthedocs.io/en/latest/documentation.html) section of the main homepage.\n",
10 | "\n",
11 | "But to give you a short overview, there are workflows about:\n",
12 | "\n",
13 | "**Functional MRI** workflows:\n",
14 | " - from **``fsl``** about ``resting state``, ``fixed_effects``, ``modelfit``, ``featreg``, ``susan_smooth`` and many more\n",
15 | " - from **``spm``** about ``DARTEL`` and ``VBM``\n",
16 | "\n",
17 | "**Structural MRI** workflows\n",
18 | " - from **``ants``** about ``ANTSBuildTemplate`` and ``antsRegistrationBuildTemplate``\n",
19 | " - from **``freesurfer``** about ``bem``, ``recon`` and tessellation\n",
20 | " \n",
21 | "**Diffusion** workflows:\n",
22 | " - from **``camino``** about ``connectivity_mapping``, ``diffusion`` and ``group_connectivity``\n",
23 | " - from **``dipy``** about ``denoise``\n",
24 | " - from **``fsl``** about ``artifacts``, ``dti``, ``epi``, ``tbss`` and many more\n",
25 | " - from **``mrtrix``** about ``connectivity_mapping``, ``diffusion`` and ``group_connectivity``"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "# How to load a workflow from the Nipype library\n",
33 | "\n",
34 | "Let's consider the example of a functional MRI workflow, that uses FSL's Susan algorithm to smooth some data. To load such a workflow, we only need the following command:"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "from niflow.nipype1.workflows.fmri.fsl.preprocess import create_susan_smooth\n",
44 | "smoothwf = create_susan_smooth()"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "Once a workflow is created, we need to make sure that the mandatory inputs are specified. To see which inputs we have to define, we can use the command:\n",
52 | "\n",
53 | "``create_susan_smooth?``\n",
54 | "\n",
55 | "Which gives us the output:\n",
56 | "\n",
57 | "```\n",
58 | "Create a SUSAN smoothing workflow\n",
59 | "\n",
60 | "Parameters\n",
61 | "----------\n",
62 | "Inputs:\n",
63 | " inputnode.in_files : functional runs (filename or list of filenames)\n",
64 | " inputnode.fwhm : fwhm for smoothing with SUSAN\n",
65 | " inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing)\n",
66 | "\n",
67 | "Outputs:\n",
68 | " outputnode.smoothed_files : functional runs (filename or list of filenames)\n",
69 | "```"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
76 | "As we can see, we also need a mask file. For the sake of convenience, let's take the mean image of a functional image and threshold it at the 50th percentile:"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "!fslmaths /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz \\\n",
86 | " -Tmean -thrP 50 /output/sub-01_ses-test_task-fingerfootlips_mask.nii.gz"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "metadata": {},
92 | "source": [
93 | "Now, we're ready to finish up our smooth workflow."
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "smoothwf.inputs.inputnode.in_files = '/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz'\n",
103 | "smoothwf.inputs.inputnode.mask_file = '/output/sub-01_ses-test_task-fingerfootlips_mask.nii.gz'\n",
104 | "smoothwf.inputs.inputnode.fwhm = 4\n",
105 | "smoothwf.base_dir = '/output'"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "metadata": {},
111 | "source": [
112 | "Before we run it, let's visualize the graph:"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": null,
118 | "metadata": {},
119 | "outputs": [],
120 | "source": [
121 | "from nilearn import plotting\n",
122 | "%matplotlib inline\n",
123 | "import matplotlib.pyplot as plt\n",
124 | "from IPython.display import Image\n",
125 | "smoothwf.write_graph(graph2use='colored', format='png', simple_form=True)\n",
126 | "Image(filename='/output/susan_smooth/graph.png')"
127 | ]
128 | },
129 | {
130 | "cell_type": "markdown",
131 | "metadata": {},
132 | "source": [
133 | "And we're ready to go:"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": [
142 | "smoothwf.run('MultiProc', plugin_args={'n_procs': 4})"
143 | ]
144 | },
145 | {
146 | "cell_type": "markdown",
147 | "metadata": {},
148 | "source": [
149 | "Once it's finished, we can look at the results:"
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {},
156 | "outputs": [],
157 | "source": [
158 | "%%bash\n",
159 | "fslmaths /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz -Tmean fmean.nii.gz\n",
160 | "fslmaths /output/susan_smooth/smooth/mapflow/_smooth0/sub-01_ses-test_task-fingerfootlips_bold_smooth.nii.gz \\\n",
161 | " -Tmean smean.nii.gz"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "metadata": {},
168 | "outputs": [],
169 | "source": [
170 | "from nilearn import image, plotting"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "plotting.plot_epi(\n",
180 | " 'fmean.nii.gz', title=\"mean (no smoothing)\", display_mode='z',\n",
181 | " cmap='gray', cut_coords=(-45, -30, -15, 0, 15));\n",
182 | "plotting.plot_epi(\n",
183 | " 'smean.nii.gz', title=\"mean (susan smoothed)\", display_mode='z',\n",
184 | " cmap='gray', cut_coords=(-45, -30, -15, 0, 15));"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "# Inspect inputs and outputs of a loaded or created workflow\n",
192 | "\n",
193 | "If you want to see a summary of all possible inputs and outputs of a given workflow, use the `_get_inputs()` and the `_get_outputs()` function."
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "metadata": {},
200 | "outputs": [],
201 | "source": [
202 | "# Show all possible inputs\n",
203 | "smoothwf._get_inputs()"
204 | ]
205 | },
206 | {
207 | "cell_type": "code",
208 | "execution_count": null,
209 | "metadata": {},
210 | "outputs": [],
211 | "source": [
212 | "# Show all possible outputs\n",
213 | "smoothwf._get_outputs()"
214 | ]
215 | },
216 | {
217 | "cell_type": "markdown",
218 | "metadata": {},
219 | "source": [
220 | "# How to change node parameters from existing workflows\n",
221 | "\n",
222 | "What if we want to change certain parameters of a loaded or already existing workflow? Let's first get the names of all the nodes in the workflow:"
223 | ]
224 | },
225 | {
226 | "cell_type": "code",
227 | "execution_count": null,
228 | "metadata": {},
229 | "outputs": [],
230 | "source": [
231 | "print(smoothwf.list_node_names())"
232 | ]
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {},
237 | "source": [
238 | "Ok. Hmm, what if we want to change the 'median' node, from 50% to 99%? For this, we first need to get the node."
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": null,
244 | "metadata": {},
245 | "outputs": [],
246 | "source": [
247 | "median = smoothwf.get_node('median')"
248 | ]
249 | },
250 | {
251 | "cell_type": "markdown",
252 | "metadata": {},
253 | "source": [
254 | "Now that we have the node, we can change its value as we want:"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": null,
260 | "metadata": {},
261 | "outputs": [],
262 | "source": [
263 | "median.inputs.op_string = '-k %s -p 99'"
264 | ]
265 | },
266 | {
267 | "cell_type": "markdown",
268 | "metadata": {},
269 | "source": [
270 | "And we can run the workflow again..."
271 | ]
272 | },
273 | {
274 | "cell_type": "code",
275 | "execution_count": null,
276 | "metadata": {},
277 | "outputs": [],
278 | "source": [
279 | "smoothwf.run('MultiProc', plugin_args={'n_procs': 4})"
280 | ]
281 | },
282 | {
283 | "cell_type": "markdown",
284 | "metadata": {},
285 | "source": [
286 | "And now the output is:"
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": null,
292 | "metadata": {},
293 | "outputs": [],
294 | "source": [
295 | "!fslmaths /output/susan_smooth/smooth/mapflow/_smooth0/sub-01_ses-test_task-fingerfootlips_bold_smooth.nii.gz \\\n",
296 | " -Tmean mmean.nii.gz"
297 | ]
298 | },
299 | {
300 | "cell_type": "code",
301 | "execution_count": null,
302 | "metadata": {},
303 | "outputs": [],
304 | "source": [
305 | "from nilearn import image, plotting"
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": null,
311 | "metadata": {},
312 | "outputs": [],
313 | "source": [
314 | "plotting.plot_epi(\n",
315 | " 'smean.nii.gz', title=\"mean (susan smooth)\", display_mode='z',\n",
316 | " cmap='gray', cut_coords=(-45, -30, -15, 0, 15))\n",
317 | "plotting.plot_epi(\n",
318 | " 'mmean.nii.gz', title=\"mean (smoothed, median=99%)\", display_mode='z',\n",
319 | " cmap='gray', cut_coords=(-45, -30, -15, 0, 15))"
320 | ]
321 | }
322 | ],
323 | "metadata": {
324 | "anaconda-cloud": {},
325 | "kernelspec": {
326 | "display_name": "Python 3",
327 | "language": "python",
328 | "name": "python3"
329 | },
330 | "language_info": {
331 | "codemirror_mode": {
332 | "name": "ipython",
333 | "version": 3
334 | },
335 | "file_extension": ".py",
336 | "mimetype": "text/x-python",
337 | "name": "python",
338 | "nbconvert_exporter": "python",
339 | "pygments_lexer": "ipython3",
340 | "version": "3.6.11"
341 | }
342 | },
343 | "nbformat": 4,
344 | "nbformat_minor": 2
345 | }
346 |
--------------------------------------------------------------------------------
/notebooks/basic_model_specification_fmri.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Model Specification for 1st-Level fMRI Analysis\n",
8 | "\n",
9 | "Nipype also provides an interface to create a first-level model for an fMRI analysis. Such a model is needed to specify the study-specific information, such as **conditions**, their **onsets**, and **durations**. For more information, make sure to check out [nipype.algorithms.modelgen](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.algorithms.modelgen.html)."
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "## General purpose model specification\n",
17 | "\n",
18 | "The `SpecifyModel` provides a generic mechanism for model specification. A mandatory input called `subject_info` provides paradigm specification for each run corresponding to a subject. This has to be in the form of a `Bunch` or a list of `Bunch` objects (one for each run). Each `Bunch` object contains the following attributes."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "### Required for most designs\n",
26 | "\n",
27 | "- **`conditions`** : list of names\n",
28 | "\n",
29 | "\n",
30 | "- **`onsets`** : lists of onsets corresponding to each condition\n",
31 | "\n",
32 | "\n",
33 | "- **`durations`** : lists of durations corresponding to each condition. Should be left to a single 0 if all events are being modeled as impulses."
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "metadata": {},
39 | "source": [
40 | "### Optional\n",
41 | "\n",
42 | "- **`regressor_names`**: list of names corresponding to each column. Should be None if automatically assigned.\n",
43 | "\n",
44 | "\n",
45 | "- **`regressors`**: list of lists. values for each regressor - must correspond to the number of volumes in the functional run\n",
46 | "\n",
47 | "\n",
48 | "- **`amplitudes`**: lists of amplitudes for each event. This will be ignored by SPM's Level1Design.\n",
49 | "\n",
50 | "\n",
51 | "The following two (`tmod`, `pmod`) will be ignored by any `Level1Design` class other than `SPM`:\n",
52 | "\n",
53 | "- **`tmod`**: lists of conditions that should be temporally modulated. Should default to None if not being used.\n",
54 | "\n",
55 | "- **`pmod`**: list of Bunch corresponding to conditions\n",
56 | " - `name`: name of parametric modulator\n",
57 | " - `param`: values of the modulator\n",
58 | " - `poly`: degree of modulation"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "Together with this information, one needs to specify:\n",
66 | "\n",
67 | "- whether the durations and event onsets are specified in terms of scan volumes or secs.\n",
68 | "\n",
69 | "- the high-pass filter cutoff,\n",
70 | "\n",
71 | "- the repetition time per scan\n",
72 | "\n",
73 | "- functional data files corresponding to each run.\n",
74 | "\n",
75 | "Optionally you can specify realignment parameters, outlier indices. Outlier files should contain a list of numbers, one per row indicating which scans should not be included in the analysis. The numbers are 0-based"
76 | ]
77 | },
78 | {
79 | "cell_type": "markdown",
80 | "metadata": {},
81 | "source": [
82 | "## Example\n",
83 | "\n",
84 | "An example Bunch definition:"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "metadata": {},
91 | "outputs": [],
92 | "source": [
93 | "from nipype.interfaces.base import Bunch\n",
94 | "condnames = ['Tapping', 'Speaking', 'Yawning']\n",
95 | "event_onsets = [[0, 10, 50],\n",
96 | " [20, 60, 80],\n",
97 | " [30, 40, 70]]\n",
98 | "durations = [[0],[0],[0]]\n",
99 | "\n",
100 | "subject_info = Bunch(conditions=condnames,\n",
101 | " onsets = event_onsets,\n",
102 | " durations = durations)"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": null,
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "subject_info"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "metadata": {},
117 | "source": [
118 | "## Input via textfile\n",
119 | "\n",
120 | "Alternatively, you can provide condition, onset, duration and amplitude\n",
121 | "information through event files. The event files have to be in 1, 2 or 3\n",
122 | "column format with the columns corresponding to Onsets, Durations and\n",
123 | "Amplitudes and they have to have the name event_name.run\n",
124 | "e.g.: `Words.run001.txt`.\n",
125 | " \n",
126 | "The event_name part will be used to create the condition names. `Words.run001.txt` may look like:\n",
127 | "\n",
128 | " # Word Onsets Durations\n",
129 | " 0 10\n",
130 | " 20 10\n",
131 | " ...\n",
132 | "\n",
133 | "or with amplitudes:\n",
134 | "\n",
135 | " # Word Onsets Durations Amplitudes\n",
136 | " 0 10 1\n",
137 | " 20 10 1\n",
138 | " ..."
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {},
144 | "source": [
145 | "## Example based on dataset\n",
146 | "\n",
147 | "Now let's look at a TSV file from our tutorial dataset."
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "!cat /data/ds000114/task-fingerfootlips_events.tsv"
157 | ]
158 | },
159 | {
160 | "cell_type": "markdown",
161 | "metadata": {},
162 | "source": [
163 | "We can also use [pandas](http://pandas.pydata.org/) to create a data frame from our dataset."
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": null,
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "import pandas as pd\n",
173 | "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
174 | "trialinfo.head()"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "metadata": {},
180 | "source": [
181 | "Before we can use the onsets, we first need to split them into the three conditions:"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "for group in trialinfo.groupby('trial_type'):\n",
191 | " print(group)"
192 | ]
193 | },
194 | {
195 | "cell_type": "markdown",
196 | "metadata": {},
197 | "source": [
198 | "The last thing we now need to do is to put this into a ``Bunch`` object and we're done:"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": null,
204 | "metadata": {},
205 | "outputs": [],
206 | "source": [
207 | "from nipype.interfaces.base import Bunch\n",
208 | "\n",
209 | "conditions = []\n",
210 | "onsets = []\n",
211 | "durations = []\n",
212 | "\n",
213 | "for group in trialinfo.groupby('trial_type'):\n",
214 | " conditions.append(group[0])\n",
215 | " onsets.append(group[1].onset.tolist())\n",
216 | " durations.append(group[1].duration.tolist())\n",
217 | "\n",
218 | "subject_info = Bunch(conditions=conditions,\n",
219 | " onsets=onsets,\n",
220 | " durations=durations)\n",
221 | "subject_info.items()"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "# Sparse model specification\n",
229 | "\n",
230 | "In addition to standard models, `SpecifySparseModel` allows model generation for sparse and sparse-clustered acquisition experiments. Details of the model generation and utility are provided in [Ghosh et al. (2009) OHBM 2009](https://www.researchgate.net/publication/242810827_Incorporating_hemodynamic_response_functions_to_improve_analysis_models_for_sparse-acquisition_experiments)"
231 | ]
232 | }
233 | ],
234 | "metadata": {
235 | "kernelspec": {
236 | "display_name": "Python [default]",
237 | "language": "python",
238 | "name": "python3"
239 | },
240 | "language_info": {
241 | "codemirror_mode": {
242 | "name": "ipython",
243 | "version": 3
244 | },
245 | "file_extension": ".py",
246 | "mimetype": "text/x-python",
247 | "name": "python",
248 | "nbconvert_exporter": "python",
249 | "pygments_lexer": "ipython3",
250 | "version": "3.6.5"
251 | }
252 | },
253 | "nbformat": 4,
254 | "nbformat_minor": 2
255 | }
256 |
--------------------------------------------------------------------------------
/notebooks/basic_nodes.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Nodes\n",
8 | "\n",
9 | "From the [Interface](basic_interfaces.ipynb) tutorial, you learned that interfaces are the core pieces of Nipype that run the code of your desire. But to streamline your analysis and to execute multiple interfaces in a sensible order, you have to put them in something that we call a ``Node``.\n",
10 | "\n",
11 | "In Nipype, a node is an object that executes a certain function. This function can be anything from a Nipype interface to a user-specified function or an external script. Each node consists of a name, an interface category and at least one input field, and at least one output field.\n",
12 | "\n",
13 | "Following is a simple node from the `utility` interface, with the name `name_of_node`, the input field `IN` and the output field `OUT`:\n",
14 | "\n",
15 | "\n",
16 | "\n",
17 | "Once you connect multiple nodes to each other, you create a directed graph. In Nipype we call such graphs either workflows or pipelines. Directed connections can only be established from an output field (below `node1_out`) of a node to an input field (below `node2_in`) of another node.\n",
18 | "\n",
19 | "\n",
20 | "\n",
21 | "This is all there is to Nipype. Connecting specific nodes with certain functions to other specific nodes with other functions. So let us now take a closer look at the different kind of nodes that exist and see when they should be used."
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "## Example of a simple node\n",
29 | "\n",
30 | "First, let us take a look at a simple stand-alone node. In general, a node consists of the following elements:\n",
31 | "\n",
32 | " nodename = Nodetype(interface_function(), name='labelname')\n",
33 | "\n",
34 | "- **nodename**: Variable name of the node in the python environment.\n",
35 | "- **Nodetype**: Type of node to be created. This can be a `Node`, `MapNode` or `JoinNode`.\n",
36 | "- **interface_function**: Function the node should execute. Can be user specific or coming from an `Interface`.\n",
37 | "- **labelname**: Label name of the node in the workflow environment (defines the name of the working directory)"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "Let us take a look at an example: For this, we need the `Node` module from Nipype, as well as the `Function` module. The second only serves a support function for this example. It isn't a prerequisite for a `Node`."
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "# Import Node and Function module\n",
54 | "from nipype import Node, Function\n",
55 | "\n",
56 | "# Create a small example function\n",
57 | "def add_two(x_input):\n",
58 | " return x_input + 2\n",
59 | "\n",
60 | "# Create Node\n",
61 | "addtwo = Node(Function(input_names=[\"x_input\"],\n",
62 | " output_names=[\"val_output\"],\n",
63 | " function=add_two),\n",
64 | " name='add_node')"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "As specified before, `addtwo` is the **nodename**, `Node` is the **Nodetype**, `Function(...)` is the **interface_function** and `add_node` is the **labelname** of the this node. In this particular case, we created an artificial input field, called `x_input`, an artificial output field called `val_output` and specified that this node should run the function `add_two()`.\n",
72 | "\n",
73 | "But before we can run this node, we need to declare the value of the input field `x_input`:"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "addtwo.inputs.x_input = 4"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "After all input fields are specified, we can run the node with `run()`:"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "addtwo.run()"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {},
105 | "outputs": [],
106 | "source": [
107 | "temp_res = addtwo.run()"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "temp_res.outputs"
117 | ]
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "metadata": {},
122 | "source": [
123 | "And what is the output of this node?"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "metadata": {},
130 | "outputs": [],
131 | "source": [
132 | "addtwo.result.outputs"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "## Example of a neuroimaging node\n",
140 | "\n",
141 | "Let's get back to the BET example from the [Interface](basic_interfaces.ipynb) tutorial. The only thing that differs from this example, is that we will put the ``BET()`` constructor inside a ``Node`` and give it a name."
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "# Import BET from the FSL interface\n",
151 | "from nipype.interfaces.fsl import BET\n",
152 | "\n",
153 | "# Import the Node module\n",
154 | "from nipype import Node\n",
155 | "\n",
156 | "# Create Node\n",
157 | "bet = Node(BET(frac=0.3), name='bet_node')"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "In the [Interface](basic_interfaces.ipynb) tutorial, we were able to specify the input file with the ``in_file`` parameter. This works exactly the same way in this case, where the interface is in a node. The only thing that we have to be careful about when we use a node is to specify where this node should be executed. This is only relevant for when we execute a node by itself, but not when we use them in a [Workflow](basic_workflow.ipynb)."
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": null,
170 | "metadata": {},
171 | "outputs": [],
172 | "source": [
173 | "in_file = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'\n",
174 | "\n",
175 | "# Specify node inputs\n",
176 | "bet.inputs.in_file = in_file\n",
177 | "bet.inputs.out_file = '/output/node_T1w_bet.nii.gz'"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "metadata": {},
184 | "outputs": [],
185 | "source": [
186 | "res = bet.run()"
187 | ]
188 | },
189 | {
190 | "cell_type": "markdown",
191 | "metadata": {},
192 | "source": [
193 | "As we know from the [Interface](basic_interfaces.ipynb) tutorial, the skull stripped output is stored under ``res.outputs.out_file``. So let's take a look at the before and the after:"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "metadata": {},
200 | "outputs": [],
201 | "source": [
202 | "from nilearn.plotting import plot_anat\n",
203 | "%matplotlib inline\n",
204 | "import matplotlib.pyplot as plt\n",
205 | "plot_anat(in_file, title='BET input', cut_coords=(10,10,10),\n",
206 | " display_mode='ortho', dim=-1, draw_cross=False, annotate=False);\n",
207 | "plot_anat(res.outputs.out_file, title='BET output', cut_coords=(10,10,10),\n",
208 | " display_mode='ortho', dim=-1, draw_cross=False, annotate=False);"
209 | ]
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "metadata": {},
214 | "source": [
215 | "### Exercise 1\n",
216 | "Define a `Node` for `IsotropicSmooth` (from `fsl`). Run the node for T1 image for one of the subjects."
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "metadata": {
223 | "solution2": "hidden",
224 | "solution2_first": true
225 | },
226 | "outputs": [],
227 | "source": [
228 | "# write your solution here"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": null,
234 | "metadata": {
235 | "solution2": "hidden"
236 | },
237 | "outputs": [],
238 | "source": [
239 | "# Import the Node module\n",
240 | "from nipype import Node\n",
241 | "# Import IsotropicSmooth from the FSL interface\n",
242 | "from nipype.interfaces.fsl import IsotropicSmooth\n",
243 | "\n",
244 | "# Define a node\n",
245 | "smooth_node = Node(IsotropicSmooth(), name=\"smoothing\")\n",
246 | "smooth_node.inputs.in_file = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'\n",
247 | "smooth_node.inputs.fwhm = 4\n",
248 | "smooth_node.inputs.out_file = '/output/node_T1w_smooth.nii.gz'\n",
249 | "smooth_res = smooth_node.run()"
250 | ]
251 | },
252 | {
253 | "cell_type": "markdown",
254 | "metadata": {},
255 | "source": [
256 | "### Exercise 2\n",
257 | "Plot the original image and the image after smoothing."
258 | ]
259 | },
260 | {
261 | "cell_type": "code",
262 | "execution_count": null,
263 | "metadata": {
264 | "solution2": "hidden",
265 | "solution2_first": true
266 | },
267 | "outputs": [],
268 | "source": [
269 | "# write your solution here"
270 | ]
271 | },
272 | {
273 | "cell_type": "code",
274 | "execution_count": null,
275 | "metadata": {
276 | "solution2": "hidden"
277 | },
278 | "outputs": [],
279 | "source": [
280 | "from nilearn.plotting import plot_anat\n",
281 | "%pylab inline\n",
282 | "plot_anat(smooth_node.inputs.in_file, title='smooth input', cut_coords=(10,10,10),\n",
283 | " display_mode='ortho', dim=-1, draw_cross=False, annotate=False);\n",
284 | "plot_anat(smooth_res.outputs.out_file, title='smooth output', cut_coords=(10,10,10),\n",
285 | " display_mode='ortho', dim=-1, draw_cross=False, annotate=False);"
286 | ]
287 | }
288 | ],
289 | "metadata": {
290 | "anaconda-cloud": {},
291 | "kernelspec": {
292 | "display_name": "Python [default]",
293 | "language": "python",
294 | "name": "python3"
295 | },
296 | "language_info": {
297 | "codemirror_mode": {
298 | "name": "ipython",
299 | "version": 3
300 | },
301 | "file_extension": ".py",
302 | "mimetype": "text/x-python",
303 | "name": "python",
304 | "nbconvert_exporter": "python",
305 | "pygments_lexer": "ipython3",
306 | "version": "3.6.5"
307 | }
308 | },
309 | "nbformat": 4,
310 | "nbformat_minor": 2
311 | }
312 |
--------------------------------------------------------------------------------
/notebooks/introduction_dataset.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "\n",
 8 | "BRAIN IMAGING\n",
 9 | "DATA STRUCTURE"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "The dataset for this tutorial is structured according to the [Brain Imaging Data Structure (BIDS)](http://bids.neuroimaging.io/). BIDS is a simple and intuitive way to organize and describe your neuroimaging and behavioral data. Neuroimaging experiments result in complicated data that can be arranged in many different ways. So far there is no consensus on how to organize and share data obtained in neuroimaging experiments. BIDS tackles this problem by suggesting a new standard for the arrangement of neuroimaging datasets."
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "The idea of BIDS is that the file and folder names follow a strict set of rules:\n",
24 | "\n",
25 | "\n"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "metadata": {},
31 | "source": [
32 | "Using the same structure for all of your studies will allow you to easily reuse all of your scripts between studies. But additionally, it also has the advantage that sharing code with and using scripts from other researchers will be much easier."
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {},
38 | "source": [
39 | "# Tutorial Dataset\n",
40 | "\n",
41 | "For this tutorial, we will be using a subset of the [fMRI dataset (ds000114)](https://openfmri.org/dataset/ds000114/) publicly available on [openfmri.org](https://openfmri.org). **If you're using the suggested Docker image you probably have all data needed to run the tutorial within the Docker container.**\n",
42 | "If you want to have data locally you can use [Datalad](http://datalad.org/) to download a subset of the dataset, via the [datalad repository](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds000114). In order to install the dataset with all subrepositories, you can run:"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "%%bash\n",
52 | "cd /data\n",
53 | "datalad install -r ///workshops/nih-2017/ds000114"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "In order to download data, you can use ``datalad get foldername`` command, to download all files in the folder ``foldername``. For this tutorial we only want to download part of the dataset, i.e. the anatomical and the functional `fingerfootlips` images:"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "%%bash\n",
70 | "cd /data/ds000114\n",
71 | "datalad get -J 4 derivatives/fmriprep/sub-*/anat/*preproc.nii.gz \\\n",
72 | " sub-01/ses-test/anat \\\n",
73 | " sub-*/ses-test/func/*fingerfootlips*"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "So let's have a look at the tutorial dataset."
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "!tree -L 4 /data/ds000114/"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "As you can see, for every subject we have one anatomical T1w image, five functional images, and one diffusion weighted image.\n",
97 | "\n",
98 | "**Note**: If you used `datalad` or `git annex` to get the dataset, you can see symlinks for the image files."
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {},
104 | "source": [
105 | "# Behavioral Task\n",
106 | "\n",
107 | "Subjects from the ds000114 dataset did five behavioral tasks. In our dataset two of them are included. \n",
108 | "\n",
109 | "The **motor task** consisted of ***finger tapping***, ***foot twitching*** and ***lip pouching*** interleaved with fixation at a cross.\n",
110 | "\n",
111 | "The **landmark task** was designed to mimic the ***line bisection task*** used in neurological practice to diagnose spatial hemineglect. Two conditions were contrasted, specifically judging if a horizontal line had been bisected exactly in the middle, versus judging if a horizontal line was bisected at all. More about the dataset and studies you can find [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3641991/).\n",
112 | "\n",
113 | "For each of the functional images above, we therefore also have a tab-separated values file (``tsv``), containing information such as stimuli onset, duration, type, etc. So let's have a look at one of them:"
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "metadata": {},
120 | "outputs": [],
121 | "source": [
122 | "%%bash\n",
123 | "cd /data/ds000114\n",
124 | "datalad get sub-01/ses-test/func/sub-01_ses-test_task-linebisection_events.tsv"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "!cat /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-linebisection_events.tsv"
134 | ]
135 | }
136 | ],
137 | "metadata": {
138 | "anaconda-cloud": {},
139 | "kernelspec": {
140 | "display_name": "Python [default]",
141 | "language": "python",
142 | "name": "python3"
143 | },
144 | "language_info": {
145 | "codemirror_mode": {
146 | "name": "ipython",
147 | "version": 3
148 | },
149 | "file_extension": ".py",
150 | "mimetype": "text/x-python",
151 | "name": "python",
152 | "nbconvert_exporter": "python",
153 | "pygments_lexer": "ipython3",
154 | "version": "3.6.5"
155 | }
156 | },
157 | "nbformat": 4,
158 | "nbformat_minor": 2
159 | }
160 |
--------------------------------------------------------------------------------
/notebooks/introduction_docker.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
 7 | "\n",
8 | "\n",
9 | "# Docker\n",
10 | "\n",
11 | "[Docker](https://www.docker.com) is an open-source project that automates the deployment of applications inside software containers. Those containers wrap up a piece of software in a complete filesystem that contains everything it needs to run: code, system tools, software libraries, such as Python, FSL, AFNI, SPM, FreeSurfer, ANTs, etc. This guarantees that it will always run the same, regardless of the environment it is running in.\n",
12 | "\n",
13 | "Important: **You don't need Docker to run Nipype on your system**. For Mac and Linux users, it probably is much simpler to install Nipype directly on your system. For more information on how to do this see the [Nipype website](resources_installation.ipynb). But for Windows users, or users that don't want to set up all the dependencies themselves, Docker is the way to go."
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "# Docker Image for the interactive Nipype Tutorial\n",
21 | "\n",
22 | "If you want to run this Nipype Tutorial with the example dataset locally on your own system, you need to use the docker image, provided under [miykael/nipype_tutorial](https://hub.docker.com/r/miykael/nipype_tutorial/). This docker image sets up a Linux environment on your system, with functioning Python, Nipype, FSL, ANTs and SPM12 software package, some example data, and all the tutorial notebooks to learn Nipype. Alternatively, you can also build your own docker image from Dockerfile or create a different Dockerfile using [Neurodocker](https://github.com/kaczmarj/neurodocker)."
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {},
28 | "source": [
29 | "# Install Docker\n",
30 | "\n",
31 | "Before you can do anything, you first need to install [Docker](https://www.docker.com) on your system. The installation process differs per system. Luckily, the docker homepage has nice instructions for...\n",
32 | "\n",
33 | " - [Ubuntu](https://docs.docker.com/engine/installation/linux/ubuntu/) or [Debian](https://docs.docker.com/engine/installation/linux/docker-ce/debian/)\n",
34 | " - [Windows 7/8/10](https://docs.docker.com/toolbox/toolbox_install_windows/) or [Windows 10Pro](https://docs.docker.com/docker-for-windows/install/)\n",
35 | " - [OS X (from El Capitan 10.11 on)](https://docs.docker.com/docker-for-mac/install/) or [OS X (before El Capitan 10.11)](https://docs.docker.com/toolbox/toolbox_install_mac/).\n",
36 | "\n",
37 | "Once Docker is installed, open up the docker terminal and test it works with the command:\n",
38 | "\n",
39 | " docker run hello-world\n",
40 | "\n",
41 | "**Note:** Linux users might need to use ``sudo`` to run ``docker`` commands or follow [post-installation steps](https://docs.docker.com/engine/installation/linux/linux-postinstall/)."
42 | ]
43 | },
44 | {
45 | "cell_type": "markdown",
46 | "metadata": {},
47 | "source": [
48 | "# Pulling the Docker image\n",
49 | "\n",
50 | "You can download various Docker images, but for this tutorial, we will suggest ``miykael/nipype_tutorial``:\n",
51 | "\n",
52 | " docker pull miykael/nipype_tutorial:latest\n",
53 | " \n",
54 | "Once it's done you can check available images on your system:\n",
55 | "\n",
56 | " docker images"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "# How to run the Docker image\n",
64 | "\n",
65 | "After installing docker on your system and making sure that the ``hello-world`` example was running, we are good to go to start the Nipype Tutorial image. The exact implementation is a bit different for Windows users, but the general commands look similar.\n",
66 | "\n",
67 | "The suggested Docker image, miykael/nipype_tutorial, already contains all tutorial notebooks and data used in the tutorial, so the simplest way to run container is:\n",
68 | "\n",
69 | " docker run -it --rm -p 8888:8888 miykael/nipype_tutorial jupyter notebook\n",
70 | " \n",
71 | "However, if you want to use your version of notebooks, save notebook outputs locally or use your local data, you can also mount your local directories, e.g.: \n",
72 | "\n",
73 | " docker run -it --rm -v /path/to/nipype_tutorial/:/home/neuro/nipype_tutorial -v /path/to/data/:/data -v /path/to/output/:/output -p 8888:8888 miykael/nipype_tutorial jupyter notebook\n",
74 | "\n",
75 | "But what do those flags mean?\n",
76 | "\n",
77 | "- The ``-it`` flag tells docker that it should open an interactive container instance.\n",
78 | "- The ``--rm`` flag tells docker that the container should automatically be removed after we close docker.\n",
79 | "- The ``-p`` flag specifies which port we want to make available for docker.\n",
80 | "- The ``-v`` flag tells docker which folders should be mount to make them accessible inside the container. Here: ``/path/to/nipype_tutorial`` is your local directory where you downloaded [Nipype Tutorial repository](https://github.com/miykael/nipype_tutorial/). ``/path/to/data/`` is a directory where you have dataset [``ds000114``](https://openfmri.org/dataset/ds000114/), and ``/path/to/output`` can be an empty directory that will be used for output. The second part of the ``-v`` flag (here: ``/home/neuro/nipype_tutorial``, ``/data`` or ``/output``) specifies under which path the mounted folders can be found inside the container. **Important**: To use the ``tutorial``, ``data`` and ``output`` folder, you first need to create them on your system!\n",
81 | "- ``miykael/nipype_tutorial`` tells docker which image you want to run.\n",
82 | "- ``jupyter notebook`` tells that you want to run directly the jupyter notebook command within the container. Alternatively, you can also use ``jupyter-lab``, ``bash`` or ``ipython``.\n",
83 | "\n",
84 | "**Note** that when you run this docker image without any further specification, it will prompt you a URL link in your terminal that you will need to copy-paste into your browser to get to the notebooks. "
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 | "## Run a docker image on Linux or Mac\n",
92 | "\n",
93 | "Running a docker image on a Linux or Mac OS is very simple. Make sure that the folders ``tutorial``, ``data``, and ``output`` exist. Then just open a new terminal and use the command from above. Once the docker image is downloaded, open the shown URL link in your browser and you are good to go. The URL will look something like:\n",
94 | "\n",
95 | " http://localhost:8888/?token=0312c1ef3b61d7a44ff5346d3d150c23249a548850e13868"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "## Run a docker image on Windows\n",
103 | "\n",
104 | "Running a docker image on Windows is a bit trickier than on Ubuntu. Assuming you've installed the DockerToolbox, open the Docker Quickstart Terminal. Once the docker terminal is ready (when you see the whale), execute the following steps (see also figure):\n",
105 | "\n",
106 | "1. We need to check the IP address of your docker machine. For this, use the command: \n",
107 | "\n",
108 | " ``docker-machine ip``\n",
109 | "\n",
110 | " In my case, this returned ``192.168.99.100``\n",
111 | "\n",
112 | "2. If you haven't already created a new folder to store your container output into, do so. You can create the folder either in the explorer as usual or do it with the command ``mkdir -p`` in the docker console. For example like this:\n",
113 | "\n",
114 | " ``mkdir -p /c/Users/username/output``\n",
115 | "\n",
116 | " Please replace ``username`` with the name of the current user on your system. **Pay attention** that the folder paths in the docker terminal are not a backslash (``\\``) as we usually have in Windows. Also, ``C:\\`` needs to be specified as ``/c/``.\n",
117 | "\n",
118 | "3. Now, we can run the container with the command from above:\n",
119 | "\n",
120 | " `` docker run -it --rm -v /c/Users/username/path/to/nipype_tutorial/:/home/neuro/nipype_tutorial -v /c/Users/username/path/to/data/:/data -v /c/Users/username/path/to/output/:/output -p 8888:8888 miykael/nipype_tutorial``\n",
121 | "\n",
122 | "4. Once the docker image is downloaded, it will show you a URL that looks something like this:\n",
123 | "\n",
124 | " ``http://localhost:8888/?token=0312c1ef3b61d7a44ff5346d3d150c23249a548850e13868``\n",
125 | " \n",
126 | " This URL will not work on a Windows system. To make it work, you need to replace the string ``localhost`` with the IP address of your docker machine, that we acquired under step 1. Afterward, your URL should look something like this:\n",
127 | "\n",
128 | " ``http://192.168.99.100:8888/?token=0312c1ef3b61d7a44ff5346d3d150c23249a548850e13868``\n",
129 | "\n",
130 | " Copy this link into your webbrowser and you're good to go!"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | "# Docker tips and tricks\n",
138 | "\n",
139 | "\n",
140 | "## Access Docker Container with ``bash`` or ``ipython``\n",
141 | "\n",
142 | "You don't have to open a jupyter notebook when you run ``miykael/nipype_tutorial``. You can also access the docker container directly with ``bash`` or ``ipython`` by adding it to the end of your command, i.e.:\n",
143 | "\n",
144 | " docker run -it --rm -v /path/to/nipype_tutorial/:/home/neuro/nipype_tutorial -v /path/to/data/:/data -v /path/to/output/:/output -p 8888:8888 miykael/nipype_tutorial bash\n",
145 | "\n",
146 | "This also works with other software commands, such as ``bet`` etc."
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "## Stop Docker Container\n",
154 | "\n",
155 | "To stop a running docker container, either close the docker terminal or select the terminal and use the ``Ctrl-C`` shortcut multiple times."
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "metadata": {},
161 | "source": [
162 | "## List all installed docker images\n",
163 | "\n",
164 | "To see a list of all installed docker images use:\n",
165 | "\n",
166 | " docker images"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "## Delete a specific docker image\n",
174 | "\n",
175 | "To delete a specific docker image, first use the ``docker images`` command to list all installed images and then use the ``IMAGE ID`` and the ``rmi`` instruction to delete the image:\n",
176 | "\n",
177 | " docker rmi -f 7d9495d03763"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "## Export and Import a docker image\n",
185 | "\n",
186 | "If you don't want to depend on an internet connection, you can also export an already downloaded docker image and then later on import it on another PC. To do so, use the following two commands:\n",
187 | "\n",
188 | "\n",
189 | " # Export docker image miykael/nipype_tutorial\n",
190 | " docker save -o nipype_tutorial.tar miykael/nipype_tutorial\n",
191 | "\n",
192 | " # Import docker image on another PC\n",
193 | " docker load --input nipype_tutorial.tar\n",
194 | " \n",
195 | "It might be possible that you run into administrator privileges issues because you ran your docker command with ``sudo``. This means that other users don't have access rights to ``nipype_tutorial.tar``. To avoid this, just change the rights of ``nipype_tutorial.tar`` with the command:\n",
196 | "\n",
197 | " sudo chmod 777 nipype_tutorial.tar"
198 | ]
199 | }
200 | ],
201 | "metadata": {
202 | "anaconda-cloud": {},
203 | "kernelspec": {
204 | "display_name": "Python [default]",
205 | "language": "python",
206 | "name": "python3"
207 | },
208 | "language_info": {
209 | "codemirror_mode": {
210 | "name": "ipython",
211 | "version": 3
212 | },
213 | "file_extension": ".py",
214 | "mimetype": "text/x-python",
215 | "name": "python",
216 | "nbconvert_exporter": "python",
217 | "pygments_lexer": "ipython3",
218 | "version": "3.6.5"
219 | }
220 | },
221 | "nbformat": 4,
222 | "nbformat_minor": 1
223 | }
224 |
--------------------------------------------------------------------------------
/notebooks/introduction_jupyter-notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "\n",
8 | "\n",
9 | "# Jupyter Notebook\n",
10 | "\n",
11 | "This notebook was adapted from https://github.com/oesteban/biss2016 and is originally based on https://github.com/jvns/pandas-cookbook.\n",
12 | "\n",
13 | "[Jupyter Notebook](http://jupyter.org/) started as a web application, based on [IPython](https://ipython.org/) that can run Python code directly in the webbrowser. Now, Jupyter Notebook can handle over 40 programming languages and is *the* interactive, open source web application to run any scientific code.\n",
14 | "\n",
15 | "You might also want to try a new Jupyter environment [JupyterLab](https://github.com/jupyterlab/jupyterlab). "
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "## How to run a cell\n",
23 | "\n",
24 | "First, we need to explain how to run cells. Try to run the cell below!"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": null,
30 | "metadata": {
31 | "collapsed": true
32 | },
33 | "outputs": [],
34 | "source": [
35 | "import pandas as pd\n",
36 | "\n",
37 | "print(\"Hi! This is a cell. Click on it and press the ▶ button above to run it\")"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "You can also run a cell with `Ctrl+Enter` or `Shift+Enter`. Experiment a bit with that."
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "## Tab Completion"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "One of the most useful things about Jupyter Notebook is its tab completion. \n",
59 | "\n",
60 | "Try this: click just after `read_csv(` in the cell below and press `Shift+Tab` 4 times, slowly. Note that if you're using JupyterLab you don't have an additional help box option."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {
67 | "collapsed": true
68 | },
69 | "outputs": [],
70 | "source": [
71 | "# NBVAL_SKIP\n",
72 | "# Use TAB completion for function info\n",
73 | "pd.read_csv("
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "After the first time, you should see this:\n",
81 | "\n",
82 | "\n",
83 | "\n",
84 | "After the second time:\n",
85 | "\n",
86 | "\n",
87 | "After the fourth time, a big help box should pop up at the bottom of the screen, with the full documentation for the `read_csv` function:\n",
88 | "\n",
89 | "\n",
90 | "I find this amazingly useful. I think of this as \"the more confused I am, the more times I should press `Shift+Tab`\".\n",
91 | "\n",
92 | "Okay, let's try tab completion for function names!"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {
99 | "collapsed": true
100 | },
101 | "outputs": [],
102 | "source": [
103 | "# NBVAL_SKIP\n",
104 | "# Use TAB completion to see possible function names\n",
105 | "pd.r"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "metadata": {},
111 | "source": [
112 | "You should see this:\n",
113 | "\n",
114 | ""
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "## Get Help\n",
122 | "\n",
123 | "There's an additional way on how you can reach the help box shown above after the fourth `Shift+Tab` press. Instead, you can also use `obj?` or `obj??` to get help or more help for an object."
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "metadata": {
130 | "collapsed": true
131 | },
132 | "outputs": [],
133 | "source": [
134 | "pd.read_csv?"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "## Writing code\n",
142 | "\n",
143 | "Writing code in the notebook is pretty normal."
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": null,
149 | "metadata": {
150 | "collapsed": true
151 | },
152 | "outputs": [],
153 | "source": [
154 | "def print_10_nums():\n",
155 | " for i in range(10):\n",
156 | " print(i)"
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": null,
162 | "metadata": {
163 | "collapsed": true
164 | },
165 | "outputs": [],
166 | "source": [
167 | "print_10_nums()"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "metadata": {},
173 | "source": [
174 | "If you messed something up and want to revert to an older version of the code in a cell, use `Ctrl+Z` to undo, or `Ctrl+Y` to redo.\n",
175 | "\n",
176 | "For a full list of all keyboard shortcuts, click on the small keyboard icon in the notebook header or click on `Help > Keyboard Shortcuts`."
177 | ]
178 | },
179 | {
180 | "cell_type": "markdown",
181 | "metadata": {},
182 | "source": [
183 | "## Saving a Notebook\n",
184 | "\n",
185 | "Jupyter Notebooks autosave, so you don't have to worry about losing code too much. At the top of the page you can usually see the current save status:\n",
186 | "\n",
187 | "- Last Checkpoint: 2 minutes ago (unsaved changes)\n",
188 | "- Last Checkpoint: a few seconds ago (autosaved)\n",
189 | "\n",
190 | "If you want to save a notebook on purpose, either click on `File > Save and Checkpoint` or press `Ctrl+S`."
191 | ]
192 | },
193 | {
194 | "cell_type": "markdown",
195 | "metadata": {},
196 | "source": [
197 | "## Magic functions"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "metadata": {},
203 | "source": [
204 | "IPython has all kinds of magic functions. Magic functions are prefixed by % or %%, and typically take their arguments without parentheses, quotes or even commas for convenience. Line magics take a single % and cell magics are prefixed with two %%.\n",
205 | "\n",
206 | "Some useful magic functions are:\n",
207 | "\n",
208 | "Magic Name | Effect\n",
209 | "---------- | -------------------------------------------------------------\n",
210 | "%env | Get, set, or list environment variables\n",
211 | "%pdb | Control the automatic calling of the pdb interactive debugger\n",
212 | "%pylab | Load numpy and matplotlib to work interactively\n",
213 | "%%debug | Activates debugging mode in cell\n",
214 | "%%html | Render the cell as a block of HTML\n",
215 | "%%latex | Render the cell as a block of latex\n",
216 | "%%sh | %%sh script magic\n",
217 | "%%time | Time execution of a Python statement or expression\n",
218 | "\n",
219 | "You can run `%magic` to get a list of magic functions or `%quickref` for a reference sheet."
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {},
225 | "source": [
226 | "### Example 1\n",
227 | "\n",
228 | "Let's see how long a specific command takes with `%time` or `%%time`:"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": null,
234 | "metadata": {
235 | "collapsed": true
236 | },
237 | "outputs": [],
238 | "source": [
239 | "%time result = sum([x for x in range(10**6)])"
240 | ]
241 | },
242 | {
243 | "cell_type": "markdown",
244 | "metadata": {},
245 | "source": [
246 | "### Example 2\n",
247 | "\n",
248 | "Let's use `%%latex` to render a block of latex"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": null,
254 | "metadata": {
255 | "collapsed": true
256 | },
257 | "outputs": [],
258 | "source": [
259 | "%%latex\n",
260 | "$$F(k) = \\int_{-\\infty}^{\\infty} f(x) e^{2\\pi i k} \\mathrm{d} x$$"
261 | ]
262 | }
263 | ],
264 | "metadata": {
265 | "anaconda-cloud": {},
266 | "kernelspec": {
267 | "display_name": "Python [default]",
268 | "language": "python",
269 | "name": "python3"
270 | },
271 | "language_info": {
272 | "codemirror_mode": {
273 | "name": "ipython",
274 | "version": 3
275 | },
276 | "file_extension": ".py",
277 | "mimetype": "text/x-python",
278 | "name": "python",
279 | "nbconvert_exporter": "python",
280 | "pygments_lexer": "ipython3",
281 | "version": "3.6.5"
282 | }
283 | },
284 | "nbformat": 4,
285 | "nbformat_minor": 1
286 | }
287 |
--------------------------------------------------------------------------------
/notebooks/introduction_neurodocker.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Neurodocker tutorial\n",
8 | "\n",
9 | "This page covers the steps to create containers with [Neurodocker](https://github.com/kaczmarj/neurodocker). [Neurodocker](https://github.com/kaczmarj/neurodocker) is a brilliant tool to create your own neuroimaging docker container. [Neurodocker](https://github.com/kaczmarj/neurodocker) is a command-line program that enables users to generate [Docker](http://www.docker.io/) containers and [Singularity](http://singularity.lbl.gov/) images that include neuroimaging software.\n",
10 | "\n",
11 | "Requirements:\n",
12 | "\n",
13 | "* [Docker](http://www.docker.io/) or [Singularity](http://singularity.lbl.gov/)\n",
14 | "* Internet connection"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "## Usage\n",
22 | "\n",
23 | "To view the Neurodocker help message\n",
24 | "\n",
25 | " docker run --rm kaczmarj/neurodocker:v0.4.0 generate [docker|singularity] --help\n",
26 | "\n",
27 | "**Note**: choose between ``docker`` and ``singularity`` in ``[docker|singularity]``.\n",
28 | "\n",
29 | "1. Users must specify a base Docker image and the package manager. Any Docker\n",
30 | " image on DockerHub can be used as your base image. Common base images\n",
31 | " include ``debian:stretch``, ``ubuntu:16.04``, ``centos:7``, and the various\n",
32 | " ``neurodebian`` images. If users would like to install software from the\n",
33 | " NeuroDebian repositories, it is recommended to use a ``neurodebian`` base\n",
34 | " image. The package manager is ``apt`` or ``yum``, depending on the base\n",
35 | " image.\n",
36 | "2. Next, users should configure the container to fit their needs. This includes\n",
37 | " installing neuroimaging software, installing packages from the chosen package\n",
38 | " manager, installing Python and Python packages, copying files from the local\n",
39 | " machine into the container, and other operations. The list of supported\n",
40 | " neuroimaging software packages is available in the ``neurodocker`` help\n",
41 | " message.\n",
42 | "3. The ``neurodocker`` command will generate a Dockerfile or Singularity recipe.\n",
43 | " The Dockerfile can be used with the ``docker build`` command to build a\n",
44 | " Docker image. The Singularity recipe can be used to build a Singularity\n",
45 | " container with the ``singularity build`` command."
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "metadata": {},
51 | "source": [
52 | "## Create a Dockerfile or Singularity recipe with FSL, Python 3.6, and Nipype\n",
53 | "\n",
54 | "This command prints a Dockerfile (the specification for a Docker image) or a\n",
55 | "Singularity recipe (the specification for a Singularity container) to the\n",
56 | "terminal.\n",
57 | "\n",
58 |     "    docker run --rm kaczmarj/neurodocker:v0.4.0 generate [docker|singularity] \\\n",
59 | " --base debian:stretch --pkg-manager apt \\\n",
60 | " --fsl version=5.0.10 \\\n",
61 | " --miniconda create_env=neuro \\\n",
62 | " conda_install=\"python=3.6 traits\" \\\n",
63 | " pip_install=\"nipype\""
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "## Build the Docker image\n",
71 | "\n",
72 | "The Dockerfile can be saved and used to build the Docker image\n",
73 | "\n",
74 | " docker run --rm kaczmarj/neurodocker:v0.4.0 generate docker \\\n",
75 | " --base debian:stretch --pkg-manager apt \\\n",
76 | " --fsl version=5.0.10 \\\n",
77 |     "        --miniconda create_env=neuro \\\n",
78 | " conda_install=\"python=3.6 traits\" \\\n",
79 | " pip_install=\"nipype\" > Dockerfile\n",
80 | "\n",
81 | " docker build --tag my_image .\n",
82 | " # or\n",
83 | " docker build --tag my_image - < Dockerfile"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "metadata": {},
89 | "source": [
90 | "## Build the Singularity container\n",
91 | "\n",
92 | "The Singularity recipe can be saved and used to build the Singularity container\n",
93 | "\n",
94 |     "    docker run --rm kaczmarj/neurodocker:v0.4.0 generate singularity \\\n",
95 | " --base debian:stretch --pkg-manager apt \\\n",
96 | " --fsl version=5.0.10 \\\n",
97 | " --miniconda create_env=neuro \\\n",
98 | " conda_install=\"python=3.6 traits\" \\\n",
99 | " pip_install=\"nipype\" > Singularity\n",
100 | "\n",
101 | " singularity build my_nipype.simg Singularity"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "## Use NeuroDebian\n",
109 | "\n",
110 | "This example installs AFNI and ANTs from the NeuroDebian repositories. It also\n",
111 | "installs ``git`` and ``vim``.\n",
112 | "\n",
113 | " docker run --rm kaczmarj/neurodocker:v0.4.0 generate [docker|singularity] \\\n",
114 | " --base neurodebian:stretch --pkg-manager apt \\\n",
115 | " --install afni ants git vim\n",
116 | "\n",
117 | "**Note**: the ``--install`` option will install software using the package manager.\n",
118 | "Because the NeuroDebian repositories are enabled in the chosen base image, AFNI\n",
119 | "and ANTs may be installed using the package manager. ``git`` and ``vim`` are\n",
120 | "available in the default repositories."
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "metadata": {},
126 | "source": [
127 | "## Other examples\n",
128 | "\n",
129 | "Create a container with ``dcm2niix``, Nipype, and jupyter notebook. Install\n",
130 | "Miniconda as a non-root user, and activate the Miniconda environment upon\n",
131 | "running the container.\n",
132 | "\n",
133 | " docker run --rm kaczmarj/neurodocker:v0.4.0 generate docker \\\n",
134 | " --base centos:7 --pkg-manager yum \\\n",
135 | " --dcm2niix version=master method=source \\\n",
136 | " --user neuro \\\n",
137 | " --miniconda create_env=neuro conda_install=\"jupyter traits nipype\" \\\n",
138 | " > Dockerfile\n",
139 | " docker build --tag my_nipype - < Dockerfile\n",
140 | "\n",
141 | "Copy local files into a container.\n",
142 | "\n",
143 | " docker run --rm kaczmarj/neurodocker:v0.4.0 generate [docker|singularity] \\\n",
144 | " --base ubuntu:16.04 --pkg-manager apt \\\n",
145 | " --copy relative/path/to/source.txt /absolute/path/to/destination.txt\n",
146 | " \n",
147 | "See the [Neurodocker examples page](https://github.com/kaczmarj/neurodocker/tree/master/examples/) for more."
148 | ]
149 | }
150 | ],
151 | "metadata": {
152 | "kernelspec": {
153 | "display_name": "Python [default]",
154 | "language": "python",
155 | "name": "python3"
156 | },
157 | "language_info": {
158 | "codemirror_mode": {
159 | "name": "ipython",
160 | "version": 3
161 | },
162 | "file_extension": ".py",
163 | "mimetype": "text/x-python",
164 | "name": "python",
165 | "nbconvert_exporter": "python",
166 | "pygments_lexer": "ipython3",
167 | "version": "3.6.5"
168 | }
169 | },
170 | "nbformat": 4,
171 | "nbformat_minor": 2
172 | }
173 |
--------------------------------------------------------------------------------
/notebooks/introduction_showcase.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Nipype Showcase\n",
8 | "\n",
9 | "What's all the hype about Nipype? Is it really that good? Short answer: Yes!\n",
10 | "\n",
11 | "Long answer: ... well, let's consider a very simple fMRI preprocessing workflow that just performs:\n",
12 | "1. slice time correction\n",
13 | "2. motion correction\n",
14 | "3. smoothing"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "# Preparing the preprocessing workflow"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "First, we need to import the main Nipype tools: `Node` and `Workflow`"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": null,
34 | "metadata": {},
35 | "outputs": [],
36 | "source": [
37 | "from nipype import Node, Workflow"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "Now, we can import the interfaces that we want to use for the preprocessing."
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "from nipype.interfaces.fsl import SliceTimer, MCFLIRT, Smooth"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "Next, we will put the three interfaces into a node and define the specific input parameters."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "# Initiate a node to correct for slice wise acquisition\n",
70 | "slicetimer = Node(SliceTimer(index_dir=False,\n",
71 | " interleaved=True,\n",
72 | " time_repetition=2.5),\n",
73 | " name=\"slicetimer\")"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "# Initiate a node to correct for motion\n",
83 | "mcflirt = Node(MCFLIRT(mean_vol=True,\n",
84 | " save_plots=True),\n",
85 | " name=\"mcflirt\")"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "# Initiate a node to smooth functional images\n",
95 | "smooth = Node(Smooth(fwhm=4), name=\"smooth\")"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "After creating the nodes, we can now create the preprocessing workflow."
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": null,
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "preproc01 = Workflow(name='preproc01', base_dir='.')"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "metadata": {},
117 | "source": [
118 | "Now, we can put all the nodes into this preprocessing workflow. We specify the data flow / execution flow of the workflow by connecting the corresponding nodes to each other."
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "preproc01.connect([(slicetimer, mcflirt, [('slice_time_corrected_file', 'in_file')]),\n",
128 | " (mcflirt, smooth, [('out_file', 'in_file')])])"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "metadata": {},
134 | "source": [
135 | "To better understand what we did we can write out the workflow graph and visualize it directly in this notebook."
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "metadata": {},
142 | "outputs": [],
143 | "source": [
144 | "preproc01.write_graph(graph2use='orig')"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": null,
150 | "metadata": {},
151 | "outputs": [],
152 | "source": [
153 | "# Visualize graph\n",
154 | "from IPython.display import Image\n",
155 | "Image(filename=\"preproc01/graph_detailed.png\")"
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "metadata": {},
161 | "source": [
162 | "# Run the workflow on one functional image\n",
163 | "\n",
164 | "Now, that we've created a workflow, let's run it on a functional image.\n",
165 | "\n",
166 | "For this, we first need to specify the input file of the very first node, i.e. the `slicetimer` node."
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": null,
172 | "metadata": {},
173 | "outputs": [],
174 | "source": [
175 | "slicetimer.inputs.in_file = '/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz'"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "metadata": {},
181 | "source": [
182 | "To show off Nipype's parallelization power, let's run the workflow in parallel, on 5 processors and let's show the execution time:"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "%time preproc01.run('MultiProc', plugin_args={'n_procs': 5})"
192 | ]
193 | },
194 | {
195 | "cell_type": "markdown",
196 | "metadata": {},
197 | "source": [
198 | "## Conclusion\n",
199 | "\n",
200 | "Nice, the whole execution took ~2min. But wait... The parallelization didn't really help.\n",
201 | "\n",
202 |     "That's true, but that's because there was no possibility to run the workflow in parallel. Each node depends on the output of the previous node."
203 | ]
204 | },
205 | {
206 | "cell_type": "markdown",
207 | "metadata": {},
208 | "source": [
209 | "# Results of `preproc01`\n",
210 | "\n",
211 | "So, what did we get? Let's look at the output folder `preproc01`:"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": null,
217 | "metadata": {},
218 | "outputs": [],
219 | "source": [
220 | "!tree preproc01 -I '*js|*json|*pklz|_report|*.dot|*html'"
221 | ]
222 | },
223 | {
224 | "cell_type": "markdown",
225 | "metadata": {},
226 | "source": [
227 | "# Rerunning of a workflow"
228 | ]
229 | },
230 | {
231 | "cell_type": "markdown",
232 | "metadata": {},
233 | "source": [
234 | "Now, for fun. Let's run the workflow again, but let's change the `fwhm` value of the Gaussian smoothing kernel to `2`."
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": null,
240 | "metadata": {},
241 | "outputs": [],
242 | "source": [
243 | "smooth.inputs.fwhm = 2"
244 | ]
245 | },
246 | {
247 | "cell_type": "markdown",
248 | "metadata": {},
249 | "source": [
250 | "And let's run the workflow again."
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": null,
256 | "metadata": {},
257 | "outputs": [],
258 | "source": [
259 | "%time preproc01.run('MultiProc', plugin_args={'n_procs': 5})"
260 | ]
261 | },
262 | {
263 | "cell_type": "markdown",
264 | "metadata": {},
265 | "source": [
266 | "## Conclusion\n",
267 | "\n",
268 | "Interesting, now it only took ~15s to execute the whole workflow again. **What happened?**\n",
269 | "\n",
270 |     "As you can see from the log above, Nipype didn't execute the two nodes `slicetimer` and `mcflirt` again. This is because their input values didn't change from the last execution. The `preproc01` workflow therefore only had to rerun the node `smooth`."
271 | ]
272 | },
273 | {
274 | "cell_type": "markdown",
275 | "metadata": {},
276 | "source": [
277 | "# Running a workflow in parallel"
278 | ]
279 | },
280 | {
281 | "cell_type": "markdown",
282 | "metadata": {},
283 | "source": [
284 | "Ok, ok... Rerunning a workflow again is faster. That's nice and all, but I want more. **You spoke of parallel execution!**\n",
285 | "\n",
286 | "We saw that the `preproc01` workflow takes about ~2min to execute completely. So, if we would run the workflow on five functional images, it should take about ~10min total. This, of course, assuming the execution will be done sequentially. Now, let's see how long it takes if we run it in parallel."
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": null,
292 | "metadata": {},
293 | "outputs": [],
294 | "source": [
295 | "# First, let's copy/clone 'preproc01'\n",
296 | "preproc02 = preproc01.clone('preproc02')\n",
297 | "preproc03 = preproc01.clone('preproc03')\n",
298 | "preproc04 = preproc01.clone('preproc04')\n",
299 | "preproc05 = preproc01.clone('preproc05')"
300 | ]
301 | },
302 | {
303 | "cell_type": "markdown",
304 | "metadata": {},
305 | "source": [
306 | "We now have five different preprocessing workflows. If we want to run them in parallel, we can put them all in another workflow."
307 | ]
308 | },
309 | {
310 | "cell_type": "code",
311 | "execution_count": null,
312 | "metadata": {},
313 | "outputs": [],
314 | "source": [
315 | "metaflow = Workflow(name='metaflow', base_dir='.')"
316 | ]
317 | },
318 | {
319 | "cell_type": "code",
320 | "execution_count": null,
321 | "metadata": {},
322 | "outputs": [],
323 | "source": [
324 | "# Now we can add the five preproc workflows to the bigger metaflow\n",
325 | "metaflow.add_nodes([preproc01, preproc02, preproc03,\n",
326 | " preproc04, preproc05])"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "**Note:** We now have a workflow (`metaflow`), that contains five other workflows (`preproc0?`), each of them containing three nodes.\n",
334 | "\n",
335 | "To better understand this, let's visualize this `metaflow`."
336 | ]
337 | },
338 | {
339 | "cell_type": "code",
340 | "execution_count": null,
341 | "metadata": {},
342 | "outputs": [],
343 | "source": [
344 | "# As before, let's write the graph of the workflow\n",
345 | "metaflow.write_graph(graph2use='flat')"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": null,
351 | "metadata": {},
352 | "outputs": [],
353 | "source": [
354 | "# And visualize the graph\n",
355 | "from IPython.display import Image\n",
356 | "Image(filename=\"metaflow/graph_detailed.png\")"
357 | ]
358 | },
359 | {
360 | "cell_type": "markdown",
361 | "metadata": {},
362 | "source": [
363 |     "Ah... so now we can see that the `metaflow` has potential for parallelization. So let's put it to the test"
364 | ]
365 | },
366 | {
367 | "cell_type": "code",
368 | "execution_count": null,
369 | "metadata": {
370 | "scrolled": false
371 | },
372 | "outputs": [],
373 | "source": [
374 | "%time metaflow.run('MultiProc', plugin_args={'n_procs': 5})"
375 | ]
376 | },
377 | {
378 | "cell_type": "markdown",
379 | "metadata": {},
380 | "source": [
381 | "This time we can see that Nipype uses all available processors.\n",
382 | "\n",
383 | "And if all went well, the total execution time should still be around ~2min.\n",
384 | "\n",
385 | "That's why Nipype is so amazing. The days of opening multiple SPMs, FSLs, AFNIs etc. are past!"
386 | ]
387 | },
388 | {
389 | "cell_type": "markdown",
390 | "metadata": {},
391 | "source": [
392 | "# Results of `metaflow`"
393 | ]
394 | },
395 | {
396 | "cell_type": "code",
397 | "execution_count": null,
398 | "metadata": {},
399 | "outputs": [],
400 | "source": [
401 | "!tree metaflow -I '*js|*json|*pklz|_report|*.dot|*html'"
402 | ]
403 | }
404 | ],
405 | "metadata": {
406 | "kernelspec": {
407 | "display_name": "Python [default]",
408 | "language": "python",
409 | "name": "python3"
410 | },
411 | "language_info": {
412 | "codemirror_mode": {
413 | "name": "ipython",
414 | "version": 3
415 | },
416 | "file_extension": ".py",
417 | "mimetype": "text/x-python",
418 | "name": "python",
419 | "nbconvert_exporter": "python",
420 | "pygments_lexer": "ipython3",
421 | "version": "3.6.5"
422 | }
423 | },
424 | "nbformat": 4,
425 | "nbformat_minor": 2
426 | }
427 |
--------------------------------------------------------------------------------
/notebooks/resources_help.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Where to find help"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 |     "## NeuroStars\n",
15 | "\n",
16 | "[NeuroStars.org](https://neurostars.org/) is a platform similar to StackOverflow but dedicated to neuroscience and neuroinformatics. If you have a problem or would like to ask a question about how to do something in Nipype please submit a question to [NeuroStars.org](https://neurostars.org/) with a nipype tag.\n",
17 | "\n",
18 | "All previous Nipype questions are available here: https://neurostars.org/tags/nipype"
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "## Gitter\n",
26 | "\n",
27 |     "[gitter.im](https://gitter.im/home/explore) stands under the motto 'where developers come to talk'. It is a place where developers exchange thoughts, opinions, ideas, and feedback about a specific piece of software. Nipype's gitter channel can be found under https://gitter.im/nipy/nipype. Use it to directly speak with the community."
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {},
33 | "source": [
34 | "## Github\n",
35 | "\n",
36 | "[github.com](https://github.com/nipy/nipype) is where the source code of Nipype is stored. Feel free to fork the repo and submit changes if you want. If you found a bug in the scripts or have a specific idea for changes, please open a new [issue](https://github.com/nipy/nipype/issues) and let the community help you."
37 | ]
38 | }
39 | ],
40 | "metadata": {
41 | "anaconda-cloud": {},
42 | "kernelspec": {
43 | "display_name": "Python [default]",
44 | "language": "python",
45 | "name": "python3"
46 | },
47 | "language_info": {
48 | "codemirror_mode": {
49 | "name": "ipython",
50 | "version": 3
51 | },
52 | "file_extension": ".py",
53 | "mimetype": "text/x-python",
54 | "name": "python",
55 | "nbconvert_exporter": "python",
56 | "pygments_lexer": "ipython3",
57 | "version": "3.6.5"
58 | }
59 | },
60 | "nbformat": 4,
61 | "nbformat_minor": 1
62 | }
63 |
--------------------------------------------------------------------------------
/notebooks/resources_installation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Download and install\n",
8 | "\n",
9 | "This page covers the necessary steps to install Nipype.\n",
10 | "\n",
11 | "# 1. Install Nipype\n",
12 | "\n",
13 | "Getting Nipype to run on your system is rather straightforward. And there are multiple ways to do the installation:"
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "## Using docker\n",
21 | "\n",
22 | "- You can follow the [Nipype tutorial](https://miykael.github.io/nipype_tutorial)\n",
23 | "\n",
24 | "\n",
25 | "- You can pull the `nipype/nipype` image from Docker hub:\n",
26 | "\n",
27 | " docker pull nipype/nipype\n",
28 | "\n",
29 |     "- You may also build custom docker containers with specific versions of software using [Neurodocker](https://github.com/kaczmarj/neurodocker) (see the [Neurodocker Tutorial](introduction_neurodocker.ipynb))."
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "## Using conda\n",
37 | "\n",
38 |     "If you have [conda](http://conda.pydata.org/docs/index.html), [miniconda](https://conda.io/miniconda.html) or [anaconda](https://www.continuum.io/why-anaconda) on your system, then installing Nipype can be done with just the following command:\n",
39 | "\n",
40 | " conda install --channel conda-forge nipype\n",
41 | "\n",
42 | "It is possible to list all of the versions of nipype available on your platform with:\n",
43 | "\n",
44 | " conda search nipype --channel conda-forge\n",
45 | "\n",
46 | "For more information, please see https://github.com/conda-forge/nipype-feedstock."
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 |     "## Using PyPI\n",
54 | "\n",
55 | "The installation process is similar to other Python packages.\n",
56 | "\n",
57 | "If you already have a Python environment set up, you can do:\n",
58 | "\n",
59 | " pip install nipype\n",
60 | "\n",
61 | "If you want to install all the optional features of ``nipype``, use the following command:\n",
62 | "\n",
63 | " pip install nipype[all]\n",
64 | "\n",
65 | "While `all` installs everything, one can also install select components as listed below:\n",
66 | "\n",
67 | "```python\n",
68 | "'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'],\n",
69 | "'tests': ['pytest-cov', 'codecov'],\n",
70 | "'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'],\n",
71 | "'profiler': ['psutil'],\n",
72 | "'duecredit': ['duecredit'],\n",
73 | "'xvfbwrapper': ['xvfbwrapper'],\n",
74 | "```"
75 | ]
76 | },
77 | {
78 | "cell_type": "markdown",
79 | "metadata": {},
80 | "source": [
81 | "## Debian and Ubuntu\n",
82 | "\n",
83 | "Add the [NeuroDebian](http://neuro.debian.org) repository and install the ``python-nipype`` package using ``apt-get`` or your favorite package manager:\n",
84 | "\n",
85 | " apt-get install python-nipype"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 | "## Using Github\n",
93 | "\n",
94 | "To make sure that you really have the newest version of Nipype on your system, you can run the `pip` command with a flag that points to the github repo:\n",
95 | "\n",
96 | " pip install git+https://github.com/nipy/nipype#egg=nipype"
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "## Mac OS X\n",
104 | "\n",
105 | "The easiest way to get nipype running on Mac OS X is to install [Miniconda](https://conda.io/miniconda.html) and follow the instructions above. If you have a non-conda environment you can install nipype by typing:\n",
106 | "\n",
107 | " pip install nipype\n",
108 | "\n",
109 | "Note that the above procedure may require the availability of gcc on your system path to compile the traits package."
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "## From source\n",
117 | "\n",
118 | "- The most recent release is found here: https://github.com/nipy/nipype/releases/latest\n",
119 | "\n",
120 | "\n",
121 | "- The development version: [[zip](http://github.com/nipy/nipype/zipball/master), [tar.gz](http://github.com/nipy/nipype/tarball/master)]\n",
122 | "\n",
123 | "\n",
124 | "- For previous versions: [prior downloads](http://github.com/nipy/nipype/tags)\n",
125 | "\n",
126 | "\n",
127 | "- If you downloaded the source distribution named something\n",
128 | "like ``nipype-x.y.tar.gz``, then unpack the tarball, change into the\n",
129 | "``nipype-x.y`` directory and install nipype using:\n",
130 | "\n",
131 | " pip install .\n",
132 | "\n",
133 | "**Note:** Depending on permissions you may need to use ``sudo``."
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "## Installation for developers\n",
141 | "\n",
142 | "Developers should start [here](http://nipype.readthedocs.io/en/latest/devel/testing_nipype.html).\n",
143 | "\n",
144 | "Developers can also use this docker container:\n",
145 | "\n",
146 | " docker pull nipype/nipype:master"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "# 2. Interface Dependencies\n",
154 | "\n",
155 |     "Nipype provides wrappers around many neuroimaging tools and contains some algorithms. These tools will need to be installed for Nipype to run. You can create containers with different versions of these tools installed using [Neurodocker](https://github.com/kaczmarj/neurodocker) (see the [Neurodocker Tutorial](introduction_neurodocker.ipynb))."
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "metadata": {},
161 | "source": [
162 | "# 3. Testing the install\n",
163 | "\n",
164 | "The best way to test the install is checking nipype's version and then running the tests:\n",
165 | "\n",
166 | "```python\n",
167 | "python -c \"import nipype; print(nipype.__version__)\"\n",
168 | "python -c \"import nipype; nipype.test(doctests=False)\"\n",
169 | "```\n",
170 | "\n",
171 | "The test will create a lot of output, but if all goes well you will see at the end something like this:\n",
172 | "\n",
173 | " ----------------------------------------------------------------------\n",
174 | " 2091 passed, 68 skipped, 7 xfailed, 1 warnings in 236.94 seconds\n",
175 | "\n",
176 | "The number of tests and time will vary depending on which interfaces you have installed on your system.\n",
177 | "\n",
178 |     "Don’t worry if some modules are being skipped or marked as xfailed. As long as no main modules cause any problems, you’re fine. But if you receive an OK, errors=0 and failures=0 then everything is ready."
179 | ]
180 | }
181 | ],
182 | "metadata": {
183 | "kernelspec": {
184 | "display_name": "Python [default]",
185 | "language": "python",
186 | "name": "python3"
187 | },
188 | "language_info": {
189 | "codemirror_mode": {
190 | "name": "ipython",
191 | "version": 3
192 | },
193 | "file_extension": ".py",
194 | "mimetype": "text/x-python",
195 | "name": "python",
196 | "nbconvert_exporter": "python",
197 | "pygments_lexer": "ipython3",
198 | "version": "3.6.5"
199 | }
200 | },
201 | "nbformat": 4,
202 | "nbformat_minor": 2
203 | }
204 |
--------------------------------------------------------------------------------
/notebooks/resources_resources.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Helpful Resources\n",
8 | "\n",
9 | "\n",
10 | "## Learn more about Nipype\n",
11 | "\n",
12 | "- [Nipype homepage](http://nipype.readthedocs.io/en/latest/): This is the best place to learn all you need to know about Nipype. For beginner's, I recommend to check out the [Quickstart](http://nipype.readthedocs.io/en/latest/quickstart.html) section.\n",
13 | "- [Beginner's Guide](http://miykael.github.io/nipype-beginner-s-guide/): This beginner's guide is an in-depth step by step tutorial to Nipype.\n",
14 | "\n",
15 | "\n",
16 | "## Neuroimaging\n",
17 | "\n",
18 | "- [fMRI 4 Newbies](http://www.fmri4newbies.com/tutorials/): A crash course in brain imaging.\n",
19 | "- [Neurostars.org](https://neurostars.org/): If you have any questions about Neuroinformatics, this is the place to go! \n",
20 | "- [Design efficiency in FMRI](http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency): A nice and detailed guide on how to design a good fMRI study.\n",
21 | "- [Questions and Answers in MRI](http://mriquestions.com/how-does-fmri-work.html): This is a great resource for anybody who wants to understand MRI and the physics behind it a bit better!\n",
22 | "\n",
23 | "\n",
24 | "## Open Science Resources\n",
25 | "\n",
26 | "[Aya Ben-Yakov](https://twitter.com/aya_ben_yakov) put together a very [nice and comprehensive list](http://www.mrc-cbu.cam.ac.uk/openscience/resources/) of various tools and resources about open and reproducible science.\n",
27 | "\n",
28 | "\n",
29 | "## Learn Python\n",
30 | "\n",
31 | "- [A Byte of Python](http://python.swaroopch.com/): A very nice introduction to Python in general.\n",
32 | "- [A Crash Course in Python for Scientists](http://nbviewer.jupyter.org/gist/rpmuller/5920182): a very good introduction to Python and scientific programming (e.g. Numpy, Scipy, Matplotlib)\n",
33 | "- [Codecademy - Python](https://www.codecademy.com/learn/python): An interactive online training and introduction to Python.\n",
34 | "- [Learn Python the Hard Way](http://learnpythonthehardway.org/book/index.html): A very good step by step introduction to Python.\n",
35 | "- [Python Scientific Lecture Notes](http://www.scipy-lectures.org/): A very good and more detailed introduction to Python and scientific programming.\n",
36 | "- If you're looking for a Python based IDE like Eclipse or MATLAB, check out [Pycharm](https://www.jetbrains.com/pycharm/) or [Spyder](https://github.com/spyder-ide/spyder/).\n",
37 | "- [Programming with Python](http://swcarpentry.github.io/python-novice-inflammation/): This short introduction by *software carpentry* teaches you the basics of scientific programming on very practical examples.\n",
38 | "\n",
39 | "\n",
40 | "## Learn Git\n",
41 | "\n",
42 | "- [Got 15 minutes and want to learn Git?](https://try.github.io/levels/1/challenges/1): Github's own git tutorial. It's fun and very short.\n",
43 | "- [Git Real](http://gitreal.codeschool.com/) on [Code School](https://www.codeschool.com/): An interactive tutorial about GIT\n",
44 | "- [Top 10 Git Tutorials for Beginners](http://sixrevisions.com/resources/git-tutorials-beginners/)\n",
45 | "\n",
46 | "\n",
47 | "## Learn Unix Shell\n",
48 | "\n",
49 | "- [the Unix Shell](http://swcarpentry.github.io/shell-novice/): If you're new to Linux, here's a quick starter guide by software carpentry that teaches you the basics."
50 | ]
51 | }
52 | ],
53 | "metadata": {
54 | "anaconda-cloud": {},
55 | "kernelspec": {
56 | "display_name": "Python [default]",
57 | "language": "python",
58 | "name": "python3"
59 | },
60 | "language_info": {
61 | "codemirror_mode": {
62 | "name": "ipython",
63 | "version": 3
64 | },
65 | "file_extension": ".py",
66 | "mimetype": "text/x-python",
67 | "name": "python",
68 | "nbconvert_exporter": "python",
69 | "pygments_lexer": "ipython3",
70 | "version": "3.6.5"
71 | }
72 | },
73 | "nbformat": 4,
74 | "nbformat_minor": 1
75 | }
76 |
--------------------------------------------------------------------------------
/notebooks/scripts/ANTS_registration.py:
--------------------------------------------------------------------------------
# ANTs-based spatial normalization script: registers each subject's
# anatomical T1w image to the MNI152 NLin asym 09c template (1mm) and
# collects the warped images and transforms via a DataSink.
# NOTE(review): intended to run inside the tutorial container — all paths
# below (/data, /output) assume that environment.

# Import modules
from os.path import join as opj
from nipype.interfaces.ants import Registration
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype import Workflow, Node
from nipype.interfaces.fsl import Info

# Specify variables
experiment_dir = '/output'      # base directory for DataSink output and working dir
output_dir = 'antsdir'          # DataSink container (subfolder of experiment_dir)
working_dir = 'workingdir'      # workflow execution scratch space
subject_list = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

# Location of template file
template = '/data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c/1mm_T1.nii.gz'
# or alternatively template = Info.standard_image('MNI152_T1_1mm.nii.gz')

# Registration - computes registration between subject's anatomy & the MNI template
# Three stages (transforms=['Rigid', 'Affine', 'SyN']); all list-valued
# parameters below are per-stage, e.g. metric MI/MI/CC and the three
# number_of_iterations schedules. Winsorizing clips intensity outliers
# outside the [0.005, 0.995] quantile range before matching.
# NOTE(review): terminal_output as a constructor argument is deprecated in
# newer nipype releases (moved to the node/interface level) — confirm
# against the pinned nipype version before upgrading.
antsreg = Node(Registration(args='--float',
                            collapse_output_transforms=True,
                            fixed_image=template,
                            initial_moving_transform_com=True,
                            num_threads=4,
                            output_inverse_warped_image=True,
                            output_warped_image=True,
                            sigma_units=['vox'] * 3,
                            transforms=['Rigid', 'Affine', 'SyN'],
                            terminal_output='file',
                            winsorize_lower_quantile=0.005,
                            winsorize_upper_quantile=0.995,
                            convergence_threshold=[1e-06],
                            convergence_window_size=[10],
                            metric=['MI', 'MI', 'CC'],
                            metric_weight=[1.0] * 3,
                            number_of_iterations=[[1000, 500, 250, 100],
                                                  [1000, 500, 250, 100],
                                                  [100, 70, 50, 20]],
                            radius_or_number_of_bins=[32, 32, 4],
                            sampling_percentage=[0.25, 0.25, 1],
                            sampling_strategy=['Regular', 'Regular', 'None'],
                            shrink_factors=[[8, 4, 2, 1]] * 3,
                            smoothing_sigmas=[[3, 2, 1, 0]] * 3,
                            transform_parameters=[(0.1,), (0.1,),
                                                  (0.1, 3.0, 0.0)],
                            use_histogram_matching=True,
                            write_composite_transform=True),
               name='antsreg')

###
# Input & Output Stream

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)
# {subject_id} is filled in per iteration from the infosource iterable.
anat_file = opj('sub-{subject_id}', 'ses-test', 'anat', 'sub-{subject_id}_ses-test_T1w.nii.gz')
templates = {'anat': anat_file}

selectfiles = Node(SelectFiles(templates,
                               base_directory='/data/ds000114'),
                   name="selectfiles")

# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")

# Use the following DataSink output substitutions
# Strips the '_subject_id_' prefix nipype adds to iterable output folders,
# so results land in e.g. antsdir/01/ instead of antsdir/_subject_id_01/.
substitutions = [('_subject_id_', '')]
datasink.inputs.substitutions = substitutions

###
# Specify Normalization Workflow & Connect Nodes

# Initiation of the ANTS normalization workflow
regflow = Workflow(name='regflow')
regflow.base_dir = opj(experiment_dir, working_dir)

# Connect workflow nodes
# infosource -> selectfiles: pick the subject; selectfiles -> antsreg: the
# subject's T1w is the moving image; antsreg -> datasink: keep warped
# images and (inverse) composite transforms under the 'antsreg' container.
regflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
                 (selectfiles, antsreg, [('anat', 'moving_image')]),
                 (antsreg, datasink, [('warped_image',
                                       'antsreg.@warped_image'),
                                      ('inverse_warped_image',
                                       'antsreg.@inverse_warped_image'),
                                      ('composite_transform',
                                       'antsreg.@transform'),
                                      ('inverse_composite_transform',
                                       'antsreg.@inverse_transform')]),
                 ])

###
# Run Workflow
regflow.write_graph(graph2use='flat')
# 'Linear' plugin: execute nodes one after another in the current process.
regflow.run('Linear')
99 |
--------------------------------------------------------------------------------
/notebooks/scripts/brainvolume.m:
--------------------------------------------------------------------------------
% Load the MAT-file into the workspace; it is expected to define the
% variable 'data' used below — TODO confirm the file's contents.
load input_image.mat;
% Count strictly positive voxels. No trailing semicolon, so MATLAB echoes
% 'total' to stdout — this echo is the script's output when run via nipype.
total = sum(data(:) > 0)
--------------------------------------------------------------------------------
/notebooks/scripts/transform.tfm:
--------------------------------------------------------------------------------
1 | #Insight Transform File V1.0
2 | #Transform 0
3 | Transform: AffineTransform_double_3_3
4 | Parameters: 1.02009654 -0.00984231 0.00283729 -0.24555664 0.91639648 0.32458515 -0.01980156 -0.00296066 0.98863359 1.79024059 -13.02945168 -1.34438656
5 | FixedParameters: -3.37801369 17.43375029 8.46811160
6 |
--------------------------------------------------------------------------------
/notebooks/wip_nipype_cmd.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Running Nipype Interfaces from the command line (nipype_cmd)"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "The primary use of [Nipype](http://nipy.org/nipype/) is to build automated non-interactive pipelines.\n",
15 | "However, sometimes there is a need to run some interfaces quickly from the command line.\n",
16 | "This is especially useful when running Interfaces wrapping code that does not have\n",
17 | "command line equivalents (nipy or SPM). Being able to run Nipype interfaces opens new\n",
18 | "possibilities such as the inclusion of SPM processing steps in bash scripts.\n",
19 | "\n",
20 | "To run Nipype Interfaces you need to use the nipype_cmd tool that should already be installed.\n",
21 | "The tool allows you to list Interfaces available in a certain package:"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | " $nipype_cmd nipype.interfaces.nipy\n",
29 | "\n",
30 | " Available Interfaces:\n",
31 | " SpaceTimeRealigner\n",
32 | " Similarity\n",
33 | " ComputeMask\n",
34 | " FitGLM\n",
35 | " EstimateContrast"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "After selecting a particular Interface you can learn what inputs it requires:"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {},
48 | "source": [
49 | " $nipype_cmd nipype.interfaces.nipy ComputeMask --help\n",
50 | "\n",
51 | "\tusage:nipype_cmd nipype.interfaces.nipy ComputeMask [-h] [--M M] [--cc CC]\n",
52 | "\t [--ignore_exception IGNORE_EXCEPTION]\n",
53 | "\t [--m M]\n",
54 | "\t [--reference_volume REFERENCE_VOLUME]\n",
55 | "\t mean_volume\n",
56 | "\n",
57 | "\tRun ComputeMask\n",
58 | "\n",
59 | "\tpositional arguments:\n",
60 | "\t mean_volume mean EPI image, used to compute the threshold for the\n",
61 | "\t mask\n",
62 | "\n",
63 | "\toptional arguments:\n",
64 | "\t -h, --help show this help message and exit\n",
65 | "\t --M M upper fraction of the histogram to be discarded\n",
66 | "\t --cc CC Keep only the largest connected component\n",
67 | "\t --ignore_exception IGNORE_EXCEPTION\n",
68 | "\t Print an error message instead of throwing an\n",
69 | "\t exception in case the interface fails to run\n",
70 | "\t --m M lower fraction of the histogram to be discarded\n",
71 | "\t --reference_volume REFERENCE_VOLUME\n",
72 | "\t reference volume used to compute the mask. If none is\n",
73 | "\t give, the mean volume is used."
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "Finally, you can run the Interface:"
81 | ]
82 | },
83 | {
84 | "cell_type": "markdown",
85 | "metadata": {},
86 | "source": [
87 | "\t$nipype_cmd nipype.interfaces.nipy ComputeMask mean.nii.gz"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "All that from the command line without having to start python interpreter manually."
95 | ]
96 | }
97 | ],
98 | "metadata": {
99 | "kernelspec": {
100 | "display_name": "Python [default]",
101 | "language": "python",
102 | "name": "python3"
103 | },
104 | "language_info": {
105 | "codemirror_mode": {
106 | "name": "ipython",
107 | "version": 3
108 | },
109 | "file_extension": ".py",
110 | "mimetype": "text/x-python",
111 | "name": "python",
112 | "nbconvert_exporter": "python",
113 | "pygments_lexer": "ipython3",
114 | "version": "3.6.5"
115 | }
116 | },
117 | "nbformat": 4,
118 | "nbformat_minor": 2
119 | }
120 |
--------------------------------------------------------------------------------
/notebooks/wip_saving_workflows.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Saving Workflows and Nodes to a file (experimental)\n",
8 | "\n",
9 | "On top of the standard way of saving (i.e. serializing) objects in Python\n",
10 | "(see [pickle](http://docs.python.org/2/library/pickle.html)) Nipype\n",
11 | "provides methods to turn Workflows and nodes into human readable code.\n",
12 | "This is useful if you want to save a Workflow that you have generated\n",
13 | "on the fly for future use.\n",
14 | "\n",
15 | "# Example 1\n",
16 | "\n",
17 | "Let's first create a workflow:"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "from nipype.interfaces.fsl import BET, ImageMaths\n",
27 | "from nipype import Workflow, Node, MapNode\n",
28 | "from nipype.interfaces.utility import Function, IdentityInterface\n",
29 | "\n",
30 | "bet = Node(BET(), name='bet')\n",
31 | "bet.iterables = ('frac', [0.3, 0.4])\n",
32 | "\n",
33 | "bet2 = MapNode(BET(), name='bet2', iterfield=['infile'])\n",
34 | "bet2.iterables = ('frac', [0.4, 0.5])\n",
35 | "\n",
36 | "maths = Node(ImageMaths(), name='maths')\n",
37 | "\n",
38 | "def testfunc(in1):\n",
39 | " \"\"\"dummy func\n",
40 | " \"\"\"\n",
41 | " out = in1 + 'foo' + \"out1\"\n",
42 | " return out\n",
43 | "\n",
44 | "funcnode = Node(Function(input_names=['a'], output_names=['output'], function=testfunc),\n",
45 | " name='testfunc')\n",
46 | "funcnode.inputs.in1 = '-sub'\n",
47 | "func = lambda x: x\n",
48 | "\n",
49 | "inode = Node(IdentityInterface(fields=['a']), name='inode')\n",
50 | "\n",
51 | "wf = Workflow('testsave')\n",
52 | "wf.add_nodes([bet2])\n",
53 | "wf.connect(bet, 'mask_file', maths, 'in_file')\n",
54 | "wf.connect(bet2, ('mask_file', func), maths, 'in_file2')\n",
55 | "wf.connect(inode, 'a', funcnode, 'in1')\n",
56 | "wf.connect(funcnode, 'output', maths, 'op_string')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "To generate and export the Python code of this Workflow, we can use the `export` method:"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": null,
69 | "metadata": {},
70 | "outputs": [],
71 | "source": [
72 | "wf.export('special_workflow.py')"
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {},
78 | "source": [
79 | "This will create a file `special_workflow.py` with the following content:"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "from nipype import Workflow, Node, MapNode\n",
89 | "from nipype.interfaces.utility import IdentityInterface\n",
90 | "from nipype.interfaces.utility import Function\n",
91 | "from nipype.utils.functions import getsource\n",
92 | "from nipype.interfaces.fsl.preprocess import BET\n",
93 | "from nipype.interfaces.fsl.utils import ImageMaths\n",
94 | "# Functions\n",
95 | "func = lambda x: x\n",
96 | "# Workflow\n",
97 | "testsave = Workflow(\"testsave\")\n",
98 | "# Node: testsave.inode\n",
99 | "inode = Node(IdentityInterface(fields=['a'], mandatory_inputs=True), name=\"inode\")\n",
100 | "# Node: testsave.testfunc\n",
101 | "testfunc = Node(Function(input_names=['a'], output_names=['output']), name=\"testfunc\")\n",
102 | "testfunc.interface.ignore_exception = False\n",
103 | "def testfunc_1(in1):\n",
104 | " \"\"\"dummy func\n",
105 | " \"\"\"\n",
106 | " out = in1 + 'foo' + \"out1\"\n",
107 | " return out\n",
108 | "\n",
109 | "testfunc.inputs.function_str = getsource(testfunc_1)\n",
110 | "testfunc.inputs.in1 = '-sub'\n",
111 | "testsave.connect(inode, \"a\", testfunc, \"in1\")\n",
112 | "# Node: testsave.bet2\n",
113 | "bet2 = MapNode(BET(), iterfield=['infile'], name=\"bet2\")\n",
114 | "bet2.interface.ignore_exception = False\n",
115 | "bet2.iterables = ('frac', [0.4, 0.5])\n",
116 | "bet2.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}\n",
117 | "bet2.inputs.output_type = 'NIFTI_GZ'\n",
118 | "bet2.terminal_output = 'stream'\n",
119 | "# Node: testsave.bet\n",
120 | "bet = Node(BET(), name=\"bet\")\n",
121 | "bet.interface.ignore_exception = False\n",
122 | "bet.iterables = ('frac', [0.3, 0.4])\n",
123 | "bet.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}\n",
124 | "bet.inputs.output_type = 'NIFTI_GZ'\n",
125 | "bet.terminal_output = 'stream'\n",
126 | "# Node: testsave.maths\n",
127 | "maths = Node(ImageMaths(), name=\"maths\")\n",
128 | "maths.interface.ignore_exception = False\n",
129 | "maths.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}\n",
130 | "maths.inputs.output_type = 'NIFTI_GZ'\n",
131 | "maths.terminal_output = 'stream'\n",
132 | "testsave.connect(bet2, ('mask_file', func), maths, \"in_file2\")\n",
133 | "testsave.connect(bet, \"mask_file\", maths, \"in_file\")\n",
134 | "testsave.connect(testfunc, \"output\", maths, \"op_string\")"
135 | ]
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "The file is ready to use and includes all the necessary imports."
142 | ]
143 | }
144 | ],
145 | "metadata": {
146 | "kernelspec": {
147 | "display_name": "Python [default]",
148 | "language": "python",
149 | "name": "python3"
150 | },
151 | "language_info": {
152 | "codemirror_mode": {
153 | "name": "ipython",
154 | "version": 3
155 | },
156 | "file_extension": ".py",
157 | "mimetype": "text/x-python",
158 | "name": "python",
159 | "nbconvert_exporter": "python",
160 | "pygments_lexer": "ipython3",
161 | "version": "3.6.5"
162 | }
163 | },
164 | "nbformat": 4,
165 | "nbformat_minor": 2
166 | }
167 |
--------------------------------------------------------------------------------
/static/css/homepage.css:
--------------------------------------------------------------------------------
1 | html,body {
2 | height: 100%;
3 | }
4 |
5 | body {
6 | overflow-y: scroll;
7 | }
8 |
9 | body {
10 | font-family: 'Helvetica','Corbel',sans-serif;
11 | font-size: 14px;
12 | margin: 0;
13 | color: #444;
14 | line-height: 1.4;
15 | min-width: 1000px;
16 | }
17 |
18 | article {
19 | padding: 0 20px;
20 | position: relative;
21 | }
22 |
23 | h1,h2,h3,h4,h5 {
24 | color: #111;
25 | font-family: inherit;
26 | font-weight: bold;
27 | }
28 |
29 | h1 {
30 | font-size: 25px;
31 | margin-bottom: 22px;
32 | }
33 |
34 | h2 {
35 | font-size: 20px;
36 | margin-bottom: 22px;
37 | }
38 |
39 | h3 {
40 | font-size: 18px;
41 | margin-bottom: 22px;
42 | }
43 |
44 | h4 {
45 | font-size: 15px;
46 | margin-bottom: 22px;
47 | }
48 |
49 | h5 {
50 | font-size: 15px;
51 | margin-bottom: -22px;
52 | }
53 |
54 | p {
55 | font-size: 14px;
56 | margin: 22px 0;
57 | }
58 |
59 | b,strong {
60 | font-weight: bold;
61 | }
62 |
63 | em,cite {
64 | font-style: italic;
65 | }
66 |
67 | hr {
68 | background: #CCC;
69 | border: 0;
70 | box-shadow: 0 2px 2px rgba(0,0,0,0.075);
71 | clear: both;
72 | color: #CCC;
73 | display: block;
74 | height: 1px;
75 | margin: 18px 0 36px 0;
76 | padding: 0;
77 | width: 100%;
78 | }
79 |
80 | hr.thin {
81 | margin-bottom: 18px;
82 | margin-left: auto;
83 | margin-right: auto;
84 | opacity: .40;
85 | filter: alpha(opacity=40);
86 | width: 50%;
87 | }
88 |
89 | a:link,a:visited,header a:visited,footer a:visited,.visited-no-recolor a:visited,a.visited-no-recolor:visited {
90 | color: #005987;
91 | }
92 |
93 | a:link.no-underline,a:visited.no-underline,header a:visited.no-underline,footer a:visited.no-underline,.visited-no-recolor a:visited.no-underline,a.visited-no-recolor:visited.no-underline {
94 | text-decoration: none;
95 | }
96 |
97 | a:visited {
98 | color: #7d00ad;
99 | }
100 |
101 | a:link:hover,a:link:focus,a:visited:hover,a:visited:focus {
102 | color: #707070;
103 | }
104 |
105 | a:link:hover.no-underline,a:link:focus.no-underline,a:visited:hover.no-underline,a:visited:focus.no-underline {
106 | text-decoration: none;
107 | }
108 |
109 | form {
110 | display: inline;
111 | }
112 |
113 | .fixed-width {
114 | font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif;
115 | }
116 |
117 | ul.styled-list {
118 | margin-left: 16px;
119 | }
120 |
121 | ul.styled-list {
122 | list-style: disc;
123 | }
124 |
125 | ol.styled-list {
126 | list-style: decimal;
127 | }
128 |
129 | #homepage p {
130 | opacity: .8;
131 | }
132 |
133 | .section-separator {
134 | margin: 50px auto;
135 | position: relative;
136 | }
137 |
138 | .section-separator h2 {
139 | color: #898989;
140 | text-align: center;
141 | width: auto;
142 | font-size: 140%;
143 | margin: 0;
144 | position: relative;
145 | z-index: 1;
146 | }
147 |
148 | #homepage .section-separator hr {
149 | position: relative;
150 | margin: 0;
151 | top: -1em;
152 | }
153 |
154 | .library-section .section-separator {
155 | margin: 30px auto;
156 | }
157 |
158 | .library-section .library-section-separator {
159 | margin-bottom: 5px;
160 | }
161 |
162 | .library-section li {
163 | margin-bottom: -0.5em;
164 | }
165 |
166 | .domain-table-container .subject-link {
167 | background: #fdfdfd;
168 | border-bottom: 1px solid #ddd;
169 | border-right: 1px solid #ddd;
170 | box-sizing: border-box;
171 | display: block;
172 | font-family: inherit;
173 | padding: 5px 10px;
174 | text-decoration: none;
175 | }
176 |
177 | .domain-table-container .subject-link:hover {
178 | background: #314453;
179 | color: #fff;
180 | text-decoration: none;
181 | }
182 |
183 | .domain-header {
184 | color: #314453;
185 | margin-bottom: 0;
186 | padding-left: 10px;
187 | padding-top: 25px;
188 | }
189 |
190 | .domain-table-container {
191 | border-top: 2px solid #314453;
192 | border-left: 1px solid #ddd;
193 | margin-top: 3px;
194 | }
195 |
196 | .domain-header.color01 .domain-title {
197 | color: hsl(0, 60%, 50%);
198 | margin-left: -10px;
199 | padding: 3px 10px;
200 | }
201 |
202 | .domain-table-container.color01 {
203 | border-top: 2px solid hsl(0, 60%, 50%);
204 | }
205 |
206 | .domain-table-container.color01 .subject-link:hover {
207 | background: hsl(0, 60%, 50%);
208 | }
209 |
210 | .domain-header.color02 .domain-title {
211 | color: hsl(20, 60%, 50%);
212 | margin-left: -10px;
213 | padding: 3px 10px;
214 | }
215 |
216 | .domain-table-container.color02 {
217 | border-top: 2px solid hsl(20, 60%, 50%);
218 | }
219 |
220 | .domain-table-container.color02 .subject-link:hover {
221 | background: hsl(20, 60%, 50%);
222 | }
223 |
224 | .domain-header.color03 .domain-title {
225 | color: hsl(50, 60%, 50%);
226 | margin-left: -10px;
227 | padding: 3px 10px;
228 | }
229 |
230 | .domain-table-container.color03 {
231 | border-top: 2px solid hsl(50, 60%, 50%);
232 | }
233 |
234 | .domain-table-container.color03 .subject-link:hover {
235 | background: hsl(50, 60%, 50%);
236 | }
237 |
238 | .domain-header.color04 .domain-title {
239 | color: hsl(150, 60%, 50%);
240 | margin-left: -10px;
241 | padding: 3px 10px;
242 | }
243 |
244 | .domain-table-container.color04 {
245 | border-top: 2px solid hsl(150, 60%, 50%);
246 | }
247 |
248 | .domain-table-container.color04 .subject-link:hover {
249 | background: hsl(150, 60%, 50%);
250 | }
251 |
252 | .domain-header.color05 .domain-title {
253 | color: hsl(205, 60%, 50%);
254 | margin-left: -10px;
255 | padding: 3px 10px;
256 | }
257 |
258 | .domain-table-container.color05 {
259 | border-top: 2px solid hsl(205, 60%, 50%);
260 | }
261 |
262 | .domain-table-container.color05 .subject-link:hover {
263 | background: hsl(205, 60%, 50%);
264 | }
265 |
266 | .domain-header.color06 .domain-title {
267 | color: hsl(240, 60%, 50%);
268 | margin-left: -10px;
269 | padding: 3px 10px;
270 | }
271 |
272 | .domain-table-container.color06 {
273 | border-top: 2px solid hsl(240, 60%, 50%);
274 | }
275 |
276 | .domain-table-container.color06 .subject-link:hover {
277 | background: hsl(240, 60%, 50%);
278 | }
279 |
280 | .domain-header.color07 .domain-title {
281 | color: hsl(280, 60%, 50%);
282 | margin-left: -10px;
283 | padding: 3px 10px;
284 | }
285 |
286 | .domain-table-container.color07 {
287 | border-top: 2px solid hsl(280, 60%, 50%);
288 | }
289 |
290 | .domain-table-container.color07 .subject-link:hover {
291 | background: hsl(280, 60%, 50%);
292 | }
293 |
--------------------------------------------------------------------------------
/static/images/bids.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/bids.png
--------------------------------------------------------------------------------
/static/images/datasink_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/datasink_flow.png
--------------------------------------------------------------------------------
/static/images/example_FSL.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/example_FSL.png
--------------------------------------------------------------------------------
/static/images/example_Freesurfer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/example_Freesurfer.png
--------------------------------------------------------------------------------
/static/images/example_SPM12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/example_SPM12.png
--------------------------------------------------------------------------------
/static/images/gantt_chart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/gantt_chart.png
--------------------------------------------------------------------------------
/static/images/iterables.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/iterables.png
--------------------------------------------------------------------------------
/static/images/itersource_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/itersource_1.png
--------------------------------------------------------------------------------
/static/images/itersource_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/itersource_2.png
--------------------------------------------------------------------------------
/static/images/joinnode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/joinnode.png
--------------------------------------------------------------------------------
/static/images/jupyter_function-completion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/jupyter_function-completion.png
--------------------------------------------------------------------------------
/static/images/jupyter_tab-4-times.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/jupyter_tab-4-times.png
--------------------------------------------------------------------------------
/static/images/jupyter_tab-once.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/jupyter_tab-once.png
--------------------------------------------------------------------------------
/static/images/jupyter_tab-twice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/jupyter_tab-twice.png
--------------------------------------------------------------------------------
/static/images/logoDocker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/logoDocker.png
--------------------------------------------------------------------------------
/static/images/logoNipype_text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/logoNipype_text.png
--------------------------------------------------------------------------------
/static/images/logoNipype_tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/logoNipype_tutorial.png
--------------------------------------------------------------------------------
/static/images/mapnode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/mapnode.png
--------------------------------------------------------------------------------
/static/images/nipype_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/nipype_architecture.png
--------------------------------------------------------------------------------
/static/images/nipype_example_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/nipype_example_graph.png
--------------------------------------------------------------------------------
/static/images/node_sinlge_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/node_sinlge_node.png
--------------------------------------------------------------------------------
/static/images/node_two_nodes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/node_two_nodes.png
--------------------------------------------------------------------------------
/static/images/python.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/python.png
--------------------------------------------------------------------------------
/static/images/synchronize_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/synchronize_1.png
--------------------------------------------------------------------------------
/static/images/synchronize_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miykael/nipype_tutorial/f11c9c7b8e7a1983918f1d517ec9cf3dfcb78236/static/images/synchronize_2.png
--------------------------------------------------------------------------------
/static/template_google_analytics.rst:
--------------------------------------------------------------------------------
1 |
11 |
12 |
--------------------------------------------------------------------------------
/test_notebooks.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from glob import glob
3 | import pytest
4 |
def test_version():
    """Smoke test: import nipype and report which version is installed."""
    import nipype
    installed = nipype.__version__
    print("nipype version: ", installed)
8 |
9 |
def reduce_notebook_load(path):
    """
    Write a trimmed copy of the notebook at *path* (named
    '<stem>_short.ipynb') in which the subject lists of the example and
    hands-on notebooks are reduced to two subjects, to keep CI runtimes
    manageable. Returns the path of the trimmed copy.
    """

    shortened = path[:-6] + '_short.ipynb'

    def rewrite(line):
        # Hands-on notebooks: shrink the six-subject list to two.
        if '/handson' in path and "subject_list = ['02', '03', '04'," in line:
            return line.replace(
                "['02', '03', '04', '07', '08', '09']",
                "['02', '07']")

        if '/example' in path:
            # Example notebooks: shrink either subject list variant to two.
            if "subject_list = ['01', '02', '03'," in line:
                return line.replace(
                    "['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']",
                    "['02', '03']")
            if "subject_list = ['02', '03'," in line:
                return line.replace(
                    "['02', '03', '04', '05', '07', '08', '09']",
                    "['02', '03']")
            # Restrict output plots to subject 02.
            if "sub-01" in line:
                return line.replace("sub-01", "sub-02")

        return line

    with open(path, 'r') as src, open(shortened, 'w') as dst:
        for raw in src:
            line = rewrite(raw)
            # In example_1stlevel, force plotting of sub-03..sub-10 to sub-02.
            if 'example_1stlevel' in path and "/sub-" in line:
                for subject in range(3, 11):
                    line = line.replace('sub-%02d' % subject, 'sub-02')
            dst.write(line)

    return shortened
52 |
53 |
# CI entry point: usage `python test_notebooks.py <mode>` where mode is
# 1 (introduction/basic/advanced), 2 (example notebooks, reduced) or
# 3 (hands-on notebooks, reduced). Runs the selected notebooks under
# pytest's nbval plugin and exits with pytest's status code.
if __name__ == '__main__':

    test_version()

    # Notebooks that should be tested
    notebooks = []

    # Test mode that should be run
    # NOTE(review): raises IndexError/ValueError if the argument is missing
    # or non-numeric — acceptable for a CI-only script.
    test_mode = int(sys.argv[1])

    # Specifies which tests should be run
    if test_mode == 1:

        # Test introduction, basic and advanced notebooks
        notebooks += sorted(glob("/home/neuro/nipype_tutorial/notebooks/introduction*.ipynb"))
        notebooks += sorted(glob("/home/neuro/nipype_tutorial/notebooks/basic*.ipynb"))
        notebooks += sorted(glob("/home/neuro/nipype_tutorial/notebooks/advanced*.ipynb"))

    elif test_mode == 2:

        # Test example notebooks
        # Each notebook is first rewritten with a reduced subject list; the
        # generated *_short.ipynb copy is what actually gets tested.
        for n in ["/home/neuro/nipype_tutorial/notebooks/example_preprocessing.ipynb",
                  "/home/neuro/nipype_tutorial/notebooks/example_1stlevel.ipynb",
                  "/home/neuro/nipype_tutorial/notebooks/example_normalize.ipynb",
                  "/home/neuro/nipype_tutorial/notebooks/example_2ndlevel.ipynb"]:

            print('Reducing: %s' % n)
            notebooks.append(reduce_notebook_load(n))

    elif test_mode == 3:

        # Test hands-on notebooks
        for n in ["/home/neuro/nipype_tutorial/notebooks/handson_preprocessing.ipynb",
                  "/home/neuro/nipype_tutorial/notebooks/handson_analysis.ipynb"]:

            print('Reducing: %s' % n)
            notebooks.append(reduce_notebook_load(n))

    # testing all tests from the notebooks list
    # --nbval-lax: only compare outputs of cells explicitly marked for
    # checking; 7200s per-cell timeout accommodates long pipeline runs.
    pytest_exit_code = pytest.main(["--nbval-lax", "--nbval-cell-timeout", "7200", "-vs"] + notebooks)
    # Propagate pytest's status so CI marks the job pass/fail correctly.
    sys.exit(pytest_exit_code)
95 |
--------------------------------------------------------------------------------
/update_pages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | while true; do
4 | read -p "Did you submit all other changes and are you ready to update the pages? " yn
5 | case $yn in
6 | [Yy]* )
7 |
8 | TIMESTAMP=`date +'%Y-%m-%d %H:%M:%S'`
9 |
10 | # Clean out .ipynb_checkpoints in folder structure
11 | rm -rf `find -name ".ipynb_checkpoints"`
12 |
13 | # Reset the counts of all notebook cells
14 | find . -type f -name "*.ipynb" \
15 | -exec sed -i 's/execution_count\b.*$/execution_count": null,/' {} +
16 |
17 | # Convert the notebooks to HTML
18 | jupyter nbconvert --to html --template full index.ipynb --output-dir=docs
19 | jupyter nbconvert --to html --template full notebooks/*ipynb \
20 | --output-dir=docs/notebooks/
21 |
22 | # Replace all .ipynb-links with .html-links
23 | find docs -type f -name "*.html" -exec sed -i 's/ipynb\&/html\&/g' {} +
24 | find docs -type f -name "*.html" -exec sed -i 's/ipynb#/html#/g' {} +
25 | find docs -type f -name "*.html" -exec sed -i 's/ipynb\"/html\"/g' {} +
26 |
27 | # Delete the Button "Show HTML code" from index.html
28 | sed -i '/Show HTML code/d' docs/index.html
29 |
30 | # Add Google Analytics script to each homepage
31 | for h in `find docs -maxdepth 2 -name "*html"`
32 | do
33 | sed '/<\/head>/ {r static/template_google_analytics.rst
34 | d}' $h > tmp.rst
35 |
36 | mv tmp.rst $h
37 | done
38 |
39 | # Add Footer to all html-notebooks
40 | for h in `find docs -maxdepth 2 -name "*html"`
41 | do
42 | sed -i 's/<\/body>/<\/body>