├── .editorconfig ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE.md └── dependabot.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── .travis.yml ├── AUTHORS.rst ├── CONTRIBUTING.rst ├── HISTORY.rst ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── adnipy ├── __init__.py ├── adni.py ├── adnipy.py └── data.py ├── appveyor.yml ├── docs ├── Makefile ├── adnipy.rst ├── authors.rst ├── conf.py ├── contributing.rst ├── history.rst ├── index.rst ├── installation.rst ├── make.bat ├── modules.rst ├── readme.rst └── usage.rst ├── environment.yml ├── requirements_dev.txt ├── setup.cfg ├── setup.py └── tests ├── __init__.py ├── test_adni.py ├── test_adnipy.py └── test_data.py /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | charset = utf-8 11 | end_of_line = lf 12 | 13 | [*.yaml] 14 | indent_size = 2 15 | 16 | [*.bat] 17 | indent_style = tab 18 | end_of_line = crlf 19 | 20 | [LICENSE] 21 | insert_final_newline = false 22 | 23 | [Makefile] 24 | indent_style = tab 25 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - adnipy version: 2 | - Python version: 3 | - Operating System: 4 | 5 | ### Description 6 | 7 | Describe what you were trying to get done. 8 | Tell us what happened, what went wrong, and what you expected to happen. 9 | 10 | ### What I Did 11 | 12 | Paste the command(s) you ran and the output. 
13 | If there was a crash, please include the traceback here. 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "06:00" 8 | timezone: Etc/GMT+1 9 | open-pull-requests-limit: 10 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### Python ### 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | env/ 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | .hypothesis/ 49 | .pytest_cache/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # SageMath parsed files 79 | *.sage.py 80 | 81 | # dotenv 82 | .env 83 | 84 | # virtualenv 85 | .venv 86 | venv/ 87 | ENV/ 88 | 89 | # Spyder project settings 90 | .spyderproject 91 | .spyproject 92 | 93 | # Rope project settings 94 | .ropeproject 95 | 96 | # mkdocs documentation 97 | /site 98 | 99 | # mypy 100 | .mypy_cache/ 101 | 102 | ### JupyterNotebooks ### 103 | .ipynb_checkpoints 104 | */.ipynb_checkpoints/* 105 | 106 | ### VisualStudioCode ### 107 | .devcontainer/ 108 | .vscode/ 109 | 110 | ### Data ### 111 | *.csv 112 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v5.0.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - id: check-case-conflict 12 | - id: requirements-txt-fixer 13 | - repo: https://github.com/psf/black 14 | rev: 25.1.0 15 | hooks: 16 | - id: black 17 | language_version: python3 18 | - repo: https://github.com/PyCQA/flake8 19 | rev: 
7.3.0 20 | hooks: 21 | - id: flake8 22 | additional_dependencies: 23 | - flake8-bugbear 24 | - flake8-docstrings 25 | - flake8-import-order 26 | - pep8-naming 27 | - repo: https://github.com/timothycrosley/isort/ 28 | rev: 6.0.1 29 | hooks: 30 | - id: isort 31 | 32 | - repo: https://github.com/PyCQA/pylint/ 33 | rev: v3.3.7 34 | hooks: 35 | - id: pylint 36 | args: [--disable=E0401] 37 | - repo: https://github.com/pre-commit/mirrors-prettier 38 | rev: "v4.0.0-alpha.8" 39 | hooks: 40 | - id: prettier 41 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.8" 13 | 14 | # Build documentation in the docs/ directory with Sphinx 15 | sphinx: 16 | configuration: docs/conf.py 17 | 18 | # Optionally build your docs in additional formats such as PDF and ePub 19 | formats: 20 | - pdf 21 | - epub 22 | 23 | # Optionally declare the Python requirements required to build your docs 24 | python: 25 | install: 26 | - method: pip 27 | path: . 28 | - requirements: requirements_dev.txt 29 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Config file for automatic testing at travis-ci.org 2 | 3 | language: python 4 | python: 5 | - "3.8" 6 | - "3.9" 7 | - "3.10" 8 | - "3.11" 9 | 10 | # Command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors 11 | install: 12 | - pip install tox-travis 13 | - pip install codecov 14 | 15 | # Command to run tests, e.g. 
python setup.py test 16 | script: tox 17 | 18 | after_success: codecov 19 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Credits 3 | ======= 4 | 5 | Development Lead 6 | ---------------- 7 | 8 | * Maximilian Cosmo Sitter 9 | 10 | Contributors 11 | ------------ 12 | 13 | None yet. Why not be the first? 14 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Contributing 5 | ============ 6 | 7 | Contributions are welcome, and they are greatly appreciated! Every little bit 8 | helps, and credit will always be given. 9 | 10 | You can contribute in many ways: 11 | 12 | Types of Contributions 13 | ---------------------- 14 | 15 | Report Bugs 16 | ~~~~~~~~~~~ 17 | 18 | Report bugs at https://github.com/mcsitter/adnipy/issues. 19 | 20 | If you are reporting a bug, please include: 21 | 22 | * Your operating system name and version. 23 | * Any details about your local setup that might be helpful in troubleshooting. 24 | * Detailed steps to reproduce the bug. 25 | 26 | Fix Bugs 27 | ~~~~~~~~ 28 | 29 | Look through the GitHub issues for bugs. Anything tagged with "bug" and "help 30 | wanted" is open to whoever wants to implement it. 31 | 32 | Implement Features 33 | ~~~~~~~~~~~~~~~~~~ 34 | 35 | Look through the GitHub issues for features. Anything tagged with "enhancement" 36 | and "help wanted" is open to whoever wants to implement it. 37 | 38 | Write Documentation 39 | ~~~~~~~~~~~~~~~~~~~ 40 | 41 | adnipy could always use more documentation, whether as part of the 42 | official adnipy docs, in docstrings, or even on the web in blog posts, 43 | articles, and such. 
44 | 45 | Submit Feedback 46 | ~~~~~~~~~~~~~~~ 47 | 48 | The best way to send feedback is to file an issue at https://github.com/mcsitter/adnipy/issues. 49 | 50 | If you are proposing a feature: 51 | 52 | * Explain in detail how it would work. 53 | * Keep the scope as narrow as possible, to make it easier to implement. 54 | * Remember that this is a volunteer-driven project, and that contributions 55 | are welcome :) 56 | 57 | Get Started! 58 | ------------ 59 | 60 | Ready to contribute? Here's how to set up `adnipy` for local development. 61 | 62 | 1. Fork the `adnipy` repo on GitHub. 63 | 2. Clone your fork locally:: 64 | 65 | $ git clone git@github.com:your_name_here/adnipy.git 66 | 67 | 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: 68 | 69 | $ mkvirtualenv adnipy 70 | $ cd adnipy/ 71 | $ python setup.py develop 72 | 73 | 4. Create a branch for local development:: 74 | 75 | $ git checkout -b name-of-your-bugfix-or-feature 76 | 77 | Now you can make your changes locally. 78 | 79 | 5. When you're done making changes, check that your changes pass flake8 and the 80 | tests, including testing other Python versions with tox:: 81 | 82 | $ flake8 adnipy tests 83 | $ python setup.py test or py.test 84 | $ tox 85 | 86 | To get flake8 and tox, just pip install them into your virtualenv. 87 | 88 | 6. Commit your changes and push your branch to GitHub:: 89 | 90 | $ git add . 91 | $ git commit -m "Your detailed description of your changes." 92 | $ git push origin name-of-your-bugfix-or-feature 93 | 94 | 7. Submit a pull request through the GitHub website. 95 | 96 | Pull Request Guidelines 97 | ----------------------- 98 | 99 | Before you submit a pull request, check that it meets these guidelines: 100 | 101 | 1. The pull request should include tests. 102 | 2. If the pull request adds functionality, the docs should be updated. 
Put 103 | your new functionality into a function with a docstring, and add the 104 | feature to the list in README.rst. 105 | 3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8. Check 106 | https://travis-ci.org/mcsitter/adnipy/pull_requests 107 | and make sure that the tests pass for all supported Python versions. 108 | 109 | Tips 110 | ---- 111 | 112 | To run a subset of tests:: 113 | 114 | 115 | $ $ py.test tests.test_adnipy 116 | 117 | Deploying 118 | --------- 119 | 120 | A reminder for the maintainers on how to deploy. 121 | Make sure all your changes are committed (including an entry in HISTORY.rst). 122 | Then run:: 123 | 124 | $ bumpversion patch # possible: major / minor / patch 125 | $ git push 126 | $ git push --tags 127 | 128 | Travis will then deploy to PyPI if tests pass. 129 | -------------------------------------------------------------------------------- /HISTORY.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | History 3 | ======= 4 | 5 | 0.0.1 (2019-09-05) 6 | ------------------ 7 | 8 | * First release on GitHub. 9 | * First release on PyPI. 10 | 11 | 0.1.0 (2019-10-25) 12 | ------------------ 13 | 14 | * Improved documentation. 
15 | * Added pandas dataframe class extension for ADNI 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019, Maximilian Cosmo Sitter 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include AUTHORS.rst 2 | include CONTRIBUTING.rst 3 | include HISTORY.rst 4 | include LICENSE 5 | include README.rst 6 | 7 | recursive-include tests * 8 | recursive-exclude * __pycache__ 9 | recursive-exclude * *.py[co] 10 | 11 | recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif 12 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: docs 2 | .DEFAULT_GOAL := help 3 | 4 | define BROWSER_PYSCRIPT 5 | import os, webbrowser, sys 6 | 7 | try: 8 | from urllib import pathname2url 9 | except: 10 | from urllib.request import pathname2url 11 | 12 | webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) 13 | endef 14 | export BROWSER_PYSCRIPT 15 | 16 | define PRINT_HELP_PYSCRIPT 17 | import re, sys 18 | 19 | for line in sys.stdin: 20 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) 21 | if match: 22 | target, help = match.groups() 23 | print("%-20s %s" % (target, help)) 24 | endef 25 | export PRINT_HELP_PYSCRIPT 26 | 27 | BROWSER := python -c "$$BROWSER_PYSCRIPT" 28 | 29 | help: 30 | @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) 31 | 32 | clean: clean-build clean-pyc clean-test clean-docs ## remove all build, test, coverage, Python artifacts and documentation 33 | 34 | clean-build: ## remove build artifacts 35 | rm -fr build/ 36 | rm -fr dist/ 37 | rm -fr .eggs/ 38 | find . -name '*.egg-info' -exec rm -fr {} + 39 | find . -name '*.egg' -exec rm -f {} + 40 | 41 | clean-pyc: ## remove Python file artifacts 42 | find . -name '*.pyc' -exec rm -f {} + 43 | find . -name '*.pyo' -exec rm -f {} + 44 | find . -name '*~' -exec rm -f {} + 45 | find . 
-name '__pycache__' -exec rm -fr {} + 46 | 47 | clean-test: ## remove test and coverage artifacts 48 | rm -fr .tox/ 49 | rm -f .coverage 50 | rm -fr htmlcov/ 51 | rm -fr .pytest_cache 52 | 53 | clean-docs: ## remove Sphinx documentation 54 | rm -f docs/adnipy.rst 55 | rm -f docs/modules.rst 56 | $(MAKE) -C docs clean 57 | 58 | lint: ## check style with flake8 59 | flake8 adnipy tests 60 | 61 | test: ## run tests quickly with the default Python 62 | py.test 63 | 64 | test-all: ## run tests on every Python version with tox 65 | tox 66 | 67 | coverage: ## check code coverage quickly with the default Python 68 | coverage run --source adnipy -m pytest 69 | coverage report -m 70 | coverage html 71 | $(BROWSER) htmlcov/index.html 72 | 73 | docs: ## generate Sphinx HTML documentation, including API docs 74 | rm -f docs/adnipy.rst 75 | rm -f docs/modules.rst 76 | sphinx-apidoc -o docs/ adnipy 77 | $(MAKE) -C docs clean 78 | $(MAKE) -C docs html 79 | $(BROWSER) docs/_build/html/index.html 80 | 81 | servedocs: docs ## compile the docs watching for changes 82 | watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D . 83 | 84 | release: dist ## package and upload a release 85 | twine upload dist/* 86 | 87 | dist: clean ## builds source and wheel package 88 | python setup.py sdist 89 | python setup.py bdist_wheel 90 | ls -l dist 91 | 92 | install: clean ## install the package to the active Python's site-packages 93 | python setup.py install 94 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ====== 2 | adnipy 3 | ====== 4 | 5 | .. image:: https://ci.appveyor.com/api/projects/status/xmkthg4jogd4eewb/branch/master?svg=true 6 | :target: https://ci.appveyor.com/project/mcsitter/adnipy 7 | :alt: appveyor build status 8 | 9 | .. 
image:: https://readthedocs.org/projects/adnipy/badge/?version=latest 10 | :target: https://adnipy.readthedocs.io/en/latest/?badge=latest 11 | :alt: documentation build status 12 | 13 | .. image:: https://img.shields.io/github/license/mcsitter/adnipy 14 | :target: https://github.com/mcsitter/adnipy/blob/master/LICENSE 15 | :alt: license 16 | 17 | .. image:: https://codecov.io/gh/mcsitter/adnipy/branch/master/graph/badge.svg 18 | :target: https://codecov.io/gh/mcsitter/adnipy 19 | :alt: coverage 20 | 21 | 22 | Process ADNI study data with adnipy. 23 | 24 | 25 | Adnipy is a python package designed for working with the `ADNI database`_. 26 | It also offers some handy tools for file operations. 27 | 28 | * Free software: MIT license 29 | * Documentation: https://adnipy.readthedocs.io 30 | 31 | 32 | Credits 33 | ------- 34 | 35 | This package was created with Cookiecutter_ and the 36 | `audreyr/cookiecutter-pypackage`_ project template. 37 | 38 | .. _Cookiecutter: https://github.com/audreyr/cookiecutter 39 | .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage 40 | .. 
_`ADNI database`: http://adni.loni.usc.edu/ 41 | -------------------------------------------------------------------------------- /adnipy/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Top-level package for adnipy.""" 4 | 5 | __author__ = """Maximilian Cosmo Sitter""" 6 | __email__ = "msitter@smail.uni-koeln.de" 7 | __version__ = "0.1.0" 8 | 9 | # Let users know if they're missing any of our hard dependencies 10 | import matplotlib 11 | import pandas as pd 12 | 13 | from .adni import ADNI 14 | from .adnipy import get_matching_images, read_csv, timedelta 15 | 16 | del matplotlib, pd 17 | 18 | 19 | # module level doc-string 20 | __doc__ = """Process ADNI study data with adnipy.""" 21 | -------------------------------------------------------------------------------- /adnipy/adni.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Pandas dataframe extension for ADNI.""" 4 | 5 | # pylint: disable=R0914 6 | 7 | # Third party imports 8 | import pandas as pd 9 | 10 | 11 | @pd.api.extensions.register_dataframe_accessor("adni") 12 | class ADNI: 13 | """Dataframe deals with ADNI data. 14 | 15 | This class presents methods, which are designed to work with data from the 16 | ADNI database. 
17 | """ 18 | 19 | DATES = [ 20 | # Collections 21 | "Acq Date", 22 | "Downloaded", 23 | # ADNIMERGE 24 | "EXAMDATE", 25 | "EXAMDATE_bl", 26 | "update_stamp", 27 | # DESIKANLAB 28 | "USERDATE", 29 | "update_stamp", 30 | # TAUMETA 31 | "USERDATE", 32 | "USERDATE2", 33 | "SCANDATE", 34 | "TAUTRANDT", 35 | "update_stamp", 36 | # TAUMETA3 37 | "USERDATE", 38 | "USERDATE2", 39 | "SCANDATE", 40 | "TRANDATE", 41 | "update_stamp", 42 | ] 43 | INDEX = ["Subject ID", "Image ID"] 44 | MAPPER = { 45 | # Collections 46 | "Image": "Image ID", 47 | "Image Data ID": "Image ID", 48 | "Subject": "Subject ID", 49 | "Acq Date": "SCANDATE", 50 | # ADNIMERGE 51 | "PTID": "Subject ID", 52 | # TAUMETA3 53 | "ASSAYTIME": "TAUTIME", 54 | } 55 | 56 | def __init__(self, pandas_dataframe): 57 | """Pass dataframe to the _df attribute of ADNI object. 58 | 59 | Parameters 60 | ---------- 61 | pandas_dataframe : pd.DataFrame 62 | This dataframe will be stored in the _df attribute. 63 | 64 | Attributes 65 | ---------- 66 | _df : pd.DataFrame 67 | This represents the dataframe object, which calls the method. 68 | 69 | """ 70 | self._df = pandas_dataframe 71 | 72 | def standard_column_names(self): 73 | """Rename dataframe columns to module standard. 74 | 75 | This function helps when working with multiple dataframes, 76 | since the same data can have different names. 77 | It will also call `rid()` on the dataframe. 78 | 79 | Returns 80 | ------- 81 | pd.DataFrame 82 | This will have standardized columns names. 83 | 84 | See Also 85 | -------- 86 | rid 87 | 88 | Examples 89 | -------- 90 | >>> subjects = pd.DataFrame({"Subject": ["101_S_1001", "102_S_1002"]}) 91 | >>> subjects 92 | Subject 93 | 0 101_S_1001 94 | 1 102_S_1002 95 | >>> subjects.adni.standard_column_names() 96 | "VISCODE2" not included. 
97 | Subject ID RID 98 | 0 101_S_1001 1001 99 | 1 102_S_1002 1002 100 | 101 | >>> images = pd.DataFrame({"Image": [100001, 100002]}) 102 | >>> images 103 | Image 104 | 0 100001 105 | 1 100002 106 | >>> images.adni.standard_column_names() 107 | "VISCODE2" not included. 108 | Image ID 109 | 0 100001 110 | 1 100002 111 | 112 | """ 113 | self._df = self._df.rename(mapper=self.MAPPER, axis="columns") 114 | 115 | if "VISCODE2" in self._df.columns: 116 | self._df["VISCODE"] = self._df["VISCODE2"] 117 | del self._df["VISCODE2"] 118 | 119 | else: 120 | print('"VISCODE2" not included.') 121 | 122 | self._df = self.rid() 123 | 124 | return self._df 125 | 126 | def standard_dates(self): 127 | """Change type of date columns to datetime. 128 | 129 | Returns 130 | ------- 131 | pd.DataFrame 132 | Dates will have the appropriate dtype. 133 | 134 | """ 135 | for date in self.DATES: 136 | if date in self._df.columns: 137 | self._df.loc[:, date] = pd.to_datetime(self._df.loc[:, date]) 138 | 139 | return self._df 140 | 141 | def standard_index(self, index=None): 142 | """Process dataframes into a standardized format. 143 | 144 | The output is easy to read. 145 | Applying functions the the output may not work as expected. 146 | 147 | Parameters 148 | ---------- 149 | index : list of str, default None 150 | These columns will be the new index. 151 | 152 | Returns 153 | ------- 154 | pd.DataFrame 155 | An easy to read dataframe for humans. 156 | 157 | """ 158 | if index is None: 159 | index = ["Subject ID", "Image ID", "RID", "Visit", "SCANDATE"] 160 | 161 | dataframe = self._df.reset_index() 162 | dataframe = dataframe.set_index( 163 | [column for column in index if column in dataframe.columns] 164 | ) 165 | 166 | if "index" in dataframe.columns: 167 | dataframe = dataframe.drop(columns="index") 168 | dataframe = dataframe.dropna(axis="columns", how="all") 169 | dataframe = dataframe.sort_index() 170 | 171 | return dataframe 172 | 173 | def rid(self): 174 | """Add a roster ID column. 
175 | 176 | Will not work if 'RID' is already present or 'Subject ID' is missing. 177 | 178 | Returns 179 | ------- 180 | pd.DataFrame 181 | Dataframe with a 'RID' column. 182 | 183 | Examples 184 | -------- 185 | >>> subjects = {"Subject ID": ["100_S_1000", "101_S_1001"]} 186 | >>> collection = pd.DataFrame(subjects) 187 | >>> collection 188 | Subject ID 189 | 0 100_S_1000 190 | 1 101_S_1001 191 | >>> collection.adni.rid() 192 | Subject ID RID 193 | 0 100_S_1000 1000 194 | 1 101_S_1001 1001 195 | 196 | """ 197 | collection = self._df 198 | missing_rid = "RID" not in collection.columns 199 | contains_subject_id = "Subject ID" in collection.columns 200 | if missing_rid and contains_subject_id: 201 | collection["RID"] = collection["Subject ID"].map( 202 | lambda subject_id: pd.to_numeric(subject_id[-4:]) 203 | ) 204 | 205 | return collection 206 | 207 | def drop_dynamic(self): 208 | """Remove images which are dynamic. 209 | 210 | Drops all rows, in which the Description contains 'Dynamic'. 211 | 212 | Returns 213 | ------- 214 | pd.DataFrame 215 | All images that are not dynamic. 216 | 217 | """ 218 | no_dynamic = self._df[~self._df["Description"].str.contains("Dynamic")] 219 | 220 | return no_dynamic 221 | 222 | def groups(self, grouped_mci=True): 223 | """Create a dataframe for each group and save it to a csv file. 224 | 225 | Parameters 226 | ---------- 227 | grouped_mci : bool, default True 228 | If true, 'LMCI' and 'EMCI' are treated like 'MCI'. 229 | However, the original values will stills be in the output. 230 | 231 | Returns 232 | ------- 233 | dict 234 | Dictionnairy with a dataframe for each group. 
235 | 236 | """ 237 | collection = self._df 238 | 239 | # creates dataframe for each group 240 | group_names = collection["Group"].unique() 241 | groups = {} 242 | for group in group_names: 243 | group_df = collection[collection["Group"] == group] 244 | groups[group] = group_df 245 | 246 | # groups MCIs 247 | if grouped_mci is True: 248 | mci = collection[collection["Group"].isin(["MCI", "LMCI", "EMCI"])] 249 | if not mci.empty: 250 | groups["MCI"] = mci 251 | if "LMCI" in group_names: 252 | del groups["LMCI"] 253 | if "EMCI" in group_names: 254 | del groups["EMCI"] 255 | 256 | return groups 257 | 258 | def longitudinal(self): 259 | """ 260 | Keep only longitudinal data. 261 | 262 | This requires an 'RID' or 'Subject ID' column in the dataframe. 263 | Do not use if multiple images are present for a single timepoint. 264 | 265 | Parameters 266 | ---------- 267 | images : pd.DataFrame 268 | This dataframe will be modified. 269 | 270 | Returns 271 | ------- 272 | pd.DataFrame 273 | A dataframe with only longitudinal data. 274 | 275 | See Also 276 | -------- 277 | drop_dynamic 278 | 279 | """ 280 | images = self.rid() 281 | 282 | longitudinal = images[images["RID"].duplicated(keep=False)] 283 | 284 | return longitudinal 285 | 286 | def timepoints(self, second="first"): 287 | """Extract timepoints from a dataframe. 288 | 289 | Parameters 290 | ---------- 291 | second : {'first' or 'last'}, default 'first' 292 | 'last' to have the latest, 'first' to have the earliest values 293 | for timepoint 2. 294 | 295 | """ 296 | dataframe = self._df 297 | 298 | dataframe.reset_index(inplace=True) 299 | dataframe.set_index(self.INDEX, inplace=True) 300 | dataframe.sort_index(inplace=True) 301 | if "index" in dataframe.columns: 302 | dataframe = dataframe.drop(columns="index") 303 | if "Description" in dataframe.columns: 304 | raise ValueError( 305 | "Make sure that 'Description' is not in columns " 306 | "and only one image per timepoint is in the pd.DataFrame." 
307 | ) 308 | df_subjects = dataframe.index.get_level_values(0) 309 | df_images = dataframe.index.get_level_values(1) 310 | 311 | timepoints = {} 312 | 313 | if second == "first": 314 | total_timepoints = max(df_subjects.value_counts()) 315 | for i in range(total_timepoints): 316 | timepoint = i + 1 317 | timepoint_df = dataframe[~df_subjects.duplicated(keep="first")] 318 | timepoint_str = "Timepoint " + str(timepoint) 319 | timepoints[timepoint_str] = timepoint_df 320 | dataframe = dataframe[ 321 | ~df_images.isin(timepoint_df.index.get_level_values(1)) 322 | ] 323 | df_subjects = dataframe.index.get_level_values(0) 324 | df_images = dataframe.index.get_level_values(1) 325 | 326 | elif second == "last": 327 | timepoint_1 = dataframe[~df_subjects.duplicated()] 328 | timepoints["Timepoint 1"] = timepoint_1 329 | timepoint_1_images = timepoint_1.index.get_level_values(1) 330 | after_timepoint_1 = dataframe[~df_images.isin(timepoint_1_images)] 331 | 332 | after_tp_1_images = after_timepoint_1.index.get_level_values(0) 333 | timepoint_2_last = after_timepoint_1[ 334 | ~after_tp_1_images.duplicated(keep="last") 335 | ] 336 | timepoints["Timepoint 2"] = timepoint_2_last 337 | 338 | return timepoints 339 | -------------------------------------------------------------------------------- /adnipy/adnipy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Process ADNI study data with adnipy.""" 4 | 5 | # Standard library imports 6 | import warnings 7 | 8 | # Third party imports 9 | import pandas as pd 10 | 11 | 12 | def read_csv(file): 13 | """Return a csv file as a pandas.DataFrame. 14 | 15 | Recognizes missing values used in the ADNI database. 16 | 17 | Parameters 18 | ---------- 19 | file : str, pathlib.Path 20 | The path to the .csv file. 21 | 22 | Returns 23 | ------- 24 | pd.DataFrame 25 | Returns the file as a dataframe. 
26 | 27 | See Also 28 | -------- 29 | standard_column_names 30 | standard_dates 31 | standard_index 32 | 33 | """ 34 | # empty values 35 | na_values = ["-1", "-4"] 36 | 37 | # prevents UserWarnings on large files like ADNIMERGE 38 | dtype = { 39 | "ABETA": object, 40 | "TAU": object, 41 | "TAU_bl": object, 42 | "PTAU": object, 43 | "PTAU_bl": object, 44 | } 45 | 46 | dataframe = pd.read_csv(file, dtype=dtype, na_values=na_values) 47 | 48 | return dataframe 49 | 50 | 51 | def timedelta(old, new): 52 | """Get timedelta between timepoints. 53 | 54 | Parameters 55 | ---------- 56 | old : pd.DataFrame 57 | This is the older dataframe. 58 | new : pd.DataFrame 59 | This is the newer dataframe. 60 | 61 | Returns 62 | ------- 63 | pd.Series 64 | The content will be timedelta values. Look into numpy for more options. 65 | 66 | """ 67 | old = old.reset_index() 68 | old = old.set_index("Subject ID") 69 | 70 | new = new.reset_index() 71 | new = new.set_index("Subject ID") 72 | 73 | timedeltas = old["SCANDATE"] - new["SCANDATE"] 74 | 75 | return timedeltas 76 | 77 | 78 | def get_matching_images(left, right): 79 | """Match different scan types based on closest date. 80 | 81 | The columns 'Subject ID' and 'SCANDATE' are required. 82 | 83 | Parameters 84 | ---------- 85 | left : pd.DataFrame 86 | Dataframe containing the tau scans. 87 | right : pd.DataFrame 88 | Dataframe containing the mri scans. 89 | 90 | Returns 91 | ------- 92 | pd.DataFrame 93 | For each timepoint there is a match from both inputs. 
94 | 95 | """ 96 | left = left.set_index(["Subject ID", "SCANDATE"]) 97 | left = left.sort_index() 98 | 99 | right = right.set_index(["Subject ID", "SCANDATE"]) 100 | right = right.sort_index() 101 | 102 | missing_match = [] 103 | matching_images = [] 104 | right_subjects = right.index.get_level_values(0) 105 | 106 | def closest_date(subject, index): 107 | """Get closest date from list.""" 108 | unique_dates = subject.index.unique() 109 | closest_date = min(unique_dates, key=lambda x, index=index: abs(x - index[1])) 110 | 111 | return closest_date 112 | 113 | for index in left.index: 114 | if index[0] in right_subjects: 115 | subject = right.loc[index[0]] 116 | date = closest_date(subject, index) 117 | matching_image = right.loc[index[0], date] 118 | image = left.loc[[index]] 119 | image["Image ID_r"] = matching_image.values[0] 120 | matching_images.append(image) 121 | else: 122 | missing_match.append(index) 123 | 124 | matching_images_df = pd.concat(matching_images) 125 | matching_images_df = matching_images_df.rename(columns={"Image ID": "Image ID_l"}) 126 | 127 | if missing_match: 128 | missing_match_str = str(set(missing_match)) 129 | message = "Could not find matching images for:" + missing_match_str 130 | warnings.warn(message, stacklevel=1) 131 | 132 | return matching_images_df 133 | -------------------------------------------------------------------------------- /adnipy/data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Process data created in Matlab.""" 4 | 5 | # Standard library imports 6 | import re 7 | 8 | # Third party imports 9 | import pandas as pd 10 | 11 | 12 | def image_id_from_filename(filename): 13 | """Extract image ID of single ADNI .nii filename. 14 | 15 | Images from the ADNI database have a specific formatting. 16 | Using regular expressions the image ID can be extracted from filenames. 
def image_id_from_filename(filename):
    """Extract image ID of single ADNI .nii filename.

    Images from the ADNI database have a specific formatting.
    Using regular expressions the image ID can be extracted from filenames.

    Parameters
    ----------
    filename : str
        It must contain the Image ID at the end.

    Returns
    -------
    numpy.int64
        Image ID as an integer.

    Raises
    ------
    AttributeError
        If *filename* does not contain an ``_I<digits>.nii`` pattern.

    Examples
    --------
    >>> image_id_from_filename("*_I123456.nii")
    123456

    """
    # Raw string with an escaped dot and a "+" quantifier: the previous
    # pattern "_I([0-9]*).nii" let "." match any character and "*" accept
    # zero digits, so strings such as "_I123456Xnii" were wrongly accepted.
    image_id_format = re.compile(r"_I([0-9]+)\.nii")

    image_id = re.search(image_id_format, filename).group(1)
    image_id = pd.to_numeric(image_id)

    return image_id
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/adnipy.rst: -------------------------------------------------------------------------------- 1 | adnipy package 2 | ============== 3 | 4 | Submodules 5 | ---------- 6 | 7 | adnipy.adni module 8 | ------------------ 9 | 10 | .. automodule:: adnipy.adni 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | adnipy.adnipy module 16 | -------------------- 17 | 18 | .. automodule:: adnipy.adnipy 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | adnipy.data module 24 | ------------------ 25 | 26 | .. automodule:: adnipy.data 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: adnipy 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../AUTHORS.rst 2 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # adnipy documentation build configuration file, created by 5 | # sphinx-quickstart on Fri Jun 9 13:47:02 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | # If extensions (or modules to document with autodoc) are in another 17 | # directory, add these directories to sys.path here. 
If the directory is 18 | # relative to the documentation root, use os.path.abspath to make it 19 | # absolute, like shown here. 20 | # 21 | 22 | """adnipy documentation build configuration file.""" 23 | 24 | # pylint: disable=C0103,C0413,W0622 25 | 26 | import os 27 | import sys 28 | 29 | sys.path.insert(0, os.path.abspath("..")) 30 | 31 | import sphinx_rtd_theme # noqa: F401 pylint: disable=W0611 32 | 33 | import adnipy # noqa: E402 34 | 35 | # -- General configuration --------------------------------------------- 36 | 37 | # If your documentation needs a minimal Sphinx version, state it here. 38 | # 39 | # needs_sphinx = '1.0' 40 | 41 | # Add any Sphinx extension module names here, as strings. They can be 42 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 43 | extensions = [ 44 | "sphinx.ext.autodoc", 45 | "sphinx.ext.coverage", 46 | "sphinx.ext.imgconverter", 47 | "sphinx.ext.napoleon", 48 | "sphinx.ext.viewcode", 49 | ] 50 | # html4_writer = True 51 | 52 | # Add any paths that contain templates here, relative to this directory. 53 | templates_path = ["_templates"] 54 | 55 | # The suffix(es) of source filenames. 56 | # You can specify multiple suffix as a list of string: 57 | # 58 | # source_suffix = ['.rst', '.md'] 59 | source_suffix = ".rst" 60 | 61 | # The master toctree document. 62 | master_doc = "index" 63 | 64 | # General information about the project. 65 | project = "adnipy" 66 | copyright = "2019, Maximilian Cosmo Sitter" 67 | author = "Maximilian Cosmo Sitter" 68 | 69 | # The version info for the project you're documenting, acts as replacement 70 | # for |version| and |release|, also used in various other places throughout 71 | # the built documents. 72 | # 73 | # The short X.Y version. 74 | version = adnipy.__version__ 75 | # The full version, including alpha/beta/rc tags. 76 | release = adnipy.__version__ 77 | 78 | # The language for content autogenerated by Sphinx. 
Refer to documentation 79 | # for a list of supported languages. 80 | # 81 | # This is also used if you do content translation via gettext catalogs. 82 | # Usually you set "language" from the command line for these cases. 83 | language = "en" 84 | 85 | # List of patterns, relative to source directory, that match files and 86 | # directories to ignore when looking for source files. 87 | # This patterns also effect to html_static_path and html_extra_path 88 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 89 | 90 | # The name of the Pygments (syntax highlighting) style to use. 91 | pygments_style = "sphinx" 92 | 93 | # If true, `todo` and `todoList` produce output, else they produce nothing. 94 | todo_include_todos = False 95 | 96 | 97 | # -- Options for HTML output ------------------------------------------- 98 | 99 | # The theme to use for HTML and HTML Help pages. See the documentation for 100 | # a list of builtin themes. 101 | # 102 | html_theme = "sphinx_rtd_theme" 103 | 104 | # Theme options are theme-specific and customize the look and feel of a 105 | # theme further. For a list of options available for each theme, see the 106 | # documentation. 107 | # 108 | # html_theme_options = {} 109 | 110 | # Add any paths that contain custom static files (such as style sheets) here, 111 | # relative to this directory. They are copied after the builtin static files, 112 | # so a file named "default.css" will overwrite the builtin "default.css". 113 | # html_static_path = ['_static'] 114 | 115 | 116 | # -- Options for HTMLHelp output --------------------------------------- 117 | 118 | # Output file base name for HTML help builder. 119 | htmlhelp_basename = "adnipydoc" 120 | 121 | 122 | # -- Options for LaTeX output ------------------------------------------ 123 | 124 | latex_elements = { 125 | # The paper size ('letterpaper' or 'a4paper'). 126 | # 127 | "papersize": "a4paper", 128 | # The font size ('10pt', '11pt' or '12pt'). 
129 | # 130 | # 'pointsize': '10pt', 131 | # Additional stuff for the LaTeX preamble. 132 | # 133 | # 'preamble': '', 134 | # Latex figure (float) alignment 135 | # 136 | # 'figure_align': 'htbp', 137 | } 138 | 139 | # Grouping the document tree into LaTeX files. List of tuples 140 | # (source start file, target name, title, author, documentclass 141 | # [howto, manual, or own class]). 142 | latex_documents = [ 143 | ( 144 | master_doc, 145 | "adnipy.tex", 146 | "adnipy Documentation", 147 | "Maximilian Cosmo Sitter", 148 | "manual", 149 | ) 150 | ] 151 | 152 | 153 | # -- Options for manual page output ------------------------------------ 154 | 155 | # One entry per manual page. List of tuples 156 | # (source start file, name, description, authors, manual section). 157 | man_pages = [(master_doc, "adnipy", "adnipy Documentation", [author], 1)] 158 | 159 | 160 | # -- Options for Texinfo output ---------------------------------------- 161 | 162 | # Grouping the document tree into Texinfo files. List of tuples 163 | # (source start file, target name, title, author, 164 | # dir menu entry, description, category) 165 | texinfo_documents = [ 166 | ( 167 | master_doc, 168 | "adnipy", 169 | "adnipy Documentation", 170 | author, 171 | "adnipy", 172 | "One line description of project.", 173 | "Miscellaneous", 174 | ) 175 | ] 176 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/history.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../HISTORY.rst 2 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to adnipy's documentation! 2 | ====================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | readme 9 | installation 10 | usage 11 | modules 12 | contributing 13 | authors 14 | history 15 | 16 | Indices and tables 17 | ================== 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Installation 5 | ============ 6 | 7 | 8 | Stable release 9 | -------------- 10 | 11 | To install adnipy, run this command in your terminal: 12 | 13 | .. code-block:: console 14 | 15 | $ pip install adnipy 16 | 17 | This is the preferred method to install adnipy, as it will always install the 18 | most recent stable release. 19 | 20 | If you don't have `pip`_ installed, this `Python installation guide`_ can guide 21 | you through the process. 22 | 23 | .. _pip: https://pip.pypa.io 24 | .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ 25 | 26 | 27 | From sources 28 | ------------ 29 | 30 | The sources for adnipy can be downloaded from the `Github repo`_. 31 | 32 | You can either clone the public repository: 33 | 34 | .. code-block:: console 35 | 36 | $ git clone git://github.com/mcsitter/adnipy 37 | 38 | Or download the `tarball`_: 39 | 40 | .. code-block:: console 41 | 42 | $ curl -OL https://github.com/mcsitter/adnipy/tarball/master 43 | 44 | Once you have a copy of the source, you can install it with: 45 | 46 | .. code-block:: console 47 | 48 | $ python setup.py install 49 | 50 | 51 | .. 
_Github repo: https://github.com/mcsitter/adnipy 52 | .. _tarball: https://github.com/mcsitter/adnipy/tarball/master 53 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=adnipy 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | adnipy 2 | ====== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | adnipy 8 | -------------------------------------------------------------------------------- /docs/readme.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../README.rst 2 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | Usage 3 | ===== 4 | 5 | To use adnipy in a project:: 6 | 7 | import adnipy 8 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: adnipy-dev 2 | channels: 3 | - defaults 4 | dependencies: 5 | # required 6 | - python 7 | - pip 8 | 9 | # code checks 10 | - flake8 11 | - flake8-import-order 12 | - pep8-naming 13 | 14 | # code formatting 15 | - black 16 | 17 | # documentation 18 | - sphinx 19 | 20 | # testing 21 | - pytest 22 | - pytest-cov 23 | - coverage 24 | 25 | # requirements 26 | - pandas 27 | - matplotlib 28 | - jupyter 29 | 30 | # distribution 31 | - twine 32 | 33 | # pypi packages 34 | - pip: 35 | # code checks 36 | - flake8-bugbear 37 | - flake8-docstrings 38 | 39 | # documentation 40 | - sphinx-rtd-theme 41 | - watchdog 42 | - doc8 43 | 44 | # testing 45 | - tox 46 | - tox-conda 47 | 48 | # distribution 49 | - bumpversion 50 | -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | matplotlib>=3.0.0 2 | pandas>=0.23.0 3 | pre-commit 4 | pytest 5 | sphinx-rtd-theme==3.0.2 6 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.1.0 3 | commit = True 4 | tag = True 5 | 6 | [aliases] 7 | test = pytest 8 | 9 | [isort] 10 | profile = black 11 | 12 | [pylint] 13 | max-line-length = 88 14 | 15 | [bumpversion:file:setup.py] 16 | search = version="{current_version}" 17 | replace = version="{new_version}" 18 | 19 | 
[bumpversion:file:adnipy/__init__.py] 20 | search = __version__ = "{current_version}" 21 | replace = __version__ = "{new_version}" 22 | 23 | [flake8] 24 | application-import-names = adnipy 25 | docstring-convention = numpy 26 | exclude = 27 | docs 28 | .eggs 29 | .tox 30 | .venv 31 | build 32 | max-line-length = 88 33 | extend-ignore = E203, W503 34 | import-order-style = pep8 35 | per-file-ignores = 36 | adnipy/__init__.py:F401, E402, I001 37 | docs/conf.py:E402 38 | 39 | [coverage:run] 40 | branch = True 41 | 42 | [tool:pytest] 43 | addopts = --doctest-modules --doctest-continue-on-failure --ignore=setup.py 44 | 45 | [tox:tox] 46 | envlist = 47 | py{38,39,310,311}-pandas{1,2,3} 48 | flake8 49 | doc8 50 | 51 | [travis] 52 | python = 53 | 3.11: py311 54 | 3.10: py310 55 | 3.9: py39 56 | 3.8: py38 57 | 58 | [testenv:flake8] 59 | deps = 60 | flake8 61 | flake8-bugbear 62 | flake8-docstrings 63 | flake8-import-order 64 | pep8-naming 65 | commands = 66 | flake8 67 | 68 | [testenv:doc8] 69 | deps = 70 | sphinx==1.8.5 71 | doc8 72 | commands = 73 | doc8 docs 74 | 75 | [testenv] 76 | setenv = 77 | PYTHONPATH = {toxinidir} 78 | deps = 79 | coverage 80 | pytest 81 | pandas1: pandas<2.0.0 82 | pandas2: pandas<2.1.0 83 | pandas3: pandas 84 | sphinx_rtd_theme 85 | commands = 86 | coverage run -a -m pytest --basetemp={envtmpdir} 87 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """The setup script.""" 5 | 6 | from setuptools import find_packages, setup 7 | 8 | with open("README.rst", encoding="utf-8") as readme_file: 9 | readme = readme_file.read() 10 | 11 | with open("HISTORY.rst", encoding="utf-8") as history_file: 12 | history = history_file.read() 13 | 14 | requirements = ["pandas>=0.23.0", "matplotlib>=3.0.0"] 15 | 16 | setup_requirements = ["pytest-runner"] 17 | 18 | test_requirements 
= ["pytest"] 19 | 20 | setup( 21 | author="Maximilian Cosmo Sitter", 22 | author_email="msitter@smail.uni-koeln.de", 23 | classifiers=[ 24 | "Development Status :: 1 - Planning", 25 | "Intended Audience :: Science/Research", 26 | "License :: OSI Approved :: MIT License", 27 | "Natural Language :: English", 28 | "Operating System :: OS Independent", 29 | "Programming Language :: Python :: 3.8", 30 | "Programming Language :: Python :: 3.9", 31 | "Programming Language :: Python :: 3.10", 32 | "Programming Language :: Python :: 3.11", 33 | ], 34 | description="Process ADNI study data with adnipy.", 35 | python_requires=">=3.8.0", 36 | platforms=["any"], 37 | install_requires=requirements, 38 | license="MIT license", 39 | long_description=readme + "\n\n" + history, 40 | include_package_data=True, 41 | keywords="adnipy", 42 | name="adnipy", 43 | packages=find_packages(include=["adnipy"]), 44 | setup_requires=setup_requirements, 45 | test_suite="tests", 46 | tests_require=test_requirements, 47 | url="https://github.com/mcsitter/adnipy", 48 | version="0.1.0", 49 | zip_safe=False, 50 | ) 51 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Unit test package for adnipy.""" 4 | -------------------------------------------------------------------------------- /tests/test_adni.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Tests for dataframe `adni` extension.""" 4 | 5 | # pylint: disable=W0621, R0801 6 | 7 | 8 | # Third party imports 9 | import pandas as pd 10 | import pytest 11 | 12 | from adnipy import adni # noqa: F401 pylint: disable=W0611 13 | 14 | 15 | @pytest.fixture 16 | def test_df(): 17 | """Provide sample dataframe for standardized testing.""" 18 | columns = [ 19 | "Subject ID", 20 | "Description", 21 | "Group", 22 | 
"VISCODE", 23 | "VISCODE2", 24 | "Image ID", 25 | "Acq Date", 26 | "RID", 27 | ] 28 | subjects = [ 29 | ["101_S_1001", "Average", "MCI", "m12", "m12", 100001, "1/01/2001", 1001], 30 | ["101_S_1001", "Average", "MCI", "m24", "m24", 200001, "1/01/2002", 1001], 31 | ["102_S_1002", "Average", "AD", "m12", "m12", 100002, "2/02/2002", 1002], 32 | ["102_S_1002", "Dynamic", "AD", "m12", "m12", 200002, "2/02/2002", 1002], 33 | ["103_S_1003", "Average", "LMCI", "m12", "m12", 100003, "3/03/2003", 1003], 34 | ["104_S_1004", "Average", "EMCI", "m12", "m12", 100004, "4/04/2004", 1004], 35 | ] 36 | 37 | dataframe = pd.DataFrame(subjects, columns=columns) 38 | 39 | return dataframe 40 | 41 | 42 | @pytest.fixture 43 | def test_timepoints(test_df): 44 | """Dictionairy for the timepoints in test_df if Description is ignored.""" 45 | test_df = test_df.drop(columns=["Description"]) 46 | timepoints = { 47 | "Timepoint 1": test_df.iloc[[0, 2, 4, 5]].set_index(["Subject ID", "Image ID"]), 48 | "Timepoint 2": test_df.iloc[[1, 3]].set_index(["Subject ID", "Image ID"]), 49 | } 50 | return timepoints 51 | 52 | 53 | def test_rid_from_subject_id(test_df): 54 | """Test creating RID from Subject ID.""" 55 | correct = test_df 56 | test_df = test_df.drop(columns="RID") 57 | with_rid = test_df.adni.rid() 58 | pd.testing.assert_frame_equal(correct, with_rid) 59 | 60 | 61 | def test_longitundal_only(test_df): 62 | """Test output dataframe being longitudinal only.""" 63 | correct = test_df.drop(index=[4, 5]) 64 | longitundal_only = test_df.adni.longitudinal() 65 | pd.testing.assert_frame_equal(correct, longitundal_only) 66 | 67 | 68 | def test_drop_dynamic_images(test_df): 69 | """Test dropping entries with dynamic description.""" 70 | correct = test_df.drop(index=[3]) 71 | no_dynamics = test_df.adni.drop_dynamic() 72 | pd.testing.assert_frame_equal(correct, no_dynamics) 73 | 74 | 75 | def test_drop_dynamic_without_description_columns(test_df): 76 | """Test dropping dynamic images without description 
column present.""" 77 | test_df = test_df.drop(columns=["Description"]) 78 | with pytest.raises(KeyError): 79 | test_df.adni.drop_dynamic() 80 | 81 | 82 | def test_standardizing_index(test_df): 83 | """Test conversion of index to standard.""" 84 | correct_index = ["Subject ID", "Image ID", "RID"] 85 | standard_index = test_df.adni.standard_index().index.names 86 | assert correct_index == standard_index 87 | 88 | 89 | def test_renaming_columns_to_standard(test_df): 90 | """Test renaming of column Acq Date to SCANDATE.""" 91 | correct = test_df.drop(columns=["VISCODE2"]).rename( 92 | columns={"Acq Date": "SCANDATE"} 93 | ) 94 | renamed = test_df.adni.standard_column_names() 95 | pd.testing.assert_frame_equal(correct, renamed) 96 | 97 | 98 | def test_extracting_groups_grouped_mci(test_df): 99 | """Test creating a datframe for each group.""" 100 | correct = { 101 | "AD": test_df.iloc[[2, 3]], 102 | "MCI": test_df.iloc[[0, 1]], 103 | "EMCI": test_df.loc[[5]], 104 | "LMCI": test_df.loc[[4]], 105 | } 106 | group_dict = test_df.adni.groups(grouped_mci=False) 107 | pd.testing.assert_frame_equal(correct["AD"], group_dict["AD"]) 108 | pd.testing.assert_frame_equal(correct["MCI"], group_dict["MCI"]) 109 | pd.testing.assert_frame_equal(correct["LMCI"], group_dict["LMCI"]) 110 | pd.testing.assert_frame_equal(correct["EMCI"], group_dict["EMCI"]) 111 | 112 | 113 | def test_extracting_groups_sperate_mci_groups(test_df): 114 | """Test creating a datframe for each group.""" 115 | correct = {"MCI": test_df.iloc[[0, 1, 4, 5]], "AD": test_df.iloc[[2, 3]]} 116 | group_dict = test_df.adni.groups() 117 | pd.testing.assert_frame_equal(correct["MCI"], group_dict["MCI"]) 118 | pd.testing.assert_frame_equal(correct["AD"], group_dict["AD"]) 119 | 120 | 121 | def test_timepoint_extracting_raises_error_with_description(test_df): 122 | """Test raising error if Description in columns.""" 123 | with pytest.raises(ValueError): 124 | test_df.adni.timepoints() 125 | 126 | 127 | def 
test_timepoint_extraction_second_timepoint_earliest(test_df, test_timepoints): 128 | """Test timepoint extraction with second='first'.""" 129 | correct = test_timepoints 130 | test_df = test_df.drop(columns="Description") 131 | timepoints = test_df.adni.timepoints() 132 | pd.testing.assert_frame_equal(correct["Timepoint 1"], timepoints["Timepoint 1"]) 133 | 134 | 135 | def test_timepoint_extraction_second_timepoint_latest(test_df, test_timepoints): 136 | """Test timepoint extraction with second='last'.""" 137 | correct = test_timepoints 138 | test_df = test_df.drop(columns="Description") 139 | timepoints = test_df.adni.timepoints(second="last") 140 | pd.testing.assert_frame_equal(correct["Timepoint 1"], timepoints["Timepoint 1"]) 141 | -------------------------------------------------------------------------------- /tests/test_adnipy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Tests for `adnipy` package.""" 4 | 5 | # pylint: disable=W0621, R0801 6 | 7 | # Standard library imports 8 | import io 9 | 10 | # Third party imports 11 | import numpy as np 12 | import pandas as pd 13 | import pytest 14 | 15 | from adnipy import adnipy 16 | 17 | 18 | @pytest.fixture 19 | def test_df(): 20 | """Provide sample dataframe for standardized testing.""" 21 | columns = [ 22 | "Subject ID", 23 | "Description", 24 | "Group", 25 | "VISCODE", 26 | "VISCODE2", 27 | "Image ID", 28 | "Acq Date", 29 | "RID", 30 | ] 31 | subjects = [ 32 | ["101_S_1001", "Average", "MCI", "m12", "m12", 100001, "1/01/2001", 1001], 33 | ["101_S_1001", "Average", "MCI", "m24", "m24", 200001, "1/01/2002", 1001], 34 | ["102_S_1002", "Average", "AD", "m12", "m12", 100002, "2/02/2002", 1002], 35 | ["102_S_1002", "Dynamic", "AD", "m12", "m12", 200002, "2/02/2002", 1002], 36 | ["103_S_1003", "Average", "LMCI", "m12", "m12", 100003, "3/03/2003", 1003], 37 | ["104_S_1004", "Average", "EMCI", "m12", "m12", 100004, "4/04/2004", 1004], 38 | ] 
39 | 40 | dataframe = pd.DataFrame(subjects, columns=columns) 41 | 42 | return dataframe 43 | 44 | 45 | @pytest.fixture 46 | def test_file(): 47 | """Provide sample file which contains same data as test_df.""" 48 | file = io.StringIO( 49 | "Subject ID,Description,Group,VISCODE,VISCODE2,Image ID,Acq Date,RID\n" 50 | "101_S_1001,Average,MCI,m12,m12,100001,1/01/2001,1001\n" 51 | "101_S_1001,Average,MCI,m24,m24,200001,1/01/2002,1001\n" 52 | "102_S_1002,Average,AD,m12,m12,100002,2/02/2002,1002\n" 53 | "102_S_1002,Dynamic,AD,m12,m12,200002,2/02/2002,1002\n" 54 | "103_S_1003,Average,LMCI,m12,m12,100003,3/03/2003,1003\n" 55 | "104_S_1004,Average,EMCI,m12,m12,100004,4/04/2004,1004\n" 56 | ) 57 | return file 58 | 59 | 60 | @pytest.fixture 61 | def test_timepoints(test_df): 62 | """Dictionairy for the timepoints in test_df if Description is ignored.""" 63 | test_df = test_df.drop(columns=["Description"]) 64 | timepoints = { 65 | "Timepoint 1": test_df.iloc[[0, 2, 4, 5]].set_index(["Subject ID", "Image ID"]), 66 | "Timepoint 2": test_df.iloc[[1, 3]].set_index(["Subject ID", "Image ID"]), 67 | } 68 | return timepoints 69 | 70 | 71 | def test_calculating_timedelta_of_scandate(test_timepoints): 72 | """Test calculating timedelta of 2 dataframes.""" 73 | for timepoint, dataframe in test_timepoints.items(): 74 | timepoint_df = dataframe.rename(columns={"Acq Date": "SCANDATE"}) 75 | timepoint_df["SCANDATE"] = pd.to_datetime(timepoint_df["SCANDATE"]) 76 | test_timepoints[timepoint] = timepoint_df 77 | correct_dtype = np.dtype("