├── .codecov.yml
├── .gitattributes
├── .github
│   ├── release.yml
│   └── workflows
│       ├── python-publish.yml
│       └── testing.yml
├── .gitignore
├── .readthedocs.yml
├── .zenodo.json
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── docs
│   ├── Makefile
│   ├── _static
│   │   ├── nimare.css
│   │   └── theme_overrides.css
│   ├── _templates
│   │   ├── class.rst
│   │   ├── function.rst
│   │   ├── module.rst
│   │   └── navbar.html
│   ├── about.rst
│   ├── api.rst
│   ├── conf.py
│   ├── contributing.rst
│   ├── index.rst
│   ├── installation.rst
│   ├── links.rst
│   ├── make.bat
│   ├── references.bib
│   └── sphinxext
│       └── github_link.py
├── examples
│   ├── 01_basic_io
│   │   ├── README.txt
│   │   └── plot_create_dataset.py
│   ├── 02_meta-analysis
│   │   ├── README.txt
│   │   ├── plot_meta-analysis_walkthrough.py
│   │   └── plot_run_meta-analysis.py
│   └── README.txt
├── pymare
│   ├── __init__.py
│   ├── _version.py
│   ├── core.py
│   ├── datasets
│   │   ├── __init__.py
│   │   └── metadat.py
│   ├── effectsize
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── expressions.json
│   │   └── expressions.py
│   ├── estimators
│   │   ├── __init__.py
│   │   ├── combination.py
│   │   └── estimators.py
│   ├── resources
│   │   └── datasets
│   │       ├── michael2013.json
│   │       └── michael2013.tsv
│   ├── results.py
│   ├── stats.py
│   ├── tests
│   │   ├── conftest.py
│   │   ├── test_combination_tests.py
│   │   ├── test_core.py
│   │   ├── test_datasets.py
│   │   ├── test_effectsize_base.py
│   │   ├── test_effectsize_expressions.py
│   │   ├── test_estimators.py
│   │   ├── test_results.py
│   │   ├── test_stan_estimators.py
│   │   ├── test_stats.py
│   │   └── test_utils.py
│   └── utils.py
├── pypi_description.md
├── pyproject.toml
├── setup.cfg
├── setup.py
└── versioneer.py
/.codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 |   notify:
3 |     require_ci_to_pass: yes
4 |
5 | coverage:
6 |   status:
7 |     project:
8 |       default:
9 |         # basic
10 |         target: auto
11 |         threshold: 2%
12 |     patch:
13 |       default:
14 |         target: auto
15 |         threshold: 2%
16 | ignore:
17 |   - 'pymare/tests/'
18 |   - 'pymare/_version.py'
19 |   - 'pymare/info.py'
20 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | pymare/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/.github/release.yml:
--------------------------------------------------------------------------------
1 | changelog:
2 |   exclude:
3 |     labels:
4 |       - ignore-for-release
5 |   categories:
6 |     - title: 🛠 Breaking Changes
7 |       labels:
8 |         - breaking-change
9 |     - title: 🎉 Exciting New Features
10 |       labels:
11 |         - enhancement
12 |     - title: 👎 Deprecations
13 |       labels:
14 |         - deprecation
15 |     - title: 🐛 Bug Fixes
16 |       labels:
17 |         - bug
18 |     - title: Other Changes
19 |       labels:
20 |         - "*"
21 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 |
6 | on:
7 |   release:
8 |     types: [published]
9 |
10 | jobs:
11 |   deploy:
12 |
13 |     runs-on: ubuntu-latest
14 |
15 |     steps:
16 |       - uses: actions/checkout@v2
17 |       - name: Set up Python
18 |         uses: actions/setup-python@v2
19 |         with:
20 |           python-version: '3.8'
21 |       - name: Install dependencies
22 |         run: |
23 |           python -m pip install --upgrade pip
24 |           pip install setuptools wheel twine
25 |       - name: Build and publish
26 |         env:
27 |           TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
28 |           TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
29 |         run: |
30 |           python setup.py sdist bdist_wheel
31 |           twine upload dist/*
32 |
--------------------------------------------------------------------------------
/.github/workflows/testing.yml:
--------------------------------------------------------------------------------
1 | name: "Run Tests"
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - "master"
7 |   pull_request:
8 |     branches:
9 |       - "*"
10 |   schedule:
11 |     # Run tests every Sunday at 12am
12 |     - cron: "0 0 * * 0"
13 |
14 | permissions:
15 |   contents: read
16 |   checks: write
17 |   pull-requests: write
18 |
19 | concurrency:
20 |   group: environment-${{ github.ref }}
21 |   cancel-in-progress: true
22 |
23 | jobs:
24 |   # Determine if tests should be run based on commit message.
25 |   check_skip:
26 |     runs-on: ubuntu-latest
27 |     outputs:
28 |       skip: ${{ steps.result_step.outputs.ci-skip }}
29 |     steps:
30 |       - uses: actions/checkout@v3
31 |         with:
32 |           fetch-depth: 0
33 |       - id: result_step
34 |         uses: mstachniuk/ci-skip@master
35 |         with:
36 |           commit-filter: "[skip ci];[ci skip];[skip github]"
37 |           commit-filter-separator: ";"
38 |
39 |   run_tests:
40 |     needs: check_skip
41 |     if: ${{ needs.check_skip.outputs.skip == 'false' }}
42 |     runs-on: ${{ matrix.os }}
43 |     strategy:
44 |       fail-fast: false
45 |       matrix:
46 |         os: ["ubuntu-latest", "macos-13"]
47 |         python-version: ["3.8", "3.9", "3.10", "3.11"]
48 |
49 |     name: ${{ matrix.os }} with Python ${{ matrix.python-version }}
50 |     defaults:
51 |       run:
52 |         shell: bash
53 |     steps:
54 |       - uses: actions/checkout@v3
55 |       - name: "Set up python"
56 |         uses: actions/setup-python@v2
57 |         with:
58 |           python-version: ${{ matrix.python-version }}
59 |       - name: "Display Python version"
60 |         shell: bash {0}
61 |         run: python -c "import sys; print(sys.version)"
62 |       - name: "Install PyMARE"
63 |         shell: bash {0}
64 |         run: pip install -e .[tests,stan]
65 |       - name: "Run tests"
66 |         shell: bash {0}
67 |         run: python -m pytest --pyargs pymare --cov=pymare
68 |       - name: "Upload coverage to CodeCov"
69 |         uses: codecov/codecov-action@v4
70 |         if: success()
71 |
72 |   flake8-lint:
73 |     runs-on: ubuntu-latest
74 |     name: Lint with flake8
75 |     steps:
76 |       - name: Check out source repository
77 |         uses: actions/checkout@v3
78 |
79 |       - name: Set up Python environment
80 |         uses: actions/setup-python@v2
81 |         with:
82 |           python-version: "3.8"
83 |
84 |       - name: "Install the package"
85 |         shell: bash {0}
86 |         run: |
87 |           python -m pip install --progress-bar off --upgrade pip setuptools wheel
88 |           python -m pip install -e .[tests]
89 |
90 |       - name: "Run flake8"
91 |         shell: bash {0}
92 |         run: |
93 |           flake8 pymare
94 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | docs/generated/
2 | docs/modules/
3 | docs/auto_examples/
4 | historical/
5 |
6 | #
7 | # Byte-compiled / optimized / DLL files
8 | __pycache__/
9 | *.py[cod]
10 | *$py.class
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | env/
18 | build/
19 | develop-eggs/
20 | dist/
21 | downloads/
22 | eggs/
23 | .eggs/
24 | lib/
25 | lib64/
26 | parts/
27 | sdist/
28 | var/
29 | *.egg-info/*
30 | .installed.cfg
31 | *.egg
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *,cover
52 | .hypothesis/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 |
62 | # Flask stuff:
63 | instance/
64 | .webassets-cache
65 |
66 | # Scrapy stuff:
67 | .scrapy
68 |
69 | # Sphinx documentation
70 | docs/_build/
71 |
72 | # PyBuilder
73 | target/
74 |
75 | # IPython Notebook
76 | .ipynb_checkpoints
77 |
78 | # pyenv
79 | .python-version
80 |
81 | # celery beat schedule file
82 | celerybeat-schedule
83 |
84 | # dotenv
85 | .env
86 |
87 | # virtualenv
88 | venv/
89 | ENV/
90 |
91 | # Spyder project settings
92 | .spyderproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | *.DS_Store
98 | *.orig
99 |
100 | # Sphinx
101 | doc/_build/
102 | doc/auto_examples/
103 | doc/gen_modules/
104 | doc/generated/
105 |
106 | # Pytest
107 | .pytest_cache/
108 |
109 | # Vim
110 | .*.sw[op]
111 |
112 | # Emacs and others
113 | *~
114 |
115 | # Ignore test sql database files
116 | bidsdb.sqlite
117 | fmriprep.sqlite
118 |
119 | .vscode
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | build:
9 |   os: "ubuntu-22.04"
10 |   tools:
11 |     python: "3.9"
12 |
13 | # Build documentation in the docs/ directory with Sphinx
14 | sphinx:
15 |   configuration: docs/conf.py
16 |
17 | python:
18 |   install:
19 |     - method: pip
20 |       path: .
21 |       extra_requirements:
22 |         - doc
23 |
--------------------------------------------------------------------------------
/.zenodo.json:
--------------------------------------------------------------------------------
1 | {
2 | "creators": [
3 | {
4 | "name": "Yarkoni, Tal",
5 | "affiliation": "University of Texas at Austin",
6 | "orcid": "0000-0002-6558-5113"
7 | },
8 | {
9 | "name": "Salo, Taylor",
10 | "affiliation": "Florida International University",
11 | "orcid": "0000-0001-9813-3167"
12 | },
13 | {
14 | "name": "Peraza, Julio A.",
15 | "affiliation": "Florida International University",
16 | "orcid": "0000-0003-3816-5903"
17 | },
18 | {
19 | "name": "Nichols, Thomas E.",
20 | "affiliation": "Big Data Institute, University of Oxford",
21 | "orcid": "0000-0002-4516-5103"
22 | }
23 | ],
24 | "keywords": [
25 | "meta-analysis"
26 | ],
27 | "license": "MIT",
28 | "upload_type": "software"
29 | }
30 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | education, socio-economic status, nationality, personal appearance, race,
10 | religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting our code of conduct enforcer, James Kent, at **james.kent@austin.utexas.edu**.
59 | All complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances.
61 | The project team is obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to PyMARE
2 |
3 | Welcome to the PyMARE repository! We're excited you're here and want to contribute.
4 |
5 | These guidelines are designed to make it as easy as possible to get involved. If you have any questions that aren't discussed below, please let us know by opening an [issue][link_issues]!
6 |
7 | Before you start you'll need to set up a free [GitHub][link_github] account and sign in. Here are some [instructions][link_signupinstructions].
8 |
9 | ## Governance
10 |
11 | Governance is a hugely important part of any project.
12 | It is especially important to have clear process and communication channels for open source projects that rely on a distributed network of volunteers, such as ``PyMARE``.
13 |
14 | `PyMARE` is currently supported by a small group of core developers.
15 | Even with only a couple of individuals involved in decision making processes, we've found that setting expectations and communicating a shared vision has great value.
16 |
17 | By starting the governance structure early in our development, we hope to welcome more people into the contributing team.
18 | We are committed to continuing to update the governance structures as necessary.
19 | Every member of the ``PyMARE`` community is encouraged to comment on these processes and suggest improvements.
20 |
21 | All potential changes to ``PyMARE`` are explicitly and openly discussed in the described channels of communication, and we strive for consensus amongst all community members.
22 |
23 | ## Code of conduct
24 |
25 | All ``PyMARE`` community members are expected to follow our [code of conduct](https://github.com/neurostuff/PyMARE/blob/master/CODE_OF_CONDUCT.md) during any interaction with the project.
26 | That includes, but is not limited to, online conversations, in-person workshops or development sprints, and talks about the software.
27 |
28 | As stated in the code, severe or repeated violations by community members may result in exclusion from collective decision-making and rejection of future contributions to the ``PyMARE`` project.
29 |
30 | ## Labels
31 |
32 | The current list of labels is [here][link_labels] and includes:
33 |
34 | * [good first issue](https://github.com/neurostuff/PyMARE/labels/good%20first%20issue)
35 | *These issues contain a task that a member of the team has determined should require minimal knowledge of the existing codebase, and should be good for people new to the project.*
36 | If you are interested in contributing to PyMARE, but aren't sure where to start, we encourage you to take a look at these issues in particular.
37 |
38 | * [help wanted](https://github.com/neurostuff/PyMARE/labels/help%20wanted)
39 | *These issues contain a task that a member of the team has determined we need additional help with.*
40 | If you feel that you can contribute to one of these issues, we especially encourage you to do so!
41 |
42 | * [bug](https://github.com/neurostuff/PyMARE/labels/bug)
43 | *These issues point to problems in the project.*
44 | If you find a new bug, please give as much detail as possible in your issue, including steps to recreate the error.
45 | If you experience the same bug as one already listed, please add any additional information that you have as a comment.
46 |
47 | * [enhancement](https://github.com/neurostuff/PyMARE/labels/enhancement)
48 | *These issues are asking for new features to be added to the project.*
49 | Please try to make sure that your requested feature is distinct from any others that have already been requested or implemented. If you find one that's similar but there are subtle differences, please reference the other request in your issue.
50 |
51 | ## Making a change
52 |
53 | We appreciate all contributions to PyMARE, but those accepted fastest will follow a workflow similar to the following:
54 |
55 | **1. Comment on an existing issue or open a new issue referencing your addition.**
56 |
57 | This allows other members of the PyMARE development team to confirm that you aren't overlapping with work that's currently underway and that everyone is on the same page with the goal of the work you're going to carry out.
58 |
59 | [This blog][link_pushpullblog] is a nice explanation of why putting this work in up front is so useful to everyone involved.
60 |
61 | **2. Fork PyMARE.**
62 |
63 | [Fork][link_fork] the [PyMARE repository][link_pymare] to your profile.
64 |
65 | This is now your own unique copy of PyMARE. Changes here won't affect anyone else's work, so it's a safe space to explore edits to the code!
66 |
67 | Make sure to [keep your fork up to date][link_updateupstreamwiki] with the master repository.
68 |
69 | **3. Make the changes you've discussed.**
70 |
71 | Try to keep the changes focused. We've found that working on a [new branch][link_branches] makes it easier to keep your changes targeted.
72 |
73 | When you're creating your pull request, please do your best to follow PyMARE's preferred style conventions. Namely, documentation should follow the [numpydoc](https://numpydoc.readthedocs.io/en/latest/) convention and code should adhere to [PEP8](https://www.python.org/dev/peps/pep-0008/) as much as possible.
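74 |
75 | For example, a new function would ideally carry a numpydoc-style docstring like the one below (a hypothetical `double_values` helper, shown purely to illustrate the convention):
76 |
77 | ```python
78 | import numpy as np
79 |
80 |
81 | def double_values(values):
82 |     """Double the input values.
83 |
84 |     Parameters
85 |     ----------
86 |     values : :obj:`numpy.ndarray`
87 |         Array of values to double.
88 |
89 |     Returns
90 |     -------
91 |     :obj:`numpy.ndarray`
92 |         Array of doubled values.
93 |     """
94 |     return np.asarray(values) * 2
95 | ```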
74 |
75 | **4. Submit a pull request.**
76 |
77 | Submit a [pull request][link_pullrequest].
78 |
79 | A member of the development team will review your changes to confirm that they can be merged into the main codebase.
80 |
81 | ## Recognizing contributions
82 |
83 | We welcome and recognize all contributions from documentation to testing to code development. You can see a list of current contributors in our [zenodo][link_zenodo] file. If you are new to the project, don't forget to add your name and affiliation there!
84 |
85 | ## Thank you!
86 |
87 | You're awesome.
88 |
89 | * NOTE: These guidelines are based on contributing guidelines from the [STEMMRoleModels][link_stemmrolemodels] project.
90 |
91 | [link_github]: https://github.com/
92 | [link_pymare]: https://github.com/neurostuff/PyMARE
93 | [link_signupinstructions]: https://help.github.com/articles/signing-up-for-a-new-github-account
94 | [link_react]: https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comments
95 | [link_issues]: https://github.com/neurostuff/PyMARE/issues
96 | [link_labels]: https://github.com/neurostuff/PyMARE/labels
97 | [link_discussingissues]: https://help.github.com/articles/discussing-projects-in-issues-and-pull-requests
98 |
99 | [link_pullrequest]: https://help.github.com/articles/creating-a-pull-request/
100 | [link_fork]: https://help.github.com/articles/fork-a-repo/
101 | [link_pushpullblog]: https://www.igvita.com/2011/12/19/dont-push-your-pull-requests/
102 | [link_branches]: https://help.github.com/articles/creating-and-deleting-branches-within-your-repository/
103 | [link_updateupstreamwiki]: https://help.github.com/articles/syncing-a-fork/
104 | [link_stemmrolemodels]: https://github.com/KirstieJane/STEMMRoleModels
105 | [link_zenodo]: https://github.com/neurostuff/PyMARE/blob/master/.zenodo.json
106 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020- pymare developers
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include versioneer.py
2 | include pymare/_version.py
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyMARE: Python Meta-Analysis & Regression Engine
2 | A Python library for mixed-effects meta-regression (including meta-analysis).
3 |
4 | [](https://pypi.python.org/pypi/pymare/)
5 | [](https://pypi.python.org/pypi/pymare/)
6 | [](https://opensource.org/licenses/MIT)
7 | [](https://zenodo.org/badge/latestdoi/228903736)
8 | [](http://pymare.readthedocs.io/en/latest/?badge=latest)
9 | 
10 | [](https://codecov.io/gh/neurostuff/pymare)
11 |
12 | **PyMARE is alpha software under heavy development; we reserve the right to make major changes to the API.**
13 |
14 | ## Quickstart
15 | Install PyMARE from PyPI:
16 | ```bash
17 | pip install pymare
18 | ```
19 |
20 |
21 | Or for the bleeding-edge GitHub version:
22 |
23 | ```bash
24 | pip install git+https://github.com/neurostuff/pymare.git
25 | ```
26 |
27 | Suppose we have parameter estimates from 8 studies, along with corresponding variances, and a single (fixed) covariate:
28 |
29 | ```python
30 | import numpy as np
31 |
32 | y = np.array([-1, 0.5, 0.5, 0.5, 1, 1, 2, 10])  # study-level estimates
33 | v = np.array([1, 1, 2.4, 0.5, 1, 1, 1.2, 1.5])  # study-level variances
34 | X = np.array([1, 1, 2, 2, 4, 4, 2.8, 2.8])  # a fixed study-level covariate
35 | ```
34 |
35 | We can conduct a mixed-effects meta-regression with restricted maximum likelihood (REML) estimation using PyMARE's high-level `meta_regression` function:
36 |
37 | ```python
38 | from pymare import meta_regression
39 |
40 | result = meta_regression(y, v, X, names=['my_cov'], add_intercept=True,
41 |                          method='REML')
42 | print(result.to_df())
43 | ```
44 |
45 | This produces the following output:
46 |
47 | ```
48 |         name  estimate        se   z-score     p-val  ci_0.025  ci_0.975
49 | 0  intercept -0.106579  2.993715 -0.035601  0.971600 -5.974153  5.760994
50 | 1     my_cov  0.769961  1.113344  0.691575  0.489204 -1.412153  2.952075
51 | ```
52 |
53 | Alternatively, we can achieve the same outcome using PyMARE's object-oriented API (which the `meta_regression` function wraps):
54 |
55 | ```python
56 |
57 | from pymare import Dataset
58 | from pymare.estimators import VarianceBasedLikelihoodEstimator
59 |
60 | # A handy container we can pass to any estimator
61 | dataset = Dataset(y, v, X)
62 | # Estimator class for likelihood-based methods when variances are known
63 | estimator = VarianceBasedLikelihoodEstimator(method='REML')
64 | # All estimators expose a fit_dataset() method that takes a `Dataset`
65 | # instance as the first (and usually only) argument.
66 | estimator.fit_dataset(dataset)
67 | # Post-fitting we can obtain a MetaRegressionResults instance via .summary()
68 | results = estimator.summary()
69 | # Print summary of results as a pandas DataFrame
70 | print(results.to_df())
71 | ```
72 |
73 | And if we want to be even more explicit, we can avoid the `Dataset` abstraction
74 | entirely (though we'll lose some convenient validation checks):
75 |
76 | ```python
77 | estimator = VarianceBasedLikelihoodEstimator(method='REML')
78 |
79 | # X must be 2-d; this is one of the things the Dataset implicitly handles.
80 | X = X[:, None]
81 |
82 | estimator.fit(y, v, X)
83 |
84 | results = estimator.summary()
85 | ```
86 |
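87 | The same `fit_dataset()`/`summary()` pattern applies to every estimator listed in the API reference, so switching methods is a one-line change. A minimal sketch (reusing `y`, `v`, and `X` from above) with the `DerSimonianLaird` estimator:
88 |
89 | ```python
90 | from pymare import Dataset
91 | from pymare.estimators import DerSimonianLaird
92 |
93 | # DerSimonian-Laird estimates the between-study variance in closed form,
94 | # so this estimator takes no `method` argument.
95 | dsl = DerSimonianLaird()
96 | dsl.fit_dataset(Dataset(y, v, X))
97 | print(dsl.summary().to_df())
98 | ```
99 |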
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = PyMARE
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | clean:
16 | 	rm -r _build generated auto_examples
17 |
18 | html-noplot:
19 | 	$(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(SOURCEDIR) $(BUILDDIR)/html
20 | 	@echo
21 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
22 |
23 | .PHONY: help clean Makefile
24 |
25 | # Catch-all target: route all unknown targets to Sphinx using the new
26 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
27 | %: Makefile
28 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
29 |
--------------------------------------------------------------------------------
/docs/_static/nimare.css:
--------------------------------------------------------------------------------
1 | /*Alterations to the theme defaults*/
2 |
3 | /* Sidebar header (and topbar for mobile) */
4 | .wy-side-nav-search, .wy-nav-top {
5 | background: #899fdf;
6 | }
7 |
8 | /* Sphinx gallery example titles */
9 | /* Taken from scikit-learn */
10 | #examples {
11 | font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";
12 | }
13 |
14 | .sphx-glr-thumbcontainer span.caption-text {
15 | font-style: normal;
16 | font-size: 0.9rem;
17 | text-align: center;
18 | }
19 |
20 | #examples h1 {
21 | border-radius: 0.3rem;
22 | background-color: #b8a6db;
23 | text-align: center;
24 | font-size: 2rem;
25 | font-weight: 500;
26 | margin-bottom: 1rem;
27 | padding: 0.5rem;
28 | font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";
29 | }
30 |
31 | #examples h2 {
32 | border-radius: 0.3rem;
33 | background-color: #d589d8;
34 | text-align: center;
35 | font-size: 1.5rem;
36 | font-weight: 500;
37 | margin-bottom: 1rem;
38 | padding: 0.5rem;
39 | font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";
40 | }
41 |
42 | .rst-content img {
43 | max-width: 800px;
44 | }
45 |
46 | /* Enforce nice versionmodified titles */
47 | :root {
48 |
49 | /*****************************************************************************
50 | * Color
51 | *
52 | * Colors are defined in rgb string way, "red, green, blue"
53 | **/
54 | --pst-color-primary: 19, 6, 84;
55 | --pst-color-success: 40, 167, 69;
56 | --pst-color-info: 0, 123, 255; /*23, 162, 184;*/
57 | --pst-color-warning: 255, 193, 7;
58 | --pst-color-danger: 220, 53, 69;
59 | --pst-color-text-base: 51, 51, 51;
60 |
61 | --pst-color-h1: var(--pst-color-primary);
62 | --pst-color-h2: var(--pst-color-primary);
63 | --pst-color-h3: var(--pst-color-text-base);
64 | --pst-color-h4: var(--pst-color-text-base);
65 | --pst-color-h5: var(--pst-color-text-base);
66 | --pst-color-h6: var(--pst-color-text-base);
67 | --pst-color-paragraph: var(--pst-color-text-base);
68 | --pst-color-link: 0, 91, 129;
69 | --pst-color-link-hover: 227, 46, 0;
70 | --pst-color-headerlink: 198, 15, 15;
71 | --pst-color-headerlink-hover: 255, 255, 255;
72 | --pst-color-preformatted-text: 34, 34, 34;
73 | --pst-color-preformatted-background: 250, 250, 250;
74 | --pst-color-inline-code: 232, 62, 140;
75 |
76 | --pst-color-active-navigation: 19, 6, 84;
77 | --pst-color-navbar-link: 77, 77, 77;
78 | --pst-color-navbar-link-hover: var(--pst-color-active-navigation);
79 | --pst-color-navbar-link-active: var(--pst-color-active-navigation);
80 | --pst-color-sidebar-link: 77, 77, 77;
81 | --pst-color-sidebar-link-hover: var(--pst-color-active-navigation);
82 | --pst-color-sidebar-link-active: var(--pst-color-active-navigation);
83 | --pst-color-sidebar-expander-background-hover: 244, 244, 244;
84 | --pst-color-sidebar-caption: 77, 77, 77;
85 | --pst-color-toc-link: 119, 117, 122;
86 | --pst-color-toc-link-hover: var(--pst-color-active-navigation);
87 | --pst-color-toc-link-active: var(--pst-color-active-navigation);
88 |
89 | /*****************************************************************************
90 | * Icon
91 | **/
92 |
93 | /* font awesome icons*/
94 | --pst-icon-check-circle: "\f058";
95 | --pst-icon-info-circle: "\f05a";
96 | --pst-icon-exclamation-triangle: "\f071";
97 | --pst-icon-exclamation-circle: "\f06a";
98 | --pst-icon-times-circle: "\f057";
99 | --pst-icon-lightbulb: "\f0eb";
100 |
101 | /*****************************************************************************
102 | * Admonitions
103 | **/
104 |
105 | --pst-color-admonition-default: var(--pst-color-info);
106 | --pst-color-admonition-note: var(--pst-color-info);
107 | --pst-color-admonition-attention: var(--pst-color-warning);
108 | --pst-color-admonition-caution: var(--pst-color-warning);
109 | --pst-color-admonition-warning: var(--pst-color-warning);
110 | --pst-color-admonition-danger: var(--pst-color-danger);
111 | --pst-color-admonition-error: var(--pst-color-danger);
112 | --pst-color-admonition-hint: var(--pst-color-success);
113 | --pst-color-admonition-tip: var(--pst-color-success);
114 | --pst-color-admonition-important: var(--pst-color-success);
115 |
116 | --pst-icon-admonition-default: var(--pst-icon-info-circle);
117 | --pst-icon-admonition-note: var(--pst-icon-info-circle);
118 | --pst-icon-admonition-attention: var(--pst-icon-exclamation-circle);
119 | --pst-icon-admonition-caution: var(--pst-icon-exclamation-triangle);
120 | --pst-icon-admonition-warning: var(--pst-icon-exclamation-triangle);
121 | --pst-icon-admonition-danger: var(--pst-icon-exclamation-triangle);
122 | --pst-icon-admonition-error: var(--pst-icon-times-circle);
123 | --pst-icon-admonition-hint: var(--pst-icon-lightbulb);
124 | --pst-icon-admonition-tip: var(--pst-icon-lightbulb);
125 | --pst-icon-admonition-important: var(--pst-icon-exclamation-circle);
126 |
127 | /** versionmodified **/
128 | --pst-color-versionmodified-default: var(--pst-color-info);
129 | --pst-color-versionmodified-added: var(--pst-color-success);
130 | --pst-color-versionmodified-changed: var(--pst-color-warning);
131 | --pst-color-versionmodified-deprecated: var(--pst-color-danger);
132 |
133 | --pst-icon-versionmodified-default: var(--pst-icon-exclamation-circle);
134 | --pst-icon-versionmodified-added: var(--pst-icon-exclamation-circle);
135 | --pst-icon-versionmodified-changed: var(--pst-icon-exclamation-circle);
136 | --pst-icon-versionmodified-deprecated: var(--pst-icon-exclamation-circle);
137 | }
138 |
139 | .versionadded,
140 | .versionchanged,
141 | .deprecated {
142 | margin: 0.5em auto;
143 | padding: 0 0.6rem 0 0.6rem;
144 | overflow: hidden;
145 | page-break-inside: avoid;
146 | border-left: 0.2rem solid;
147 | border-color: rgba(var(--pst-color-versionmodified-default), 1);
148 | border-radius: 0.2rem;
149 | box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05),
150 | 0 0 0.0625rem rgba(0, 0, 0, 0.1);
151 | transition: color 250ms, background-color 250ms, border-color 250ms;
152 | background-color: rgba(var(--pst-color-versionmodified-default), 0.1);
153 | }
154 |
155 | div.admonition-references.admonition .label {
156 | border-left: 0px !important;
157 | background: inherit !important;
158 | }
159 |
160 | div.versionchanged > p {
161 | margin-bottom: 5px;
162 | margin-top: 5px;
163 | }
164 |
165 | div.versionchanged > ul.simple {
166 | margin-bottom: 5px;
167 | margin-top: 5px;
168 | }
169 |
170 | div.versionadded > p {
171 | margin-bottom: 5px;
172 | margin-top: 5px;
173 | }
174 |
175 | div.versionadded > ul.simple {
176 | margin-bottom: 5px;
177 | margin-top: 5px;
178 | }
179 |
180 | div.deprecated > p {
181 | margin-bottom: 5px;
182 | margin-top: 5px;
183 | }
184 |
185 | div.deprecated > ul.simple {
186 | margin-bottom: 5px;
187 | margin-top: 5px;
188 | }
189 |
190 | .versionadded {
191 | background-color: rgba(var(--pst-color-versionmodified-added), 0.1);
192 | border-color: rgba(var(--pst-color-versionmodified-added), 1);
193 | }
194 |
195 | .versionmodified.added {
196 | font-style: normal;
197 | font-weight: 700;
198 | }
199 |
200 | .versionmodified.added::before {
201 | font-family: "FontAwesome";
202 | margin-right: 0.6rem;
203 | content: var(--pst-icon-versionmodified-added);
204 | color: rgba(var(--pst-color-versionmodified-added), 1);
205 | }
206 |
207 | .versionchanged {
208 | background-color: rgba(var(--pst-color-versionmodified-changed), 0.1);
209 | border-color: rgba(var(--pst-color-versionmodified-changed), 1);
210 | }
211 |
212 | .versionmodified.changed {
213 | font-style: normal;
214 | font-weight: 700;
215 | }
216 |
217 | .versionmodified.changed::before {
218 | font-family: "FontAwesome";
219 | margin-right: 0.6rem;
220 | content: var(--pst-icon-versionmodified-changed);
221 | color: rgba(var(--pst-color-versionmodified-changed), 1);
222 | }
223 |
224 | .deprecated {
225 | background-color: rgba(var(--pst-color-versionmodified-deprecated), 0.1);
226 | border-color: rgba(var(--pst-color-versionmodified-deprecated), 1);
227 | }
228 |
229 | .versionmodified.deprecated {
230 | font-style: normal;
231 | font-weight: 700;
232 | border-radius: 0rem;
233 | border-left: 0rem;
234 | background-color: inherit;
235 | box-shadow: None;
236 | }
237 |
238 | .versionmodified.deprecated::before {
239 | font-family: "FontAwesome";
240 | margin-right: 0.6rem;
241 | content: var(--pst-icon-versionmodified-deprecated);
242 | color: rgba(var(--pst-color-versionmodified-deprecated), 1);
243 | }
244 |
--------------------------------------------------------------------------------
/docs/_static/theme_overrides.css:
--------------------------------------------------------------------------------
1 | /* override table width restrictions */
2 | @media screen and (min-width: 767px) {
3 |
4 |   .wy-table-responsive table td {
5 |     /* !important prevents the common CSS stylesheets from overriding
6 |        this as on RTD they are loaded after this stylesheet */
7 |     white-space: normal !important;
8 |   }
9 |
10 |   .wy-table-responsive {
11 |     overflow: visible !important;
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/docs/_templates/class.rst:
--------------------------------------------------------------------------------
1 | :mod:`{{module}}`.{{objname}}
2 | {{ underline }}==============
3 |
4 | .. currentmodule:: {{ module }}
5 |
6 | .. autoclass:: {{ objname }}
7 |    :members:
8 |    :inherited-members:
9 |    :show-inheritance:
10 | {% block methods %}
11 | {% endblock %}
12 |
13 | .. include:: {{fullname}}.examples
14 |
15 | .. raw:: html
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/_templates/function.rst:
--------------------------------------------------------------------------------
1 | :mod:`{{module}}`.{{objname}}
2 | {{ underline }}====================
3 |
4 | .. currentmodule:: {{ module }}
5 |
6 | .. autofunction:: {{ objname }}
7 |
8 | .. include:: {{fullname}}.{{item}}examples
9 |
10 | .. raw:: html
11 |
12 |
--------------------------------------------------------------------------------
/docs/_templates/module.rst:
--------------------------------------------------------------------------------
1 | {{ fullname }}
2 | {{ underline }}
3 |
4 | .. automodule:: {{ fullname }}
5 |
6 | {% block classes %}
7 | {% if classes %}
8 | .. rubric:: Classes
9 |
10 | .. autosummary::
11 |    :toctree:
12 |    :template: class.rst
13 | {% for item in classes %}
14 |    {{ item }}
15 | {%- endfor %}
16 | {% endif %}
17 | {% endblock %}
18 |
19 | {% block functions %}
20 | {% if functions %}
21 | .. rubric:: Functions
22 |
23 | .. autosummary::
24 |    :toctree:
25 |    :template: function.rst
26 | {% for item in functions %}
27 |    {{ item }}
28 | {%- endfor %}
29 | {% endif %}
30 | {% endblock %}
31 |
32 | {% block exceptions %}
33 | {% if exceptions %}
34 | .. rubric:: Exceptions
35 |
36 | .. autosummary::
37 |    :toctree:
38 | {% for item in exceptions %}
39 |    {{ item }}
40 | {%- endfor %}
41 | {% endif %}
42 | {% endblock %}
43 |
--------------------------------------------------------------------------------
/docs/_templates/navbar.html:
--------------------------------------------------------------------------------
1 |
2 |
9 |
10 | Home ·
11 |
15 | Documentation ·
16 | Command Line ·
17 | API
18 |
--------------------------------------------------------------------------------
/docs/about.rst:
--------------------------------------------------------------------------------
1 | .. include:: links.rst
2 |
3 | About PyMARE
4 | ============
5 |
6 | PyMARE does meta-analyses and meta-regressions in Python.
7 |
8 | PyMARE has largely been conceived and developed as support for `NiMARE`_, a library for performing neuroimaging meta-analyses.
9 | As such, PyMARE provides only a small subset of the functionality of other meta-analysis libraries, such as `metafor`_.
10 | If you need to perform a meta-analysis that is not supported by PyMARE, we suggest using `metafor`_ instead.
11 |
12 | .. tip::
13 |
14 |    If you want to see where PyMARE fits within the NeuroStore 2.0 ecosystem, check out
15 |    `neurostuff.github.io <https://neurostuff.github.io/>`_.
16 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | API
2 | ===
3 |
4 | .. _api_core_ref:
5 |
6 | :mod:`pymare.core`: Core objects
7 | --------------------------------------------------
8 |
9 | .. automodule:: pymare.core
10 |    :no-members:
11 |    :no-inherited-members:
12 |
13 | .. currentmodule:: pymare
14 |
15 | .. autosummary::
16 |    :toctree: generated/
17 |    :template: class.rst
18 |
19 |    core.Dataset
20 |
21 |    :template: function.rst
22 |
23 |    core.meta_regression
24 |
25 |
26 | .. _api_estimators_ref:
27 |
28 | :mod:`pymare.estimators`: Meta-analytic algorithms
29 | --------------------------------------------------
30 |
31 | .. automodule:: pymare.estimators
32 |    :no-members:
33 |    :no-inherited-members:
34 |
35 | .. currentmodule:: pymare
36 |
37 | .. autosummary::
38 |    :toctree: generated/
39 |    :template: class.rst
40 |
41 |    estimators.WeightedLeastSquares
42 |    estimators.DerSimonianLaird
43 |    estimators.VarianceBasedLikelihoodEstimator
44 |    estimators.SampleSizeBasedLikelihoodEstimator
45 |    estimators.StanMetaRegression
46 |    estimators.Hedges
47 |    estimators.StoufferCombinationTest
48 |    estimators.FisherCombinationTest
49 |    estimators.estimators.BaseEstimator
50 |
51 |
52 | .. _api_results_ref:
53 |
54 | :mod:`pymare.results`: Meta-analytic results
55 | ------------------------------------------------------
56 |
57 | .. automodule:: pymare.results
58 |    :no-members:
59 |    :no-inherited-members:
60 |
61 | .. currentmodule:: pymare
62 |
63 | .. autosummary::
64 |    :toctree: generated/
65 |    :template: class.rst
66 |
67 |    results.MetaRegressionResults
68 |    results.CombinationTestResults
69 |    results.PermutationTestResults
70 |    results.BayesianMetaRegressionResults
71 |
72 |
73 | .. _api_effectsize_ref:
74 |
75 | :mod:`pymare.effectsize`: Effect size computation/conversion
76 | ------------------------------------------------------------
77 |
78 | .. automodule:: pymare.effectsize
79 |    :no-members:
80 |    :no-inherited-members:
81 |
82 | .. currentmodule:: pymare
83 |
84 | .. autosummary::
85 |    :toctree: generated/
86 |    :template: class.rst
87 |
88 |    effectsize.OneSampleEffectSizeConverter
89 |    effectsize.TwoSampleEffectSizeConverter
90 |    effectsize.Expression
91 |
92 |    :template: function.rst
93 |    effectsize.solve_system
94 |    effectsize.select_expressions
95 |    effectsize.compute_measure
96 |
97 | .. _api_stats_ref:
98 |
99 | :mod:`pymare.stats`: Miscellaneous statistical functions
100 | --------------------------------------------------------
101 |
102 | .. automodule:: pymare.stats
103 |    :no-members:
104 |    :no-inherited-members:
105 |
106 | .. currentmodule:: pymare
107 |
108 | .. autosummary::
109 |    :toctree: generated/
110 |    :template: function.rst
111 |
112 |    stats.weighted_least_squares
113 |    stats.ensure_2d
114 |    stats.q_profile
115 |    stats.q_gen
116 |    stats.bonferroni
117 |    stats.fdr
118 |
119 | :mod:`pymare.datasets`: Meta-analytic datasets
120 | ----------------------------------------------
121 |
122 | .. automodule:: pymare.datasets
123 |    :no-members:
124 |    :no-inherited-members:
125 |
126 | .. currentmodule:: pymare
127 |
128 | .. autosummary::
129 |    :toctree: generated/
130 |    :template: function.rst
131 |
132 |    datasets.michael2013
133 |
134 | :mod:`pymare.utils`: Miscellaneous utility functions
135 | ----------------------------------------------------
136 |
137 | .. automodule:: pymare.utils
138 |    :no-members:
139 |    :no-inherited-members:
140 |
141 | .. currentmodule:: pymare
142 |
143 | .. autosummary::
144 |    :toctree: generated/
145 |    :template: function.rst
146 |
147 |    utils.get_resource_path
148 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # PyMARE documentation build configuration file, created by
5 | # sphinx-quickstart
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | import os
20 | import sys
21 | from datetime import datetime
22 | from distutils.version import LooseVersion
23 |
24 | import sphinx
25 | from m2r2 import MdInclude
26 |
27 | sys.path.insert(0, os.path.abspath("sphinxext"))
28 | sys.path.insert(0, os.path.abspath(os.path.pardir))
29 |
30 | from github_link import make_linkcode_resolve
31 |
32 | import pymare
33 |
34 | # -- General configuration ------------------------------------------------
35 |
36 | # If your documentation needs a minimal Sphinx version, state it here.
37 | needs_sphinx = "3.5"
38 |
39 | # generate autosummary even if no references
40 | autosummary_generate = True
41 | add_module_names = False
42 |
43 | # Add any Sphinx extension module names here, as strings. They can be
44 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
45 | # ones.
46 | extensions = [
47 |     "sphinx.ext.autodoc",  # standard
48 |     "sphinx.ext.autosummary",  # standard
49 |     "sphinx.ext.doctest",  # runs doctests
50 |     "sphinx.ext.intersphinx",  # links code to other packages
51 |     "sphinx.ext.linkcode",  # links to code from api
52 |     "sphinx.ext.napoleon",  # alternative to numpydoc
53 |     "sphinx_copybutton",  # for copying code snippets
54 |     "sphinx_gallery.gen_gallery",  # example gallery
55 |     "sphinxarg.ext",  # argparse
56 |     "sphinxcontrib.bibtex",  # for foot-citations
57 |     "recommonmark",  # markdown parser
58 | ]
59 |
60 | if LooseVersion(sphinx.__version__) < LooseVersion("1.4"):
61 |     extensions.append("sphinx.ext.pngmath")
62 | else:
63 |     extensions.append("sphinx.ext.imgmath")
64 |
65 | # Add any paths that contain templates here, relative to this directory.
66 | templates_path = ["_templates"]
67 |
68 | # source_suffix = ['.rst', '.md']
69 | source_suffix = ".rst"
70 |
71 | # The master toctree document.
72 | master_doc = "index"
73 |
74 | # General information about the project.
75 | project = "PyMARE"
76 | copyright = "2018-" + datetime.today().strftime("%Y") + ", PyMARE developers"
77 | author = "PyMARE developers"
78 |
79 | # The version info for the project you're documenting, acts as replacement for
80 | # |version| and |release|, also used in various other places throughout the
81 | # built documents.
82 | #
83 | # The short X.Y version.
84 | version = pymare.__version__
85 | # The full version, including alpha/beta/rc tags.
86 | release = pymare.__version__
87 |
88 | # The language for content autogenerated by Sphinx. Refer to documentation
89 | # for a list of supported languages.
90 | #
91 | # This is also used if you do content translation via gettext catalogs.
92 | # Usually you set "language" from the command line for these cases.
93 | language = None
94 |
95 | # List of patterns, relative to source directory, that match files and
96 | # directories to ignore when looking for source files.
97 | # This patterns also effect to html_static_path and html_extra_path
98 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "utils/*"]
99 |
100 | # The reST default role (used for this markup: `text`) to use for all documents.
101 | default_role = "autolink"
102 |
103 | # The name of the Pygments (syntax highlighting) style to use.
104 | pygments_style = "default"
105 |
106 | # -----------------------------------------------------------------------------
107 | # Napoleon settings
108 | # -----------------------------------------------------------------------------
109 | napoleon_google_docstring = False
110 | napoleon_numpy_docstring = True
111 | napoleon_include_init_with_doc = True
112 | napoleon_include_private_with_doc = False
113 | napoleon_include_special_with_doc = False
114 | napoleon_use_admonition_for_examples = True
115 | napoleon_use_admonition_for_notes = True
116 | napoleon_use_admonition_for_references = True
117 | napoleon_use_ivar = True
118 | napoleon_use_param = True
119 | napoleon_use_keyword = True
120 | napoleon_use_rtype = True
121 | napoleon_preprocess_types = False
122 | napoleon_type_aliases = None
123 | napoleon_attr_annotations = True
124 |
125 | # -----------------------------------------------------------------------------
126 | # HTML output
127 | # -----------------------------------------------------------------------------
128 | # The theme to use for HTML and HTML Help pages.
129 | # See the documentation for a list of builtin themes.
130 | html_theme = "sphinx_rtd_theme"
131 |
132 | # Theme options are theme-specific and customize the look and feel of a theme
133 | # further. For a list of options available for each theme, see the documentation.
134 | html_theme_options = {
135 | "includehidden": False, # don't show hidden TOCs in sidebar
136 | }
137 | html_sidebars = {"**": ["globaltoc.html", "relations.html", "searchbox.html", "indexsidebar.html"]}
138 |
139 | # Add any paths that contain custom static files (such as style sheets) here,
140 | # relative to this directory. They are copied after the builtin static files,
141 | # so a file named "default.css" will overwrite the builtin "default.css".
142 | html_static_path = ["_static"]
143 |
144 | # html_favicon = "_static/nimare_favicon.png"
145 | # html_logo = "_static/nimare_banner.png"
146 |
147 | # -----------------------------------------------------------------------------
148 | # HTMLHelp output
149 | # -----------------------------------------------------------------------------
150 | # Output file base name for HTML help builder.
151 | htmlhelp_basename = "pymaredoc"
152 |
153 | # The following is used by sphinx.ext.linkcode to provide links to github
154 | linkcode_resolve = make_linkcode_resolve(
155 | "pymare",
156 | "https://github.com/neurostuff/pymare/blob/{revision}/{package}/{path}#L{lineno}",
157 | )
158 |
159 | # -----------------------------------------------------------------------------
160 | # intersphinx
161 | # -----------------------------------------------------------------------------
162 | _python_version_str = "{0.major}.{0.minor}".format(sys.version_info)
163 | _python_doc_base = "https://docs.python.org/" + _python_version_str
164 | intersphinx_mapping = {
165 | "python": (_python_doc_base, None),
166 | "numpy": ("https://docs.scipy.org/doc/numpy", (None, "./_intersphinx/numpy-objects.inv")),
167 | "scipy": (
168 | "https://docs.scipy.org/doc/scipy/reference",
169 | (None, "./_intersphinx/scipy-objects.inv"),
170 | ),
171 | "sklearn": ("https://scikit-learn.org/stable", (None, "./_intersphinx/sklearn-objects.inv")),
172 | "matplotlib": ("https://matplotlib.org/", (None, "https://matplotlib.org/objects.inv")),
173 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
174 | "statsmodels": ("http://www.statsmodels.org/stable/", None),
175 | }
176 |
177 | sphinx_gallery_conf = {
178 |     # path to your examples scripts
179 |     "examples_dirs": "../examples",
180 |     # path where to save gallery generated examples
181 |     "gallery_dirs": "auto_examples",
182 |     "backreferences_dir": "generated",
183 |     # Modules for which function level galleries are created. In
184 |     # this case sphinx_gallery and numpy in a tuple of strings.
185 |     "doc_module": ("pymare",),
186 |     "ignore_pattern": r"utils/.",
187 |     "reference_url": {
188 |         # The module you locally document uses None
189 |         "pymare": None,
190 |     },
191 | }
192 |
193 | # Generate the plots for the gallery
194 | plot_gallery = True
195 |
196 | # -----------------------------------------------------------------------------
197 | # sphinxcontrib-bibtex
198 | # -----------------------------------------------------------------------------
199 | bibtex_bibfiles = ["./references.bib"]
200 | bibtex_style = "unsrt"
201 | bibtex_reference_style = "author_year"
202 | bibtex_footbibliography_header = ""
203 |
204 |
205 | def setup(app):
206 | """From https://github.com/rtfd/sphinx_rtd_theme/issues/117."""
207 | app.add_css_file("theme_overrides.css")
208 | app.add_css_file("nimare.css")
209 | app.connect("autodoc-process-docstring", generate_example_rst)
210 | # Fix to https://github.com/sphinx-doc/sphinx/issues/7420
211 | # from https://github.com/life4/deal/commit/7f33cbc595ed31519cefdfaaf6f415dada5acd94
212 | # from m2r to make `mdinclude` work
213 | app.add_config_value("no_underscore_emphasis", False, "env")
214 | app.add_config_value("m2r_parse_relative_links", False, "env")
215 | app.add_config_value("m2r_anonymous_references", False, "env")
216 | app.add_config_value("m2r_disable_inline_math", False, "env")
217 | app.add_config_value("m2r_use_mermaid", True, "env")
218 | app.add_directive("mdinclude", MdInclude)
219 |
220 |
221 | def generate_example_rst(app, what, name, obj, options, lines):
222 |     # generate empty examples files, so that we don't get
223 |     # inclusion errors if there are no examples for a class / module
224 |     folder = os.path.join(app.srcdir, "generated")
225 |     if not os.path.isdir(folder):
226 |         os.makedirs(folder)
227 |     examples_path = os.path.join(app.srcdir, "generated", "%s.examples" % name)
228 |     if not os.path.exists(examples_path):
229 |         # touch file
230 |         open(examples_path, "w").close()
231 |
--------------------------------------------------------------------------------
/docs/contributing.rst:
--------------------------------------------------------------------------------
1 | .. mdinclude:: ../CONTRIBUTING.md
2 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: links.rst
2 |
3 | PyMARE: Python Meta-Analysis & Regression Engine
4 | ================================================
5 |
6 | PyMARE is a Python package for meta-analyses and meta-regressions.
7 |
8 | .. image:: https://img.shields.io/pypi/v/pymare.svg
9 |    :target: https://pypi.python.org/pypi/pymare/
10 |    :alt: Latest Version
11 |
12 | .. image:: https://img.shields.io/pypi/pyversions/pymare.svg
13 |    :target: https://pypi.python.org/pypi/pymare/
14 |    :alt: PyPI - Python Version
15 |
16 | .. image:: https://img.shields.io/badge/License-MIT-blue.svg
17 |    :target: https://opensource.org/licenses/MIT
18 |    :alt: License
19 |
20 | .. image:: https://zenodo.org/badge/228903736.svg
21 |    :target: https://zenodo.org/badge/latestdoi/228903736
22 |    :alt: Zenodo
23 |
24 | .. image:: https://github.com/neurostuff/pymare/actions/workflows/testing.yml/badge.svg
25 |    :target: https://github.com/neurostuff/pymare/actions/workflows/testing.yml
26 |    :alt: GitHub CI
27 |
28 | .. image:: https://readthedocs.org/projects/pymare/badge/?version=latest
29 |    :target: http://pymare.readthedocs.io/en/latest/?badge=latest
30 |    :alt: Documentation Status
31 |
32 | .. image:: https://codecov.io/gh/neurostuff/PyMARE/branch/master/graph/badge.svg
33 |    :target: https://codecov.io/gh/neurostuff/pymare
34 |    :alt: Codecov
35 |
36 | .. toctree::
37 |    :maxdepth: 2
38 |    :caption: Contents:
39 |
40 |    about
41 |    installation
42 |    auto_examples/index
43 |    contributing
44 |    api
45 |
46 | Indices and tables
47 | ------------------
48 |
49 | * :ref:`genindex`
50 | * :ref:`modindex`
51 | * :ref:`search`
52 |
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | .. include:: links.rst
2 |
3 | Installation
4 | ============
5 |
6 | PyMARE can be installed with pip. To install the latest official release:
7 |
8 | .. code-block:: bash
9 |
10 |    pip install pymare
11 |
12 | If you want to use the most up-to-date version, you can install from the ``master`` branch:
13 |
14 | .. code-block:: bash
15 |
16 |    pip install git+https://github.com/neurostuff/PyMARE.git
17 |
18 | PyMARE requires Python >=3.8 and a number of packages.
19 | For a complete list, please see ``setup.cfg``.
20 |
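21 | To confirm that the installation worked, you can import the package and print its version (a quick sanity check; the version attribute is generated by versioneer):
22 |
23 | .. code-block:: python
24 |
25 |    import pymare
26 |
27 |    print(pymare.__version__)
28 |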
--------------------------------------------------------------------------------
/docs/links.rst:
--------------------------------------------------------------------------------
1 | .. _Anaconda: https://www.anaconda.com/download/#macos
2 |
3 | .. _metafor: https://www.metafor-project.org/doku.php
4 |
5 | .. _NiMARE: https://nimare.readthedocs.io
6 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | 	set SPHINXBUILD=python -msphinx
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=PyMARE
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed,
20 | echo.then set the SPHINXBUILD environment variable to point to the full
21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the
22 | echo.Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/references.bib:
--------------------------------------------------------------------------------
1 | @article{benjamini1995controlling,
2 | title={Controlling the false discovery rate: a practical and powerful approach to multiple testing},
3 | author={Benjamini, Yoav and Hochberg, Yosef},
4 | journal={Journal of the Royal Statistical Society: Series B (Methodological)},
5 | volume={57},
6 | number={1},
7 | pages={289--300},
8 | year={1995},
9 | publisher={Wiley Online Library},
10 | url={https://doi.org/10.1111/j.2517-6161.1995.tb02031.x},
11 | doi={10.1111/j.2517-6161.1995.tb02031.x}
12 | }
13 |
14 | @article{benjamini2001control,
15 | author={Yoav Benjamini and Daniel Yekutieli},
16 | title={The control of the false discovery rate in multiple testing under dependency},
17 | volume={29},
18 | journal={The Annals of Statistics},
19 | number={4},
20 | publisher={Institute of Mathematical Statistics},
21 | pages={1165 -- 1188},
22 | year={2001},
23 | doi={10.1214/aos/1013699998},
24 | url={https://doi.org/10.1214/aos/1013699998}
25 | }
26 |
27 | @article{bonferroni1936teoria,
28 | title={Teoria statistica delle classi e calcolo delle probabilita},
29 | author={Bonferroni, Carlo},
30 | journal={Pubblicazioni del R Istituto Superiore di Scienze Economiche e Commericiali di Firenze},
31 | volume={8},
32 | pages={3--62},
33 | year={1936}
34 | }
35 |
36 | @article{brockwell2001comparison,
37 | title={A comparison of statistical methods for meta-analysis},
38 | author={Brockwell, Sarah E and Gordon, Ian R},
39 | journal={Statistics in medicine},
40 | volume={20},
41 | number={6},
42 | pages={825--840},
43 | year={2001},
44 | publisher={Wiley Online Library},
45 | url={https://doi.org/10.1002/sim.650},
46 | doi={10.1002/sim.650}
47 | }
48 |
49 | @article{cochran1954combination,
50 | title={The combination of estimates from different experiments},
51 | author={Cochran, William G},
52 | journal={Biometrics},
53 | volume={10},
54 | number={1},
55 | pages={101--129},
56 | year={1954},
57 | publisher={JSTOR}
58 | }
59 |
60 | @article{dersimonian1986meta,
61 | title={Meta-analysis in clinical trials},
62 | author={DerSimonian, Rebecca and Laird, Nan},
63 | journal={Controlled clinical trials},
64 | volume={7},
65 | number={3},
66 | pages={177--188},
67 | year={1986},
68 | publisher={Elsevier},
69 | url={https://doi.org/10.1016/0197-2456(86)90046-2},
70 | doi={10.1016/0197-2456(86)90046-2}
71 | }
72 |
73 | @book{fisher1946statistical,
74 | title={Statistical Methods for Research Workers},
75 | author={Fisher, Ronald Aylmer},
76 | edition={10},
77 | year={1946},
78 | publisher={Oliver and Boyd}
79 | }
81 |
82 | @book{hedges2014statistical,
83 | title={Statistical methods for meta-analysis},
84 | author={Hedges, Larry V and Olkin, Ingram},
85 | year={2014},
86 | publisher={Academic press}
87 | }
88 |
89 | @article{higgins2002quantifying,
90 | title={Quantifying heterogeneity in a meta-analysis},
91 | author={Higgins, Julian PT and Thompson, Simon G},
92 | journal={Statistics in medicine},
93 | volume={21},
94 | number={11},
95 | pages={1539--1558},
96 | year={2002},
97 | publisher={Wiley Online Library}
98 | }
99 |
100 | @article{kosmidis2017improving,
101 | title={Improving the accuracy of likelihood-based inference in meta-analysis and meta-regression},
102 | author={Kosmidis, Ioannis and Guolo, Annamaria and Varin, Cristiano},
103 | journal={Biometrika},
104 | volume={104},
105 | number={2},
106 | pages={489--496},
107 | year={2017},
108 | publisher={Oxford University Press},
109 | url={https://doi.org/10.1093/biomet/asx001},
110 | doi={10.1093/biomet/asx001}
111 | }
112 |
113 | @article{michael2013non,
114 | title={On the (non) persuasive power of a brain image},
115 | author={Michael, Robert B and Newman, Eryn J and Vuorre, Matti and Cumming, Geoff and Garry, Maryanne},
116 | journal={Psychonomic bulletin \& review},
117 | volume={20},
118 | number={4},
119 | pages={720--725},
120 | year={2013},
121 | publisher={Springer},
122 | url={https://doi.org/10.3758/s13423-013-0391-6},
123 | doi={10.3758/s13423-013-0391-6}
124 | }
125 |
126 | @article{sangnawakij2019meta,
127 | title={Meta-analysis without study-specific variance information: Heterogeneity case},
128 | author={Sangnawakij, Patarawan and B{\"o}hning, Dankmar and Niwitpong, Sa-Aat and Adams, Stephen and Stanton, Michael and Holling, Heinz},
129 | journal={Statistical Methods in Medical Research},
130 | volume={28},
131 | number={1},
132 | pages={196--210},
133 | year={2019},
134 | publisher={SAGE Publications Sage UK: London, England},
135 | url={https://doi.org/10.1177/0962280217718867},
136 | doi={10.1177/0962280217718867}
137 | }
138 |
139 | @article{shaffer1995multiple,
140 | title={Multiple hypothesis testing},
141 | author={Shaffer, Juliet Popper},
142 | journal={Annual review of psychology},
143 | volume={46},
144 | number={1},
145 | pages={561--584},
146 | year={1995},
147 | publisher={Annual Reviews}
148 | }
149 |
150 | @book{stouffer1949american,
151 | title={The American Soldier: Adjustment During Army Life},
152 | author={Stouffer, Samuel A and Suchman, Edward A and DeVinney, Leland C and Star, Shirley A and Williams Jr, Robin M},
153 | series={Studies in Social Psychology in World War II},
154 | volume={1},
155 | year={1949},
156 | publisher={Princeton University Press}
157 | }
157 |
158 | @article{veroniki2016methods,
159 | title={Methods to estimate the between-study variance and its uncertainty in meta-analysis},
160 | author={Veroniki, Areti Angeliki and Jackson, Dan and Viechtbauer, Wolfgang and Bender, Ralf and Bowden, Jack and Knapp, Guido and Kuss, Oliver and Higgins, Julian PT and Langan, Dean and Salanti, Georgia},
161 | journal={Research synthesis methods},
162 | volume={7},
163 | number={1},
164 | pages={55--79},
165 | year={2016},
166 | publisher={Wiley Online Library},
167 | url={https://doi.org/10.1002/jrsm.1164},
168 | doi={10.1002/jrsm.1164}
169 | }
170 |
171 | @article{viechtbauer2007confidence,
172 | title={Confidence intervals for the amount of heterogeneity in meta-analysis},
173 | author={Viechtbauer, Wolfgang},
174 | journal={Statistics in medicine},
175 | volume={26},
176 | number={1},
177 | pages={37--52},
178 | year={2007},
179 | publisher={Wiley Online Library},
180 | url={https://doi.org/10.1002/sim.2514},
181 | doi={10.1002/sim.2514}
182 | }
183 |
184 | @Manual{white2022metadat,
185 | title={metadat: Meta-Analysis Datasets},
186 | author={Thomas White and Daniel Noble and Alistair Senior and W. Kyle Hamilton and Wolfgang Viechtbauer},
187 | year={2022},
188 | note={R package version 1.2-0},
189 | url={https://CRAN.R-project.org/package=metadat}
190 | }
191 |
192 | @article{winkler2016non,
193 | title={Non-parametric combination and related permutation tests for neuroimaging},
194 | author={Winkler, Anderson M and Webster, Matthew A and Brooks, Jonathan C and Tracey, Irene and Smith, Stephen M and Nichols, Thomas E},
195 | journal={Human brain mapping},
196 | volume={37},
197 | number={4},
198 | pages={1486--1511},
199 | year={2016},
200 | publisher={Wiley Online Library},
201 | url={https://doi.org/10.1002/hbm.23115},
202 | doi={10.1002/hbm.23115}
203 | }
204 |
--------------------------------------------------------------------------------
/docs/sphinxext/github_link.py:
--------------------------------------------------------------------------------
1 | """
2 | This script comes from scikit-learn:
3 | https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/github_link.py
4 | """
5 | from operator import attrgetter
6 | import inspect
7 | import subprocess
8 | import os
9 | import sys
10 | from functools import partial
11 |
12 | REVISION_CMD = "git rev-parse --short HEAD"
13 |
14 |
15 | def _get_git_revision():
16 | try:
17 | revision = subprocess.check_output(REVISION_CMD.split()).strip()
18 | except (subprocess.CalledProcessError, OSError):
19 | print("Failed to execute git to get revision")
20 | return None
21 | return revision.decode("utf-8")
22 |
23 |
24 | def _linkcode_resolve(domain, info, package, url_fmt, revision):
25 | """Determine a link to online source for a class/method/function
26 |
27 | This is called by sphinx.ext.linkcode
28 |
29 | An example with a long-untouched module that everyone has
30 | >>> _linkcode_resolve('py', {'module': 'tty',
31 | ... 'fullname': 'setraw'},
32 | ... package='tty',
33 | ... url_fmt='http://hg.python.org/cpython/file/'
34 | ... '{revision}/Lib/{package}/{path}#L{lineno}',
35 | ... revision='xxxx')
36 | 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
37 | """
38 |
39 | if revision is None:
40 | return
41 | if domain not in ("py", "pyx"):
42 | return
43 | if not info.get("module") or not info.get("fullname"):
44 | return
45 |
46 | class_name = info["fullname"].split(".")[0]
47 | if type(class_name) != str:
48 | # Python 2 only
49 | class_name = class_name.encode("utf-8")
50 | try:
51 | module = __import__(info["module"], fromlist=[class_name])
52 | obj = attrgetter(info["fullname"])(module)
53 | except Exception:
54 | fn = None
55 | return
56 |
57 | try:
58 | fn = inspect.getsourcefile(obj)
59 | except Exception:
60 | fn = None
61 | if not fn:
62 | try:
63 | fn = inspect.getsourcefile(sys.modules[obj.__module__])
64 | except Exception:
65 | fn = None
66 | if not fn:
67 | return
68 |
69 | fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
70 | try:
71 | lineno = inspect.getsourcelines(obj)[1]
72 | except Exception:
73 | lineno = ""
74 | return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
75 |
76 |
77 | def make_linkcode_resolve(package, url_fmt):
78 | """Returns a linkcode_resolve function for the given URL format
79 |
80 | revision is a git commit reference (hash or name)
81 |
82 | package is the name of the root module of the package
83 |
84 | url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
85 | 'blob/{revision}/{package}/'
86 | '{path}#L{lineno}')
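
    Example use in Sphinx's ``conf.py`` (a sketch; the URL format shown is
    illustrative)::

        linkcode_resolve = make_linkcode_resolve(
            "pymare",
            "https://github.com/neurostuff/PyMARE/blob/"
            "{revision}/{package}/{path}#L{lineno}",
        )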
87 | """
88 | revision = _get_git_revision()
89 | return partial(_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt)
90 |
--------------------------------------------------------------------------------
/examples/01_basic_io/README.txt:
--------------------------------------------------------------------------------
1 | .. _io-examples-index:
2 |
3 | Basic I/O
4 | ---------
5 |
--------------------------------------------------------------------------------
/examples/01_basic_io/plot_create_dataset.py:
--------------------------------------------------------------------------------
1 | """
2 | .. _io1:
3 |
4 | ==================
5 | Creating a dataset
6 | ==================
7 |
8 | In PyMARE, operations are performed on :class:`~pymare.core.Dataset` objects.
9 | Datasets are very lightweight objects that store the data used for
10 | meta-analyses, including study-level estimates (y), variances (v),
11 | predictors (X), and sample sizes (n).
12 | """
13 | ###############################################################################
14 | # Start with the necessary imports
15 | # --------------------------------
16 | from pprint import pprint
17 |
18 | import pandas as pd
19 |
20 | from pymare import core
21 |
22 | ###############################################################################
23 | # Datasets can be created from arrays
24 | # -----------------------------------
25 | # The simplest way to create a dataset is to pass in array-like arguments, such as lists or numpy arrays.
26 | #
27 | # ``y`` refers to the study-level estimates, ``v`` to the variances,
28 | # ``X`` to any study-level regressors, and ``n`` to the sample sizes.
29 | #
30 | # Not all Estimators require all of these arguments, so not all need to be
31 | # used in a given Dataset.
32 | y = [2, 4, 6]
33 | v = [100, 100, 100]
34 | X = [[5, 9], [2, 8], [1, 7]]
35 |
36 | dataset = core.Dataset(y=y, v=v, X=X, X_names=["X1", "X7"])
37 |
38 | pprint(vars(dataset))
39 |
40 | ###############################################################################
41 | # Datasets have the :meth:`~pymare.core.Dataset.to_df` method.
42 | dataset.to_df()
43 |
44 | ###############################################################################
45 | # Datasets can also be created from pandas DataFrames
46 | # ---------------------------------------------------
47 | df = pd.DataFrame(
48 | {
49 | "y": [2, 4, 6],
50 | "v_alt": [100, 100, 100],
51 | "X1": [5, 2, 1],
52 | "X7": [9, 8, 7],
53 | }
54 | )
55 |
56 | dataset = core.Dataset(v="v_alt", X=["X1", "X7"], data=df, add_intercept=False)
57 |
58 | pprint(vars(dataset))
59 |
60 | ###############################################################################
61 | # Datasets can also contain multiple dependent variables
62 | # ------------------------------------------------------
63 | # These variables are analyzed in parallel, but as unrelated variables,
64 | # rather than as potentially correlated ones.
65 | #
66 | # This is particularly useful for image-based neuroimaging meta-analyses.
67 | # For more information about this, see `NiMARE <https://nimare.readthedocs.io>`_.
68 | y = [
69 | [2, 4, 6], # Estimates for first study's three outcome variables.
70 | [3, 2, 1], # Estimates for second study's three outcome variables.
71 | ]
72 | v = [
73 | [100, 100, 100], # Estimate variances for first study's three outcome variables.
74 | [8, 4, 2], # Estimate variances for second study's three outcome variables.
75 | ]
76 | X = [
77 | [5, 9], # Predictors for first study. Same across all three outcome variables.
78 | [2, 8], # Predictors for second study. Same across all three outcome variables.
79 | ]
80 |
81 | dataset = core.Dataset(y=y, v=v, X=X, X_names=["X1", "X7"])
82 |
83 | pprint(vars(dataset))
84 |
--------------------------------------------------------------------------------
/examples/02_meta-analysis/README.txt:
--------------------------------------------------------------------------------
1 | .. _meta-examples-index:
2 |
3 | Running Meta-Analyses
4 | ---------------------
5 |
--------------------------------------------------------------------------------
/examples/02_meta-analysis/plot_meta-analysis_walkthrough.py:
--------------------------------------------------------------------------------
1 | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
2 | # ex: set sts=4 ts=4 sw=4 et:
3 | r"""
4 | .. _meta_walkthrough:
5 |
6 | ================================================
7 | Run Estimators on a simulated dataset
8 | ================================================
9 |
10 | PyMARE implements a range of meta-analytic estimators.
11 | In this example, we build a simulated dataset with a known ground truth and
12 | use it to compare PyMARE's estimators.
13 |
14 | .. note::
15 | The variance of the true effect is composed of both between-study variance
16 | (:math:`\tau^{2}`) and within-study variance (:math:`\sigma^{2}`).
17 |     Within-study variance is generally taken from the sampling variances of
18 | individual studies (``v``), while between-study variance can be estimated
19 | via a number of methods.
20 | """
21 | # sphinx_gallery_thumbnail_number = 3
22 | import matplotlib.pyplot as plt
23 | import numpy as np
24 | import seaborn as sns
25 | from scipy import stats
26 |
27 | from pymare import core, estimators
28 | from pymare.stats import var_to_ci
29 |
30 | sns.set_style("whitegrid")
31 |
32 | ###############################################################################
33 | # Here we simulate a dataset
34 | # -----------------------------------------------------------------------------
35 | # This is a simple dataset with a one-sample design.
36 | # We are interested in estimating the true effect size from a set of one-sample
37 | # studies.
38 | N_STUDIES = 40
39 | BETWEEN_STUDY_VAR = 400  # variance of the true effects across studies
40 | between_study_sd = np.sqrt(BETWEEN_STUDY_VAR)
41 | TRUE_EFFECT = 20
42 | sample_sizes = np.round(np.random.normal(loc=50, scale=20, size=N_STUDIES)).astype(int)
43 | within_study_vars = np.random.normal(loc=400, scale=100, size=N_STUDIES)
44 | study_means = np.random.normal(loc=TRUE_EFFECT, scale=between_study_sd, size=N_STUDIES)
45 |
46 | sample_sizes[sample_sizes <= 1] = 2
47 | within_study_vars = np.abs(within_study_vars)
48 |
49 | # Convert data types and match PyMARE nomenclature
50 | y = study_means
51 | X = np.ones((N_STUDIES))
52 | v = within_study_vars
53 | n = sample_sizes
54 | sd = np.sqrt(v * n)
55 | z = y / sd
56 | p = stats.norm.sf(abs(z)) * 2
57 |
58 | ###############################################################################
59 | # Plot variable distributions
60 | # -----------------------------------------------------------------------------
61 | fig, axes = plt.subplots(nrows=5, figsize=(6, 5))
62 | sns.distplot(y, ax=axes[0], bins=20)
63 | axes[0].set_title("y")
64 | sns.distplot(v, ax=axes[1], bins=20)
65 | axes[1].set_title("v")
66 | sns.distplot(n, ax=axes[2], bins=20)
67 | axes[2].set_title("n")
68 | sns.distplot(z, ax=axes[3], bins=20)
69 | axes[3].set_title("z")
70 | sns.distplot(p, ax=axes[4], bins=20)
71 | axes[4].set_title("p")
72 | for i in range(5):
73 | axes[i].set_yticks([])
74 | fig.tight_layout()
75 |
76 | ###############################################################################
77 | # Plot means and confidence intervals
78 | # -----------------------------------
79 | # Here we can show study-wise mean effect and CIs, along with the true effect
80 | # and CI corresponding to the between-study variance.
81 | fig, ax = plt.subplots(figsize=(6, 14))
82 | study_ticks = np.arange(N_STUDIES)
83 |
84 | # Get 95% CI for individual studies
85 | lower_bounds, upper_bounds = var_to_ci(y, v, n)
86 | ax.scatter(y, study_ticks + 1)
87 | for study in study_ticks:
88 | ax.plot((lower_bounds[study], upper_bounds[study]), (study + 1, study + 1), color="blue")
89 | ax.axvline(0, color="gray", alpha=0.2, linestyle="--", label="Zero")
90 | ax.axvline(np.mean(y), color="orange", alpha=0.2, label="Mean of Observed Effects")
91 |
92 | # Get 95% CI for true effect
93 | lower_bound, upper_bound = var_to_ci(TRUE_EFFECT, BETWEEN_STUDY_VAR, 1)
94 | ax.scatter((TRUE_EFFECT,), (N_STUDIES + 1,), color="green", label="True Effect")
95 | ax.plot(
96 | (lower_bound, upper_bound),
97 | (N_STUDIES + 1, N_STUDIES + 1),
98 | color="green",
99 | linewidth=3,
100 | label="Between-Study 95% CI",
101 | )
102 | ax.set_ylim((0, N_STUDIES + 2))
103 | ax.set_xlabel("Mean (95% CI)")
104 | ax.set_ylabel("Study")
105 | ax.legend()
106 | fig.tight_layout()
107 |
108 | ###############################################################################
109 | # Create a Dataset object containing the data
110 | # --------------------------------------------
111 | dset = core.Dataset(y=y, X=None, v=v, n=n, add_intercept=True)
112 |
113 | # Here is a dictionary to house results across models
114 | results = {}
115 |
116 | ###############################################################################
117 | # Fit models
118 | # -----------------------------------------------------------------------------
119 | # When you have ``z`` or ``p``:
120 | #
121 | # - :class:`pymare.estimators.StoufferCombinationTest`
122 | # - :class:`pymare.estimators.FisherCombinationTest`
123 | #
124 | # When you have ``y`` and ``v`` and don't want to estimate between-study variance:
125 | #
126 | # - :class:`pymare.estimators.WeightedLeastSquares`
127 | #
128 | # When you have ``y`` and ``v`` and want to estimate between-study variance:
129 | #
130 | # - :class:`pymare.estimators.DerSimonianLaird`
131 | # - :class:`pymare.estimators.Hedges`
132 | # - :class:`pymare.estimators.VarianceBasedLikelihoodEstimator`
133 | #
134 | # When you have ``y`` and ``n`` and want to estimate between-study variance:
135 | #
136 | # - :class:`pymare.estimators.SampleSizeBasedLikelihoodEstimator`
137 | #
138 | # When you have ``y`` and ``v`` and want a hierarchical model:
139 | #
140 | # - :class:`pymare.estimators.StanMetaRegression`
141 |
142 | ###############################################################################
143 | # First, we have "combination models", which combine p and/or z values
144 | # `````````````````````````````````````````````````````````````````````````````
145 | # The two combination models in PyMARE are Stouffer's and Fisher's Tests.
146 | #
147 | # Notice that these models don't use :class:`~pymare.core.Dataset` objects.
148 | stouff = estimators.StoufferCombinationTest()
149 | stouff.fit(z[:, None])
150 | print("Stouffers")
151 | print("p: {}".format(stouff.params_["p"]))
152 | print()
153 |
154 | fisher = estimators.FisherCombinationTest()
155 | fisher.fit(z[:, None])
156 | print("Fishers")
157 | print("p: {}".format(fisher.params_["p"]))
158 |
159 | ###############################################################################
160 | # Now we have a fixed effects model
161 | # `````````````````````````````````````````````````````````````````````````````
162 | # This estimator does not attempt to estimate between-study variance.
163 | # Instead, it takes ``tau2`` (:math:`\tau^{2}`) as an argument.
164 | wls = estimators.WeightedLeastSquares()
165 | wls.fit_dataset(dset)
166 | wls_summary = wls.summary()
167 | results["Weighted Least Squares"] = wls_summary.to_df()
168 | print("Weighted Least Squares")
169 | print(wls_summary.to_df().T)
170 |
171 | ###############################################################################
172 | # Methods that estimate between-study variance
173 | # `````````````````````````````````````````````````````````````````````````````
174 | # The ``DerSimonianLaird``, ``Hedges``, and ``VarianceBasedLikelihoodEstimator``
175 | # estimators all estimate between-study variance from the data, and use ``y``
176 | # and ``v``.
177 | #
178 | # ``DerSimonianLaird`` and ``Hedges`` use relatively simple methods for
179 | # estimating between-study variance, while ``VarianceBasedLikelihoodEstimator``
180 | # can use either maximum-likelihood (ML) or restricted maximum-likelihood (REML)
181 | # to iteratively estimate it.
182 | dsl = estimators.DerSimonianLaird()
183 | dsl.fit_dataset(dset)
184 | dsl_summary = dsl.summary()
185 | results["DerSimonian-Laird"] = dsl_summary.to_df()
186 | print("DerSimonian-Laird")
187 | print(dsl_summary.to_df().T)
188 | print()
189 |
190 | hedge = estimators.Hedges()
191 | hedge.fit_dataset(dset)
192 | hedge_summary = hedge.summary()
193 | results["Hedges"] = hedge_summary.to_df()
194 | print("Hedges")
195 | print(hedge_summary.to_df().T)
196 | print()
197 |
198 | vb_ml = estimators.VarianceBasedLikelihoodEstimator(method="ML")
199 | vb_ml.fit_dataset(dset)
200 | vb_ml_summary = vb_ml.summary()
201 | results["Variance-Based with ML"] = vb_ml_summary.to_df()
202 | print("Variance-Based with ML")
203 | print(vb_ml_summary.to_df().T)
204 | print()
205 |
206 | vb_reml = estimators.VarianceBasedLikelihoodEstimator(method="REML")
207 | vb_reml.fit_dataset(dset)
208 | vb_reml_summary = vb_reml.summary()
209 | results["Variance-Based with REML"] = vb_reml_summary.to_df()
210 | print("Variance-Based with REML")
211 | print(vb_reml_summary.to_df().T)
212 | print()
213 |
214 | # The ``SampleSizeBasedLikelihoodEstimator`` estimates between-study variance
215 | # using ``y`` and ``n``, but assumes within-study variance is homogeneous
216 | # across studies.
217 | sb_ml = estimators.SampleSizeBasedLikelihoodEstimator(method="ML")
218 | sb_ml.fit_dataset(dset)
219 | sb_ml_summary = sb_ml.summary()
220 | results["Sample Size-Based with ML"] = sb_ml_summary.to_df()
221 | print("Sample Size-Based with ML")
222 | print(sb_ml_summary.to_df().T)
223 | print()
224 |
225 | sb_reml = estimators.SampleSizeBasedLikelihoodEstimator(method="REML")
226 | sb_reml.fit_dataset(dset)
227 | sb_reml_summary = sb_reml.summary()
228 | results["Sample Size-Based with REML"] = sb_reml_summary.to_df()
229 | print("Sample Size-Based with REML")
230 | print(sb_reml_summary.to_df().T)
231 |
232 | ###############################################################################
233 | # What about the Stan estimator?
234 | # `````````````````````````````````````````````````````````````````````````````
235 | # We're going to skip this one here because of how computationally intensive it
236 | # is.
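#
# For reference, here is a sketch of how it would be run (not executed here;
# it requires a working Stan installation):
#
# .. code-block:: python
#
#     stan = estimators.StanMetaRegression()
#     stan.fit_dataset(dset)
#     stan_results = stan.summary()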
237 |
238 | ###############################################################################
239 | # Let's check out our results!
240 | # `````````````````````````````````````````````````````````````````````````````
241 | fig, ax = plt.subplots(figsize=(6, 6))
242 |
243 | for i, (estimator_name, summary_df) in enumerate(results.items()):
244 | ax.scatter((summary_df.loc[0, "estimate"],), (i + 1,), label=estimator_name)
245 | ax.plot(
246 | (summary_df.loc[0, "ci_0.025"], summary_df.loc[0, "ci_0.975"]),
247 | (i + 1, i + 1),
248 | linewidth=3,
249 | )
250 |
251 | # Get 95% CI for true effect
252 | lower_bound, upper_bound = var_to_ci(TRUE_EFFECT, BETWEEN_STUDY_VAR, 1)
253 | ax.scatter((TRUE_EFFECT,), (i + 2,), label="True Effect")
254 | ax.plot(
255 | (lower_bound, upper_bound),
256 | (i + 2, i + 2),
257 | linewidth=3,
258 | label="Between-Study 95% CI",
259 | )
260 | ax.set_ylim((0, i + 3))
261 | ax.set_yticklabels([None] + list(results.keys()) + ["True Effect"])
262 |
263 | ax.set_xlabel("Mean (95% CI)")
264 | fig.tight_layout()
265 |
--------------------------------------------------------------------------------
/examples/02_meta-analysis/plot_run_meta-analysis.py:
--------------------------------------------------------------------------------
1 | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
2 | # ex: set sts=4 ts=4 sw=4 et:
3 | """
4 | .. _meta_basics:
5 |
6 | =====================================
7 | The Basics of Running a Meta-Analysis
8 | =====================================
9 |
10 | Here we walk through the basic steps of running a meta-analysis with PyMARE.
11 | """
12 | ###############################################################################
13 | # Start with the necessary imports
14 | # -----------------------------------------------------------------------------
15 | from pprint import pprint
16 |
17 | from pymare import core, datasets, estimators
18 |
19 | ###############################################################################
20 | # Load the data
21 | # -----------------------------------------------------------------------------
22 | # We will use the :footcite:t:`michael2013non` dataset, which comes from the
23 | # metadat library :footcite:p:`white2022metadat`.
24 | #
25 | # We only want to do a mean analysis, so we won't have any covariates except for
26 | # an intercept.
27 | data, meta = datasets.michael2013()
28 | dset = core.Dataset(data=data, y="yi", v="vi", X=None, add_intercept=True)
29 | dset.to_df()
30 |
31 | ###############################################################################
32 | # Now we fit a model
33 | # -----------------------------------------------------------------------------
34 | # You must first initialize the estimator, after which you can use
35 | # :meth:`~pymare.estimators.estimators.BaseEstimator.fit` to fit the model to
36 | # numpy arrays, or
37 | # :meth:`~pymare.estimators.estimators.BaseEstimator.fit_dataset` to fit it to
38 | # a :class:`~pymare.core.Dataset`.
39 | #
40 | # .. tip::
41 | # We generally recommend using
42 | # :meth:`~pymare.estimators.estimators.BaseEstimator.fit_dataset` over
43 | # :meth:`~pymare.estimators.estimators.BaseEstimator.fit`.
44 | #
45 | # There are a number of methods, such as
46 | # :meth:`~pymare.results.MetaRegressionResults.get_heterogeneity_stats` and
47 | # :meth:`~pymare.results.MetaRegressionResults.permutation_test`,
48 | # which only work when the Estimator is fitted to a Dataset.
49 | #
50 | # However, :meth:`~pymare.estimators.estimators.BaseEstimator.fit` requires
51 | # less memory than :meth:`~pymare.estimators.estimators.BaseEstimator.fit_dataset`,
52 | # so it can be useful for large-scale meta-analyses,
53 | # such as neuroimaging image-based meta-analyses.
54 | #
55 | # The :meth:`~pymare.estimators.estimators.BaseEstimator.summary` function
56 | # will return a :class:`~pymare.results.MetaRegressionResults` object,
57 | # which contains the results of the analysis.
58 | est = estimators.WeightedLeastSquares().fit_dataset(dset)
59 | results = est.summary()
60 | results.to_df()
61 |
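###############################################################################
# For comparison, here is a sketch of fitting the same model directly to numpy
# arrays with :meth:`~pymare.estimators.estimators.BaseEstimator.fit`.
# We simply reuse the arrays stored on the Dataset, so the two fits are
# equivalent; with ``fit`` you would normally construct the arrays yourself.
est_arrays = estimators.WeightedLeastSquares()
est_arrays.fit(y=dset.y, v=dset.v, X=dset.X)
est_arrays.params_  # fitted parameters are stored in the ``params_`` attribute
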
62 | ###############################################################################
63 | # We can also extract some useful information from the results object
64 | # -----------------------------------------------------------------------------
65 | # The :meth:`~pymare.results.MetaRegressionResults.get_heterogeneity_stats`
66 | # method will calculate heterogeneity statistics.
67 | pprint(results.get_heterogeneity_stats())
68 |
69 | ###############################################################################
70 | # The :meth:`~pymare.results.MetaRegressionResults.get_re_stats` method will
71 | # estimate the confidence interval for :math:`\tau^2`.
72 | pprint(results.get_re_stats())
73 |
74 | ###############################################################################
75 | # The :meth:`~pymare.results.MetaRegressionResults.permutation_test` method
76 | # will run a permutation test to estimate more accurate p-values.
77 | perm_results = results.permutation_test(n_perm=1000)
78 | perm_results.to_df()
79 |
80 | ###############################################################################
81 | # References
82 | # -----------------------------------------------------------------------------
83 | # .. footbibliography::
84 |
--------------------------------------------------------------------------------
/examples/README.txt:
--------------------------------------------------------------------------------
1 | .. _examples-index:
2 |
3 | Examples
4 | ===================
5 |
--------------------------------------------------------------------------------
/pymare/__init__.py:
--------------------------------------------------------------------------------
1 | """PyMARE: Python Meta-Analysis & Regression Engine."""
2 |
3 | import sys
4 | import warnings
5 |
6 | from .core import Dataset, meta_regression
7 | from .effectsize import OneSampleEffectSizeConverter, TwoSampleEffectSizeConverter
8 |
9 | __all__ = [
10 | "Dataset",
11 | "meta_regression",
12 | "OneSampleEffectSizeConverter",
13 | "TwoSampleEffectSizeConverter",
14 | ]
15 |
16 | from . import _version
17 |
18 | __version__ = _version.get_versions()["version"]
19 | del _version
20 |
21 |
22 | def _py367_deprecation_warning():
23 | """Deprecation warnings message.
24 |
25 | Notes
26 | -----
27 | Adapted from NiMARE.
28 | """
29 | py36_warning = (
30 | "Python 3.6 and 3.7 support is deprecated and will be removed in release 0.0.5 of PyMARE. "
31 | "Consider switching to Python 3.8, 3.9."
32 | )
33 | warnings.filterwarnings("once", message=py36_warning)
34 | warnings.warn(message=py36_warning, category=FutureWarning, stacklevel=3)
35 |
36 |
37 | def _python_deprecation_warnings():
38 | """Raise deprecation warnings.
39 |
40 | Notes
41 | -----
42 | Adapted from NiMARE.
43 | """
44 | if sys.version_info.major == 3 and (
45 | sys.version_info.minor == 6 or sys.version_info.minor == 7
46 | ):
47 | _py367_deprecation_warning()
48 |
49 |
50 | _python_deprecation_warnings()
51 |
--------------------------------------------------------------------------------
/pymare/core.py:
--------------------------------------------------------------------------------
1 | """Core classes and functions."""
2 |
3 | from functools import partial
4 |
5 | import numpy as np
6 | import pandas as pd
7 |
8 | from pymare.utils import _check_inputs_shape, _listify
9 |
10 | from .estimators import (
11 | DerSimonianLaird,
12 | Hedges,
13 | SampleSizeBasedLikelihoodEstimator,
14 | StanMetaRegression,
15 | VarianceBasedLikelihoodEstimator,
16 | WeightedLeastSquares,
17 | )
18 | from .stats import ensure_2d
19 |
20 |
21 | class Dataset:
22 | """Container for input data and arguments to estimators.
23 |
24 | Parameters
25 | ----------
26 | y : None or :obj:`numpy.ndarray` of shape (K,) or :obj:`str`, optional
27 | 1d array of study-level estimates with length K, or the name of the column in data
28 | containing the y values.
29 | Default = None.
30 | v : None or :obj:`numpy.ndarray` of shape (K,) or :obj:`str`, optional
31 | 1d array of study-level variances with length K, or the name of the column in data
32 | containing v values.
33 | Default = None.
34 | X : None or :obj:`numpy.ndarray` of shape (K,[P]) or :obj:`list` of :obj:`str`, optional
35 | 1d or 2d array containing study-level predictors (dimensions K x P),
36 | or a list of strings giving the names of the columns in data containing the X values.
37 | Default = None.
38 | n : None or :obj:`numpy.ndarray` of shape (K,) or :obj:`str`, optional
39 | 1d array of study-level sample sizes (length K), or the name of the corresponding column
40 | in ``data``.
41 | Default = None.
42 | data : None or :obj:`pandas.DataFrame`, optional
43 | A pandas DataFrame containing y, v, X, and/or n values.
44 | By default, columns are expected to have the same names as arguments
45 | (e.g., the y values will be expected in the 'y' column).
46 | This can be modified by passing strings giving column names to any of the ``y``, ``v``,
47 | ``X``, or ``n`` arguments.
48 | Default = None.
49 | X_names : None or :obj:`list` of :obj:`str`, optional
50 | List of length P containing the names of the predictors.
51 | Ignored if ``data`` is provided (use ``X`` to specify columns).
52 | Default = None.
53 | add_intercept : :obj:`bool`, optional
54 | If True, an intercept column is automatically added to the predictor matrix.
55 | If False, the predictors matrix is passed as-is to estimators.
56 | Default = True.
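
    Examples
    --------
    A minimal, illustrative Dataset (values are arbitrary):

    >>> from pymare import Dataset
    >>> dset = Dataset(y=[2, 4, 6], v=[1, 1, 1])
    >>> dset.X_names  # an intercept is added by default
    ['intercept']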
57 | """
58 |
59 | def __init__(
60 | self, y=None, v=None, X=None, n=None, data=None, X_names=None, add_intercept=True
61 | ):
62 | if y is None and data is None:
63 | raise ValueError(
64 | "If no y values are provided, a pandas DataFrame "
65 | "containing a 'y' column must be passed to the "
66 | "data argument."
67 | )
68 |
69 | if (X is None) and (not add_intercept):
70 | raise ValueError("If no X matrix is provided, add_intercept must be True!")
71 |
72 | # Extract columns from DataFrame
73 | if data is not None:
74 | y = data.loc[:, y or "y"].values
75 |
76 | # v is optional
77 | if (v is not None) or ("v" in data.columns):
78 | v = data.loc[:, v or "v"].values
79 |
80 | # X is optional
81 | if (X is not None) or ("X" in data.columns):
82 | X_names = X or "X"
83 | X = data.loc[:, X_names].values
84 |
85 | # n is optional
86 | if (n is not None) or ("n" in data.columns):
87 | n = data.loc[:, n or "n"].values
88 |
89 | self.y = ensure_2d(y)
90 | self.v = ensure_2d(v)
91 | self.n = ensure_2d(n)
92 | X, names = self._get_predictors(X, X_names, add_intercept)
93 | self.X = X
94 | self.X_names = names
95 |
96 | _check_inputs_shape(self.y, self.X, "y", "X", row=True)
97 | _check_inputs_shape(self.y, self.v, "y", "v", row=True, column=True)
98 | _check_inputs_shape(self.y, self.n, "y", "n", row=True, column=True)
99 |
100 | def _get_predictors(self, X, names, add_intercept):
101 | if X is None and not add_intercept:
102 | raise ValueError(
103 | "No fixed predictors found. If no X matrix is "
104 | "provided, add_intercept must be True!"
105 | )
106 |
107 | X = pd.DataFrame(X)
108 | if names is not None:
109 | X.columns = _listify(names)
110 |
111 | if add_intercept:
112 | intercept = pd.DataFrame({"intercept": np.ones(len(self.y))})
113 | X = pd.concat([intercept, X], axis=1)
114 |
115 | return X.values, X.columns.tolist()
116 |
117 | def to_df(self):
118 | """Convert the dataset to a pandas DataFrame.
119 |
120 | Returns
121 | -------
122 | :obj:`pandas.DataFrame`
123 | A DataFrame containing the y, v, X, and n values.
124 | """
125 | if self.y.shape[1] == 1:
126 | df = pd.DataFrame({"y": self.y[:, 0]})
127 |
128 | if self.v is not None:
129 | df["v"] = self.v[:, 0]
130 |
131 | if self.n is not None:
132 | df["n"] = self.n[:, 0]
133 |
134 | df[self.X_names] = self.X
135 |
136 | else:
137 | all_dfs = []
138 | for i_set in range(self.y.shape[1]):
139 | df = pd.DataFrame(
140 | {
141 | "set": np.full(self.y.shape[0], i_set),
142 | "y": self.y[:, i_set],
143 | }
144 | )
145 |
146 | if self.v is not None:
147 | df["v"] = self.v[:, i_set]
148 |
149 | if self.n is not None:
150 | df["n"] = self.n[:, i_set]
151 |
152 | # X is the same across sets
153 | df[self.X_names] = self.X
154 |
155 | all_dfs.append(df)
156 |
157 | df = pd.concat(all_dfs, axis=0)
158 |
159 | return df
160 |
161 |
162 | def meta_regression(
163 | y=None,
164 | v=None,
165 | X=None,
166 | n=None,
167 | data=None,
168 | X_names=None,
169 | add_intercept=True,
170 | method="ML",
171 | ci_method="QP",
172 | alpha=0.05,
173 | **kwargs,
174 | ):
175 | """Fit the standard meta-regression/meta-analysis model to provided data.
176 |
177 | Parameters
178 | ----------
179 | y : None or :obj:`numpy.ndarray` of shape (K,) or :obj:`str`, optional
180 | 1d array of study-level estimates with length K, or the name of the column in data
181 | containing the y values.
182 | Default = None.
183 | v : None or :obj:`numpy.ndarray` of shape (K,) or :obj:`str`, optional
184 | 1d array of study-level variances with length K, or the name of the column in data
185 | containing v values.
186 | Default = None.
187 | X : None or :obj:`numpy.ndarray` of shape (K,[P]) or :obj:`list` of :obj:`str`, optional
188 | 1d or 2d array containing study-level predictors (dimensions K x P),
189 | or a list of strings giving the names of the columns in data containing the X values.
190 | Default = None.
191 | n : None or :obj:`numpy.ndarray` of shape (K,) or :obj:`str`, optional
192 | 1d array of study-level sample sizes (length K), or the name of the corresponding column
193 | in ``data``.
194 | Default = None.
195 | data : None or :obj:`pandas.DataFrame` or :obj:`~pymare.core.Dataset`, optional
196 | If a Dataset instance is passed, the y, v, X, n and associated arguments are ignored,
197 | and data is passed directly to the selected estimator.
198 | If a pandas DataFrame, y, v, X and/or n values are taken from the DF columns.
199 | By default, columns are expected to have the same names as arguments
200 | (e.g., the y values will be expected in the 'y' column).
201 | This can be modified by passing strings giving column names to any of the y, v, X, or n
202 | arguments.
203 | X_names : None or :obj:`list` of :obj:`str`, optional
204 | List of length P containing the names of the predictors.
205 | Ignored if ``data`` is provided (use ``X`` to specify columns).
206 | Default = None.
207 | add_intercept : :obj:`bool`, optional
208 | If True, an intercept column is automatically added to the predictor matrix.
209 | If False, the predictors matrix is passed as-is to estimators.
210 | Default = True.
211 | method : {"ML", "REML", "DL", "HE", "WLS", "FE", "Stan"}, optional
212 | Name of estimation method. Default = 'ML'.
213 | Supported estimators include:
214 |
215 | - 'ML': Maximum-likelihood estimator
216 | - 'REML': Restricted maximum-likelihood estimator
217 | - 'DL': DerSimonian-Laird estimator
218 | - 'HE': Hedges estimator
219 | - 'WLS' or 'FE': Weighted least squares (fixed effects only)
220 | - 'Stan': Full Bayesian MCMC estimation via Stan
221 | ci_method : {"QP"}, optional
222 | Estimation method to use when computing uncertainty estimates.
223 | Currently only 'QP' is supported. Default = 'QP'.
224 | Ignored if ``method == 'Stan'``.
225 | alpha : :obj:`float`, optional
226 | Desired alpha level (CIs will have 1 - alpha coverage). Default = 0.05.
227 | **kwargs
228 | Optional keyword arguments to pass onto the chosen estimator.
229 |
230 | Returns
231 | -------
232 | :obj:`~pymare.results.MetaRegressionResults` or \
233 | :obj:`~pymare.results.BayesianMetaRegressionResults`
234 | A MetaRegressionResults or BayesianMetaRegressionResults instance,
235 | depending on the specified method ('Stan' will return the latter; all
236 | other methods return the former).
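
    Examples
    --------
    A minimal sketch with arbitrary values, using the DerSimonian-Laird
    estimator:

    >>> from pymare import meta_regression
    >>> results = meta_regression(y=[2, 4, 6], v=[1, 1, 1], method="DL")
    >>> results.to_df()  # doctest: +SKIP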
237 | """
238 |     if data is None or data.__class__.__name__ != "Dataset":
240 | data = Dataset(y, v, X, n, data, X_names, add_intercept)
241 |
242 | method = method.lower()
243 |
244 | if method in ["ml", "reml"]:
245 | if v is not None:
246 | est_cls = partial(VarianceBasedLikelihoodEstimator, method=method)
247 | elif n is not None:
248 | est_cls = partial(SampleSizeBasedLikelihoodEstimator, method=method)
249 | else:
250 | raise ValueError("If method is ML or REML, one of `v` or `n` must be passed!")
251 | else:
252 | est_cls = {
253 | "dl": DerSimonianLaird,
254 | "wls": WeightedLeastSquares,
255 | "fe": WeightedLeastSquares,
256 | "stan": StanMetaRegression,
257 | "he": Hedges,
258 | }[method]
259 |
260 | # Get estimates
261 | est = est_cls(**kwargs)
262 | est.fit_dataset(data)
263 | return est.summary()
264 |
--------------------------------------------------------------------------------
/pymare/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | """Open meta-analytic datasets."""
2 |
3 | from .metadat import michael2013
4 |
5 | __all__ = [
6 | "michael2013",
7 | ]
8 |
--------------------------------------------------------------------------------
/pymare/datasets/metadat.py:
--------------------------------------------------------------------------------
1 | """Datasets from metadat."""
2 |
3 | import json
4 | import os.path as op
5 |
6 | import pandas as pd
7 |
8 | from pymare.utils import get_resource_path
9 |
10 |
11 | def michael2013():
12 | """Load a dataset of studies on the persuasive power of a brain image.
13 |
14 | This dataset was published in :footcite:t:`michael2013non`,
15 | and was curated in metadat :footcite:p:`white2022metadat`.
16 |
17 | Returns
18 | -------
19 | df : :obj:`~pandas.DataFrame`
20 | A dataframe with the following columns:
21 |
22 | - ``"Study"``: the study name
23 | - ``"No_brain_n"``: the sample size for no-brain-image condition
24 | - ``"No_brain_m"``: mean agreement rating for no-brain-image condition
25 | - ``"No_brain_s"``: standard deviation of agreement rating for no-brain-image condition
26 | - ``"Brain_n"``: the sample size for brain-image condition
27 | - ``"Brain_m"``: mean agreement rating for brain-image condition
28 | - ``"Brain_s"``: standard deviation of agreement rating for brain-image condition
29 | - ``"Included_Critique"``: whether a critique was included in the study or not
30 | - ``"Medium"``: the medium of the study
31 | - ``"Compensation"``: notes on the compensation of the study
32 | - ``"Participant_Pool"``: notes on where participants were recruited
33 | - ``"yi"``: Raw mean difference, calculated as Brain_m - No_brain_m
34 | - ``"vi"``: Corresponding sampling variance
35 |
36 | metadata : :obj:`dict`
37 | A dictionary with metadata about the columns in the dataset.
38 |
39 | Notes
40 | -----
41 | For more information about this dataset, see metadat's documentation:
42 | https://wviechtb.github.io/metadat/reference/dat.michael2013.html
43 |
44 | References
45 | ----------
46 | .. footbibliography::
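
    Examples
    --------
    >>> from pymare.datasets import michael2013
    >>> data, metadata = michael2013()
    >>> data[["Study", "yi", "vi"]].head()  # doctest: +SKIP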
47 | """
48 | dataset_dir = op.join(get_resource_path(), "datasets")
49 | tsv_file = op.join(dataset_dir, "michael2013.tsv")
50 | json_file = op.join(dataset_dir, "michael2013.json")
51 | df = pd.read_table(tsv_file)
52 | with open(json_file, "r") as fo:
53 | metadata = json.load(fo)
54 |
55 | return df, metadata
56 |
--------------------------------------------------------------------------------
/pymare/effectsize/__init__.py:
--------------------------------------------------------------------------------
1 | """Tools for converting between effect-size measures."""
2 |
3 | from .base import (
4 | OneSampleEffectSizeConverter,
5 | TwoSampleEffectSizeConverter,
6 | compute_measure,
7 | solve_system,
8 | )
9 | from .expressions import Expression, select_expressions
10 |
11 | __all__ = [
12 | "OneSampleEffectSizeConverter",
13 | "TwoSampleEffectSizeConverter",
14 | "solve_system",
15 | "Expression",
16 | "select_expressions",
17 | "compute_measure",
18 | ]
19 |
--------------------------------------------------------------------------------
/pymare/effectsize/base.py:
--------------------------------------------------------------------------------
1 | """Tools for effect size computation/conversion."""
2 |
3 | from abc import ABCMeta
4 | from collections import defaultdict
5 | from functools import partial
6 |
7 | import numpy as np
8 | from sympy import Symbol, lambdify, solve
9 |
10 | from pymare import Dataset
11 |
12 | from .expressions import select_expressions
13 |
14 | SYMPY_MODULES = ["numpy", "scipy"]
15 |
16 |
17 | def solve_system(system, known_vars=None):
18 | """Solve and evaluate a system of SymPy equations given known inputs.
19 |
20 | Parameters
21 | ----------
22 | system : :obj:`list` of :obj:`sympy.core.expr.Expr`
23 | A list of SymPy expressions defining the system to solve.
24 | known_vars : None or :obj:`dict`, optional
25 | A dictionary of known variables to use
26 | when evaluating the solution. Keys are the names of parameters
27 | (e.g., 'sem', 't'), values are numerical data types (including
28 | numpy arrays). Default = None.
29 |
30 | Returns
31 | -------
32 | :obj:`dict`
33 | A dictionary of newly computed values, where the keys are parameter
34 | names and the values are numerical data types.
35 |
36 | Notes
37 | -----
38 | The returned dictionary contains only keys that were not passed in as
39 | input (i.e., already known variables will be ignored).
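
    Examples
    --------
    A toy system (the symbols here are illustrative, not PyMARE's built-in
    expressions):

    >>> from sympy import Symbol
    >>> y, sd, d = Symbol("y"), Symbol("sd"), Symbol("d")
    >>> solve_system([y / sd - d], {"y": 10.0, "sd": 2.0})
    {'d': 5.0}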
40 | """
41 | system = system.copy()
42 |
43 | known_vars = known_vars or {}
44 |
45 | # Get base system of equations and construct symbol dict
46 | symbols = set().union(*[eq.free_symbols for eq in system])
47 | symbols = {s.name: s for s in list(symbols)}
48 |
49 | # Add a dummy equation for each known variable
50 | dummies = set()
51 | for name in known_vars.keys():
52 | if name not in symbols:
53 | continue
54 | dummy = Symbol("_%s" % name)
55 | dummies.add(dummy)
56 | system.append(symbols[name] - dummy)
57 |
58 | # Solve the system for all existing symbols.
59 | # NOTE: previously we used the nonlinsolve() solver instead of solve().
60 | # for inscrutable reasons, nonlinsolve behaves unpredictably, and sometimes
61 | # fails to produce solutions even for repeated runs of the exact same
62 | # inputs. Conclusion: do not use nonlinsolve.
63 | symbols = list(symbols.values())
64 | solutions = solve(system, symbols)
65 |
66 | if not solutions:
67 | return {}
68 |
69 | # solver will return a dict if there's only one non-dummy expression
70 | if isinstance(solutions, dict):
71 | solutions = [[solutions[s] for s in symbols]]
72 |
73 | # Prepare the dummy list and data args in a fixed order
74 | dummy_list = list(dummies)
75 | data_args = [known_vars[var.name.strip("_")] for var in dummy_list]
76 |
77 | # Compute any solved vars via numpy and store in new dict
78 | results = {}
79 | for i, sol in enumerate(solutions[0]):
80 | name = symbols[i].name
81 | free = sol.free_symbols
82 | if not (free - dummies) and not (len(free) == 1 and list(free)[0].name.strip("_") == name):
83 | func = lambdify(dummy_list, sol, modules=SYMPY_MODULES)
84 | results[name] = func(*data_args)
85 |
86 | return results
87 |
88 |
89 | class EffectSizeConverter(metaclass=ABCMeta):
90 | """Base class for effect size converters."""
91 |
92 | def __init__(self, data=None, **kwargs):
93 | kwargs = {k: v for k, v in kwargs.items() if v is not None}
94 |
95 | if data is not None:
96 | kwargs = self._collect_variables(data, kwargs)
97 |
98 | # Do any subclass-specific validation
99 | kwargs = self._validate(kwargs)
100 |
101 | # Scalars are fine, but lists and tuples break lambdified expressions
102 | for k, v in kwargs.items():
103 | if isinstance(v, (list, tuple)):
104 | kwargs[k] = np.array(v)
105 |
106 | self.known_vars = {}
107 | self._system_cache = defaultdict(dict)
108 | self.update_data(**kwargs)
109 |
110 | @staticmethod
111 | def _collect_variables(data, kwargs):
112 | # consolidate variables from pandas DF and keyword arguments, giving
113 | # precedence to the latter.
114 | kwargs = kwargs.copy()
115 | df_cols = {col: data.loc[:, col].values for col in data.columns}
116 | df_cols.update(kwargs)
117 | return df_cols
118 |
119 | def _validate(self, kwargs):
120 | return kwargs
121 |
122 |     def __getattr__(self, key):
123 |         """Alias get_<stat>() calls to get(stat=<stat>)."""
124 |         if key.startswith("get_"):
125 |             stat = key.replace("get_", "")
126 |             return partial(self.get, stat=stat)
127 |         raise AttributeError(key)
128 |
128 | def update_data(self, incremental=False, **kwargs):
129 | """Update instance data.
130 |
131 | Parameters
132 | ----------
133 | incremental : :obj:`bool`, optional
134 | If True, updates data incrementally (i.e., existing data will be preserved unless
135 | they're overwritten by incoming keys). If False, all existing data is dropped first.
136 | Default = False.
137 | **kwargs
138 | Data values or arrays; keys are the names of the quantities.
139 | All inputs to ``__init__`` are valid.
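
        Examples
        --------
        An illustrative sketch; existing inputs are kept and ``r`` is added:

        >>> import numpy as np
        >>> conv = OneSampleEffectSizeConverter(m=np.array([2.0]), sd=np.array([1.0]))
        >>> conv.update_data(incremental=True, r=np.array([0.5]))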
140 | """
141 | if not incremental:
142 | self.known_vars = {}
143 | self.known_vars.update(kwargs)
144 |
145 | def _get_system(self, stat):
146 | # Retrieve a system of equations capable of solving for desired stat.
147 | known = set([k for k, v in self.known_vars.items() if v is not None])
148 |
149 | # get system from cache if available
150 | cached = self._system_cache.get(stat, {})
151 | for k, system in cached.items():
152 | if known.issuperset(k):
153 | return system
154 |
155 | # otherwise try to get a sufficient system
156 | exprs = select_expressions(target=stat, known_vars=known, type=self._type)
157 | if exprs is None:
158 | return None
159 | system = [exp.sympy for exp in exprs]
160 |
161 | # update the cache
162 | if system:
163 | free_syms = set().union(*[exp.symbols for exp in exprs])
164 | set_key = frozenset([s.name for s in free_syms])
165 | self._system_cache[stat][set_key] = system
166 |
167 | return system
168 |
169 | def to_dataset(self, measure, **kwargs):
170 | """Convert conversion results to a Dataset."""
171 | measure = measure.lower()
172 | y = self.get(measure)
173 | v = self.get("v_{}".format(measure), error=False)
174 |         # n may not be known or solvable; fall back to None
175 |         n = self.get("n", error=False)
178 | return Dataset(y=y, v=v, n=n, **kwargs)
179 |
180 | def get(self, stat, error=True):
181 | """Compute and return values for the specified statistic, if possible.
182 |
183 | Parameters
184 | ----------
185 | stat : :obj:`str`
186 | The name of the quantity to retrieve.
187 | error : :obj:`bool`, optional
188 | Specifies behavior in the event that the requested quantity cannot be computed.
189 | If True (default), raises an exception. If False, returns None.
190 |
191 | Returns
192 | -------
193 | :obj:`float` or :obj:`numpy.ndarray`
194 | A float or ndarray containing the requested parameter values, if successfully computed.
195 |
196 | Notes
197 | -----
198 | All values computed via get() are internally cached. Do not try to
199 | update the instance's known values directly; any change to input
200 | data require either initialization of a new instance, or a call to
201 | update_data().
202 | """
203 | stat = stat.lower()
204 |
205 | if stat in self.known_vars:
206 | return self.known_vars[stat]
207 |
208 |         system = self._get_system(stat)
209 |         result = solve_system(system, self.known_vars) if system is not None else None
210 |
211 |         if not result or stat not in result:
212 |             if error:
213 |                 known = list(self.known_vars.keys())
214 |                 raise ValueError(
215 |                     "Unable to solve for statistic '{}' given the "
216 |                     "known quantities ({}).".format(stat, known)
217 |                 )
218 |             return None
219 |
220 |         self.known_vars.update(result)
221 |         return result[stat]
221 |
222 |
223 | class OneSampleEffectSizeConverter(EffectSizeConverter):
224 | """Effect size converter for metric involving a single group/set of scores.
225 |
226 | Parameters
227 | ----------
228 | data : None or :obj:`pandas.DataFrame`, optional
229 | Optional pandas DataFrame to extract variables from.
230 | Column names must match the controlled names listed below for
231 | kwargs. If additional kwargs are provided, they will take
232 | precedence over the values in the data frame.
233 | Default = None.
234 | m : None or :obj:`numpy.ndarray`, optional
235 | Means or other continuous estimates
236 | sd : None or :obj:`numpy.ndarray`, optional
237 | Standard deviations
238 | n : None or :obj:`numpy.ndarray`, optional
239 | Sample sizes
240 | r : None or :obj:`numpy.ndarray`, optional
241 | Correlation coefficients
242 | **kwargs
243 | Optional keyword arguments providing additional inputs. All
244 | values must be floats, 1d ndarrays, or any iterable that can be
245 | converted to an ndarray. All variables must have the same length.
246 |
247 | Notes
248 | -----
249 | All input variables are assumed to reflect study- or analysis-level
250 |     summaries, and are *not* individual data points. E.g., do not pass in
251 |     a vector of point estimates as ``m`` and a scalar for the SDs ``sd``.
252 | The lengths of all inputs must match.
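
    Examples
    --------
    An illustrative sketch, assuming the package's one-sample definition of
    Cohen's d (d = m / sd):

    >>> import numpy as np
    >>> conv = OneSampleEffectSizeConverter(
    ...     m=np.array([2.0, 4.0]), sd=np.array([1.0, 2.0]), n=np.array([20, 20])
    ... )
    >>> conv.get_d()  # doctest: +SKIP
    array([2., 2.])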
253 | """
254 |
255 | _type = 1
256 |
257 | def __init__(self, data=None, m=None, sd=None, n=None, r=None, **kwargs):
258 | super().__init__(data, m=m, sd=sd, n=n, r=r, **kwargs)
259 |
260 | def to_dataset(self, measure="RM", **kwargs):
261 | """Get a Pymare Dataset with y and v mapped to the specified measure.
262 |
263 | Parameters
264 | ----------
265 | measure : {"RM", "SM", "D", "R", "ZR"}, optional
266 | The measure to map to the Dataset's y and v attributes
267 | (where y is the desired measure, and v is its variance). Valid values include:
268 |
269 | - 'RM': Raw mean of the group. This is the default.
270 |             - 'SM': Standardized mean. This is often called Hedges g
271 | (one-sample), or equivalently, Cohen's one-sample d with
272 | a bias correction applied.
273 | - 'D': Cohen's d. Note that no bias correction is applied
274 | (use 'SM' instead).
275 | - 'R': Raw correlation coefficient.
276 | - 'ZR': Fisher z-transformed correlation coefficient.
277 | **kwargs
278 | Optional keyword arguments to pass onto the Dataset
279 | initializer. Provides a way of supplementing the generated y
280 | and v arrays with additional arguments (e.g., X, X_names, n).
281 | See pymare.Dataset docs for details.
282 |
283 | Returns
284 | -------
285 | :obj:`~pymare.core.Dataset`
286 |
287 | Notes
288 | -----
289 | Measures 'RM', 'SM', and 'D' require m, sd, and n as inputs.
290 | Measures 'R' and 'ZR' require r and n as inputs.
291 | """
292 | return super().to_dataset(measure, **kwargs)
293 |
294 |
295 | class TwoSampleEffectSizeConverter(EffectSizeConverter):
296 | """Effect size converter for two-sample comparisons.
297 |
298 | Parameters
299 | ----------
300 | data : None or :obj:`pandas.DataFrame`, optional
301 | Optional pandas DataFrame to extract variables from.
302 | Column names must match the controlled names listed below for
303 | kwargs. If additional kwargs are provided, they will take
304 | precedence over the values in the data frame.
305 | m1 : None or :obj:`numpy.ndarray`, optional
306 | Means for group 1
307 | m2 : None or :obj:`numpy.ndarray`, optional
308 | Means for group 2
309 | sd1 : None or :obj:`numpy.ndarray`, optional
310 | Standard deviations for group 1
311 | sd2 : None or :obj:`numpy.ndarray`, optional
312 | Standard deviations for group 2
313 | n1 : None or :obj:`numpy.ndarray`, optional
314 | Sample sizes for group 1
315 | n2 : None or :obj:`numpy.ndarray`, optional
316 | Sample sizes for group 2
317 | **kwargs
318 | Optional keyword arguments providing additional inputs. All
319 | values must be floats, 1d ndarrays, or any iterable that can be
320 | converted to an ndarray.
321 |
322 | Notes
323 | -----
324 |     summaries, and are *not* individual data points. E.g., do not pass in
325 |     a vector of point estimates as ``m1`` and a scalar for the SDs ``sd1``.
326 | a vector of point estimates as `m1` and a scalar for the SDs `sd1`.
327 | The lengths of all inputs must match. All variables must be passed in
328 | as pairs (e.g., if m1 is provided, m2 must also be provided).
329 |
330 | When using the TwoSampleEffectSizeConverter, it is assumed that the
331 |     variable pairs are from independent samples. Paired-sample comparisons
332 | are not currently supported.
333 | """
334 |
335 | _type = 2
336 |
337 | def __init__(
338 | self, data=None, m1=None, m2=None, sd1=None, sd2=None, n1=None, n2=None, **kwargs
339 | ):
340 | super().__init__(data, m1=m1, m2=m2, sd1=sd1, sd2=sd2, n1=n1, n2=n2, **kwargs)
341 |
342 | def _validate(self, kwargs):
343 | # Validate that all inputs were passed in pairs
344 |         var_names = {v.rstrip("12") for v in kwargs.keys()}
345 | pair_vars = var_names - {"d"}
346 | for var in pair_vars:
347 | name1, name2 = "%s1" % var, "%s2" % var
348 | var1, var2 = kwargs.get(name1), kwargs.get(name2)
349 | if (var1 is None) != (var2 is None):
350 | raise ValueError(
351 | "Input variable '{}' must be provided in pairs; please "
352 | "provide both {} and {} (or neither).".format(var, name1, name2)
353 | )
354 | return kwargs
355 |
356 | def to_dataset(self, measure="SMD", **kwargs):
357 | """Get a Pymare Dataset with y and v mapped to the specified measure.
358 |
359 | Parameters
360 | ----------
361 | measure : {"SMD", "RMD", "D"}, optional
362 | The measure to map to the Dataset's y and v
363 | attributes (where y is the desired measure, and v is its
364 | variance). Valid values include:
365 |
366 | - 'SMD': Standardized mean difference between groups. This
367 |             is often called Hedges' g, or equivalently, Cohen's d with
368 | a bias correction applied.
369 | This is the default.
370 | - 'RMD': Raw mean difference between groups.
371 | - 'D': Cohen's d. Note that no bias correction is applied
372 | (use 'SMD' instead).
373 | **kwargs
374 | Optional keyword arguments to pass onto the Dataset
375 | initializer. Provides a way of supplementing the generated y
376 | and v arrays with additional arguments (e.g., X, X_names, n).
377 | See pymare.Dataset docs for details.
378 |
379 | Returns
380 | -------
381 | :obj:`~pymare.core.Dataset`
382 |
383 | Notes
384 | -----
385 | All measures require that m1, m2, sd1, sd2, n1, and n2 be passed in
386 | as inputs (or be solvable from the passed inputs).
387 | """
388 | return super().to_dataset(measure, **kwargs)
389 |
390 |
391 | def compute_measure(
392 | measure,
393 | data=None,
394 | comparison="infer",
395 | return_type="tuple",
396 | m=None,
397 | sd=None,
398 | n=None,
399 | r=None,
400 | m1=None,
401 | m2=None,
402 | sd1=None,
403 | sd2=None,
404 | n1=None,
405 | n2=None,
406 | **dataset_kwargs,
407 | ):
408 | """Auto-detect and apply the right converter class.
409 |
410 | Parameters
411 | ----------
412 | measure : {"RM", "SM", "R", "ZR", "RMD", "SMD", "D"}
413 | The desired output effect size measure. Valid values are
414 | listed below, with the required named inputs in parentheses:
415 |
416 | - 'RM' (m, sd, n): Raw mean of the group.
417 |         - 'SM' (m, sd, n): Standardized mean. This is often called
418 |             Hedges' g (one-sample), or equivalently, Cohen's one-sample
419 |             d with a bias correction applied.
420 | - 'R' (r, n): Raw correlation coefficient.
421 | - 'ZR' (r, n): Fisher z-transformed correlation coefficient.
422 | - 'RMD' (m1, m2, sd1, sd2, n1, n2): Raw mean difference between
423 | groups.
424 | - 'SMD' (m1, m2, sd1, sd2, n1, n2): Standardized mean difference
425 |             between groups. This is often called Hedges' g, or equivalently,
426 | Cohen's d with a bias correction applied.
427 | - 'D' (m, sd, n, or m1, m2, sd1, sd2, n1, n2): Cohen's d. No bias
428 | correction is applied (for that, use 'SM' or 'SMD' instead).
429 |             Note that 'D' can be either one-sample or two-sample. This is
430 |             specified via ``comparison``, or (if comparison=='infer')
431 |             inferred from the passed arguments.
432 | data : None or :obj:`pandas.DataFrame`, optional
433 | A pandas DataFrame to extract variables
434 | from. Column names must match the names of other args ('m', 'sd',
435 | 'n2', etc.). If both a DataFrame and keyword arguments are
436 | provided, the two will be merged, with variables passed as separate
437 | arguments taking precedence over DataFrame columns in the event of
438 | a clash.
439 | comparison : {"infer", 1, 2}, optional
440 | The type of originating comparison.
441 | This is currently unnecessary, as the type can be deterministically
442 | inferred from the input arguments and measure, but may become
443 |         necessary in the future, and provides a way of imposing constraints on
444 | code embedded in larger pipelines. Valid values:
445 |
446 | - 'infer' (default): Infer the type of comparison from the input
447 | arguments.
448 | - 1: One-group comparison. Must be accompanied by some/all of the
449 | following named variables: m, sd, n, r.
450 | - 2: Two-group comparison. Independent samples are assumed. Must be
451 | accompanied by some/all of the following named variables: m1,
452 | m2, sd1, sd2, n1, n2.
453 | return_type : {"tuple", "dict", "dataset", "converter"}, optional
454 | Controls what gets returned. Valid values:
455 |
456 | - 'tuple': A 2-tuple, where the first element is a 1-d array
457 | containing the computed estimates (i.e., y), and the second
458 | element is a 1-d array containing the associated sampling
459 | variances.
460 | - 'dict': A dictionary with keys 'y' and 'v' that map to the arrays
461 | described for 'tuple'.
462 | - 'dataset': A pymare Dataset instance, with y and v attributes set
463 | to the corresponding arrays. Note that additional keyword
464 | arguments can be passed onto the Dataset init via kwargs.
465 |         - 'converter': The EffectSizeConverter instance used internally
466 |             to handle the desired computation. The target measures will
467 |             have already been computed (and hence cached), and can be
468 |             retrieved via ``get('{measure}')`` and ``get('v_{measure}')``.
469 | m : None or :obj:`numpy.ndarray`, optional
470 | Means or other estimates in single-group case
471 | sd : None or :obj:`numpy.ndarray`, optional
472 | Standard deviations in single-group case
473 | n : None or :obj:`numpy.ndarray`, optional
474 | Sample sizes in single-group case
475 | r : None or :obj:`numpy.ndarray`, optional
476 | Correlation coefficients
477 | m1 : None or :obj:`numpy.ndarray`, optional
478 | Means for group 1
479 | m2 : None or :obj:`numpy.ndarray`, optional
480 | Means for group 2
481 | sd1 : None or :obj:`numpy.ndarray`, optional
482 | Standard deviations for group 1
483 | sd2 : None or :obj:`numpy.ndarray`, optional
484 | Standard deviations for group 2
485 | n1 : None or :obj:`numpy.ndarray`, optional
486 | Sample sizes for group 1
487 | n2 : None or :obj:`numpy.ndarray`, optional
488 | Sample sizes for group 2
489 | **dataset_kwargs
490 | Optional keyword arguments passed on to the Dataset initializer.
491 | Ignored unless return_type == 'dataset'.
492 |
493 | Returns
494 | -------
495 |     :obj:`tuple`, :obj:`dict`, :obj:`~pymare.core.Dataset`, or EffectSizeConverter, depending on ``return_type``.
496 | """
497 | var_args = dict(m=m, sd=sd, n=n, r=r, m1=m1, m2=m2, sd1=sd1, sd2=sd2, n1=n1, n2=n2)
498 | var_args = {k: v for k, v in var_args.items() if v is not None}
499 |
500 | if data is not None:
501 | var_args = EffectSizeConverter._collect_variables(data, var_args)
502 |
503 | valid_measures = {"RM", "SM", "R", "ZR", "RMD", "SMD", "D"}
504 | if measure not in valid_measures:
505 | raise ValueError(
506 | "Invalid measures '{}'; must be one of {}.".format(measure, valid_measures)
507 | )
508 |
509 | # Select or infer converter class
510 | if comparison == "infer":
511 | one_samp_inputs = {"m", "sd", "n", "r"}
512 | two_samp_inputs = {"m1", "m2", "sd1", "sd2", "n1", "n2"}
513 |
514 | if measure in {"RM", "SM", "R", "ZR"}:
515 | comparison = 1
516 | elif measure in {"RMD", "SMD"}:
517 | comparison = 2
518 | elif measure in {"D"}:
519 | arg_set = set(var_args.keys())
520 | if (arg_set & one_samp_inputs) and not (arg_set & two_samp_inputs):
521 | comparison = 1
522 | elif (arg_set & two_samp_inputs) and not (arg_set & one_samp_inputs):
523 | comparison = 2
524 | else:
525 | raise ValueError(
526 | "Requested measure (D) and provided data arguments ({}) "
527 | "are insufficient to determine comparison; either provide"
528 | " data consistent with only one-group or two-group "
529 | "measures, or explicitly set comparison to 1 or 2.".format(arg_set)
530 | )
531 |
532 | if comparison == 1:
533 | conv_cls = OneSampleEffectSizeConverter
534 | elif comparison == 2:
535 | conv_cls = TwoSampleEffectSizeConverter
536 | else:
537 | raise ValueError(
538 | "Invalid comparison type '{}'! Valid values are "
539 | "'infer', 1, and 2.".format(comparison)
540 | )
541 |
542 | conv = conv_cls(**var_args)
543 | y = conv.get(measure)
544 | v = conv.get("v_{}".format(measure))
545 |
546 | return_type = return_type.lower()
547 | if return_type == "tuple":
548 | return (y, v)
549 | elif return_type == "dict":
550 | return {"y": y, "v": v}
551 | elif return_type == "dataset":
552 | return conv.to_dataset(measure, **dataset_kwargs)
553 | elif return_type == "converter":
554 | return conv
555 | else:
556 | raise ValueError(
557 | "Invalid return_type value '{}'. Must be one of "
558 | "'tuple', 'dict', 'dataset', or 'converter'.".format(return_type)
559 | )
560 |
--------------------------------------------------------------------------------
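A minimal usage sketch for the converters and compute_measure function defined
above. The summary statistics are hypothetical; every call shown
(compute_measure, get, to_dataset) is defined in this file:

    import numpy as np

    from pymare.effectsize import TwoSampleEffectSizeConverter, compute_measure

    # Hypothetical study-level summaries for two independent groups
    m1, m2 = np.array([4.0, 2.0]), np.array([5.0, 2.5])
    sd1, sd2 = np.array([1.0, 3.0]), np.array([2.0, 4.0])
    n1, n2 = np.array([12, 15]), np.array([12, 16])

    # One call returns (y, v) for the requested measure
    y, v = compute_measure("SMD", m1=m1, m2=m2, sd1=sd1, sd2=sd2, n1=n1, n2=n2)

    # The same result via the converter class; intermediate quantities are cached
    conv = TwoSampleEffectSizeConverter(m1=m1, m2=m2, sd1=sd1, sd2=sd2, n1=n1, n2=n2)
    assert np.allclose(conv.get("SMD"), y)
    dataset = conv.to_dataset(measure="SMD")  # pymare.Dataset with y and v populated
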
/pymare/effectsize/expressions.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "expression": "rm - m",
4 | "type": 1,
5 | "description": "Raw mean"
6 | },
7 | {
8 | "expression": "v_rm - sd**2 / n",
9 | "type": 1,
10 | "description": "Variance of raw mean"
11 | },
12 | {
13 | "expression": "d - m / sd",
14 | "type": 1,
15 | "description": "Cohen's d (one-sample)"
16 | },
17 | {
18 | "expression": "v_d - ((n - 1)/(n - 3)) * (1 / n + d**2) - d**2 / j**2 * n",
19 | "type": 1,
20 | "description": "Variance of Cohen's d"
21 | },
22 | {
23 | "expression": "j - (1 - (3 / (4 * (n - 1) - 1)))",
24 | "type": 1,
25 | "description": "Approximate correction factor for standardized mean"
26 | },
27 | {
28 | "expression": "sm - d * j",
29 | "type": 1,
30 | "description": "Standardized mean (Hedges's g)"
31 | },
32 | {
33 | "expression": "v_sm - ((n - 1)/(n - 3)) * j**2 * (1 / n + d**2) - d**2",
34 | "type": 1,
35 | "description": "Variance of standardized mean"
36 | },
37 | {
38 | "expression": "r - r",
39 | "type": 1,
40 | "description": "Raw correlation coefficient"
41 | },
42 | {
43 | "expression": "v_r - (1 - r**2) / (n - 2)",
44 | "type": 1,
45 | "description": "Variance of raw correlation coefficient"
46 | },
47 | {
48 | "expression": "zr - atanh(r)",
49 | "type": 1,
50 | "description": "Fisher r-to-z transformed correlation coefficient"
51 | },
52 | {
53 | "expression": "v_zr - 1 / (n - 3)",
54 | "type": 1,
55 | "description": "Variance of Fisher r-to-z transformed correlation"
56 | },
57 | {
58 | "expression": "rmd - (m1 - m2)",
59 | "type": 2,
60 | "description": "Raw mean difference"
61 | },
62 | {
63 | "expression": "v_rmd - (sd1**2 / n1) + (sd2**2 / n2)",
64 | "type": 2,
65 | "description": "Variance of raw mean difference"
66 | },
67 | {
68 | "expression": "sdp - sqrt((sd1**2 * (n1 - 1) + sd2**2 * (n2 - 1)) / (n1 + n2 - 2))",
69 | "type": 2,
70 | "description": "Pooled standard deviation (Cohen version)"
71 | },
72 | {
73 | "expression": "d - (m1 - m2) / sdp",
74 | "type": 2,
75 | "description": "Cohen's d (two-sample)"
76 | },
77 | {
78 | "expression": "v_d - ((n1 + n2)/(n1 * n2) + d**2 / 2 * (n1 + n2 - 2))",
79 | "type": 2,
80 | "description": "Variance of Cohen's d"
81 | },
82 | {
83 | "expression": "j - (1 - (3 / (4 * (n1 + n2) - 9)))",
84 | "type": 2,
85 | "description": "Approximate bias correction factor for Hedges' g"
86 | },
87 | {
88 | "expression": "smd - d * j",
89 | "type": 2,
90 | "description": "Standardized mean difference (Hedges's g)"
91 | },
92 | {
93 | "expression": "v_smd - j**2 * v_d",
94 | "type": 2,
95 | "description": "Variance of SMD"
96 | }
97 | ]
98 |
--------------------------------------------------------------------------------
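Each entry above encodes a relation that is implicitly set equal to zero, so
any single unknown symbol can be solved for in terms of the others. A
self-contained sketch of that mechanism, using one expression taken verbatim
from the list:

    from sympy import Symbol, solve, sympify

    # "smd - d * j" is treated as smd - d*j == 0
    eq = sympify("smd - d * j")
    print(solve(eq, Symbol("d")))  # [smd/j]: d recovered from smd and j
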
/pymare/effectsize/expressions.py:
--------------------------------------------------------------------------------
1 | """Statistical expressions."""
2 |
3 | import json
4 | from collections import defaultdict
5 | from itertools import chain
6 | from pathlib import Path
7 |
8 | from sympy import Symbol, sympify
9 |
10 |
11 | class Expression:
12 | """Represent a single statistical expression.
13 |
14 | Parameters
15 | ----------
16 |     expression : :obj:`str`
17 | String representation of the mathematical expression.
18 | description : :obj:`str`, optional
19 | Optional text description of expression.
20 | type : :obj:`int`, optional
21 | Indicates whether the expression applies in the one-sample case (1), two-sample case (2),
22 | or both (0).
23 | """
24 |
25 | def __init__(self, expression, description=None, type=0):
26 | self._locals = {}
27 | exec("from sympy.stats import *", self._locals)
28 |
29 | self.expr = expression
30 | self.description = description
31 | self.sympy = sympify(expression, locals=self._locals)
32 | self.type = type
33 | self.symbols = self.sympy.free_symbols
34 |
35 |
36 | def _load_expressions():
37 | expressions = []
38 | path = Path(__file__).parent / "expressions.json"
39 |     expr_list = json.loads(path.read_text())
40 | for expr in expr_list:
41 | expr = Expression(**expr)
42 | expressions.append(expr)
43 |
44 | one_samp = [e for e in expressions if e.type == 1]
45 | two_samp = [e for e in expressions if e.type == 2]
46 |
47 | return one_samp, two_samp
48 |
49 |
50 | # Construct the 1-sample and 2-sample expression sets at import time
51 | one_sample_expressions, two_sample_expressions = _load_expressions()
52 |
53 |
54 | def select_expressions(target, known_vars, type=1):
55 | """Select a minimal system of expressions needed to solve for the target.
56 |
57 | Parameters
58 | ----------
59 | target : :obj:`str`
60 |         The named statistic to solve for ('d', 'sm', 'v_d', etc.).
61 |     known_vars : :obj:`set` of :obj:`str`
62 | A set of strings giving the names of the known variables.
63 | type : :obj:`int`, optional
64 |         Restrict the system to expressions that apply in the one-sample
65 |         case (1; the default) or the two-sample case (2).
66 |
67 | Returns
68 | -------
69 | :obj:`list` of :obj:`~pymare.effectsize.Expression` or None
70 | A list of Expression instances, or None if there is no solution.
71 | """
72 | exp_dict = defaultdict(list)
73 |
74 | exprs = one_sample_expressions if type == 1 else two_sample_expressions
75 |
76 | # make sure target exists before going any further
77 | all_symbols = set().union(*[e.symbols for e in exprs])
78 | if Symbol(target) not in all_symbols:
79 | raise ValueError(
80 | "Target symbol '{}' cannot be found in any of the "
81 | "known expressions).".format(target)
82 | )
83 |
84 | for exp in exprs:
85 | for sym in exp.symbols:
86 |             if sym.name not in known_vars:
87 | exp_dict[sym.name].append(exp)
88 |
89 | def df_search(sym, exprs, known, visited):
90 | """Recursively select expressions needed to solve for sym."""
91 | results = []
92 |
93 | for exp in exp_dict[sym]:
94 | candidates = []
95 |
96 | sym_names = set(s.name for s in exp.symbols)
97 |
98 | # Abort if we're cycling
99 | if visited & sym_names:
100 | continue
101 |
102 | new_exprs = list(exprs) + [exp]
103 | free_symbols = sym_names - known.union({sym})
104 | _visited = set(visited) | {sym}
105 |
106 | # If there are no more free symbols, we're done
107 | if not free_symbols:
108 | results.append((new_exprs, _visited))
109 | continue
110 |
111 | # Loop over remaining free symbols and recurse
112 | candidates = [df_search(child, new_exprs, known, _visited) for child in free_symbols]
113 | candidates = [c for c in candidates if c is not None]
114 |
115 | # Make sure we've covered all free symbols in the expression
116 | symbols_found = set().union(*[c[1] for c in candidates])
117 | if free_symbols - symbols_found:
118 | continue
119 |
120 | # TODO: compact the resulting set, as it could currently include
121 | # redundant equations.
122 | merged = list(set().union(*chain([c[0] for c in candidates])))
123 | results.append((merged, symbols_found))
124 |
125 | if not results:
126 | return None
127 |
128 | # Order solutions by number of expressions
129 | results.sort(key=lambda x: len(x[0]))
130 | return results[0]
131 |
132 | # base case
133 | candidates = df_search(target, [], known_vars, set())
134 | return None if not candidates else candidates[0]
135 |
--------------------------------------------------------------------------------
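A short sketch of select_expressions in action, mirroring the behavior
exercised in pymare/tests/test_effectsize_expressions.py: solving for the
standardized mean from m, sd, and n selects three of the one-sample
expressions:

    from pymare.effectsize.expressions import select_expressions

    system = select_expressions(target="sm", known_vars={"m", "sd", "n"}, type=1)
    for expression in system:
        print(expression.expr, "|", expression.description)
    # Expected system: d from m and sd, the correction factor j from n,
    # and sm = d * j
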
/pymare/estimators/__init__.py:
--------------------------------------------------------------------------------
1 | """Estimators for meta-analyses and meta-regressions."""
2 |
3 | from .combination import FisherCombinationTest, StoufferCombinationTest
4 | from .estimators import (
5 | DerSimonianLaird,
6 | Hedges,
7 | SampleSizeBasedLikelihoodEstimator,
8 | StanMetaRegression,
9 | VarianceBasedLikelihoodEstimator,
10 | WeightedLeastSquares,
11 | )
12 |
13 | __all__ = [
14 | "WeightedLeastSquares",
15 | "DerSimonianLaird",
16 | "VarianceBasedLikelihoodEstimator",
17 | "SampleSizeBasedLikelihoodEstimator",
18 | "StanMetaRegression",
19 | "Hedges",
20 | "StoufferCombinationTest",
21 | "FisherCombinationTest",
22 | ]
23 |
--------------------------------------------------------------------------------
/pymare/estimators/combination.py:
--------------------------------------------------------------------------------
1 | """Estimators for combination (p/z) tests."""
2 |
3 | import warnings
4 | from abc import abstractmethod
5 |
6 | import numpy as np
7 | import scipy.stats as ss
8 |
9 | from ..results import CombinationTestResults
10 | from .estimators import BaseEstimator
11 |
12 |
13 | class CombinationTest(BaseEstimator):
14 | """Base class for methods based on combining p/z values."""
15 |
16 | def __init__(self, mode="directed"):
17 | mode = mode.lower()
18 | if mode not in {"directed", "undirected", "concordant"}:
19 | raise ValueError(
20 | "Invalid mode; must be one of 'directed', 'undirected', or 'concordant'."
21 | )
22 | if mode == "undirected":
23 | warnings.warn(
24 | "You have opted to conduct an 'undirected' test. Are you sure "
25 | "this is what you want? If you're looking for the analog of a "
26 | "conventional two-tailed test, use 'concordant'."
27 | )
28 | self.mode = mode
29 |
30 | @abstractmethod
31 | def p_value(self, z, *args, **kwargs):
32 | """Calculate p-values."""
33 | pass
34 |
35 | def _z_to_p(self, z):
36 | return ss.norm.sf(z)
37 |
38 | def fit(self, z, *args, **kwargs):
39 | """Fit the estimator to z-values."""
40 | # This resets the Estimator's dataset_ attribute. fit_dataset will overwrite if called.
41 | self.dataset_ = None
42 |
43 | if self.mode == "concordant":
44 | ose = self.__class__(mode="directed")
45 | p1 = ose.p_value(z, *args, **kwargs)
46 | p2 = ose.p_value(-z, *args, **kwargs)
47 | p = np.minimum(1, 2 * np.minimum(p1, p2))
48 | z_calc = ss.norm.isf(p)
49 | z_calc[p2 < p1] *= -1
50 | else:
51 | if self.mode == "undirected":
52 | z = np.abs(z)
53 | p = self.p_value(z, *args, **kwargs)
54 | z_calc = ss.norm.isf(p)
55 |
56 | self.params_ = {"p": p, "z": z_calc}
57 | return self
58 |
59 | def summary(self):
60 | """Generate a summary of the estimator results."""
61 | if not hasattr(self, "params_"):
62 | name = self.__class__.__name__
63 | raise ValueError(
64 | "This {} instance hasn't been fitted yet. Please "
65 | "call fit() before summary().".format(name)
66 | )
67 | return CombinationTestResults(
68 | self, self.dataset_, z=self.params_["z"], p=self.params_["p"]
69 | )
70 |
71 |
72 | class StoufferCombinationTest(CombinationTest):
73 | """Stouffer's Z-score meta-analysis method.
74 |
75 | Takes a set of independent z-scores and combines them via Stouffer's
76 | :footcite:p:`stouffer1949american` method to produce a fixed-effect estimate of the combined
77 | effect.
78 |
79 | Parameters
80 | ----------
81 | mode : {"directed", "undirected", "concordant"}, optional
82 |         The type of test to perform, i.e., which null hypothesis to reject.
83 | See :footcite:t:`winkler2016non` for details.
84 | Valid options are:
85 |
86 | - 'directed': tests a directional hypothesis--i.e., that the
87 | observed value is consistently greater than 0 in the input
88 | studies. This is the default.
89 | - 'undirected': tests an undirected hypothesis--i.e., that the
90 | observed value differs from 0 in the input studies, but
91 | allowing the direction of the deviation to vary by study.
92 | - 'concordant': equivalent to two directed tests, one for each
93 | sign, with correction for 2 tests.
94 |
95 | Notes
96 | -----
97 | (1) All input z-scores are assumed to correspond to one-sided p-values.
98 | Do NOT pass in z-scores that have been directly converted from
99 | two-tailed p-values, as these do not preserve directional
100 | information.
101 | (2) The 'directed' and 'undirected' modes are NOT the same as
102 | one-tailed and two-tailed tests. In general, users who want to test
103 | directed hypotheses should use the 'directed' mode, and users who
104 | want to test for consistent effects in either the positive or
105 | negative direction should use the 'concordant' mode. The
106 | 'undirected' mode tests a fairly uncommon null that doesn't
107 | constrain the sign of effects to be consistent across studies
108 | (one can think of it as a test of extremity). In the vast majority
109 | of meta-analysis applications, this mode is not appropriate, and
110 | users should instead opt for 'directed' or 'concordant'.
111 | (3) This estimator does not support meta-regression; any moderators
112 | passed in to fit() as the X array will be ignored.
113 |
114 | References
115 | ----------
116 | .. footbibliography::
117 | """
118 |
119 | # Maps Dataset attributes onto fit() args; see BaseEstimator for details.
120 | _dataset_attr_map = {"z": "y", "w": "n", "g": "v"}
121 |
122 | def _inflation_term(self, z, w, g, corr=None):
123 | """Calculate the variance inflation term for each group.
124 |
125 |         This term is used to adjust the variance of the combined z-score when
126 |         multiple samples come from the same study.
127 |
128 | Parameters
129 | ----------
130 | z : :obj:`numpy.ndarray` of shape (n, d)
131 | Array of z-values.
132 | w : :obj:`numpy.ndarray` of shape (n, d)
133 | Array of weights.
134 | g : :obj:`numpy.ndarray` of shape (n, d)
135 | Array of group labels.
136 | corr : :obj:`numpy.ndarray` of shape (n, n), optional
137 | The correlation matrix of the z-values. If None, it will be calculated.
138 |
139 | Returns
140 | -------
141 |         sigma : :obj:`float`
142 | The variance inflation term.
143 | """
144 | # Only center if the samples are not all the same, to prevent division by zero
145 | # when calculating the correlation matrix.
146 | # This centering is problematic for N=2
147 | all_samples_same = np.all(np.equal(z, z[0]), axis=0).all()
148 | z = z if all_samples_same else z - z.mean(0)
149 |
150 | # Use the value from one feature, as all features have the same groups and weights
151 | groups = g[:, 0]
152 | weights = w[:, 0]
153 |
154 | # Loop over groups
155 | unique_groups = np.unique(groups)
156 |
157 | sigma = 0
158 | for group in unique_groups:
159 | group_indices = np.where(groups == group)[0]
160 | group_z = z[group_indices]
161 |
162 | # For groups with only one sample the contribution to the summand is 0
163 | n_samples = len(group_indices)
164 | if n_samples < 2:
165 | continue
166 |
167 | # Calculate the within group correlation matrix and sum the non-diagonal elements
168 | if corr is None:
169 | if z.shape[1] < 2:
170 | raise ValueError("The number of features must be greater than 1.")
171 | group_corr = np.corrcoef(group_z, rowvar=True)
172 | else:
173 | group_corr = corr[group_indices][:, group_indices]
174 |
175 | upper_indices = np.triu_indices(n_samples, k=1)
176 | non_diag_corr = group_corr[upper_indices]
177 | w_i, w_j = weights[upper_indices[0]], weights[upper_indices[1]]
178 |
179 | sigma += (2 * w_i * w_j * non_diag_corr).sum()
180 |
181 | return sigma
182 |
183 | def fit(self, z, w=None, g=None, corr=None):
184 | """Fit the estimator to z-values, optionally with weights and groups."""
185 | return super().fit(z, w=w, g=g, corr=corr)
186 |
187 | def p_value(self, z, w=None, g=None, corr=None):
188 | """Calculate p-values."""
189 | if w is None:
190 | w = np.ones_like(z)
191 |
192 | if g is None and corr is not None:
193 | warnings.warn("Correlation matrix provided without groups. Ignoring.")
194 |
195 | if g is not None and corr is not None and g.shape[0] != corr.shape[0]:
196 | raise ValueError("Group labels must have the same length as the correlation matrix.")
197 |
198 | # Calculate the variance inflation term, sum of non-diagonal elements of sigma.
199 | sigma = self._inflation_term(z, w, g, corr=corr) if g is not None else 0
200 |
201 | # The sum of diagonal elements of sigma is given by (w**2).sum(0).
202 | variance = (w**2).sum(0) + sigma
203 |
204 | cz = (z * w).sum(0) / np.sqrt(variance)
205 | return ss.norm.sf(cz)
206 |
207 |
208 | class FisherCombinationTest(CombinationTest):
209 | """Fisher's method for combining p-values.
210 |
211 | Takes a set of independent z-scores and combines them via Fisher's
212 | :footcite:p:`fisher1946statistical` method to produce a fixed-effect estimate of the combined
213 | effect.
214 |
215 | Parameters
216 | ----------
217 | mode : {"directed", "undirected", "concordant"}, optional
218 |         The type of test to perform, i.e., which null hypothesis to reject.
219 | See :footcite:t:`winkler2016non` for details.
220 | Valid options are:
221 |
222 | - 'directed': tests a directional hypothesis--i.e., that the
223 | observed value is consistently greater than 0 in the input
224 | studies. This is the default.
225 | - 'undirected': tests an undirected hypothesis--i.e., that the
226 | observed value differs from 0 in the input studies, but
227 | allowing the direction of the deviation to vary by study.
228 | - 'concordant': equivalent to two directed tests, one for each
229 | sign, with correction for 2 tests.
230 |
231 | Notes
232 | -----
233 | (1) All input z-scores are assumed to correspond to one-sided p-values.
234 | Do NOT pass in z-scores that have been directly converted from
235 | two-tailed p-values, as these do not preserve directional
236 | information.
237 | (2) The 'directed' and 'undirected' modes are NOT the same as
238 | one-tailed and two-tailed tests. In general, users who want to test
239 | directed hypotheses should use the 'directed' mode, and users who
240 | want to test for consistent effects in either the positive or
241 | negative direction should use the 'concordant' mode. The
242 | 'undirected' mode tests a fairly uncommon null that doesn't
243 | constrain the sign of effects to be consistent across studies
244 | (one can think of it as a test of extremity). In the vast majority
245 | of meta-analysis applications, this mode is not appropriate, and
246 | users should instead opt for 'directed' or 'concordant'.
247 | (3) This estimator does not support meta-regression; any moderators
248 | passed in to fit() as the X array will be ignored.
249 |
250 | References
251 | ----------
252 | .. footbibliography::
253 | """
254 |
255 | # Maps Dataset attributes onto fit() args; see BaseEstimator for details.
256 | _dataset_attr_map = {"z": "y"}
257 |
258 | def p_value(self, z):
259 | """Calculate p-values."""
260 | p = self._z_to_p(z)
261 | chi2 = -2 * np.log(p).sum(0)
262 | return ss.chi2.sf(chi2, 2 * z.shape[0])
263 |
--------------------------------------------------------------------------------
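A minimal sketch of the combination estimators above, using hypothetical
one-sided z-scores (the same values appear in
pymare/tests/test_combination_tests.py). The 'concordant' mode is the analog
of a conventional two-tailed test:

    import numpy as np

    from pymare.estimators import FisherCombinationTest, StoufferCombinationTest

    z = np.array([[2.1, 0.7, -0.2, 4.1, 3.8]]).T  # studies x parallel datasets

    stouffer = StoufferCombinationTest(mode="concordant").fit(z)
    fisher = FisherCombinationTest(mode="concordant").fit(z)
    print(stouffer.params_["z"], stouffer.params_["p"])
    print(fisher.params_["z"], fisher.params_["p"])
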
/pymare/resources/datasets/michael2013.json:
--------------------------------------------------------------------------------
1 | {
2 | "Study": {
3 | "LongName": "Study Name",
4 | "Description": "Name of the study: Citation - Experiment - Subgroup."
5 | },
6 | "No_brain_n": {
7 | "LongName": "No-Brain-Image Sample Size",
8 | "Description": "Sample size for no-brain-image condition."
9 | },
10 | "No_brain_m": {
11 | "LongName": "No-Brain-Image Mean Agreement Rating",
12 | "Description": "Mean agreement rating for no-brain-image condition."
13 | },
14 | "No_brain_s": {
15 | "LongName": "No-Brain-Image Standard Deviation",
16 | "Description": "Standard deviation for no-brain-image condition."
17 | },
18 | "Brain_n": {
19 | "LongName": "Brain-Image Sample Size",
20 | "Description": "Sample size for brain-image condition."
21 | },
22 | "Brain_m": {
23 | "LongName": "Brain-Image Mean Agreement Rating",
24 | "Description": "Mean agreement rating for brain-image condition."
25 | },
26 | "Brain_s": {
27 | "LongName": "Brain-Image Standard Deviation",
28 | "Description": "Standard deviation for brain-image condition."
29 | },
30 | "Included_Critique": {
31 | "LongName": "Included Critique",
32 | "Description": "Whether a critical commentary was included in the study or not.",
33 | "Levelts": {
34 | "Critique": "article included critical commentary on conclusions",
35 | "No_Critique": "article did not include critical commentary on conclusions"
36 | }
37 | },
38 | "Medium": {
39 | "LongName": "Experiment Medium",
40 | "Description": "The medium in which the experiment was conducted.",
41 | "Levels": {
42 | "Paper": "conducted in person",
43 | "Online": "conducted online"
44 | }
45 | },
46 | "Compensation": {
47 | "LongName": "Compensation",
48 | "Description": "Notes on compensation provided to participants."
49 | },
50 | "Participant_Pool": {
51 | "LongName": "Participant Pool",
52 | "Description": "Notes on where participants were recruited."
53 | },
54 | "yi": {
55 | "LongName": "Mean Difference",
56 | "Description": "Raw mean difference, calculated as Brain_m - No_brain_m."
57 | },
58 | "vi": {
59 | "LongName": "Sampling Variance",
60 | "Description": "Corresponding sampling variance."
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/pymare/resources/datasets/michael2013.tsv:
--------------------------------------------------------------------------------
1 | Study No_brain_n No_brain_m No_brain_s Brain_n Brain_m Brain_s Included_Critique Medium Compensation Participant_Pool yi vi
2 | McCabe and Castel, 2008 - Experiment 3 - No Critique 28 2.89 0.79 26 3.12 0.65 No_critique Paper Course credit Colorado State University undergraduates 0.23 0.0385392857142857
3 | Michael et al., 2013 - Experiment 1 99 2.9 0.58 98 2.86 0.61 No_critique Online US$0.30 Mechanical Turk -0.04 0.00719491857349
4 | Michael et al., 2013 - Experiment 2 42 2.62 0.54 33 2.85 0.57 No_critique Online Course credit Victoria undergraduate subject pool 0.23 0.0167883116883117
5 | Michael et al., 2013 - Experiment 3 24 2.96 0.36 21 3.07 0.55 No_critique Paper Movie voucher Wellington high school students 0.11 0.0198047619047619
6 | Michael et al., 2013 - Experiment 4 184 2.93 0.6 184 2.89 0.6 No_critique Online US$0.50 Mechanical Turk -0.04 0.0039130434782608
7 | Michael et al., 2013 - Experiment 5 274 2.86 0.59 255 2.91 0.52 No_critique Paper Course credit Victoria Intro Psyc subject pool 0.0500000000000003 0.0023308301130671
8 | McCabe and Castel, 2008 - Experiment 3 - Critique 26 2.69 0.55 28 3.0 0.54 Critique Paper Course credit Colorado State University undergraduates 0.31 0.0220489010989011
9 | Michael et al., 2013 - Experiment 6 58 2.5 0.84 55 2.6 0.83 Critique Online US$0.50 Mechanical Turk 0.1 0.0246909717868339
10 | Michael et al., 2013 - Experiment 7 34 2.41 0.78 34 2.74 0.51 Critique Paper None General Public 0.33 0.0255441176470588
11 | Michael et al., 2013 - Experiment 8 98 2.73 0.67 93 2.68 0.69 Critique Online US$0.50 Mechanical Turk -0.0499999999999998 0.0096999670836076
12 | Michael et al., 2013 - Experiment 9 99 2.54 0.66 95 2.72 0.68 Critique Online US$0.50 Mechanical Turk 0.18 0.0092673684210526
13 | Michael et al., 2013 - Experiment 10 94 2.66 0.65 97 2.64 0.71 Critique Online US$0.50 Mechanical Turk -0.02 0.0096915880675586
14 |
--------------------------------------------------------------------------------
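A sketch showing how the yi and vi columns above can be reproduced from the
summary-statistic columns with the 'RMD' measure. Per
pymare/tests/test_datasets.py, datasets.michael2013() returns this table as a
pandas DataFrame alongside the metadata dictionary:

    from pymare import datasets
    from pymare.effectsize import compute_measure

    data, meta = datasets.michael2013()
    y, v = compute_measure(
        "RMD",
        m1=data["Brain_m"], m2=data["No_brain_m"],
        sd1=data["Brain_s"], sd2=data["No_brain_s"],
        n1=data["Brain_n"], n2=data["No_brain_n"],
    )
    # y should match data["yi"] (Brain_m - No_brain_m), and v should match
    # data["vi"] (sd1**2 / n1 + sd2**2 / n2)
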
/pymare/stats.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous statistical functions."""
2 |
3 | import numpy as np
4 | import scipy.stats as ss
5 | from scipy.optimize import Bounds, minimize
6 |
7 |
8 | def weighted_least_squares(y, v, X, tau2=0.0, return_cov=False):
9 | """Perform 2-D weighted least squares.
10 |
11 | Parameters
12 | ----------
13 | y : :obj:`numpy.ndarray`
14 | 2-d array of estimates (studies x parallel datasets)
15 | v : :obj:`numpy.ndarray`
16 | 2-d array of sampling variances
17 | X : :obj:`numpy.ndarray`
18 | Fixed effect design matrix
19 | tau2 : :obj:`float`, optional
20 | tau^2 estimate to use for weights.
21 | Default = 0.
22 | return_cov : :obj:`bool`, optional
23 | Whether or not to return the inverse cov matrix.
24 | Default = False.
25 |
26 | Returns
27 | -------
28 | params[, cov]
29 | If return_cov is True, returns both fixed parameter estimates and the
30 | inverse covariance matrix; if False, only the parameter estimates.
31 | """
32 | w = 1.0 / (v + tau2)
33 |
34 | # Einsum indices: k = studies, p = predictors, i = parallel iterates
35 | wX = np.einsum("kp,ki->ipk", X, w)
36 | cov = wX.dot(X)
37 |
38 | # numpy >= 1.8 inverts stacked matrices along the first N - 2 dims, so we
39 | # can vectorize computation along the second dimension (parallel datasets)
40 | precision = np.linalg.pinv(cov).T
41 |
42 | pwX = np.einsum("ipk,qpi->iqk", wX, precision)
43 | beta = np.einsum("ipk,ik->ip", pwX, y.T).T
44 |
45 | return (beta, precision) if return_cov else beta
46 |
47 |
48 | def ensure_2d(arr):
49 | """Ensure the passed array has 2 dimensions."""
50 | if arr is None:
51 | return arr
52 |
53 | try:
54 | arr = np.array(arr)
55 |     except Exception:
56 | return arr
57 |
58 | if arr.ndim == 1:
59 | arr = arr[:, None]
60 |
61 | return arr
62 |
63 |
64 | def q_profile(y, v, X, alpha=0.05):
65 | """Get the CI for tau^2 via the Q-Profile method.
66 |
67 | Parameters
68 | ----------
69 | y : :obj:`numpy.ndarray` of shape (K,)
70 | 1d array of study-level estimates
71 | v : :obj:`numpy.ndarray` of shape (K,)
72 | 1d array of study-level variances
73 | X : :obj:`numpy.ndarray` of shape (K[, P])
74 | 1d or 2d array containing study-level predictors
75 | (including intercept); has dimensions K x P, where K is the number
76 | of studies and P is the number of predictor variables.
77 | alpha : :obj:`float`, optional
78 | alpha value defining the coverage of the CIs,
79 |         where coverage(CI) = 1 - alpha. Default = 0.05.
80 |
81 | Returns
82 | -------
83 | :obj:`dict`
84 | A dictionary with keys 'ci_l' and 'ci_u', corresponding to the lower
85 | and upper bounds of the tau^2 confidence interval, respectively.
86 |
87 | Notes
88 | -----
89 | Following the :footcite:t:`viechtbauer2007confidence` implementation,
90 | this method returns the interval that gives an equal probability mass at both tails
91 | (i.e., ``P(tau^2 <= lower_bound) == P(tau^2 >= upper_bound) == alpha/2``),
92 | and *not* the smallest possible range of tau^2 values that provides the desired coverage.
93 |
94 | References
95 | ----------
96 | .. footbibliography::
97 | """
98 | k, p = X.shape
99 | df = k - p
100 | l_crit = ss.chi2.ppf(1 - alpha / 2, df)
101 | u_crit = ss.chi2.ppf(alpha / 2, df)
102 | args = (ensure_2d(y), ensure_2d(v), X)
103 | bds = Bounds([0], [np.inf], keep_feasible=True)
104 |
105 | # Use the D-L estimate of tau^2 as a starting point; when using a fixed
106 | # value, minimize() sometimes fails to stay in bounds.
107 | from .estimators import DerSimonianLaird
108 |
109 | ub_start = 2 * DerSimonianLaird().fit(y, v, X).params_["tau2"]
110 |
111 | lb = minimize(lambda x: (q_gen(*args, x) - l_crit) ** 2, [0], bounds=bds).x[0]
112 | ub = minimize(lambda x: (q_gen(*args, x) - u_crit) ** 2, ub_start, bounds=bds).x[0]
113 | return {"ci_l": lb, "ci_u": ub}
114 |
115 |
116 | def q_gen(y, v, X, tau2):
117 | """Calculate a generalized form of Cochran's Q-statistic.
118 |
119 | This version of the Q statistic is described in :footcite:t:`veroniki2016methods`.
120 |
121 | Parameters
122 | ----------
123 | y : :obj:`numpy.ndarray`
124 | 1d array of study-level estimates
125 | v : :obj:`numpy.ndarray`
126 | 1d array of study-level variances
127 | X : :obj:`numpy.ndarray`
128 | 1d or 2d array containing study-level predictors
129 | (including intercept); has dimensions K x P, where K is the number
130 | of studies and P is the number of predictor variables.
131 | tau2 : :obj:`float`
132 | Between-study variance. Must be >= 0.
133 |
134 | Returns
135 | -------
136 | :obj:`float`
137 | A float giving the value of Cochran's Q-statistic.
138 |
139 | References
140 | ----------
141 | .. footbibliography::
142 | """
143 | if np.any(tau2 < 0):
144 | raise ValueError("Value of tau^2 must be >= 0.")
145 |
146 | beta = weighted_least_squares(y, v, X, tau2)
147 | w = 1.0 / (v + tau2)
148 | return (w * (y - X.dot(beta)) ** 2).sum(0)
149 |
150 |
151 | def bonferroni(p_values):
152 | """Perform Bonferroni correction on p values.
153 |
154 | This correction is based on the one described in :footcite:t:`bonferroni1936teoria` and
155 | :footcite:t:`shaffer1995multiple`.
156 |
157 | .. versionadded:: 0.0.4
158 |
159 | Parameters
160 | ----------
161 | p_values : :obj:`numpy.ndarray`
162 | Uncorrected p values.
163 |
164 | Returns
165 | -------
166 | p_corr : :obj:`numpy.ndarray`
167 | Corrected p values.
168 |
169 | References
170 | ----------
171 | .. footbibliography::
172 | """
173 | p_corr = p_values * p_values.size
174 | p_corr[p_corr > 1] = 1
175 | return p_corr
176 |
177 |
178 | def fdr(p_values, q=0.05, method="bh"):
179 | """Perform FDR correction on p values.
180 |
181 | .. versionadded:: 0.0.4
182 |
183 | Parameters
184 | ----------
185 | p_values : :obj:`numpy.ndarray`
186 | Array of p values.
187 | q : :obj:`float`, optional
188 |         Alpha value. Default is 0.05. Unused; adjusted p values are returned.
189 | method : {"bh", "by"}, optional
190 | Method to use for correction.
191 | Either "bh" (Benjamini-Hochberg :footcite:p:`benjamini1995controlling`) or
192 | "by" (Benjamini-Yekutieli :footcite:p:`benjamini2001control`).
193 | Default is "bh".
194 |
195 | Returns
196 | -------
197 | p_adjusted : :obj:`numpy.ndarray`
198 | Array of adjusted p values.
199 |
200 | Notes
201 | -----
202 | This function is adapted from ``statsmodels``, which is licensed under a BSD-3 license.
203 |
204 | References
205 | ----------
206 | .. footbibliography::
207 |
208 | See Also
209 | --------
210 | statsmodels.stats.multitest.fdrcorrection
211 | """
212 | sort_idx = np.argsort(p_values)
213 | revert_idx = np.argsort(sort_idx)
214 | p_sorted = p_values[sort_idx]
215 |
216 | n_tests = p_values.size
217 |
218 | # empirical cumulative density function
219 | ecdf = np.linspace(0, 1, n_tests + 1)[1:]
220 | if method == "by":
221 |             # c(m): the Benjamini-Yekutieli correction, the m-th harmonic number
222 | cm = np.sum(1 / np.arange(1, n_tests + 1))
223 | ecdffactor = ecdf / cm
224 | else:
225 | ecdffactor = ecdf
226 |
227 | p_adjusted = p_sorted / ecdffactor
228 | p_adjusted = np.minimum.accumulate(p_adjusted[::-1])[::-1]
229 |     # The reversed cumulative minimum enforces monotonicity from the largest
230 |     # p value down: p_adjusted[i] = min over j >= i of p_sorted[j] / ecdffactor[j]
231 |
232 | p_adjusted[p_adjusted > 1] = 1
233 | p_adjusted = p_adjusted[revert_idx]
234 |
235 | return p_adjusted
236 |
237 |
238 | def var_to_ci(y, v, n):
239 | """Convert sampling variance to 95% CI."""
240 | term = 1.96 * np.sqrt(v) / np.sqrt(n)
241 | return y - term, y + term
242 |
--------------------------------------------------------------------------------
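A quick sketch of the multiple-comparisons helpers defined above, applied to a
hypothetical p-value array:

    import numpy as np

    from pymare.stats import bonferroni, fdr

    p = np.array([0.01, 0.02, 0.03, 0.2, 0.5])
    print(bonferroni(p))        # element-wise p * 5, capped at 1
    print(fdr(p, method="bh"))  # Benjamini-Hochberg step-up adjusted p values
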
/pymare/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """Data for tests."""
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pymare import Dataset
7 |
8 |
9 | @pytest.fixture(scope="package")
10 | def variables():
11 | """Build basic numpy variables."""
12 | y = np.array([[-1, 0.5, 0.5, 0.5, 1, 1, 2, 10]]).T
13 | v = np.array([[1, 1, 2.4, 0.5, 1, 1, 1.2, 1.5]]).T
14 | X = np.array([1, 1, 2, 2, 4, 4, 2.8, 2.8])
15 | return (y, v, X)
16 |
17 |
18 | @pytest.fixture(scope="package")
19 | def small_variance_variables(variables):
20 | """Make highly correlated variables."""
21 | y, v, X = variables
22 | y = X.copy()
23 | v /= 10
24 | return (y, v, X)
25 |
26 |
27 | @pytest.fixture(scope="package")
28 | def dataset(variables):
29 | """Build a Dataset compiled from the variables fixture."""
30 | return Dataset(*variables, X_names=["my_covariate"])
31 |
32 |
33 | @pytest.fixture(scope="package")
34 | def small_variance_dataset(small_variance_variables):
35 | """Build a Dataset compiled from the small variance variables fixture."""
36 | return Dataset(*small_variance_variables, X_names=["my_covariate"])
37 |
38 |
39 | @pytest.fixture(scope="package")
41 | def small_dataset_2d():
41 | """Build a small Dataset with 2D data."""
42 | y = np.array([[1.5, 1.9, 2.2], [4, 2, 1]]).T
43 | v = np.array([[1, 0.8, 3], [1, 1.5, 1]]).T
44 | return Dataset(y, v)
45 |
46 |
47 | @pytest.fixture(scope="package")
48 | def dataset_2d(variables):
49 | """Build a larger Dataset with 2D data."""
50 | y, v, X = variables
51 | y = np.repeat(y, 3, axis=1)
52 | y[:, 1] = np.random.randint(-10, 10, size=len(y))
53 | v = np.repeat(v, 3, axis=1)
54 | v[:, 1] = np.random.randint(2, 10, size=len(v))
55 | return Dataset(y, v, X)
56 |
57 |
58 | @pytest.fixture(scope="package")
59 | def dataset_n():
60 | """Build a Dataset with sample sizes, but no variances."""
61 | y = np.array([[-3.0, -0.5, 0.0, -5.01, 0.35, -2.0, -6.0, -4.0, -4.3, -0.1, -1.0]]).T
62 | n = (
63 | np.array(
64 | [[16, 16, 20.548, 32.597, 14.0, 11.118, 4.444, 12.414, 26.963, 130.556, 126.76]]
65 | ).T
66 | / 2
67 | )
68 | return Dataset(y, n=n)
69 |
70 |
71 | @pytest.fixture(scope="package")
72 | def vars_with_intercept():
73 | """Build basic numpy variables with intercepts included in the design matrix."""
74 | y = np.array([[-1, 0.5, 0.5, 0.5, 1, 1, 2, 10]]).T
75 | v = np.array([[1, 1, 2.4, 0.5, 1, 1, 1.2, 1.5]]).T
76 | X = np.array([np.ones(8), [1, 1, 2, 2, 4, 4, 2.8, 2.8]]).T
77 | return (y, v, X)
78 |
--------------------------------------------------------------------------------
/pymare/tests/test_combination_tests.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.estimators.combination."""
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pymare import Dataset
7 | from pymare.estimators import FisherCombinationTest, StoufferCombinationTest
8 |
9 | _z1 = np.array([2.1, 0.7, -0.2, 4.1, 3.8])[:, None]
10 | _z2 = np.c_[_z1, np.array([-0.6, -1.61, -2.3, -0.8, -4.01])[:, None]]
11 |
12 | _params = [
13 | (StoufferCombinationTest, _z1, "directed", [4.69574]),
14 | (StoufferCombinationTest, _z1, "undirected", [4.87462819]),
15 | (StoufferCombinationTest, _z1, "concordant", [4.55204117]),
16 | (StoufferCombinationTest, _z2, "directed", [4.69574275, -4.16803071]),
17 | (StoufferCombinationTest, _z2, "undirected", [4.87462819, 4.16803071]),
18 | (StoufferCombinationTest, _z2, "concordant", [4.55204117, -4.00717817]),
19 | (FisherCombinationTest, _z1, "directed", [5.22413541]),
20 | (FisherCombinationTest, _z1, "undirected", [5.27449962]),
21 | (FisherCombinationTest, _z1, "concordant", [5.09434911]),
22 | (FisherCombinationTest, _z2, "directed", [5.22413541, -3.30626405]),
23 | (FisherCombinationTest, _z2, "undirected", [5.27449962, 4.27572965]),
24 | (FisherCombinationTest, _z2, "concordant", [5.09434911, -4.11869468]),
25 | ]
26 |
27 |
28 | @pytest.mark.parametrize("Cls,data,mode,expected", _params)
29 | def test_combination_test(Cls, data, mode, expected):
30 | """Test CombinationTest Estimators with numpy data."""
31 | results = Cls(mode).fit(data).params_
32 | assert np.allclose(results["z"], expected, atol=1e-5)
33 |
34 |
35 | @pytest.mark.parametrize("Cls,data,mode,expected", _params)
36 | def test_combination_test_from_dataset(Cls, data, mode, expected):
37 | """Test CombinationTest Estimators with PyMARE Datasets."""
38 | dset = Dataset(y=data)
39 | est = Cls(mode).fit_dataset(dset)
40 | results = est.summary()
41 | assert np.allclose(results.z, expected, atol=1e-5)
42 |
43 |
44 | def test_stouffer_adjusted():
45 | """Test StoufferCombinationTest with weights and groups."""
46 | # Test with weights and groups
47 | data = np.array(
48 | [
49 | [2.1, 0.7, -0.2, 4.1, 3.8],
50 | [1.1, 0.2, 0.4, 1.3, 1.5],
51 | [-0.6, -1.6, -2.3, -0.8, -4.0],
52 | [2.5, 1.7, 2.1, 2.3, 2.5],
53 | [3.1, 2.7, 3.1, 3.3, 3.5],
54 | [3.6, 3.2, 3.6, 3.8, 4.0],
55 | ]
56 | )
57 | weights = np.tile(np.array([4, 3, 4, 10, 15, 10]), (data.shape[1], 1)).T
58 | groups = np.tile(np.array([0, 0, 1, 2, 2, 2]), (data.shape[1], 1)).T
59 |
60 | results = StoufferCombinationTest("directed").fit(z=data, w=weights, g=groups).params_
61 |
62 | z_expected = np.array([5.00088912, 3.70356943, 4.05465924, 5.4633001, 5.18927878])
63 | assert np.allclose(results["z"], z_expected, atol=1e-5)
64 |
65 | # Test with weights and no groups. Limiting cases.
66 | # Limiting case 1: all correlations are one.
67 | n_maps_l1 = 5
68 | common_sample = np.array([2.1, 0.7, -0.2])
69 | data_l1 = np.tile(common_sample, (n_maps_l1, 1))
70 | groups_l1 = np.tile(np.array([0, 0, 0, 0, 0]), (data_l1.shape[1], 1)).T
71 |
72 | results_l1 = StoufferCombinationTest("directed").fit(z=data_l1, g=groups_l1).params_
73 |
74 | sigma_l1 = n_maps_l1 * (n_maps_l1 - 1) # Expected inflation term
75 | z_expected_l1 = n_maps_l1 * common_sample / np.sqrt(n_maps_l1 + sigma_l1)
76 | assert np.allclose(results_l1["z"], z_expected_l1, atol=1e-5)
77 |
78 | # Test with correlation matrix and groups.
79 | data_corr = data - data.mean(0)
80 | corr = np.corrcoef(data_corr, rowvar=True)
81 | results_corr = (
82 | StoufferCombinationTest("directed").fit(z=data, w=weights, g=groups, corr=corr).params_
83 | )
84 |
85 | z_corr_expected = np.array([5.00088912, 3.70356943, 4.05465924, 5.4633001, 5.18927878])
86 | assert np.allclose(results_corr["z"], z_corr_expected, atol=1e-5)
87 |
88 | # Test with no correlation matrix and groups, but only one feature.
89 | with pytest.raises(ValueError):
90 | StoufferCombinationTest("directed").fit(z=data[:, :1], w=weights[:, :1], g=groups)
91 |
92 | # Test with correlation matrix and groups of different shapes.
93 | with pytest.raises(ValueError):
94 | StoufferCombinationTest("directed").fit(z=data, w=weights, g=groups, corr=corr[:-2, :-2])
95 |
96 | # Test with correlation matrix and no groups.
97 | results1 = StoufferCombinationTest("directed").fit(z=_z1, corr=corr).params_
98 |
99 | assert np.allclose(results1["z"], [4.69574], atol=1e-5)
100 |
--------------------------------------------------------------------------------
/pymare/tests/test_core.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.core."""
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import pytest
6 |
7 | from pymare import Dataset, meta_regression
8 |
9 |
10 | def test_dataset_init(variables):
11 | """Test Dataset creation from numpy arrays."""
12 | dataset = Dataset(*variables, X_names=["bork"])
13 |
14 | n = len(variables[0])
15 | assert dataset.X.shape == (n, 2)
16 | assert dataset.X_names == ["intercept", "bork"]
17 |
18 | dataset = Dataset(*variables, X_names=["bork"], add_intercept=False)
19 | assert dataset.X.shape == (n, 1)
20 | assert dataset.X_names == ["bork"]
21 |
22 | df = dataset.to_df()
23 | assert isinstance(df, pd.DataFrame)
24 |
25 |
26 | def test_dataset_init_2D():
27 | """Test Dataset creation from 2D numpy arrays."""
28 | n_studies, n_tests = 100, 10
29 | y = np.random.random((n_studies, n_tests))
30 | v = np.random.random((n_studies, n_tests))
31 | n = np.random.random((n_studies, n_tests))
32 | X = np.random.random((n_studies, 2))
33 | X_names = ["X1", "X2"]
34 | dataset = Dataset(y=y, v=v, n=n, X=X, X_names=X_names)
35 |
36 | assert dataset.y.shape == (n_studies, n_tests)
37 | assert dataset.X.shape == (n_studies, 3)
38 | assert dataset.X_names == ["intercept", "X1", "X2"]
39 |
40 | df = dataset.to_df()
41 | assert isinstance(df, pd.DataFrame)
42 |
43 |
44 | def test_dataset_init_from_df(variables):
45 | """Test Dataset creation from a DataFrame."""
46 | df = pd.DataFrame(
47 | {
48 | "y": [2, 4, 6],
49 | "v_alt": [100, 100, 100],
50 | "sample_size": [10, 20, 30],
51 | "X1": [5, 2, 1],
52 | "X7": [9, 8, 7],
53 | }
54 | )
55 | dataset = Dataset(v="v_alt", X=["X1", "X7"], n="sample_size", data=df)
56 | assert dataset.X.shape == (3, 3)
57 | assert dataset.X_names == ["intercept", "X1", "X7"]
58 | assert np.array_equal(dataset.y, np.array([[2, 4, 6]]).T)
59 | assert np.array_equal(dataset.v, np.array([[100, 100, 100]]).T)
60 | assert np.array_equal(dataset.n, np.array([[10, 20, 30]]).T)
61 |
62 | df2 = dataset.to_df()
63 | assert isinstance(df2, pd.DataFrame)
64 |
65 | # y is undefined
66 | df = pd.DataFrame({"v": [100, 100, 100], "X": [5, 2, 1], "n": [10, 20, 30]})
67 | with pytest.raises(KeyError):
68 | dataset = Dataset(data=df)
69 |
70 | # X is undefined
71 | df = pd.DataFrame({"y": [2, 4, 6], "v_alt": [100, 100, 100], "n": [10, 20, 30]})
72 | dataset = Dataset(v="v_alt", data=df)
73 | assert dataset.X.shape == (3, 1)
74 | assert dataset.X_names == ["intercept"]
75 | assert np.array_equal(dataset.y, np.array([[2, 4, 6]]).T)
76 | assert np.array_equal(dataset.v, np.array([[100, 100, 100]]).T)
77 |
78 | # X is undefined, but add_intercept is False
79 | df = pd.DataFrame({"y": [2, 4, 6], "v_alt": [100, 100, 100], "n": [10, 20, 30]})
80 | with pytest.raises(ValueError):
81 | dataset = Dataset(v="v_alt", data=df, add_intercept=False)
82 |
83 | # v is undefined
84 | df = pd.DataFrame({"y": [2, 4, 6], "X": [5, 2, 1], "n": [10, 20, 30]})
85 | dataset = Dataset(data=df)
86 | assert dataset.X.shape == (3, 2)
87 | assert dataset.X_names == ["intercept", "X"]
88 | assert dataset.v is None
89 | assert np.array_equal(dataset.y, np.array([[2, 4, 6]]).T)
90 |
91 |     # n is undefined
92 | df = pd.DataFrame({"y": [2, 4, 6], "X": [5, 2, 1], "v": [10, 20, 30]})
93 | dataset = Dataset(data=df)
94 | assert dataset.X.shape == (3, 2)
95 | assert dataset.X_names == ["intercept", "X"]
96 | assert dataset.n is None
97 | assert np.array_equal(dataset.y, np.array([[2, 4, 6]]).T)
98 |
99 |
100 | def test_meta_regression_1(variables):
101 | """Test meta_regression function."""
102 | results = meta_regression(*variables, X_names=["my_cov"], method="REML")
103 | beta, tau2 = results.fe_params, results.tau2
104 | assert np.allclose(beta.ravel(), [-0.1066, 0.7700], atol=1e-4)
105 | assert np.allclose(tau2, 10.9499, atol=1e-4)
106 | df = results.to_df()
107 | assert set(df["name"]) == {"my_cov", "intercept"}
108 |
109 |
110 | def test_meta_regression_2(dataset_n):
111 | """Test meta_regression function."""
112 | y, n = dataset_n.y, dataset_n.n
113 | df = meta_regression(y=y, n=n).to_df()
114 | assert df.shape == (1, 7)
115 |
--------------------------------------------------------------------------------
/pymare/tests/test_datasets.py:
--------------------------------------------------------------------------------
1 | """Tests for the pymare.datasets module."""
2 |
3 | import pandas as pd
4 |
5 | from pymare import datasets
6 |
7 |
8 | def test_michael2013():
9 | """Ensure that the Michael 2013 dataset is loadable."""
10 | data, meta = datasets.michael2013()
11 | assert isinstance(data, pd.DataFrame)
12 | assert data.shape == (12, 13)
13 | assert isinstance(meta, dict)
14 |
--------------------------------------------------------------------------------
/pymare/tests/test_effectsize_base.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.effectsize.base."""
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import pytest
6 |
7 | from pymare import Dataset
8 | from pymare.effectsize import (
9 | OneSampleEffectSizeConverter,
10 | TwoSampleEffectSizeConverter,
11 | compute_measure,
12 | )
13 |
14 |
15 | @pytest.fixture(scope="module")
16 | def one_samp_data():
17 | """Create one-sample data for tests."""
18 | return {
19 | "m": np.array([7, 5, 4]),
20 | "sd": np.sqrt(np.array([4.2, 1.2, 1.9])),
21 | "n": np.array([24, 31, 40]),
22 | "r": np.array([0.2, 0.18, 0.3]),
23 | }
24 |
25 |
26 | @pytest.fixture(scope="module")
27 | def two_samp_data():
28 | """Create two-sample data for tests."""
29 | return {
30 | "m1": np.array([4, 2]),
31 | "sd1": np.sqrt(np.array([1, 9])),
32 | "n1": np.array([12, 15]),
33 | "m2": np.array([5, 2.5]),
34 | "sd2": np.sqrt(np.array([4, 16])),
35 | "n2": np.array([12, 16]),
36 | }
37 |
38 |
39 | def test_EffectSizeConverter_smoke_test(two_samp_data):
40 | """Perform a smoke test on the effect size converters."""
41 | data = two_samp_data
42 | esc = OneSampleEffectSizeConverter(m=data["m1"], sd=data["sd1"], n=data["n1"])
43 | assert set(esc.known_vars.keys()) == {"m", "sd", "n"}
44 | assert esc.get_sm().shape == data["m1"].shape
45 | assert not {"d", "sd"} - set(esc.known_vars.keys())
46 |
47 | esc = TwoSampleEffectSizeConverter(**data)
48 | assert set(esc.known_vars.keys()) == set(data.keys())
49 | assert np.allclose(esc.get_d(), np.array([-0.63246, -0.140744]), atol=1e-5)
50 | assert np.allclose(esc.get_smd(), np.array([-0.61065, -0.13707]), atol=1e-5)
51 |
52 |
53 | def test_esc_implicit_dtype_conversion():
54 | """Test effect size conversion with implicit datatype conversion."""
55 | esc = OneSampleEffectSizeConverter(m=[10, 12, 18])
56 | assert "m" in esc.known_vars
57 | assert isinstance(esc.known_vars["m"], np.ndarray)
58 | assert esc.known_vars["m"][1] == 12
59 |
60 |
61 | def test_EffectSizeConverter_from_df(two_samp_data):
62 | """Test effect size conversion from a DataFrame."""
63 | df = pd.DataFrame(two_samp_data)
64 | esc = TwoSampleEffectSizeConverter(df)
65 | assert np.allclose(esc.get_smd(), np.array([-0.61065, -0.13707]), atol=1e-5)
66 |
67 |
68 | def test_EffectSizeConverter_to_dataset(two_samp_data):
69 | """Test conversion of effect-size converter outputs to DataFrame."""
70 | esc = TwoSampleEffectSizeConverter(**two_samp_data)
71 | X = np.array([1, 2])
72 | dataset = esc.to_dataset(X=X, X_names=["dummy"])
73 | assert dataset.__class__.__name__ == "Dataset"
74 | assert dataset.X_names == ["intercept", "dummy"]
75 |
76 |
77 | def test_2d_array_conversion():
78 | """Test conversion of 2D data."""
79 | shape = (10, 2)
80 | data = {
81 | "m": np.random.randint(10, size=shape),
82 | "sd": np.random.randint(1, 10, size=shape),
83 | "n": np.ones(shape) * 40,
84 | }
85 | esc = OneSampleEffectSizeConverter(**data)
86 |
87 |     d = esc.get_d()
88 |     assert np.array_equal(d, data["m"] / data["sd"])
89 |
90 | # smoke test other parameters to make sure all generated numpy funcs can
91 | # handle 2d inputs.
92 | for stat in ["sm"]:
93 | result = esc.get(stat)
94 | assert result.shape == shape
95 |
96 |
97 | def test_convert_r_and_n_to_rz():
98 | """Test Fisher's R-to-Z transform."""
99 | r = [0.2, 0.16, 0.6]
100 | n = (68, 165, 17)
101 | esc = OneSampleEffectSizeConverter(r=r)
102 | zr = esc.get_zr()
103 | assert np.allclose(zr, np.arctanh(r))
104 | # Needs n
105 | with pytest.raises(ValueError, match="Unable to solve"):
106 | esc.get_v_zr()
107 | esc = OneSampleEffectSizeConverter(r=r, n=n)
108 | v_zr = esc.get("V_ZR")
109 | assert np.allclose(v_zr, 1 / (np.array(n) - 3))
110 | ds = esc.to_dataset(measure="ZR")
111 | assert np.allclose(ds.y.ravel(), zr)
112 | assert np.allclose(ds.v.ravel(), v_zr)
113 | assert ds.n is not None
114 |
115 |
116 | def test_convert_r_to_itself():
117 | """Test r-to-r conversion."""
118 | r = np.array([0.2, 0.16, 0.6])
119 | n = np.array((68, 165, 17))
120 | esc = OneSampleEffectSizeConverter(r=r)
121 | also_r = esc.get_r()
122 | assert np.array_equal(r, also_r)
123 | # Needs n
124 | with pytest.raises(ValueError, match="Unable to solve"):
125 | esc.get_v_r()
126 | esc = OneSampleEffectSizeConverter(r=r, n=n)
127 | v_r = esc.get("V_R")
128 | assert np.allclose(v_r, (1 - r**2) / (n - 2))
129 | ds = esc.to_dataset(measure="R")
130 | assert np.allclose(ds.y.ravel(), r)
131 | assert np.allclose(ds.v.ravel(), v_r)
132 | assert ds.n is not None
133 |
134 |
135 | def test_compute_measure(one_samp_data, two_samp_data):
136 | """Test the compute_measure function."""
137 | # Default args
138 | base_result = compute_measure("SM", **one_samp_data)
139 | assert isinstance(base_result, tuple)
140 | assert len(base_result) == 2
141 | assert base_result[0].shape == one_samp_data["m"].shape
142 |
143 | # Explicit and correct comparison type
144 | result2 = compute_measure("SM", comparison=1, **one_samp_data)
145 | assert np.array_equal(np.array(base_result), np.array(result2))
146 |
147 | # Incorrect comparison type fails downstream
148 | with pytest.raises(ValueError):
149 | compute_measure("SM", comparison=2, **one_samp_data)
150 |
151 | # Ambiguous comparison type
152 | with pytest.raises(ValueError, match=r"Requested measure \(D\)"):
153 | compute_measure("D", **one_samp_data, **two_samp_data)
154 |
155 | # Works with explicit comparison type: check for both comparison types
156 | result = compute_measure("D", comparison=1, **one_samp_data, **two_samp_data)
157 | conv = compute_measure("D", **one_samp_data, return_type="converter")
158 | assert isinstance(conv, OneSampleEffectSizeConverter)
159 | assert np.array_equal(result[1], conv.get_v_d())
160 |
161 | result = compute_measure("D", comparison=2, **one_samp_data, **two_samp_data)
162 | conv = compute_measure("D", **two_samp_data, return_type="converter")
163 | assert isinstance(conv, TwoSampleEffectSizeConverter)
164 | assert np.array_equal(result[1], conv.get_v_d())
165 |
166 | # Test other return types
167 | result = compute_measure("SM", return_type="dict", **one_samp_data)
168 | assert np.array_equal(base_result[1], result["v"])
169 |
170 | dset = compute_measure(
171 | "SM", return_type="dataset", **one_samp_data, X=[4, 3, 2], X_names=["my_covar"]
172 | )
173 | assert isinstance(dset, Dataset)
174 | assert np.array_equal(base_result[1], dset.v.ravel())
175 | assert dset.X.shape == (3, 2)
176 | assert dset.X_names == ["intercept", "my_covar"]
177 |
178 | # Test with input DataFrame
179 | df = pd.DataFrame(two_samp_data)
180 | result = compute_measure("RMD", df)
181 | assert np.array_equal(result[0], df["m1"].values - df["m2"].values)
182 |
183 | # Override one of the DF columns
184 | result = compute_measure("RMD", df, m1=[3, 3])
185 | assert not np.array_equal(result[0], df["m1"].values - df["m2"].values)
186 |
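187 |
188 | # Illustrative sketch (not collected by pytest; the summary statistics are
189 | # made up): the typical conversion workflow goes from summary statistics,
190 | # through compute_measure, to a Dataset ready for meta-analysis.
191 | if __name__ == "__main__":
192 |     sample_stats = {"m": np.array([4.0, 2.0]), "sd": np.array([1.0, 2.0]), "n": np.array([12, 15])}
193 |     y, v = compute_measure("SM", **sample_stats)
194 |     dset = compute_measure("SM", return_type="dataset", **sample_stats)
195 |     print(y, v, dset.y.ravel())
196 |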
--------------------------------------------------------------------------------
/pymare/tests/test_effectsize_expressions.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.effectsize.expressions."""
2 |
3 | import pytest
4 | from sympy import Symbol
5 | from sympy.core.sympify import SympifyError
6 |
7 | from pymare.effectsize.expressions import Expression, select_expressions
8 |
9 |
10 | def _symbol_set(*args):
11 | return set([Symbol(a) for a in args])
12 |
13 |
14 | def test_Expression_init():
15 | """Test Expression initialization."""
16 | # Fails because SymPy can't parse expression
17 | with pytest.raises(SympifyError):
18 | Expression('This isn"t 29 a valid + expre55!on!')
19 |
20 | exp = Expression("x / 4 * y")
21 | assert exp.symbols == _symbol_set("x", "y")
22 | assert exp.description is None
23 | assert exp.type == 0
24 |
25 | exp = Expression("x + y - cos(z)", "Test expression", 1)
26 | assert exp.symbols == _symbol_set("x", "y", "z")
27 | assert exp.description == "Test expression"
28 | assert exp.type == 1
29 |
30 |
31 | def test_select_expressions():
32 | """Test select_expressions function."""
33 | exps = select_expressions("sd", {"d", "m"})
34 | assert len(exps) == 1
35 | assert exps[0].symbols == _symbol_set("sd", "d", "m")
36 |
37 | assert select_expressions("v_d", {"d"}) is None
38 |
39 | exps = select_expressions("sm", known_vars={"m", "n", "sd"})
40 | assert len(exps) == 3
41 | targets = {"j - 1 + 3/(4*n - 5)", "-d*j + sm", "d - m/sd"}
42 | assert set([str(e.sympy) for e in exps]) == targets
43 |
44 | assert select_expressions("v_d", {"d", "n"}, 2) is None
45 |
46 | exps = select_expressions("d", {"m1", "m2", "sd1", "sd2", "n1", "n2"}, 2)
47 | assert len(exps) == 2
48 | target = _symbol_set("d", "m1", "m2", "sdp")
49 | assert exps[0].symbols == target or exps[1].symbols == target
50 |
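51 |
52 | # Illustrative sketch (not collected by pytest): select_expressions returns
53 | # the minimal set of expressions needed to derive the target quantity from
54 | # the known variables, or None if the system cannot be solved.
55 | if __name__ == "__main__":
56 |     for exp in select_expressions("sm", known_vars={"m", "n", "sd"}):
57 |         print(exp.sympy, "::", exp.description)
58 |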
--------------------------------------------------------------------------------
/pymare/tests/test_estimators.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.estimators.estimators."""
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pymare import Dataset
7 | from pymare.estimators import (
8 | DerSimonianLaird,
9 | Hedges,
10 | SampleSizeBasedLikelihoodEstimator,
11 | VarianceBasedLikelihoodEstimator,
12 | WeightedLeastSquares,
13 | )
14 |
15 |
16 | def test_weighted_least_squares_estimator(dataset):
17 | """Test WeightedLeastSquares estimator."""
18 |     # Ground truth values are from the metafor package in R
19 | est = WeightedLeastSquares().fit_dataset(dataset)
20 | results = est.summary()
21 | beta, tau2 = results.fe_params, results.tau2
22 | fe_stats = results.get_fe_stats()
23 |
24 | # Check output shapes
25 | assert beta.shape == (2, 1)
26 | assert isinstance(tau2, float)
27 | assert fe_stats["est"].shape == (2, 1)
28 | assert fe_stats["se"].shape == (2, 1)
29 | assert fe_stats["ci_l"].shape == (2, 1)
30 | assert fe_stats["ci_u"].shape == (2, 1)
31 | assert fe_stats["z"].shape == (2, 1)
32 | assert fe_stats["p"].shape == (2, 1)
33 |
34 | # Check output values
35 | assert np.allclose(beta.ravel(), [-0.2725, 0.6935], atol=1e-4)
36 | assert tau2 == 0.0
37 |
38 | # With non-zero tau^2
39 | est = WeightedLeastSquares(8.0).fit_dataset(dataset)
40 | results = est.summary()
41 | beta, tau2 = results.fe_params, results.tau2
42 | assert np.allclose(beta.ravel(), [-0.1071, 0.7657], atol=1e-4)
43 | assert tau2 == 8.0
44 |
45 |
46 | def test_dersimonian_laird_estimator(dataset):
47 | """Test DerSimonianLaird estimator."""
48 |     # Ground truth values are from the metafor package in R
49 | est = DerSimonianLaird().fit_dataset(dataset)
50 | results = est.summary()
51 | beta, tau2 = results.fe_params, results.tau2
52 | fe_stats = results.get_fe_stats()
53 |
54 | # Check output shapes
55 | assert beta.shape == (2, 1)
56 | assert tau2.shape == (1,)
57 | assert fe_stats["est"].shape == (2, 1)
58 | assert fe_stats["se"].shape == (2, 1)
59 | assert fe_stats["ci_l"].shape == (2, 1)
60 | assert fe_stats["ci_u"].shape == (2, 1)
61 | assert fe_stats["z"].shape == (2, 1)
62 | assert fe_stats["p"].shape == (2, 1)
63 |
64 | # Check output values
65 | assert np.allclose(beta.ravel(), [-0.1070, 0.7664], atol=1e-4)
66 | assert np.allclose(tau2, 8.3627, atol=1e-4)
67 |
68 |
69 | def test_2d_DL_estimator(dataset_2d):
70 | """Test DerSimonianLaird estimator on 2D Dataset."""
71 | results = DerSimonianLaird().fit_dataset(dataset_2d).summary()
72 | beta, tau2 = results.fe_params, results.tau2
73 | fe_stats = results.get_fe_stats()
74 |
75 | # Check output shapes
76 | assert beta.shape == (2, 3)
77 | assert tau2.shape == (3,)
78 | assert fe_stats["est"].shape == (2, 3)
79 | assert fe_stats["se"].shape == (2, 3)
80 | assert fe_stats["ci_l"].shape == (2, 3)
81 | assert fe_stats["ci_u"].shape == (2, 3)
82 | assert fe_stats["z"].shape == (2, 3)
83 | assert fe_stats["p"].shape == (2, 3)
84 |
85 | # Check output values
86 |     # First and third sets are identical to the previous DL test; the second
87 |     # set is randomly different.
88 | assert np.allclose(beta[:, 0], [-0.1070, 0.7664], atol=1e-4)
89 | assert np.allclose(tau2[0], 8.3627, atol=1e-4)
90 | assert not np.allclose(beta[:, 1], [-0.1070, 0.7664], atol=1e-4)
91 | assert not np.allclose(tau2[1], 8.3627, atol=1e-4)
92 | assert np.allclose(beta[:, 2], [-0.1070, 0.7664], atol=1e-4)
93 | assert np.allclose(tau2[2], 8.3627, atol=1e-4)
94 |
95 |
96 | def test_hedges_estimator(dataset):
97 | """Test Hedges estimator."""
98 |     # Ground truth values are from the metafor package in R, except for tau2,
99 |     # where metafor gives negligibly different values, likely due to
100 |     # algorithmic differences in the computation.
101 | est = Hedges().fit_dataset(dataset)
102 | results = est.summary()
103 | beta, tau2 = results.fe_params, results.tau2
104 | fe_stats = results.get_fe_stats()
105 |
106 | # Check output shapes
107 | assert beta.shape == (2, 1)
108 | assert tau2.shape == (1,)
109 | assert fe_stats["est"].shape == (2, 1)
110 | assert fe_stats["se"].shape == (2, 1)
111 | assert fe_stats["ci_l"].shape == (2, 1)
112 | assert fe_stats["ci_u"].shape == (2, 1)
113 | assert fe_stats["z"].shape == (2, 1)
114 | assert fe_stats["p"].shape == (2, 1)
115 |
116 | # Check output values
117 | assert np.allclose(beta.ravel(), [-0.1066, 0.7704], atol=1e-4)
118 | assert np.allclose(tau2, 11.3881, atol=1e-4)
119 |
120 |
121 | def test_2d_hedges(dataset_2d):
122 | """Test Hedges estimator on 2D Dataset."""
123 | results = Hedges().fit_dataset(dataset_2d).summary()
124 | beta, tau2 = results.fe_params, results.tau2
125 | fe_stats = results.get_fe_stats()
126 |
127 | # Check output shapes
128 | assert beta.shape == (2, 3)
129 | assert tau2.shape == (3,)
130 | assert fe_stats["est"].shape == (2, 3)
131 | assert fe_stats["se"].shape == (2, 3)
132 | assert fe_stats["ci_l"].shape == (2, 3)
133 | assert fe_stats["ci_u"].shape == (2, 3)
134 | assert fe_stats["z"].shape == (2, 3)
135 | assert fe_stats["p"].shape == (2, 3)
136 |
137 |     # First and third sets are identical to the single-dim test; the second
138 |     # set is randomly different.
139 | assert np.allclose(beta[:, 0], [-0.1066, 0.7704], atol=1e-4)
140 | assert np.allclose(tau2[0], 11.3881, atol=1e-4)
141 |     assert not np.allclose(beta[:, 1], [-0.1066, 0.7704], atol=1e-4)
142 | assert not np.allclose(tau2[1], 11.3881, atol=1e-4)
143 | assert np.allclose(beta[:, 2], [-0.1066, 0.7704], atol=1e-4)
144 | assert np.allclose(tau2[2], 11.3881, atol=1e-4)
145 |
146 |
147 | def test_variance_based_maximum_likelihood_estimator(dataset):
148 | """Test VarianceBasedLikelihoodEstimator estimator."""
149 |     # Ground truth values are from the metafor package in R
150 | est = VarianceBasedLikelihoodEstimator(method="ML").fit_dataset(dataset)
151 | results = est.summary()
152 | beta, tau2 = results.fe_params, results.tau2
153 | fe_stats = results.get_fe_stats()
154 |
155 | # Check output shapes
156 | assert beta.shape == (2, 1)
157 | assert tau2.shape == (1, 1)
158 | assert fe_stats["est"].shape == (2, 1)
159 | assert fe_stats["se"].shape == (2, 1)
160 | assert fe_stats["ci_l"].shape == (2, 1)
161 | assert fe_stats["ci_u"].shape == (2, 1)
162 | assert fe_stats["z"].shape == (2, 1)
163 | assert fe_stats["p"].shape == (2, 1)
164 |
165 | # Check output values
166 | assert np.allclose(beta.ravel(), [-0.1072, 0.7653], atol=1e-4)
167 | assert np.allclose(tau2, 7.7649, atol=1e-4)
168 |
169 |
170 | def test_variance_based_restricted_maximum_likelihood_estimator(dataset):
171 | """Test VarianceBasedLikelihoodEstimator estimator with REML."""
172 |     # Ground truth values are from the metafor package in R
173 | est = VarianceBasedLikelihoodEstimator(method="REML").fit_dataset(dataset)
174 | results = est.summary()
175 | beta, tau2 = results.fe_params, results.tau2
176 | fe_stats = results.get_fe_stats()
177 |
178 | # Check output shapes
179 | assert beta.shape == (2, 1)
180 | assert tau2.shape == (1, 1)
181 | assert fe_stats["est"].shape == (2, 1)
182 | assert fe_stats["se"].shape == (2, 1)
183 | assert fe_stats["ci_l"].shape == (2, 1)
184 | assert fe_stats["ci_u"].shape == (2, 1)
185 | assert fe_stats["z"].shape == (2, 1)
186 | assert fe_stats["p"].shape == (2, 1)
187 |
188 | # Check output values
189 | assert np.allclose(beta.ravel(), [-0.1066, 0.7700], atol=1e-4)
190 | assert np.allclose(tau2, 10.9499, atol=1e-4)
191 |
192 |
193 | def test_sample_size_based_maximum_likelihood_estimator(dataset_n):
194 | """Test SampleSizeBasedLikelihoodEstimator estimator."""
195 |     # Test values have not been verified against other packages
196 | est = SampleSizeBasedLikelihoodEstimator(method="ML").fit_dataset(dataset_n)
197 | results = est.summary()
198 | beta = results.fe_params
199 | sigma2 = results.estimator.params_["sigma2"]
200 | tau2 = results.tau2
201 | fe_stats = results.get_fe_stats()
202 |
203 | # Check output shapes
204 | assert beta.shape == (1, 1)
205 | assert sigma2.shape == (1, 1)
206 | assert tau2.shape == (1, 1)
207 | assert fe_stats["est"].shape == (1, 1)
208 | assert fe_stats["se"].shape == (1, 1)
209 | assert fe_stats["ci_l"].shape == (1, 1)
210 | assert fe_stats["ci_u"].shape == (1, 1)
211 | assert fe_stats["z"].shape == (1, 1)
212 | assert fe_stats["p"].shape == (1, 1)
213 |
214 | # Check output values
215 | assert np.allclose(beta, [-2.0951], atol=1e-4)
216 | assert np.allclose(sigma2, 12.777, atol=1e-3)
217 | assert np.allclose(tau2, 2.8268, atol=1e-4)
218 |
219 |
220 | def test_sample_size_based_restricted_maximum_likelihood_estimator(dataset_n):
221 | """Test SampleSizeBasedLikelihoodEstimator REML estimator."""
222 |     # Test values have not been verified against other packages
223 | est = SampleSizeBasedLikelihoodEstimator(method="REML").fit_dataset(dataset_n)
224 | results = est.summary()
225 | beta = results.fe_params
226 | sigma2 = results.estimator.params_["sigma2"]
227 | tau2 = results.tau2
228 | fe_stats = results.get_fe_stats()
229 |
230 | # Check output shapes
231 | assert beta.shape == (1, 1)
232 | assert sigma2.shape == (1, 1)
233 | assert tau2.shape == (1, 1)
234 | assert fe_stats["est"].shape == (1, 1)
235 | assert fe_stats["se"].shape == (1, 1)
236 | assert fe_stats["ci_l"].shape == (1, 1)
237 | assert fe_stats["ci_u"].shape == (1, 1)
238 | assert fe_stats["z"].shape == (1, 1)
239 | assert fe_stats["p"].shape == (1, 1)
240 |
241 | # Check output values
242 | assert np.allclose(beta, [-2.1071], atol=1e-4)
243 | assert np.allclose(sigma2, 13.048, atol=1e-3)
244 | assert np.allclose(tau2, 3.2177, atol=1e-4)
245 |
246 |
247 | def test_2d_looping(dataset_2d):
248 | """Test 2D looping in estimators."""
249 | est = VarianceBasedLikelihoodEstimator().fit_dataset(dataset_2d)
250 | results = est.summary()
251 | beta, tau2 = results.fe_params, results.tau2
252 | fe_stats = results.get_fe_stats()
253 |
254 | # Check output shapes
255 | assert beta.shape == (2, 3)
256 | assert tau2.shape == (1, 3)
257 | assert fe_stats["est"].shape == (2, 3)
258 | assert fe_stats["se"].shape == (2, 3)
259 | assert fe_stats["ci_l"].shape == (2, 3)
260 | assert fe_stats["ci_u"].shape == (2, 3)
261 | assert fe_stats["z"].shape == (2, 3)
262 | assert fe_stats["p"].shape == (2, 3)
263 |
264 | # Check output values
265 |     # First and third sets are identical to the single-dim test; the second is different
266 | assert np.allclose(beta[:, 0], [-0.1072, 0.7653], atol=1e-4)
267 | assert np.allclose(tau2[0, 0], 7.7649, atol=1e-4)
268 | assert not np.allclose(beta[:, 1], [-0.1072, 0.7653], atol=1e-4)
269 | assert not np.allclose(tau2[0, 1], 7.7649, atol=1e-4)
270 | assert np.allclose(beta[:, 2], [-0.1072, 0.7653], atol=1e-4)
271 | assert np.allclose(tau2[0, 2], 7.7649, atol=1e-4)
272 |
273 |
274 | def test_2d_loop_warning(dataset_2d):
275 | """Test 2D looping warning on certain estimators."""
276 | est = VarianceBasedLikelihoodEstimator()
277 | y = np.random.normal(size=(10, 100))
278 | v = np.random.randint(1, 50, size=(10, 100))
279 | dataset = Dataset(y, v)
280 | # Warning is raised when 2nd dim is > 10
281 | with pytest.warns(UserWarning, match="Input contains"):
282 | est.fit_dataset(dataset)
283 | # But not when it's smaller
284 | est.fit_dataset(dataset_2d)
285 |
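286 |
287 | # Illustrative sketch (not collected by pytest; toy values): the common
288 | # workflow shared by all of the estimators exercised above.
289 | if __name__ == "__main__":
290 |     dataset = Dataset([1.0, 1.0, 2.0, 1.3], [1.5, 1.0, 2.0, 4.0])
291 |     results = DerSimonianLaird().fit_dataset(dataset).summary()
292 |     print(results.fe_params, results.tau2)
293 |     print(results.get_fe_stats()["p"])
294 |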
--------------------------------------------------------------------------------
/pymare/tests/test_results.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.results."""
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pymare import Dataset
7 | from pymare.estimators import (
8 | DerSimonianLaird,
9 | SampleSizeBasedLikelihoodEstimator,
10 | StoufferCombinationTest,
11 | VarianceBasedLikelihoodEstimator,
12 | WeightedLeastSquares,
13 | )
14 | from pymare.results import CombinationTestResults, MetaRegressionResults
15 |
16 |
17 | @pytest.fixture
18 | def fitted_estimator(dataset):
19 | """Create a fitted Estimator as a fixture."""
20 | est = DerSimonianLaird()
21 | return est.fit_dataset(dataset)
22 |
23 |
24 | @pytest.fixture
25 | def small_variance_estimator(small_variance_dataset):
26 | """Create a fitted Estimator with small variances as a fixture."""
27 | est = DerSimonianLaird()
28 | return est.fit_dataset(small_variance_dataset)
29 |
30 |
31 | @pytest.fixture
32 | def results(fitted_estimator):
33 | """Create a results object as a fixture."""
34 | return fitted_estimator.summary()
35 |
36 |
37 | @pytest.fixture
38 | def small_variance_results(small_variance_estimator):
39 | """Create a results object with small variances as a fixture."""
40 | return small_variance_estimator.summary()
41 |
42 |
43 | @pytest.fixture
44 | def results_2d(dataset_2d):
45 | """Create a 2D results object as a fixture."""
46 | est = VarianceBasedLikelihoodEstimator()
47 | return est.fit_dataset(dataset_2d).summary()
48 |
49 |
50 | def test_meta_regression_results_from_arrays(dataset):
51 | """Ensure that a MetaRegressionResults can be created from arrays.
52 |
53 | This is a regression test for a bug that caused the MetaRegressionResults
54 | to fail when Estimators were fitted to arrays instead of Datasets.
55 | See https://github.com/neurostuff/PyMARE/issues/52 for more info.
56 | """
57 | est = DerSimonianLaird()
58 | fitted_estimator = est.fit(y=dataset.y, X=dataset.X, v=dataset.v)
59 | results = fitted_estimator.summary()
60 | assert isinstance(results, MetaRegressionResults)
61 | assert results.fe_params.shape == (2, 1)
62 | assert results.fe_cov.shape == (2, 2, 1)
63 | assert results.tau2.shape == (1,)
64 |
65 | # fit overwrites dataset_ attribute with None
66 | assert fitted_estimator.dataset_ is None
67 | # fit_dataset overwrites it with the Dataset
68 | fitted_estimator.fit_dataset(dataset)
69 | assert isinstance(fitted_estimator.dataset_, Dataset)
70 | # fit sets it back to None
71 | fitted_estimator.fit(y=dataset.y, X=dataset.X, v=dataset.v)
72 | assert fitted_estimator.dataset_ is None
73 |
74 | # Some methods are not available if fit was used
75 | results = fitted_estimator.summary()
76 | with pytest.raises(ValueError):
77 | results.get_re_stats()
78 |
79 | with pytest.raises(ValueError):
80 | results.get_heterogeneity_stats()
81 |
82 | with pytest.raises(ValueError):
83 | results.to_df()
84 |
85 | with pytest.raises(ValueError):
86 | results.permutation_test(1000)
87 |
88 |
89 | def test_combination_test_results_from_arrays(dataset):
90 | """Ensure that a CombinationTestResults can be created from arrays.
91 |
92 |     This is a regression test for a bug that caused the CombinationTestResults
93 | to fail when Estimators were fitted to arrays instead of Datasets.
94 | See https://github.com/neurostuff/PyMARE/issues/52 for more info.
95 | """
96 | fitted_estimator = StoufferCombinationTest().fit(z=dataset.y)
97 | results = fitted_estimator.summary()
98 | assert isinstance(results, CombinationTestResults)
99 | assert results.p.shape == (1,)
100 |
101 | # fit overwrites dataset_ attribute with None
102 | assert fitted_estimator.dataset_ is None
103 |
104 | # fit_dataset overwrites it with the Dataset
105 | fitted_estimator.fit_dataset(Dataset(dataset.y))
106 | assert isinstance(fitted_estimator.dataset_, Dataset)
107 | # fit sets it back to None
108 | fitted_estimator.fit(z=dataset.y)
109 | assert fitted_estimator.dataset_ is None
110 |
111 | # Some methods are not available if fit was used
112 | with pytest.raises(ValueError):
113 | fitted_estimator.summary().permutation_test(1000)
114 |
115 |
116 | def test_meta_regression_results_init_1d(fitted_estimator):
117 | """Test MetaRegressionResults from 1D data."""
118 | est = fitted_estimator
119 | results = MetaRegressionResults(
120 | est, est.dataset_, est.params_["fe_params"], est.params_["inv_cov"], est.params_["tau2"]
121 | )
122 | assert isinstance(est.summary(), MetaRegressionResults)
123 | assert results.fe_params.shape == (2, 1)
124 | assert results.fe_cov.shape == (2, 2, 1)
125 | assert results.tau2.shape == (1,)
126 |
127 |
128 | def test_meta_regression_results_init_2d(results_2d):
129 | """Test MetaRegressionResults from 2D data."""
130 | assert isinstance(results_2d, MetaRegressionResults)
131 | assert results_2d.fe_params.shape == (2, 3)
132 | assert results_2d.fe_cov.shape == (2, 2, 3)
133 | assert results_2d.tau2.shape == (1, 3)
134 |
135 |
136 | def test_mrr_fe_se(results, results_2d):
137 | """Test MetaRegressionResults fixed-effect standard error estimates."""
138 | se_1d, se_2d = results.fe_se, results_2d.fe_se
139 | assert se_1d.shape == (2, 1)
140 | assert se_2d.shape == (2, 3)
141 | assert np.allclose(se_1d.T, [2.6512, 0.9857], atol=1e-4)
142 | assert np.allclose(se_2d[:, 0].T, [2.5656, 0.9538], atol=1e-4)
143 |
144 |
145 | def test_mrr_get_fe_stats(results):
146 | """Test MetaRegressionResults.get_fe_stats."""
147 | stats = results.get_fe_stats()
148 | assert isinstance(stats, dict)
149 | assert set(stats.keys()) == {"est", "se", "ci_l", "ci_u", "z", "p"}
150 | assert np.allclose(stats["ci_l"].T, [-5.3033, -1.1655], atol=1e-4)
151 | assert np.allclose(stats["p"].T, [0.9678, 0.4369], atol=1e-4)
152 |
153 |
154 | def test_mrr_get_re_stats(results_2d):
155 | """Test MetaRegressionResults.get_re_stats."""
156 | stats = results_2d.get_re_stats()
157 | assert isinstance(stats, dict)
158 | assert set(stats.keys()) == {"tau^2", "ci_l", "ci_u"}
159 | assert stats["tau^2"].shape == (1, 3)
160 | assert stats["ci_u"].shape == (3,)
161 | assert round(stats["tau^2"][0, 2], 4) == 7.7649
162 | assert round(stats["ci_l"][2], 4) == 3.8076
163 | assert round(stats["ci_u"][2], 2) == 59.61
164 |
165 |
166 | def test_mrr_get_heterogeneity_stats(results_2d):
167 | """Test MetaRegressionResults.get_heterogeneity_stats."""
168 | stats = results_2d.get_heterogeneity_stats()
169 |     assert len(stats["Q"]) == 3
170 | assert round(stats["Q"][2], 4) == 53.8052
171 | assert round(stats["I^2"][0], 4) == 88.8487
172 | assert round(stats["H"][0], 4) == 2.9946
173 | assert stats["p(Q)"][0] < 1e-5
174 |
175 |
176 | def test_mrr_to_df(results):
177 | """Test conversion of MetaRegressionResults to DataFrame."""
178 | df = results.to_df()
179 | assert df.shape == (2, 7)
180 | col_names = {"estimate", "p-value", "z-score", "ci_0.025", "ci_0.975", "se", "name"}
181 | assert set(df.columns) == col_names
182 | assert np.allclose(df["p-value"].values, [0.9678, 0.4369], atol=1e-4)
183 |
184 |
185 | def test_small_variance_mrr_to_df(small_variance_results):
186 | """Test conversion of MetaRegressionResults to DataFrame."""
187 | df = small_variance_results.to_df()
188 | assert df.shape == (2, 7)
189 | col_names = {"estimate", "p-value", "z-score", "ci_0.025", "ci_0.975", "se", "name"}
190 | assert set(df.columns) == col_names
191 | assert np.allclose(df["p-value"].values, [1, np.finfo(np.float64).eps], atol=1e-4)
192 |
193 |
194 | def test_estimator_summary(dataset):
195 | """Test Estimator's summary method."""
196 | est = WeightedLeastSquares()
197 | # Fails if we haven't fitted yet
198 | with pytest.raises(ValueError):
199 | est.summary()
200 |
201 | est.fit_dataset(dataset)
202 | summary = est.summary()
203 | assert isinstance(summary, MetaRegressionResults)
204 |
205 |
206 | def test_exact_perm_test_2d_no_mods(small_dataset_2d):
207 | """Test the exact permutation test on 2D data."""
208 | results = DerSimonianLaird().fit_dataset(small_dataset_2d).summary()
209 | pmr = results.permutation_test(1000)
210 | assert pmr.n_perm == 8
211 | assert pmr.exact
212 | assert isinstance(pmr.results, MetaRegressionResults)
213 | assert pmr.perm_p["fe_p"].shape == (1, 2)
214 | assert pmr.perm_p["tau2_p"].shape == (2,)
215 |
216 |
217 | def test_approx_perm_test_1d_with_mods(results):
218 | """Test the approximate permutation test on 2D data."""
219 | pmr = results.permutation_test(1000)
220 | assert pmr.n_perm == 1000
221 | assert not pmr.exact
222 | assert isinstance(pmr.results, MetaRegressionResults)
223 | assert pmr.perm_p["fe_p"].shape == (2, 1)
224 | assert pmr.perm_p["tau2_p"].shape == (1,)
225 |
226 |
227 | def test_exact_perm_test_1d_no_mods():
228 | """Test the exact permutation test on 1D data."""
229 | dataset = Dataset([1, 1, 2, 1.3], [1.5, 1, 2, 4])
230 | results = DerSimonianLaird().fit_dataset(dataset).summary()
231 | pmr = results.permutation_test(867)
232 | assert pmr.n_perm == 16
233 | assert pmr.exact
234 | assert isinstance(pmr.results, MetaRegressionResults)
235 | assert pmr.perm_p["fe_p"].shape == (1, 1)
236 | assert pmr.perm_p["tau2_p"].shape == (1,)
237 |
238 |
239 | def test_approx_perm_test_with_n_based_estimator(dataset_n):
240 | """Test the approximate permutation test on an sample size-based Estimator."""
241 | results = SampleSizeBasedLikelihoodEstimator().fit_dataset(dataset_n).summary()
242 | pmr = results.permutation_test(100)
243 | assert pmr.n_perm == 100
244 | assert not pmr.exact
245 | assert isinstance(pmr.results, MetaRegressionResults)
246 | assert pmr.perm_p["fe_p"].shape == (1, 1)
247 | assert pmr.perm_p["tau2_p"].shape == (1,)
248 |
249 |
250 | def test_stouffers_perm_test_exact():
251 | """Test the exact permutation test on Stouffers Estimator."""
252 | dataset = Dataset([1, 1, 2, 1.3], [1.5, 1, 2, 4])
253 | results = StoufferCombinationTest().fit_dataset(dataset).summary()
254 | pmr = results.permutation_test(2000)
255 | assert pmr.n_perm == 16
256 | assert pmr.exact
257 | assert isinstance(pmr.results, CombinationTestResults)
258 | assert pmr.perm_p["fe_p"].shape == (1,)
259 | assert "tau2_p" not in pmr.perm_p
260 |
261 |
262 | def test_stouffers_perm_test_approx():
263 | """Test the approximate permutation test on Stouffers Estimator."""
264 | y = [2.8, -0.2, -1, 4.5, 1.9, 2.38, 0.6, 1.88, -0.4, 1.5, 3.163, 0.7]
265 | dataset = Dataset(y)
266 | results = StoufferCombinationTest().fit_dataset(dataset).summary()
267 | pmr = results.permutation_test(2000)
268 | assert not pmr.exact
269 | assert pmr.n_perm == 2000
270 | assert isinstance(pmr.results, CombinationTestResults)
271 | assert pmr.perm_p["fe_p"].shape == (1,)
272 | assert "tau2_p" not in pmr.perm_p
273 |
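274 |
275 | # Illustrative sketch (not collected by pytest): with four studies and no
276 | # moderators there are only 2^4 = 16 sign-flip permutations, so
277 | # permutation_test falls back to the exact test regardless of n_perm.
278 | if __name__ == "__main__":
279 |     dataset = Dataset([1, 1, 2, 1.3], [1.5, 1, 2, 4])
280 |     pmr = DerSimonianLaird().fit_dataset(dataset).summary().permutation_test(1000)
281 |     print(pmr.n_perm, pmr.exact, pmr.perm_p["fe_p"])
282 |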
--------------------------------------------------------------------------------
/pymare/tests/test_stan_estimators.py:
--------------------------------------------------------------------------------
1 | """Tests for estimators that use stan."""
2 |
3 | import sys
4 |
5 | import pytest
6 |
7 | from pymare.estimators import StanMetaRegression
8 |
9 |
10 | @pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python 3.7 or higher")
11 | def test_stan_estimator(dataset):
12 | """Run smoke test for StanMetaRegression."""
13 |     # No ground truth here, so we use sanity checks and rough bounds
14 | est = StanMetaRegression(num_samples=3000).fit_dataset(dataset)
15 | results = est.summary()
16 | assert "BayesianMetaRegressionResults" == results.__class__.__name__
17 | summary = results.summary(["beta", "tau2"])
18 | beta1, beta2, tau2 = summary["mean"].values[:3]
19 | assert -0.5 < beta1 < 0.1
20 | assert 0.6 < beta2 < 0.9
21 | assert 3 < tau2 < 5
22 |
23 |
24 | @pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python 3.7 or higher")
25 | def test_stan_2d_input_failure(dataset_2d):
26 | """Run smoke test for StanMetaRegression on 2D data."""
27 | with pytest.raises(ValueError) as exc:
28 | StanMetaRegression(num_samples=500).fit_dataset(dataset_2d)
29 | assert str(exc.value).startswith("The StanMetaRegression")
30 |
31 |
32 | def test_stan_python_36_failure(dataset):
33 | """Run smoke test for StanMetaRegression with Python 3.6."""
34 | if sys.version_info < (3, 7):
35 |         # Raise an error if StanMetaRegression is initialized with Python 3.6 or lower
36 | with pytest.raises(RuntimeError):
37 | StanMetaRegression(num_samples=3000).fit_dataset(dataset)
38 |
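39 |
40 | # Illustrative sketch (not collected by pytest; requires pystan and
41 | # Python 3.7+): fit a Bayesian meta-regression on toy data and print
42 | # posterior summaries for the coefficients and tau^2.
43 | if __name__ == "__main__":
44 |     from pymare import Dataset
45 |
46 |     dataset = Dataset([1.0, 1.0, 2.0, 1.3], [1.5, 1.0, 2.0, 4.0])
47 |     est = StanMetaRegression(num_samples=1000).fit_dataset(dataset)
48 |     print(est.summary().summary(["beta", "tau2"]))
49 |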
--------------------------------------------------------------------------------
/pymare/tests/test_stats.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.stats."""
2 |
3 | from pymare import stats
4 |
5 |
6 | def test_q_gen(vars_with_intercept):
7 | """Test pymare.stats.q_gen."""
8 | result = stats.q_gen(*vars_with_intercept, 8)
9 | assert round(result[0], 4) == 8.0161
10 |
11 |
12 | def test_q_profile(vars_with_intercept):
13 | """Test pymare.stats.q_profile."""
14 | bounds = stats.q_profile(*vars_with_intercept, 0.05)
15 | assert set(bounds.keys()) == {"ci_l", "ci_u"}
16 | assert round(bounds["ci_l"], 4) == 3.8076
17 | assert round(bounds["ci_u"], 2) == 59.61
18 |
19 |
20 | def test_var_to_ci():
21 | """Test pymare.stats.var_to_ci.
22 |
23 | This is basically a smoke test. We should improve it.
24 | """
25 | ci = stats.var_to_ci(0.05, 0.5, n=20)
26 | assert round(ci[0], 4) == -0.2599
27 | assert round(ci[1], 4) == 0.3599
28 |
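29 |
30 | # Illustrative sketch (not collected by pytest): var_to_ci builds a Wald-type
31 | # interval, y +/- 1.96 * sqrt(v / n), which reproduces the values asserted
32 | # above for y=0.05, v=0.5, n=20.
33 | if __name__ == "__main__":
34 |     print(stats.var_to_ci(0.05, 0.5, n=20))  # ~(-0.2599, 0.3599)
35 |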
--------------------------------------------------------------------------------
/pymare/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | """Tests for pymare.utils."""
2 |
3 | import os.path as op
4 |
5 | import numpy as np
6 | import pytest
7 |
8 | from pymare import utils
9 |
10 |
11 | def test_get_resource_path():
12 | """Test nimare.utils.get_resource_path."""
13 | print(utils.get_resource_path())
14 | assert op.isdir(utils.get_resource_path())
15 |
16 |
17 | def test_check_inputs_shape():
18 | """Test nimare.utils._check_inputs_shape."""
19 | n_rows = 5
20 | n_columns = 4
21 | n_pred = 3
22 | y = np.random.randint(1, 100, size=(n_rows, n_columns))
23 | v = np.random.randint(1, 100, size=(n_rows + 1, n_columns))
24 | n = np.random.randint(1, 100, size=(n_rows, n_columns))
25 | X = np.random.randint(1, 100, size=(n_rows, n_pred))
26 | X_names = [f"X{x}" for x in range(n_pred)]
27 |
28 | utils._check_inputs_shape(y, X, "y", "X", row=True)
29 | utils._check_inputs_shape(y, n, "y", "n", row=True, column=True)
30 | utils._check_inputs_shape(X, np.array(X_names)[None, :], "X", "X_names", column=True)
31 |
32 |     # Raise error if the numbers of rows and columns of v don't match those of y
33 | with pytest.raises(ValueError):
34 | utils._check_inputs_shape(y, v, "y", "v", row=True, column=True)
35 |
36 |     # Raise error if neither row nor column is True
37 | with pytest.raises(ValueError):
38 | utils._check_inputs_shape(y, n, "y", "n")
39 |
40 | # Dataset may be initialized with n or v as None
41 | utils._check_inputs_shape(y, None, "y", "n", row=True, column=True)
42 |
--------------------------------------------------------------------------------
/pymare/utils.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous utility functions."""
2 |
3 | import os.path as op
4 |
5 | import numpy as np
6 |
7 |
8 | def get_resource_path():
9 | """Return the path to general resources, terminated with separator.
10 |
11 |     Resources are kept in the package's "resources" folder.
12 |     Based on a function by Yaroslav Halchenko used in the Neurosynth Python package.
13 | """
14 | return op.abspath(op.join(op.dirname(__file__), "resources") + op.sep)
15 |
16 |
17 | def _listify(obj):
18 | """Wrap all non-list or tuple objects in a list.
19 |
20 | This provides a simple way to accept flexible arguments.
21 | """
22 | return obj if isinstance(obj, (list, tuple, type(None), np.ndarray)) else [obj]
23 |
24 |
25 | def _check_inputs_shape(param1, param2, param1_name, param2_name, row=False, column=False):
26 | """Check whether 'param1' and 'param2' have the same shape.
27 |
28 | Parameters
29 | ----------
30 | param1 : array
31 | param2 : array
32 | param1_name : str
33 | param2_name : str
34 |     row : bool, default=False
35 |     column : bool, default=False
36 | """
37 | if (param1 is not None) and (param2 is not None):
38 | if row and not column:
39 | shape1 = param1.shape[0]
40 | shape2 = param2.shape[0]
41 | message = "rows"
42 | elif column and not row:
43 | shape1 = param1.shape[1]
44 | shape2 = param2.shape[1]
45 | message = "columns"
46 | elif row and column:
47 | shape1 = param1.shape
48 | shape2 = param2.shape
49 | message = "rows and columns"
50 | else:
51 | raise ValueError("At least one of the two parameters (row or column) should be True.")
52 |
53 | if shape1 != shape2:
54 | raise ValueError(
55 | f"{param1_name} and {param2_name} should have the same number of {message}. "
56 | f"You provided {param1_name} with shape {param1.shape} and {param2_name} "
57 | f"with shape {param2.shape}."
58 | )
59 |
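60 |
61 | # Illustrative sketch: _listify wraps scalars but passes lists, tuples,
62 | # arrays, and None through unchanged; _check_inputs_shape stays silent when
63 | # the named dimensions agree and raises ValueError otherwise.
64 | if __name__ == "__main__":
65 |     print(_listify(3), _listify([3, 4]), _listify(None))  # [3] [3, 4] None
66 |     _check_inputs_shape(np.zeros((5, 4)), np.zeros((5, 3)), "y", "X", row=True)
67 |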
--------------------------------------------------------------------------------
/pypi_description.md:
--------------------------------------------------------------------------------
1 | # PyMARE: Python Meta-Analysis & Regression Engine
2 |
3 | A Python library for mixed-effects meta-regression (including
4 | meta-analysis). See the Quickstart below for a minimal example.
5 |
6 | # License
7 |
8 | PyMARE is licensed under the terms of the MIT license. See
9 | the file 'LICENSE' for information on the history of this software,
10 | terms & conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
11 |
12 | All trademarks referenced herein are property of their respective
13 | holders.
14 |
15 | Copyright (c) 2019--, PyMARE developers
16 |
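17 | # Quickstart
18 |
19 | Install from PyPI with `pip install pymare`. A minimal sketch with toy
20 | values (four study-level estimates and their sampling variances):
21 |
22 | ```python
23 | from pymare import Dataset
24 | from pymare.estimators import DerSimonianLaird
25 |
26 | dataset = Dataset([1.0, 1.0, 2.0, 1.3], [1.5, 1.0, 2.0, 4.0])
27 | results = DerSimonianLaird().fit_dataset(dataset).summary()
28 | print(results.to_df())
29 | ```
30 |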
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools==68.2.2", "wheel"]
3 |
4 | [tool.black]
5 | line-length = 99
6 | target-version = ["py39"]
7 | include = '\.pyi?$'
8 | exclude = '''
9 |
10 | (
11 | /(
12 | \.eggs # exclude a few common directories in the
13 | | \.git # root of the project
14 | | \.github
15 | | \.hg
16 | | \.pytest_cache
17 | | _build
18 | | build
19 | | dist
20 | )/
21 | | versioneer.py
22 | | pymare/_version.py
23 | )
24 | '''
25 |
26 | [tool.isort]
27 | profile = "black"
28 | multi_line_output = 3
29 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | url = https://github.com/neurostuff/PyMARE
3 | license = MIT
4 | author = PyMARE developers
5 | author_email = tsalo006@fiu.edu
6 | maintainer = Taylor Salo
7 | maintainer_email = tsalo006@fiu.edu
8 | description = PyMARE: Python Meta-Analysis & Regression Engine
9 | description_file = README.md
10 | long_description = file: pypi_description.md
11 | long_description_content_type = text/markdown
12 | classifiers =
13 | Development Status :: 3 - Alpha
14 | Environment :: Console
15 | Intended Audience :: Science/Research
16 | License :: OSI Approved :: MIT License
17 | Operating System :: OS Independent
18 | Programming Language :: Python :: 3.8
19 | Programming Language :: Python :: 3.9
20 | Programming Language :: Python :: 3.10
21 | Programming Language :: Python :: 3.11
22 | Topic :: Scientific/Engineering
23 |
24 | [options]
25 | python_requires = >= 3.8
26 | install_requires =
27 | numpy>=1.8.0,<2.0; python_version == "3.9" and extra == 'stan'
28 | numpy>=1.8.0; python_version != "3.9" or extra != 'stan'
29 | pandas
30 | scipy<1.13; python_version == "3.9" and extra == 'stan'
31 | scipy; python_version != "3.9" or extra != 'stan'
32 | sympy
33 | wrapt
34 | packages = find:
35 | include_package_data = False
36 |
37 | [options.extras_require]
38 | doc =
39 | m2r2
40 | matplotlib
41 | mistune
42 | numpydoc
43 | pillow
44 | recommonmark
45 | seaborn
46 | sphinx>=3.5
47 | sphinx-argparse
48 | sphinx-copybutton
49 | sphinx_gallery
50 | sphinx_rtd_theme
51 | sphinxcontrib-bibtex
52 | sphinxcontrib-mermaid
53 | docutils>=0.18.1,<0.21
54 | tests =
55 | codecov
56 | coverage
57 | coveralls
58 | flake8
59 | flake8-black
60 | flake8-docstrings
61 | flake8-isort
62 | pytest
63 | pytest-cov
64 | stan =
65 | pystan
66 | arviz
67 | all =
68 | %(doc)s
69 | %(tests)s
70 | %(stan)s
71 |
72 | [options.package_data]
73 | * =
74 | tests/data/*
75 | resources/*
76 | resources/datasets/*
77 | effectsize/*.json
78 |
79 | [versioneer]
80 | VCS = git
81 | style = pep440
82 | versionfile_source = pymare/_version.py
83 | versionfile_build = pymare/_version.py
84 | tag_prefix =
85 | parentdir_prefix =
86 |
87 | [flake8]
88 | max-line-length = 99
89 | exclude=*build/,_version.py
90 | putty-ignore =
91 | */__init__.py : +F401
92 | per-file-ignores =
93 | */__init__.py:D401
94 | ignore = E203,E402,E722,W503
95 | docstring-convention = numpy
96 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """PyMARE setup script."""
3 | from setuptools import setup
4 |
5 | import versioneer
6 |
7 | if __name__ == "__main__":
8 | setup(
9 | name="PyMARE",
10 | version=versioneer.get_version(),
11 | cmdclass=versioneer.get_cmdclass(),
12 | zip_safe=False,
13 | )
14 |
--------------------------------------------------------------------------------