├── asaptools ├── tests │ ├── __init__.py │ ├── vprinterTests.py │ ├── partitionTests.py │ ├── partitionArrayTests.py │ ├── timekeeperTests.py │ ├── simpleCommP1STests.py │ ├── simpleCommParDivTests.py │ └── simpleCommParTests.py ├── __init__.py ├── vprinter.py ├── timekeeper.py ├── partition.py └── simplecomm.py ├── requirements.txt ├── .gitattributes ├── docs ├── source │ ├── readme.rst │ ├── changelog.rst │ ├── license.rst │ ├── vprinter.rst │ ├── partition.rst │ ├── simplecomm.rst │ ├── timekeeper.rst │ ├── asaptools.rst │ ├── index.rst │ └── conf.py ├── Makefile └── make.bat ├── .gitignore ├── readthedocs.yml ├── pyproject.toml ├── ci ├── environment.yml └── runtests.sh ├── MANIFEST.in ├── .github └── workflows │ ├── linting.yml │ ├── pypipublish.yml │ └── tests.yml ├── codecov.yml ├── setup.cfg ├── .pre-commit-config.yaml ├── CHANGELOG.rst ├── setup.py ├── README.rst ├── CONTRIBUTING.rst └── LICENSE.txt /asaptools/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | mpi4py 2 | numpy 3 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | asaptools/_version.py export-subst 2 | -------------------------------------------------------------------------------- /docs/source/readme.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../README.rst 2 | -------------------------------------------------------------------------------- /docs/source/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../CHANGELOG.rst 2 | -------------------------------------------------------------------------------- /docs/source/license.rst: -------------------------------------------------------------------------------- 1 | Product License 2 | =============== 3 | 4 | .. include:: ../../LICENSE.txt 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.egg-info 3 | .idea 4 | .vscode 5 | .coverage* 6 | coverage.xml 7 | __pycache__ 8 | -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | conda: 2 | file: ci/environment.yml 3 | python: 4 | version: 3 5 | setup_py_install: true 6 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 100 3 | target-version = ['py37'] 4 | skip-string-normalization = true 5 | -------------------------------------------------------------------------------- /docs/source/vprinter.rst: -------------------------------------------------------------------------------- 1 | 2 | asaptools.vprinter module 3 | ------------------------- 4 | 5 | .. 
automodule:: asaptools.vprinter 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/source/partition.rst: -------------------------------------------------------------------------------- 1 | 2 | asaptools.partition module 3 | -------------------------- 4 | 5 | .. automodule:: asaptools.partition 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/source/simplecomm.rst: -------------------------------------------------------------------------------- 1 | 2 | asaptools.simplecomm module 3 | --------------------------- 4 | 5 | .. automodule:: asaptools.simplecomm 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/source/timekeeper.rst: -------------------------------------------------------------------------------- 1 | 2 | asaptools.timekeeper module 3 | --------------------------- 4 | 5 | .. automodule:: asaptools.timekeeper 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /ci/environment.yml: -------------------------------------------------------------------------------- 1 | name: asaptools 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - mpi4py 7 | - numpy 8 | - pytest 9 | - pytest-cov 10 | - sphinx 11 | - coverage 12 | - codecov 13 | - sphinx_rtd_theme 14 | - recommonmark 15 | - pip 16 | - pip: 17 | - sphinx_copybutton 18 | -------------------------------------------------------------------------------- /docs/source/asaptools.rst: -------------------------------------------------------------------------------- 1 | asaptools package 2 | ================= 3 | 4 | .. automodule:: asaptools 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. 
toctree:: 13 | :maxdepth: 2 14 | 15 | vprinter 16 | timekeeper 17 | partition 18 | simplecomm 19 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include CHANGELOG.rst 2 | include CONTRIBUTING.rst 3 | include LICENSE.txt 4 | include README.rst 5 | include requirements.txt 6 | 7 | recursive-include docs/source * 8 | include docs/Makefile docs/make.bat 9 | recursive-include asaptools *.py 10 | recursive-exclude * __pycache__ 11 | recursive-exclude * *.py[co] 12 | prune ci* 13 | -------------------------------------------------------------------------------- /.github/workflows/linting.yml: -------------------------------------------------------------------------------- 1 | name: linting 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | pull_request: 8 | branches: 9 | - '*' 10 | 11 | jobs: 12 | linting: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions/setup-python@v2 17 | - uses: pre-commit/action@v2.0.0 18 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | require_ci_to_pass: no 3 | max_report_age: off 4 | 5 | comment: false 6 | 7 | ignore: 8 | - 'setup.py' 9 | - 'versioneer.py' 10 | - 'asaptools/_version.py' 11 | 12 | coverage: 13 | precision: 2 14 | round: down 15 | status: 16 | project: 17 | default: 18 | target: 95 19 | informational: true 20 | patch: off 21 | changes: off 22 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = docs 3 | ignore = E203,E266,E501,W503,E722,E402,C901,E731 4 | max-line-length = 100 5 | max-complexity = 18 6 | select = B,C,E,F,W,T4,B9 7 | 8 | [isort] 9 | known_first_party = asaptools 10 | known_third_party = mpi4py,numpy,pkg_resources,setuptools 11 | multi_line_output=3 12 | include_trailing_comma=True 13 | force_grid_wrap=0 14 | combine_as_imports=True 15 | line_length=100 16 | skip= 17 | docs/source/conf.py 18 | setup.py 19 | -------------------------------------------------------------------------------- /ci/runtests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -eo pipefail 5 | 6 | coverage run -p -m asaptools.tests.vprinterTests 7 | coverage run -p -m asaptools.tests.timekeeperTests 8 | coverage run -p -m asaptools.tests.partitionTests 9 | coverage run -p -m asaptools.tests.partitionArrayTests 10 | coverage run -p -m asaptools.tests.simpleCommP1STests 11 | mpirun -np 4 coverage run -p -m asaptools.tests.simpleCommParTests 12 | mpirun -np 4 coverage run -p -m asaptools.tests.simpleCommParDivTests 13 | coverage combine 14 | coverage xml 15 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. ASAPTools documentation master file, created by 2 | sphinx-quickstart on Mon Mar 30 12:26:05 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to the ASAP Python Toolbox's documentation! 7 | =================================================== 8 | 9 | Contents: 10 | 11 | .. 
toctree:: 12 | :maxdepth: 2 13 | 14 | readme 15 | asaptools 16 | changelog 17 | license 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | -------------------------------------------------------------------------------- /.github/workflows/pypipublish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package to PyPI 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v1 12 | - name: Set up Python 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: '3.x' 16 | - name: Install dependencies 17 | run: | 18 | python -m pip install --upgrade pip 19 | pip install setuptools setuptools-scm wheel twine 20 | - name: Build and publish 21 | env: 22 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 23 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 24 | run: | 25 | python setup.py sdist bdist_wheel 26 | twine upload dist/* 27 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v3.2.0 5 | hooks: 6 | - id: trailing-whitespace 7 | - id: end-of-file-fixer 8 | - id: check-docstring-first 9 | - id: check-json 10 | - id: check-yaml 11 | - id: double-quote-string-fixer 12 | 13 | - repo: https://github.com/ambv/black 14 | rev: 20.8b1 15 | hooks: 16 | - id: black 17 | 18 | - repo: https://github.com/keewis/blackdoc 19 | rev: v0.1.1 20 | hooks: 21 | - id: blackdoc 22 | 23 | - repo: https://gitlab.com/pycqa/flake8 24 | rev: 3.8.3 25 | hooks: 26 | - id: flake8 27 | 28 | - repo: https://github.com/asottile/seed-isort-config 29 | rev: v2.2.0 30 | hooks: 31 | - id: seed-isort-config 32 | 33 | - repo: https://github.com/pre-commit/mirrors-isort 34 | rev: v5.4.2 35 | hooks: 36 | - id: isort 37 | -------------------------------------------------------------------------------- /asaptools/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # flake8: noqa 3 | """ 4 | The ASAP Python Toolbox 5 | 6 | The ASAP Python Toolbox is a collection of stand-alone tools for doing simple 7 | tasks, from managing print messages with a set verbosity level, to 8 | keeping timing information, to managing simple MPI communication. 9 | 10 | Copyright 2020 University Corporation for Atmospheric Research 11 | 12 | Licensed under the Apache License, Version 2.0 (the "License"); 13 | you may not use this file except in compliance with the License. 14 | You may obtain a copy of the License at 15 | 16 | http://www.apache.org/licenses/LICENSE-2.0 17 | 18 | Unless required by applicable law or agreed to in writing, software 19 | distributed under the License is distributed on an "AS IS" BASIS, 20 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | See the License for the specific language governing permissions and 22 | limitations under the License. 23 | 24 | Send questions and comments to Kevin Paul (kpaul@ucar.edu). 
25 | """ 26 | 27 | from pkg_resources import DistributionNotFound, get_distribution 28 | 29 | try: 30 | __version__ = get_distribution(__name__).version 31 | except DistributionNotFound: # pragma: no cover 32 | __version__ = '0.0.0' # pragma: no cover 33 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | on: 3 | push: 4 | branches: 5 | - '*' 6 | pull_request: 7 | branches: 8 | - '*' 9 | workflow_dispatch: # allows you to trigger manually 10 | 11 | jobs: 12 | build: 13 | name: python-${{ matrix.python }}-${{ matrix.mpi }} 14 | runs-on: ubuntu-latest 15 | defaults: 16 | run: 17 | shell: bash -l {0} 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | python: ['3.6', '3.7', '3.8'] 22 | mpi: ['mpich', 'openmpi'] 23 | env: 24 | PYTHON: ${{ matrix.python }} 25 | MPI: ${{ matrix.mpi }} 26 | OMPI_MCA_rmaps_base_oversubscribe: '1' 27 | OMPI_ALLOW_RUN_AS_ROOT: '1' 28 | OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: '1' 29 | steps: 30 | - uses: actions/checkout@v2 31 | - name: Cache conda 32 | uses: actions/cache@v2 33 | env: 34 | # Increase this value to reset cache if ci/environment.yml has not changed 35 | CACHE_NUMBER: 0 36 | with: 37 | path: ~/conda_pkgs_dir 38 | key: ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ hashFiles('ci/environment.yml') }} 39 | - name: Setup miniconda 40 | uses: conda-incubator/setup-miniconda@v2 41 | with: 42 | activate-environment: asaptools # Defined in ci/environment.yml 43 | auto-update-conda: false 44 | python-version: ${{ matrix.python }} 45 | environment-file: ci/environment.yml 46 | use-only-tar-bz2: true # IMPORTANT: This needs to be set for caching to work properly! 47 | - name: Install MPI 48 | run: conda install ${{ matrix.mpi }} 49 | - name: Install package 50 | run: python -m pip install -e . 51 | - name: Check Installation 52 | run: | 53 | conda list 54 | pip list 55 | - name: Run Tests 56 | run: ci/runtests.sh 57 | - name: Upload code coverage to Codecov 58 | uses: codecov/codecov-action@v1 59 | with: 60 | file: ./coverage.xml 61 | flags: unittests 62 | env_vars: PYTHON,MPI 63 | name: codecov-umbrella 64 | fail_ci_if_error: false 65 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Change Log 2 | ========== 3 | 4 | Copyright 2020 University Corporation for Atmospheric Research 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 
17 | 
18 | 
19 | Version 0.7.0
20 | -------------
21 | 
22 | - Big refactor to use GitHub workflows (adding testing for Python
23 |   version 3.7 and 3.8)
24 | - Modernizing the package structure
25 | 
26 | Version 0.6.0
27 | -------------
28 | 
29 | - Allowing for support of all Python 2.6+ (including Python 3+)
30 | 
31 | Version 0.5.4
32 | -------------
33 | 
34 | - Bugfix: Special catch for dtype='c' (C-type char arrays) in check for
35 |   Numpy arrays being bufferable
36 | 
37 | Version 0.5.3
38 | -------------
39 | 
40 | - Updates just for PyPI release
41 | 
42 | Version 0.5.2
43 | -------------
44 | 
45 | - Improved testing for send/recv data types
46 | - Backwards compatibility with mpi4py version 1.3.1
47 | 
48 | Version 0.5.1
49 | -------------
50 | 
51 | - Checking dtype of Numpy NDArrays before determining if buffered send/recv
52 |   calls can be used.
53 | 
54 | Version 0.5.0
55 | -------------
56 | 
57 | - Now requires Python >=2.7 and <3.0
58 | - Using more advanced features of Python 2.7 (over 2.6)
59 | - Changed Numpy NDArray type-checking to allow for masked arrays, instead of
60 |   just NDArrays
61 | 
62 | Version 0.4.2
63 | -------------
64 | 
65 | - Update setup script to setuptools (instead of distutils)
66 | 
67 | Version 0.4.1
68 | -------------
69 | 
70 | - Bugfixes
71 | 
72 | Version 0.4
73 | -----------
74 | 
75 | - Updating install to include LICENSE
76 | - Restructured source directory
77 | - Upload to PyPI
78 | 
79 | Version 0.3
80 | -----------
81 | 
82 | - Repackaging the pyTools repo into a Python package with
83 |   installation software and Sphinx-style documentation
--------------------------------------------------------------------------------
/asaptools/tests/vprinterTests.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests of the verbose printer utility
3 | 
4 | Copyright 2017, University Corporation for Atmospheric Research
5 | See the LICENSE.txt file for details
6 | """
7 | 
8 | from __future__ import print_function
9 | 
10 | import sys
11 | import unittest
12 | from os import linesep
13 | 
14 | from asaptools import vprinter
15 | 
16 | try:
17 |     from cStringIO import StringIO
18 | except ImportError:
19 |     from io import StringIO
20 | 
21 | 
22 | class VPrinterTests(unittest.TestCase):
23 |     def setUp(self):
24 |         self.header = '[1] '
25 |         self.vprint = vprinter.VPrinter(header=self.header, verbosity=2)
26 | 
27 |     def testToStr(self):
28 |         data = ['a', 'b', 'c', 1, 2, 3, 4.0, 5.0, 6.0]
29 |         actual = self.vprint.to_str(*data)
30 |         expected = ''.join([str(d) for d in data])
31 |         self.assertEqual(actual, expected)
32 | 
33 |     def testToStrHeader(self):
34 |         data = ['a', 'b', 'c', 1, 2, 3, 4.0, 5.0, 6.0]
35 |         actual = self.vprint.to_str(*data, header=True)
36 |         expected = self.header + ''.join([str(d) for d in data])
37 |         self.assertEqual(actual, expected)
38 | 
39 |     def testVPrint(self):
40 |         data = ['a', 'b', 'c', 1, 2, 3, 4.0, 5.0, 6.0]
41 |         backup = sys.stdout
42 |         sys.stdout = StringIO()
43 |         self.vprint(*data)
44 |         actual = sys.stdout.getvalue()
45 |         sys.stdout.close()
46 |         sys.stdout = backup
47 |         expected = self.vprint.to_str(*data) + linesep
48 |         self.assertEqual(actual, expected)
49 | 
50 |     def testVPrintHeader(self):
51 |         data = ['a', 'b', 'c', 1, 2, 3, 4.0, 5.0, 6.0]
52 |         backup = sys.stdout
53 |         sys.stdout = StringIO()
54 |         self.vprint(*data, header=True)
55 |         actual = sys.stdout.getvalue()
56 |         sys.stdout.close()
57 |         sys.stdout = backup
58 |         expected = self.vprint.to_str(*data, header=True) + linesep
59 |         
self.assertEqual(actual, expected) 60 | 61 | def testVPrintVerbosityCut(self): 62 | data = ['a', 'b', 'c', 1, 2, 3, 4.0, 5.0, 6.0] 63 | backup = sys.stdout 64 | sys.stdout = StringIO() 65 | self.vprint(*data, verbosity=3) 66 | actual = sys.stdout.getvalue() 67 | sys.stdout.close() 68 | sys.stdout = backup 69 | expected = '' 70 | self.assertEqual(actual, expected) 71 | 72 | 73 | if __name__ == '__main__': 74 | unittest.main() 75 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | ASAP Python Toolbox -- Setup Script 4 | 5 | 6 | Copyright 2020 University Corporation for Atmospheric Research 7 | 8 | Licensed under the Apache License, Version 2.0 (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at 11 | 12 | http://www.apache.org/licenses/LICENSE-2.0 13 | 14 | Unless required by applicable law or agreed to in writing, software 15 | distributed under the License is distributed on an "AS IS" BASIS, 16 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | See the License for the specific language governing permissions and 18 | limitations under the License. 19 | """ 20 | 21 | from os.path import exists 22 | 23 | from setuptools import find_packages, setup 24 | 25 | with open('requirements.txt') as f: 26 | install_requires = f.read().strip().split('\n') 27 | 28 | if exists('README.rst'): 29 | with open('README.rst') as f: 30 | long_description = f.read() 31 | else: 32 | long_description = '' 33 | 34 | CLASSIFIERS = [ 35 | 'Development Status :: 5 - Production/Stable', 36 | 'License :: OSI Approved :: Apache Software License', 37 | 'Operating System :: Unix', 38 | 'Operating System :: MacOS', 39 | 'Intended Audience :: Science/Research', 40 | 'Programming Language :: Python', 41 | 'Programming Language :: Python :: 3', 42 | 'Programming Language :: Python :: 3.6', 43 | 'Programming Language :: Python :: 3.7', 44 | 'Programming Language :: Python :: 3.8', 45 | 'Topic :: Scientific/Engineering', 46 | ] 47 | 48 | setup( 49 | name='asaptools', 50 | description='A collection of useful Python modules from the ' 51 | 'Application Scalability And Performance (ASAP) group ' 52 | 'at the National Center for Atmospheric Research', 53 | long_description=long_description, 54 | python_requires='>=3.6', 55 | maintainer='Kevin Paul', 56 | maintainer_email='kpaul@ucar.edu', 57 | classifiers=CLASSIFIERS, 58 | url='https://asappytools.readthedocs.io', 59 | project_urls={ 60 | 'Documentation': 'https://asappytools.readthedocs.io', 61 | 'Source': 'https://github.com/NCAR/ASAPPyTools', 62 | 'Tracker': 'https://github.com/NCAR/ASAPPyTools/issues', 63 | }, 64 | packages=find_packages(), 65 | package_dir={'asaptools': 'asaptools'}, 66 | include_package_data=True, 67 | install_requires=install_requires, 68 | license='Apache 2.0', 69 | zip_safe=False, 70 | keywords='mpi', 71 | use_scm_version={'version_scheme': 'post-release', 'local_scheme': 'dirty-tag'}, 72 | setup_requires=['setuptools_scm', 'setuptools>=30.3.0'], 73 | ) 74 | -------------------------------------------------------------------------------- /asaptools/tests/partitionTests.py: -------------------------------------------------------------------------------- 1 | """ 2 | These are the unit tests for the partition module functions 3 | 4 | Copyright 2017, University Corporation for Atmospheric Research 5 
| See the LICENSE.txt file for details 6 | """ 7 | 8 | from __future__ import print_function 9 | 10 | import unittest 11 | 12 | from asaptools import partition 13 | 14 | 15 | class partitionTests(unittest.TestCase): 16 | 17 | """ 18 | Unit tests for the partition module 19 | """ 20 | 21 | def setUp(self): 22 | data = [list(range(3)), list(range(5)), list(range(7))] 23 | indices_sizes = [(0, 1), (1, 3), (5, 9)] 24 | self.inputs = [] 25 | for d in data: 26 | for (i, s) in indices_sizes: 27 | self.inputs.append((d, i, s)) 28 | 29 | def testOutOfBounds(self): 30 | self.assertRaises(IndexError, partition.EqualLength(), [1, 2, 3], 3, 3) 31 | self.assertRaises(IndexError, partition.EqualLength(), [1, 2, 3], 7, 3) 32 | 33 | def testDuplicate(self): 34 | for inp in self.inputs: 35 | pfunc = partition.Duplicate() 36 | actual = pfunc(*inp) 37 | expected = inp[0] 38 | self.assertEqual(actual, expected) 39 | 40 | def testEquallength(self): 41 | results = [ 42 | list(range(3)), 43 | [1], 44 | [], 45 | list(range(5)), 46 | [2, 3], 47 | [], 48 | list(range(7)), 49 | [3, 4], 50 | [5], 51 | ] 52 | for (ii, inp) in enumerate(self.inputs): 53 | pfunc = partition.EqualLength() 54 | actual = pfunc(*inp) 55 | expected = results[ii] 56 | self.assertEqual(actual, expected) 57 | 58 | def testEqualStride(self): 59 | for inp in self.inputs: 60 | pfunc = partition.EqualStride() 61 | actual = pfunc(*inp) 62 | expected = list(inp[0][inp[1] :: inp[2]]) 63 | self.assertEqual(actual, expected) 64 | 65 | def testSortedStride(self): 66 | for inp in self.inputs: 67 | weights = [(20 - i) for i in inp[0]] 68 | pfunc = partition.SortedStride() 69 | actual = pfunc(list(zip(inp[0], weights)), inp[1], inp[2]) 70 | expected = list(inp[0][:]) 71 | expected.reverse() 72 | expected = expected[inp[1] :: inp[2]] 73 | self.assertEqual(actual, expected) 74 | 75 | def testWeightBalanced(self): 76 | results = [ 77 | set([0, 1, 2]), 78 | set([1]), 79 | set(), 80 | set([3, 2, 4, 1, 0]), 81 | set([1]), 82 | set(), 83 | set([3, 2, 4, 1, 5, 0, 6]), 84 | set([3, 6]), 85 | set([4]), 86 | ] 87 | for (ii, inp) in enumerate(self.inputs): 88 | weights = [(3 - i) ** 2 for i in inp[0]] 89 | pfunc = partition.WeightBalanced() 90 | actual = set(pfunc(list(zip(inp[0], weights)), inp[1], inp[2])) 91 | expected = results[ii] 92 | self.assertEqual(actual, expected) 93 | 94 | 95 | if __name__ == '__main__': 96 | unittest.main() 97 | -------------------------------------------------------------------------------- /asaptools/tests/partitionArrayTests.py: -------------------------------------------------------------------------------- 1 | """ 2 | These are the unit tests for the partition module functions 3 | 4 | Copyright 2017, University Corporation for Atmospheric Research 5 | See the LICENSE.txt file for details 6 | """ 7 | 8 | from __future__ import absolute_import, division, print_function 9 | 10 | import unittest 11 | from os import linesep 12 | 13 | from numpy import arange, array, dstack, testing 14 | 15 | from asaptools import partition 16 | 17 | 18 | def test_info_msg(name, data, index, size, actual, expected): 19 | spcr = ' ' * len(name) 20 | msg = ''.join( 21 | [ 22 | linesep, 23 | name, 24 | ' - Data: ', 25 | str(data), 26 | linesep, 27 | spcr, 28 | ' - Index/Size: ', 29 | str(index), 30 | '/', 31 | str(size), 32 | linesep, 33 | spcr, 34 | ' - Actual: ', 35 | str(actual), 36 | linesep, 37 | spcr, 38 | ' - Expected: ', 39 | str(expected), 40 | ] 41 | ) 42 | return msg 43 | 44 | 45 | class partitionArrayTests(unittest.TestCase): 46 | 47 | """ 48 | Unit 
tests for the partition module 49 | """ 50 | 51 | def setUp(self): 52 | data = [arange(3), arange(5), arange(7)] 53 | indices_sizes = [(0, 1), (1, 3), (5, 9)] 54 | self.inputs = [] 55 | for d in data: 56 | for (i, s) in indices_sizes: 57 | self.inputs.append((d, i, s)) 58 | 59 | def testOutOfBounds(self): 60 | self.assertRaises(IndexError, partition.EqualLength(), [1, 2, 3], 3, 3) 61 | self.assertRaises(IndexError, partition.EqualStride(), [1, 2, 3], 7, 3) 62 | 63 | def testDuplicate(self): 64 | for inp in self.inputs: 65 | pfunc = partition.Duplicate() 66 | actual = pfunc(*inp) 67 | expected = inp[0] 68 | testing.assert_array_equal(actual, expected) 69 | 70 | def testEquallength(self): 71 | results = [ 72 | arange(3), 73 | array([1]), 74 | array([]), 75 | arange(5), 76 | array([2, 3]), 77 | array([]), 78 | arange(7), 79 | array([3, 4]), 80 | array([5]), 81 | ] 82 | for (ii, inp) in enumerate(self.inputs): 83 | pfunc = partition.EqualLength() 84 | actual = pfunc(*inp) 85 | expected = results[ii] 86 | testing.assert_array_equal(actual, expected) 87 | 88 | def testEqualStride(self): 89 | for inp in self.inputs: 90 | pfunc = partition.EqualStride() 91 | actual = pfunc(*inp) 92 | expected = inp[0][inp[1] :: inp[2]] 93 | testing.assert_array_equal(actual, expected) 94 | 95 | def testSortedStride(self): 96 | for inp in self.inputs: 97 | weights = array([(20 - i) for i in inp[0]]) 98 | pfunc = partition.SortedStride() 99 | data = dstack((inp[0], weights))[0] 100 | actual = pfunc(data, inp[1], inp[2]) 101 | expected = inp[0][::-1] 102 | expected = expected[inp[1] :: inp[2]] 103 | testing.assert_array_equal(actual, expected) 104 | 105 | def testWeightBalanced(self): 106 | results = [ 107 | set([0, 1, 2]), 108 | set([1]), 109 | set(), 110 | set([3, 2, 4, 1, 0]), 111 | set([1]), 112 | set(), 113 | set([3, 2, 4, 1, 5, 0, 6]), 114 | set([3, 6]), 115 | set([4]), 116 | ] 117 | for (ii, inp) in enumerate(self.inputs): 118 | weights = array([(3 - i) ** 2 for i in inp[0]]) 119 | pfunc = partition.WeightBalanced() 120 | data = dstack((inp[0], weights))[0] 121 | actual = set(pfunc(data, inp[1], inp[2])) 122 | expected = results[ii] 123 | self.assertEqual(actual, expected) 124 | 125 | 126 | if __name__ == '__main__': 127 | unittest.main() 128 | -------------------------------------------------------------------------------- /asaptools/tests/timekeeperTests.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests (serial only) for the TimeKeeper class 3 | 4 | Copyright 2017, University Corporation for Atmospheric Research 5 | See the LICENSE.txt file for details 6 | """ 7 | 8 | from __future__ import print_function 9 | 10 | import unittest 11 | from time import sleep 12 | 13 | from asaptools import timekeeper 14 | 15 | 16 | class TimeKeeperTests(unittest.TestCase): 17 | 18 | """ 19 | Tests for the TimeKeeper class 20 | """ 21 | 22 | def test_init(self): 23 | tk = timekeeper.TimeKeeper() 24 | self.assertEqual( 25 | type(tk), 26 | timekeeper.TimeKeeper, 27 | 'TimeKeeper class instantiated incorrectly.', 28 | ) 29 | 30 | def test_start_stop_names(self): 31 | tk = timekeeper.TimeKeeper() 32 | name = 'Test Clock' 33 | wait_time = 0.05 34 | tk.start(name) 35 | sleep(wait_time) 36 | tk.stop(name) 37 | self.assertTrue(name in tk._accumulated_times) 38 | self.assertTrue(name in tk._added_order) 39 | self.assertTrue(name in tk._start_times) 40 | 41 | def test_start_stop_values(self): 42 | tk = timekeeper.TimeKeeper() 43 | name = 'Test Clock' 44 | wait_time = 0.05 45 | 
tk.start(name) 46 | sleep(wait_time) 47 | tk.stop(name) 48 | dt = tk.get_time(name) 49 | dterr = abs(dt / wait_time - 1.0) 50 | self.assertTrue(dterr < 0.15) 51 | 52 | def test_start_stop_order_names(self): 53 | tk = timekeeper.TimeKeeper() 54 | name1 = 'Test Clock 1' 55 | name2 = 'Test Clock 2' 56 | wait_time = 0.01 57 | tk.start(name1) 58 | sleep(wait_time) 59 | tk.stop(name1) 60 | tk.start(name2) 61 | sleep(wait_time) 62 | tk.stop(name2) 63 | self.assertEqual(name1, tk._added_order[0]) 64 | self.assertEqual(name2, tk._added_order[1]) 65 | 66 | def test_start_stop_values2(self): 67 | tk = timekeeper.TimeKeeper() 68 | name1 = 'Test Clock 1' 69 | name2 = 'Test Clock 2' 70 | wait_time = 0.05 71 | tk.start(name1) 72 | sleep(2 * wait_time) 73 | tk.start(name2) 74 | sleep(wait_time) 75 | tk.stop(name1) 76 | sleep(wait_time) 77 | tk.stop(name2) 78 | dt1 = tk.get_time(name1) 79 | dt1err = abs(dt1 / (3 * wait_time) - 1.0) 80 | self.assertTrue(dt1err < 0.15) 81 | dt2 = tk.get_time(name2) 82 | dt2err = abs(dt2 / (2 * wait_time) - 1.0) 83 | self.assertTrue(dt2err < 0.15) 84 | 85 | def test_reset_values(self): 86 | tk = timekeeper.TimeKeeper() 87 | name = 'Test Clock' 88 | wait_time = 0.05 89 | tk.start(name) 90 | sleep(wait_time) 91 | tk.stop(name) 92 | tk.reset(name) 93 | self.assertEqual(0, tk.get_time(name)) 94 | 95 | def test_get_time(self): 96 | tk = timekeeper.TimeKeeper() 97 | name = 'Test Clock' 98 | wait_time = 0.05 99 | tk.start(name) 100 | sleep(wait_time) 101 | tk.stop(name) 102 | dt = tk.get_time(name) 103 | dterr = abs(dt / wait_time - 1.0) 104 | self.assertTrue(dterr < 0.15) 105 | 106 | def test_get_all_times(self): 107 | tk = timekeeper.TimeKeeper() 108 | name1 = 'Test Clock 1' 109 | name2 = 'Test Clock 2' 110 | wait_time = 0.05 111 | tk.start(name1) 112 | sleep(2 * wait_time) 113 | tk.start(name2) 114 | sleep(wait_time) 115 | tk.stop(name1) 116 | sleep(wait_time) 117 | tk.stop(name2) 118 | all_times = tk.get_all_times() 119 | expected_all_times = {name1: 3 * wait_time, name2: 2 * wait_time} 120 | self.assertTrue(len(expected_all_times.keys()) == len(all_times.keys())) 121 | self.assertTrue( 122 | all([i1 == i2 for i1, i2 in zip(expected_all_times.keys(), all_times.keys())]) 123 | ) 124 | self.assertAlmostEqual( 125 | list(expected_all_times.values())[0], list(all_times.values())[0], places=1 126 | ) 127 | self.assertAlmostEqual( 128 | list(expected_all_times.values())[1], list(all_times.values())[1], places=1 129 | ) 130 | 131 | 132 | if __name__ == '__main__': 133 | unittest.main() 134 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | The ASAP Python Toolbox 3 | ======================= 4 | 5 | |Circle| |Codecov| |Docs| |PyPI| 6 | 7 | The ASAP Python Toolbox is a collection of stand-alone tools for doing simple 8 | tasks, from managing print messages with a set verbosity level, to 9 | keeping timing information, to managing simple MPI communication. 10 | 11 | :COPYRIGHT: 2016-2019, University Corporation for Atmospheric Research 12 | :LICENSE: See the LICENSE.txt file for details 13 | 14 | Send questions and comments to Kevin Paul (kpaul@ucar.edu). 15 | 16 | Overview 17 | -------- 18 | 19 | The ASAP (Application Scalability And Performance) group at the National 20 | Center for Atmospheric Research maintains this collection of simple 21 | Python tools for managing tasks commonly used with its Python software. 
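
For a quick taste, here is a minimal sketch exercising three of the tools
described below (illustrative only; the header and timer names are arbitrary,
and the simplecomm lines additionally require mpi4py)::

    from asaptools.vprinter import VPrinter
    from asaptools.timekeeper import TimeKeeper
    from asaptools import simplecomm

    vprint = VPrinter(header='[demo] ', verbosity=2)
    vprint('Starting up...', header=True)     # printed, since 0 < 2

    tk = TimeKeeper()
    tk.start('work')
    # ... do some timed work here ...
    tk.stop('work')
    vprint('Elapsed: ', tk.get_time('work'))

    scomm = simplecomm.create_comm(serial=True)
    vprint('Communicator size: ', scomm.get_size())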
22 | The modules contained in this package include:
23 | 
24 | :vprinter: For managing print messages with verbosity-level specification
25 | :timekeeper: For managing multiple "stop watches" for timing metrics
26 | :partition: For various data partitioning algorithms
27 | :simplecomm: For simple MPI communication
28 | 
29 | Only the simplecomm module depends on anything beyond the basic built-in
30 | Python packages.
31 | 
32 | Dependencies
33 | ------------
34 | 
35 | All of the ASAP Python Toolbox tools are written to work with Python 3.6+.
36 | The vprinter, timekeeper, and partition modules are pure
37 | Python. The simplecomm module depends on mpi4py (>=1.3).
38 | 
39 | This implies the following dependency:
40 | 
41 | - mpi4py depends on numpy (>=1.4) and MPI
42 | 
43 | Easy Installation
44 | -----------------
45 | 
46 | The easiest way to install the ASAP Python Toolbox is from the Python
47 | Package Index (PyPI) with the pip package manager::
48 | 
49 |     $ pip install [--user] asaptools
50 | 
51 | The optional '--user' argument can be used to install the package in the
52 | local user's directory, which is useful if the user doesn't have root
53 | privileges.
54 | 
55 | Obtaining the Source Code
56 | -------------------------
57 | 
58 | Currently, the most up-to-date source code is available via git from the
59 | site::
60 | 
61 |     https://github.com/NCAR/ASAPPyTools
62 | 
63 | Check out the most recent tag. The source is available in read-only
64 | mode to everyone, but special permissions can be given to those who wish
65 | to make changes to the source.
66 | 
67 | Building & Installation
68 | -----------------------
69 | 
70 | Installation of the ASAP Python Toolbox is very simple. After checking out the
71 | source from the above git link, via::
72 | 
73 |     $ git clone https://github.com/NCAR/ASAPPyTools
74 | 
75 | change into the top-level source directory, check out the most recent tag,
76 | and run the Python setuptools-based setup script. On unix, this involves::
77 | 
78 |     $ cd ASAPPyTools
79 |     $ python setup.py install [--prefix=/path/to/install/location]
80 | 
81 | The prefix is optional, as the default prefix is typically /usr/local on
82 | linux machines. However, you must have permissions to write to the
83 | prefix location, so you may want to choose a prefix location where you
84 | have write permissions. Like most setuptools installations, you can
85 | alternatively install the ASAP Python Toolbox with the --user option, which will
86 | automatically select (and create if it does not exist) the $HOME/.local
87 | directory in which to install. To do this, type (on unix machines)::
88 | 
89 |     $ python setup.py install --user
90 | 
91 | This can be handy since the site-packages directory will be common for
92 | all user installs, and therefore only needs to be added to the
93 | PYTHONPATH once.
94 | 
95 | Instructions & Use
96 | ------------------
97 | 
98 | For instructions on how to use the ASAP Python Toolbox, see the
99 | documentation_.
100 | 
101 | 
102 | .. _documentation: https://asappytools.readthedocs.io/en/latest/
103 | 
104 | .. |Circle| image:: https://img.shields.io/circleci/project/github/NCAR/ASAPPyTools.svg?logo=circleci
105 |    :target: https://circleci.com/gh/NCAR/ASAPPyTools
106 | 
107 | .. |Codecov| image:: https://img.shields.io/codecov/c/github/NCAR/ASAPPyTools.svg
108 |    :target: https://codecov.io/gh/NCAR/ASAPPyTools
109 | 
110 | .. 
|Docs| image:: https://readthedocs.org/projects/asappytools/badge/?version=latest 111 | :target: https://asappytools.readthedocs.io/en/latest/?badge=latest 112 | :alt: Documentation Status 113 | 114 | .. |PyPI| image:: https://img.shields.io/pypi/v/asaptools.svg 115 | :target: https://pypi.org/project/asaptools/ 116 | :alt: Python Package Index 117 | -------------------------------------------------------------------------------- /asaptools/vprinter.py: -------------------------------------------------------------------------------- 1 | """ 2 | A module containing the VPrinter class. 3 | 4 | This module contains the VPrinter class that enables clean printing to 5 | standard out (or a string) with verbosity-level print management. 6 | 7 | 8 | Copyright 2020 University Corporation for Atmospheric Research 9 | 10 | Licensed under the Apache License, Version 2.0 (the "License"); 11 | you may not use this file except in compliance with the License. 12 | You may obtain a copy of the License at 13 | 14 | http://www.apache.org/licenses/LICENSE-2.0 15 | 16 | Unless required by applicable law or agreed to in writing, software 17 | distributed under the License is distributed on an "AS IS" BASIS, 18 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | See the License for the specific language governing permissions and 20 | limitations under the License. 21 | """ 22 | 23 | from __future__ import print_function 24 | 25 | 26 | class VPrinter(object): 27 | 28 | """ 29 | A Verbosity-enabled Printing Class. 30 | 31 | The VPrinter is designed to print messages to standard out, or optionally 32 | a string, as determined by a pre-set verbosity-level and/or on which 33 | parallel rank the VPrinter is instantiated. 34 | 35 | Attributes: 36 | header (str): A string to prepend to any print messages before 37 | they are printed 38 | verbosity (int): The verbosity level to use when determining if a 39 | message should be printed 40 | """ 41 | 42 | def __init__(self, header='', verbosity=1): 43 | """ 44 | Constructor - Creates an instance of a VPrinter object. 45 | 46 | Keyword Arguments: 47 | header (str): A string to prepend to any print messages before 48 | they are printed 49 | verbosity (int): The verbosity level to use when determining if a 50 | message should be printed 51 | """ 52 | # The message header to prepend to messages if desired 53 | self.header = header 54 | 55 | # The verbosity level for determining if a message is printed 56 | self.verbosity = verbosity 57 | 58 | def to_str(self, *args, **kwargs): 59 | """ 60 | Concatenates string representations of the input arguments. 61 | 62 | This takes a list of arguments of any length, converts each argument 63 | to a string representation, and concatenates them into a single string. 64 | 65 | Parameters: 66 | args (list): A list of arguments supplied to the function. All 67 | of these arguments will be concatenated together. 68 | 69 | Keyword Arguments: 70 | kwargs (dict): The dictionary of keyword arguments 71 | passed to the function. 72 | 73 | Returns: 74 | str: A single string with the arguments given converted to strings 75 | and concatenated together (in order). If the keyword 76 | 'header==True' is supplied, then the 'header' string is 77 | prepended to the string before being output. 
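
        A minimal illustrative example (header text chosen arbitrarily)::

            vp = VPrinter(header='[0] ')
            vp.to_str('x = ', 1, header=True)   # returns '[0] x = 1'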
78 | 
79 |         Raises:
80 |             TypeError: If the 'header' keyword argument is supplied and is
81 |                 not a bool
82 |         """
83 |         out_args = []
84 |         if 'header' in kwargs:
85 |             if type(kwargs['header']) is bool:
86 |                 if kwargs['header']:
87 |                     out_args.append(self.header)
88 |             else:
89 |                 raise TypeError('Header keyword argument not bool')
90 |         out_args.extend(args)
91 | 
92 |         return ''.join([str(arg) for arg in out_args])
93 | 
94 |     def __call__(self, *args, **kwargs):
95 |         """
96 |         Print the supplied arguments to standard out.
97 | 
98 |         Prints all supplied positional arguments to standard output, if the
99 |         message verbosity is less than the VPrinter's verbosity level. Can
100 |         also print a useful header based on the parallel rank and size.
101 | 
102 |         Parameters:
103 |             args (list): A list of arguments supplied to the function. All
104 |                 of these arguments will be concatenated together.
105 | 
106 |         Keyword Arguments:
107 |             kwargs (dict): The dictionary of keyword arguments
108 |                 passed to the function.
109 | 
110 |         Returns:
111 |             None: However, if the 'verbosity' keyword argument is supplied,
112 |                 and the 'verbosity' value is less than the VPrinter object's
113 |                 'verbosity' attribute, then it prints to stdout. Like
114 |                 the 'to_str' method, if the 'header' keyword is supplied and
115 |                 equal to 'True', then it prepends the output with the header.
116 |         """
117 |         verbosity = 0
118 |         if 'verbosity' in kwargs and type(kwargs['verbosity']) is int:
119 |             verbosity = kwargs['verbosity']
120 | 
121 |         if verbosity < self.verbosity:
122 |             print(self.to_str(*args, **kwargs))
123 | 
--------------------------------------------------------------------------------
/asaptools/timekeeper.py:
--------------------------------------------------------------------------------
1 | """
2 | A module containing the TimeKeeper class.
3 | 
4 | This module contains a simple class to act as a time keeper for internal
5 | performance monitoring (namely, timing given processes).
6 | 
7 | 
8 | Copyright 2020 University Corporation for Atmospheric Research
9 | 
10 | Licensed under the Apache License, Version 2.0 (the "License");
11 | you may not use this file except in compliance with the License.
12 | You may obtain a copy of the License at
13 | 
14 |     http://www.apache.org/licenses/LICENSE-2.0
15 | 
16 | Unless required by applicable law or agreed to in writing, software
17 | distributed under the License is distributed on an "AS IS" BASIS,
18 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | See the License for the specific language governing permissions and
20 | limitations under the License.
21 | """
22 | 
23 | from time import time
24 | 
25 | 
26 | class TimeKeeper(object):
27 | 
28 |     """
29 |     Class to keep timing recordings, start/stop/reset timers.
30 | 
31 |     Attributes:
32 |         _time: The method to use for getting the time (e.g., time.time)
33 |         _start_times (dict): A dictionary of start times for each named timer
34 |         _accumulated_times (dict): A dictionary of the total accumulated times
35 |             for each named timer
36 |         _added_order (list): A list containing the name of each timer, in the
37 |             order it was added to the TimeKeeper
38 |     """
39 | 
40 |     def __init__(self, time=time):
41 |         """
42 |         Constructor.
43 | 
44 |         Keyword Arguments:
45 |             time: The function to use for measuring the time. By default,
46 |                 it is the Python 'time.time()' method.
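                For instance, a different clock can be swapped in; a minimal
                sketch using a monotonic clock (illustrative)::

                    from time import monotonic

                    # timers will now be measured with time.monotonic()
                    tk = TimeKeeper(time=monotonic)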
47 | """ 48 | 49 | # The method to use for time measurements 50 | self._time = time 51 | 52 | # Dictionary of start times associated with a string name 53 | self._start_times = {} 54 | 55 | # Dictionary of accumulated times associated with a string name 56 | self._accumulated_times = {} 57 | 58 | # List containing the order of the timers 59 | # (when added to the dictionaries) 60 | self._added_order = [] 61 | 62 | def reset(self, name): 63 | """ 64 | Method to reset a timer associated with a given name. 65 | 66 | If the name has never been used before, the timer is created and the 67 | accumulated time is set to 0. If the timer has been used before, the 68 | accumulated time is set to 0. 69 | 70 | Parameters: 71 | name: The name or ID of the timer to reset 72 | """ 73 | 74 | # Reset the named timer (creates it if it doesn't exist yet) 75 | if name not in self._added_order: 76 | self._added_order.append(name) 77 | self._accumulated_times[name] = 0.0 78 | self._start_times[name] = self._time() 79 | 80 | def start(self, name): 81 | """ 82 | Method to start a timer associated with a given name. 83 | 84 | If the name has never been used before, the timer is created and 85 | the accumulated time is set to 0. 86 | 87 | Parameters: 88 | name: The name or ID of the timer to start 89 | """ 90 | 91 | # Start the named timer (creates it if it doesn't exist yet) 92 | if name not in self._accumulated_times: 93 | self.reset(name) 94 | else: 95 | self._start_times[name] = self._time() 96 | 97 | def stop(self, name): 98 | """ 99 | Stop the timing and add the accumulated time to the timer. 100 | 101 | Method to stop a timer associated with a given name, and adds 102 | the accumulated time to the timer when stopped. If the given timer 103 | name has never been used before (either by calling reset() or start()), 104 | the timer is created and the accumulated time is set to 0. 105 | 106 | Parameters: 107 | name: The name or ID of the timer to stop 108 | """ 109 | 110 | # Stop the named timer, add to accumulated time 111 | if name not in self._accumulated_times: 112 | self.reset(name) 113 | else: 114 | self._accumulated_times[name] += self._time() - self._start_times[name] 115 | 116 | def get_names(self): 117 | """ 118 | Method to return the clock names in the order in which they were added. 119 | 120 | Returns: 121 | list: The list of timer names in the order they were added 122 | """ 123 | return self._added_order 124 | 125 | def get_time(self, name): 126 | """ 127 | Returns the accumulated time of the given timer. 128 | 129 | If the given timer name has never been created, it is created and the 130 | accumulated time is set to zero before returning. 131 | 132 | Parameters: 133 | name: The name or ID of the timer to stop 134 | 135 | Returns: 136 | float: The accumulated time of the named timer (or 0.0 if the 137 | named timer has never been created before). 138 | """ 139 | 140 | # Get the accumulated time 141 | if name not in self._accumulated_times: 142 | self.reset(name) 143 | return self._accumulated_times[name] 144 | 145 | def get_all_times(self): 146 | """ 147 | Returns the dictionary of accumulated times on the local processor. 
148 | 
149 |         Returns:
150 |             dict: The dictionary of accumulated times
151 |         """
152 |         return self._accumulated_times
153 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.rst:
--------------------------------------------------------------------------------
1 | ===================
2 | Contribution Guide
3 | ===================
4 | 
5 | Interested in helping build the ASAPPyTools? Have code from your work that
6 | you believe others will find useful? Have a few minutes to tackle an issue?
7 | 
8 | Contributions are highly welcomed and appreciated. Every little bit of help counts,
9 | so do not hesitate!
10 | 
11 | The following sections cover some general guidelines
12 | regarding development in ASAPPyTools for maintainers and contributors.
13 | Nothing here is set in stone.
14 | Feel free to suggest improvements or changes in the workflow.
15 | 
16 | 
17 | 
18 | .. contents:: Contribution links
19 |    :depth: 2
20 | 
21 | 
22 | 
23 | .. _submitfeedback:
24 | 
25 | Feature requests and feedback
26 | -----------------------------
27 | 
28 | We'd also like to hear about your propositions and suggestions. Feel free to
29 | `submit them as issues <https://github.com/NCAR/ASAPPyTools/issues>`_ and:
30 | 
31 | * Explain in detail how they should work.
32 | * Keep the scope as narrow as possible. This will make it easier to implement.
33 | 
34 | 
35 | .. _reportbugs:
36 | 
37 | 
38 | Report bugs
39 | -----------
40 | 
41 | Report bugs for ASAPPyTools in the `issue tracker <https://github.com/NCAR/ASAPPyTools/issues>`_.
42 | 
43 | If you are reporting a bug, please include:
44 | 
45 | * Your operating system name and version.
46 | * Any details about your local setup that might be helpful in troubleshooting,
47 |   specifically the Python interpreter version, installed libraries, and ASAPPyTools
48 |   version.
49 | * Detailed steps to reproduce the bug.
50 | 
51 | If you can write a demonstration test that currently fails but should pass
52 | (xfail), that is a very useful commit to make as well, even if you cannot
53 | fix the bug itself.
54 | 
55 | 
56 | .. _fixbugs:
57 | 
58 | Fix bugs
59 | --------
60 | 
61 | Look through the `GitHub issues for bugs <https://github.com/NCAR/ASAPPyTools/issues>`_.
62 | 
63 | Talk to developers to find out how you can fix specific bugs.
64 | 
65 | 
66 | Write documentation
67 | -------------------
68 | 
69 | ASAPPyTools could always use more documentation. What exactly is needed?
70 | 
71 | * More complementary documentation. Have you perhaps found something unclear?
72 | * Docstrings. There can never be too many of them.
73 | * Blog posts, articles and such -- they're all very appreciated.
74 | 
75 | You can also edit documentation files directly in the GitHub web interface,
76 | without using a local copy. This can be convenient for small fixes.
77 | 
78 | .. note::
79 |     Build the documentation locally with the following command:
80 | 
81 |     .. code:: bash
82 | 
83 |         $ conda env update -f ci/environment.yml
84 |         $ cd docs
85 |         $ make html
86 | 
87 |     The built documentation should be available in ``docs/_build/``.
88 | 
89 | 
90 | 
91 | .. _`pull requests`:
92 | .. _pull-requests:
93 | 
94 | Preparing Pull Requests
95 | -----------------------
96 | 
97 | 
98 | #. Fork the
99 |    `ASAPPyTools GitHub repository <https://github.com/NCAR/ASAPPyTools>`__. It's
100 |    fine to use ``ASAPPyTools`` as your fork repository name because it will live
101 |    under your user.
102 | 
103 | #. Clone your fork locally using `git <https://git-scm.com/>`_, connect your repository
104 |    to the upstream (main project), and create a branch::
105 | 
106 |     $ git clone git@github.com:YOUR_GITHUB_USERNAME/ASAPPyTools.git
107 |     $ cd ASAPPyTools
108 |     $ git remote add upstream git@github.com:NCAR/ASAPPyTools.git
109 | 
110 |     # now, to fix a bug or add feature create your own branch off "master":
111 | 
112 |     $ git checkout -b your-bugfix-feature-branch-name master
113 | 
114 |    If you need some help with Git, follow this quick start
115 |    guide: https://git.wiki.kernel.org/index.php/QuickStart
116 | 
117 | #. Install dependencies into a new conda environment::
118 | 
119 |     $ conda env update -f ci/environment.yml
120 |     $ conda activate asaptools
121 | 
122 | #. Make an editable install of ASAPPyTools by running::
123 | 
124 |     $ python -m pip install -e .
125 | 
126 | 
127 | 
128 | #. Install `pre-commit <https://pre-commit.com/>`_ hooks on the ASAPPyTools repo::
129 | 
130 |     $ pre-commit install
131 | 
132 |    Afterwards ``pre-commit`` will run whenever you commit.
133 | 
134 |    `pre-commit <https://pre-commit.com/>`_ is a framework for managing and maintaining multi-language pre-commit hooks
135 |    to ensure code-style and code formatting is consistent.
136 | 
137 |    Now you have an environment called ``asaptools`` that you can work in.
138 |    You’ll need to make sure to activate that environment next time you want
139 |    to use it after closing the terminal or your system.
140 | 
141 | 
142 | #. Run all the tests
143 | 
144 |    Now running tests is as simple as issuing this command::
145 | 
146 |     $ pytest --junitxml=test-reports/junit.xml --cov=./
147 | 
148 | 
149 |    This command will run tests via the "pytest" tool against Python 3.8.
150 | 
151 | 
152 | 
153 | #. Create a new changelog entry in ``CHANGELOG.rst``:
154 | 
155 |    - The entry should be entered as:
156 | 
157 |      ``<description> (:pr:`#<pr-number>`) `<author-name>`_``
158 | 
159 |      where ``<description>`` is the description of the PR related to the change, ``<pr-number>`` is
160 |      the pull request number, and ``<author-name>`` are your first and last names.
161 | 
162 |    - Add yourself to the list of authors at the end of the ``CHANGELOG.rst`` file if not there yet, in alphabetical order.
163 | 
164 | 
165 | #. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.
166 | 
167 |    When committing, ``pre-commit`` will re-format the files if necessary.
168 | 
169 | #. Commit and push once your tests pass and you are happy with your change(s)::
170 | 
171 |     $ git commit -a -m "<commit message text>"
172 | 
173 |     $ git push -u
174 | 
175 | #. Finally, submit a pull request through the GitHub website using this data::
176 | 
177 |     head-fork: YOUR_GITHUB_USERNAME/ASAPPyTools
178 |     compare: your-branch-name
179 | 
180 |     base-fork: NCAR/ASAPPyTools
181 |     base: master # if it's a bugfix or feature
--------------------------------------------------------------------------------
/asaptools/tests/simpleCommP1STests.py:
--------------------------------------------------------------------------------
1 | """
2 | Parallel-1-Serial Tests for the SimpleComm class
3 | 
4 | The 'P1S' Test Suite specifically tests whether the serial behavior is the
5 | same as the 1-rank parallel behavior. If the 'Par' test suite passes with
6 | various communicator sizes (1, 2, ...), then this suite should be run to make
7 | sure that serial communication behaves consistently.
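On its own, this suite can be run serially with
'python -m asaptools.tests.simpleCommP1STests', which is how ci/runtests.sh
invokes it (under coverage, without mpirun).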
8 | 9 | Copyright 2017, University Corporation for Atmospheric Research 10 | See the LICENSE.txt file for details 11 | """ 12 | 13 | from __future__ import print_function 14 | 15 | import unittest 16 | 17 | import numpy as np 18 | from mpi4py import MPI 19 | 20 | from asaptools import simplecomm 21 | from asaptools.partition import Duplicate, EqualStride 22 | 23 | MPI_COMM_WORLD = MPI.COMM_WORLD 24 | 25 | 26 | class SimpleCommP1STests(unittest.TestCase): 27 | def setUp(self): 28 | self.scomm = simplecomm.create_comm(serial=True) 29 | self.pcomm = simplecomm.create_comm(serial=False) 30 | self.size = MPI_COMM_WORLD.Get_size() 31 | self.rank = MPI_COMM_WORLD.Get_rank() 32 | 33 | def testIsSerialLike(self): 34 | self.assertEqual(self.rank, 0, 'Rank not consistent with serial-like operation') 35 | self.assertEqual(self.size, 1, 'Size not consistent with serial-like operation') 36 | 37 | def testGetSize(self): 38 | sresult = self.scomm.get_size() 39 | presult = self.pcomm.get_size() 40 | self.assertEqual(sresult, presult) 41 | 42 | def testIsManager(self): 43 | sresult = self.scomm.is_manager() 44 | presult = self.pcomm.is_manager() 45 | self.assertEqual(sresult, presult) 46 | 47 | def testSumInt(self): 48 | data = 5 49 | sresult = self.scomm.allreduce(data, 'sum') 50 | presult = self.pcomm.allreduce(data, 'sum') 51 | self.assertEqual(sresult, presult) 52 | 53 | def testSumList(self): 54 | data = range(5) 55 | sresult = self.scomm.allreduce(data, 'sum') 56 | presult = self.pcomm.allreduce(data, 'sum') 57 | self.assertEqual(sresult, presult) 58 | 59 | def testSumDict(self): 60 | data = {'rank': self.rank, 'range': range(3 + self.rank)} 61 | sresult = self.scomm.allreduce(data, 'sum') 62 | presult = self.pcomm.allreduce(data, 'sum') 63 | self.assertEqual(sresult, presult) 64 | 65 | def testSumArray(self): 66 | data = np.arange(5) 67 | sresult = self.scomm.allreduce(data, 'sum') 68 | presult = self.pcomm.allreduce(data, 'sum') 69 | self.assertEqual(sresult, presult) 70 | 71 | def testMaxInt(self): 72 | data = 13 + self.rank 73 | sresult = self.scomm.allreduce(data, 'max') 74 | presult = self.pcomm.allreduce(data, 'max') 75 | self.assertEqual(sresult, presult) 76 | 77 | def testMaxList(self): 78 | data = range(5 + self.rank) 79 | sresult = self.scomm.allreduce(data, 'max') 80 | presult = self.pcomm.allreduce(data, 'max') 81 | self.assertEqual(sresult, presult) 82 | 83 | def testMaxDict(self): 84 | data = {'rank': self.rank, 'range': range(3 + self.rank)} 85 | sresult = self.scomm.allreduce(data, 'max') 86 | presult = self.pcomm.allreduce(data, 'max') 87 | self.assertEqual(sresult, presult) 88 | 89 | def testMaxArray(self): 90 | data = np.arange(5 + self.rank) 91 | sresult = self.scomm.allreduce(data, 'max') 92 | presult = self.pcomm.allreduce(data, 'max') 93 | self.assertEqual(sresult, presult) 94 | 95 | def testPartitionInt(self): 96 | data = 13 + self.rank 97 | sresult = self.scomm.partition(data, func=Duplicate()) 98 | presult = self.pcomm.partition(data, func=Duplicate()) 99 | self.assertEqual(sresult, presult) 100 | 101 | def testPartitionIntInvolved(self): 102 | data = 13 + self.rank 103 | sresult = self.scomm.partition(data, func=Duplicate(), involved=True) 104 | presult = self.pcomm.partition(data, func=Duplicate(), involved=True) 105 | self.assertEqual(sresult, presult) 106 | 107 | def testPartitionList(self): 108 | data = range(5 + self.rank) 109 | sresult = self.scomm.partition(data, func=EqualStride()) 110 | presult = self.pcomm.partition(data, func=EqualStride()) 111 | 
self.assertEqual(sresult, presult) 112 | 113 | def testPartitionListInvolved(self): 114 | data = range(5 + self.rank) 115 | sresult = self.scomm.partition(data, func=EqualStride(), involved=True) 116 | presult = self.pcomm.partition(data, func=EqualStride(), involved=True) 117 | self.assertEqual(sresult, presult) 118 | 119 | def testPartitionArray(self): 120 | data = np.arange(2 + self.rank) 121 | sresult = self.scomm.partition(data) 122 | presult = self.pcomm.partition(data) 123 | self.assertEqual(sresult, presult) 124 | 125 | def testPartitionArrayInvolved(self): 126 | data = np.arange(2 + self.rank) 127 | sresult = self.scomm.partition(data, involved=True) 128 | presult = self.pcomm.partition(data, involved=True) 129 | np.testing.assert_array_equal(sresult, presult) 130 | 131 | def testPartitionStrArray(self): 132 | data = np.array([c for c in 'abcdefghijklmnopqrstuvwxyz']) 133 | sresult = self.scomm.partition(data) 134 | presult = self.pcomm.partition(data) 135 | self.assertEqual(sresult, presult) 136 | 137 | def testPartitionStrArrayInvolved(self): 138 | data = np.array([c for c in 'abcdefghijklmnopqrstuvwxyz']) 139 | sresult = self.scomm.partition(data, involved=True) 140 | presult = self.pcomm.partition(data, involved=True) 141 | np.testing.assert_array_equal(sresult, presult) 142 | 143 | def testRationError(self): 144 | data = 10 145 | self.assertRaises(RuntimeError, self.scomm.ration, data) 146 | self.assertRaises(RuntimeError, self.pcomm.ration, data) 147 | 148 | def testCollectError(self): 149 | data = 10 150 | self.assertRaises(RuntimeError, self.scomm.collect, data) 151 | self.assertRaises(RuntimeError, self.pcomm.collect, data) 152 | 153 | 154 | if __name__ == '__main__': 155 | try: 156 | from cStringIO import StringIO 157 | except ImportError: 158 | from io import StringIO 159 | 160 | mystream = StringIO() 161 | tests = unittest.TestLoader().loadTestsFromTestCase(SimpleCommP1STests) 162 | unittest.TextTestRunner(stream=mystream).run(tests) 163 | MPI_COMM_WORLD.Barrier() 164 | 165 | results = MPI_COMM_WORLD.gather(mystream.getvalue()) 166 | if MPI_COMM_WORLD.Get_rank() == 0: 167 | for rank, result in enumerate(results): 168 | print('RESULTS FOR RANK ' + str(rank) + ':') 169 | print(str(result)) 170 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
23 |
24 | help:
25 | 	@echo "Please use \`make <target>' where <target> is one of"
26 | 	@echo " html to make standalone HTML files"
27 | 	@echo " dirhtml to make HTML files named index.html in directories"
28 | 	@echo " singlehtml to make a single large HTML file"
29 | 	@echo " pickle to make pickle files"
30 | 	@echo " json to make JSON files"
31 | 	@echo " htmlhelp to make HTML files and a HTML help project"
32 | 	@echo " qthelp to make HTML files and a qthelp project"
33 | 	@echo " applehelp to make an Apple Help Book"
34 | 	@echo " devhelp to make HTML files and a Devhelp project"
35 | 	@echo " epub to make an epub"
36 | 	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
37 | 	@echo " latexpdf to make LaTeX files and run them through pdflatex"
38 | 	@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
39 | 	@echo " text to make text files"
40 | 	@echo " man to make manual pages"
41 | 	@echo " texinfo to make Texinfo files"
42 | 	@echo " info to make Texinfo files and run them through makeinfo"
43 | 	@echo " gettext to make PO message catalogs"
44 | 	@echo " changes to make an overview of all changed/added/deprecated items"
45 | 	@echo " xml to make Docutils-native XML files"
46 | 	@echo " pseudoxml to make pseudoxml-XML files for display purposes"
47 | 	@echo " linkcheck to check all external links for integrity"
48 | 	@echo " doctest to run all doctests embedded in the documentation (if enabled)"
49 | 	@echo " coverage to run coverage check of the documentation (if enabled)"
50 |
51 | clean:
52 | 	rm -rf $(BUILDDIR)/*
53 |
54 | html:
55 | 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
56 | 	@echo
57 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
58 |
59 | dirhtml:
60 | 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
61 | 	@echo
62 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
63 |
64 | singlehtml:
65 | 	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
66 | 	@echo
67 | 	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
68 |
69 | pickle:
70 | 	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
71 | 	@echo
72 | 	@echo "Build finished; now you can process the pickle files."
73 |
74 | json:
75 | 	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
76 | 	@echo
77 | 	@echo "Build finished; now you can process the JSON files."
78 |
79 | htmlhelp:
80 | 	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
81 | 	@echo
82 | 	@echo "Build finished; now you can run HTML Help Workshop with the" \
83 | 	".hhp project file in $(BUILDDIR)/htmlhelp."
84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ASAPTools.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ASAPTools.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/ASAPTools" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ASAPTools" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 
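
# A minimal sketch of a typical local docs build (assuming Sphinx and the
# sphinx_rtd_theme package are installed):
#
#     make clean html
#     # ...then open build/html/index.html in a browser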
173 |
174 | doctest:
175 | 	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
176 | 	@echo "Testing of doctests in the sources finished, look at the " \
177 | 	"results in $(BUILDDIR)/doctest/output.txt."
178 |
179 | coverage:
180 | 	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
181 | 	@echo "Testing of coverage in the sources finished, look at the " \
182 | 	"results in $(BUILDDIR)/coverage/python.txt."
183 |
184 | xml:
185 | 	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
186 | 	@echo
187 | 	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
188 |
189 | pseudoxml:
190 | 	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
191 | 	@echo
192 | 	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
193 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | 	set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
10 | set I18NSPHINXOPTS=%SPHINXOPTS% source
11 | if NOT "%PAPER%" == "" (
12 | 	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | 	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 |
16 | if "%1" == "" goto help
17 |
18 | if "%1" == "help" (
19 | 	:help
20 | 	echo.Please use `make ^<target^>` where ^<target^> is one of
21 | 	echo. html to make standalone HTML files
22 | 	echo. dirhtml to make HTML files named index.html in directories
23 | 	echo. singlehtml to make a single large HTML file
24 | 	echo. pickle to make pickle files
25 | 	echo. json to make JSON files
26 | 	echo. htmlhelp to make HTML files and a HTML help project
27 | 	echo. qthelp to make HTML files and a qthelp project
28 | 	echo. devhelp to make HTML files and a Devhelp project
29 | 	echo. epub to make an epub
30 | 	echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | 	echo. text to make text files
32 | 	echo. man to make manual pages
33 | 	echo. texinfo to make Texinfo files
34 | 	echo. gettext to make PO message catalogs
35 | 	echo. changes to make an overview over all changed/added/deprecated items
36 | 	echo. xml to make Docutils-native XML files
37 | 	echo. pseudoxml to make pseudoxml-XML files for display purposes
38 | 	echo. linkcheck to check all external links for integrity
39 | 	echo. doctest to run all doctests embedded in the documentation if enabled
40 | 	echo. coverage to run coverage check of the documentation if enabled
41 | 	goto end
42 | )
43 |
44 | if "%1" == "clean" (
45 | 	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
46 | 	del /q /s %BUILDDIR%\*
47 | 	goto end
48 | )
49 |
50 |
51 | REM Check if sphinx-build is available and fallback to Python version if any
52 | %SPHINXBUILD% 2> nul
53 | if errorlevel 9009 goto sphinx_python
54 | goto sphinx_ok
55 |
56 | :sphinx_python
57 |
58 | set SPHINXBUILD=python -m sphinx.__init__
59 | %SPHINXBUILD% 2> nul
60 | if errorlevel 9009 (
61 | 	echo.
62 | 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
63 | 	echo.installed, then set the SPHINXBUILD environment variable to point
64 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
65 | 	echo.may add the Sphinx directory to PATH.
66 | 	echo.
67 | 	echo.If you don't have Sphinx installed, grab it from
68 | 	echo.http://sphinx-doc.org/
69 | 	exit /b 1
70 | )
71 |
72 | :sphinx_ok
73 |
74 |
75 | if "%1" == "html" (
76 | 	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
77 | 	if errorlevel 1 exit /b 1
78 | 	echo.
79 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
80 | 	goto end
81 | )
82 |
83 | if "%1" == "dirhtml" (
84 | 	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
85 | 	if errorlevel 1 exit /b 1
86 | 	echo.
87 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
88 | 	goto end
89 | )
90 |
91 | if "%1" == "singlehtml" (
92 | 	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
93 | 	if errorlevel 1 exit /b 1
94 | 	echo.
95 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
96 | 	goto end
97 | )
98 |
99 | if "%1" == "pickle" (
100 | 	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
101 | 	if errorlevel 1 exit /b 1
102 | 	echo.
103 | 	echo.Build finished; now you can process the pickle files.
104 | 	goto end
105 | )
106 |
107 | if "%1" == "json" (
108 | 	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
109 | 	if errorlevel 1 exit /b 1
110 | 	echo.
111 | 	echo.Build finished; now you can process the JSON files.
112 | 	goto end
113 | )
114 |
115 | if "%1" == "htmlhelp" (
116 | 	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
117 | 	if errorlevel 1 exit /b 1
118 | 	echo.
119 | 	echo.Build finished; now you can run HTML Help Workshop with the ^
120 | .hhp project file in %BUILDDIR%/htmlhelp.
121 | 	goto end
122 | )
123 |
124 | if "%1" == "qthelp" (
125 | 	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
126 | 	if errorlevel 1 exit /b 1
127 | 	echo.
128 | 	echo.Build finished; now you can run "qcollectiongenerator" with the ^
129 | .qhcp project file in %BUILDDIR%/qthelp, like this:
130 | 	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\ASAPTools.qhcp
131 | 	echo.To view the help file:
132 | 	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\ASAPTools.qhc
133 | 	goto end
134 | )
135 |
136 | if "%1" == "devhelp" (
137 | 	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
138 | 	if errorlevel 1 exit /b 1
139 | 	echo.
140 | 	echo.Build finished.
141 | 	goto end
142 | )
143 |
144 | if "%1" == "epub" (
145 | 	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
146 | 	if errorlevel 1 exit /b 1
147 | 	echo.
148 | 	echo.Build finished. The epub file is in %BUILDDIR%/epub.
149 | 	goto end
150 | )
151 |
152 | if "%1" == "latex" (
153 | 	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
154 | 	if errorlevel 1 exit /b 1
155 | 	echo.
156 | 	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
157 | 	goto end
158 | )
159 |
160 | if "%1" == "latexpdf" (
161 | 	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
162 | 	cd %BUILDDIR%/latex
163 | 	make all-pdf
164 | 	cd %~dp0
165 | 	echo.
166 | 	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
167 | 	goto end
168 | )
169 |
170 | if "%1" == "latexpdfja" (
171 | 	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
172 | 	cd %BUILDDIR%/latex
173 | 	make all-pdf-ja
174 | 	cd %~dp0
175 | 	echo.
176 | 	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
177 | 	goto end
178 | )
179 |
180 | if "%1" == "text" (
181 | 	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
182 | 	if errorlevel 1 exit /b 1
183 | 	echo.
184 | 	echo.Build finished. The text files are in %BUILDDIR%/text.
185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # ASAPTools documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Mar 31 11:35:43 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import os 16 | import sys 17 | 18 | sys.path.insert(0, os.path.abspath('..')) 19 | import asaptools 20 | 21 | # If extensions (or modules to document with autodoc) are in another directory, 22 | # add these directories to sys.path here. If the directory is relative to the 23 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
24 | # sys.path.insert(0, os.path.abspath('.')) 25 | 26 | # -- General configuration ------------------------------------------------ 27 | 28 | # If your documentation needs a minimal Sphinx version, state it here. 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [ 35 | 'sphinx.ext.autodoc', 36 | 'sphinx.ext.viewcode', 37 | 'sphinx.ext.napoleon', 38 | 'sphinx_copybutton', 39 | ] 40 | 41 | # Add any paths that contain templates here, relative to this directory. 42 | templates_path = ['_templates'] 43 | 44 | # The suffix(es) of source filenames. 45 | # You can specify multiple suffix as a list of string: 46 | # source_suffix = ['.rst', '.md'] 47 | source_suffix = '.rst' 48 | 49 | # The encoding of source files. 50 | # source_encoding = 'utf-8-sig' 51 | 52 | # The master toctree document. 53 | master_doc = 'index' 54 | 55 | # General information about the project. 56 | project = u'ASAPTools' 57 | copyright = u'2015, University Corporation for Atmospheric Research' 58 | author = u'Kevin Paul, John Dennis, Sheri Mickelson, Haiying Xu' 59 | 60 | # The version info for the project you're documenting, acts as replacement for 61 | # |version| and |release|, also used in various other places throughout the 62 | # built documents. 63 | # 64 | 65 | # The short X.Y version. 66 | version = asaptools.__version__ 67 | # The full version, including alpha/beta/rc tags. 68 | release = asaptools.__version__ 69 | 70 | # The language for content autogenerated by Sphinx. Refer to documentation 71 | # for a list of supported languages. 72 | # 73 | # This is also used if you do content translation via gettext catalogs. 74 | # Usually you set "language" from the command line for these cases. 75 | language = None 76 | 77 | # There are two options for replacing |today|: either, you set today to some 78 | # non-false value, then it is used: 79 | # today = '' 80 | # Else, today_fmt is used as the format for a strftime call. 81 | # today_fmt = '%B %d, %Y' 82 | 83 | # List of patterns, relative to source directory, that match files and 84 | # directories to ignore when looking for source files. 85 | exclude_patterns = [] 86 | 87 | # The reST default role (used for this markup: `text`) to use for all 88 | # documents. 89 | # default_role = None 90 | 91 | # If true, '()' will be appended to :func: etc. cross-reference text. 92 | # add_function_parentheses = True 93 | 94 | # If true, the current module name will be prepended to all description 95 | # unit titles (such as .. function::). 96 | # add_module_names = True 97 | 98 | # If true, sectionauthor and moduleauthor directives will be shown in the 99 | # output. They are ignored by default. 100 | # show_authors = False 101 | 102 | # The name of the Pygments (syntax highlighting) style to use. 103 | pygments_style = 'sphinx' 104 | 105 | # A list of ignored prefixes for module index sorting. 106 | # modindex_common_prefix = [] 107 | 108 | # If true, keep warnings as "system message" paragraphs in the built documents. 109 | # keep_warnings = False 110 | 111 | # If true, `todo` and `todoList` produce output, else they produce nothing. 112 | todo_include_todos = False 113 | 114 | 115 | # -- Options for HTML output ---------------------------------------------- 116 | 117 | # The theme to use for HTML and HTML Help pages. See the documentation for 118 | # a list of builtin themes. 
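# (Note: sphinx_rtd_theme is distributed as a separate package; it must be
# installed in the environment that runs sphinx-build for this theme to load.)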
119 | html_theme = 'sphinx_rtd_theme'
120 |
121 | # Theme options are theme-specific and customize the look and feel of a theme
122 | # further. For a list of options available for each theme, see the
123 | # documentation.
124 | # html_theme_options = {}
125 |
126 | # Add any paths that contain custom themes here, relative to this directory.
127 | # html_theme_path = []
128 |
129 | # The name for this set of Sphinx documents. If None, it defaults to
130 | # "<project> v<release> documentation".
131 | # html_title = "The ASAP Python Toolbox Documentation (Version 0.4)"
132 |
133 | # A shorter title for the navigation bar. Default is the same as html_title.
134 | # html_short_title = None
135 |
136 | # The name of an image file (relative to this directory) to place at the top
137 | # of the sidebar.
138 | # html_logo = None
139 |
140 | # The name of an image file (within the static path) to use as favicon of the
141 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
142 | # pixels large.
143 | # html_favicon = None
144 |
145 | # Add any paths that contain custom static files (such as style sheets) here,
146 | # relative to this directory. They are copied after the builtin static files,
147 | # so a file named "default.css" will overwrite the builtin "default.css".
148 | html_static_path = ['_static']
149 |
150 | # Add any extra paths that contain custom files (such as robots.txt or
151 | # .htaccess) here, relative to this directory. These files are copied
152 | # directly to the root of the documentation.
153 | # html_extra_path = []
154 |
155 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
156 | # using the given strftime format.
157 | # html_last_updated_fmt = '%b %d, %Y'
158 |
159 | # If true, SmartyPants will be used to convert quotes and dashes to
160 | # typographically correct entities.
161 | # html_use_smartypants = True
162 |
163 | # Custom sidebar templates, maps document names to template names.
164 | # html_sidebars = {}
165 |
166 | # Additional templates that should be rendered to pages, maps page names to
167 | # template names.
168 | # html_additional_pages = {}
169 |
170 | # If false, no module index is generated.
171 | # html_domain_indices = True
172 |
173 | # If false, no index is generated.
174 | # html_use_index = True
175 |
176 | # If true, the index is split into individual pages for each letter.
177 | # html_split_index = False
178 |
179 | # If true, links to the reST sources are added to the pages.
180 | # html_show_sourcelink = True
181 |
182 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
183 | # html_show_sphinx = True
184 |
185 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
186 | # html_show_copyright = True
187 |
188 | # If true, an OpenSearch description file will be output, and all pages will
189 | # contain a <link> tag referring to it. The value of this option must be the
190 | # base URL from which the finished HTML is served.
191 | # html_use_opensearch = ''
192 |
193 | # This is the file name suffix for HTML files (e.g. ".xhtml").
194 | # html_file_suffix = None
195 |
196 | # Language to be used for generating the HTML full-text search index.
197 | # Sphinx supports the following languages:
198 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
199 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
200 | # html_search_language = 'en'
201 |
202 | # A dictionary with options for the search language support, empty by default.
203 | # Now only 'ja' uses this config value 204 | # html_search_options = {'type': 'default'} 205 | 206 | # The name of a javascript file (relative to the configuration directory) that 207 | # implements a search results scorer. If empty, the default will be used. 208 | # html_search_scorer = 'scorer.js' 209 | 210 | # Output file base name for HTML help builder. 211 | htmlhelp_basename = 'ASAPToolsdoc' 212 | 213 | # -- Options for LaTeX output --------------------------------------------- 214 | 215 | latex_elements = { 216 | # The paper size ('letterpaper' or 'a4paper'). 217 | # 'papersize': 'letterpaper', 218 | # The font size ('10pt', '11pt' or '12pt'). 219 | # 'pointsize': '10pt', 220 | # Additional stuff for the LaTeX preamble. 221 | # 'preamble': '', 222 | # Latex figure (float) alignment 223 | # 'figure_align': 'htbp', 224 | } 225 | 226 | # Grouping the document tree into LaTeX files. List of tuples 227 | # (source start file, target name, title, 228 | # author, documentclass [howto, manual, or own class]). 229 | latex_documents = [ 230 | ( 231 | master_doc, 232 | 'ASAPTools.tex', 233 | u'The ASAP Python Toolbox Documentation', 234 | u'Kevin Paul, John Dennis, Sheri Mickelson, Haiying Xu', 235 | 'manual', 236 | ), 237 | ] 238 | 239 | # The name of an image file (relative to this directory) to place at the top of 240 | # the title page. 241 | # latex_logo = None 242 | 243 | # For "manual" documents, if this is true, then toplevel headings are parts, 244 | # not chapters. 245 | # latex_use_parts = False 246 | 247 | # If true, show page references after internal links. 248 | # latex_show_pagerefs = False 249 | 250 | # If true, show URL addresses after external links. 251 | # latex_show_urls = False 252 | 253 | # Documents to append as an appendix to all manuals. 254 | # latex_appendices = [] 255 | 256 | # If false, no module index is generated. 257 | # latex_domain_indices = True 258 | 259 | 260 | # -- Options for manual page output --------------------------------------- 261 | 262 | # One entry per manual page. List of tuples 263 | # (source start file, name, description, authors, manual section). 264 | man_pages = [(master_doc, 'asaptools', u'The ASAP Python Toolbox Documentation', [author], 1)] 265 | 266 | # If true, show URL addresses after external links. 267 | # man_show_urls = False 268 | 269 | 270 | # -- Options for Texinfo output ------------------------------------------- 271 | 272 | # Grouping the document tree into Texinfo files. List of tuples 273 | # (source start file, target name, title, author, 274 | # dir menu entry, description, category) 275 | texinfo_documents = [ 276 | ( 277 | master_doc, 278 | 'ASAPTools', 279 | u'The ASAP Python Toolbox Documentation', 280 | author, 281 | 'ASAPTools', 282 | 'One line description of project.', 283 | 'Miscellaneous', 284 | ), 285 | ] 286 | 287 | # Documents to append as an appendix to all manuals. 288 | # texinfo_appendices = [] 289 | 290 | # If false, no module index is generated. 291 | # texinfo_domain_indices = True 292 | 293 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 294 | # texinfo_show_urls = 'footnote' 295 | 296 | # If true, do not generate a @detailmenu in the "Top" node's menu. 
297 | # texinfo_no_detailmenu = False 298 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/asaptools/partition.py:
--------------------------------------------------------------------------------
1 | """
2 | A module for data partitioning functions.
3 |
4 | This provides a collection of 'partitioning' functions. A partitioning
5 | function is a three-argument function that takes, as the first argument, a
6 | given data object, as the second argument, the index of the requested
7 | partition and, as the third argument, the total number of partitions. The
8 | partitioning function returns the subset of the data corresponding to that index.
9 |
10 | By design, partitioning functions should keep the data "unchanged" except for
11 | subselecting parts of the data.
12 |
13 |
14 | Copyright 2020 University Corporation for Atmospheric Research
15 |
16 | Licensed under the Apache License, Version 2.0 (the "License");
17 | you may not use this file except in compliance with the License.
18 | You may obtain a copy of the License at
19 |
20 | http://www.apache.org/licenses/LICENSE-2.0
21 |
22 | Unless required by applicable law or agreed to in writing, software
23 | distributed under the License is distributed on an "AS IS" BASIS,
24 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25 | See the License for the specific language governing permissions and
26 | limitations under the License.
27 | """
28 |
29 | from abc import ABCMeta, abstractmethod
30 | from operator import itemgetter
31 |
32 |
33 | class PartitionFunction(object):
34 |
35 |     """
36 |     The abstract base-class for all Partitioning Function objects.
37 |
38 |     A PartitionFunction object is one with a __call__ method that takes
39 |     three arguments. The first argument is the data to be partitioned, the
40 |     second argument is the index of the partition (or part) requested, and
41 |     the third argument is the number of partitions to assume when dividing
42 |     the data.
43 |     """
44 |
45 |     __metaclass__ = ABCMeta
46 |
47 |     @staticmethod
48 |     def _check_types(data, index, size):
49 |         """
50 |         Check the types of the index and size arguments.
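
        These checks are shared by all of the partitioning functions below;
        each __call__ implementation invokes this method before partitioning.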
51 |
52 |         Parameters:
53 |             data: The data to be partitioned
54 |             index (int): The index of the partition to return
55 |             size (int): The number of partitions to make
56 |
57 |         Raises:
58 |             TypeError: The size or index arguments are not int
59 |             IndexError: The size argument is less than 1, or the index
60 |                 argument is less than 0 or greater than or equal to size
61 |         """
62 |
63 |         # Check the type of the index
64 |         if type(index) is not int:
65 |             raise TypeError('Partition index must be an integer')
66 |
67 |         # Check the value of index
68 |         if index > size - 1 or index < 0:
69 |             raise IndexError('Partition index out of bounds')
70 |
71 |         # Check the type of the size
72 |         if type(size) is not int:
73 |             raise TypeError('Partition size must be an integer')
74 |
75 |         # Check the value of size
76 |         if size < 1:
77 |             raise IndexError('Partition size less than 1 is invalid')
78 |
79 |     @staticmethod
80 |     def _is_indexable(data):
81 |         """
82 |         Check if the data object is indexable.
83 |
84 |         Parameters:
85 |             data: The data to be partitioned
86 |
87 |         Returns:
88 |             bool: True, if data is an indexable object. False, otherwise.
89 |         """
90 |         if hasattr(data, '__len__') and hasattr(data, '__getitem__'):
91 |             return True
92 |         else:
93 |             return False
94 |
95 |     @staticmethod
96 |     def _are_pairs(data):
97 |         """
98 |         Check if the data object is an indexable list of pairs.
99 |
100 |         Parameters:
101 |             data: The data to be partitioned
102 |
103 |         Returns:
104 |             bool: True, if data is an indexable list of pairs.
105 |                 False, otherwise.
106 |         """
107 |         if PartitionFunction._is_indexable(data):
108 |             return all(map(lambda i: PartitionFunction._is_indexable(i) and len(i) == 2, data))
109 |         else:
110 |             return False
111 |
112 |     @abstractmethod
113 |     def __call__(self):
114 |         """
115 |         Implements the partition algorithm.
116 |         """
117 |         return
118 |
119 |
120 | class Duplicate(PartitionFunction):
121 |
122 |     """
123 |     Return a copy of the original input data in each partition.
124 |     """
125 |
126 |     def __call__(self, data, index=0, size=1):
127 |         """
128 |         Define the common interface for all partitioning functions.
129 |
130 |         The abstract base class implements the check on the input for correct
131 |         format and typing.
132 |
133 |         Parameters:
134 |             data: The data to be partitioned
135 |
136 |         Keyword Arguments:
137 |             index (int): A partition index into a part of the data
138 |             size (int): The largest number of partitions allowed
139 |
140 |         Returns:
141 |             The indexed part of the data, assuming the data is divided into
142 |             size parts.
143 |         """
144 |         self._check_types(data, index, size)
145 |
146 |         return data
147 |
148 |
149 | class EqualLength(PartitionFunction):
150 |
151 |     """
152 |     Partition an indexable object by chopping the data into roughly equal lengths.
153 |
154 |     The initial object is "chopped" along its length into roughly equal length
155 |     sublists. If the partition size is greater than the length of the input
156 |     data, then it will return an empty list for 'empty' partitions. If the
157 |     data is not indexable, then it will return the data for index=0 only, and
158 |     an empty list otherwise.
159 |     """
160 |
161 |     def __call__(self, data, index=0, size=1):
162 |         """
163 |         Define the common interface for all partitioning functions.
164 |
165 |         The abstract base class implements the check on the input for correct
166 |         format and typing.
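
        For example, EqualLength()(list(range(5)), index=0, size=2) returns
        [0, 1, 2], and index=1 returns [3, 4].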
167 |
168 |         Parameters:
169 |             data: The data to be partitioned
170 |
171 |         Keyword Arguments:
172 |             index (int): A partition index into a part of the data
173 |             size (int): The largest number of partitions allowed
174 |
175 |         Returns:
176 |             The indexed part of the data, assuming the data is divided into
177 |             size parts.
178 |         """
179 |         self._check_types(data, index, size)
180 |
181 |         if self._is_indexable(data):
182 |             (lenpart, remdata) = divmod(len(data), size)
183 |             psizes = [lenpart] * size
184 |             for i in range(remdata):
185 |                 psizes[i] += 1
186 |             ibeg = 0
187 |             for i in range(index):
188 |                 ibeg += psizes[i]
189 |             iend = ibeg + psizes[index]
190 |             return data[ibeg:iend]
191 |         else:
192 |             if index == 0:
193 |                 return [data]
194 |             else:
195 |                 return []
196 |
197 |
198 | class EqualStride(PartitionFunction):
199 |
200 |     """
201 |     Partition an indexable object by striding through the data.
202 |
203 |     This returns a sublist of an indexable object by "striding" through the
204 |     data in steps equal to the partition size. If the partition size is
205 |     greater than the length of the input data, then it will return an empty
206 |     list for "empty" partitions. If the data is not indexable, then it will
207 |     return the data for index=0 only, and an empty list otherwise.
208 |     """
209 |
210 |     def __call__(self, data, index=0, size=1):
211 |         """
212 |         Define the common interface for all partitioning functions.
213 |
214 |         The abstract base class implements the check on the input for correct
215 |         format and typing.
216 |
217 |         Parameters:
218 |             data: The data to be partitioned
219 |
220 |         Keyword Arguments:
221 |             index (int): A partition index into a part of the data
222 |             size (int): The largest number of partitions allowed
223 |
224 |         Returns:
225 |             The indexed part of the data, assuming the data is divided into
226 |             size parts.
227 |         """
228 |         self._check_types(data, index, size)
229 |
230 |         if self._is_indexable(data):
231 |             if index < len(data):
232 |                 return data[index::size]
233 |             else:
234 |                 return []
235 |         else:
236 |             if index == 0:
237 |                 return [data]
238 |             else:
239 |                 return []
240 |
241 |
242 | class SortedStride(PartitionFunction):
243 |
244 |     """
245 |     Partition an indexable list of pairs by striding through sorted data.
246 |
247 |     The first index of each pair is assumed to be an item of data (which will
248 |     be partitioned), and the second index in each pair is assumed to be a
249 |     numeric weight. The pairs are first sorted by weight, and then partitions
250 |     are returned by striding through the sorted data.
251 |
252 |     The results are partitions of roughly equal length and roughly equal
253 |     total weight. However, equal length is prioritized over total weight.
254 |     """
255 |
256 |     def __call__(self, data, index=0, size=1):
257 |         """
258 |         Define the common interface for all partitioning functions.
259 |
260 |         The abstract base class implements the check on the input for correct
261 |         format and typing.
262 |
263 |         Parameters:
264 |             data: The data to be partitioned
265 |
266 |         Keyword Arguments:
267 |             index (int): A partition index into a part of the data
268 |             size (int): The largest number of partitions allowed
269 |
270 |         Returns:
271 |             The indexed part of the data, assuming the data is divided into
272 |             size parts.
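
        For example, with data=[('a', 3), ('b', 1), ('c', 2)], the items are
        first sorted by weight to ['b', 'c', 'a']; index=0 of size=2 then
        yields ['b', 'a'] and index=1 yields ['c'].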
273 | """ 274 | self._check_types(data, index, size) 275 | 276 | if self._are_pairs(data): 277 | subdata = [q[0] for q in sorted(data, key=itemgetter(1))] 278 | return EqualStride()(subdata, index=index, size=size) 279 | else: 280 | return EqualStride()(data, index=index, size=size) 281 | 282 | 283 | class WeightBalanced(PartitionFunction): 284 | 285 | """ 286 | Partition an indexable list of pairs by balancing the total weight. 287 | 288 | The first index of each pair is assumed to be an item of data (which will 289 | be partitioned), and the second index in each pair is assumed to be a 290 | numeric weight. The data items are grouped via a "greedy" binning 291 | algorithm into partitions of roughly equal total weight. 292 | 293 | The results are partitions of roughly equal length and roughly equal 294 | total weight. However, equal total weight is prioritized over length. 295 | 296 | """ 297 | 298 | def __call__(self, data, index=0, size=1): 299 | """ 300 | Define the common interface for all partitioning functions. 301 | 302 | The abstract base class implements the check on the input for correct 303 | format and typing. 304 | 305 | Parameters: 306 | data: The data to be partitioned 307 | 308 | Keyword Arguments: 309 | index (int): A partition index into a part of the data 310 | size (int): The largest number of partitions allowed 311 | 312 | Returns: 313 | The indexed part of the data, assuming the data is divided into 314 | size parts. 315 | """ 316 | self._check_types(data, index, size) 317 | 318 | if self._are_pairs(data): 319 | sorted_pairs = sorted(data, key=itemgetter(1), reverse=True) 320 | partition = [] 321 | weights = [0] * size 322 | for (item, weight) in sorted_pairs: 323 | k = min(enumerate(weights), key=itemgetter(1))[0] 324 | if k == index: 325 | partition.append(item) 326 | weights[k] += weight 327 | return partition 328 | else: 329 | return EqualStride()(data, index=index, size=size) 330 | -------------------------------------------------------------------------------- /asaptools/tests/simpleCommParDivTests.py: -------------------------------------------------------------------------------- 1 | """ 2 | Parallel Tests with communicator division for the SimpleComm class 3 | 4 | Copyright 2017, University Corporation for Atmospheric Research 5 | See the LICENSE.txt file for details 6 | """ 7 | 8 | from __future__ import print_function 9 | 10 | import unittest 11 | 12 | from mpi4py import MPI 13 | 14 | from asaptools import simplecomm 15 | from asaptools.partition import Duplicate, EqualStride 16 | 17 | MPI_COMM_WORLD = MPI.COMM_WORLD 18 | 19 | 20 | class SimpleCommParDivTests(unittest.TestCase): 21 | def setUp(self): 22 | self.gcomm = simplecomm.create_comm() 23 | self.gsize = MPI_COMM_WORLD.Get_size() 24 | self.grank = MPI_COMM_WORLD.Get_rank() 25 | 26 | self.groups = ['a', 'b', 'c'] 27 | 28 | self.rank = int(self.grank // len(self.groups)) 29 | self.color = int(self.grank % len(self.groups)) 30 | self.group = self.groups[self.color] 31 | 32 | self.monocomm, self.multicomm = self.gcomm.divide(self.group) 33 | 34 | self.all_colors = [i % len(self.groups) for i in range(self.gsize)] 35 | self.all_groups = [self.groups[i] for i in self.all_colors] 36 | self.all_ranks = [int(i // len(self.groups)) for i in range(self.gsize)] 37 | 38 | def testGlobalRanksMatch(self): 39 | actual = self.gcomm.get_rank() 40 | expected = self.grank 41 | self.assertEqual(actual, expected) 42 | 43 | def testMonoGetRank(self): 44 | actual = self.monocomm.get_rank() 45 | expected = self.rank 46 | 
self.assertEqual(actual, expected) 47 | 48 | def testMultiGetRank(self): 49 | actual = self.multicomm.get_rank() 50 | expected = self.color 51 | self.assertEqual(actual, expected) 52 | 53 | def testMonoGetSize(self): 54 | actual = self.monocomm.get_size() 55 | expected = self.all_colors.count(self.color) 56 | self.assertEqual(actual, expected) 57 | 58 | def testMultiGetSize(self): 59 | actual = self.multicomm.get_size() 60 | expected = self.all_ranks.count(self.rank) 61 | self.assertEqual(actual, expected) 62 | 63 | def testMonoIsManager(self): 64 | actual = self.monocomm.is_manager() 65 | expected = self.rank == 0 66 | self.assertEqual(actual, expected) 67 | 68 | def testMultiIsManager(self): 69 | actual = self.multicomm.is_manager() 70 | expected = self.color == 0 71 | self.assertEqual(actual, expected) 72 | 73 | def testMonoSumInt(self): 74 | data = self.color + 1 75 | actual = self.monocomm.allreduce(data, 'sum') 76 | expected = self.monocomm.get_size() * data 77 | self.assertEqual(actual, expected) 78 | 79 | def testMultiSumInt(self): 80 | data = self.rank + 1 81 | actual = self.multicomm.allreduce(data, 'sum') 82 | expected = self.multicomm.get_size() * data 83 | self.assertEqual(actual, expected) 84 | 85 | def testMonoSumList(self): 86 | data = list(range(5)) 87 | actual = self.monocomm.allreduce(data, 'sum') 88 | expected = self.monocomm.get_size() * sum(data) 89 | self.assertEqual(actual, expected) 90 | 91 | def testMultiSumList(self): 92 | data = list(range(5)) 93 | actual = self.multicomm.allreduce(data, 'sum') 94 | expected = self.multicomm.get_size() * sum(data) 95 | self.assertEqual(actual, expected) 96 | 97 | def testMonoSumDict(self): 98 | data = {'a': list(range(3)), 'b': [5, 7]} 99 | actual = self.monocomm.allreduce(data, 'sum') 100 | expected = { 101 | 'a': self.monocomm.get_size() * sum(range(3)), 102 | 'b': self.monocomm.get_size() * sum([5, 7]), 103 | } 104 | self.assertEqual(actual, expected) 105 | 106 | def testMultiSumDict(self): 107 | data = {'a': list(range(3)), 'b': [5, 7]} 108 | actual = self.multicomm.allreduce(data, 'sum') 109 | expected = { 110 | 'a': self.multicomm.get_size() * sum(range(3)), 111 | 'b': self.multicomm.get_size() * sum([5, 7]), 112 | } 113 | self.assertEqual(actual, expected) 114 | 115 | def testMonoPartitionInt(self): 116 | data = self.grank 117 | actual = self.monocomm.partition(data, func=Duplicate()) 118 | if self.monocomm.is_manager(): 119 | expected = None 120 | else: 121 | expected = self.color # By chance! 122 | self.assertEqual(actual, expected) 123 | 124 | def testMultiPartitionInt(self): 125 | data = self.grank 126 | actual = self.multicomm.partition(data, func=Duplicate()) 127 | if self.multicomm.is_manager(): 128 | expected = None 129 | else: 130 | expected = self.rank * len(self.groups) 131 | self.assertEqual(actual, expected) 132 | 133 | def testMonoPartitionIntInvolved(self): 134 | data = self.grank 135 | actual = self.monocomm.partition(data, func=Duplicate(), involved=True) 136 | expected = self.color # By chance! 
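        # ("By chance" because a mono manager's global rank equals its color:
        # grank = rank * len(groups) + color, and the manager has rank 0.)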
137 | self.assertEqual(actual, expected) 138 | 139 | def testMultiPartitionIntInvolved(self): 140 | data = self.grank 141 | actual = self.multicomm.partition(data, func=Duplicate(), involved=True) 142 | expected = self.rank * len(self.groups) 143 | self.assertEqual(actual, expected) 144 | 145 | def testMonoPartitionList(self): 146 | if self.monocomm.is_manager(): 147 | data = list(range(10 + self.grank)) 148 | else: 149 | data = None 150 | actual = self.monocomm.partition(data) 151 | if self.monocomm.is_manager(): 152 | expected = None 153 | else: 154 | expected = list(range(self.rank - 1, 10 + self.color, self.monocomm.get_size() - 1)) 155 | self.assertEqual(actual, expected) 156 | 157 | def testMultiPartitionList(self): 158 | if self.multicomm.is_manager(): 159 | data = list(range(10 + self.grank)) 160 | else: 161 | data = None 162 | actual = self.multicomm.partition(data) 163 | if self.multicomm.is_manager(): 164 | expected = None 165 | else: 166 | expected = list( 167 | range( 168 | self.color - 1, 169 | 10 + self.rank * len(self.groups), 170 | self.multicomm.get_size() - 1, 171 | ) 172 | ) 173 | self.assertEqual(actual, expected) 174 | 175 | def testMonoPartitionListInvolved(self): 176 | if self.monocomm.is_manager(): 177 | data = list(range(10 + self.grank)) 178 | else: 179 | data = None 180 | actual = self.monocomm.partition(data, func=EqualStride(), involved=True) 181 | expected = list(range(self.rank, 10 + self.color, self.monocomm.get_size())) 182 | self.assertEqual(actual, expected) 183 | 184 | def testMultiPartitionListInvolved(self): 185 | if self.multicomm.is_manager(): 186 | data = list(range(10 + self.grank)) 187 | else: 188 | data = None 189 | actual = self.multicomm.partition(data, func=EqualStride(), involved=True) 190 | expected = list( 191 | range(self.color, 10 + self.rank * len(self.groups), self.multicomm.get_size()) 192 | ) 193 | self.assertEqual(actual, expected) 194 | 195 | def testMonoCollectInt(self): 196 | if self.monocomm.is_manager(): 197 | data = None 198 | actual = [self.monocomm.collect() for _ in range(1, self.monocomm.get_size())] 199 | expected = [ 200 | i 201 | for i in enumerate( 202 | range(len(self.groups) + self.color, self.gsize, len(self.groups)), 203 | 1, 204 | ) 205 | ] 206 | else: 207 | data = self.grank 208 | actual = self.monocomm.collect(data) 209 | expected = None 210 | self.monocomm.sync() 211 | if self.monocomm.is_manager(): 212 | for a in actual: 213 | self.assertTrue(a in expected) 214 | else: 215 | self.assertEqual(actual, expected) 216 | 217 | def testMultiCollectInt(self): 218 | if self.multicomm.is_manager(): 219 | data = None 220 | actual = [self.multicomm.collect() for _ in range(1, self.multicomm.get_size())] 221 | expected = [ 222 | i 223 | for i in enumerate( 224 | [j + self.rank * len(self.groups) for j in range(1, self.multicomm.get_size())], 225 | 1, 226 | ) 227 | ] 228 | else: 229 | data = self.grank 230 | actual = self.multicomm.collect(data) 231 | expected = None 232 | self.multicomm.sync() 233 | if self.multicomm.is_manager(): 234 | for a in actual: 235 | self.assertTrue(a in expected) 236 | else: 237 | self.assertEqual(actual, expected) 238 | 239 | def testMonoCollectList(self): 240 | if self.monocomm.is_manager(): 241 | data = None 242 | actual = [self.monocomm.collect() for _ in range(1, self.monocomm.get_size())] 243 | expected = [ 244 | (i, list(range(x))) 245 | for i, x in enumerate( 246 | range(len(self.groups) + self.color, self.gsize, len(self.groups)), 247 | 1, 248 | ) 249 | ] 250 | else: 251 | data = 
list(range(self.grank))
252 |             actual = self.monocomm.collect(data)
253 |             expected = None
254 |         self.monocomm.sync()
255 |         if self.monocomm.is_manager():
256 |             for a in actual:
257 |                 self.assertTrue(a in expected)
258 |         else:
259 |             self.assertEqual(actual, expected)
260 | 
261 |     def testMultiCollectList(self):
262 |         if self.multicomm.is_manager():
263 |             data = None
264 |             actual = [self.multicomm.collect() for _ in range(1, self.multicomm.get_size())]
265 |             expected = [
266 |                 (i, list(range(x)))
267 |                 for (i, x) in enumerate(
268 |                     [j + self.rank * len(self.groups) for j in range(1, self.multicomm.get_size())],
269 |                     1,
270 |                 )
271 |             ]
272 |         else:
273 |             data = list(range(self.grank))
274 |             actual = self.multicomm.collect(data)
275 |             expected = None
276 |         self.multicomm.sync()
277 |         if self.multicomm.is_manager():
278 |             for a in actual:
279 |                 self.assertTrue(a in expected)
280 |         else:
281 |             self.assertEqual(actual, expected)
282 | 
283 |     def testMonoRationInt(self):
284 |         if self.monocomm.is_manager():
285 |             data = [100 * self.color + i for i in range(1, self.monocomm.get_size())]
286 |             actual = [self.monocomm.ration(d) for d in data]
287 |             expected = [None] * (self.monocomm.get_size() - 1)
288 |         else:
289 |             data = None
290 |             actual = self.monocomm.ration()
291 |             expected = [100 * self.color + i for i in range(1, self.monocomm.get_size())]
292 |         self.monocomm.sync()
293 |         if self.monocomm.is_manager():
294 |             self.assertEqual(actual, expected)
295 |         else:
296 |             self.assertTrue(actual in expected)
297 | 
298 |     def testMultiRationInt(self):
299 |         if self.multicomm.is_manager():
300 |             data = [100 * self.rank + i for i in range(1, self.multicomm.get_size())]
301 |             actual = [self.multicomm.ration(d) for d in data]
302 |             expected = [None] * (self.multicomm.get_size() - 1)
303 |         else:
304 |             data = None
305 |             actual = self.multicomm.ration()
306 |             expected = [100 * self.rank + i for i in range(1, self.multicomm.get_size())]
307 |         self.multicomm.sync()
308 |         if self.multicomm.is_manager():
309 |             self.assertEqual(actual, expected)
310 |         else:
311 |             self.assertTrue(actual in expected)
312 | 
313 |     def testTreeScatterInt(self):
314 |         if self.gcomm.is_manager():
315 |             data = 10
316 |         else:
317 |             data = None
318 | 
319 |         if self.monocomm.is_manager():
320 |             mydata = self.multicomm.partition(data, func=Duplicate(), involved=True)
321 |         else:
322 |             mydata = None
323 | 
324 |         actual = self.monocomm.partition(mydata, func=Duplicate(), involved=True)
325 |         expected = 10
326 |         self.assertEqual(actual, expected)
327 | 
328 |     def testTreeGatherInt(self):
329 |         data = self.grank
330 | 
331 |         if self.monocomm.is_manager():
332 |             mydata = [data]
333 |             for _ in range(1, self.monocomm.get_size()):
334 |                 mydata.append(self.monocomm.collect()[1])
335 |         else:
336 |             mydata = self.monocomm.collect(data)
337 | 
338 |         if self.gcomm.is_manager():
339 |             actual = [mydata]
340 |             for _ in range(1, self.multicomm.get_size()):
341 |                 actual.append(self.multicomm.collect()[1])
342 |         elif self.monocomm.is_manager():
343 |             actual = self.multicomm.collect(mydata)
344 |         else:
345 |             actual = None
346 | 
347 |         # The global manager should have gathered every global rank exactly once
348 |         if self.gcomm.is_manager():
349 |             self.assertEqual(sorted(sum(actual, [])), list(range(self.gsize)))
350 | 
351 | if __name__ == '__main__':
352 |     try:
353 |         from cStringIO import StringIO
354 |     except ImportError:
355 |         from io import StringIO
356 | 
357 |     mystream = StringIO()
358 |     tests = unittest.TestLoader().loadTestsFromTestCase(SimpleCommParDivTests)
359 |     unittest.TextTestRunner(stream=mystream).run(tests)
360 |     MPI_COMM_WORLD.Barrier()
361 | 
362 |     results = 
MPI_COMM_WORLD.gather(mystream.getvalue()) 363 | if MPI_COMM_WORLD.Get_rank() == 0: 364 | for rank, result in enumerate(results): 365 | print('RESULTS FOR RANK ' + str(rank) + ':') 366 | print(str(result)) 367 | -------------------------------------------------------------------------------- /asaptools/tests/simpleCommParTests.py: -------------------------------------------------------------------------------- 1 | """ 2 | Parallel Tests for the SimpleComm class 3 | 4 | Copyright 2017, University Corporation for Atmospheric Research 5 | See the LICENSE.txt file for details 6 | """ 7 | 8 | from __future__ import print_function, unicode_literals 9 | 10 | import unittest 11 | 12 | import numpy as np 13 | from mpi4py import MPI 14 | 15 | from asaptools import simplecomm 16 | from asaptools.partition import Duplicate, EqualStride 17 | 18 | MPI_COMM_WORLD = MPI.COMM_WORLD 19 | 20 | 21 | class SimpleCommParTests(unittest.TestCase): 22 | def setUp(self): 23 | self.gcomm = simplecomm.create_comm() 24 | self.size = MPI_COMM_WORLD.Get_size() 25 | self.rank = MPI_COMM_WORLD.Get_rank() 26 | 27 | def tearDown(self): 28 | pass 29 | 30 | def testGetSize(self): 31 | actual = self.gcomm.get_size() 32 | expected = self.size 33 | self.assertEqual(actual, expected) 34 | 35 | def testIsManager(self): 36 | actual = self.gcomm.is_manager() 37 | expected = self.rank == 0 38 | self.assertEqual(actual, expected) 39 | 40 | def testSumInt(self): 41 | data = 5 42 | actual = self.gcomm.allreduce(data, 'sum') 43 | expected = self.size * 5 44 | self.assertEqual(actual, expected) 45 | 46 | def testSumList(self): 47 | data = range(5) 48 | actual = self.gcomm.allreduce(data, 'sum') 49 | expected = self.size * sum(data) 50 | self.assertEqual(actual, expected) 51 | 52 | def testSumArray(self): 53 | data = np.arange(5) 54 | actual = self.gcomm.allreduce(data, 'sum') 55 | expected = self.size * sum(data) 56 | self.assertEqual(actual, expected) 57 | 58 | def testSumDict(self): 59 | data = {'a': range(3), 'b': [5, 7]} 60 | actual = self.gcomm.allreduce(data, 'sum') 61 | expected = {'a': self.size * sum(range(3)), 'b': self.size * sum([5, 7])} 62 | self.assertEqual(actual, expected) 63 | 64 | def testMaxInt(self): 65 | data = self.rank 66 | actual = self.gcomm.allreduce(data, 'max') 67 | expected = self.size - 1 68 | self.assertEqual(actual, expected) 69 | 70 | def testMaxList(self): 71 | data = range(2 + self.rank) 72 | actual = self.gcomm.allreduce(data, 'max') 73 | expected = self.size 74 | self.assertEqual(actual, expected) 75 | 76 | def testMaxArray(self): 77 | data = np.arange(2 + self.rank) 78 | actual = self.gcomm.allreduce(data, 'max') 79 | expected = self.size 80 | self.assertEqual(actual, expected) 81 | 82 | def testMaxDict(self): 83 | data = {'rank': self.rank, 'range': range(2 + self.rank)} 84 | actual = self.gcomm.allreduce(data, 'max') 85 | expected = {'rank': self.size - 1, 'range': self.size} 86 | self.assertEqual(actual, expected) 87 | 88 | def testPartitionInt(self): 89 | if self.gcomm.is_manager(): 90 | data = 10 91 | else: 92 | data = None 93 | actual = self.gcomm.partition(data, func=Duplicate()) 94 | if self.gcomm.is_manager(): 95 | expected = None 96 | else: 97 | expected = 10 98 | self.assertEqual(actual, expected) 99 | 100 | def testPartitionIntInvolved(self): 101 | if self.gcomm.is_manager(): 102 | data = 10 103 | else: 104 | data = None 105 | actual = self.gcomm.partition(data, func=Duplicate(), involved=True) 106 | expected = 10 107 | self.assertEqual(actual, expected) 108 | 109 | def 
testPartitionList(self): 110 | if self.gcomm.is_manager(): 111 | data = range(10) 112 | else: 113 | data = None 114 | actual = self.gcomm.partition(data) 115 | if self.gcomm.is_manager(): 116 | expected = None 117 | else: 118 | expected = range(self.rank - 1, 10, self.size - 1) 119 | self.assertEqual(actual, expected) 120 | 121 | def testPartitionListInvolved(self): 122 | if self.gcomm.is_manager(): 123 | data = range(10) 124 | else: 125 | data = None 126 | actual = self.gcomm.partition(data, involved=True) 127 | expected = range(self.rank, 10, self.size) 128 | self.assertEqual(actual, expected) 129 | 130 | def testPartitionArray(self): 131 | if self.gcomm.is_manager(): 132 | data = np.arange(10) 133 | else: 134 | data = None 135 | actual = self.gcomm.partition(data, func=EqualStride()) 136 | if self.gcomm.is_manager(): 137 | expected = None 138 | else: 139 | expected = np.arange(self.rank - 1, 10, self.size - 1) 140 | if self.gcomm.is_manager(): 141 | self.assertEqual(actual, expected) 142 | else: 143 | np.testing.assert_array_equal(actual, expected) 144 | 145 | def testPartitionStrArray(self): 146 | indata = list('abcdefghi') 147 | if self.gcomm.is_manager(): 148 | data = np.array(indata) 149 | else: 150 | data = None 151 | actual = self.gcomm.partition(data, func=EqualStride()) 152 | if self.gcomm.is_manager(): 153 | expected = None 154 | else: 155 | expected = np.array(indata[self.rank - 1 :: self.size - 1]) 156 | if self.gcomm.is_manager(): 157 | self.assertEqual(actual, expected) 158 | else: 159 | np.testing.assert_array_equal(actual, expected) 160 | 161 | def testPartitionCharArray(self): 162 | indata = list('abcdefghi') 163 | if self.gcomm.is_manager(): 164 | data = np.array(indata, dtype='c') 165 | else: 166 | data = None 167 | actual = self.gcomm.partition(data, func=EqualStride()) 168 | if self.gcomm.is_manager(): 169 | expected = None 170 | else: 171 | expected = np.array(indata[self.rank - 1 :: self.size - 1], dtype='c') 172 | if self.gcomm.is_manager(): 173 | self.assertEqual(actual, expected) 174 | else: 175 | np.testing.assert_array_equal(actual, expected) 176 | 177 | def testPartitionArrayInvolved(self): 178 | if self.gcomm.is_manager(): 179 | data = np.arange(10) 180 | else: 181 | data = None 182 | actual = self.gcomm.partition(data, func=EqualStride(), involved=True) 183 | expected = np.arange(self.rank, 10, self.size) 184 | np.testing.assert_array_equal(actual, expected) 185 | 186 | def testCollectInt(self): 187 | if self.gcomm.is_manager(): 188 | data = None 189 | actual = [self.gcomm.collect() for _ in range(1, self.size)] 190 | expected = [i for i in enumerate(range(1, self.size), 1)] 191 | else: 192 | data = self.rank 193 | actual = self.gcomm.collect(data) 194 | expected = None 195 | self.gcomm.sync() 196 | if self.gcomm.is_manager(): 197 | for a in actual: 198 | self.assertTrue(a in expected) 199 | else: 200 | self.assertEqual(actual, expected) 201 | 202 | def testCollectList(self): 203 | if self.gcomm.is_manager(): 204 | data = None 205 | actual = [self.gcomm.collect() for _ in range(1, self.size)] 206 | expected = [(i, range(i)) for i in range(1, self.size)] 207 | else: 208 | data = range(self.rank) 209 | actual = self.gcomm.collect(data) 210 | expected = None 211 | self.gcomm.sync() 212 | if self.gcomm.is_manager(): 213 | for a in actual: 214 | self.assertTrue(a in expected) 215 | else: 216 | self.assertEqual(actual, expected) 217 | 218 | def testCollectArray(self): 219 | if self.gcomm.is_manager(): 220 | data = None 221 | actual = [ 222 | (i, list(x)) for (i, 
x) in [self.gcomm.collect() for _ in range(1, self.size)] 223 | ] 224 | expected = [(i, list(np.arange(self.size) + i)) for i in range(1, self.size)] 225 | else: 226 | data = np.arange(self.size) + self.rank 227 | actual = self.gcomm.collect(data) 228 | expected = None 229 | self.gcomm.sync() 230 | if self.gcomm.is_manager(): 231 | for a in actual: 232 | self.assertTrue(a in expected) 233 | else: 234 | self.assertEqual(actual, expected) 235 | 236 | def testCollectStrArray(self): 237 | if self.gcomm.is_manager(): 238 | data = None 239 | actual = [ 240 | (i, list(x)) for (i, x) in [self.gcomm.collect() for _ in range(1, self.size)] 241 | ] 242 | expected = [ 243 | (i, list(map(str, list(np.arange(self.size) + i)))) for i in range(1, self.size) 244 | ] 245 | else: 246 | data = np.array([str(i + self.rank) for i in range(self.size)]) 247 | actual = self.gcomm.collect(data) 248 | expected = None 249 | self.gcomm.sync() 250 | if self.gcomm.is_manager(): 251 | for a in actual: 252 | self.assertTrue(a in expected) 253 | else: 254 | self.assertEqual(actual, expected) 255 | 256 | def testCollectCharArray(self): 257 | if self.gcomm.is_manager(): 258 | data = None 259 | actual = [ 260 | (i, list(x)) for (i, x) in [self.gcomm.collect() for _ in range(1, self.size)] 261 | ] 262 | expected = [ 263 | ( 264 | i, 265 | list(map(lambda c: str(c).encode(), list(np.arange(self.size) + i))), 266 | ) 267 | for i in range(1, self.size) 268 | ] 269 | else: 270 | data = np.array([str(i + self.rank) for i in range(self.size)], dtype='c') 271 | actual = self.gcomm.collect(data) 272 | expected = None 273 | self.gcomm.sync() 274 | if self.gcomm.is_manager(): 275 | for a in actual: 276 | self.assertTrue(a in expected) 277 | else: 278 | self.assertEqual(actual, expected) 279 | 280 | def testRationInt(self): 281 | if self.gcomm.is_manager(): 282 | data = range(1, self.size) 283 | actual = [self.gcomm.ration(d) for d in data] 284 | expected = [None] * (self.size - 1) 285 | else: 286 | data = None 287 | actual = self.gcomm.ration() 288 | expected = range(1, self.size) 289 | self.gcomm.sync() 290 | if self.gcomm.is_manager(): 291 | self.assertEqual(actual, expected) 292 | else: 293 | self.assertTrue(actual in expected) 294 | 295 | def testRationArray(self): 296 | if self.gcomm.is_manager(): 297 | data = np.arange(3 * (self.size - 1)) 298 | actual = [self.gcomm.ration(data[3 * i : 3 * (i + 1)]) for i in range(0, self.size - 1)] 299 | expected = [None] * (self.size - 1) 300 | else: 301 | data = None 302 | actual = self.gcomm.ration() 303 | expected = np.arange(3 * (self.size - 1)) 304 | self.gcomm.sync() 305 | if self.gcomm.is_manager(): 306 | self.assertEqual(actual, expected) 307 | else: 308 | contained = any( 309 | [ 310 | np.all(actual == expected[i : i + actual.size]) 311 | for i in range(expected.size - actual.size + 1) 312 | ] 313 | ) 314 | self.assertTrue(contained) 315 | 316 | def testRationStrArray(self): 317 | if self.gcomm.is_manager(): 318 | data = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c') 319 | actual = [ 320 | self.gcomm.ration(data[3 * i : 3 * (i + 1)]) for i in range(0, (self.size - 1)) 321 | ] 322 | expected = [None] * (self.size - 1) 323 | else: 324 | data = None 325 | actual = self.gcomm.ration() 326 | expected = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c') 327 | self.gcomm.sync() 328 | if self.gcomm.is_manager(): 329 | self.assertEqual(actual, expected) 330 | else: 331 | contained = any( 332 | [ 333 | np.all(actual == expected[i : i + actual.size]) 334 | for i in 
range(expected.size - actual.size + 1)
335 |                 ]
336 |             )
337 |             self.assertTrue(contained)
338 | 
339 |     def testRationCharArray(self):
340 |         if self.gcomm.is_manager():
341 |             data = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c')
342 |             actual = [
343 |                 self.gcomm.ration(data[3 * i : 3 * (i + 1)]) for i in range(0, (self.size - 1))
344 |             ]
345 |             expected = [None] * (self.size - 1)
346 |         else:
347 |             data = None
348 |             actual = self.gcomm.ration()
349 |             expected = np.array(list(map(str, range(3 * (self.size - 1)))), dtype='c')
350 |         self.gcomm.sync()
351 |         if self.gcomm.is_manager():
352 |             self.assertEqual(actual, expected)
353 |         else:
354 |             contained = any(
355 |                 [
356 |                     np.all(actual == expected[i : i + actual.size])
357 |                     for i in range(expected.size - actual.size + 1)
358 |                 ]
359 |             )
360 |             self.assertTrue(contained)
361 | 
362 | 
363 | if __name__ == '__main__':
364 |     try:
365 |         from cStringIO import StringIO
366 |     except ImportError:
367 |         from io import StringIO
368 | 
369 |     mystream = StringIO()
370 |     tests = unittest.TestLoader().loadTestsFromTestCase(SimpleCommParTests)
371 |     unittest.TextTestRunner(stream=mystream).run(tests)
372 |     MPI_COMM_WORLD.Barrier()
373 | 
374 |     results = MPI_COMM_WORLD.gather(mystream.getvalue())
375 |     if MPI_COMM_WORLD.Get_rank() == 0:
376 |         for rank, result in enumerate(results):
377 |             print('RESULTS FOR RANK ' + str(rank) + ':')
378 |             print(str(result))
379 | 
--------------------------------------------------------------------------------
/asaptools/simplecomm.py:
--------------------------------------------------------------------------------
1 | """
2 | A module for simple MPI communication.
3 | 
4 | The SimpleComm class is designed to provide a simplified MPI-based
5 | communication strategy using the MPI4Py module.
6 | 
7 | To accomplish this task, the SimpleComm object provides a single communication
8 | pattern with a simple, light-weight API. The communication pattern is a
9 | common 'manager'/'worker' pattern, with the 0th rank assumed to be the
10 | 'manager' rank. The SimpleComm API provides a way of sending data out from the
11 | 'manager' rank to the 'worker' ranks, and for collecting the data from the
12 | 'worker' ranks back on the 'manager' rank.
13 | 
14 | **PARTITIONING:**
15 | 
16 | Within the SimpleComm paradigm, the 'manager' rank is assumed to be responsible
17 | for partitioning (or distributing) the necessary work to the 'worker' ranks.
18 | The *partition* method provides this functionality. Using a *partition
19 | function*, the *partition* method takes data known on the 'manager' rank and
20 | gives each 'worker' rank a part of the data according to the algorithm of the
21 | partition function.
22 | 
23 | The *partition* method is *synchronous*, meaning that every rank (from the
24 | 'manager' rank to all of the 'worker' ranks) must be in sync when the method
25 | is called. This means that every rank must participate in the call, and
26 | every rank will wait until all of the data has been partitioned before
27 | continuing. Remember, whenever the 'manager' rank speaks, all of the
28 | 'worker' ranks listen! And they continue to listen until dismissed by the
29 | 'manager' rank.
30 | 
31 | Additionally, the 'manager' rank can be considered *involved* or *uninvolved*
32 | in the partition process. If the 'manager' rank is *involved*, then the
33 | 'manager' rank will take a part of the data for itself. If the 'manager' is
34 | *uninvolved*, then the data will be partitioned only across the 'worker' ranks.
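As a minimal sketch of this pattern (an illustration, not part of the package
itself; it assumes a launch under mpirun with at least 2 ranks, and uses only
names defined in asaptools)::

    from asaptools.partition import EqualStride
    from asaptools.simplecomm import create_comm

    comm = create_comm()
    if comm.is_manager():
        data = list(range(8))  # only the 'manager' needs the full data
    else:
        data = None
    # With involved=True the 'manager' keeps a part of the data for itself;
    # with involved=False it gets None back.
    part = comm.partition(data, func=EqualStride(), involved=True)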
35 | 
36 | *Partitioning* is a *synchronous* communication call that implements a
37 | *static partitioning* algorithm.
38 | 
39 | **RATIONING:**
40 | 
41 | An alternative approach to the *partitioning* communication method is the
42 | *rationing* communication method. This method involves the individual
43 | 'worker' ranks requesting data to work on. In this approach, each 'worker'
44 | rank, when the 'worker' rank is ready, asks the 'manager' rank for a new
45 | piece of data on which to work. The 'manager' rank receives the request
46 | and gives the next piece of data for processing out to the requesting
47 | 'worker' rank. It doesn't matter in what order the ranks request data, and
48 | they do not all have to request data at the same time. However, it is
49 | critical to understand that if a 'worker' requests data when the 'manager'
50 | rank does not listen for the request, or the 'manager' expects a 'worker'
51 | to request work but the 'worker' never makes the request, the entire
52 | process will hang and wait forever!
53 | 
54 | *Rationing* is an *asynchronous* communication call that allows the 'manager'
55 | to implement a *dynamic partitioning* algorithm.
56 | 
57 | **COLLECTING:**
58 | 
59 | Once each 'worker' has received its assigned part of the data, the 'worker'
60 | will perform some work pertaining to the data it received. In such a case,
61 | the 'worker' may (though not necessarily) return one or more results back to
62 | the 'manager'. The *collect* method provides this functionality.
63 | 
64 | The *collect* method is *asynchronous*, meaning that each 'worker' rank can
65 | send its data back to the 'manager' at any time and in any order. Since the
66 | 'manager' rank does not care where the data came from, the 'manager' rank simply
67 | receives the result from the 'worker' rank and processes it. Hence, all that
68 | matters is that for every *collect* call made by all of the 'worker' ranks, a
69 | *collect* call must also be made by the 'manager' rank.
70 | 
71 | The *collect* method is a *handshake* method, meaning that while the 'manager'
72 | rank doesn't care which 'worker' rank sends it data, the 'manager' rank does
73 | acknowledge the 'worker' rank and record the 'worker' rank's identity.
74 | 
75 | **REDUCING:**
76 | 
77 | In general, it is assumed that each 'worker' rank works independently from the
78 | other 'worker' ranks. However, it may be occasionally necessary for the
79 | 'worker' ranks to know something about the work being done on (or the data
80 | given to) each of the other ranks. The only allowed communication of this
81 | type is provided by the *allreduce* method.
82 | 
83 | The *allreduce* method allows for *reductions* of the data distributed across
84 | all of the ranks to be made available to every rank. Reductions are operations
85 | such as 'max', 'min', 'sum', and 'prod', which compute and distribute to the
86 | ranks the 'maximum', 'minimum', 'sum', or 'product' of the data distributed
87 | across the ranks. Since the *reduction* computes a reduced quantity of data
88 | distributed across all ranks, the *allreduce* method is a *synchronous* method
89 | (i.e., all ranks must participate in the call, including the 'manager').
90 | 
91 | **DIVIDING:**
92 | 
93 | It can be occasionally useful to subdivide the 'worker' ranks into different
94 | groups to perform different tasks in each group. When this is necessary, the
95 | 'manager' rank will assign itself and each 'worker' rank a *color* ID.
Then,
96 | the 'manager' will assign each rank (including itself) to 2 new groups:
97 | 
98 | 1. Each rank with the same color ID will be assigned to the same group, and
99 |    within this new *color* group, each rank will be given a new rank ID
100 |    ranging from 0 (identifying the color group's 'manager' rank) to the number
101 |    of 'worker' ranks in the color group. This is called
102 |    the *monocolor* grouping.
103 | 
104 | 2. Each rank with the same new rank ID across all color groups will be assigned
105 |    to the same group. Hence, all ranks with rank ID 0 (but different color
106 |    IDs) will be in the same group, all ranks with rank ID 1 (but different
107 |    color IDs) will be in another group, etc. This is called the
108 |    *multicolor* grouping. NOTE: This grouping will look like grouping (1)
109 |    except with the rank ID and the color ID swapped.
110 | 
111 | The *divide* method provides this functionality, and it returns 2 new
112 | SimpleComm objects, one for each of the 2 groupings described above. This means
113 | that within each group, the same *partitioning*, *collecting*, and *reducing*
114 | operations can be performed in the same way as described above for the *global*
115 | group.
116 | 
117 | 
118 | Copyright 2020 University Corporation for Atmospheric Research
119 | 
120 | Licensed under the Apache License, Version 2.0 (the "License");
121 | you may not use this file except in compliance with the License.
122 | You may obtain a copy of the License at
123 | 
124 | http://www.apache.org/licenses/LICENSE-2.0
125 | 
126 | Unless required by applicable law or agreed to in writing, software
127 | distributed under the License is distributed on an "AS IS" BASIS,
128 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
129 | See the License for the specific language governing permissions and
130 | limitations under the License.
131 | """
132 | 
133 | from collections import defaultdict
134 | from functools import partial, reduce  # noqa: UnusedImport (used inside the 'py' eval strings below)
135 | 
136 | # Define the supported reduction operators
137 | OPERATORS = ['sum', 'prod', 'max', 'min']
138 | 
139 | # Define the reduction operators map (maps names to function names).
140 | # The 'py' function names are passed to 'eval(*)' and executed as python code.
141 | # The 'np' function names are passed to 'getattr(numpy,*)' and executed as
142 | # numpy code. The 'mpi' function names are passed to 'getattr(mpi4py,*)'
143 | # and return an MPI operator object which is passed as an argument to MPI
144 | # reduce functions.
145 | _OP_MAP = {
146 |     'sum': {'py': 'sum', 'np': 'sum', 'mpi': 'SUM'},
147 |     'prod': {'py': 'partial(reduce, lambda x, y: x * y)', 'np': 'prod', 'mpi': 'PROD'},
148 |     'max': {'py': 'max', 'np': 'max', 'mpi': 'MAX'},
149 |     'min': {'py': 'min', 'np': 'min', 'mpi': 'MIN'},
150 | }
151 | 
152 | 
153 | def create_comm(serial=False):
154 |     """
155 |     This is a factory function for creating SimpleComm objects.
156 | 
157 |     Depending on the argument given, it returns an instance of a serial or
158 |     parallel SimpleComm object.
159 | 
160 |     Keyword Arguments:
161 |         serial (bool): A boolean flag with True indicating the desire for a
162 |                        serial SimpleComm instance, and False indicating the
163 |                        desire for a parallel SimpleComm instance.
164 | 
165 |     Returns:
166 |         SimpleComm: An instance of a SimpleComm object, either serial
167 |             (if serial == True) or parallel (if serial == False)
168 | 
169 |     Raises:
170 |         TypeError: if the serial argument is not a bool.
171 | 
172 |     Examples:
173 | 
174 |         >>> sercomm = create_comm(serial=True)
175 |         >>> type(sercomm)
176 |         <class 'asaptools.simplecomm.SimpleComm'>
177 | 
178 |         >>> parcomm = create_comm()
179 |         >>> type(parcomm)
180 |         <class 'asaptools.simplecomm.SimpleCommMPI'>
181 |     """
182 |     if type(serial) is not bool:
183 |         raise TypeError('Serial parameter must be a bool')
184 |     if serial:
185 |         return SimpleComm()
186 |     else:
187 |         return SimpleCommMPI()
188 | 
189 | 
190 | class SimpleComm(object):
191 | 
192 |     """
193 |     Simple Communicator for serial operation.
194 | 
195 |     Attributes:
196 |         _numpy: Reference to the Numpy module, if found
197 |         _color: The color associated with the communicator, if colored
198 |         _group: The group ID associated with the communicator's color
199 |     """
200 | 
201 |     def __init__(self):
202 |         """
203 |         Constructor.
204 |         """
205 | 
206 |         # Try importing the Numpy module
207 |         try:
208 |             import numpy
209 |         except ImportError:
210 |             numpy = None
211 | 
212 |         # A reference to the Numpy module, if found
213 |         self._numpy = numpy
214 | 
215 |         # The color ID associated with this communicator
216 |         self._color = None
217 | 
218 |         # The group ID associated with the color
219 |         self._group = None
220 | 
221 |     def _is_ndarray(self, obj):
222 |         """
223 |         Helper function to determine if an object is a Numpy NDArray.
224 | 
225 |         Parameters:
226 |             obj: The object to be tested
227 | 
228 |         Returns:
229 |             bool: True if the object is a Numpy NDArray. False otherwise,
230 |                 or if the Numpy module was not found during
231 |                 the SimpleComm constructor.
232 | 
233 |         Examples:
234 | 
235 |             >>> _is_ndarray(1)
236 |             False
237 | 
238 |             >>> alist = [1, 2, 3, 4]
239 |             >>> _is_ndarray(alist)
240 |             False
241 | 
242 |             >>> aarray = numpy.array(alist)
243 |             >>> _is_ndarray(aarray)
244 |             True
245 |         """
246 |         if self._numpy:
247 |             return isinstance(obj, self._numpy.ndarray)
248 |         else:
249 |             return False
250 | 
251 |     def get_size(self):
252 |         """
253 |         Get the integer number of ranks in this communicator.
254 | 
255 |         The size includes the 'manager' rank.
256 | 
257 |         Returns:
258 |             int: The integer number of ranks in this communicator.
259 |         """
260 |         return 1
261 | 
262 |     def get_rank(self):
263 |         """
264 |         Get the integer rank ID of this MPI process in this communicator.
265 | 
266 |         This call can be made independently from other ranks.
267 | 
268 |         Returns:
269 |             int: The integer rank ID of this MPI process
270 |         """
271 |         return 0
272 | 
273 |     def is_manager(self):
274 |         """
275 |         Check if this MPI process is on the 'manager' rank (i.e., rank 0).
276 | 
277 |         This call can be made independently from other ranks.
278 | 
279 |         Returns:
280 |             bool: True if this MPI process is on the 'manager' rank
281 |                 (or rank 0). False otherwise.
282 |         """
283 |         return self.get_rank() == 0
284 | 
285 |     def get_color(self):
286 |         """
287 |         Get the integer color ID of this MPI process in this communicator.
288 | 
289 |         By default, a communicator's color is None, but a communicator can
290 |         be divided into color groups using the 'divide' method.
291 | 
292 |         This call can be made independently from other ranks.
293 | 
294 |         Returns:
295 |             int: The color of this MPI communicator
296 |         """
297 |         return self._color
298 | 
299 |     def get_group(self):
300 |         """
301 |         Get the group ID of this MPI communicator.
302 | 
303 |         The group ID is the argument passed to the 'divide' method, and it
304 |         represents a unique identifier for all ranks in the same color group.
305 |         It can be any type of object (e.g., a string name).
306 | 
307 |         This call can be made independently from other ranks.
308 | 
309 |         Returns:
310 |             The group ID of this communicator
311 |         """
312 |         return self._group
313 | 
314 |     def sync(self):
315 |         """
316 |         Synchronize all MPI processes at the point of this call.
317 | 
318 |         Immediately after this method is called, you can guarantee that all
319 |         ranks in this communicator will be synchronized.
320 | 
321 |         This call must be made by all ranks.
322 |         """
323 |         return
324 | 
325 |     def allreduce(self, data, op):
326 |         """
327 |         Perform an MPI AllReduction operation.
328 | 
329 |         The data is "reduced" across all ranks in the communicator, and the
330 |         result is returned to all ranks in the communicator. (Reduce
331 |         operations such as 'sum', 'prod', 'min', and 'max' are allowed.)
332 | 
333 |         This call must be made by all ranks.
334 | 
335 |         Parameters:
336 |             data: The data to be reduced
337 |             op (str): A string identifier for a reduce operation (any string
338 |                 found in the OPERATORS list)
339 | 
340 |         Returns:
341 |             The single value constituting the reduction of the input data.
342 |             (The same value is returned on all ranks in this communicator.)
343 |         """
344 |         if isinstance(data, dict):
345 |             totals = {}
346 |             for k, v in data.items():
347 |                 totals[k] = SimpleComm.allreduce(self, v, op)
348 |             return totals
349 |         elif self._is_ndarray(data):
350 |             return SimpleComm.allreduce(self, getattr(self._numpy, _OP_MAP[op]['np'])(data), op)
351 |         elif hasattr(data, '__len__'):
352 |             return SimpleComm.allreduce(self, eval(_OP_MAP[op]['py'])(data), op)
353 |         else:
354 |             return data
355 | 
356 |     def partition(self, data=None, func=None, involved=False, tag=0):
357 |         """
358 |         Partition and send data from the 'manager' rank to 'worker' ranks.
359 | 
360 |         By default, the data is partitioned using an "equal stride" across the
361 |         data, with the stride equal to the number of ranks involved in the
362 |         partitioning. If a partition function is supplied via the `func`
363 |         argument, then the data will be partitioned across the 'worker' ranks,
364 |         giving each 'worker' rank a different part of the data according to
365 |         the algorithm used by the partition function supplied.
366 | 
367 |         If the `involved` argument is True, then a part of the data (as
368 |         determined by the given partition function, if supplied) will be
369 |         returned on the 'manager' rank. Otherwise (the `involved` argument is
370 |         False), the data will be partitioned only across the 'worker' ranks.
371 | 
372 |         This call must be made by all ranks.
373 | 
374 |         Keyword Arguments:
375 |             data: The data to be partitioned across the ranks in the
376 |                 communicator.
377 |             func: A PartitionFunction object/function that returns a part
378 |                 of the data given the index and assumed size of the partition.
379 |             involved (bool): True if a part of the data should be given to the
380 |                 'manager' rank in addition to the 'worker' ranks. False
381 |                 otherwise.
382 |             tag (int): A user-defined integer tag to uniquely specify this
383 |                 communication message.
384 | 
385 |         Returns:
386 |             A (possibly partitioned) subset (i.e., part) of the data. Depending
387 |             on the PartitionFunction used (or if it is used at all), this method
388 |             may return a different part on each rank.
389 |         """
390 |         op = func if func else lambda *x: x[0][x[1] :: x[2]]
391 |         if involved:
392 |             return op(data, 0, 1)
393 |         else:
394 |             return None
395 | 
396 |     def ration(self, data=None, tag=0):
397 |         """
398 |         Send a single piece of data from the 'manager' rank to a 'worker' rank.
399 | 400 | If this method is called on a 'worker' rank, the worker will send a 401 | "request" for data to the 'manager' rank. When the 'manager' receives 402 | this request, the 'manager' rank sends a single piece of data back to 403 | the requesting 'worker' rank. 404 | 405 | For each call to this function on a given 'worker' rank, there must 406 | be a matching call to this function made on the 'manager' rank. 407 | 408 | NOTE: This method cannot be used for communication between the 409 | 'manager' rank and itself. Attempting this will cause the code to 410 | hang. 411 | 412 | Keyword Arguments: 413 | data: The data to be asynchronously sent to the 'worker' rank 414 | tag (int): A user-defined integer tag to uniquely specify this 415 | communication message 416 | 417 | Returns: 418 | On the 'worker' rank, the data sent by the manager. On the 419 | 'manager' rank, None. 420 | 421 | Raises: 422 | RuntimeError: If executed during a serial or 1-rank parallel run 423 | """ 424 | err_msg = 'Rationing cannot be used in serial operation' 425 | raise RuntimeError(err_msg) 426 | 427 | def collect(self, data=None, tag=0): 428 | """ 429 | Send data from a 'worker' rank to the 'manager' rank. 430 | 431 | If the calling MPI process is the 'manager' rank, then it will 432 | receive and return the data sent from the 'worker'. If the calling 433 | MPI process is a 'worker' rank, then it will send the data to the 434 | 'manager' rank. 435 | 436 | For each call to this function on a given 'worker' rank, there must 437 | be a matching call to this function made on the 'manager' rank. 438 | 439 | NOTE: This method cannot be used for communication between the 440 | 'manager' rank and itself. Attempting this will cause the code to 441 | hang. 442 | 443 | Keyword Arguments: 444 | data: The data to be collected asynchronously on the manager rank. 445 | tag (int): A user-defined integer tag to uniquely specify this 446 | communication message 447 | 448 | Returns: 449 | On the 'manager' rank, a tuple containing the source rank ID 450 | and the data collected. None on all other ranks. 451 | 452 | Raises: 453 | RuntimeError: If executed during a serial or 1-rank parallel run 454 | """ 455 | err_msg = 'Collection cannot be used in serial operation' 456 | raise RuntimeError(err_msg) 457 | 458 | def divide(self, group): 459 | """ 460 | Divide this communicator's ranks into groups. 461 | 462 | Creates and returns two (2) kinds of groups: 463 | 464 | 1. groups with ranks of the same color ID but different rank IDs 465 | (called a "monocolor" group), and 466 | 467 | 2. groups with ranks of the same rank ID but different color IDs 468 | (called a "multicolor" group). 469 | 470 | Parameters: 471 | group: A unique group ID to which will be assigned an integer 472 | color ID ranging from 0 to the number of group ID's 473 | supplied across all ranks 474 | 475 | Returns: 476 | tuple: A tuple containing (first) the "monocolor" SimpleComm for 477 | ranks with the same color ID (but different rank IDs) and 478 | (second) the "multicolor" SimpleComm for ranks with the same 479 | rank ID (but different color IDs) 480 | 481 | Raises: 482 | RuntimeError: If executed during a serial or 1-rank parallel run 483 | """ 484 | err_msg = 'Division cannot be done on a serial communicator' 485 | raise RuntimeError(err_msg) 486 | 487 | 488 | class SimpleCommMPI(SimpleComm): 489 | 490 | """ 491 | Simple Communicator using MPI. 
492 | 
493 |     Attributes:
494 |         PART_TAG: Partition Tag Identifier
495 |         RATN_TAG: Ration Tag Identifier
496 |         CLCT_TAG: Collect Tag Identifier
497 |         REQ_TAG: Request Identifier
498 |         MSG_TAG: Message Identifier
499 |         ACK_TAG: Acknowledgement Identifier
500 |         PYT_TAG: Python send/recv Identifier
501 |         NPY_TAG: Numpy send/recv Identifier
502 |         _mpi: A reference to the mpi4py.MPI module
503 |         _comm: A reference to the mpi4py.MPI communicator
504 |     """
505 | 
506 |     PART_TAG = 1  # Partition Tag Identifier
507 |     RATN_TAG = 2  # Ration Tag Identifier
508 |     CLCT_TAG = 3  # Collect Tag Identifier
509 | 
510 |     REQ_TAG = 1  # Request Identifier
511 |     MSG_TAG = 2  # Message Identifier
512 |     ACK_TAG = 3  # Acknowledgement Identifier
513 |     PYT_TAG = 4  # Python Data send/recv Identifier
514 |     NPY_TAG = 5  # Numpy NDArray send/recv Identifier
515 | 
516 |     def __init__(self):
517 |         """
518 |         Constructor.
519 |         """
520 | 
521 |         # Call the base class constructor
522 |         super(SimpleCommMPI, self).__init__()
523 | 
524 |         # Try importing the MPI4Py MPI module
525 |         try:
526 |             from mpi4py import MPI
527 |         except ImportError:
528 |             err_msg = 'MPI could not be found.'
529 |             raise ImportError(err_msg)
530 | 
531 |         # Hold on to the MPI module
532 |         self._mpi = MPI
533 | 
534 |         # The MPI communicator (by default, COMM_WORLD)
535 |         self._comm = self._mpi.COMM_WORLD
536 | 
537 |     def __del__(self):
538 |         """
539 |         Destructor.
540 | 
541 |         Free the communicator if this SimpleComm goes out of scope
542 |         """
543 |         if self._comm != self._mpi.COMM_WORLD:
544 |             self._comm.Free()
545 | 
546 |     def _is_bufferable(self, obj):
547 |         """
548 |         Check if the data is bufferable or not.
549 |         """
550 |         if self._is_ndarray(obj):
551 |             if hasattr(self._mpi, '_typedict_c'):
552 |                 return obj.dtype.char in self._mpi._typedict_c
553 |             elif hasattr(self._mpi, '__CTypeDict__'):
554 |                 return obj.dtype.char in self._mpi.__CTypeDict__ and obj.dtype.char != 'c'
555 |             else:
556 |                 return False
557 |         else:
558 |             return False
559 | 
560 |     def get_size(self):
561 |         """
562 |         Get the integer number of ranks in this communicator.
563 | 
564 |         The size includes the 'manager' rank.
565 | 
566 |         Returns:
567 |             int: The integer number of ranks in this communicator.
568 |         """
569 |         return self._comm.Get_size()
570 | 
571 |     def get_rank(self):
572 |         """
573 |         Get the integer rank ID of this MPI process in this communicator.
574 | 
575 |         This call can be made independently from other ranks.
576 | 
577 |         Returns:
578 |             int: The integer rank ID of this MPI process
579 |         """
580 |         return self._comm.Get_rank()
581 | 
582 |     def sync(self):
583 |         """
584 |         Synchronize all MPI processes at the point of this call.
585 | 
586 |         Immediately after this method is called, you can guarantee that all
587 |         ranks in this communicator will be synchronized.
588 | 
589 |         This call must be made by all ranks.
590 |         """
591 |         self._comm.Barrier()
592 | 
593 |     def allreduce(self, data, op):
594 |         """
595 |         Perform an MPI AllReduction operation.
596 | 
597 |         The data is "reduced" across all ranks in the communicator, and the
598 |         result is returned to all ranks in the communicator. (Reduce
599 |         operations such as 'sum', 'prod', 'min', and 'max' are allowed.)
600 | 
601 |         This call must be made by all ranks.
602 | 
603 |         Parameters:
604 |             data: The data to be reduced
605 |             op (str): A string identifier for a reduce operation (any string
606 |                 found in the OPERATORS list)
607 | 
608 |         Returns:
609 |             The single value constituting the reduction of the input data.
610 |             (The same value is returned on all ranks in this communicator.)
611 |         """
612 |         if isinstance(data, dict):
613 |             all_list = self._comm.gather(SimpleComm.allreduce(self, data, op))
614 |             if self.is_manager():
615 |                 all_dict = defaultdict(list)
616 |                 for d in all_list:
617 |                     for k, v in d.items():
618 |                         all_dict[k].append(v)
619 |                 result = {}
620 |                 for k, v in all_dict.items():
621 |                     result[k] = SimpleComm.allreduce(self, v, op)
622 |                 return self._comm.bcast(result)
623 |             else:
624 |                 return self._comm.bcast(None)
625 |         else:
626 |             return self._comm.allreduce(
627 |                 SimpleComm.allreduce(self, data, op),
628 |                 op=getattr(self._mpi, _OP_MAP[op]['mpi']),
629 |             )
630 | 
631 |     def _tag_offset(self, method, message, user):
632 |         """
633 |         Method to generate the tag for a given MPI message
634 | 
635 |         Parameters:
636 |             method (int): One of PART_TAG, RATN_TAG, CLCT_TAG
637 |             message (int): One of REQ_TAG, MSG_TAG, ACK_TAG, PYT_TAG, NPY_TAG
638 |             user (int): A user-defined integer tag
639 | 
640 |         Returns:
641 |             int: A new tag uniquely combining all of the method, message, and
642 |                 user tags together
643 |         """
644 |         return 100 * user + 10 * method + message
645 | 
646 |     def partition(self, data=None, func=None, involved=False, tag=0):
647 |         """
648 |         Partition and send data from the 'manager' rank to 'worker' ranks.
649 | 
650 |         By default, the data is partitioned using an "equal stride" across the
651 |         data, with the stride equal to the number of ranks involved in the
652 |         partitioning. If a partition function is supplied via the 'func'
653 |         argument, then the data will be partitioned across the 'worker' ranks,
654 |         giving each 'worker' rank a different part of the data according to
655 |         the algorithm used by the partition function supplied.
656 | 
657 |         If the 'involved' argument is True, then a part of the data (as
658 |         determined by the given partition function, if supplied) will be
659 |         returned on the 'manager' rank. Otherwise (the 'involved' argument is
660 |         False), the data will be partitioned only across the 'worker' ranks.
661 | 
662 |         This call must be made by all ranks.
663 | 
664 |         Keyword Arguments:
665 |             data: The data to be partitioned across
666 |                 the ranks in the communicator.
667 |             func: A PartitionFunction object/function that returns
668 |                 a part of the data given the index and assumed
669 |                 size of the partition.
670 |             involved (bool): True if a part of the data should be given
671 |                 to the 'manager' rank in addition to the 'worker'
672 |                 ranks. False otherwise.
673 |             tag (int): A user-defined integer tag to uniquely
674 |                 specify this communication message
675 | 
676 |         Returns:
677 |             A (possibly partitioned) subset (i.e., part) of the data.
678 |             Depending on the PartitionFunction used (or if it is used at all),
679 |             this method may return a different part on each rank.
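            For example (a sketch of the default behavior): with no partition
            function and an uninvolved 'manager', data ``[0, 1, ..., 9]`` on
            4 ranks is split as ``data[0::3]``, ``data[1::3]``, and
            ``data[2::3]`` on 'worker' ranks 1 through 3, since each 'worker'
            rank ``i`` receives ``data[i - 1::size - 1]``.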
680 | """ 681 | if self.is_manager(): 682 | op = func if func else lambda *x: x[0][x[1] :: x[2]] 683 | j = 1 if not involved else 0 684 | for i in range(1, self.get_size()): 685 | 686 | # Get the part of the data to send to rank i 687 | part = op(data, i - j, self.get_size() - j) 688 | 689 | # Create the handshake message 690 | msg = {} 691 | msg['rank'] = self.get_rank() 692 | msg['buffer'] = self._is_bufferable(part) 693 | msg['shape'] = getattr(part, 'shape', None) 694 | msg['dtype'] = getattr(part, 'dtype', None) 695 | 696 | # Send the handshake message to the worker rank 697 | msg_tag = self._tag_offset(self.PART_TAG, self.MSG_TAG, tag) 698 | self._comm.send(msg, dest=i, tag=msg_tag) 699 | 700 | # Receive the acknowledgement from the worker 701 | ack_tag = self._tag_offset(self.PART_TAG, self.ACK_TAG, tag) 702 | ack = self._comm.recv(source=i, tag=ack_tag) 703 | 704 | # Check the acknowledgement, if bad skip this rank 705 | if not ack: 706 | continue 707 | 708 | # If OK, send the data to the worker 709 | if msg['buffer']: 710 | npy_tag = self._tag_offset(self.PART_TAG, self.NPY_TAG, tag) 711 | self._comm.Send(self._numpy.array(part), dest=i, tag=npy_tag) 712 | else: 713 | pyt_tag = self._tag_offset(self.PART_TAG, self.PYT_TAG, tag) 714 | self._comm.send(part, dest=i, tag=pyt_tag) 715 | 716 | if involved: 717 | return op(data, 0, self.get_size()) 718 | else: 719 | return None 720 | else: 721 | 722 | # Get the data message from the manager 723 | msg_tag = self._tag_offset(self.PART_TAG, self.MSG_TAG, tag) 724 | msg = self._comm.recv(source=0, tag=msg_tag) 725 | 726 | # Check the message content 727 | ack = type(msg) is dict and all( 728 | [key in msg for key in ['rank', 'buffer', 'shape', 'dtype']] 729 | ) 730 | 731 | # If the message is good, acknowledge 732 | ack_tag = self._tag_offset(self.PART_TAG, self.ACK_TAG, tag) 733 | self._comm.send(ack, dest=0, tag=ack_tag) 734 | 735 | # if acknowledgement is bad, skip 736 | if not ack: 737 | return None 738 | 739 | # Receive the data 740 | if msg['buffer']: 741 | npy_tag = self._tag_offset(self.PART_TAG, self.NPY_TAG, tag) 742 | recvd = self._numpy.empty(msg['shape'], dtype=msg['dtype']) 743 | self._comm.Recv(recvd, source=0, tag=npy_tag) 744 | else: 745 | pyt_tag = self._tag_offset(self.PART_TAG, self.PYT_TAG, tag) 746 | recvd = self._comm.recv(source=0, tag=pyt_tag) 747 | 748 | return recvd 749 | 750 | def ration(self, data=None, tag=0): 751 | """ 752 | Send a single piece of data from the 'manager' rank to a 'worker' rank. 753 | 754 | If this method is called on a 'worker' rank, the worker will send a 755 | "request" for data to the 'manager' rank. When the 'manager' receives 756 | this request, the 'manager' rank sends a single piece of data back to 757 | the requesting 'worker' rank. 758 | 759 | For each call to this function on a given 'worker' rank, there must 760 | be a matching call to this function made on the 'manager' rank. 761 | 762 | NOTE: This method cannot be used for communication between the 763 | 'manager' rank and itself. Attempting this will cause the code to 764 | hang. 765 | 766 | Keyword Arguments: 767 | data: The data to be asynchronously sent to the 'worker' rank 768 | tag (int): A user-defined integer tag to uniquely specify this 769 | communication message 770 | 771 | Returns: 772 | On the 'worker' rank, the data sent by the manager. On the 773 | 'manager' rank, None. 
774 | 775 | Raises: 776 | RuntimeError: If executed during a serial or 1-rank parallel run 777 | """ 778 | if self.get_size() > 1: 779 | if self.is_manager(): 780 | 781 | # Listen for a requesting worker rank 782 | req_tag = self._tag_offset(self.RATN_TAG, self.REQ_TAG, tag) 783 | rank = self._comm.recv(source=self._mpi.ANY_SOURCE, tag=req_tag) 784 | 785 | # Create the handshake message 786 | msg = {} 787 | msg['buffer'] = self._is_bufferable(data) 788 | msg['shape'] = data.shape if hasattr(data, 'shape') else None 789 | msg['dtype'] = data.dtype if hasattr(data, 'dtype') else None 790 | 791 | # Send the handshake message to the requesting worker 792 | msg_tag = self._tag_offset(self.RATN_TAG, self.MSG_TAG, tag) 793 | self._comm.send(msg, dest=rank, tag=msg_tag) 794 | 795 | # Receive the acknowledgement from the requesting worker 796 | ack_tag = self._tag_offset(self.RATN_TAG, self.ACK_TAG, tag) 797 | ack = self._comm.recv(source=rank, tag=ack_tag) 798 | 799 | # Check the acknowledgement, if not OK skip 800 | if not ack: 801 | return 802 | 803 | # If OK, send the data to the requesting worker 804 | if msg['buffer']: 805 | npy_tag = self._tag_offset(self.RATN_TAG, self.NPY_TAG, tag) 806 | self._comm.Send(data, dest=rank, tag=npy_tag) 807 | else: 808 | pyt_tag = self._tag_offset(self.RATN_TAG, self.PYT_TAG, tag) 809 | self._comm.send(data, dest=rank, tag=pyt_tag) 810 | else: 811 | 812 | # Send a request for data to the manager 813 | req_tag = self._tag_offset(self.RATN_TAG, self.REQ_TAG, tag) 814 | self._comm.send(self.get_rank(), dest=0, tag=req_tag) 815 | 816 | # Receive the handshake message from the manager 817 | msg_tag = self._tag_offset(self.RATN_TAG, self.MSG_TAG, tag) 818 | msg = self._comm.recv(source=0, tag=msg_tag) 819 | 820 | # Check the message content 821 | ack = type(msg) is dict and all( 822 | [key in msg for key in ['buffer', 'shape', 'dtype']] 823 | ) 824 | 825 | # Send acknowledgement back to the manager 826 | ack_tag = self._tag_offset(self.RATN_TAG, self.ACK_TAG, tag) 827 | self._comm.send(ack, dest=0, tag=ack_tag) 828 | 829 | # If acknowledgement is bad, don't receive 830 | if not ack: 831 | return None 832 | 833 | # Receive the data from the manager 834 | if msg['buffer']: 835 | npy_tag = self._tag_offset(self.RATN_TAG, self.NPY_TAG, tag) 836 | recvd = self._numpy.empty(msg['shape'], dtype=msg['dtype']) 837 | self._comm.Recv(recvd, source=0, tag=npy_tag) 838 | else: 839 | pyt_tag = self._tag_offset(self.RATN_TAG, self.PYT_TAG, tag) 840 | recvd = self._comm.recv(source=0, tag=pyt_tag) 841 | return recvd 842 | else: 843 | err_msg = 'Rationing cannot be used in 1-rank parallel operation' 844 | raise RuntimeError(err_msg) 845 | 846 | def collect(self, data=None, tag=0): 847 | """ 848 | Send data from a 'worker' rank to the 'manager' rank. 849 | 850 | If the calling MPI process is the 'manager' rank, then it will 851 | receive and return the data sent from the 'worker'. If the calling 852 | MPI process is a 'worker' rank, then it will send the data to the 853 | 'manager' rank. 854 | 855 | For each call to this function on a given 'worker' rank, there must 856 | be a matching call to this function made on the 'manager' rank. 857 | 858 | NOTE: This method cannot be used for communication between the 859 | 'manager' rank and itself. Attempting this will cause the code to 860 | hang. 861 | 862 | Keyword Arguments: 863 | data: The data to be collected asynchronously 864 | on the 'manager' rank. 
865 |             tag (int): A user-defined integer tag to uniquely
866 |                 specify this communication message
867 | 
868 |         Returns:
869 |             tuple: On the 'manager' rank, a tuple containing the source rank
870 |                 ID and the data collected. None on all other ranks.
871 | 
872 |         Raises:
873 |             RuntimeError: If executed during a serial or 1-rank parallel run
874 |         """
875 |         if self.get_size() > 1:
876 |             if self.is_manager():
877 | 
878 |                 # Receive the message from the worker
879 |                 msg_tag = self._tag_offset(self.CLCT_TAG, self.MSG_TAG, tag)
880 |                 msg = self._comm.recv(source=self._mpi.ANY_SOURCE, tag=msg_tag)
881 | 
882 |                 # Check the message content
883 |                 ack = type(msg) is dict and all(
884 |                     [key in msg for key in ['rank', 'buffer', 'shape', 'dtype']]
885 |                 )
886 | 
887 |                 # Send acknowledgement back to the worker
888 |                 ack_tag = self._tag_offset(self.CLCT_TAG, self.ACK_TAG, tag)
889 |                 self._comm.send(ack, dest=msg['rank'], tag=ack_tag)
890 | 
891 |                 # If acknowledgement is bad, don't receive
892 |                 if not ack:
893 |                     return None
894 | 
895 |                 # Receive the data
896 |                 if msg['buffer']:
897 |                     npy_tag = self._tag_offset(self.CLCT_TAG, self.NPY_TAG, tag)
898 |                     recvd = self._numpy.empty(msg['shape'], dtype=msg['dtype'])
899 |                     self._comm.Recv(recvd, source=msg['rank'], tag=npy_tag)
900 |                 else:
901 |                     pyt_tag = self._tag_offset(self.CLCT_TAG, self.PYT_TAG, tag)
902 |                     recvd = self._comm.recv(source=msg['rank'], tag=pyt_tag)
903 |                 return msg['rank'], recvd
904 | 
905 |             else:
906 | 
907 |                 # Create the handshake message
908 |                 msg = {}
909 |                 msg['rank'] = self.get_rank()
910 |                 msg['buffer'] = self._is_bufferable(data)
911 |                 msg['shape'] = data.shape if hasattr(data, 'shape') else None
912 |                 msg['dtype'] = data.dtype if hasattr(data, 'dtype') else None
913 | 
914 |                 # Send the handshake message to the manager
915 |                 msg_tag = self._tag_offset(self.CLCT_TAG, self.MSG_TAG, tag)
916 |                 self._comm.send(msg, dest=0, tag=msg_tag)
917 | 
918 |                 # Receive the acknowledgement from the manager
919 |                 ack_tag = self._tag_offset(self.CLCT_TAG, self.ACK_TAG, tag)
920 |                 ack = self._comm.recv(source=0, tag=ack_tag)
921 | 
922 |                 # Check the acknowledgement, if not OK skip
923 |                 if not ack:
924 |                     return
925 | 
926 |                 # If OK, send the data to the manager
927 |                 if msg['buffer']:
928 |                     npy_tag = self._tag_offset(self.CLCT_TAG, self.NPY_TAG, tag)
929 |                     self._comm.Send(data, dest=0, tag=npy_tag)
930 |                 else:
931 |                     pyt_tag = self._tag_offset(self.CLCT_TAG, self.PYT_TAG, tag)
932 |                     self._comm.send(data, dest=0, tag=pyt_tag)
933 |         else:
934 |             err_msg = 'Collection cannot be used in a 1-rank communicator'
935 |             raise RuntimeError(err_msg)
936 | 
937 |     def divide(self, group):
938 |         """
939 |         Divide this communicator's ranks into groups.
940 | 
941 |         Creates and returns two (2) kinds of groups:
942 | 
943 |             (1) groups with ranks of the same color ID but different rank IDs
944 |                 (called a "monocolor" group), and
945 | 
946 |             (2) groups with ranks of the same rank ID but different color IDs
947 |                 (called a "multicolor" group).
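        For example (a sketch, assuming 4 ranks that call divide() with the
        group names 'a', 'a', 'b', 'b' in rank order): each rank's color ID is
        the index of the first occurrence of its group ID in the gathered list
        of group IDs, so ranks 0-1 get color 0 and ranks 2-3 get color 2. The
        "monocolor" split then groups ranks {0, 1} and {2, 3}, each renumbered
        from 0, while the "multicolor" split groups ranks with the same new
        rank ID: {0, 2} and {1, 3}.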
948 | 949 | Parameters: 950 | group: A unique group ID to which will be assigned an integer 951 | color ID ranging from 0 to the number of group ID's 952 | supplied across all ranks 953 | 954 | Returns: 955 | tuple: A tuple containing (first) the "monocolor" SimpleComm for 956 | ranks with the same color ID (but different rank IDs) and 957 | (second) the "multicolor" SimpleComm for ranks with the same 958 | rank ID (but different color IDs) 959 | 960 | Raises: 961 | RuntimeError: If executed during a serial or 1-rank parallel run 962 | """ 963 | if self.get_size() > 1: 964 | allgroups = list(self._comm.allgather(group)) 965 | color = allgroups.index(group) 966 | monocomm = SimpleCommMPI() 967 | monocomm._color = color 968 | monocomm._group = group 969 | monocomm._comm = self._comm.Split(color) 970 | 971 | rank = monocomm.get_rank() 972 | multicomm = SimpleCommMPI() 973 | multicomm._color = rank 974 | multicomm._group = rank 975 | multicomm._comm = self._comm.Split(rank) 976 | 977 | return monocomm, multicomm 978 | else: 979 | err_msg = 'Division cannot be done on a 1-rank communicator' 980 | raise RuntimeError(err_msg) 981 | --------------------------------------------------------------------------------
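A minimal end-to-end sketch of the API documented above (an illustration, not
part of the package; it assumes a launch such as ``mpirun -n 4 python example.py``
and uses only names defined in asaptools):

    from asaptools.partition import Duplicate
    from asaptools.simplecomm import create_comm

    gcomm = create_comm()
    # Divide the world into two color groups named by rank parity
    name = 'even' if gcomm.get_rank() % 2 == 0 else 'odd'
    monocomm, multicomm = gcomm.divide(name)

    # Tree-scatter a value from the global manager: first across the
    # color-group managers, then within each color group
    data = 42 if gcomm.is_manager() else None
    if monocomm.is_manager():
        data = multicomm.partition(data, func=Duplicate(), involved=True)
    data = monocomm.partition(data, func=Duplicate(), involved=True)
    assert data == 42  # every rank now holds the duplicated value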