├── afar ├── tests │ ├── __init__.py │ ├── test_remotely.py │ ├── test_later.py │ ├── test_core.py │ └── test_notebook.ipynb ├── _exceptions.py ├── __init__.py ├── _where.py ├── _utils.py ├── _printing.py ├── _inspect.py ├── _reprs.py ├── _abra.py ├── _magic.py ├── _core.py └── _version.py ├── .gitattributes ├── requirements.txt ├── pyproject.toml ├── MANIFEST.in ├── setup.cfg ├── .github └── workflows │ ├── test_conda.yml │ └── test_pip.yml ├── setup.py ├── LICENSE ├── .gitignore ├── README.md └── versioneer.py /afar/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | afar/_version.py export-subst 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | innerscope >=0.5.1 2 | distributed >=2021.9.1 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 100 3 | extend-exclude = "test_notebook.ipynb" 4 | -------------------------------------------------------------------------------- /afar/_exceptions.py: -------------------------------------------------------------------------------- 1 | class AfarException(Exception): 2 | """Used to explicitly indicate where and how to execute the code of a context""" 3 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include afar *.py 2 | include setup.py 3 | include setup.cfg 4 | include README.md 5 | include LICENSE 6 | include MANIFEST.in 7 | include versioneer.py 8 | include requirements.txt 9 | include afar/_version.py 10 | -------------------------------------------------------------------------------- /afar/__init__.py: -------------------------------------------------------------------------------- 1 | """afar runs code within a context manager or IPython magic on a Dask cluster. 2 | 3 | >>> with afar.run, remotely: 4 | ... import dask_cudf 5 | ... df = dask_cudf.read_parquet("s3://...") 6 | ... result = df.sum().compute() 7 | 8 | or to use an IPython magic: 9 | 10 | >>> %load_ext afar 11 | >>> %afar z = x + y 12 | 13 | Read the documentation at https://github.com/eriknw/afar 14 | """ 15 | 16 | from . 
import _utils 17 | from ._core import get, run # noqa 18 | from ._version import get_versions 19 | from ._where import later, locally, remotely # noqa 20 | 21 | __version__ = get_versions()["version"] 22 | del get_versions 23 | 24 | if _utils.is_ipython(): 25 | from ._magic import new_magic # noqa 26 | 27 | 28 | def load_ipython_extension(ip): 29 | from ._magic import AfarMagic 30 | 31 | ip.register_magics(AfarMagic) 32 | -------------------------------------------------------------------------------- /afar/tests/test_remotely.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | from operator import add 4 | 5 | from dask.distributed import Client 6 | 7 | import afar 8 | 9 | # TODO: better testing infrastructure 10 | if __name__ == "__main__": 11 | 12 | client = Client() 13 | two = client.submit(add, 1, 1) 14 | 15 | with afar.run as results, afar.remotely: 16 | three = two + 1 17 | assert three.result() == 3 18 | 19 | with afar.get, afar.remotely(priority=1): 20 | five = two + three 21 | assert five == 5 22 | 23 | 24 | def test_runme(): 25 | assert subprocess.check_call([sys.executable, __file__]) == 0 26 | 27 | 28 | def test_simple(): 29 | client = Client() 30 | two = client.submit(add, 1, 1) 31 | 32 | with afar.run as results, afar.remotely: 33 | three = two + 1 34 | three = results["three"] 35 | assert three.result() == 3 36 | with afar.get as results, afar.remotely(priority=1): 37 | five = two + three 38 | assert results == {"five": 5} 39 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [tool:pytest] 5 | testpaths = afar/tests 6 | 7 | [flake8] 8 | max-line-length = 100 9 | exclude = 10 | versioneer.py, 11 | afar/tests/, 12 | build/ 13 | ignore = 14 | E203, # whitespace before ':' 15 | E231, # Multiple spaces around "," 16 | W503, # line break before binary operator 17 | 18 | [isort] 19 | sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER 20 | profile = black 21 | skip_gitignore = true 22 | float_to_top = true 23 | default_section = THIRDPARTY 24 | known_first_party = afar 25 | 26 | [coverage:run] 27 | source = afar 28 | branch = True 29 | omit = 30 | afar/_version.py, 31 | afar/tests/* 32 | 33 | [coverage:report] 34 | # Regexes for lines to exclude from consideration 35 | exclude_lines = 36 | pragma: no cover 37 | 38 | raise AssertionError 39 | raise NotImplementedError 40 | 41 | [versioneer] 42 | VCS = git 43 | style = pep440 44 | versionfile_source = afar/_version.py 45 | versionfile_build = afar/_version.py 46 | tag_prefix = 47 | parentdir_prefix = afar- 48 | -------------------------------------------------------------------------------- /afar/_where.py: -------------------------------------------------------------------------------- 1 | from ._exceptions import AfarException 2 | 3 | _errors_to_locations = {} 4 | try: 5 | remotely 6 | except NameError as exc: 7 | _errors_to_locations[exc.args[0]] = "remotely" 8 | 9 | try: 10 | locally 11 | except NameError as exc: 12 | _errors_to_locations[exc.args[0]] = "locally" 13 | 14 | try: 15 | later 16 | except NameError as exc: 17 | _errors_to_locations[exc.args[0]] = "later" 18 | 19 | 20 | class Where: 21 | def __init__(self, where, client=None, submit_kwargs=None): 22 | self.where = where 23 | self.client = client 24 | self.submit_kwargs = submit_kwargs 25 | 26 | def __enter__(self): 27 | raise 
AfarException(self) 28 | 29 | def __exit__(self, exc_type, exc_value, exc_traceback): # pragma: no cover 30 | return False 31 | 32 | def __call__(self, client=None, **submit_kwargs): 33 | return Where(self.where, client, submit_kwargs) 34 | 35 | 36 | remotely = Where("remotely") 37 | locally = Where("locally") 38 | later = Where("later") 39 | 40 | 41 | def find_where(exc_type, exc_value): 42 | if issubclass(exc_type, AfarException): 43 | return exc_value.args[0] 44 | elif issubclass(exc_type, NameError) and exc_value.args[0] in _errors_to_locations: 45 | return globals()[_errors_to_locations[exc_value.args[0]]] 46 | else: 47 | return None 48 | -------------------------------------------------------------------------------- /afar/_utils.py: -------------------------------------------------------------------------------- 1 | import builtins 2 | import sys 3 | from types import CodeType 4 | 5 | from distributed.utils import is_kernel 6 | 7 | 8 | def is_terminal(): 9 | if not is_ipython(): 10 | return False 11 | from IPython import get_ipython 12 | 13 | return type(get_ipython()).__name__ == "TerminalInteractiveShell" 14 | 15 | 16 | def is_ipython(): 17 | return hasattr(builtins, "__IPYTHON__") and "IPython" in sys.modules 18 | 19 | 20 | def supports_async_output(): 21 | if is_kernel() and not is_terminal(): 22 | try: 23 | import ipywidgets # noqa 24 | except ImportError: 25 | return False 26 | return True 27 | return False 28 | 29 | 30 | if hasattr(CodeType, "replace"): 31 | code_replace = CodeType.replace 32 | else: 33 | 34 | def code_replace(code, *, co_code): 35 | return CodeType( 36 | code.co_argcount, 37 | code.co_kwonlyargcount, 38 | code.co_nlocals, 39 | code.co_stacksize, 40 | code.co_flags, 41 | co_code, 42 | code.co_consts, 43 | code.co_names, 44 | code.co_varnames, 45 | code.co_filename, 46 | code.co_name, 47 | code.co_firstlineno, 48 | code.co_lnotab, 49 | code.co_freevars, 50 | code.co_cellvars, 51 | ) 52 | -------------------------------------------------------------------------------- /.github/workflows/test_conda.yml: -------------------------------------------------------------------------------- 1 | name: Test conda 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | 8 | jobs: 9 | test: 10 | runs-on: ${{ matrix.os }} 11 | defaults: 12 | run: 13 | shell: bash -l {0} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: ["ubuntu-latest", "macos-latest", "windows-latest"] 18 | python-version: ["3.7", "3.8", "3.9", "3.10"] # , "3.7.10 1_73_pypy"] 19 | # exclude: 20 | # - os: "windows-latest" 21 | # python-version: "3.7.10 1_73_pypy" 22 | steps: 23 | - name: Checkout 24 | uses: actions/checkout@v3 25 | - name: Conda 26 | uses: conda-incubator/setup-miniconda@v2 27 | with: 28 | auto-update-conda: true 29 | python-version: ${{ matrix.python-version }} 30 | channels: conda-forge 31 | activate-environment: afar 32 | - name: Install dependencies 33 | run: | 34 | conda install -y -c conda-forge distributed pytest innerscope 35 | pip install -e . 
36 | - name: PyTest 37 | run: | 38 | which python # sanity check 39 | python -c 'from distributed.utils import is_kernel' 40 | python -c 'from dask import distributed' 41 | pytest 42 | - name: Notebook test 43 | run: | 44 | conda install -y -c conda-forge nbconvert jupyter 45 | jupyter nbconvert --to notebook --execute afar/tests/*ipynb 46 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | import versioneer 4 | 5 | install_requires = open("requirements.txt").read().strip().split("\n") 6 | with open("README.md") as f: 7 | long_description = f.read() 8 | 9 | setup( 10 | name="afar", 11 | version=versioneer.get_version(), 12 | cmdclass=versioneer.get_cmdclass(), 13 | description="Run code on a Dask cluster via a context manager or IPython magic", 14 | long_description=long_description, 15 | long_description_content_type="text/markdown", 16 | author="Erik Welch", 17 | author_email="erik.n.welch@gmail.com", 18 | url="https://github.com/eriknw/afar", 19 | packages=find_packages(), 20 | license="BSD", 21 | python_requires=">=3.7", 22 | setup_requires=[], 23 | install_requires=install_requires, 24 | tests_require=["pytest"], 25 | include_package_data=True, 26 | classifiers=[ 27 | "Development Status :: 4 - Beta", 28 | "License :: OSI Approved :: BSD License", 29 | "Operating System :: OS Independent", 30 | "Programming Language :: Python", 31 | "Programming Language :: Python :: 3", 32 | "Programming Language :: Python :: 3.7", 33 | "Programming Language :: Python :: 3.8", 34 | "Programming Language :: Python :: 3.9", 35 | "Programming Language :: Python :: 3.10", 36 | "Programming Language :: Python :: 3 :: Only", 37 | "Programming Language :: Python :: Implementation :: CPython", 38 | ], 39 | ) 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021 Erik Welch 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | a. Redistributions of source code must retain the above copyright notice, 9 | this list of conditions and the following disclaimer. 10 | b. Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution. 13 | c. Neither the name of afar nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR 22 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 28 | DAMAGE. 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Vi 10 | *.swp 11 | *.swo 12 | *.swn 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | # mkdocs documentation 106 | /site 107 | 108 | # mypy 109 | .mypy_cache/ 110 | 111 | # PyCharm 112 | .idea 113 | 114 | # Mac 115 | .DS_Store 116 | 117 | # VSCode 118 | .vscode 119 | 120 | # Generated IPython notebooks 121 | *.nbconvert.ipynb 122 | 123 | # Dask worker 124 | dask-worker-space/ 125 | -------------------------------------------------------------------------------- /.github/workflows/test_pip.yml: -------------------------------------------------------------------------------- 1 | name: Test pip 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | 8 | jobs: 9 | test: 10 | runs-on: ${{ matrix.os }} 11 | strategy: 12 | fail-fast: false 13 | matrix: 14 | os: ["ubuntu-latest", "macos-latest", "windows-latest"] 15 | python-version: ["3.7", "3.8", "3.9", "3.10", "pypy-3.7"] 16 | #exclude: 17 | # - os: "ubuntu-latest" 18 | # python-version: "pypy-3.7" 19 | # - os: "macos-latest" 20 | # python-version: "pypy-3.7" 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v3 24 | - name: Set up Python 25 | uses: actions/setup-python@v3 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | - name: Install 
dependencies 29 | run: | 30 | python -m pip install --upgrade pip setuptools wheel 31 | pip install -r requirements.txt 32 | pip install -e . 33 | - name: PyTest 34 | run: | 35 | pip install pytest coverage 36 | python -c 'from distributed.utils import is_kernel' 37 | python -c 'from dask import distributed' 38 | coverage run --branch -m pytest 39 | - name: Style checks 40 | if: (! contains(matrix.python-version, 'pypy')) 41 | run: | 42 | pip install black flake8 43 | flake8 . 44 | black . --check --diff 45 | - name: Coverage 46 | env: 47 | GITHUB_TOKEN: ${{ secrets.github_token }} 48 | COVERALLS_FLAG_NAME: ${{ matrix.python-version}} 49 | COVERALLS_PARALLEL: true 50 | run: | 51 | pip install coveralls 52 | coverage report --show-missing 53 | coveralls --service=github 54 | - name: Notebook test 55 | if: (! contains(matrix.python-version, 'pypy')) 56 | run: | 57 | pip install nbconvert jupyter 58 | jupyter nbconvert --to notebook --execute afar/tests/*ipynb 59 | 60 | finish: 61 | needs: test 62 | runs-on: ubuntu-latest 63 | steps: 64 | - name: Coveralls Finished 65 | uses: coverallsapp/github-action@master 66 | with: 67 | github-token: ${{ secrets.github_token }} 68 | parallel-finished: true 69 | -------------------------------------------------------------------------------- /afar/_printing.py: -------------------------------------------------------------------------------- 1 | """Classes used to capture print statements within a Dask task.""" 2 | import builtins 3 | import sys 4 | from io import StringIO 5 | from threading import Lock, local 6 | 7 | from dask.distributed import get_worker 8 | 9 | 10 | # Here's the plan: we'll capture all print statements to stdout and stderr 11 | # on the current thread. But, we need to leave the other threads alone! 12 | # So, use `threading.local` and a lock for some ugly capturing. 
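# A rough usage sketch of how these pieces compose inside a Dask task; the
# channel and key values below are made up for illustration and are not a
# public API:
#
#     with PrintRecorder("afar-print-channel", "task-key"):
#         print("hello")                      # captured on this thread
#         print("oops", file=sys.stderr)      # captured and tagged "stderr"
#
# Prints from other threads keep going through the original builtins.print,
# because LocalPrint resolves `printer` per thread (threading.local) and only
# the current thread's `printer` points at the PrintRecorder instance.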
13 | class LocalPrint(local): 14 | printer = None 15 | # Update fields from `functools.WRAPPER_ASSIGNMENTS` as if we wrapped print 16 | # See: https://github.com/eriknw/afar/issues/29 17 | __module__ = builtins.print.__module__ 18 | __name__ = builtins.print.__name__ 19 | __qualname__ = builtins.print.__qualname__ 20 | __doc__ = builtins.print.__doc__ 21 | 22 | def __call__(self, *args, **kwargs): 23 | return self.printer(*args, **kwargs) 24 | 25 | 26 | class PrintRecorder: 27 | n = 0 28 | local_print = LocalPrint() 29 | print_lock = Lock() 30 | 31 | def __init__(self, channel, key): 32 | self.channel = channel 33 | self.key = key 34 | 35 | def __enter__(self): 36 | with self.print_lock: 37 | if PrintRecorder.n == 0: 38 | LocalPrint.printer = builtins.print 39 | builtins.print = self.local_print 40 | PrintRecorder.n += 1 41 | self.local_print.printer = self 42 | return self 43 | 44 | def __exit__(self, exc_type, exc_value, exc_traceback): 45 | with self.print_lock: 46 | PrintRecorder.n -= 1 47 | if PrintRecorder.n == 0: 48 | builtins.print = LocalPrint.printer 49 | self.local_print.printer = LocalPrint.printer 50 | return False 51 | 52 | def __call__(self, *args, file=None, **kwargs): 53 | if file is None or file is sys.stdout: 54 | file = StringIO() 55 | stream_name = "stdout" 56 | elif file is sys.stderr: 57 | file = StringIO() 58 | stream_name = "stderr" 59 | else: 60 | stream_name = None 61 | LocalPrint.printer(*args, **kwargs, file=file) 62 | if stream_name is not None: 63 | try: 64 | worker = get_worker() 65 | except ValueError: 66 | pass 67 | else: 68 | worker.log_event(self.channel, (self.key, stream_name, file.getvalue())) 69 | # Print locally too 70 | stream = sys.stdout if stream_name == "stdout" else sys.stderr 71 | LocalPrint.printer(file.getvalue(), end="", file=stream) 72 | -------------------------------------------------------------------------------- /afar/tests/test_later.py: -------------------------------------------------------------------------------- 1 | from pytest import raises 2 | 3 | import afar 4 | 5 | 6 | def test_later_doesnt_execute(): 7 | run = afar.run() 8 | with run, later: 9 | 1 / 0 10 | assert run.context_body == [" 1 / 0\n"] 11 | 12 | 13 | def test_single_line(): 14 | with raises(RuntimeError, match="please put the context body on a new line"): 15 | # fmt: off 16 | with afar.run, later: pass 17 | # fmt: on 18 | 19 | 20 | def test_later_bodies(): 21 | run = afar.run() 22 | with run, later: 23 | pass 24 | 25 | assert run.context_body == [" pass\n", "\n"] 26 | 27 | with raises(Exception, match="missing"): 28 | with run: 29 | pass 30 | 31 | with run, later: 32 | b = a + 1 33 | c = a + b 34 | 35 | assert run.context_body == [" b = a + 1\n", " c = a + b\n", "\n"] 36 | 37 | with raises(Exception, match="missing"): 38 | with run: 39 | with later: 40 | pass 41 | 42 | # It would be nice if we could make these fail 43 | with run, later as z: 44 | pass 45 | 46 | with run, later as [z, *other]: 47 | pass 48 | 49 | with run, later, z: 50 | pass 51 | 52 | # fmt: off 53 | with \ 54 | run, \ 55 | later \ 56 | : 57 | 58 | pass 59 | 60 | # fmt: on 61 | 62 | assert run.context_body == ["\n", " pass\n", "\n", " # fmt: on\n", "\n"] 63 | 64 | # fmt: off 65 | with \ 66 | run as c, \ 67 | later( \ 68 | d= \ 69 | ":" \ 70 | ) \ 71 | : 72 | 73 | f 74 | g 75 | h( 76 | z 77 | = 78 | 2 79 | ) 80 | 81 | # fmt: on 82 | 83 | assert run.context_body == [ 84 | "\n", 85 | " f\n", 86 | " g\n", 87 | " h(\n", 88 | " z\n", 89 | " =\n", 90 | " 2\n", 91 | " )\n", 92 | "\n", 93 | " # fmt: 
on\n", 94 | "\n", 95 | ] 96 | 97 | # fmt: off 98 | with \ 99 | run, \ 100 | later: 101 | # : 102 | ( 103 | 1 104 | + 105 | 2 106 | ) 107 | x = ( 108 | 3 109 | + 110 | 4 111 | ) 112 | # fmt: on 113 | assert run.context_body == [ 114 | " # :\n", 115 | " (\n", 116 | " 1\n", 117 | " +\n", 118 | " 2\n", 119 | " )\n", 120 | ] 121 | -------------------------------------------------------------------------------- /afar/_inspect.py: -------------------------------------------------------------------------------- 1 | """Utilities to get the lines of the context body.""" 2 | import dis 3 | from inspect import findsource 4 | 5 | from ._utils import is_ipython 6 | 7 | 8 | def get_lines(frame): 9 | try: 10 | lines, offset = findsource(frame) 11 | except OSError: 12 | # Try to fine the source if we are in %%time or %%timeit magic 13 | if frame.f_code.co_filename in {"", ""} and is_ipython(): 14 | from IPython import get_ipython 15 | 16 | ip = get_ipython() 17 | if ip is None: 18 | raise 19 | cell = ip.history_manager._i00 # The current cell! 20 | lines = cell.splitlines(keepends=True) 21 | # strip the magic 22 | for i, line in enumerate(lines): 23 | if line.strip().startswith("%%time"): 24 | lines = lines[i + 1 :] 25 | break 26 | else: 27 | raise 28 | # strip blank lines 29 | for i, line in enumerate(lines): 30 | if line.strip(): 31 | if i: 32 | lines = lines[i:] 33 | lines[-1] += "\n" 34 | break 35 | else: 36 | raise 37 | else: 38 | raise 39 | return lines 40 | 41 | 42 | def get_body_start(lines, with_start): 43 | line = lines[with_start] 44 | stripped = line.lstrip() 45 | body = line[: len(line) - len(stripped)] + " pass\n" 46 | body *= 2 47 | with_lines = [stripped] 48 | try: 49 | code = compile(stripped, "", "exec") 50 | except Exception: 51 | pass 52 | else: 53 | raise RuntimeError( 54 | "Failed to analyze the context! When using afar, " 55 | "please put the context body on a new line." 
56 | ) 57 | for i, line in enumerate(lines[with_start:]): 58 | if i > 0: 59 | with_lines.append(line) 60 | if ":" in line: 61 | source = "".join(with_lines) + body 62 | try: 63 | code = compile(source, "", "exec") 64 | except Exception: 65 | pass 66 | else: 67 | num_with = code.co_code.count(dis.opmap["SETUP_WITH"]) 68 | body_start = with_start + i + 1 69 | return num_with, body_start 70 | raise RuntimeError("Failed to analyze the context!") 71 | 72 | 73 | def get_body(lines): 74 | head = "def f():\n with x:\n " 75 | tail = " pass\n pass\n" 76 | while lines: 77 | source = head + " ".join(lines) + tail 78 | try: 79 | compile(source, "", "exec") 80 | except Exception: 81 | lines.pop() 82 | else: 83 | return lines 84 | raise RuntimeError("Failed to analyze the context body!") 85 | -------------------------------------------------------------------------------- /afar/tests/test_core.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | 3 | import pytest 4 | from pytest import raises 5 | 6 | import afar 7 | 8 | 9 | def test_a_modest_beginning(): 10 | with afar.run(), locally: 11 | x = 1 12 | y = x + 1 13 | 14 | with afar.run(), afar.locally: 15 | pass 16 | 17 | with afar.run(), locally: 18 | pass 19 | 20 | with afar.run(), afar.locally: 21 | pass 22 | 23 | with raises(NameError, match="locallyblah"): 24 | with afar.run(), locallyblah: 25 | pass 26 | 27 | with raises(Exception, match="`afar.run` is missing a location"): 28 | with afar.run(): 29 | pass 30 | 31 | 32 | # Not the final API, but a useful step 33 | def test_temporary_assignment(): 34 | z = 1 35 | 36 | def f(): 37 | w = 10 38 | with afar.run() as results, locally: 39 | x = z 40 | y = x + 1 + w 41 | return results 42 | 43 | results = f() 44 | assert "x" not in results 45 | assert results["y"] == 12 46 | assert not hasattr(results, "w") 47 | assert not hasattr(results, "z") 48 | 49 | with afar.run as results, afar.locally: 50 | x = z 51 | y = x + 1 52 | with raises(UnboundLocalError): 53 | x 54 | assert results == {"y": 2} 55 | 56 | # fmt: off 57 | with \ 58 | afar.run() as results, \ 59 | locally \ 60 | : 61 | x = z 62 | y = x + 1 63 | assert results == {'y': 2} 64 | # fmt: on 65 | 66 | with afar.run("a") as results, locally: 67 | a = 1 68 | b = a + 1 69 | assert results == {"a": 1} 70 | 71 | 72 | def test_give_data(): 73 | data = {"a": 1} 74 | run = afar.run(data=data) 75 | with run, locally: 76 | b = a + 1 77 | assert run.data is data 78 | assert data == {"a": 1, "b": 2} 79 | c = 10 80 | with run, locally: 81 | d = a + b + c 82 | assert data == {"a": 1, "b": 2, "d": 13} 83 | 84 | # singleton doesn't save data 85 | with afar.run as data2, locally: 86 | e = 100 87 | assert afar.run.data is None 88 | assert data2 == {"e": 100} 89 | 90 | 91 | def test_endline(): 92 | # fmt: off 93 | with afar.run as results, locally: 94 | a = 1 95 | b = ( 96 | a 97 | + 98 | 2 99 | ) 100 | assert results == {"b": 3} 101 | # fmt: on 102 | 103 | 104 | def test_pickle(): 105 | run = afar.run() 106 | with run, locally: 107 | a = 1 108 | assert run.data == {"a": 1} 109 | func = run._magic_func 110 | s = pickle.dumps(func) 111 | func2 = pickle.loads(s) 112 | assert dict(func2()) == {"a": 1} 113 | assert func._scoped.func.__code__.co_code == func2._scoped.func.__code__.co_code 114 | 115 | 116 | def test_end_of_file(): 117 | data = {} 118 | end_of_file(data) 119 | assert data == {"y": 20} 120 | 121 | 122 | def end_of_file(data): 123 | with afar.run(data=data), locally: 124 | x = 10 125 | y = 2 * x 126 | 
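# A small extra sketch (not part of the original suite): naming variables up
# front captures exactly those names, mirroring the README's
# `afar.run("one", "two")` example.
def test_multiple_names_sketch():
    with afar.run("a", "b") as results, locally:
        a = 1
        b = a + 1
        c = a + b
    assert results == {"a": 1, "b": 2}
    assert "c" not in results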
-------------------------------------------------------------------------------- /afar/_reprs.py: -------------------------------------------------------------------------------- 1 | """Utilities to calculate the (pretty) repr of objects remotely and display locally.""" 2 | import sys 3 | import traceback 4 | 5 | 6 | class AttrRecorder: 7 | """Record which attributes are accessed. 8 | 9 | This is used to determine what repr methods IPython/Jupyter is trying to 10 | use, and in what order. 11 | """ 12 | 13 | def __init__(self): 14 | self._attrs = [] 15 | 16 | def __getattr__(self, attr): 17 | if "canary" not in attr and attr != "_ipython_display_": 18 | # _ipython_display_ requires sending the object back to the client. 19 | # Let's not bother with this hassle for now. 20 | self._attrs.append(attr) 21 | raise AttributeError(attr) 22 | 23 | 24 | def get_repr_methods(): 25 | """List of repr methods that IPython/Jupyter tries to use""" 26 | from IPython import get_ipython 27 | 28 | ip = get_ipython() 29 | if ip is None: 30 | return 31 | attr_recorder = AttrRecorder() 32 | ip.display_formatter.format(attr_recorder) 33 | return attr_recorder._attrs 34 | 35 | 36 | def repr_afar(val, repr_methods): 37 | """Compute the repr of an object for IPython/Jupyter. 38 | 39 | We call this on a remote object. 40 | """ 41 | if val is None: 42 | return None 43 | for method_name in repr_methods: 44 | method = getattr(val, method_name, None) 45 | if method is None: 46 | continue 47 | if method_name == "_ipython_display_": 48 | # Custom display! Send the object to the client 49 | # We don't allow _ipython_display_ at the moment 50 | return val, method_name, False 51 | try: 52 | rv = method() 53 | except NotImplementedError: 54 | continue 55 | except Exception: 56 | exc_info = sys.exc_info() 57 | rv = traceback.format_exception(*exc_info) 58 | return rv, method_name, True 59 | else: 60 | if rv is None: 61 | continue 62 | if method_name == "_repr_mimebundle_": 63 | if not isinstance(rv, (dict, tuple)): 64 | continue 65 | elif not isinstance(rv, str): 66 | continue 67 | return rv, method_name, False 68 | return repr(val), "__repr__", False 69 | 70 | 71 | class MimicRepr: 72 | def __init__(self, val, method_name): 73 | self.val = val 74 | self.method_name = method_name 75 | 76 | def __getattr__(self, attr): 77 | if attr != self.method_name: 78 | raise AttributeError(attr) 79 | return self._call 80 | 81 | def _call(self, *args, **kwargs): 82 | return self.val 83 | 84 | def __dir__(self): 85 | return [self.method_name] 86 | 87 | def __repr__(self): 88 | return self.val 89 | 90 | 91 | def display_repr(results, out=None): 92 | """Display results from `repr_afar` locally in IPython/Jupyter""" 93 | val, method_name, is_exception = results 94 | if is_exception: 95 | if out is None: 96 | print(val, file=sys.stderr) 97 | else: 98 | out.append_stderr(val) 99 | return 100 | if val is None and method_name is None: 101 | return 102 | 103 | from IPython.display import display 104 | 105 | if method_name == "_ipython_display_": 106 | # We don't allow _ipython_display_ at the moment 107 | if out is None: 108 | display(val) 109 | else: 110 | out.append_display_data(val) 111 | else: 112 | mimic = MimicRepr(val, method_name) 113 | if out is None: 114 | display(mimic) 115 | else: 116 | out.append_display_data(mimic) 117 | -------------------------------------------------------------------------------- /afar/_abra.py: -------------------------------------------------------------------------------- 1 | """Perform a magic trick: given lines 
of code, create a function to run remotely. 2 | 3 | This callable object is able to provide the values of the requested argument 4 | names and return the final expression so it can be displayed. 5 | """ 6 | import dis 7 | from types import FunctionType 8 | 9 | from dask.distributed import Future 10 | from innerscope import scoped_function 11 | 12 | from ._reprs import get_repr_methods 13 | from ._utils import code_replace, is_ipython 14 | 15 | 16 | def endswith_expr(func): 17 | """Does the function end with an expression (not assigment or return)""" 18 | code = func.__code__ 19 | co_code = code.co_code 20 | return ( 21 | len(co_code) > 6 22 | and co_code[-6] == dis.opmap["POP_TOP"] 23 | and co_code[-4] == dis.opmap["LOAD_CONST"] 24 | and co_code[-2] == dis.opmap["RETURN_VALUE"] 25 | and code.co_consts[co_code[-3]] is None 26 | ) 27 | 28 | 29 | def return_expr(func): 30 | """Create a new function from func that returns the final expression""" 31 | code = func.__code__ 32 | # remove POP_TOP and LOAD_CONST (None) 33 | co_code = code.co_code[:-6] + code.co_code[-2:] 34 | code = code_replace(code, co_code=co_code) 35 | rv = FunctionType( 36 | code, 37 | func.__globals__, 38 | name=func.__name__, 39 | argdefs=func.__defaults__, 40 | closure=func.__closure__, 41 | ) 42 | rv.__kwdefaults__ = func.__kwdefaults__ 43 | return rv 44 | 45 | 46 | def create_func(source, globals_dict, is_in_ipython): 47 | code = compile( 48 | source, 49 | "", 50 | "exec", 51 | ) 52 | locals_dict = {} 53 | exec(code, globals_dict, locals_dict) 54 | func = locals_dict["_afar_magic_"] 55 | display_expr = is_in_ipython and endswith_expr(func) 56 | if display_expr: 57 | func = return_expr(func) 58 | return func, display_expr 59 | 60 | 61 | class MagicFunction: 62 | def __init__(self, source, scoped, display_expr): 63 | self._source = source 64 | self._scoped = scoped 65 | self._display_expr = display_expr 66 | if display_expr: 67 | self._repr_methods = get_repr_methods() 68 | else: 69 | self._repr_methods = None 70 | 71 | def __call__(self): 72 | return self._scoped() 73 | 74 | def __getstate__(self): 75 | # Instead of trying to serialize the function we created with `compile` and `exec`, 76 | # let's save the source and recreate the function (and self._scoped) again. 77 | state = dict(self.__dict__) 78 | del state["_scoped"] 79 | state["outer_scope"] = self._scoped.outer_scope 80 | return state 81 | 82 | def __setstate__(self, state): 83 | outer_scope = state.pop("outer_scope") 84 | self.__dict__.update(state) 85 | func, _ = create_func(self._source, {}, self._display_expr) 86 | self._scoped = scoped_function(func, outer_scope) 87 | 88 | 89 | def cadabra(context_body, where, names, data, global_ns, local_ns): 90 | # Create a new function from the code block of the context. 91 | # For now, we require that the source code is available. 92 | source = "def _afar_magic_():\n" + "".join(context_body) 93 | func, display_expr = create_func(source, global_ns, is_ipython()) 94 | 95 | # If no variable names were given, only get the last assignment 96 | if not names: 97 | for inst in list(dis.get_instructions(func)): 98 | if inst.opname in {"STORE_NAME", "STORE_FAST", "STORE_DEREF", "STORE_GLOBAL"}: 99 | names = (inst.argval,) 100 | 101 | # Use innerscope! We only keep the globals, locals, and closures we need. 
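    # (`scoped_function` from innerscope wraps the function and reports, via
    # `scoped.missing`, any outer names it still needs; `scoped.bind(...)`
    # supplies them, and everything captured ends up in `scoped.outer_scope`,
    # which is inspected below to separate out any Futures.)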
102 | scoped = scoped_function(func, data) 103 | if scoped.missing: 104 | # Gather the necessary closures and locals 105 | update = {key: local_ns[key] for key in scoped.missing if key in local_ns} 106 | scoped = scoped.bind(update) 107 | 108 | if where == "remotely": 109 | # Get ready to submit to dask.distributed by separating the Futures. 110 | futures = { 111 | key: val 112 | for key, val in scoped.outer_scope.items() 113 | if isinstance(val, Future) 114 | # TODO: what can/should we do if the future is in a bad state? 115 | } 116 | for key in futures: 117 | del scoped.outer_scope[key] 118 | else: 119 | futures = None 120 | magic_func = MagicFunction(source, scoped, display_expr) 121 | return magic_func, names, futures 122 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # **Afar** 2 | [![Python Version](https://img.shields.io/badge/python-3.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-blue)](https://img.shields.io/badge/python-3.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-blue) 3 | [![Version](https://img.shields.io/pypi/v/afar.svg)](https://pypi.org/project/afar/) 4 | [![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://github.com/eriknw/afar/blob/main/LICENSE) 5 | [![Build Status](https://github.com/eriknw/afar/workflows/Test/badge.svg)](https://github.com/eriknw/afar/actions) 6 | [![Coverage Status](https://coveralls.io/repos/eriknw/afar/badge.svg?branch=main)](https://coveralls.io/r/eriknw/afar) 7 | 8 | > **_One man's magic is another man's engineering_**
9 | > _Robert A. Heinlein_ 10 |
11 | 12 | ## Installation 13 | `afar` may be installed with pip: 14 | ```bash 15 | pip install afar 16 | ``` 17 | or with [conda](https://docs.conda.io/en/latest/): 18 | ```bash 19 | conda install -c conda-forge afar 20 | ``` 21 | 22 | ## What is it? 23 | `afar` allows you to run code on a remote [Dask](https://dask.org/) [cluster](https://distributed.dask.org/en/latest/) using context managers and [IPython magics](#Magic). For example: 24 | ```python 25 | import afar 26 | from dask.distributed import Client 27 | client = Client() 28 | 29 | with afar.run, remotely: 30 | import dask_cudf 31 | df = dask_cudf.read_parquet("s3://...") 32 | result = df.sum().compute() 33 | ``` 34 | Outside the context, `result` is a [Dask Future](https://docs.dask.org/en/latest/futures.html) whose data resides on a worker. `result.result()` is necessary to copy the data locally. 35 | 36 | By default, only the last assignment is saved. One can specify which variables to save: 37 | ```python 38 | with afar.run("one", "two"), remotely: 39 | one = 1 40 | two = one + 1 41 | ``` 42 | `one` and `two` are now both Futures. They can be used directly in other `afar.run` contexts: 43 | ```python 44 | with afar.run as data, remotely: 45 | three = one + two 46 | 47 | assert three.result() == 3 48 | assert data["three"].result() == 3 49 | ``` 50 | `data` above is a dictionary of variable names to Futures. It may be necessary at times to get the data from here. Alternatively, you may pass a mapping to `afar.run` to use as the data. 51 | ```python 52 | run = afar.run(data={"four": 4}) 53 | with run, remotely: 54 | seven = three + four 55 | assert run.data["seven"].result() == 7 56 | ``` 57 | If you want to automatically gather the data locally (to avoid calling `.result()`), use `afar.get` instead of `afar.run`: 58 | ```python 59 | with afar.get, remotely: 60 | five = two + three 61 | assert five == 5 62 | ``` 63 | ## Interactivity in Jupyter 64 | There are several enhancements when using `afar` in Jupyter Notebook or Qt console, JupyterLab, or any IPython-based frontend. 65 | 66 | The rich repr of the final expression will be displayed if it's not an assignment: 67 | ```python 68 | with afar.run, remotely: 69 | three + seven 70 | # displays 10! 71 | ``` 72 | 73 | Printing is captured and displayed locally and asynchronously: 74 | ```python 75 | with afar.run, remotely: 76 | print(three) 77 | print(seven, file=sys.stderr) 78 | # 3 79 | # 7 80 | ``` 81 | These are done asynchronously using `ipywidgets`. 82 | 83 | ### Magic! 84 | First load `afar` magic extension: 85 | ```python 86 | %load_ext afar 87 | ``` 88 | Now you can use `afar` as line or cell magic. `%%afar` is like `with afar.run, remotely:`. It can optionally accept a list of variable names to save: 89 | ```python 90 | %%afar x, y 91 | x = 1 92 | y = x + 1 93 | ``` 94 | and 95 | ```python 96 | z = %afar x + y 97 | ``` 98 | 99 | ### Custom Magic 100 | You can create your own IPython magic like `%afar` that has arguments baked in: 101 | ```python 102 | afar.new_magic("on_gpus", where=remotely(resources={"GPU": 1})) 103 | ``` 104 | then: 105 | ```python 106 | %%on_gpus 107 | import dask_cudf 108 | df = dask_cudf.read_parquet("s3://...") 109 | result = df.sum().compute() 110 | ``` 111 | 112 | ## Is this a good idea? 113 | 114 | I don't know, but it sure is a joy to use 😃 ! 115 | 116 | For motivation, see https://github.com/dask/distributed/issues/4003 117 | 118 | It's natural to be skeptical of unconventional syntax. And magic. 
`afar` is both unconventional and magical, yet it also works well and is surprisingly *fun*! Why not give it a try to see what you think? 119 | 120 | We're still exploring the usability of `afar` [and want to hear what you think](https://github.com/eriknw/afar/discussions). As you're learning `afar`, please ask yourself questions such as: 121 | - can we spell anything better? 122 | - does this offer opportunities? 123 | - what is surprising? 124 | - what is lacking? 125 | 126 | Here's an example of an opportunity: 127 | ```python 128 | on_gpus = afar.remotely(resources={"GPU": 1}) 129 | 130 | with afar.run, on_gpus: 131 | ... 132 | ``` 133 | This now works! Keyword arguments to `remotely` will be passed to [`client.submit`](https://distributed.dask.org/en/latest/api.html#distributed.Client.submit). 134 | 135 | I don't know about you, but I think this is starting to look and feel kinda nice, and it could probably be even better :) 136 | 137 | ## Caveats and Gotchas 138 | 139 | ### Repeatedly copying data 140 | 141 | `afar` automatically gets the data it needs--and only the data it needs--from the outer scope 142 | and sends it to the Dask cluster to compute on. Since we don't know whether local data has been modified 143 | between calls to `afar`, we serialize and send local variables every time we use `run` or `get`. 144 | This is generally fine: it works, it's safe, and is usually fast enough. However, if you do this 145 | frequently with large-ish data, the performance could suffer, and you may be using 146 | more memory on your local machine than necessary. 147 | 148 | With Dask, a common pattern is to send data to the cluster with `scatter` and get a `Future` back. This works: 149 | ```python 150 | A = np.arange(10**7) 151 | A = client.scatter(A) 152 | with afar.run, remotely: 153 | B = A + 1 154 | # A and B are now both Futures; their data is on the cluster 155 | ``` 156 | 157 | Another option is to pass `data` to `run`: 158 | ```python 159 | run = afar.run(data={"A": np.arange(10**7)}) 160 | with afar.run, remotely: 161 | B = A + 1 162 | # run.data["A"] and B are now both Futures; their data is on the cluster 163 | ``` 164 | Here's a nifty trick to use if you're in an IPython notebook: use `data=globals()`! 165 | ```python 166 | run = afar.run(data=globals()) 167 | A = np.arange(10**7) 168 | with run, remotely: 169 | B = A + 1 170 | # A and B are now both Futures; their data is on the cluster 171 | ``` 172 | ### Mutating remote data 173 | As with any Dask workload, one should be careful to not modify remote data that may be reused. 174 | 175 | ### Mutating local data 176 | Similarly, code run remotely isn't able to mutate local variables. For example: 177 | ```python 178 | d = {} 179 | with afar.run, remotely: 180 | d['key'] = 'value' 181 | # d == {} 182 | ``` 183 | ## *✨ This code is highly experimental and magical! ✨* 184 | 185 | -------------------------------------------------------------------------------- /afar/_magic.py: -------------------------------------------------------------------------------- 1 | """Define the IPython magic for using afar""" 2 | from textwrap import indent 3 | 4 | from dask.distributed import Client 5 | from IPython import get_ipython 6 | from IPython.core.error import UsageError 7 | from IPython.core.magic import Magics, line_cell_magic, magics_class, needs_local_scope 8 | 9 | from ._core import Run, get, run 10 | from ._where import Where, remotely 11 | 12 | DOC_TEMPLATE = """Execute the cell on a dask.distributed cluster. 
13 | 14 | Usage, in line mode: 15 | %{name} [{get_arg}{run_arg}{data_arg}{where_arg}{client_arg}] code_to_run 16 | Usage, in cell mode 17 | %%{name} [{get_arg}{run_arg}{data_arg}{where_arg}{client_arg} ] 18 | code... 19 | code... 20 | 21 | Options: 22 | {get_desc} {run_desc} {data_desc} {where_desc} {client_desc} 23 | 24 | Variable names (space- or comma-separated) from the cell to copy to the local 25 | namespace as dask Future objects. 26 | 27 | All arguments given in options must be variable names in the local namespace. 28 | 29 | Examples 30 | -------- 31 | :: 32 | 33 | In [1]: %%{name} x, y 34 | ...: x = 1 35 | ...: y = x + 1 36 | 37 | In [2]: z = %{name} x + y 38 | 39 | """ 40 | DOC_DEFAULTS = { 41 | "name": "afar", 42 | "get_arg": "-g", 43 | "run_arg": " -r run", 44 | "data_arg": " -d data", 45 | "where_arg": " -w where", 46 | "client_arg": " -c client", 47 | "get_desc": ( 48 | "\n -g/--get\n" 49 | " Get results as values, not as futures. This uses `afar.get` instead of `afar.run`.\n" 50 | ), 51 | "run_desc": ( 52 | "\n -r/--run \n" 53 | " A `Run` object from the local namespace such as `run = afar.run(data=mydata)`\n" 54 | ), 55 | "data_desc": ( 56 | "\n -d/--data \n" 57 | " A MutableMapping to use for the data; typically part of a `Run` object.\n" 58 | ), 59 | "where_desc": ( 60 | "\n -w/--where \n" 61 | ' A `Where` object such as `where = remotely(resources={"GPU": 1})`\n' 62 | ), 63 | "client_desc": ( 64 | "\n -c/--client \n" 65 | " A `dask.distributed.Client` object from the local namespace.\n" 66 | ), 67 | } 68 | 69 | 70 | class AfarMagicBase(Magics): 71 | def _run(self, line, cell=None, *, local_ns, runner=None, data=None, where=None, client=None): 72 | options = "" 73 | args = [] 74 | if runner is None: 75 | options += "gr:" 76 | args.append("get") 77 | args.append("run=") 78 | if data is None: 79 | options += "d:" 80 | args.append("data=") 81 | if where is None: 82 | options += "w:" 83 | args.append("where=") 84 | if client is None: 85 | options += "c:" 86 | args.append("client=") 87 | opts, line = self.parse_options( 88 | line, 89 | options, 90 | *args, 91 | posix=False, 92 | strict=False, 93 | preserve_non_opts=True, 94 | ) 95 | if "r" in opts and "run" in opts: 96 | raise UsageError("-r and --run options may not be used at the same time") 97 | if "d" in opts and "data" in opts: 98 | raise UsageError("-d and --data options may not be used at the same time") 99 | if "w" in opts and "where" in opts: 100 | raise UsageError("-w and --where options may not be used at the same time") 101 | if "c" in opts and "client" in opts: 102 | raise UsageError("-c and --client options may not be used at the same time") 103 | 104 | not_found = "argument not found in local namespace" 105 | if runner is not None: 106 | pass 107 | elif "r" in opts or "run" in opts: 108 | runner = opts.get("r", opts.get("run")) 109 | if runner not in local_ns: 110 | raise UsageError(f"Variable name {runner!r} for -r or --run {not_found}") 111 | if not isinstance(runner, Run): 112 | raise UsageError(f"-r or --run argument must be of type Run; got: {type(runner)}") 113 | elif "g" in opts or "get" in opts: 114 | runner = get() 115 | else: 116 | runner = run() 117 | 118 | if data is not None: 119 | pass 120 | elif "d" in opts or "data" in opts: 121 | data = opts.get("d", opts.get("data")) 122 | if data not in local_ns: 123 | raise UsageError(f"Variable name {data!r} for -d or --data {not_found}") 124 | data = local_ns[data] 125 | else: 126 | data = runner.data 127 | if data is None: 128 | data = {} 129 | 130 | if 
where is not None: 131 | pass 132 | elif "w" in opts or "where" in opts: 133 | where = opts.get("w", opts.get("where")) 134 | if where not in local_ns: 135 | raise UsageError(f"Variable name {where!r} for -w or --where {not_found}") 136 | where = local_ns[where] 137 | if not isinstance(where, Where): 138 | raise UsageError( 139 | f"-w or --where argument must be of type Where; got: {type(where)}" 140 | ) 141 | else: 142 | where = remotely 143 | 144 | if client is not None: 145 | pass 146 | elif "c" in opts or "client" in opts: 147 | client = opts.get("c", opts.get("client")) 148 | if client not in local_ns: 149 | raise UsageError(f"Variable name {client!r} for -c or --client {not_found}") 150 | client = local_ns[client] 151 | if not isinstance(client, Client): 152 | raise UsageError( 153 | f"-c or --client argument must be of type Client; got: {type(client)}" 154 | ) 155 | else: 156 | client = runner.client or where.client 157 | 158 | if cell is None: 159 | names = () 160 | context_body = line 161 | else: 162 | # Comma-separated and/or space-separated variable names 163 | names = [ 164 | name 165 | for item in line.split("#")[0].split(",") 166 | for name in item.strip().split(" ") 167 | if name 168 | ] 169 | bad_names = [name for name in names if not name.isidentifier()] 170 | if bad_names: 171 | raise UsageError( 172 | f"The following are bad variable names: {bad_names}\n" 173 | "The %%afar magic accepts a list of variable names (after any options) " 174 | "to bring back to local scope. Example usage:\n\n" 175 | "%%afar x, y\nx = 1\ny = x + 1" 176 | ) 177 | context_body = cell 178 | if not names: 179 | names = runner.names 180 | context_body = indent(context_body, " ") 181 | return runner._run( 182 | where.where, 183 | context_body, 184 | names, 185 | data, 186 | global_ns=local_ns, 187 | local_ns=local_ns, 188 | client=client, 189 | submit_kwargs=where.submit_kwargs, 190 | return_expr=cell is None, 191 | ) 192 | 193 | 194 | @magics_class 195 | class AfarMagic(AfarMagicBase): 196 | @needs_local_scope 197 | @line_cell_magic 198 | def afar(self, line, cell=None, *, local_ns): 199 | return self._run(line, cell, local_ns=local_ns) 200 | 201 | afar.__doc__ = DOC_TEMPLATE.format(**DOC_DEFAULTS) 202 | 203 | 204 | def new_magic(name, *, run=None, data=None, where=None, client=None): 205 | """Create new IPython magic to run afar with the given arguments. 206 | 207 | This is like `%%afar` magic with arguments curried or "baked in". 
208 | 209 | Examples 210 | -------- 211 | :: 212 | 213 | In [1]: afar.new_magic("on_gpus", where=remotely(resources={"GPU": 1})) 214 | 215 | In [2]: %%on_gpus x, y 216 | ...: x = 1 217 | ...: y = x + 1 218 | 219 | In [3]: z = %on_gpus x + y 220 | 221 | """ 222 | 223 | def magic_method(self, line, cell=None, *, local_ns): 224 | return self._run( 225 | line, cell, local_ns=local_ns, runner=run, data=data, where=where, client=client 226 | ) 227 | 228 | d = dict(DOC_DEFAULTS, name=name) 229 | if run is not None: 230 | d["get_arg"] = "" 231 | d["get_desc"] = "" 232 | d["run_arg"] = "" 233 | d["run_desc"] = "" 234 | if data is not None: 235 | d["data_arg"] = "" 236 | d["data_desc"] = "" 237 | if where is not None: 238 | d["where_arg"] = "" 239 | d["where_desc"] = "" 240 | if client is not None: 241 | d["client_arg"] = "" 242 | d["client_desc"] = "" 243 | magic_method.__name__ = name 244 | magic_method.__doc__ = DOC_TEMPLATE.format(**d) 245 | 246 | @magics_class 247 | class AfarNewMagic(AfarMagicBase): 248 | locals()[name] = needs_local_scope(line_cell_magic(magic_method)) 249 | 250 | ip = get_ipython() 251 | ip.register_magics(AfarNewMagic) 252 | -------------------------------------------------------------------------------- /afar/tests/test_notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "5fb62fe8-2ab2-4a33-a0d0-2c47d936068e", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import afar\n", 11 | "import pytest\n", 12 | "from pytest import raises" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "id": "0f2fa89d-3204-4d19-abc9-4ca4a47f27b1", 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "with afar.run(), locally:\n", 23 | " x = 1\n", 24 | " y = x + 1" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "2edc7346-1c9f-4da6-ba7f-c7e78a711f1e", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "with afar.run(), afar.locally:\n", 35 | " pass" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "id": "48dc3a7a-4d4e-43bb-8734-24bffcb7acf3", 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "with afar.run(), locally:\n", 46 | " pass" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "id": "9c16144e-33d2-4393-a794-532060ed4c56", 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [ 56 | "with afar.run(), afar.locally:\n", 57 | " pass" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "id": "aa3c91a8-17f9-4ae0-aaa0-e5f16065eda6", 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "with raises(NameError, match=\"locallyblah\"):\n", 68 | " with afar.run(), locallyblah:\n", 69 | " pass" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "id": "4fe3cc35-6e3c-4f33-9b49-c8b0128ead15", 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "with raises(Exception, match=\"`afar.run` is missing a location\"):\n", 80 | " with afar.run():\n", 81 | " pass" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "id": "4faf636c-8c05-4315-b6ec-527c111341f4", 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "z = 1\n", 92 | "\n", 93 | "def f():\n", 94 | " w = 10\n", 95 | " with afar.run() as results, locally:\n", 96 | " x = z\n", 97 | " y = x + 1 + w\n", 98 | " return 
results\n", 99 | "\n", 100 | "results = f()\n", 101 | "assert \"x\" not in results\n", 102 | "assert results[\"y\"] == 12\n", 103 | "assert not hasattr(results, \"w\")\n", 104 | "assert not hasattr(results, \"z\")" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "id": "5b9d7ecf-a237-4afd-b056-fc0574ef59ea", 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "with afar.run as results, afar.locally:\n", 115 | " x = z\n", 116 | " y = x + 1" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "id": "465e80b2-70fd-4475-b2d9-96a98c7b6e21", 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "with raises(NameError):\n", 127 | " x\n", 128 | "assert results == {\"y\": 2}" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": null, 134 | "id": "b5fad424-e765-4de0-8d0a-3a94085a3ff1", 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "with \\\n", 139 | " afar.run() as results, \\\n", 140 | " locally \\\n", 141 | ":\n", 142 | " x = z\n", 143 | " y = x + 1\n", 144 | "assert results == {'y': 2}" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "id": "c9812c52-0988-4664-94af-b4903acfc3b6", 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "data = {\"a\": 1}\n", 155 | "run = afar.run(data=data)\n", 156 | "with run, locally:\n", 157 | " b = a + 1" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": null, 163 | "id": "f8d033af-910a-412a-8370-7085da4ea79e", 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "assert run.data is data\n", 168 | "assert data == {\"a\": 1, \"b\": 2}\n", 169 | "c = 10" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "id": "908f1ce7-9871-4422-808c-ed822e54d10f", 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "with run, locally:\n", 180 | " d = a + b + c\n", 181 | "assert data == {\"a\": 1, \"b\": 2, \"d\": 13}" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": null, 187 | "id": "9a64e546-e6be-4fbe-a475-99682ab4e3c7", 188 | "metadata": {}, 189 | "outputs": [], 190 | "source": [ 191 | "# singleton doesn't save data\n", 192 | "with afar.run as data2, locally:\n", 193 | " e = 100\n", 194 | "assert afar.run.data is None\n", 195 | "assert data2 == {\"e\": 100}" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "id": "27c4fe26-9556-45a4-bc52-0cf984fe173a", 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "with afar.run as results, locally:\n", 206 | " a = 1\n", 207 | " b = (\n", 208 | " a\n", 209 | " +\n", 210 | " 2\n", 211 | " )\n", 212 | "assert results == {\"b\": 3}" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "id": "5341c4a0-4e3a-4f0d-b0bf-6161011cccdd", 219 | "metadata": {}, 220 | "outputs": [], 221 | "source": [ 222 | "with afar.run as results, locally:\n", 223 | " a = 1\n", 224 | " b = (\n", 225 | " a\n", 226 | " +\n", 227 | " 2\n", 228 | " )" 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "id": "21610817-b607-47ad-8779-d7a8c6991d26", 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "assert results == {\"b\": 3}" 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": null, 244 | "id": "8fa62f2c-880b-4b5c-b8e3-c88dc28661b6", 245 | "metadata": {}, 246 | "outputs": [], 247 | 
"source": [ 248 | "results" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "id": "f1d186ba-7e2f-4134-937d-b0f0123ef4d4", 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [ 258 | "afar.run.context_body" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": null, 264 | "id": "db46db8b-324e-46ed-86d6-e3af7f814ab2", 265 | "metadata": {}, 266 | "outputs": [], 267 | "source": [ 268 | "run = afar.run()\n", 269 | "with run, later:\n", 270 | " 1 / 0\n", 271 | "assert run.context_body == [\" 1 / 0\\n\"]" 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": null, 277 | "id": "0a292f1a-e002-4ed9-b50a-14c82ed67d33", 278 | "metadata": {}, 279 | "outputs": [], 280 | "source": [ 281 | "with raises(RuntimeError, match=\"please put the context body on a new line\"):\n", 282 | " with afar.run, later: pass" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": null, 288 | "id": "f9b447f9-5fd6-452f-88a8-645b9849ac6f", 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "with run, later:\n", 293 | " pass\n", 294 | "\n", 295 | "assert run.context_body == [\" pass\\n\", \"\\n\"]" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": null, 301 | "id": "5b0a8a5d-e17e-4027-8255-031be11e116c", 302 | "metadata": {}, 303 | "outputs": [], 304 | "source": [ 305 | "with raises(Exception, match=\"missing\"):\n", 306 | " with run:\n", 307 | " pass" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": null, 313 | "id": "af3bb026-640b-453a-9770-58d1cb5bc3db", 314 | "metadata": {}, 315 | "outputs": [], 316 | "source": [ 317 | "with run, later:\n", 318 | " b = a + 1\n", 319 | " c = a + b\n", 320 | "\n", 321 | "assert run.context_body == [\" b = a + 1\\n\", \" c = a + b\\n\", \"\\n\"]" 322 | ] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": null, 327 | "id": "78ed7fc1-5b04-4d33-a74f-b755cf2d3721", 328 | "metadata": {}, 329 | "outputs": [], 330 | "source": [ 331 | "with raises(Exception, match=\"missing\"):\n", 332 | " with run:\n", 333 | " with later:\n", 334 | " pass" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": null, 340 | "id": "6782bb6b-5f3d-4ae4-b1d1-838af3d433b8", 341 | "metadata": {}, 342 | "outputs": [], 343 | "source": [ 344 | "# It would be nice if we could make these fail\n", 345 | "with run, later as z:\n", 346 | " pass\n", 347 | "\n", 348 | "with run, later as [z, *other]:\n", 349 | " pass\n", 350 | "\n", 351 | "with run, later, z:\n", 352 | " pass" 353 | ] 354 | }, 355 | { 356 | "cell_type": "code", 357 | "execution_count": null, 358 | "id": "733d45ce-63b4-4b8b-88d1-474b3bc6ae5e", 359 | "metadata": {}, 360 | "outputs": [], 361 | "source": [ 362 | "with \\\n", 363 | " run, \\\n", 364 | " later \\\n", 365 | ":\n", 366 | "\n", 367 | " pass\n", 368 | "\n", 369 | "# fmt: on\n", 370 | "\n", 371 | "assert run.context_body == [\"\\n\", \" pass\\n\", \"\\n\", \"# fmt: on\\n\", \"\\n\"]" 372 | ] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "execution_count": null, 377 | "id": "75525582-0c9b-418f-a04f-31ec90a3afe5", 378 | "metadata": {}, 379 | "outputs": [], 380 | "source": [ 381 | "with \\\n", 382 | " run as c, \\\n", 383 | " later( \\\n", 384 | " d= \\\n", 385 | " \":\" \\\n", 386 | " ) \\\n", 387 | ":\n", 388 | "\n", 389 | " f\n", 390 | " g\n", 391 | " h(\n", 392 | " z\n", 393 | " =\n", 394 | " 2\n", 395 | " )" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | 
"execution_count": null, 401 | "id": "5922020d-83f6-4580-9421-ce1d6971b352", 402 | "metadata": {}, 403 | "outputs": [], 404 | "source": [ 405 | "assert run.context_body == [\n", 406 | " \"\\n\",\n", 407 | " \" f\\n\",\n", 408 | " \" g\\n\",\n", 409 | " \" h(\\n\",\n", 410 | " \" z\\n\",\n", 411 | " \" =\\n\",\n", 412 | " \" 2\\n\",\n", 413 | " \" )\\n\",\n", 414 | "]" 415 | ] 416 | }, 417 | { 418 | "cell_type": "code", 419 | "execution_count": null, 420 | "id": "6ac00a91-248a-4f67-9856-a467485f1f50", 421 | "metadata": {}, 422 | "outputs": [], 423 | "source": [ 424 | "with \\\n", 425 | " run, \\\n", 426 | " later:\n", 427 | " # :\n", 428 | " (\n", 429 | " 1\n", 430 | " +\n", 431 | " 2\n", 432 | " )\n", 433 | "x = (\n", 434 | " 3\n", 435 | " +\n", 436 | " 4\n", 437 | ")\n", 438 | "assert run.context_body == [\n", 439 | " \" # :\\n\",\n", 440 | " \" (\\n\",\n", 441 | " \" 1\\n\",\n", 442 | " \" +\\n\",\n", 443 | " \" 2\\n\",\n", 444 | " \" )\\n\",\n", 445 | "]" 446 | ] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": null, 451 | "id": "06b47210-d489-4a0a-b5d1-bdad5ad710c4", 452 | "metadata": {}, 453 | "outputs": [], 454 | "source": [ 455 | "from operator import add\n", 456 | "from dask.distributed import Client\n", 457 | "\n", 458 | "client = Client()\n", 459 | "two = client.submit(add, 1, 1)\n", 460 | "\n", 461 | "with afar.run as results, afar.remotely:\n", 462 | " three = two + 1\n", 463 | "assert three.result() == 3\n", 464 | "\n", 465 | "with afar.get, afar.remotely(priority=1):\n", 466 | " five = two + three\n", 467 | "assert five == 5" 468 | ] 469 | }, 470 | { 471 | "cell_type": "code", 472 | "execution_count": null, 473 | "id": "9b685202-9541-46d9-87c4-b821b0fa55b9", 474 | "metadata": {}, 475 | "outputs": [], 476 | "source": [] 477 | } 478 | ], 479 | "metadata": { 480 | "kernelspec": { 481 | "display_name": "Python 3 (ipykernel)", 482 | "language": "python", 483 | "name": "python3" 484 | }, 485 | "language_info": { 486 | "codemirror_mode": { 487 | "name": "ipython", 488 | "version": 3 489 | }, 490 | "file_extension": ".py", 491 | "mimetype": "text/x-python", 492 | "name": "python", 493 | "nbconvert_exporter": "python", 494 | "pygments_lexer": "ipython3", 495 | "version": "3.8.10" 496 | } 497 | }, 498 | "nbformat": 4, 499 | "nbformat_minor": 5 500 | } 501 | -------------------------------------------------------------------------------- /afar/_core.py: -------------------------------------------------------------------------------- 1 | """Define the user-facing `run` object; this is where it all comes together.""" 2 | import dis 3 | import sys 4 | from inspect import currentframe 5 | from uuid import uuid4 6 | from weakref import WeakKeyDictionary, WeakSet 7 | 8 | from dask import distributed 9 | from dask.distributed import get_worker 10 | 11 | from ._abra import cadabra 12 | from ._inspect import get_body, get_body_start, get_lines 13 | from ._printing import PrintRecorder 14 | from ._reprs import display_repr, repr_afar 15 | from ._utils import supports_async_output 16 | from ._where import find_where 17 | 18 | 19 | class Run: 20 | _gather_data = False 21 | # Used to update outputs asynchronously 22 | _outputs = {} 23 | _channel = "afar-" + uuid4().hex 24 | 25 | def __init__(self, *names, client=None, data=None): 26 | self.names = names 27 | self.data = data 28 | self.client = client 29 | self.context_body = None 30 | # afar.run can be used as a singleton without calling it. 31 | # If we do this, we shouldn't keep data around. 
32 | self._is_singleton = data is None 33 | self._frame = None 34 | # Used to cancel work 35 | self._client_to_futures = WeakKeyDictionary() 36 | # For now, save the following to help debug 37 | self._where = None 38 | self._magic_func = None 39 | self._body_start = None 40 | self._lines = None 41 | 42 | def __call__(self, *names, client=None, data=None): 43 | if data is None: 44 | if self.data is None: 45 | data = {} 46 | else: 47 | data = self.data 48 | if client is None: 49 | client = self.client 50 | return type(self)(*names, client=client, data=data) 51 | 52 | def __enter__(self): 53 | self._frame = currentframe().f_back 54 | with_lineno = self._frame.f_lineno - 1 55 | if self._is_singleton: 56 | if self.data: 57 | raise RuntimeError("uh oh!") 58 | self.data = {} 59 | 60 | lines = get_lines(self._frame) 61 | 62 | while not lines[with_lineno].lstrip().startswith("with"): 63 | with_lineno -= 1 64 | if with_lineno < 0: 65 | raise RuntimeError("Failed to analyze the context!") 66 | 67 | num_with, body_start = get_body_start(lines, with_lineno) 68 | if num_with < 2: 69 | # Best effort detection. This fails if there is a context *before* afar.run 70 | within = type(self).__name__.lower() 71 | raise RuntimeError( 72 | f"`afar.{within}` is missing a location. For example:\n\n" 73 | f">>> with afar.{within}, remotely:\n" 74 | f"... pass\n\n" 75 | f"Please specify a location such as adding `, remotely`." 76 | ) 77 | self._body_start = body_start 78 | self._lines = lines 79 | return self.data 80 | 81 | def __exit__(self, exc_type, exc_value, exc_traceback): 82 | self._where = None 83 | if self.data is None: 84 | if exc_type is None: 85 | raise RuntimeError("uh oh!") 86 | return False 87 | if exc_type is None or exc_traceback.tb_frame is not self._frame: 88 | return False 89 | where = find_where(exc_type, exc_value) 90 | if where is None: 91 | # The exception is valid 92 | return False 93 | 94 | try: 95 | return self._exit(where) 96 | except KeyboardInterrupt as exc: 97 | # Cancel all pending tasks 98 | if self._where == "remotely": 99 | self.cancel() 100 | raise exc from None 101 | except Exception as exc: 102 | raise exc from None 103 | finally: 104 | self._frame = None 105 | self._lines = None 106 | if self._is_singleton: 107 | self.data = None 108 | 109 | def _exit(self, where): 110 | frame = self._frame 111 | # What line does the context end? 
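# `dis.findlinestarts(frame.f_code)` yields (bytecode_offset, lineno) pairs (e.g.
# roughly [(0, 10), (8, 11), (24, 14)]); the first offset past `frame.f_lasti` tells
# us how far execution has progressed, which bounds the last line of the `with` body.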
112 | maxline = self._body_start 113 | for offset, line in dis.findlinestarts(frame.f_code): 114 | if line > maxline: 115 | maxline = line 116 | if offset > frame.f_lasti: 117 | endline = max(maxline, maxline - 1) 118 | break 119 | else: 120 | endline = maxline + 5 # give us some wiggle room 121 | 122 | context_body = get_body(self._lines[self._body_start : endline]) 123 | self._run( 124 | where.where, 125 | context_body, 126 | self.names, 127 | self.data, 128 | client=self.client or where.client, 129 | submit_kwargs=where.submit_kwargs, 130 | global_ns=frame.f_globals, 131 | local_ns=frame.f_locals, 132 | ) 133 | return True 134 | 135 | def _run( 136 | self, 137 | where, 138 | context_body, 139 | names, 140 | data, 141 | *, 142 | global_ns, 143 | local_ns, 144 | client=None, 145 | submit_kwargs=None, 146 | return_expr=False, 147 | ): 148 | self._where = where 149 | self.context_body = context_body 150 | if submit_kwargs is None: 151 | submit_kwargs = {} 152 | 153 | self._magic_func, names, futures = cadabra( 154 | context_body, where, names, data, global_ns, local_ns 155 | ) 156 | display_expr = self._magic_func._display_expr 157 | return_future = None 158 | 159 | if where == "remotely": 160 | if client is None: 161 | client = distributed.client._get_global_client() 162 | if client is None: 163 | raise TypeError( 164 | "No dask.distributed client found. " 165 | "You must create and connect to a Dask cluster before using afar." 166 | ) 167 | if client not in self._client_to_futures: 168 | weak_futures = WeakSet() 169 | self._client_to_futures[client] = weak_futures 170 | else: 171 | weak_futures = self._client_to_futures[client] 172 | 173 | to_scatter = data.keys() & self._magic_func._scoped.outer_scope.keys() 174 | if to_scatter: 175 | # Scatter value in `data` that we need in this calculation. 176 | # This moves data from local to remote, then keeps it remote. 177 | # Things in `data` may get reused, so it can be helpful to 178 | # move them. We could move everything in `data`, but we 179 | # only move the things we need. We could also scatter everything 180 | # in `self._magic_func._scoped.outer_scope`, but we can't reuse 181 | # them, because they may get modified locally. 182 | to_scatter = list(to_scatter) 183 | # I'm afraid to hash, because users may accidentally mutate things. 184 | scattered = client.scatter([data[key] for key in to_scatter], hash=False) 185 | scattered = dict(zip(to_scatter, scattered)) 186 | futures.update(scattered) 187 | data.update(scattered) 188 | for key in to_scatter: 189 | del self._magic_func._scoped.outer_scope[key] 190 | 191 | capture_print = True 192 | if capture_print and self._channel not in client._event_handlers: 193 | client.subscribe_topic(self._channel, self._handle_print) 194 | # When would be a good time to unsubscribe? 
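# The captured prints travel over dask's pub/sub events: the worker side (`run_afar`,
# presumably together with `PrintRecorder` from `_printing.py`) emits
# `worker.log_event(channel, (unique_key, action, payload))`, and the handler
# registered above via `client.subscribe_topic` routes each payload to the matching
# output in `_handle_print`, keyed on `unique_key`.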
195 | async_print = capture_print and supports_async_output() 196 | if capture_print: 197 | unique_key = uuid4().hex 198 | self._setup_print(unique_key, async_print) 199 | else: 200 | unique_key = None 201 | 202 | # Scatter magic_func to avoid "Large object" UserWarning 203 | magic_func = client.scatter(self._magic_func, hash=False) 204 | weak_futures.add(magic_func) 205 | 206 | remote_dict = client.submit( 207 | run_afar, 208 | magic_func, 209 | names, 210 | futures, 211 | capture_print, 212 | self._channel, 213 | unique_key, 214 | pure=False, 215 | **submit_kwargs, 216 | ) 217 | weak_futures.add(remote_dict) 218 | magic_func.release() # Let go ASAP 219 | 220 | if self._gather_data: 221 | futures_to_name = { 222 | client.submit(get_afar, remote_dict, name, **submit_kwargs): name 223 | for name in names 224 | } 225 | weak_futures.update(futures_to_name) 226 | remote_dict.release() # Let go ASAP 227 | for future, result in distributed.as_completed(futures_to_name, with_results=True): 228 | data[futures_to_name[future]] = result 229 | else: 230 | for name in names: 231 | future = client.submit(get_afar, remote_dict, name, **submit_kwargs) 232 | weak_futures.add(future) 233 | data[name] = future 234 | remote_dict.release() # Let go ASAP 235 | elif where == "locally": 236 | # Run locally. This is handy for testing and debugging. 237 | results = self._magic_func() 238 | for name in names: 239 | data[name] = results[name] 240 | if display_expr: 241 | from IPython.display import display 242 | 243 | display(results.return_value) 244 | if return_expr: 245 | return_future = results.return_value 246 | elif where == "later": 247 | return 248 | else: 249 | raise ValueError(f"Don't know where {where!r} is") 250 | 251 | # Try to update the variables in the frame. 252 | # This currently only works if f_locals is f_globals, or if tracing (don't ask). 253 | local_ns.update((name, data[name]) for name in names) 254 | return return_future 255 | 256 | def cancel(self, *, client=None, force=False): 257 | """Cancel pending tasks""" 258 | if client is not None: 259 | items = [(client, self._client_to_futures[client])] 260 | else: 261 | items = self._client_to_futures.items() 262 | for client, weak_futures in items: 263 | client.cancel( 264 | [future for future in weak_futures if future.status == "pending"], force=force 265 | ) 266 | weak_futures.clear() 267 | 268 | def _setup_print(self, key, async_print): 269 | if async_print: 270 | from IPython.display import display 271 | from ipywidgets import Output 272 | 273 | out = Output() 274 | display(out) 275 | out.append_stdout("\N{SPARKLES} Running afar... \N{SPARKLES}") 276 | else: 277 | out = None 278 | self._outputs[key] = [out, False] # False means has not been updated 279 | 280 | @classmethod 281 | def _handle_print(cls, event): 282 | # XXX: can we assume all messages from a single task arrive in FIFO order? 283 | _, msg = event 284 | key, action, payload = msg 285 | if key not in cls._outputs: 286 | return 287 | out, is_updated = cls._outputs[key] 288 | if out is not None: 289 | if action == "begin": 290 | if is_updated: 291 | out.outputs = type(out.outputs)() 292 | out.append_stdout("\N{SPARKLES} Running afar... (restarted) \N{SPARKLES}") 293 | cls._outputs[key][1] = False # is not updated 294 | else: 295 | if not is_updated: 296 | # Clear the "Running afar..."
message 297 | out.outputs = type(out.outputs)() 298 | cls._outputs[key][1] = True # is updated 299 | # ipywidgets.Output is pretty slow if there are lots of messages 300 | if action == "stdout": 301 | out.append_stdout(payload) 302 | elif action == "stderr": 303 | out.append_stderr(payload) 304 | elif action == "stdout": 305 | print(payload, end="") 306 | elif action == "stderr": 307 | print(payload, end="", file=sys.stderr) 308 | if action == "display_expr": 309 | display_repr(payload, out=out) 310 | del cls._outputs[key] 311 | elif action == "finish": 312 | del cls._outputs[key] 313 | 314 | 315 | class Get(Run): 316 | """Unlike ``run``, ``get`` automatically gathers the data locally""" 317 | 318 | _gather_data = True 319 | 320 | 321 | def run_afar(magic_func, names, futures, capture_print, channel, unique_key): 322 | if capture_print: 323 | try: 324 | worker = get_worker() 325 | send_finish = True 326 | except ValueError: 327 | worker = None 328 | try: 329 | if capture_print and worker is not None: 330 | worker.log_event(channel, (unique_key, "begin", None)) 331 | rec = PrintRecorder(channel, unique_key) 332 | if "print" in magic_func._scoped.builtin_names and "print" not in futures: 333 | sfunc = magic_func._scoped.bind(futures, print=rec) 334 | else: 335 | sfunc = magic_func._scoped.bind(futures) 336 | with rec: 337 | results = sfunc() 338 | else: 339 | sfunc = magic_func._scoped.bind(futures) 340 | results = sfunc() 341 | 342 | rv = {key: results[key] for key in names} 343 | 344 | if magic_func._display_expr and worker is not None: 345 | # Hopefully computing the repr is fast. If it is slow, perhaps it would be 346 | # better to add the return value to rv and call repr_afar as a separate task. 347 | # Also, pretty_repr must be msgpack serializable if done via events. Hence, 348 | # custom _ipython_display_ doesn't work, and we resort to using a basic repr. 349 | pretty_repr = repr_afar(results.return_value, magic_func._repr_methods) 350 | if pretty_repr is not None: 351 | worker.log_event(channel, (unique_key, "display_expr", pretty_repr)) 352 | send_finish = False 353 | finally: 354 | if capture_print and worker is not None and send_finish: 355 | worker.log_event(channel, (unique_key, "finish", None)) 356 | return rv 357 | 358 | 359 | def get_afar(d, k): 360 | return d[k] 361 | 362 | 363 | run = Run() 364 | get = Get() 365 | -------------------------------------------------------------------------------- /afar/_version.py: -------------------------------------------------------------------------------- 1 | # This file helps to compute a version number in source trees obtained from 2 | # git-archive tarball (such as those provided by githubs download-from-tag 3 | # feature). Distribution tarballs (built by setup.py sdist) and build 4 | # directories (produced by setup.py build) will contain a much shorter file 5 | # that just contains the computed version number. 6 | 7 | # This file is released into the public domain. Generated by 8 | # versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) 9 | 10 | """Git implementation of _version.py.""" 11 | 12 | import errno 13 | import os 14 | import re 15 | import subprocess 16 | import sys 17 | 18 | 19 | def get_keywords(): 20 | """Get the keywords needed to look up the version information.""" 21 | # these strings will be replaced by git during git-archive. 22 | # setup.py/versioneer.py will grep for the variable names, so they must 23 | # each be defined on a line of their own. 
_version.py will just call 24 | # get_keywords(). 25 | git_refnames = " (HEAD -> main)" 26 | git_full = "f3047ea685ab2638cc25fa6ac627314d02a7d314" 27 | git_date = "2022-04-19 15:30:08 -0500" 28 | keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} 29 | return keywords 30 | 31 | 32 | class VersioneerConfig: 33 | """Container for Versioneer configuration parameters.""" 34 | 35 | 36 | def get_config(): 37 | """Create, populate and return the VersioneerConfig() object.""" 38 | # these strings are filled in when 'setup.py versioneer' creates 39 | # _version.py 40 | cfg = VersioneerConfig() 41 | cfg.VCS = "git" 42 | cfg.style = "pep440" 43 | cfg.tag_prefix = "" 44 | cfg.parentdir_prefix = "afar-" 45 | cfg.versionfile_source = "afar/_version.py" 46 | cfg.verbose = False 47 | return cfg 48 | 49 | 50 | class NotThisMethod(Exception): 51 | """Exception raised if a method is not valid for the current scenario.""" 52 | 53 | 54 | LONG_VERSION_PY = {} 55 | HANDLERS = {} 56 | 57 | 58 | def register_vcs_handler(vcs, method): # decorator 59 | """Create decorator to mark a method as the handler of a VCS.""" 60 | 61 | def decorate(f): 62 | """Store f in HANDLERS[vcs][method].""" 63 | if vcs not in HANDLERS: 64 | HANDLERS[vcs] = {} 65 | HANDLERS[vcs][method] = f 66 | return f 67 | 68 | return decorate 69 | 70 | 71 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): 72 | """Call the given command(s).""" 73 | assert isinstance(commands, list) 74 | p = None 75 | for c in commands: 76 | try: 77 | dispcmd = str([c] + args) 78 | # remember shell=False, so use git.cmd on windows, not just git 79 | p = subprocess.Popen( 80 | [c] + args, 81 | cwd=cwd, 82 | env=env, 83 | stdout=subprocess.PIPE, 84 | stderr=(subprocess.PIPE if hide_stderr else None), 85 | ) 86 | break 87 | except EnvironmentError: 88 | e = sys.exc_info()[1] 89 | if e.errno == errno.ENOENT: 90 | continue 91 | if verbose: 92 | print("unable to run %s" % dispcmd) 93 | print(e) 94 | return None, None 95 | else: 96 | if verbose: 97 | print("unable to find command, tried %s" % (commands,)) 98 | return None, None 99 | stdout = p.communicate()[0].strip().decode() 100 | if p.returncode != 0: 101 | if verbose: 102 | print("unable to run %s (error)" % dispcmd) 103 | print("stdout was %s" % stdout) 104 | return None, p.returncode 105 | return stdout, p.returncode 106 | 107 | 108 | def versions_from_parentdir(parentdir_prefix, root, verbose): 109 | """Try to determine the version from the parent directory name. 110 | 111 | Source tarballs conventionally unpack into a directory that includes both 112 | the project name and a version string. 
We will also support searching up 113 | two directory levels for an appropriately named parent directory 114 | """ 115 | rootdirs = [] 116 | 117 | for i in range(3): 118 | dirname = os.path.basename(root) 119 | if dirname.startswith(parentdir_prefix): 120 | return { 121 | "version": dirname[len(parentdir_prefix) :], 122 | "full-revisionid": None, 123 | "dirty": False, 124 | "error": None, 125 | "date": None, 126 | } 127 | else: 128 | rootdirs.append(root) 129 | root = os.path.dirname(root) # up a level 130 | 131 | if verbose: 132 | print( 133 | "Tried directories %s but none started with prefix %s" 134 | % (str(rootdirs), parentdir_prefix) 135 | ) 136 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 137 | 138 | 139 | @register_vcs_handler("git", "get_keywords") 140 | def git_get_keywords(versionfile_abs): 141 | """Extract version information from the given file.""" 142 | # the code embedded in _version.py can just fetch the value of these 143 | # keywords. When used from setup.py, we don't want to import _version.py, 144 | # so we do it with a regexp instead. This function is not used from 145 | # _version.py. 146 | keywords = {} 147 | try: 148 | f = open(versionfile_abs, "r") 149 | for line in f.readlines(): 150 | if line.strip().startswith("git_refnames ="): 151 | mo = re.search(r'=\s*"(.*)"', line) 152 | if mo: 153 | keywords["refnames"] = mo.group(1) 154 | if line.strip().startswith("git_full ="): 155 | mo = re.search(r'=\s*"(.*)"', line) 156 | if mo: 157 | keywords["full"] = mo.group(1) 158 | if line.strip().startswith("git_date ="): 159 | mo = re.search(r'=\s*"(.*)"', line) 160 | if mo: 161 | keywords["date"] = mo.group(1) 162 | f.close() 163 | except EnvironmentError: 164 | pass 165 | return keywords 166 | 167 | 168 | @register_vcs_handler("git", "keywords") 169 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 170 | """Get version information from git keywords.""" 171 | if not keywords: 172 | raise NotThisMethod("no keywords at all, weird") 173 | date = keywords.get("date") 174 | if date is not None: 175 | # Use only the last line. Previous lines may contain GPG signature 176 | # information. 177 | date = date.splitlines()[-1] 178 | 179 | # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant 180 | # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 181 | # -like" string, which we must then edit to make compliant), because 182 | # it's been around since git-1.5.3, and it's too difficult to 183 | # discover which version we're using, or to work around using an 184 | # older one. 185 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 186 | refnames = keywords["refnames"].strip() 187 | if refnames.startswith("$Format"): 188 | if verbose: 189 | print("keywords are unexpanded, not using") 190 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 191 | refs = set([r.strip() for r in refnames.strip("()").split(",")]) 192 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 193 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 194 | TAG = "tag: " 195 | tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) 196 | if not tags: 197 | # Either we're using git < 1.8.3, or there really are no tags. We use 198 | # a heuristic: assume all version tags have a digit. The old git %d 199 | # expansion behaves like git log --decorate=short and strips out the 200 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 201 | # between branches and tags. 
By ignoring refnames without digits, we 202 | # filter out many common branch names like "release" and 203 | # "stabilization", as well as "HEAD" and "master". 204 | tags = set([r for r in refs if re.search(r"\d", r)]) 205 | if verbose: 206 | print("discarding '%s', no digits" % ",".join(refs - tags)) 207 | if verbose: 208 | print("likely tags: %s" % ",".join(sorted(tags))) 209 | for ref in sorted(tags): 210 | # sorting will prefer e.g. "2.0" over "2.0rc1" 211 | if ref.startswith(tag_prefix): 212 | r = ref[len(tag_prefix) :] 213 | if verbose: 214 | print("picking %s" % r) 215 | return { 216 | "version": r, 217 | "full-revisionid": keywords["full"].strip(), 218 | "dirty": False, 219 | "error": None, 220 | "date": date, 221 | } 222 | # no suitable tags, so version is "0+unknown", but full hex is still there 223 | if verbose: 224 | print("no suitable tags, using unknown + full revision id") 225 | return { 226 | "version": "0+unknown", 227 | "full-revisionid": keywords["full"].strip(), 228 | "dirty": False, 229 | "error": "no suitable tags", 230 | "date": None, 231 | } 232 | 233 | 234 | @register_vcs_handler("git", "pieces_from_vcs") 235 | def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): 236 | """Get version from 'git describe' in the root of the source tree. 237 | 238 | This only gets called if the git-archive 'subst' keywords were *not* 239 | expanded, and _version.py hasn't already been rewritten with a short 240 | version string, meaning we're inside a checked out source tree. 241 | """ 242 | GITS = ["git"] 243 | if sys.platform == "win32": 244 | GITS = ["git.cmd", "git.exe"] 245 | 246 | out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) 247 | if rc != 0: 248 | if verbose: 249 | print("Directory %s not under git control" % root) 250 | raise NotThisMethod("'git rev-parse --git-dir' returned error") 251 | 252 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] 253 | # if there isn't one, this yields HEX[-dirty] (no NUM) 254 | describe_out, rc = run_command( 255 | GITS, 256 | ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], 257 | cwd=root, 258 | ) 259 | # --long was added in git-1.5.5 260 | if describe_out is None: 261 | raise NotThisMethod("'git describe' failed") 262 | describe_out = describe_out.strip() 263 | full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) 264 | if full_out is None: 265 | raise NotThisMethod("'git rev-parse' failed") 266 | full_out = full_out.strip() 267 | 268 | pieces = {} 269 | pieces["long"] = full_out 270 | pieces["short"] = full_out[:7] # maybe improved later 271 | pieces["error"] = None 272 | 273 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 274 | # TAG might have hyphens. 275 | git_describe = describe_out 276 | 277 | # look for -dirty suffix 278 | dirty = git_describe.endswith("-dirty") 279 | pieces["dirty"] = dirty 280 | if dirty: 281 | git_describe = git_describe[: git_describe.rindex("-dirty")] 282 | 283 | # now we have TAG-NUM-gHEX or HEX 284 | 285 | if "-" in git_describe: 286 | # TAG-NUM-gHEX 287 | mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) 288 | if not mo: 289 | # unparseable. Maybe git-describe is misbehaving? 
290 | pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out 291 | return pieces 292 | 293 | # tag 294 | full_tag = mo.group(1) 295 | if not full_tag.startswith(tag_prefix): 296 | if verbose: 297 | fmt = "tag '%s' doesn't start with prefix '%s'" 298 | print(fmt % (full_tag, tag_prefix)) 299 | pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) 300 | return pieces 301 | pieces["closest-tag"] = full_tag[len(tag_prefix) :] 302 | 303 | # distance: number of commits since tag 304 | pieces["distance"] = int(mo.group(2)) 305 | 306 | # commit: short hex revision ID 307 | pieces["short"] = mo.group(3) 308 | 309 | else: 310 | # HEX: no tags 311 | pieces["closest-tag"] = None 312 | count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) 313 | pieces["distance"] = int(count_out) # total number of commits 314 | 315 | # commit date: see ISO-8601 comment in git_versions_from_keywords() 316 | date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() 317 | # Use only the last line. Previous lines may contain GPG signature 318 | # information. 319 | date = date.splitlines()[-1] 320 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 321 | 322 | return pieces 323 | 324 | 325 | def plus_or_dot(pieces): 326 | """Return a + if we don't already have one, else return a .""" 327 | if "+" in pieces.get("closest-tag", ""): 328 | return "." 329 | return "+" 330 | 331 | 332 | def render_pep440(pieces): 333 | """Build up version string, with post-release "local version identifier". 334 | 335 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 336 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 337 | 338 | Exceptions: 339 | 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] 340 | """ 341 | if pieces["closest-tag"]: 342 | rendered = pieces["closest-tag"] 343 | if pieces["distance"] or pieces["dirty"]: 344 | rendered += plus_or_dot(pieces) 345 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 346 | if pieces["dirty"]: 347 | rendered += ".dirty" 348 | else: 349 | # exception #1 350 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) 351 | if pieces["dirty"]: 352 | rendered += ".dirty" 353 | return rendered 354 | 355 | 356 | def render_pep440_pre(pieces): 357 | """TAG[.post0.devDISTANCE] -- No -dirty. 358 | 359 | Exceptions: 360 | 1: no tags. 0.post0.devDISTANCE 361 | """ 362 | if pieces["closest-tag"]: 363 | rendered = pieces["closest-tag"] 364 | if pieces["distance"]: 365 | rendered += ".post0.dev%d" % pieces["distance"] 366 | else: 367 | # exception #1 368 | rendered = "0.post0.dev%d" % pieces["distance"] 369 | return rendered 370 | 371 | 372 | def render_pep440_post(pieces): 373 | """TAG[.postDISTANCE[.dev0]+gHEX] . 374 | 375 | The ".dev0" means dirty. Note that .dev0 sorts backwards 376 | (a dirty tree will appear "older" than the corresponding clean one), 377 | but you shouldn't be releasing software with -dirty anyways. 378 | 379 | Exceptions: 380 | 1: no tags. 
0.postDISTANCE[.dev0] 381 | """ 382 | if pieces["closest-tag"]: 383 | rendered = pieces["closest-tag"] 384 | if pieces["distance"] or pieces["dirty"]: 385 | rendered += ".post%d" % pieces["distance"] 386 | if pieces["dirty"]: 387 | rendered += ".dev0" 388 | rendered += plus_or_dot(pieces) 389 | rendered += "g%s" % pieces["short"] 390 | else: 391 | # exception #1 392 | rendered = "0.post%d" % pieces["distance"] 393 | if pieces["dirty"]: 394 | rendered += ".dev0" 395 | rendered += "+g%s" % pieces["short"] 396 | return rendered 397 | 398 | 399 | def render_pep440_old(pieces): 400 | """TAG[.postDISTANCE[.dev0]] . 401 | 402 | The ".dev0" means dirty. 403 | 404 | Exceptions: 405 | 1: no tags. 0.postDISTANCE[.dev0] 406 | """ 407 | if pieces["closest-tag"]: 408 | rendered = pieces["closest-tag"] 409 | if pieces["distance"] or pieces["dirty"]: 410 | rendered += ".post%d" % pieces["distance"] 411 | if pieces["dirty"]: 412 | rendered += ".dev0" 413 | else: 414 | # exception #1 415 | rendered = "0.post%d" % pieces["distance"] 416 | if pieces["dirty"]: 417 | rendered += ".dev0" 418 | return rendered 419 | 420 | 421 | def render_git_describe(pieces): 422 | """TAG[-DISTANCE-gHEX][-dirty]. 423 | 424 | Like 'git describe --tags --dirty --always'. 425 | 426 | Exceptions: 427 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 428 | """ 429 | if pieces["closest-tag"]: 430 | rendered = pieces["closest-tag"] 431 | if pieces["distance"]: 432 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 433 | else: 434 | # exception #1 435 | rendered = pieces["short"] 436 | if pieces["dirty"]: 437 | rendered += "-dirty" 438 | return rendered 439 | 440 | 441 | def render_git_describe_long(pieces): 442 | """TAG-DISTANCE-gHEX[-dirty]. 443 | 444 | Like 'git describe --tags --dirty --always -long'. 445 | The distance/hash is unconditional. 446 | 447 | Exceptions: 448 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 449 | """ 450 | if pieces["closest-tag"]: 451 | rendered = pieces["closest-tag"] 452 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 453 | else: 454 | # exception #1 455 | rendered = pieces["short"] 456 | if pieces["dirty"]: 457 | rendered += "-dirty" 458 | return rendered 459 | 460 | 461 | def render(pieces, style): 462 | """Render the given version pieces into the requested style.""" 463 | if pieces["error"]: 464 | return { 465 | "version": "unknown", 466 | "full-revisionid": pieces.get("long"), 467 | "dirty": None, 468 | "error": pieces["error"], 469 | "date": None, 470 | } 471 | 472 | if not style or style == "default": 473 | style = "pep440" # the default 474 | 475 | if style == "pep440": 476 | rendered = render_pep440(pieces) 477 | elif style == "pep440-pre": 478 | rendered = render_pep440_pre(pieces) 479 | elif style == "pep440-post": 480 | rendered = render_pep440_post(pieces) 481 | elif style == "pep440-old": 482 | rendered = render_pep440_old(pieces) 483 | elif style == "git-describe": 484 | rendered = render_git_describe(pieces) 485 | elif style == "git-describe-long": 486 | rendered = render_git_describe_long(pieces) 487 | else: 488 | raise ValueError("unknown style '%s'" % style) 489 | 490 | return { 491 | "version": rendered, 492 | "full-revisionid": pieces["long"], 493 | "dirty": pieces["dirty"], 494 | "error": None, 495 | "date": pieces.get("date"), 496 | } 497 | 498 | 499 | def get_versions(): 500 | """Get version information or return default if unable to do so.""" 501 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. 
If we have 502 | # __file__, we can work backwards from there to the root. Some 503 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which 504 | # case we can only use expanded keywords. 505 | 506 | cfg = get_config() 507 | verbose = cfg.verbose 508 | 509 | try: 510 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) 511 | except NotThisMethod: 512 | pass 513 | 514 | try: 515 | root = os.path.realpath(__file__) 516 | # versionfile_source is the relative path from the top of the source 517 | # tree (where the .git directory might live) to this file. Invert 518 | # this to find the root from __file__. 519 | for i in cfg.versionfile_source.split("/"): 520 | root = os.path.dirname(root) 521 | except NameError: 522 | return { 523 | "version": "0+unknown", 524 | "full-revisionid": None, 525 | "dirty": None, 526 | "error": "unable to find root of source tree", 527 | "date": None, 528 | } 529 | 530 | try: 531 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) 532 | return render(pieces, cfg.style) 533 | except NotThisMethod: 534 | pass 535 | 536 | try: 537 | if cfg.parentdir_prefix: 538 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 539 | except NotThisMethod: 540 | pass 541 | 542 | return { 543 | "version": "0+unknown", 544 | "full-revisionid": None, 545 | "dirty": None, 546 | "error": "unable to compute version", 547 | "date": None, 548 | } 549 | -------------------------------------------------------------------------------- /versioneer.py: -------------------------------------------------------------------------------- 1 | # Version: 0.19 2 | 3 | """The Versioneer - like a rocketeer, but for versions. 4 | 5 | The Versioneer 6 | ============== 7 | 8 | * like a rocketeer, but for versions! 9 | * https://github.com/python-versioneer/python-versioneer 10 | * Brian Warner 11 | * License: Public Domain 12 | * Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3 13 | * [![Latest Version][pypi-image]][pypi-url] 14 | * [![Build Status][travis-image]][travis-url] 15 | 16 | This is a tool for managing a recorded version number in distutils-based 17 | python projects. The goal is to remove the tedious and error-prone "update 18 | the embedded version string" step from your release process. Making a new 19 | release should be as easy as recording a new tag in your version-control 20 | system, and maybe making new tarballs. 21 | 22 | 23 | ## Quick Install 24 | 25 | * `pip install versioneer` to somewhere in your $PATH 26 | * add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) 27 | * run `versioneer install` in your source tree, commit the results 28 | * Verify version information with `python setup.py version` 29 | 30 | ## Version Identifiers 31 | 32 | Source trees come from a variety of places: 33 | 34 | * a version-control system checkout (mostly used by developers) 35 | * a nightly tarball, produced by build automation 36 | * a snapshot tarball, produced by a web-based VCS browser, like github's 37 | "tarball from tag" feature 38 | * a release tarball, produced by "setup.py sdist", distributed through PyPI 39 | 40 | Within each source tree, the version identifier (either a string or a number, 41 | this tool is format-agnostic) can come from a variety of places: 42 | 43 | * ask the VCS tool itself, e.g. 
"git describe" (for checkouts), which knows 44 | about recent "tags" and an absolute revision-id 45 | * the name of the directory into which the tarball was unpacked 46 | * an expanded VCS keyword ($Id$, etc) 47 | * a `_version.py` created by some earlier build step 48 | 49 | For released software, the version identifier is closely related to a VCS 50 | tag. Some projects use tag names that include more than just the version 51 | string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool 52 | needs to strip the tag prefix to extract the version identifier. For 53 | unreleased software (between tags), the version identifier should provide 54 | enough information to help developers recreate the same tree, while also 55 | giving them an idea of roughly how old the tree is (after version 1.2, before 56 | version 1.3). Many VCS systems can report a description that captures this, 57 | for example `git describe --tags --dirty --always` reports things like 58 | "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 59 | 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has 60 | uncommitted changes). 61 | 62 | The version identifier is used for multiple purposes: 63 | 64 | * to allow the module to self-identify its version: `myproject.__version__` 65 | * to choose a name and prefix for a 'setup.py sdist' tarball 66 | 67 | ## Theory of Operation 68 | 69 | Versioneer works by adding a special `_version.py` file into your source 70 | tree, where your `__init__.py` can import it. This `_version.py` knows how to 71 | dynamically ask the VCS tool for version information at import time. 72 | 73 | `_version.py` also contains `$Revision$` markers, and the installation 74 | process marks `_version.py` to have this marker rewritten with a tag name 75 | during the `git archive` command. As a result, generated tarballs will 76 | contain enough information to get the proper version. 77 | 78 | To allow `setup.py` to compute a version too, a `versioneer.py` is added to 79 | the top level of your source tree, next to `setup.py` and the `setup.cfg` 80 | that configures it. This overrides several distutils/setuptools commands to 81 | compute the version when invoked, and changes `setup.py build` and `setup.py 82 | sdist` to replace `_version.py` with a small static file that contains just 83 | the generated version data. 84 | 85 | ## Installation 86 | 87 | See [INSTALL.md](./INSTALL.md) for detailed installation instructions. 88 | 89 | ## Version-String Flavors 90 | 91 | Code which uses Versioneer can learn about its version string at runtime by 92 | importing `_version` from your main `__init__.py` file and running the 93 | `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can 94 | import the top-level `versioneer.py` and run `get_versions()`. 95 | 96 | Both functions return a dictionary with different flavors of version 97 | information: 98 | 99 | * `['version']`: A condensed version string, rendered using the selected 100 | style. This is the most commonly used value for the project's version 101 | string. The default "pep440" style yields strings like `0.11`, 102 | `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section 103 | below for alternative styles. 104 | 105 | * `['full-revisionid']`: detailed revision identifier. For Git, this is the 106 | full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". 107 | 108 | * `['date']`: Date and time of the latest `HEAD` commit. 
For Git, it is the 109 | commit date in ISO 8601 format. This will be None if the date is not 110 | available. 111 | 112 | * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that 113 | this is only accurate if run in a VCS checkout, otherwise it is likely to 114 | be False or None 115 | 116 | * `['error']`: if the version string could not be computed, this will be set 117 | to a string describing the problem, otherwise it will be None. It may be 118 | useful to throw an exception in setup.py if this is set, to avoid e.g. 119 | creating tarballs with a version string of "unknown". 120 | 121 | Some variants are more useful than others. Including `full-revisionid` in a 122 | bug report should allow developers to reconstruct the exact code being tested 123 | (or indicate the presence of local changes that should be shared with the 124 | developers). `version` is suitable for display in an "about" box or a CLI 125 | `--version` output: it can be easily compared against release notes and lists 126 | of bugs fixed in various releases. 127 | 128 | The installer adds the following text to your `__init__.py` to place a basic 129 | version in `YOURPROJECT.__version__`: 130 | 131 | from ._version import get_versions 132 | __version__ = get_versions()['version'] 133 | del get_versions 134 | 135 | ## Styles 136 | 137 | The setup.cfg `style=` configuration controls how the VCS information is 138 | rendered into a version string. 139 | 140 | The default style, "pep440", produces a PEP440-compliant string, equal to the 141 | un-prefixed tag name for actual releases, and containing an additional "local 142 | version" section with more detail for in-between builds. For Git, this is 143 | TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags 144 | --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the 145 | tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and 146 | that this commit is two revisions ("+2") beyond the "0.11" tag. For released 147 | software (exactly equal to a known tag), the identifier will only contain the 148 | stripped tag, e.g. "0.11". 149 | 150 | Other styles are available. See [details.md](details.md) in the Versioneer 151 | source tree for descriptions. 152 | 153 | ## Debugging 154 | 155 | Versioneer tries to avoid fatal errors: if something goes wrong, it will tend 156 | to return a version of "0+unknown". To investigate the problem, run `setup.py 157 | version`, which will run the version-lookup code in a verbose mode, and will 158 | display the full contents of `get_versions()` (including the `error` string, 159 | which may help identify what went wrong). 160 | 161 | ## Known Limitations 162 | 163 | Some situations are known to cause problems for Versioneer. This details the 164 | most significant ones. More can be found on Github 165 | [issues page](https://github.com/python-versioneer/python-versioneer/issues). 166 | 167 | ### Subprojects 168 | 169 | Versioneer has limited support for source trees in which `setup.py` is not in 170 | the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are 171 | two common reasons why `setup.py` might not be in the root: 172 | 173 | * Source trees which contain multiple subprojects, such as 174 | [Buildbot](https://github.com/buildbot/buildbot), which contains both 175 | "master" and "slave" subprojects, each with their own `setup.py`, 176 | `setup.cfg`, and `tox.ini`. 
Projects like these produce multiple PyPI 177 | distributions (and upload multiple independently-installable tarballs). 178 | * Source trees whose main purpose is to contain a C library, but which also 179 | provide bindings to Python (and perhaps other languages) in subdirectories. 180 | 181 | Versioneer will look for `.git` in parent directories, and most operations 182 | should get the right version string. However `pip` and `setuptools` have bugs 183 | and implementation details which frequently cause `pip install .` from a 184 | subproject directory to fail to find a correct version string (so it usually 185 | defaults to `0+unknown`). 186 | 187 | `pip install --editable .` should work correctly. `setup.py install` might 188 | work too. 189 | 190 | Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in 191 | some later version. 192 | 193 | [Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking 194 | this issue. The discussion in 195 | [PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the 196 | issue from the Versioneer side in more detail. 197 | [pip PR#3176](https://github.com/pypa/pip/pull/3176) and 198 | [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve 199 | pip to let Versioneer work correctly. 200 | 201 | Versioneer-0.16 and earlier only looked for a `.git` directory next to the 202 | `setup.cfg`, so subprojects were completely unsupported with those releases. 203 | 204 | ### Editable installs with setuptools <= 18.5 205 | 206 | `setup.py develop` and `pip install --editable .` allow you to install a 207 | project into a virtualenv once, then continue editing the source code (and 208 | test) without re-installing after every change. 209 | 210 | "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a 211 | convenient way to specify executable scripts that should be installed along 212 | with the python package. 213 | 214 | These both work as expected when using modern setuptools. When using 215 | setuptools-18.5 or earlier, however, certain operations will cause 216 | `pkg_resources.DistributionNotFound` errors when running the entrypoint 217 | script, which must be resolved by re-installing the package. This happens 218 | when the install happens with one version, then the egg_info data is 219 | regenerated while a different version is checked out. Many setup.py commands 220 | cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into 221 | a different virtualenv), so this can be surprising. 222 | 223 | [Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes 224 | this one, but upgrading to a newer version of setuptools should probably 225 | resolve it. 226 | 227 | 228 | ## Updating Versioneer 229 | 230 | To upgrade your project to a new release of Versioneer, do the following: 231 | 232 | * install the new Versioneer (`pip install -U versioneer` or equivalent) 233 | * edit `setup.cfg`, if necessary, to include any new configuration settings 234 | indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. 235 | * re-run `versioneer install` in your source tree, to replace 236 | `SRC/_version.py` 237 | * commit any changed files 238 | 239 | ## Future Directions 240 | 241 | This tool is designed to make it easily extended to other version-control 242 | systems: all VCS-specific components are in separate directories like 243 | src/git/ . 
The top-level `versioneer.py` script is assembled from these 244 | components by running make-versioneer.py . In the future, make-versioneer.py 245 | will take a VCS name as an argument, and will construct a version of 246 | `versioneer.py` that is specific to the given VCS. It might also take the 247 | configuration arguments that are currently provided manually during 248 | installation by editing setup.py . Alternatively, it might go the other 249 | direction and include code from all supported VCS systems, reducing the 250 | number of intermediate scripts. 251 | 252 | ## Similar projects 253 | 254 | * [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time 255 | dependency 256 | * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of 257 | versioneer 258 | 259 | ## License 260 | 261 | To make Versioneer easier to embed, all its code is dedicated to the public 262 | domain. The `_version.py` that it creates is also in the public domain. 263 | Specifically, both are released under the Creative Commons "Public Domain 264 | Dedication" license (CC0-1.0), as described in 265 | https://creativecommons.org/publicdomain/zero/1.0/ . 266 | 267 | [pypi-image]: https://img.shields.io/pypi/v/versioneer.svg 268 | [pypi-url]: https://pypi.python.org/pypi/versioneer/ 269 | [travis-image]: 270 | https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg 271 | [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer 272 | 273 | """ 274 | 275 | import configparser 276 | import errno 277 | import json 278 | import os 279 | import re 280 | import subprocess 281 | import sys 282 | 283 | 284 | class VersioneerConfig: 285 | """Container for Versioneer configuration parameters.""" 286 | 287 | 288 | def get_root(): 289 | """Get the project root directory. 290 | 291 | We require that all commands are run from the project root, i.e. the 292 | directory that contains setup.py, setup.cfg, and versioneer.py . 293 | """ 294 | root = os.path.realpath(os.path.abspath(os.getcwd())) 295 | setup_py = os.path.join(root, "setup.py") 296 | versioneer_py = os.path.join(root, "versioneer.py") 297 | if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): 298 | # allow 'python path/to/setup.py COMMAND' 299 | root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) 300 | setup_py = os.path.join(root, "setup.py") 301 | versioneer_py = os.path.join(root, "versioneer.py") 302 | if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): 303 | err = ( 304 | "Versioneer was unable to run the project root directory. " 305 | "Versioneer requires setup.py to be executed from " 306 | "its immediate directory (like 'python setup.py COMMAND'), " 307 | "or in a way that lets it use sys.argv[0] to find the root " 308 | "(like 'python path/to/setup.py COMMAND')." 309 | ) 310 | raise VersioneerBadRootError(err) 311 | try: 312 | # Certain runtime workflows (setup.py install/develop in a setuptools 313 | # tree) execute all dependencies in a single python process, so 314 | # "versioneer" may be imported multiple times, and python's shared 315 | # module-import table will cache the first one. So we can't use 316 | # os.path.dirname(__file__), as that will find whichever 317 | # versioneer.py was first imported, even in later projects. 
318 | me = os.path.realpath(os.path.abspath(__file__)) 319 | me_dir = os.path.normcase(os.path.splitext(me)[0]) 320 | vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) 321 | if me_dir != vsr_dir: 322 | print( 323 | "Warning: build in %s is using versioneer.py from %s" 324 | % (os.path.dirname(me), versioneer_py) 325 | ) 326 | except NameError: 327 | pass 328 | return root 329 | 330 | 331 | def get_config_from_root(root): 332 | """Read the project setup.cfg file to determine Versioneer config.""" 333 | # This might raise EnvironmentError (if setup.cfg is missing), or 334 | # configparser.NoSectionError (if it lacks a [versioneer] section), or 335 | # configparser.NoOptionError (if it lacks "VCS="). See the docstring at 336 | # the top of versioneer.py for instructions on writing your setup.cfg . 337 | setup_cfg = os.path.join(root, "setup.cfg") 338 | parser = configparser.ConfigParser() 339 | with open(setup_cfg, "r") as f: 340 | parser.read_file(f) 341 | VCS = parser.get("versioneer", "VCS") # mandatory 342 | 343 | def get(parser, name): 344 | if parser.has_option("versioneer", name): 345 | return parser.get("versioneer", name) 346 | return None 347 | 348 | cfg = VersioneerConfig() 349 | cfg.VCS = VCS 350 | cfg.style = get(parser, "style") or "" 351 | cfg.versionfile_source = get(parser, "versionfile_source") 352 | cfg.versionfile_build = get(parser, "versionfile_build") 353 | cfg.tag_prefix = get(parser, "tag_prefix") 354 | if cfg.tag_prefix in ("''", '""'): 355 | cfg.tag_prefix = "" 356 | cfg.parentdir_prefix = get(parser, "parentdir_prefix") 357 | cfg.verbose = get(parser, "verbose") 358 | return cfg 359 | 360 | 361 | class NotThisMethod(Exception): 362 | """Exception raised if a method is not valid for the current scenario.""" 363 | 364 | 365 | # these dictionaries contain VCS-specific tools 366 | LONG_VERSION_PY = {} 367 | HANDLERS = {} 368 | 369 | 370 | def register_vcs_handler(vcs, method): # decorator 371 | """Create decorator to mark a method as the handler of a VCS.""" 372 | 373 | def decorate(f): 374 | """Store f in HANDLERS[vcs][method].""" 375 | if vcs not in HANDLERS: 376 | HANDLERS[vcs] = {} 377 | HANDLERS[vcs][method] = f 378 | return f 379 | 380 | return decorate 381 | 382 | 383 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): 384 | """Call the given command(s).""" 385 | assert isinstance(commands, list) 386 | p = None 387 | for c in commands: 388 | try: 389 | dispcmd = str([c] + args) 390 | # remember shell=False, so use git.cmd on windows, not just git 391 | p = subprocess.Popen( 392 | [c] + args, 393 | cwd=cwd, 394 | env=env, 395 | stdout=subprocess.PIPE, 396 | stderr=(subprocess.PIPE if hide_stderr else None), 397 | ) 398 | break 399 | except EnvironmentError: 400 | e = sys.exc_info()[1] 401 | if e.errno == errno.ENOENT: 402 | continue 403 | if verbose: 404 | print("unable to run %s" % dispcmd) 405 | print(e) 406 | return None, None 407 | else: 408 | if verbose: 409 | print("unable to find command, tried %s" % (commands,)) 410 | return None, None 411 | stdout = p.communicate()[0].strip().decode() 412 | if p.returncode != 0: 413 | if verbose: 414 | print("unable to run %s (error)" % dispcmd) 415 | print("stdout was %s" % stdout) 416 | return None, p.returncode 417 | return stdout, p.returncode 418 | 419 | 420 | LONG_VERSION_PY[ 421 | "git" 422 | ] = r''' 423 | # This file helps to compute a version number in source trees obtained from 424 | # git-archive tarball (such as those provided by githubs download-from-tag 425 
| # feature). Distribution tarballs (built by setup.py sdist) and build 426 | # directories (produced by setup.py build) will contain a much shorter file 427 | # that just contains the computed version number. 428 | 429 | # This file is released into the public domain. Generated by 430 | # versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) 431 | 432 | """Git implementation of _version.py.""" 433 | 434 | import errno 435 | import os 436 | import re 437 | import subprocess 438 | import sys 439 | 440 | 441 | def get_keywords(): 442 | """Get the keywords needed to look up the version information.""" 443 | # these strings will be replaced by git during git-archive. 444 | # setup.py/versioneer.py will grep for the variable names, so they must 445 | # each be defined on a line of their own. _version.py will just call 446 | # get_keywords(). 447 | git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" 448 | git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" 449 | git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" 450 | keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} 451 | return keywords 452 | 453 | 454 | class VersioneerConfig: 455 | """Container for Versioneer configuration parameters.""" 456 | 457 | 458 | def get_config(): 459 | """Create, populate and return the VersioneerConfig() object.""" 460 | # these strings are filled in when 'setup.py versioneer' creates 461 | # _version.py 462 | cfg = VersioneerConfig() 463 | cfg.VCS = "git" 464 | cfg.style = "%(STYLE)s" 465 | cfg.tag_prefix = "%(TAG_PREFIX)s" 466 | cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" 467 | cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" 468 | cfg.verbose = False 469 | return cfg 470 | 471 | 472 | class NotThisMethod(Exception): 473 | """Exception raised if a method is not valid for the current scenario.""" 474 | 475 | 476 | LONG_VERSION_PY = {} 477 | HANDLERS = {} 478 | 479 | 480 | def register_vcs_handler(vcs, method): # decorator 481 | """Create decorator to mark a method as the handler of a VCS.""" 482 | def decorate(f): 483 | """Store f in HANDLERS[vcs][method].""" 484 | if vcs not in HANDLERS: 485 | HANDLERS[vcs] = {} 486 | HANDLERS[vcs][method] = f 487 | return f 488 | return decorate 489 | 490 | 491 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, 492 | env=None): 493 | """Call the given command(s).""" 494 | assert isinstance(commands, list) 495 | p = None 496 | for c in commands: 497 | try: 498 | dispcmd = str([c] + args) 499 | # remember shell=False, so use git.cmd on windows, not just git 500 | p = subprocess.Popen([c] + args, cwd=cwd, env=env, 501 | stdout=subprocess.PIPE, 502 | stderr=(subprocess.PIPE if hide_stderr 503 | else None)) 504 | break 505 | except EnvironmentError: 506 | e = sys.exc_info()[1] 507 | if e.errno == errno.ENOENT: 508 | continue 509 | if verbose: 510 | print("unable to run %%s" %% dispcmd) 511 | print(e) 512 | return None, None 513 | else: 514 | if verbose: 515 | print("unable to find command, tried %%s" %% (commands,)) 516 | return None, None 517 | stdout = p.communicate()[0].strip().decode() 518 | if p.returncode != 0: 519 | if verbose: 520 | print("unable to run %%s (error)" %% dispcmd) 521 | print("stdout was %%s" %% stdout) 522 | return None, p.returncode 523 | return stdout, p.returncode 524 | 525 | 526 | def versions_from_parentdir(parentdir_prefix, root, verbose): 527 | """Try to determine the version from the parent directory name. 
528 | 529 | Source tarballs conventionally unpack into a directory that includes both 530 | the project name and a version string. We will also support searching up 531 | two directory levels for an appropriately named parent directory 532 | """ 533 | rootdirs = [] 534 | 535 | for i in range(3): 536 | dirname = os.path.basename(root) 537 | if dirname.startswith(parentdir_prefix): 538 | return {"version": dirname[len(parentdir_prefix):], 539 | "full-revisionid": None, 540 | "dirty": False, "error": None, "date": None} 541 | else: 542 | rootdirs.append(root) 543 | root = os.path.dirname(root) # up a level 544 | 545 | if verbose: 546 | print("Tried directories %%s but none started with prefix %%s" %% 547 | (str(rootdirs), parentdir_prefix)) 548 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 549 | 550 | 551 | @register_vcs_handler("git", "get_keywords") 552 | def git_get_keywords(versionfile_abs): 553 | """Extract version information from the given file.""" 554 | # the code embedded in _version.py can just fetch the value of these 555 | # keywords. When used from setup.py, we don't want to import _version.py, 556 | # so we do it with a regexp instead. This function is not used from 557 | # _version.py. 558 | keywords = {} 559 | try: 560 | f = open(versionfile_abs, "r") 561 | for line in f.readlines(): 562 | if line.strip().startswith("git_refnames ="): 563 | mo = re.search(r'=\s*"(.*)"', line) 564 | if mo: 565 | keywords["refnames"] = mo.group(1) 566 | if line.strip().startswith("git_full ="): 567 | mo = re.search(r'=\s*"(.*)"', line) 568 | if mo: 569 | keywords["full"] = mo.group(1) 570 | if line.strip().startswith("git_date ="): 571 | mo = re.search(r'=\s*"(.*)"', line) 572 | if mo: 573 | keywords["date"] = mo.group(1) 574 | f.close() 575 | except EnvironmentError: 576 | pass 577 | return keywords 578 | 579 | 580 | @register_vcs_handler("git", "keywords") 581 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 582 | """Get version information from git keywords.""" 583 | if not keywords: 584 | raise NotThisMethod("no keywords at all, weird") 585 | date = keywords.get("date") 586 | if date is not None: 587 | # Use only the last line. Previous lines may contain GPG signature 588 | # information. 589 | date = date.splitlines()[-1] 590 | 591 | # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant 592 | # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 593 | # -like" string, which we must then edit to make compliant), because 594 | # it's been around since git-1.5.3, and it's too difficult to 595 | # discover which version we're using, or to work around using an 596 | # older one. 597 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 598 | refnames = keywords["refnames"].strip() 599 | if refnames.startswith("$Format"): 600 | if verbose: 601 | print("keywords are unexpanded, not using") 602 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 603 | refs = set([r.strip() for r in refnames.strip("()").split(",")]) 604 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 605 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 606 | TAG = "tag: " 607 | tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) 608 | if not tags: 609 | # Either we're using git < 1.8.3, or there really are no tags. We use 610 | # a heuristic: assume all version tags have a digit. 
The old git %%d 611 | # expansion behaves like git log --decorate=short and strips out the 612 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 613 | # between branches and tags. By ignoring refnames without digits, we 614 | # filter out many common branch names like "release" and 615 | # "stabilization", as well as "HEAD" and "master". 616 | tags = set([r for r in refs if re.search(r'\d', r)]) 617 | if verbose: 618 | print("discarding '%%s', no digits" %% ",".join(refs - tags)) 619 | if verbose: 620 | print("likely tags: %%s" %% ",".join(sorted(tags))) 621 | for ref in sorted(tags): 622 | # sorting will prefer e.g. "2.0" over "2.0rc1" 623 | if ref.startswith(tag_prefix): 624 | r = ref[len(tag_prefix):] 625 | if verbose: 626 | print("picking %%s" %% r) 627 | return {"version": r, 628 | "full-revisionid": keywords["full"].strip(), 629 | "dirty": False, "error": None, 630 | "date": date} 631 | # no suitable tags, so version is "0+unknown", but full hex is still there 632 | if verbose: 633 | print("no suitable tags, using unknown + full revision id") 634 | return {"version": "0+unknown", 635 | "full-revisionid": keywords["full"].strip(), 636 | "dirty": False, "error": "no suitable tags", "date": None} 637 | 638 | 639 | @register_vcs_handler("git", "pieces_from_vcs") 640 | def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): 641 | """Get version from 'git describe' in the root of the source tree. 642 | 643 | This only gets called if the git-archive 'subst' keywords were *not* 644 | expanded, and _version.py hasn't already been rewritten with a short 645 | version string, meaning we're inside a checked out source tree. 646 | """ 647 | GITS = ["git"] 648 | if sys.platform == "win32": 649 | GITS = ["git.cmd", "git.exe"] 650 | 651 | out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, 652 | hide_stderr=True) 653 | if rc != 0: 654 | if verbose: 655 | print("Directory %%s not under git control" %% root) 656 | raise NotThisMethod("'git rev-parse --git-dir' returned error") 657 | 658 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] 659 | # if there isn't one, this yields HEX[-dirty] (no NUM) 660 | describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", 661 | "--always", "--long", 662 | "--match", "%%s*" %% tag_prefix], 663 | cwd=root) 664 | # --long was added in git-1.5.5 665 | if describe_out is None: 666 | raise NotThisMethod("'git describe' failed") 667 | describe_out = describe_out.strip() 668 | full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) 669 | if full_out is None: 670 | raise NotThisMethod("'git rev-parse' failed") 671 | full_out = full_out.strip() 672 | 673 | pieces = {} 674 | pieces["long"] = full_out 675 | pieces["short"] = full_out[:7] # maybe improved later 676 | pieces["error"] = None 677 | 678 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 679 | # TAG might have hyphens. 680 | git_describe = describe_out 681 | 682 | # look for -dirty suffix 683 | dirty = git_describe.endswith("-dirty") 684 | pieces["dirty"] = dirty 685 | if dirty: 686 | git_describe = git_describe[:git_describe.rindex("-dirty")] 687 | 688 | # now we have TAG-NUM-gHEX or HEX 689 | 690 | if "-" in git_describe: 691 | # TAG-NUM-gHEX 692 | mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) 693 | if not mo: 694 | # unparseable. Maybe git-describe is misbehaving? 
695 | pieces["error"] = ("unable to parse git-describe output: '%%s'" 696 | %% describe_out) 697 | return pieces 698 | 699 | # tag 700 | full_tag = mo.group(1) 701 | if not full_tag.startswith(tag_prefix): 702 | if verbose: 703 | fmt = "tag '%%s' doesn't start with prefix '%%s'" 704 | print(fmt %% (full_tag, tag_prefix)) 705 | pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" 706 | %% (full_tag, tag_prefix)) 707 | return pieces 708 | pieces["closest-tag"] = full_tag[len(tag_prefix):] 709 | 710 | # distance: number of commits since tag 711 | pieces["distance"] = int(mo.group(2)) 712 | 713 | # commit: short hex revision ID 714 | pieces["short"] = mo.group(3) 715 | 716 | else: 717 | # HEX: no tags 718 | pieces["closest-tag"] = None 719 | count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], 720 | cwd=root) 721 | pieces["distance"] = int(count_out) # total number of commits 722 | 723 | # commit date: see ISO-8601 comment in git_versions_from_keywords() 724 | date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], 725 | cwd=root)[0].strip() 726 | # Use only the last line. Previous lines may contain GPG signature 727 | # information. 728 | date = date.splitlines()[-1] 729 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 730 | 731 | return pieces 732 | 733 | 734 | def plus_or_dot(pieces): 735 | """Return a + if we don't already have one, else return a .""" 736 | if "+" in pieces.get("closest-tag", ""): 737 | return "." 738 | return "+" 739 | 740 | 741 | def render_pep440(pieces): 742 | """Build up version string, with post-release "local version identifier". 743 | 744 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 745 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 746 | 747 | Exceptions: 748 | 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] 749 | """ 750 | if pieces["closest-tag"]: 751 | rendered = pieces["closest-tag"] 752 | if pieces["distance"] or pieces["dirty"]: 753 | rendered += plus_or_dot(pieces) 754 | rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) 755 | if pieces["dirty"]: 756 | rendered += ".dirty" 757 | else: 758 | # exception #1 759 | rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], 760 | pieces["short"]) 761 | if pieces["dirty"]: 762 | rendered += ".dirty" 763 | return rendered 764 | 765 | 766 | def render_pep440_pre(pieces): 767 | """TAG[.post0.devDISTANCE] -- No -dirty. 768 | 769 | Exceptions: 770 | 1: no tags. 0.post0.devDISTANCE 771 | """ 772 | if pieces["closest-tag"]: 773 | rendered = pieces["closest-tag"] 774 | if pieces["distance"]: 775 | rendered += ".post0.dev%%d" %% pieces["distance"] 776 | else: 777 | # exception #1 778 | rendered = "0.post0.dev%%d" %% pieces["distance"] 779 | return rendered 780 | 781 | 782 | def render_pep440_post(pieces): 783 | """TAG[.postDISTANCE[.dev0]+gHEX] . 784 | 785 | The ".dev0" means dirty. Note that .dev0 sorts backwards 786 | (a dirty tree will appear "older" than the corresponding clean one), 787 | but you shouldn't be releasing software with -dirty anyways. 788 | 789 | Exceptions: 790 | 1: no tags. 
0.postDISTANCE[.dev0] 791 | """ 792 | if pieces["closest-tag"]: 793 | rendered = pieces["closest-tag"] 794 | if pieces["distance"] or pieces["dirty"]: 795 | rendered += ".post%%d" %% pieces["distance"] 796 | if pieces["dirty"]: 797 | rendered += ".dev0" 798 | rendered += plus_or_dot(pieces) 799 | rendered += "g%%s" %% pieces["short"] 800 | else: 801 | # exception #1 802 | rendered = "0.post%%d" %% pieces["distance"] 803 | if pieces["dirty"]: 804 | rendered += ".dev0" 805 | rendered += "+g%%s" %% pieces["short"] 806 | return rendered 807 | 808 | 809 | def render_pep440_old(pieces): 810 | """TAG[.postDISTANCE[.dev0]] . 811 | 812 | The ".dev0" means dirty. 813 | 814 | Exceptions: 815 | 1: no tags. 0.postDISTANCE[.dev0] 816 | """ 817 | if pieces["closest-tag"]: 818 | rendered = pieces["closest-tag"] 819 | if pieces["distance"] or pieces["dirty"]: 820 | rendered += ".post%%d" %% pieces["distance"] 821 | if pieces["dirty"]: 822 | rendered += ".dev0" 823 | else: 824 | # exception #1 825 | rendered = "0.post%%d" %% pieces["distance"] 826 | if pieces["dirty"]: 827 | rendered += ".dev0" 828 | return rendered 829 | 830 | 831 | def render_git_describe(pieces): 832 | """TAG[-DISTANCE-gHEX][-dirty]. 833 | 834 | Like 'git describe --tags --dirty --always'. 835 | 836 | Exceptions: 837 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 838 | """ 839 | if pieces["closest-tag"]: 840 | rendered = pieces["closest-tag"] 841 | if pieces["distance"]: 842 | rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) 843 | else: 844 | # exception #1 845 | rendered = pieces["short"] 846 | if pieces["dirty"]: 847 | rendered += "-dirty" 848 | return rendered 849 | 850 | 851 | def render_git_describe_long(pieces): 852 | """TAG-DISTANCE-gHEX[-dirty]. 853 | 854 | Like 'git describe --tags --dirty --always -long'. 855 | The distance/hash is unconditional. 856 | 857 | Exceptions: 858 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 859 | """ 860 | if pieces["closest-tag"]: 861 | rendered = pieces["closest-tag"] 862 | rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) 863 | else: 864 | # exception #1 865 | rendered = pieces["short"] 866 | if pieces["dirty"]: 867 | rendered += "-dirty" 868 | return rendered 869 | 870 | 871 | def render(pieces, style): 872 | """Render the given version pieces into the requested style.""" 873 | if pieces["error"]: 874 | return {"version": "unknown", 875 | "full-revisionid": pieces.get("long"), 876 | "dirty": None, 877 | "error": pieces["error"], 878 | "date": None} 879 | 880 | if not style or style == "default": 881 | style = "pep440" # the default 882 | 883 | if style == "pep440": 884 | rendered = render_pep440(pieces) 885 | elif style == "pep440-pre": 886 | rendered = render_pep440_pre(pieces) 887 | elif style == "pep440-post": 888 | rendered = render_pep440_post(pieces) 889 | elif style == "pep440-old": 890 | rendered = render_pep440_old(pieces) 891 | elif style == "git-describe": 892 | rendered = render_git_describe(pieces) 893 | elif style == "git-describe-long": 894 | rendered = render_git_describe_long(pieces) 895 | else: 896 | raise ValueError("unknown style '%%s'" %% style) 897 | 898 | return {"version": rendered, "full-revisionid": pieces["long"], 899 | "dirty": pieces["dirty"], "error": None, 900 | "date": pieces.get("date")} 901 | 902 | 903 | def get_versions(): 904 | """Get version information or return default if unable to do so.""" 905 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. 
If we have 906 | # __file__, we can work backwards from there to the root. Some 907 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which 908 | # case we can only use expanded keywords. 909 | 910 | cfg = get_config() 911 | verbose = cfg.verbose 912 | 913 | try: 914 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, 915 | verbose) 916 | except NotThisMethod: 917 | pass 918 | 919 | try: 920 | root = os.path.realpath(__file__) 921 | # versionfile_source is the relative path from the top of the source 922 | # tree (where the .git directory might live) to this file. Invert 923 | # this to find the root from __file__. 924 | for i in cfg.versionfile_source.split('/'): 925 | root = os.path.dirname(root) 926 | except NameError: 927 | return {"version": "0+unknown", "full-revisionid": None, 928 | "dirty": None, 929 | "error": "unable to find root of source tree", 930 | "date": None} 931 | 932 | try: 933 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) 934 | return render(pieces, cfg.style) 935 | except NotThisMethod: 936 | pass 937 | 938 | try: 939 | if cfg.parentdir_prefix: 940 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 941 | except NotThisMethod: 942 | pass 943 | 944 | return {"version": "0+unknown", "full-revisionid": None, 945 | "dirty": None, 946 | "error": "unable to compute version", "date": None} 947 | ''' 948 | 949 | 950 | @register_vcs_handler("git", "get_keywords") 951 | def git_get_keywords(versionfile_abs): 952 | """Extract version information from the given file.""" 953 | # the code embedded in _version.py can just fetch the value of these 954 | # keywords. When used from setup.py, we don't want to import _version.py, 955 | # so we do it with a regexp instead. This function is not used from 956 | # _version.py. 957 | keywords = {} 958 | try: 959 | f = open(versionfile_abs, "r") 960 | for line in f.readlines(): 961 | if line.strip().startswith("git_refnames ="): 962 | mo = re.search(r'=\s*"(.*)"', line) 963 | if mo: 964 | keywords["refnames"] = mo.group(1) 965 | if line.strip().startswith("git_full ="): 966 | mo = re.search(r'=\s*"(.*)"', line) 967 | if mo: 968 | keywords["full"] = mo.group(1) 969 | if line.strip().startswith("git_date ="): 970 | mo = re.search(r'=\s*"(.*)"', line) 971 | if mo: 972 | keywords["date"] = mo.group(1) 973 | f.close() 974 | except EnvironmentError: 975 | pass 976 | return keywords 977 | 978 | 979 | @register_vcs_handler("git", "keywords") 980 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 981 | """Get version information from git keywords.""" 982 | if not keywords: 983 | raise NotThisMethod("no keywords at all, weird") 984 | date = keywords.get("date") 985 | if date is not None: 986 | # Use only the last line. Previous lines may contain GPG signature 987 | # information. 988 | date = date.splitlines()[-1] 989 | 990 | # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant 991 | # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 992 | # -like" string, which we must then edit to make compliant), because 993 | # it's been around since git-1.5.3, and it's too difficult to 994 | # discover which version we're using, or to work around using an 995 | # older one. 
996 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 997 | refnames = keywords["refnames"].strip() 998 | if refnames.startswith("$Format"): 999 | if verbose: 1000 | print("keywords are unexpanded, not using") 1001 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 1002 | refs = set([r.strip() for r in refnames.strip("()").split(",")]) 1003 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 1004 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 1005 | TAG = "tag: " 1006 | tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) 1007 | if not tags: 1008 | # Either we're using git < 1.8.3, or there really are no tags. We use 1009 | # a heuristic: assume all version tags have a digit. The old git %d 1010 | # expansion behaves like git log --decorate=short and strips out the 1011 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 1012 | # between branches and tags. By ignoring refnames without digits, we 1013 | # filter out many common branch names like "release" and 1014 | # "stabilization", as well as "HEAD" and "master". 1015 | tags = set([r for r in refs if re.search(r"\d", r)]) 1016 | if verbose: 1017 | print("discarding '%s', no digits" % ",".join(refs - tags)) 1018 | if verbose: 1019 | print("likely tags: %s" % ",".join(sorted(tags))) 1020 | for ref in sorted(tags): 1021 | # sorting will prefer e.g. "2.0" over "2.0rc1" 1022 | if ref.startswith(tag_prefix): 1023 | r = ref[len(tag_prefix) :] 1024 | if verbose: 1025 | print("picking %s" % r) 1026 | return { 1027 | "version": r, 1028 | "full-revisionid": keywords["full"].strip(), 1029 | "dirty": False, 1030 | "error": None, 1031 | "date": date, 1032 | } 1033 | # no suitable tags, so version is "0+unknown", but full hex is still there 1034 | if verbose: 1035 | print("no suitable tags, using unknown + full revision id") 1036 | return { 1037 | "version": "0+unknown", 1038 | "full-revisionid": keywords["full"].strip(), 1039 | "dirty": False, 1040 | "error": "no suitable tags", 1041 | "date": None, 1042 | } 1043 | 1044 | 1045 | @register_vcs_handler("git", "pieces_from_vcs") 1046 | def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): 1047 | """Get version from 'git describe' in the root of the source tree. 1048 | 1049 | This only gets called if the git-archive 'subst' keywords were *not* 1050 | expanded, and _version.py hasn't already been rewritten with a short 1051 | version string, meaning we're inside a checked out source tree. 
1052 | """ 1053 | GITS = ["git"] 1054 | if sys.platform == "win32": 1055 | GITS = ["git.cmd", "git.exe"] 1056 | 1057 | out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) 1058 | if rc != 0: 1059 | if verbose: 1060 | print("Directory %s not under git control" % root) 1061 | raise NotThisMethod("'git rev-parse --git-dir' returned error") 1062 | 1063 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] 1064 | # if there isn't one, this yields HEX[-dirty] (no NUM) 1065 | describe_out, rc = run_command( 1066 | GITS, 1067 | ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], 1068 | cwd=root, 1069 | ) 1070 | # --long was added in git-1.5.5 1071 | if describe_out is None: 1072 | raise NotThisMethod("'git describe' failed") 1073 | describe_out = describe_out.strip() 1074 | full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) 1075 | if full_out is None: 1076 | raise NotThisMethod("'git rev-parse' failed") 1077 | full_out = full_out.strip() 1078 | 1079 | pieces = {} 1080 | pieces["long"] = full_out 1081 | pieces["short"] = full_out[:7] # maybe improved later 1082 | pieces["error"] = None 1083 | 1084 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 1085 | # TAG might have hyphens. 1086 | git_describe = describe_out 1087 | 1088 | # look for -dirty suffix 1089 | dirty = git_describe.endswith("-dirty") 1090 | pieces["dirty"] = dirty 1091 | if dirty: 1092 | git_describe = git_describe[: git_describe.rindex("-dirty")] 1093 | 1094 | # now we have TAG-NUM-gHEX or HEX 1095 | 1096 | if "-" in git_describe: 1097 | # TAG-NUM-gHEX 1098 | mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) 1099 | if not mo: 1100 | # unparseable. Maybe git-describe is misbehaving? 1101 | pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out 1102 | return pieces 1103 | 1104 | # tag 1105 | full_tag = mo.group(1) 1106 | if not full_tag.startswith(tag_prefix): 1107 | if verbose: 1108 | fmt = "tag '%s' doesn't start with prefix '%s'" 1109 | print(fmt % (full_tag, tag_prefix)) 1110 | pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) 1111 | return pieces 1112 | pieces["closest-tag"] = full_tag[len(tag_prefix) :] 1113 | 1114 | # distance: number of commits since tag 1115 | pieces["distance"] = int(mo.group(2)) 1116 | 1117 | # commit: short hex revision ID 1118 | pieces["short"] = mo.group(3) 1119 | 1120 | else: 1121 | # HEX: no tags 1122 | pieces["closest-tag"] = None 1123 | count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) 1124 | pieces["distance"] = int(count_out) # total number of commits 1125 | 1126 | # commit date: see ISO-8601 comment in git_versions_from_keywords() 1127 | date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() 1128 | # Use only the last line. Previous lines may contain GPG signature 1129 | # information. 1130 | date = date.splitlines()[-1] 1131 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 1132 | 1133 | return pieces 1134 | 1135 | 1136 | def do_vcs_install(manifest_in, versionfile_source, ipy): 1137 | """Git-specific installation logic for Versioneer. 1138 | 1139 | For Git, this means creating/changing .gitattributes to mark _version.py 1140 | for export-subst keyword substitution. 
1141 | """ 1142 | GITS = ["git"] 1143 | if sys.platform == "win32": 1144 | GITS = ["git.cmd", "git.exe"] 1145 | files = [manifest_in, versionfile_source] 1146 | if ipy: 1147 | files.append(ipy) 1148 | try: 1149 | me = __file__ 1150 | if me.endswith(".pyc") or me.endswith(".pyo"): 1151 | me = os.path.splitext(me)[0] + ".py" 1152 | versioneer_file = os.path.relpath(me) 1153 | except NameError: 1154 | versioneer_file = "versioneer.py" 1155 | files.append(versioneer_file) 1156 | present = False 1157 | try: 1158 | f = open(".gitattributes", "r") 1159 | for line in f.readlines(): 1160 | if line.strip().startswith(versionfile_source): 1161 | if "export-subst" in line.strip().split()[1:]: 1162 | present = True 1163 | f.close() 1164 | except EnvironmentError: 1165 | pass 1166 | if not present: 1167 | f = open(".gitattributes", "a+") 1168 | f.write("%s export-subst\n" % versionfile_source) 1169 | f.close() 1170 | files.append(".gitattributes") 1171 | run_command(GITS, ["add", "--"] + files) 1172 | 1173 | 1174 | def versions_from_parentdir(parentdir_prefix, root, verbose): 1175 | """Try to determine the version from the parent directory name. 1176 | 1177 | Source tarballs conventionally unpack into a directory that includes both 1178 | the project name and a version string. We will also support searching up 1179 | two directory levels for an appropriately named parent directory 1180 | """ 1181 | rootdirs = [] 1182 | 1183 | for i in range(3): 1184 | dirname = os.path.basename(root) 1185 | if dirname.startswith(parentdir_prefix): 1186 | return { 1187 | "version": dirname[len(parentdir_prefix) :], 1188 | "full-revisionid": None, 1189 | "dirty": False, 1190 | "error": None, 1191 | "date": None, 1192 | } 1193 | else: 1194 | rootdirs.append(root) 1195 | root = os.path.dirname(root) # up a level 1196 | 1197 | if verbose: 1198 | print( 1199 | "Tried directories %s but none started with prefix %s" 1200 | % (str(rootdirs), parentdir_prefix) 1201 | ) 1202 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 1203 | 1204 | 1205 | SHORT_VERSION_PY = """ 1206 | # This file was generated by 'versioneer.py' (0.19) from 1207 | # revision-control system data, or from the parent directory name of an 1208 | # unpacked source archive. Distribution tarballs contain a pre-generated copy 1209 | # of this file. 
1210 | 1211 | import json 1212 | 1213 | version_json = ''' 1214 | %s 1215 | ''' # END VERSION_JSON 1216 | 1217 | 1218 | def get_versions(): 1219 | return json.loads(version_json) 1220 | """ 1221 | 1222 | 1223 | def versions_from_file(filename): 1224 | """Try to determine the version from _version.py if present.""" 1225 | try: 1226 | with open(filename) as f: 1227 | contents = f.read() 1228 | except EnvironmentError: 1229 | raise NotThisMethod("unable to read _version.py") 1230 | mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) 1231 | if not mo: 1232 | mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) 1233 | if not mo: 1234 | raise NotThisMethod("no version_json in _version.py") 1235 | return json.loads(mo.group(1)) 1236 | 1237 | 1238 | def write_to_version_file(filename, versions): 1239 | """Write the given version number to the given _version.py file.""" 1240 | os.unlink(filename) 1241 | contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) 1242 | with open(filename, "w") as f: 1243 | f.write(SHORT_VERSION_PY % contents) 1244 | 1245 | print("set %s to '%s'" % (filename, versions["version"])) 1246 | 1247 | 1248 | def plus_or_dot(pieces): 1249 | """Return a + if we don't already have one, else return a .""" 1250 | if "+" in pieces.get("closest-tag", ""): 1251 | return "." 1252 | return "+" 1253 | 1254 | 1255 | def render_pep440(pieces): 1256 | """Build up version string, with post-release "local version identifier". 1257 | 1258 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 1259 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 1260 | 1261 | Exceptions: 1262 | 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] 1263 | """ 1264 | if pieces["closest-tag"]: 1265 | rendered = pieces["closest-tag"] 1266 | if pieces["distance"] or pieces["dirty"]: 1267 | rendered += plus_or_dot(pieces) 1268 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 1269 | if pieces["dirty"]: 1270 | rendered += ".dirty" 1271 | else: 1272 | # exception #1 1273 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) 1274 | if pieces["dirty"]: 1275 | rendered += ".dirty" 1276 | return rendered 1277 | 1278 | 1279 | def render_pep440_pre(pieces): 1280 | """TAG[.post0.devDISTANCE] -- No -dirty. 1281 | 1282 | Exceptions: 1283 | 1: no tags. 0.post0.devDISTANCE 1284 | """ 1285 | if pieces["closest-tag"]: 1286 | rendered = pieces["closest-tag"] 1287 | if pieces["distance"]: 1288 | rendered += ".post0.dev%d" % pieces["distance"] 1289 | else: 1290 | # exception #1 1291 | rendered = "0.post0.dev%d" % pieces["distance"] 1292 | return rendered 1293 | 1294 | 1295 | def render_pep440_post(pieces): 1296 | """TAG[.postDISTANCE[.dev0]+gHEX] . 1297 | 1298 | The ".dev0" means dirty. Note that .dev0 sorts backwards 1299 | (a dirty tree will appear "older" than the corresponding clean one), 1300 | but you shouldn't be releasing software with -dirty anyways. 1301 | 1302 | Exceptions: 1303 | 1: no tags. 
0.postDISTANCE[.dev0] 1304 | """ 1305 | if pieces["closest-tag"]: 1306 | rendered = pieces["closest-tag"] 1307 | if pieces["distance"] or pieces["dirty"]: 1308 | rendered += ".post%d" % pieces["distance"] 1309 | if pieces["dirty"]: 1310 | rendered += ".dev0" 1311 | rendered += plus_or_dot(pieces) 1312 | rendered += "g%s" % pieces["short"] 1313 | else: 1314 | # exception #1 1315 | rendered = "0.post%d" % pieces["distance"] 1316 | if pieces["dirty"]: 1317 | rendered += ".dev0" 1318 | rendered += "+g%s" % pieces["short"] 1319 | return rendered 1320 | 1321 | 1322 | def render_pep440_old(pieces): 1323 | """TAG[.postDISTANCE[.dev0]] . 1324 | 1325 | The ".dev0" means dirty. 1326 | 1327 | Exceptions: 1328 | 1: no tags. 0.postDISTANCE[.dev0] 1329 | """ 1330 | if pieces["closest-tag"]: 1331 | rendered = pieces["closest-tag"] 1332 | if pieces["distance"] or pieces["dirty"]: 1333 | rendered += ".post%d" % pieces["distance"] 1334 | if pieces["dirty"]: 1335 | rendered += ".dev0" 1336 | else: 1337 | # exception #1 1338 | rendered = "0.post%d" % pieces["distance"] 1339 | if pieces["dirty"]: 1340 | rendered += ".dev0" 1341 | return rendered 1342 | 1343 | 1344 | def render_git_describe(pieces): 1345 | """TAG[-DISTANCE-gHEX][-dirty]. 1346 | 1347 | Like 'git describe --tags --dirty --always'. 1348 | 1349 | Exceptions: 1350 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 1351 | """ 1352 | if pieces["closest-tag"]: 1353 | rendered = pieces["closest-tag"] 1354 | if pieces["distance"]: 1355 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 1356 | else: 1357 | # exception #1 1358 | rendered = pieces["short"] 1359 | if pieces["dirty"]: 1360 | rendered += "-dirty" 1361 | return rendered 1362 | 1363 | 1364 | def render_git_describe_long(pieces): 1365 | """TAG-DISTANCE-gHEX[-dirty]. 1366 | 1367 | Like 'git describe --tags --dirty --always --long'. 1368 | The distance/hash is unconditional. 1369 | 1370 | Exceptions: 1371 | 1: no tags.
HEX[-dirty] (note: no 'g' prefix) 1372 | """ 1373 | if pieces["closest-tag"]: 1374 | rendered = pieces["closest-tag"] 1375 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 1376 | else: 1377 | # exception #1 1378 | rendered = pieces["short"] 1379 | if pieces["dirty"]: 1380 | rendered += "-dirty" 1381 | return rendered 1382 | 1383 | 1384 | def render(pieces, style): 1385 | """Render the given version pieces into the requested style.""" 1386 | if pieces["error"]: 1387 | return { 1388 | "version": "unknown", 1389 | "full-revisionid": pieces.get("long"), 1390 | "dirty": None, 1391 | "error": pieces["error"], 1392 | "date": None, 1393 | } 1394 | 1395 | if not style or style == "default": 1396 | style = "pep440" # the default 1397 | 1398 | if style == "pep440": 1399 | rendered = render_pep440(pieces) 1400 | elif style == "pep440-pre": 1401 | rendered = render_pep440_pre(pieces) 1402 | elif style == "pep440-post": 1403 | rendered = render_pep440_post(pieces) 1404 | elif style == "pep440-old": 1405 | rendered = render_pep440_old(pieces) 1406 | elif style == "git-describe": 1407 | rendered = render_git_describe(pieces) 1408 | elif style == "git-describe-long": 1409 | rendered = render_git_describe_long(pieces) 1410 | else: 1411 | raise ValueError("unknown style '%s'" % style) 1412 | 1413 | return { 1414 | "version": rendered, 1415 | "full-revisionid": pieces["long"], 1416 | "dirty": pieces["dirty"], 1417 | "error": None, 1418 | "date": pieces.get("date"), 1419 | } 1420 | 1421 | 1422 | class VersioneerBadRootError(Exception): 1423 | """The project root directory is unknown or missing key files.""" 1424 | 1425 | 1426 | def get_versions(verbose=False): 1427 | """Get the project version from whatever source is available. 1428 | 1429 | Returns dict with two keys: 'version' and 'full'. 1430 | """ 1431 | if "versioneer" in sys.modules: 1432 | # see the discussion in cmdclass.py:get_cmdclass() 1433 | del sys.modules["versioneer"] 1434 | 1435 | root = get_root() 1436 | cfg = get_config_from_root(root) 1437 | 1438 | assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" 1439 | handlers = HANDLERS.get(cfg.VCS) 1440 | assert handlers, "unrecognized VCS '%s'" % cfg.VCS 1441 | verbose = verbose or cfg.verbose 1442 | assert cfg.versionfile_source is not None, "please set versioneer.versionfile_source" 1443 | assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" 1444 | 1445 | versionfile_abs = os.path.join(root, cfg.versionfile_source) 1446 | 1447 | # extract version from first of: _version.py, VCS command (e.g. 'git 1448 | # describe'), parentdir. This is meant to work for developers using a 1449 | # source checkout, for users of a tarball created by 'setup.py sdist', 1450 | # and for users of a tarball/zipball created by 'git archive' or github's 1451 | # download-from-tag feature or the equivalent in other VCSes. 
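    # Each of the strategies below signals "not applicable" by raising
    # NotThisMethod, so control falls through to the next source and,
    # if every source fails, to the "0+unknown" fallback at the end.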
1452 | 1453 | get_keywords_f = handlers.get("get_keywords") 1454 | from_keywords_f = handlers.get("keywords") 1455 | if get_keywords_f and from_keywords_f: 1456 | try: 1457 | keywords = get_keywords_f(versionfile_abs) 1458 | ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) 1459 | if verbose: 1460 | print("got version from expanded keyword %s" % ver) 1461 | return ver 1462 | except NotThisMethod: 1463 | pass 1464 | 1465 | try: 1466 | ver = versions_from_file(versionfile_abs) 1467 | if verbose: 1468 | print("got version from file %s %s" % (versionfile_abs, ver)) 1469 | return ver 1470 | except NotThisMethod: 1471 | pass 1472 | 1473 | from_vcs_f = handlers.get("pieces_from_vcs") 1474 | if from_vcs_f: 1475 | try: 1476 | pieces = from_vcs_f(cfg.tag_prefix, root, verbose) 1477 | ver = render(pieces, cfg.style) 1478 | if verbose: 1479 | print("got version from VCS %s" % ver) 1480 | return ver 1481 | except NotThisMethod: 1482 | pass 1483 | 1484 | try: 1485 | if cfg.parentdir_prefix: 1486 | ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 1487 | if verbose: 1488 | print("got version from parentdir %s" % ver) 1489 | return ver 1490 | except NotThisMethod: 1491 | pass 1492 | 1493 | if verbose: 1494 | print("unable to compute version") 1495 | 1496 | return { 1497 | "version": "0+unknown", 1498 | "full-revisionid": None, 1499 | "dirty": None, 1500 | "error": "unable to compute version", 1501 | "date": None, 1502 | } 1503 | 1504 | 1505 | def get_version(): 1506 | """Get the short version string for this project.""" 1507 | return get_versions()["version"] 1508 | 1509 | 1510 | def get_cmdclass(cmdclass=None): 1511 | """Get the custom setuptools/distutils subclasses used by Versioneer. 1512 | 1513 | If the package uses a different cmdclass (e.g. one from numpy), it 1514 | should be provided as an argument. 1515 | """ 1516 | if "versioneer" in sys.modules: 1517 | del sys.modules["versioneer"] 1518 | # this fixes the "python setup.py develop" case (also 'install' and 1519 | # 'easy_install .'), in which subdependencies of the main project are 1520 | # built (using setup.py bdist_egg) in the same python process. Assume 1521 | # a main project A and a dependency B, which use different versions 1522 | # of Versioneer. A's setup.py imports A's Versioneer, leaving it in 1523 | # sys.modules by the time B's setup.py is executed, causing B to run 1524 | # with the wrong versioneer. Setuptools wraps the sub-dep builds in a 1525 | # sandbox that restores sys.modules to its pre-build state, so the 1526 | # parent is protected against the child's "import versioneer". By 1527 | # removing ourselves from sys.modules here, before the child build 1528 | # happens, we protect the child from the parent's versioneer too.
1529 | # Also see https://github.com/python-versioneer/python-versioneer/issues/52 1530 | 1531 | cmds = {} if cmdclass is None else cmdclass.copy() 1532 | 1533 | # we add "version" to both distutils and setuptools 1534 | from distutils.core import Command 1535 | 1536 | class cmd_version(Command): 1537 | description = "report generated version string" 1538 | user_options = [] 1539 | boolean_options = [] 1540 | 1541 | def initialize_options(self): 1542 | pass 1543 | 1544 | def finalize_options(self): 1545 | pass 1546 | 1547 | def run(self): 1548 | vers = get_versions(verbose=True) 1549 | print("Version: %s" % vers["version"]) 1550 | print(" full-revisionid: %s" % vers.get("full-revisionid")) 1551 | print(" dirty: %s" % vers.get("dirty")) 1552 | print(" date: %s" % vers.get("date")) 1553 | if vers["error"]: 1554 | print(" error: %s" % vers["error"]) 1555 | 1556 | cmds["version"] = cmd_version 1557 | 1558 | # we override "build_py" in both distutils and setuptools 1559 | # 1560 | # most invocation pathways end up running build_py: 1561 | # distutils/build -> build_py 1562 | # distutils/install -> distutils/build ->.. 1563 | # setuptools/bdist_wheel -> distutils/install ->.. 1564 | # setuptools/bdist_egg -> distutils/install_lib -> build_py 1565 | # setuptools/install -> bdist_egg ->.. 1566 | # setuptools/develop -> ? 1567 | # pip install: 1568 | # copies source tree to a tempdir before running egg_info/etc 1569 | # if .git isn't copied too, 'git describe' will fail 1570 | # then does setup.py bdist_wheel, or sometimes setup.py install 1571 | # setup.py egg_info -> ? 1572 | 1573 | # we override different "build_py" commands for both environments 1574 | if "build_py" in cmds: 1575 | _build_py = cmds["build_py"] 1576 | elif "setuptools" in sys.modules: 1577 | from setuptools.command.build_py import build_py as _build_py 1578 | else: 1579 | from distutils.command.build_py import build_py as _build_py 1580 | 1581 | class cmd_build_py(_build_py): 1582 | def run(self): 1583 | root = get_root() 1584 | cfg = get_config_from_root(root) 1585 | versions = get_versions() 1586 | _build_py.run(self) 1587 | # now locate _version.py in the new build/ directory and replace 1588 | # it with an updated value 1589 | if cfg.versionfile_build: 1590 | target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) 1591 | print("UPDATING %s" % target_versionfile) 1592 | write_to_version_file(target_versionfile, versions) 1593 | 1594 | cmds["build_py"] = cmd_build_py 1595 | 1596 | if "setuptools" in sys.modules: 1597 | from setuptools.command.build_ext import build_ext as _build_ext 1598 | else: 1599 | from distutils.command.build_ext import build_ext as _build_ext 1600 | 1601 | class cmd_build_ext(_build_ext): 1602 | def run(self): 1603 | root = get_root() 1604 | cfg = get_config_from_root(root) 1605 | versions = get_versions() 1606 | _build_ext.run(self) 1607 | if self.inplace: 1608 | # build_ext --inplace will only build extensions in 1609 | # build/lib<..> dir with no _version.py to write to. 1610 | # As in place builds will already have a _version.py 1611 | # in the module dir, we do not need to write one. 
1612 | return 1613 | # now locate _version.py in the new build/ directory and replace 1614 | # it with an updated value 1615 | target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source) 1616 | print("UPDATING %s" % target_versionfile) 1617 | write_to_version_file(target_versionfile, versions) 1618 | 1619 | cmds["build_ext"] = cmd_build_ext 1620 | 1621 | if "cx_Freeze" in sys.modules: # cx_freeze enabled? 1622 | from cx_Freeze.dist import build_exe as _build_exe 1623 | 1624 | # nczeczulin reports that py2exe won't like the pep440-style string 1625 | # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. 1626 | # setup(console=[{ 1627 | # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION 1628 | # "product_version": versioneer.get_version(), 1629 | # ... 1630 | 1631 | class cmd_build_exe(_build_exe): 1632 | def run(self): 1633 | root = get_root() 1634 | cfg = get_config_from_root(root) 1635 | versions = get_versions() 1636 | target_versionfile = cfg.versionfile_source 1637 | print("UPDATING %s" % target_versionfile) 1638 | write_to_version_file(target_versionfile, versions) 1639 | 1640 | _build_exe.run(self) 1641 | os.unlink(target_versionfile) 1642 | with open(cfg.versionfile_source, "w") as f: 1643 | LONG = LONG_VERSION_PY[cfg.VCS] 1644 | f.write( 1645 | LONG 1646 | % { 1647 | "DOLLAR": "$", 1648 | "STYLE": cfg.style, 1649 | "TAG_PREFIX": cfg.tag_prefix, 1650 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1651 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1652 | } 1653 | ) 1654 | 1655 | cmds["build_exe"] = cmd_build_exe 1656 | del cmds["build_py"] 1657 | 1658 | if "py2exe" in sys.modules: # py2exe enabled? 1659 | from py2exe.distutils_buildexe import py2exe as _py2exe 1660 | 1661 | class cmd_py2exe(_py2exe): 1662 | def run(self): 1663 | root = get_root() 1664 | cfg = get_config_from_root(root) 1665 | versions = get_versions() 1666 | target_versionfile = cfg.versionfile_source 1667 | print("UPDATING %s" % target_versionfile) 1668 | write_to_version_file(target_versionfile, versions) 1669 | 1670 | _py2exe.run(self) 1671 | os.unlink(target_versionfile) 1672 | with open(cfg.versionfile_source, "w") as f: 1673 | LONG = LONG_VERSION_PY[cfg.VCS] 1674 | f.write( 1675 | LONG 1676 | % { 1677 | "DOLLAR": "$", 1678 | "STYLE": cfg.style, 1679 | "TAG_PREFIX": cfg.tag_prefix, 1680 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1681 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1682 | } 1683 | ) 1684 | 1685 | cmds["py2exe"] = cmd_py2exe 1686 | 1687 | # we override different "sdist" commands for both environments 1688 | if "sdist" in cmds: 1689 | _sdist = cmds["sdist"] 1690 | elif "setuptools" in sys.modules: 1691 | from setuptools.command.sdist import sdist as _sdist 1692 | else: 1693 | from distutils.command.sdist import sdist as _sdist 1694 | 1695 | class cmd_sdist(_sdist): 1696 | def run(self): 1697 | versions = get_versions() 1698 | self._versioneer_generated_versions = versions 1699 | # unless we update this, the command will keep using the old 1700 | # version 1701 | self.distribution.metadata.version = versions["version"] 1702 | return _sdist.run(self) 1703 | 1704 | def make_release_tree(self, base_dir, files): 1705 | root = get_root() 1706 | cfg = get_config_from_root(root) 1707 | _sdist.make_release_tree(self, base_dir, files) 1708 | # now locate _version.py in the new base_dir directory 1709 | # (remembering that it may be a hardlink) and replace it with an 1710 | # updated value 1711 | target_versionfile = os.path.join(base_dir, cfg.versionfile_source) 1712 
| print("UPDATING %s" % target_versionfile) 1713 | write_to_version_file(target_versionfile, self._versioneer_generated_versions) 1714 | 1715 | cmds["sdist"] = cmd_sdist 1716 | 1717 | return cmds 1718 | 1719 | 1720 | CONFIG_ERROR = """ 1721 | setup.cfg is missing the necessary Versioneer configuration. You need 1722 | a section like: 1723 | 1724 | [versioneer] 1725 | VCS = git 1726 | style = pep440 1727 | versionfile_source = src/myproject/_version.py 1728 | versionfile_build = myproject/_version.py 1729 | tag_prefix = 1730 | parentdir_prefix = myproject- 1731 | 1732 | You will also need to edit your setup.py to use the results: 1733 | 1734 | import versioneer 1735 | setup(version=versioneer.get_version(), 1736 | cmdclass=versioneer.get_cmdclass(), ...) 1737 | 1738 | Please read the docstring in ./versioneer.py for configuration instructions, 1739 | edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. 1740 | """ 1741 | 1742 | SAMPLE_CONFIG = """ 1743 | # See the docstring in versioneer.py for instructions. Note that you must 1744 | # re-run 'versioneer.py setup' after changing this section, and commit the 1745 | # resulting files. 1746 | 1747 | [versioneer] 1748 | #VCS = git 1749 | #style = pep440 1750 | #versionfile_source = 1751 | #versionfile_build = 1752 | #tag_prefix = 1753 | #parentdir_prefix = 1754 | 1755 | """ 1756 | 1757 | INIT_PY_SNIPPET = """ 1758 | from ._version import get_versions 1759 | __version__ = get_versions()['version'] 1760 | del get_versions 1761 | """ 1762 | 1763 | 1764 | def do_setup(): 1765 | """Do main VCS-independent setup function for installing Versioneer.""" 1766 | root = get_root() 1767 | try: 1768 | cfg = get_config_from_root(root) 1769 | except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: 1770 | if isinstance(e, (EnvironmentError, configparser.NoSectionError)): 1771 | print("Adding sample versioneer config to setup.cfg", file=sys.stderr) 1772 | with open(os.path.join(root, "setup.cfg"), "a") as f: 1773 | f.write(SAMPLE_CONFIG) 1774 | print(CONFIG_ERROR, file=sys.stderr) 1775 | return 1 1776 | 1777 | print(" creating %s" % cfg.versionfile_source) 1778 | with open(cfg.versionfile_source, "w") as f: 1779 | LONG = LONG_VERSION_PY[cfg.VCS] 1780 | f.write( 1781 | LONG 1782 | % { 1783 | "DOLLAR": "$", 1784 | "STYLE": cfg.style, 1785 | "TAG_PREFIX": cfg.tag_prefix, 1786 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1787 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1788 | } 1789 | ) 1790 | 1791 | ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") 1792 | if os.path.exists(ipy): 1793 | try: 1794 | with open(ipy, "r") as f: 1795 | old = f.read() 1796 | except EnvironmentError: 1797 | old = "" 1798 | if INIT_PY_SNIPPET not in old: 1799 | print(" appending to %s" % ipy) 1800 | with open(ipy, "a") as f: 1801 | f.write(INIT_PY_SNIPPET) 1802 | else: 1803 | print(" %s unmodified" % ipy) 1804 | else: 1805 | print(" %s doesn't exist, ok" % ipy) 1806 | ipy = None 1807 | 1808 | # Make sure both the top-level "versioneer.py" and versionfile_source 1809 | # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so 1810 | # they'll be copied into source distributions. Pip won't be able to 1811 | # install the package without this. 
1812 | manifest_in = os.path.join(root, "MANIFEST.in") 1813 | simple_includes = set() 1814 | try: 1815 | with open(manifest_in, "r") as f: 1816 | for line in f: 1817 | if line.startswith("include "): 1818 | for include in line.split()[1:]: 1819 | simple_includes.add(include) 1820 | except EnvironmentError: 1821 | pass 1822 | # That doesn't cover everything MANIFEST.in can do 1823 | # (http://docs.python.org/2/distutils/sourcedist.html#commands), so 1824 | # it might give some false negatives. Appending redundant 'include' 1825 | # lines is safe, though. 1826 | if "versioneer.py" not in simple_includes: 1827 | print(" appending 'versioneer.py' to MANIFEST.in") 1828 | with open(manifest_in, "a") as f: 1829 | f.write("include versioneer.py\n") 1830 | else: 1831 | print(" 'versioneer.py' already in MANIFEST.in") 1832 | if cfg.versionfile_source not in simple_includes: 1833 | print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) 1834 | with open(manifest_in, "a") as f: 1835 | f.write("include %s\n" % cfg.versionfile_source) 1836 | else: 1837 | print(" versionfile_source already in MANIFEST.in") 1838 | 1839 | # Make VCS-specific changes. For git, this means creating/changing 1840 | # .gitattributes to mark _version.py for export-subst keyword 1841 | # substitution. 1842 | do_vcs_install(manifest_in, cfg.versionfile_source, ipy) 1843 | return 0 1844 | 1845 | 1846 | def scan_setup_py(): 1847 | """Validate the contents of setup.py against Versioneer's expectations.""" 1848 | found = set() 1849 | setters = False 1850 | errors = 0 1851 | with open("setup.py", "r") as f: 1852 | for line in f.readlines(): 1853 | if "import versioneer" in line: 1854 | found.add("import") 1855 | if "versioneer.get_cmdclass()" in line: 1856 | found.add("cmdclass") 1857 | if "versioneer.get_version()" in line: 1858 | found.add("get_version") 1859 | if "versioneer.VCS" in line: 1860 | setters = True 1861 | if "versioneer.versionfile_source" in line: 1862 | setters = True 1863 | if len(found) != 3: 1864 | print("") 1865 | print("Your setup.py appears to be missing some important items") 1866 | print("(but I might be wrong). Please make sure it has something") 1867 | print("roughly like the following:") 1868 | print("") 1869 | print(" import versioneer") 1870 | print(" setup( version=versioneer.get_version(),") 1871 | print(" cmdclass=versioneer.get_cmdclass(), ...)") 1872 | print("") 1873 | errors += 1 1874 | if setters: 1875 | print("You should remove lines like 'versioneer.VCS = ' and") 1876 | print("'versioneer.versionfile_source = ' . This configuration") 1877 | print("now lives in setup.cfg, and should be removed from setup.py") 1878 | print("") 1879 | errors += 1 1880 | return errors 1881 | 1882 | 1883 | if __name__ == "__main__": 1884 | cmd = sys.argv[1] 1885 | if cmd == "setup": 1886 | errors = do_setup() 1887 | errors += scan_setup_py() 1888 | if errors: 1889 | sys.exit(1) 1890 | --------------------------------------------------------------------------------
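For reference, a minimal sketch (not part of the repository) of how a project consumes the Versioneer API defined above, following the setup.py pattern quoted in CONFIG_ERROR; the package name and example values are hypothetical:

import versioneer
from setuptools import setup

setup(
    name="myproject",  # hypothetical package name
    version=versioneer.get_version(),    # e.g. "0.1.2+3.gabcdef1.dirty" under the default pep440 style
    cmdclass=versioneer.get_cmdclass(),  # wires in the version/build_py/sdist command overrides above
)

# versioneer.get_versions() itself returns a dict of the shape built in render(),
# for example (values hypothetical):
# {"version": "0.1.2", "full-revisionid": "<40-hex-digit sha>", "dirty": False,
#  "error": None, "date": "2021-09-01T12:00:00-0500"}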