├── .cruft.json ├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md ├── codeql-config.yml ├── dependabot.yml └── workflows │ ├── add-to-project.yml │ ├── codeql-analysis.yml │ ├── publish-docs.yml │ ├── release.yml │ ├── static_analysis.yml │ ├── template-sync.yml │ ├── tests.yml │ └── windows_tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── LICENSE ├── MAINTAINERS.md ├── MANIFEST.in ├── README.md ├── docs ├── commands.md ├── gen_blocks_catalog.py ├── gen_examples_catalog.py ├── gen_home_page.py ├── img │ ├── favicon.ico │ └── prefect-logo-mark-solid-white-500.png └── stylesheets │ └── extra.css ├── mkdocs.yml ├── prefect_shell ├── __init__.py ├── _version.py └── commands.py ├── requirements-dev.txt ├── requirements.txt ├── setup.cfg ├── setup.py ├── tests ├── __init__.py ├── conftest.py ├── test_block_standards.py ├── test_commands.py └── test_commands_windows.py └── versioneer.py /.cruft.json: -------------------------------------------------------------------------------- 1 | { 2 | "template": "https://github.com/PrefectHQ/prefect-collection-template", 3 | "commit": "67c8f4005588e20b746b44fc2ae2f58b8923d194", 4 | "context": { 5 | "cookiecutter": { 6 | "full_name": "Prefect Technologies, Inc.", 7 | "email": "help@prefect.io", 8 | "github_organization": "PrefectHQ", 9 | "collection_name": "prefect-shell", 10 | "collection_slug": "prefect_shell", 11 | "collection_short_description": "Prefect tasks and subflows for interacting with shell commands.", 12 | "_copy_without_render": [ 13 | ".github/workflows/*.yml" 14 | ], 15 | "_template": "https://github.com/PrefectHQ/prefect-collection-template" 16 | } 17 | }, 18 | "directory": null, 19 | "checkout": null 20 | } 21 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | prefect_shell/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @PrefectHQ/open-source 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Expectation / Proposal 4 | 5 | # Traceback / Example 6 | 7 | - [ ] I would like to [help contribute](https://PrefectHQ.github.io/prefect-shell/#contributing) a pull request to resolve this! 8 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Closes 6 | 7 | ### Example 8 | 9 | 10 | ### Screenshots 11 | 17 | 18 | ### Checklist 19 | 20 | 21 | - [ ] References any related issue by including "Closes #" or "Closes ". 22 | - If no issue exists and your change is not a small fix, please [create an issue](https://github.com/PrefectHQ/prefect-shell/issues/new/choose) first. 23 | - [ ] Includes tests or only affects documentation. 24 | - [ ] Passes `pre-commit` checks. 25 | - Run `pre-commit install && pre-commit run --all` locally for formatting and linting. 26 | - [ ] Includes screenshots of documentation updates. 27 | - Run `mkdocs serve` to view documentation locally. 
28 | - [ ] Summarizes PR's changes in [CHANGELOG.md](https://github.com/PrefectHQ/prefect-shell/blob/main/CHANGELOG.md) 29 | -------------------------------------------------------------------------------- /.github/codeql-config.yml: -------------------------------------------------------------------------------- 1 | paths-ignore: 2 | - tests/**/test_*.py 3 | - versioneer.py 4 | - prefect_shell/_version.py -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | - package-ecosystem: "pip" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | 9 | - package-ecosystem: "github-actions" 10 | directory: "/" 11 | schedule: 12 | interval: "daily" -------------------------------------------------------------------------------- /.github/workflows/add-to-project.yml: -------------------------------------------------------------------------------- 1 | name: Add issues to integrations board 2 | 3 | on: 4 | issues: 5 | types: 6 | - opened 7 | 8 | jobs: 9 | 10 | add-to-project: 11 | name: Add issue to project 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: tibdex/github-app-token@v1 15 | id: generate-token 16 | name: Generate GitHub token 17 | with: 18 | app_id: ${{ secrets.SYNC_APP_ID }} 19 | private_key: ${{ secrets.SYNC_APP_PRIVATE_KEY }} 20 | 21 | - uses: actions/add-to-project@v0.4.0 22 | with: 23 | project-url: ${{ secrets.ADD_TO_PROJECT_URL }} 24 | github-token: ${{ steps.generate-token.outputs.token }} 25 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: CodeQL 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | analyze: 10 | name: Analyze 11 | runs-on: ubuntu-latest 12 | permissions: 13 | actions: read 14 | contents: read 15 | security-events: write 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | language: 21 | - python 22 | 23 | steps: 24 | - name: Checkout repository 25 | uses: actions/checkout@v4 26 | 27 | - name: Initialize CodeQL 28 | uses: github/codeql-action/init@v2 29 | with: 30 | languages: ${{ matrix.language }} 31 | config-file: ./.github/codeql-config.yml 32 | queries: security-and-quality 33 | 34 | - name: Perform CodeQL Analysis 35 | uses: github/codeql-action/analyze@v2 36 | -------------------------------------------------------------------------------- /.github/workflows/publish-docs.yml: -------------------------------------------------------------------------------- 1 | name: Publish docs 2 | 3 | on: 4 | workflow_dispatch 5 | 6 | jobs: 7 | build-and-publish-docs: 8 | name: Build and publish docs 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - name: Set up Python 3.10 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: "3.10" 18 | cache: pip 19 | cache-dependency-path: requirements*.txt 20 | 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | python -m pip install --upgrade --upgrade-strategy eager -e ".[dev]" 25 | mkdocs build 26 | 27 | - name: Publish docs 28 | uses: JamesIves/github-pages-deploy-action@v4.4.1 29 | with: 30 | branch: docs 31 | folder: site 32 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: 
-------------------------------------------------------------------------------- 1 | name: Build & Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | jobs: 9 | build-release: 10 | name: Build Release 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Set up Python 16 | uses: actions/setup-python@v4 17 | with: 18 | python-version: 3.8 19 | 20 | - name: Install packages 21 | run: | 22 | python -m pip install --upgrade pip build 23 | python -m pip install --upgrade --upgrade-strategy eager -e .[dev] 24 | 25 | - name: Build a binary wheel and a source tarball 26 | run: | 27 | python -m build --sdist --wheel --outdir dist/ 28 | 29 | - name: Publish build artifacts 30 | uses: actions/upload-artifact@v3 31 | with: 32 | name: built-package 33 | path: "./dist" 34 | 35 | publish-release: 36 | name: Publish release to PyPI 37 | needs: [build-release] 38 | environment: "prod" 39 | runs-on: ubuntu-latest 40 | 41 | steps: 42 | - name: Download build artifacts 43 | uses: actions/download-artifact@v3 44 | with: 45 | name: built-package 46 | path: "./dist" 47 | 48 | - name: Publish distribution to PyPI 49 | uses: pypa/gh-action-pypi-publish@release/v1 50 | with: 51 | password: ${{ secrets.PYPI_API_TOKEN }} 52 | verbose: true 53 | 54 | build-and-publish-docs: 55 | name: Build and publish docs 56 | needs: [build-release, publish-release] 57 | runs-on: ubuntu-latest 58 | 59 | steps: 60 | - uses: actions/checkout@v4 61 | 62 | - name: Set up Python 3.10 63 | uses: actions/setup-python@v4 64 | with: 65 | python-version: "3.10" 66 | cache: pip 67 | cache-dependency-path: requirements*.txt 68 | 69 | - name: Build docs 70 | run: | 71 | python -m pip install --upgrade pip 72 | python -m pip install --upgrade --upgrade-strategy eager -e .[dev] 73 | mkdocs build 74 | 75 | - name: Publish docs 76 | uses: JamesIves/github-pages-deploy-action@v4.4.1 77 | with: 78 | branch: docs 79 | folder: site 80 | -------------------------------------------------------------------------------- /.github/workflows/static_analysis.yml: -------------------------------------------------------------------------------- 1 | name: Static analysis 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | pre-commit-checks: 7 | name: Pre-commit checks 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v4 12 | with: 13 | persist-credentials: false 14 | 15 | - name: Set up Python 16 | uses: actions/setup-python@v4 17 | with: 18 | python-version: 3.9 19 | 20 | - name: Install pre-commit 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install pre-commit 24 | 25 | - name: Run pre-commit 26 | run: | 27 | pre-commit run --show-diff-on-failure --color=always --all-files 28 | -------------------------------------------------------------------------------- /.github/workflows/template-sync.yml: -------------------------------------------------------------------------------- 1 | name: Template Synchronization 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | workflow_dispatch: 6 | 7 | jobs: 8 | submit-update-pr: 9 | name: Submit update PR 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | 14 | - name: Set up Python 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: 3.9 18 | 19 | - name: Install cruft 20 | run: pip install "cookiecutter>=1.7.3,<2.0.0" cruft 21 | 22 | - name: Perform updates 23 | run: cruft update -y 24 | 25 | - uses: tibdex/github-app-token@v1 26 | id: generate-token 27 | name: Generate GitHub token 28 | with: 29 | app_id: ${{ 
secrets.SYNC_APP_ID }} 30 | private_key: ${{ secrets.SYNC_APP_PRIVATE_KEY }} 31 | 32 | - name: Submit PR 33 | uses: peter-evans/create-pull-request@v4 34 | with: 35 | commit-message: Updating collection with changes to prefect-collection-template 36 | token: ${{ steps.generate-token.outputs.token }} 37 | branch: sync-with-template 38 | delete-branch: true 39 | title: Sync Collection with changes to prefect-collection-template 40 | body: | 41 | Automated PR created to propagate changes from prefect-collection-template to this collection 42 | 43 | Feel free to make any necessary changes to this PR before merging. 44 | labels: | 45 | template sync 46 | automated pr 47 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | run-tests: 7 | name: Run Tests 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | python-version: 12 | - "3.8" 13 | - "3.9" 14 | - "3.10" 15 | fail-fast: false 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v4 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | cache: pip 24 | cache-dependency-path: requirements*.txt 25 | 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | python -m pip install --upgrade --upgrade-strategy eager -e ".[dev]" 30 | 31 | - name: Run tests 32 | env: 33 | PREFECT_ORION_DATABASE_CONNECTION_URL: "sqlite+aiosqlite:///./orion-tests.db" 34 | run: | 35 | coverage run --branch -m pytest tests -vv 36 | coverage report 37 | 38 | - name: Run mkdocs build 39 | run: | 40 | mkdocs build --verbose --clean 41 | -------------------------------------------------------------------------------- /.github/workflows/windows_tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | run-tests: 7 | name: Run Windows Tests 8 | runs-on: windows-latest 9 | strategy: 10 | matrix: 11 | python-version: 12 | # Prefect Core only tests Windows against 3.9 currently. 
13 | - "3.9" 14 | - "3.10" 15 | fail-fast: false 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v4 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | cache: pip 24 | cache-dependency-path: requirements*.txt 25 | 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | python -m pip install --upgrade --upgrade-strategy eager -e ".[dev]" 30 | 31 | - name: Run tests 32 | env: 33 | PREFECT_ORION_DATABASE_CONNECTION_URL: "sqlite+aiosqlite:///./orion-tests.db" 34 | run: | 35 | pytest tests -vv 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # OS files 132 | .DS_Store 133 | 134 | # VS Code 135 | .vscode 136 | 137 | # Jupyter notebook 138 | *.ipynb 139 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pycqa/isort 3 | rev: 5.12.0 4 | hooks: 5 | - id: isort 6 | language_version: python3 7 | - repo: https://github.com/psf/black 8 | rev: 22.3.0 9 | hooks: 10 | - id: black 11 | language_version: python3 12 | - repo: https://github.com/pycqa/flake8 13 | rev: 4.0.1 14 | hooks: 15 | - id: flake8 16 | - repo: https://github.com/econchick/interrogate 17 | rev: 1.5.0 18 | hooks: 19 | - id: interrogate 20 | args: [-vv] 21 | pass_filenames: false 22 | - repo: https://github.com/fsouza/autoflake8 23 | rev: v0.3.2 24 | hooks: 25 | - id: autoflake8 26 | language_version: python3 27 | args: [ 28 | '--in-place', 29 | ] 30 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## Unreleased 9 | 10 | ### Added 11 | 12 | ### Deprecated 13 | 14 | ### Removed 15 | 16 | ### Fixed 17 | 18 | ### Security 19 | 20 | ## 0.1.5 21 | 22 | Released on February 17, 2023. 23 | 24 | ### Changed 25 | - Change the behavior of the `ShellOperation` `stream_output` parameter. Setting it to `False` will now only turn off the logging and not send `stdout` and `stderr` to `DEVNULL`. The previous behavior can be achieved by manually setting `stdout`/`stderr` to `DEVNULL` through the `open_kwargs` arguments. - [#67](https://github.com/PrefectHQ/prefect-shell/issues/67) 26 | 27 | ### Fixed 28 | - Using `ShellOperation` on Windows - [#70](https://github.com/PrefectHQ/prefect-shell/issues/70) 29 | 30 | ## 0.1.4 31 | 32 | Released on February 2nd, 2023. 33 | 34 | ### Added 35 | 36 | - `ShellOperation` job block - [#55](https://github.com/PrefectHQ/prefect-shell/pull/55) 37 | 38 | ### Fixed 39 | 40 | - If using PowerShell, set exit code to that of command - [#51](https://github.com/PrefectHQ/prefect-shell/pull/51) 41 | 42 | ## 0.1.3 43 | 44 | Released on October 26th, 2022. 45 | 46 | ### Added 47 | 48 | - Added `cwd` keyword argument in `shell_run_command` - [#41](https://github.com/PrefectHQ/prefect-shell/pull/41) 49 | 50 | ### Changed 51 | - Have `shell_run_command` default to `shell="powershell" if sys.platform == "win32" else "bash"` - [#47](https://github.com/PrefectHQ/prefect-shell/pull/47) 52 | 53 | ## 0.1.2 54 | 55 | Released on October 7th, 2022. 
56 | 57 | ### Added 58 | 59 | - Added `extension` keyword argument in `shell_run_command` - [#37](https://github.com/PrefectHQ/prefect-shell/pull/37) 60 | 61 | ### Fixed 62 | 63 | - Use current environment in `shell_run_command` - [#28](https://github.com/PrefectHQ/prefect-shell/pull/28) 64 | - Using `shell_run_command` on Windows environment - [#37](https://github.com/PrefectHQ/prefect-shell/pull/37) 65 | 66 | ## 0.1.1 67 | 68 | Released on August 2nd, 2022. 69 | 70 | ### Changed 71 | 72 | - Improve error visibility on failure - [#17](https://github.com/PrefectHQ/prefect-shell/pull/17) 73 | - Updated tests to be compatible with core Prefect library (v2.0b9) and bumped required version - [#21](https://github.com/PrefectHQ/prefect-shell/pull/21) 74 | 75 | ### Fixed 76 | - Fixed running commands that do not return any output - [#23](https://github.com/PrefectHQ/prefect-shell/pull/23) 77 | 78 | ### Removed 79 | - Removed `utils.run_shell_command`; can be accessed using `commands.run_shell_command.fn` - [#19](https://github.com/PrefectHQ/prefect-shell/pull/19) 80 | 81 | ## 0.1.0 82 | 83 | Released on March 9th, 2022. 84 | 85 | ### Added 86 | 87 | - `shell_run_command` task and utility - [#1](https://github.com/PrefectHQ/prefect-shell/pull/1) 88 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2021 Prefect Technologies, Inc. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | # prefect-shell 2 | 3 | ## Getting Started 4 | 5 | Now that you've bootstrapped a project, follow the steps below to get started developing your Prefect Collection! 6 | 7 | ### Python setup 8 | 9 | Requires an installation of Python 3.7+ 10 | 11 | We recommend using a Python virtual environment manager such as pipenv, conda or virtualenv. 
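If you want a concrete starting point, the snippet below is a minimal sketch that creates a project-local environment with the standard-library `venv` module; it is only an illustration, any of the managers above work equally well, and the `.venv` directory name is an arbitrary choice rather than something the template requires.

```python
# Minimal sketch: create a project-local virtual environment with the stdlib.
# The ".venv" directory name is an illustrative convention, not required by this template.
import venv
from pathlib import Path

env_dir = Path(".venv")
venv.EnvBuilder(with_pip=True).create(env_dir)
print(f"Virtual environment created at {env_dir.resolve()}")
```

Activate the environment with your shell's usual activation script before running the setup steps below.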
 12 | 13 | ### GitHub setup 14 | 15 | Create a Git repository for the newly generated collection and create the first commit: 16 | 17 | ```bash 18 | git init 19 | git add . 20 | git commit -m "Initial commit: project generated by prefect-collection-template" 21 | ``` 22 | 23 | Then, create a new repo following the prompts at: 24 | https://github.com/organizations/PrefectHQ/repositories/new 25 | 26 | Upon creation, push the repository to GitHub: 27 | ```bash 28 | git remote add origin https://github.com/PrefectHQ/prefect-shell.git 29 | git branch -M main 30 | git push -u origin main 31 | ``` 32 | 33 | It's recommended to set up some protection rules for main at: 34 | https://github.com/PrefectHQ/prefect-shell/settings/branches 35 | 36 | - Require a pull request before merging 37 | - Require approvals 38 | 39 | Lastly, [code owners](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) for the repository can be set, like this [example here](https://github.com/PrefectHQ/prefect/blob/master/.github/CODEOWNERS). 40 | 41 | ### Project setup 42 | 43 | To set up your project, run the following: 44 | 45 | ```bash 46 | # Create an editable install of your project 47 | pip install -e ".[dev]" 48 | 49 | # Configure pre-commit hooks 50 | pre-commit install 51 | ``` 52 | 53 | To verify the setup was successful, you can run the following: 54 | 55 | - Run the tests for tasks and flows in the collection: 56 | ```bash 57 | pytest tests 58 | ``` 59 | - Serve the docs with `mkdocs`: 60 | ```bash 61 | mkdocs serve 62 | ``` 63 | 64 | ## Developing tasks and flows 65 | 66 | For information about the use and development of tasks and flows, check out the [flows](https://orion-docs.prefect.io/concepts/flows/) and [tasks](https://orion-docs.prefect.io/concepts/tasks/) concepts docs in the Prefect docs. 67 | 68 | ## Writing documentation 69 | 70 | This collection has been set up with [mkdocs](https://www.mkdocs.org/) for automatically generated documentation. The signatures and docstrings of your tasks and flows will be used to generate documentation for the users of this collection. You can make changes to the structure of the generated documentation by editing the `mkdocs.yml` file in this project. 71 | 72 | Once you have working code, replace the default "Write and run a flow" example in `README.md` to match your collection. 73 | 74 | ## Development lifecycle 75 | 76 | ### CI Pipeline 77 | 78 | This collection comes with [GitHub Actions](https://docs.github.com/en/actions) for testing and linting. To add additional actions, you can add jobs in the `.github/workflows` folder. Upon a pull request, the pipeline will run linting via [`black`](https://black.readthedocs.io/en/stable/), [`flake8`](https://flake8.pycqa.org/en/latest/), [`interrogate`](https://interrogate.readthedocs.io/en/latest/), and unit tests via `pytest` alongside `coverage`. 79 | 80 | `interrogate` will tell you which methods, functions, classes, and modules have docstrings, and which do not--the job has a fail threshold of 95%, meaning that it will fail if more than 5% of the codebase is undocumented. We recommend following the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstring format. 81 | 82 | Similarly, `coverage` ensures that the codebase includes tests--the job has a fail threshold of 80%, meaning that it will fail if more than 20% of the codebase is missing tests.
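As a reference for the docstring format that `interrogate` expects, here is a minimal sketch of a Google-style docstring on a hypothetical task; the `greet` task below is illustrative only and is not part of this collection:

```python
from prefect import task


@task
def greet(name: str) -> str:
    """Return a short greeting for the given name.

    Args:
        name: The name to include in the greeting.

    Returns:
        The formatted greeting string.
    """
    return f"Hello, {name}!"
```

Docstrings written this way are also what `mkdocstrings` picks up when the documentation site is built, so keeping them complete improves both the `interrogate` score and the generated API reference.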
 83 | 84 | ### Track Issues on Project Board 85 | 86 | To automatically add issues to a GitHub Project Board, you'll need a [secret added](https://docs.github.com/en/actions/security-guides/encrypted-secrets#creating-encrypted-secrets-for-an-environment) to the repository. Specifically, a secret named `ADD_TO_PROJECT_URL`, formatted like `https://github.com/orgs//projects/`. 87 | 88 | 89 | ### Package and Publish 90 | 91 | GitHub Actions will handle packaging and publishing of your collection to [PyPI](https://pypi.org/) so other Prefect users can use your collection in their flows. 92 | 93 | To publish to PyPI, you'll need a PyPI account and to generate an API token to authenticate with PyPI when publishing new versions of your collection. The [PyPI documentation](https://pypi.org/help/#apitoken) outlines the steps needed to get an API token. 94 | 95 | Once you've obtained a PyPI API token, [create a GitHub secret](https://docs.github.com/en/actions/security-guides/encrypted-secrets#creating-encrypted-secrets-for-a-repository) named `PYPI_API_TOKEN`. 96 | 97 | To publish a new version of your collection, [create a new GitHub release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release) and tag it with the version that you want to deploy (e.g. v0.3.2). This will trigger a workflow to publish the new version on PyPI and deploy the updated docs to GitHub Pages. 98 | 99 | Upon publishing, a `docs` branch is automatically created. To hook this up to GitHub Pages, simply head over to https://github.com/PrefectHQ/prefect-shell/settings/pages, select `docs` under the dropdown menu, keep the default `/root` folder, `Save`, and upon refresh, you should see a prompt stating "Your site is published at https://PrefectHQ.github.io/prefect-shell". Don't forget to add this link to the repo's "About" section, under "Website", so users can access the docs easily. 100 | 101 | Feel free to [submit your collection](https://orion-docs.prefect.io/collections/overview/#listing-in-the-collections-catalog) to the Prefect [Collections Catalog](https://orion-docs.prefect.io/collections/catalog/)! 102 | 103 | ## Further guidance 104 | 105 | If you run into any issues during the bootstrapping process, feel free to open an issue in the [prefect-collection-template](https://github.com/PrefectHQ/prefect-collection-template) repository. 106 | 107 | If you have any questions or issues while developing your collection, you can find help in either the [Prefect Discourse forum](https://discourse.prefect.io/) or the [Prefect Slack community](https://prefect.io/slack). 108 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Things to always exclude 2 | global-exclude .git* 3 | global-exclude .ipynb_checkpoints 4 | global-exclude *.py[co] 5 | global-exclude __pycache__/** 6 | 7 | # Top-level Config 8 | include versioneer.py 9 | include prefect_shell/_version.py 10 | include LICENSE 11 | include MANIFEST.in 12 | include setup.cfg 13 | include requirements.txt 14 | include requirements-dev.txt 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > [!NOTE] 2 | > Active development of this project has moved into PrefectHQ/prefect. 
The code can be found [here](https://github.com/PrefectHQ/prefect/tree/main/src/integrations/prefect-shell) and documentation [here](https://docs.prefect.io/latest/integrations/prefect-shell). 3 | > Please open issues and PRs against PrefectHQ/prefect instead of this repository. 4 | 5 | 6 | # Integrating shell commands into your dataflow with `prefect-shell` 7 | 8 |

 [Badges: PyPI version and other project badges] 
25 | 26 | Visit the full docs [here](https://PrefectHQ.github.io/prefect-shell) to see additional examples and the API reference. 27 | 28 | The prefect-shell collection makes it easy to execute shell commands in your Prefect flows. Check out the examples below to get started! 29 | 30 | ## Getting Started 31 | 32 | ### Integrate with Prefect flows 33 | 34 | With prefect-shell, you can bring your trusty shell commands (and/or scripts) straight into the Prefect flow party, complete with awesome Prefect logging. 35 | 36 | No more separate logs, just seamless integration. Let's get the shell-abration started! 37 | 38 | ```python 39 | from prefect import flow 40 | from datetime import datetime 41 | from prefect_shell import ShellOperation 42 | 43 | @flow 44 | def download_data(): 45 | today = datetime.today().strftime("%Y%m%d") 46 | 47 | # for short running operations, you can use the `run` method 48 | # which automatically manages the context 49 | ShellOperation( 50 | commands=[ 51 | "mkdir -p data", 52 | "mkdir -p data/${today}" 53 | ], 54 | env={"today": today} 55 | ).run() 56 | 57 | # for long running operations, you can use a context manager 58 | with ShellOperation( 59 | commands=[ 60 | "curl -O https://masie_web.apps.nsidc.org/pub/DATASETS/NOAA/G02135/north/daily/data/N_seaice_extent_daily_v3.0.csv", 61 | ], 62 | working_dir=f"data/{today}", 63 | ) as download_csv_operation: 64 | 65 | # trigger runs the process in the background 66 | download_csv_process = download_csv_operation.trigger() 67 | 68 | # then do other things here in the meantime, like download another file 69 | ... 70 | 71 | # when you're ready, wait for the process to finish 72 | download_csv_process.wait_for_completion() 73 | 74 | # if you'd like to get the output lines, you can use the `fetch_result` method 75 | output_lines = download_csv_process.fetch_result() 76 | 77 | download_data() 78 | ``` 79 | 80 | Outputs: 81 | ```bash 82 | 14:48:16.550 | INFO | prefect.engine - Created flow run 'tentacled-chachalaca' for flow 'download-data' 83 | 14:48:17.977 | INFO | Flow run 'tentacled-chachalaca' - PID 19360 triggered with 2 commands running inside the '.' directory. 84 | 14:48:17.987 | INFO | Flow run 'tentacled-chachalaca' - PID 19360 completed with return code 0. 85 | 14:48:17.994 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 triggered with 1 commands running inside the PosixPath('data/20230201') directory. 86 | 14:48:18.009 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 stream output: 87 | % Total % Received % Xferd Average Speed Time Time Time Current 88 | Dl 89 | 14:48:18.010 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 stream output: 90 | oad Upload Total Spent Left Speed 91 | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 92 | 14:48:18.840 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 stream output: 93 | 11 1630k 11 192k 0 0 229k 0 0:00:07 --:--:-- 0:00:07 231k 94 | 14:48:19.839 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 stream output: 95 | 83 1630k 83 1368k 0 0 745k 0 0:00:02 0:00:01 0:00:01 747k 96 | 14:48:19.993 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 stream output: 97 | 100 1630k 100 1630k 0 0 819k 0 0 98 | 14:48:19.994 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 stream output: 99 | :00:01 0:00:01 --:--:-- 821k 100 | 14:48:19.996 | INFO | Flow run 'tentacled-chachalaca' - PID 19363 completed with return code 0. 101 | 14:48:19.998 | INFO | Flow run 'tentacled-chachalaca' - Successfully closed all open processes. 
102 | 14:48:20.203 | INFO | Flow run 'tentacled-chachalaca' - Finished in state Completed() 103 | ``` 104 | 105 | !!! info "Utilize Previously Saved Blocks" 106 | 107 | You can save commands within a `ShellOperation` block, then reuse them across multiple flows, or even plain Python scripts. 108 | 109 | Save the block with desired commands: 110 | 111 | ```python 112 | from prefect_shell import ShellOperation 113 | 114 | ping_op = ShellOperation(commands=["ping -t 1 prefect.io"]) 115 | ping_op.save("block-name") 116 | ``` 117 | 118 | Load the saved block: 119 | 120 | ```python 121 | from prefect_shell import ShellOperation 122 | 123 | ping_op = ShellOperation.load("block-name") 124 | ``` 125 | 126 | To [view and edit the blocks](https://orion-docs.prefect.io/ui/blocks/) on Prefect UI: 127 | 128 | ```bash 129 | prefect block register -m prefect_shell 130 | ``` 131 | 132 | ## Resources 133 | 134 | For more tips on how to use tasks and flows in a Collection, check out [Using Collections](https://orion-docs.prefect.io/collections/usage/)! 135 | 136 | ### Installation 137 | 138 | Install `prefect-shell` with `pip`: 139 | 140 | ```bash 141 | pip install -U prefect-shell 142 | ``` 143 | 144 | A list of available blocks in `prefect-shell` and their setup instructions can be found [here](https://PrefectHQ.github.io/prefect-shell/blocks_catalog). 145 | 146 | Requires an installation of Python 3.7+. 147 | 148 | We recommend using a Python virtual environment manager such as pipenv, conda or virtualenv. 149 | 150 | These tasks are designed to work with Prefect 2. For more information about how to use Prefect, please refer to the [Prefect documentation](https://orion-docs.prefect.io/). 151 | 152 | ### Feedback 153 | 154 | If you encounter any bugs while using `prefect-shell`, feel free to open an issue in the [prefect-shell](https://github.com/PrefectHQ/prefect-shell) repository. 155 | 156 | If you have any questions or issues while using `prefect-shell`, you can find help in either the [Prefect Discourse forum](https://discourse.prefect.io/) or the [Prefect Slack community](https://prefect.io/slack). 157 | 158 | Feel free to star or watch [`prefect-shell`](https://github.com/PrefectHQ/prefect-shell) for updates too! 159 | 160 | ### Contributing 161 | 162 | If you'd like to help contribute to fix an issue or add a feature to `prefect-shell`, please [propose changes through a pull request from a fork of the repository](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). 163 | 164 | Here are the steps: 165 | 166 | 1. [Fork the repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo#forking-a-repository) 167 | 2. [Clone the forked repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo#cloning-your-forked-repository) 168 | 3. Install the repository and its dependencies: 169 | ``` 170 | pip install -e ".[dev]" 171 | ``` 172 | 4. Make desired changes 173 | 5. Add tests 174 | 6. Insert an entry to [CHANGELOG.md](https://github.com/PrefectHQ/prefect-shell/blob/main/CHANGELOG.md) 175 | 7. Install `pre-commit` to perform quality checks prior to commit: 176 | ``` 177 | pre-commit install 178 | ``` 179 | 8. 
`git commit`, `git push`, and create a pull request 180 | -------------------------------------------------------------------------------- /docs/commands.md: -------------------------------------------------------------------------------- 1 | ::: prefect_shell.commands 2 | -------------------------------------------------------------------------------- /docs/gen_blocks_catalog.py: -------------------------------------------------------------------------------- 1 | """ 2 | Discovers all blocks and generates a list of them in the docs 3 | under the Blocks Catalog heading. 4 | """ 5 | 6 | from pathlib import Path 7 | from textwrap import dedent 8 | 9 | import mkdocs_gen_files 10 | from prefect.blocks.core import Block 11 | from prefect.utilities.dispatch import get_registry_for_type 12 | from prefect.utilities.importtools import from_qualified_name, to_qualified_name 13 | 14 | COLLECTION_SLUG = "prefect_shell" 15 | 16 | 17 | def find_module_blocks(): 18 | blocks = get_registry_for_type(Block) 19 | collection_blocks = [ 20 | block 21 | for block in blocks.values() 22 | if to_qualified_name(block).startswith(COLLECTION_SLUG) 23 | ] 24 | module_blocks = {} 25 | for block in collection_blocks: 26 | block_name = block.__name__ 27 | module_nesting = tuple(to_qualified_name(block).split(".")[1:-1]) 28 | if module_nesting not in module_blocks: 29 | module_blocks[module_nesting] = [] 30 | module_blocks[module_nesting].append(block_name) 31 | return module_blocks 32 | 33 | 34 | def insert_blocks_catalog(generated_file): 35 | module_blocks = find_module_blocks() 36 | if len(module_blocks) == 0: 37 | return 38 | generated_file.write( 39 | dedent( 40 | f""" 41 | Below is a list of Blocks available for registration in 42 | `prefect-shell`. 43 | 44 | To register blocks in this module to 45 | [view and edit them](https://orion-docs.prefect.io/ui/blocks/) 46 | on Prefect Cloud: 47 | ```bash 48 | prefect block register -m {COLLECTION_SLUG} 49 | ``` 50 | """ 51 | ) 52 | ) 53 | generated_file.write( 54 | "Note, to use the `load` method on Blocks, you must already have a block document " # noqa 55 | "[saved through code](https://orion-docs.prefect.io/concepts/blocks/#saving-blocks) " # noqa 56 | "or [saved through the UI](https://orion-docs.prefect.io/ui/blocks/).\n" 57 | ) 58 | for module_nesting, block_names in module_blocks.items(): 59 | module_path = f"{COLLECTION_SLUG}." + " ".join(module_nesting) 60 | module_title = ( 61 | module_path.replace(COLLECTION_SLUG, "") 62 | .lstrip(".") 63 | .replace("_", " ") 64 | .title() 65 | ) 66 | generated_file.write(f"## [{module_title} Module][{module_path}]\n") 67 | for block_name in block_names: 68 | block_obj = from_qualified_name(f"{module_path}.{block_name}") 69 | block_description = block_obj.get_description() 70 | if not block_description.endswith("."): 71 | block_description += "." 
72 | generated_file.write( 73 | f"[{block_name}][{module_path}.{block_name}]\n\n{block_description}\n\n" 74 | ) 75 | generated_file.write( 76 | dedent( 77 | f""" 78 | To load the {block_name}: 79 | ```python 80 | from prefect import flow 81 | from {module_path} import {block_name} 82 | 83 | @flow 84 | def my_flow(): 85 | my_block = {block_name}.load("block-name") 86 | 87 | my_flow() 88 | ``` 89 | """ 90 | ) 91 | ) 92 | generated_file.write( 93 | f"For additional examples, check out the [{module_title} Module]" 94 | f"(../examples_catalog/#{module_nesting[-1]}-module) " 95 | f"under Examples Catalog.\n" 96 | ) 97 | 98 | 99 | blocks_catalog_path = Path("blocks_catalog.md") 100 | with mkdocs_gen_files.open(blocks_catalog_path, "w") as generated_file: 101 | insert_blocks_catalog(generated_file) 102 | -------------------------------------------------------------------------------- /docs/gen_examples_catalog.py: -------------------------------------------------------------------------------- 1 | """ 2 | Locates all the examples in the Collection and puts them in a single page. 3 | """ 4 | 5 | import re 6 | from collections import defaultdict 7 | from inspect import getmembers, isclass, isfunction, ismodule 8 | from pathlib import Path 9 | from textwrap import dedent 10 | from types import ModuleType 11 | from typing import Callable, Set, Union 12 | 13 | import mkdocs_gen_files 14 | from griffe.dataclasses import Docstring 15 | from griffe.docstrings.dataclasses import DocstringSectionKind 16 | from griffe.docstrings.parsers import Parser, parse 17 | from prefect.logging.loggers import disable_logger 18 | from prefect.utilities.importtools import to_qualified_name 19 | 20 | import prefect_shell 21 | 22 | COLLECTION_SLUG = "prefect_shell" 23 | 24 | 25 | def skip_parsing(name: str, obj: Union[ModuleType, Callable], module_nesting: str): 26 | """ 27 | Skips parsing the object if it's a private object or if it's not in the 28 | module nesting, preventing imports from other libraries from being added to the 29 | examples catalog. 30 | """ 31 | try: 32 | wrong_module = not to_qualified_name(obj).startswith(module_nesting) 33 | except AttributeError: 34 | wrong_module = False 35 | return obj.__doc__ is None or name.startswith("_") or wrong_module 36 | 37 | 38 | def skip_block_load_code_example(code_example: str) -> bool: 39 | """ 40 | Skips the code example if it's just showing how to load a Block. 41 | """ 42 | return re.search(r'\.load\("BLOCK_NAME"\)\s*$', code_example.rstrip("`")) 43 | 44 | 45 | def get_code_examples(obj: Union[ModuleType, Callable]) -> Set[str]: 46 | """ 47 | Gathers all the code examples within an object. 
48 | """ 49 | code_examples = set() 50 | with disable_logger("griffe.docstrings.google"): 51 | with disable_logger("griffe.agents.nodes"): 52 | docstring = Docstring(obj.__doc__) 53 | parsed_sections = parse(docstring, Parser.google) 54 | 55 | for section in parsed_sections: 56 | if section.kind == DocstringSectionKind.examples: 57 | code_example = "\n".join( 58 | (part[1] for part in section.as_dict().get("value", [])) 59 | ) 60 | if not skip_block_load_code_example(code_example): 61 | code_examples.add(code_example) 62 | if section.kind == DocstringSectionKind.admonition: 63 | value = section.as_dict().get("value", {}) 64 | if value.get("annotation") == "example": 65 | code_example = value.get("description") 66 | if not skip_block_load_code_example(code_example): 67 | code_examples.add(code_example) 68 | 69 | return code_examples 70 | 71 | 72 | code_examples_grouping = defaultdict(set) 73 | for module_name, module_obj in getmembers(prefect_shell, ismodule): 74 | 75 | module_nesting = f"{COLLECTION_SLUG}.{module_name}" 76 | # find all module examples 77 | if skip_parsing(module_name, module_obj, module_nesting): 78 | continue 79 | code_examples_grouping[module_name] |= get_code_examples(module_obj) 80 | 81 | # find all class and method examples 82 | for class_name, class_obj in getmembers(module_obj, isclass): 83 | if skip_parsing(class_name, class_obj, module_nesting): 84 | continue 85 | code_examples_grouping[module_name] |= get_code_examples(class_obj) 86 | for method_name, method_obj in getmembers(class_obj, isfunction): 87 | if skip_parsing(method_name, method_obj, module_nesting): 88 | continue 89 | code_examples_grouping[module_name] |= get_code_examples(method_obj) 90 | 91 | # find all function examples 92 | for function_name, function_obj in getmembers(module_obj, isfunction): 93 | if skip_parsing(function_name, function_obj, module_nesting): 94 | continue 95 | code_examples_grouping[module_name] |= get_code_examples(function_obj) 96 | 97 | 98 | examples_catalog_path = Path("examples_catalog.md") 99 | with mkdocs_gen_files.open(examples_catalog_path, "w") as generated_file: 100 | generated_file.write( 101 | dedent( 102 | """ 103 | # Examples Catalog 104 | 105 | Below is a list of examples for `prefect-shell`. 106 | """ 107 | ) 108 | ) 109 | for module_name, code_examples in code_examples_grouping.items(): 110 | if len(code_examples) == 0: 111 | continue 112 | module_title = module_name.replace("_", " ").title() 113 | generated_file.write( 114 | f"## [{module_title} Module][{COLLECTION_SLUG}.{module_name}]\n" 115 | ) 116 | for code_example in code_examples: 117 | generated_file.write(code_example + "\n") 118 | -------------------------------------------------------------------------------- /docs/gen_home_page.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copies README.md to index.md. 
3 | """ 4 | 5 | from pathlib import Path 6 | 7 | import mkdocs_gen_files 8 | 9 | # Home page 10 | 11 | readme_path = Path("README.md") 12 | docs_index_path = Path("index.md") 13 | 14 | with open(readme_path, "r") as readme: 15 | with mkdocs_gen_files.open(docs_index_path, "w") as generated_file: 16 | for line in readme: 17 | if line.startswith("Visit the full docs [here]("): 18 | continue # prevent linking to itself 19 | generated_file.write(line) 20 | 21 | mkdocs_gen_files.set_edit_path(Path(docs_index_path), readme_path) 22 | -------------------------------------------------------------------------------- /docs/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrefectHQ/prefect-shell/ae5076526b82c6a2ce8d377a910cc9ac81a0345d/docs/img/favicon.ico -------------------------------------------------------------------------------- /docs/img/prefect-logo-mark-solid-white-500.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PrefectHQ/prefect-shell/ae5076526b82c6a2ce8d377a910cc9ac81a0345d/docs/img/prefect-logo-mark-solid-white-500.png -------------------------------------------------------------------------------- /docs/stylesheets/extra.css: -------------------------------------------------------------------------------- 1 | /* theme */ 2 | :root > * { 3 | /* theme */ 4 | --md-primary-fg-color: #115AF4; 5 | --md-primary-fg-color--light: #115AF4; 6 | --md-primary-fg-color--dark: #115AF4; 7 | } 8 | 9 | /* Table formatting */ 10 | .md-typeset table:not([class]) td { 11 | padding: 0.5em 1.25em; 12 | } 13 | .md-typeset table:not([class]) th { 14 | padding: 0.5em 1.25em; 15 | } 16 | 17 | /* convenience class to keep lines from breaking 18 | useful for wrapping table cell text in a span 19 | to force column width */ 20 | .no-wrap { 21 | white-space: nowrap; 22 | } 23 | 24 | 25 | /* dark mode slate theme */ 26 | /* dark mode code overrides */ 27 | [data-md-color-scheme="slate"] { 28 | --md-code-bg-color: #252a33; 29 | --md-code-fg-color: #eee; 30 | --md-code-hl-color: #3b3d54; 31 | --md-code-hl-name-color: #eee; 32 | } 33 | 34 | /* dark mode link overrides */ 35 | [data-md-color-scheme="slate"] .md-typeset a { 36 | color: var(--blue); 37 | } 38 | 39 | [data-md-color-scheme="slate"] .md-typeset a:hover { 40 | font-weight: bold; 41 | } 42 | 43 | /* dark mode nav overrides */ 44 | [data-md-color-scheme="slate"] .md-nav--primary .md-nav__item--active>.md-nav__link { 45 | color: var(--blue); 46 | font-weight: bold; 47 | } 48 | 49 | [data-md-color-scheme="slate"] .md-nav--primary .md-nav__link--active { 50 | color: var(--blue); 51 | font-weight: bold; 52 | } 53 | 54 | /* dark mode collection catalog overrides */ 55 | [data-md-color-scheme="slate"] .collection-item { 56 | background-color: #3b3d54; 57 | } 58 | 59 | /* dark mode recipe collection overrides */ 60 | [data-md-color-scheme="slate"] .recipe-item { 61 | background-color: #3b3d54; 62 | } 63 | 64 | /* dark mode API doc overrides */ 65 | [data-md-color-scheme="slate"] .prefect-table th { 66 | background-color: #3b3d54; 67 | } -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: prefect-shell 2 | site_url: https://PrefectHQ.github.io/prefect-shell 3 | repo_url: https://github.com/PrefectHQ/prefect-shell 4 | edit_uri: edit/main/docs 5 | theme: 6 | name: material 7 | 
favicon: img/favicon.ico 8 | palette: 9 | - media: "(prefers-color-scheme)" 10 | toggle: 11 | icon: material/brightness-auto 12 | name: Switch to light mode 13 | - media: "(prefers-color-scheme: light)" 14 | accent: blue 15 | primary: blue 16 | scheme: default 17 | toggle: 18 | icon: material/weather-sunny 19 | name: Switch to dark mode 20 | - media: "(prefers-color-scheme: dark)" 21 | accent: blue 22 | primary: blue 23 | scheme: slate 24 | toggle: 25 | icon: material/weather-night 26 | name: Switch to light mode 27 | icon: 28 | repo: fontawesome/brands/github 29 | logo: img/prefect-logo-mark-solid-white-500.png 30 | font: 31 | text: Inter 32 | code: Source Code Pro 33 | features: 34 | - content.code.copy 35 | - content.code.annotate 36 | extra_css: 37 | - stylesheets/extra.css 38 | markdown_extensions: 39 | - admonition 40 | - attr_list 41 | - codehilite 42 | - md_in_html 43 | - meta 44 | - pymdownx.highlight: 45 | use_pygments: true 46 | - pymdownx.superfences 47 | - pymdownx.tabbed 48 | - pymdownx.inlinehilite 49 | - pymdownx.snippets 50 | 51 | plugins: 52 | - search 53 | - gen-files: 54 | scripts: 55 | - docs/gen_home_page.py 56 | - docs/gen_examples_catalog.py 57 | - docs/gen_blocks_catalog.py 58 | - mkdocstrings: 59 | handlers: 60 | python: 61 | options: 62 | show_root_heading: True 63 | show_object_full_path: False 64 | show_category_heading: True 65 | show_signature: False 66 | show_bases: True 67 | heading_level: 1 68 | watch: 69 | - prefect_shell/ 70 | - README.md 71 | 72 | nav: 73 | - Home: index.md 74 | - Blocks Catalog: blocks_catalog.md 75 | - Examples Catalog: examples_catalog.md 76 | - API Reference: 77 | - Commands: commands.md 78 | 79 | extra: 80 | social: 81 | - icon: fontawesome/brands/slack 82 | link: https://www.prefect.io/slack/ 83 | - icon: fontawesome/brands/discourse 84 | link: https://discourse.prefect.io/ 85 | - icon: fontawesome/brands/youtube 86 | link: https://www.youtube.com/c/PrefectIO/videos 87 | - icon: fontawesome/regular/newspaper 88 | link: https://prefect.io/guide/ 89 | - icon: fontawesome/brands/twitter 90 | link: https://twitter.com/PrefectIO 91 | - icon: fontawesome/brands/linkedin 92 | link: https://www.linkedin.com/company/prefect/ 93 | - icon: fontawesome/brands/github 94 | link: https://github.com/PrefectHQ/prefect 95 | - icon: fontawesome/brands/docker 96 | link: https://hub.docker.com/r/prefecthq/prefect/ 97 | - icon: fontawesome/brands/python 98 | link: https://pypi.org/project/prefect/ 99 | analytics: 100 | provider: google 101 | property: G-8CSMBCQDKN 102 | -------------------------------------------------------------------------------- /prefect_shell/__init__.py: -------------------------------------------------------------------------------- 1 | from . import _version 2 | from .commands import shell_run_command, ShellOperation # noqa 3 | 4 | __version__ = _version.get_versions()["version"] 5 | -------------------------------------------------------------------------------- /prefect_shell/_version.py: -------------------------------------------------------------------------------- 1 | # This file helps to compute a version number in source trees obtained from 2 | # git-archive tarball (such as those provided by githubs download-from-tag 3 | # feature). Distribution tarballs (built by setup.py sdist) and build 4 | # directories (produced by setup.py build) will contain a much shorter file 5 | # that just contains the computed version number. 6 | 7 | # This file is released into the public domain. 
Generated by 8 | # versioneer-0.21 (https://github.com/python-versioneer/python-versioneer) 9 | 10 | """Git implementation of _version.py.""" 11 | 12 | import errno 13 | import os 14 | import re 15 | import subprocess 16 | import sys 17 | from typing import Callable, Dict 18 | 19 | 20 | def get_keywords(): 21 | """Get the keywords needed to look up the version information.""" 22 | # these strings will be replaced by git during git-archive. 23 | # setup.py/versioneer.py will grep for the variable names, so they must 24 | # each be defined on a line of their own. _version.py will just call 25 | # get_keywords(). 26 | git_refnames = " (HEAD -> main)" 27 | git_full = "ae5076526b82c6a2ce8d377a910cc9ac81a0345d" 28 | git_date = "2024-04-26 11:33:36 -0500" 29 | keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} 30 | return keywords 31 | 32 | 33 | class VersioneerConfig: 34 | """Container for Versioneer configuration parameters.""" 35 | 36 | 37 | def get_config(): 38 | """Create, populate and return the VersioneerConfig() object.""" 39 | # these strings are filled in when 'setup.py versioneer' creates 40 | # _version.py 41 | cfg = VersioneerConfig() 42 | cfg.VCS = "git" 43 | cfg.style = "pep440" 44 | cfg.tag_prefix = "" 45 | cfg.parentdir_prefix = "" 46 | cfg.versionfile_source = "prefect_shell/_version.py" 47 | cfg.verbose = False 48 | return cfg 49 | 50 | 51 | class NotThisMethod(Exception): 52 | """Exception raised if a method is not valid for the current scenario.""" 53 | 54 | 55 | LONG_VERSION_PY: Dict[str, str] = {} 56 | HANDLERS: Dict[str, Dict[str, Callable]] = {} 57 | 58 | 59 | def register_vcs_handler(vcs, method): # decorator 60 | """Create decorator to mark a method as the handler of a VCS.""" 61 | 62 | def decorate(f): 63 | """Store f in HANDLERS[vcs][method].""" 64 | if vcs not in HANDLERS: 65 | HANDLERS[vcs] = {} 66 | HANDLERS[vcs][method] = f 67 | return f 68 | 69 | return decorate 70 | 71 | 72 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): 73 | """Call the given command(s).""" 74 | assert isinstance(commands, list) 75 | process = None 76 | for command in commands: 77 | try: 78 | dispcmd = str([command] + args) 79 | # remember shell=False, so use git.cmd on windows, not just git 80 | process = subprocess.Popen( 81 | [command] + args, 82 | cwd=cwd, 83 | env=env, 84 | stdout=subprocess.PIPE, 85 | stderr=(subprocess.PIPE if hide_stderr else None), 86 | ) 87 | break 88 | except OSError: 89 | e = sys.exc_info()[1] 90 | if e.errno == errno.ENOENT: 91 | continue 92 | if verbose: 93 | print("unable to run %s" % dispcmd) 94 | print(e) 95 | return None, None 96 | else: 97 | if verbose: 98 | print("unable to find command, tried %s" % (commands,)) 99 | return None, None 100 | stdout = process.communicate()[0].strip().decode() 101 | if process.returncode != 0: 102 | if verbose: 103 | print("unable to run %s (error)" % dispcmd) 104 | print("stdout was %s" % stdout) 105 | return None, process.returncode 106 | return stdout, process.returncode 107 | 108 | 109 | def versions_from_parentdir(parentdir_prefix, root, verbose): 110 | """Try to determine the version from the parent directory name. 111 | 112 | Source tarballs conventionally unpack into a directory that includes both 113 | the project name and a version string. 
We will also support searching up 114 | two directory levels for an appropriately named parent directory 115 | """ 116 | rootdirs = [] 117 | 118 | for _ in range(3): 119 | dirname = os.path.basename(root) 120 | if dirname.startswith(parentdir_prefix): 121 | return { 122 | "version": dirname[len(parentdir_prefix) :], 123 | "full-revisionid": None, 124 | "dirty": False, 125 | "error": None, 126 | "date": None, 127 | } 128 | rootdirs.append(root) 129 | root = os.path.dirname(root) # up a level 130 | 131 | if verbose: 132 | print( 133 | "Tried directories %s but none started with prefix %s" 134 | % (str(rootdirs), parentdir_prefix) 135 | ) 136 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 137 | 138 | 139 | @register_vcs_handler("git", "get_keywords") 140 | def git_get_keywords(versionfile_abs): 141 | """Extract version information from the given file.""" 142 | # the code embedded in _version.py can just fetch the value of these 143 | # keywords. When used from setup.py, we don't want to import _version.py, 144 | # so we do it with a regexp instead. This function is not used from 145 | # _version.py. 146 | keywords = {} 147 | try: 148 | with open(versionfile_abs, "r") as fobj: 149 | for line in fobj: 150 | if line.strip().startswith("git_refnames ="): 151 | mo = re.search(r'=\s*"(.*)"', line) 152 | if mo: 153 | keywords["refnames"] = mo.group(1) 154 | if line.strip().startswith("git_full ="): 155 | mo = re.search(r'=\s*"(.*)"', line) 156 | if mo: 157 | keywords["full"] = mo.group(1) 158 | if line.strip().startswith("git_date ="): 159 | mo = re.search(r'=\s*"(.*)"', line) 160 | if mo: 161 | keywords["date"] = mo.group(1) 162 | except OSError: 163 | pass 164 | return keywords 165 | 166 | 167 | @register_vcs_handler("git", "keywords") 168 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 169 | """Get version information from git keywords.""" 170 | if "refnames" not in keywords: 171 | raise NotThisMethod("Short version file found") 172 | date = keywords.get("date") 173 | if date is not None: 174 | # Use only the last line. Previous lines may contain GPG signature 175 | # information. 176 | date = date.splitlines()[-1] 177 | 178 | # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant 179 | # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 180 | # -like" string, which we must then edit to make compliant), because 181 | # it's been around since git-1.5.3, and it's too difficult to 182 | # discover which version we're using, or to work around using an 183 | # older one. 184 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 185 | refnames = keywords["refnames"].strip() 186 | if refnames.startswith("$Format"): 187 | if verbose: 188 | print("keywords are unexpanded, not using") 189 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 190 | refs = {r.strip() for r in refnames.strip("()").split(",")} 191 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 192 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 193 | TAG = "tag: " 194 | tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} 195 | if not tags: 196 | # Either we're using git < 1.8.3, or there really are no tags. We use 197 | # a heuristic: assume all version tags have a digit. The old git %d 198 | # expansion behaves like git log --decorate=short and strips out the 199 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 200 | # between branches and tags. 
By ignoring refnames without digits, we 201 | # filter out many common branch names like "release" and 202 | # "stabilization", as well as "HEAD" and "master". 203 | tags = {r for r in refs if re.search(r"\d", r)} 204 | if verbose: 205 | print("discarding '%s', no digits" % ",".join(refs - tags)) 206 | if verbose: 207 | print("likely tags: %s" % ",".join(sorted(tags))) 208 | for ref in sorted(tags): 209 | # sorting will prefer e.g. "2.0" over "2.0rc1" 210 | if ref.startswith(tag_prefix): 211 | r = ref[len(tag_prefix) :] 212 | # Filter out refs that exactly match prefix or that don't start 213 | # with a number once the prefix is stripped (mostly a concern 214 | # when prefix is '') 215 | if not re.match(r"\d", r): 216 | continue 217 | if verbose: 218 | print("picking %s" % r) 219 | return { 220 | "version": r, 221 | "full-revisionid": keywords["full"].strip(), 222 | "dirty": False, 223 | "error": None, 224 | "date": date, 225 | } 226 | # no suitable tags, so version is "0+unknown", but full hex is still there 227 | if verbose: 228 | print("no suitable tags, using unknown + full revision id") 229 | return { 230 | "version": "0+unknown", 231 | "full-revisionid": keywords["full"].strip(), 232 | "dirty": False, 233 | "error": "no suitable tags", 234 | "date": None, 235 | } 236 | 237 | 238 | @register_vcs_handler("git", "pieces_from_vcs") 239 | def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): 240 | """Get version from 'git describe' in the root of the source tree. 241 | 242 | This only gets called if the git-archive 'subst' keywords were *not* 243 | expanded, and _version.py hasn't already been rewritten with a short 244 | version string, meaning we're inside a checked out source tree. 245 | """ 246 | GITS = ["git"] 247 | TAG_PREFIX_REGEX = "*" 248 | if sys.platform == "win32": 249 | GITS = ["git.cmd", "git.exe"] 250 | TAG_PREFIX_REGEX = r"\*" 251 | 252 | _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) 253 | if rc != 0: 254 | if verbose: 255 | print("Directory %s not under git control" % root) 256 | raise NotThisMethod("'git rev-parse --git-dir' returned error") 257 | 258 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] 259 | # if there isn't one, this yields HEX[-dirty] (no NUM) 260 | describe_out, rc = runner( 261 | GITS, 262 | [ 263 | "describe", 264 | "--tags", 265 | "--dirty", 266 | "--always", 267 | "--long", 268 | "--match", 269 | "%s%s" % (tag_prefix, TAG_PREFIX_REGEX), 270 | ], 271 | cwd=root, 272 | ) 273 | # --long was added in git-1.5.5 274 | if describe_out is None: 275 | raise NotThisMethod("'git describe' failed") 276 | describe_out = describe_out.strip() 277 | full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) 278 | if full_out is None: 279 | raise NotThisMethod("'git rev-parse' failed") 280 | full_out = full_out.strip() 281 | 282 | pieces = {} 283 | pieces["long"] = full_out 284 | pieces["short"] = full_out[:7] # maybe improved later 285 | pieces["error"] = None 286 | 287 | branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) 288 | # --abbrev-ref was added in git-1.6.3 289 | if rc != 0 or branch_name is None: 290 | raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") 291 | branch_name = branch_name.strip() 292 | 293 | if branch_name == "HEAD": 294 | # If we aren't exactly on a branch, pick a branch which represents 295 | # the current commit. If all else fails, we are on a branchless 296 | # commit. 
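        # Illustrative note (an added comment, not from upstream versioneer): on a
        # detached HEAD, `git branch --contains` typically prints something like
        #   * (HEAD detached at 1a2b3c4)
        #     main
        # so the code below drops the parenthesised first entry and strips the
        # two-character prefix from each remaining branch name.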
297 | branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) 298 | # --contains was added in git-1.5.4 299 | if rc != 0 or branches is None: 300 | raise NotThisMethod("'git branch --contains' returned error") 301 | branches = branches.split("\n") 302 | 303 | # Remove the first line if we're running detached 304 | if "(" in branches[0]: 305 | branches.pop(0) 306 | 307 | # Strip off the leading "* " from the list of branches. 308 | branches = [branch[2:] for branch in branches] 309 | if "master" in branches: 310 | branch_name = "master" 311 | elif not branches: 312 | branch_name = None 313 | else: 314 | # Pick the first branch that is returned. Good or bad. 315 | branch_name = branches[0] 316 | 317 | pieces["branch"] = branch_name 318 | 319 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 320 | # TAG might have hyphens. 321 | git_describe = describe_out 322 | 323 | # look for -dirty suffix 324 | dirty = git_describe.endswith("-dirty") 325 | pieces["dirty"] = dirty 326 | if dirty: 327 | git_describe = git_describe[: git_describe.rindex("-dirty")] 328 | 329 | # now we have TAG-NUM-gHEX or HEX 330 | 331 | if "-" in git_describe: 332 | # TAG-NUM-gHEX 333 | mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) 334 | if not mo: 335 | # unparsable. Maybe git-describe is misbehaving? 336 | pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out 337 | return pieces 338 | 339 | # tag 340 | full_tag = mo.group(1) 341 | if not full_tag.startswith(tag_prefix): 342 | if verbose: 343 | fmt = "tag '%s' doesn't start with prefix '%s'" 344 | print(fmt % (full_tag, tag_prefix)) 345 | pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( 346 | full_tag, 347 | tag_prefix, 348 | ) 349 | return pieces 350 | pieces["closest-tag"] = full_tag[len(tag_prefix) :] 351 | 352 | # distance: number of commits since tag 353 | pieces["distance"] = int(mo.group(2)) 354 | 355 | # commit: short hex revision ID 356 | pieces["short"] = mo.group(3) 357 | 358 | else: 359 | # HEX: no tags 360 | pieces["closest-tag"] = None 361 | count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) 362 | pieces["distance"] = int(count_out) # total number of commits 363 | 364 | # commit date: see ISO-8601 comment in git_versions_from_keywords() 365 | date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() 366 | # Use only the last line. Previous lines may contain GPG signature 367 | # information. 368 | date = date.splitlines()[-1] 369 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 370 | 371 | return pieces 372 | 373 | 374 | def plus_or_dot(pieces): 375 | """Return a + if we don't already have one, else return a .""" 376 | if "+" in pieces.get("closest-tag", ""): 377 | return "." 378 | return "+" 379 | 380 | 381 | def render_pep440(pieces): 382 | """Build up version string, with post-release "local version identifier". 383 | 384 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 385 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 386 | 387 | Exceptions: 388 | 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] 389 | """ 390 | if pieces["closest-tag"]: 391 | rendered = pieces["closest-tag"] 392 | if pieces["distance"] or pieces["dirty"]: 393 | rendered += plus_or_dot(pieces) 394 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 395 | if pieces["dirty"]: 396 | rendered += ".dirty" 397 | else: 398 | # exception #1 399 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) 400 | if pieces["dirty"]: 401 | rendered += ".dirty" 402 | return rendered 403 | 404 | 405 | def render_pep440_branch(pieces): 406 | """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . 407 | 408 | The ".dev0" means not master branch. Note that .dev0 sorts backwards 409 | (a feature branch will appear "older" than the master branch). 410 | 411 | Exceptions: 412 | 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] 413 | """ 414 | if pieces["closest-tag"]: 415 | rendered = pieces["closest-tag"] 416 | if pieces["distance"] or pieces["dirty"]: 417 | if pieces["branch"] != "master": 418 | rendered += ".dev0" 419 | rendered += plus_or_dot(pieces) 420 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 421 | if pieces["dirty"]: 422 | rendered += ".dirty" 423 | else: 424 | # exception #1 425 | rendered = "0" 426 | if pieces["branch"] != "master": 427 | rendered += ".dev0" 428 | rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) 429 | if pieces["dirty"]: 430 | rendered += ".dirty" 431 | return rendered 432 | 433 | 434 | def pep440_split_post(ver): 435 | """Split pep440 version string at the post-release segment. 436 | 437 | Returns the release segments before the post-release and the 438 | post-release version number (or -1 if no post-release segment is present). 439 | """ 440 | vc = str.split(ver, ".post") 441 | return vc[0], int(vc[1] or 0) if len(vc) == 2 else None 442 | 443 | 444 | def render_pep440_pre(pieces): 445 | """TAG[.postN.devDISTANCE] -- No -dirty. 446 | 447 | Exceptions: 448 | 1: no tags. 0.post0.devDISTANCE 449 | """ 450 | if pieces["closest-tag"]: 451 | if pieces["distance"]: 452 | # update the post release segment 453 | tag_version, post_version = pep440_split_post(pieces["closest-tag"]) 454 | rendered = tag_version 455 | if post_version is not None: 456 | rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) 457 | else: 458 | rendered += ".post0.dev%d" % (pieces["distance"]) 459 | else: 460 | # no commits, use the tag as the version 461 | rendered = pieces["closest-tag"] 462 | else: 463 | # exception #1 464 | rendered = "0.post0.dev%d" % pieces["distance"] 465 | return rendered 466 | 467 | 468 | def render_pep440_post(pieces): 469 | """TAG[.postDISTANCE[.dev0]+gHEX] . 470 | 471 | The ".dev0" means dirty. Note that .dev0 sorts backwards 472 | (a dirty tree will appear "older" than the corresponding clean one), 473 | but you shouldn't be releasing software with -dirty anyways. 474 | 475 | Exceptions: 476 | 1: no tags. 
0.postDISTANCE[.dev0] 477 | """ 478 | if pieces["closest-tag"]: 479 | rendered = pieces["closest-tag"] 480 | if pieces["distance"] or pieces["dirty"]: 481 | rendered += ".post%d" % pieces["distance"] 482 | if pieces["dirty"]: 483 | rendered += ".dev0" 484 | rendered += plus_or_dot(pieces) 485 | rendered += "g%s" % pieces["short"] 486 | else: 487 | # exception #1 488 | rendered = "0.post%d" % pieces["distance"] 489 | if pieces["dirty"]: 490 | rendered += ".dev0" 491 | rendered += "+g%s" % pieces["short"] 492 | return rendered 493 | 494 | 495 | def render_pep440_post_branch(pieces): 496 | """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . 497 | 498 | The ".dev0" means not master branch. 499 | 500 | Exceptions: 501 | 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] 502 | """ 503 | if pieces["closest-tag"]: 504 | rendered = pieces["closest-tag"] 505 | if pieces["distance"] or pieces["dirty"]: 506 | rendered += ".post%d" % pieces["distance"] 507 | if pieces["branch"] != "master": 508 | rendered += ".dev0" 509 | rendered += plus_or_dot(pieces) 510 | rendered += "g%s" % pieces["short"] 511 | if pieces["dirty"]: 512 | rendered += ".dirty" 513 | else: 514 | # exception #1 515 | rendered = "0.post%d" % pieces["distance"] 516 | if pieces["branch"] != "master": 517 | rendered += ".dev0" 518 | rendered += "+g%s" % pieces["short"] 519 | if pieces["dirty"]: 520 | rendered += ".dirty" 521 | return rendered 522 | 523 | 524 | def render_pep440_old(pieces): 525 | """TAG[.postDISTANCE[.dev0]] . 526 | 527 | The ".dev0" means dirty. 528 | 529 | Exceptions: 530 | 1: no tags. 0.postDISTANCE[.dev0] 531 | """ 532 | if pieces["closest-tag"]: 533 | rendered = pieces["closest-tag"] 534 | if pieces["distance"] or pieces["dirty"]: 535 | rendered += ".post%d" % pieces["distance"] 536 | if pieces["dirty"]: 537 | rendered += ".dev0" 538 | else: 539 | # exception #1 540 | rendered = "0.post%d" % pieces["distance"] 541 | if pieces["dirty"]: 542 | rendered += ".dev0" 543 | return rendered 544 | 545 | 546 | def render_git_describe(pieces): 547 | """TAG[-DISTANCE-gHEX][-dirty]. 548 | 549 | Like 'git describe --tags --dirty --always'. 550 | 551 | Exceptions: 552 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 553 | """ 554 | if pieces["closest-tag"]: 555 | rendered = pieces["closest-tag"] 556 | if pieces["distance"]: 557 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 558 | else: 559 | # exception #1 560 | rendered = pieces["short"] 561 | if pieces["dirty"]: 562 | rendered += "-dirty" 563 | return rendered 564 | 565 | 566 | def render_git_describe_long(pieces): 567 | """TAG-DISTANCE-gHEX[-dirty]. 568 | 569 | Like 'git describe --tags --dirty --always -long'. 570 | The distance/hash is unconditional. 571 | 572 | Exceptions: 573 | 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) 574 | """ 575 | if pieces["closest-tag"]: 576 | rendered = pieces["closest-tag"] 577 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 578 | else: 579 | # exception #1 580 | rendered = pieces["short"] 581 | if pieces["dirty"]: 582 | rendered += "-dirty" 583 | return rendered 584 | 585 | 586 | def render(pieces, style): 587 | """Render the given version pieces into the requested style.""" 588 | if pieces["error"]: 589 | return { 590 | "version": "unknown", 591 | "full-revisionid": pieces.get("long"), 592 | "dirty": None, 593 | "error": pieces["error"], 594 | "date": None, 595 | } 596 | 597 | if not style or style == "default": 598 | style = "pep440" # the default 599 | 600 | if style == "pep440": 601 | rendered = render_pep440(pieces) 602 | elif style == "pep440-branch": 603 | rendered = render_pep440_branch(pieces) 604 | elif style == "pep440-pre": 605 | rendered = render_pep440_pre(pieces) 606 | elif style == "pep440-post": 607 | rendered = render_pep440_post(pieces) 608 | elif style == "pep440-post-branch": 609 | rendered = render_pep440_post_branch(pieces) 610 | elif style == "pep440-old": 611 | rendered = render_pep440_old(pieces) 612 | elif style == "git-describe": 613 | rendered = render_git_describe(pieces) 614 | elif style == "git-describe-long": 615 | rendered = render_git_describe_long(pieces) 616 | else: 617 | raise ValueError("unknown style '%s'" % style) 618 | 619 | return { 620 | "version": rendered, 621 | "full-revisionid": pieces["long"], 622 | "dirty": pieces["dirty"], 623 | "error": None, 624 | "date": pieces.get("date"), 625 | } 626 | 627 | 628 | def get_versions(): 629 | """Get version information or return default if unable to do so.""" 630 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have 631 | # __file__, we can work backwards from there to the root. Some 632 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which 633 | # case we can only use expanded keywords. 634 | 635 | cfg = get_config() 636 | verbose = cfg.verbose 637 | 638 | try: 639 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) 640 | except NotThisMethod: 641 | pass 642 | 643 | try: 644 | root = os.path.realpath(__file__) 645 | # versionfile_source is the relative path from the top of the source 646 | # tree (where the .git directory might live) to this file. Invert 647 | # this to find the root from __file__. 
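        # Illustrative note (added comment): here cfg.versionfile_source is
        # "prefect_shell/_version.py", which splits into two path components, so the
        # loop below calls os.path.dirname() twice -- walking up from
        # .../prefect_shell/_version.py to the repository root.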
648 | for _ in cfg.versionfile_source.split("/"): 649 | root = os.path.dirname(root) 650 | except NameError: 651 | return { 652 | "version": "0+unknown", 653 | "full-revisionid": None, 654 | "dirty": None, 655 | "error": "unable to find root of source tree", 656 | "date": None, 657 | } 658 | 659 | try: 660 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) 661 | return render(pieces, cfg.style) 662 | except NotThisMethod: 663 | pass 664 | 665 | try: 666 | if cfg.parentdir_prefix: 667 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 668 | except NotThisMethod: 669 | pass 670 | 671 | return { 672 | "version": "0+unknown", 673 | "full-revisionid": None, 674 | "dirty": None, 675 | "error": "unable to compute version", 676 | "date": None, 677 | } 678 | -------------------------------------------------------------------------------- /prefect_shell/commands.py: -------------------------------------------------------------------------------- 1 | """Tasks for interacting with shell commands""" 2 | 3 | import asyncio 4 | import logging 5 | import os 6 | import subprocess 7 | import sys 8 | import tempfile 9 | from contextlib import AsyncExitStack, contextmanager 10 | from typing import Any, Dict, Generator, List, Optional, Union 11 | 12 | import anyio 13 | from anyio.abc import Process 14 | from anyio.streams.text import TextReceiveStream 15 | from prefect import task 16 | from prefect.blocks.abstract import JobBlock, JobRun 17 | from prefect.logging import get_run_logger 18 | from prefect.utilities.asyncutils import sync_compatible 19 | from prefect.utilities.processutils import open_process 20 | from pydantic import VERSION as PYDANTIC_VERSION 21 | 22 | if PYDANTIC_VERSION.startswith("2."): 23 | from pydantic.v1 import DirectoryPath, Field, PrivateAttr 24 | else: 25 | from pydantic import DirectoryPath, Field, PrivateAttr 26 | 27 | 28 | @task 29 | async def shell_run_command( 30 | command: str, 31 | env: Optional[dict] = None, 32 | helper_command: Optional[str] = None, 33 | shell: Optional[str] = None, 34 | extension: Optional[str] = None, 35 | return_all: bool = False, 36 | stream_level: int = logging.INFO, 37 | cwd: Union[str, bytes, os.PathLike, None] = None, 38 | ) -> Union[List, str]: 39 | """ 40 | Runs arbitrary shell commands. 41 | 42 | Args: 43 | command: Shell command to be executed; can also be 44 | provided post-initialization by calling this task instance. 45 | env: Dictionary of environment variables to use for 46 | the subprocess; can also be provided at runtime. 47 | helper_command: String representing a shell command, which 48 | will be executed prior to the `command` in the same process. 49 | Can be used to change directories, define helper functions, etc. 50 | for different commands in a flow. 51 | shell: Shell to run the command with. 52 | extension: File extension to be appended to the command to be executed. 53 | return_all: Whether this task should return all lines of stdout as a list, 54 | or just the last line as a string. 55 | stream_level: The logging level of the stream; 56 | defaults to 20 equivalent to `logging.INFO`. 57 | cwd: The working directory context the command will be executed within 58 | 59 | Returns: 60 | If return all, returns all lines as a list; else the last line as a string. 61 | 62 | Example: 63 | List contents in the current directory. 
64 | ```python 65 | from prefect import flow 66 | from prefect_shell import shell_run_command 67 | 68 | @flow 69 | def example_shell_run_command_flow(): 70 | return shell_run_command(command="ls .", return_all=True) 71 | 72 | example_shell_run_command_flow() 73 | ``` 74 | """ 75 | logger = get_run_logger() 76 | 77 | current_env = os.environ.copy() 78 | current_env.update(env or {}) 79 | 80 | if shell is None: 81 | # if shell is not specified: 82 | # use powershell for windows 83 | # use bash for other platforms 84 | shell = "powershell" if sys.platform == "win32" else "bash" 85 | 86 | extension = ".ps1" if shell.lower() == "powershell" else extension 87 | 88 | tmp = tempfile.NamedTemporaryFile(prefix="prefect-", suffix=extension, delete=False) 89 | try: 90 | if helper_command: 91 | tmp.write(helper_command.encode()) 92 | tmp.write(os.linesep.encode()) 93 | tmp.write(command.encode()) 94 | if shell.lower() == "powershell": 95 | # if powershell, set exit code to that of command 96 | tmp.write("\r\nExit $LastExitCode".encode()) 97 | tmp.close() 98 | 99 | shell_command = [shell, tmp.name] 100 | 101 | lines = [] 102 | async with await anyio.open_process( 103 | shell_command, env=current_env, cwd=cwd 104 | ) as process: 105 | async for text in TextReceiveStream(process.stdout): 106 | logger.log(level=stream_level, msg=text) 107 | lines.extend(text.rstrip().split("\n")) 108 | 109 | await process.wait() 110 | if process.returncode: 111 | stderr = "\n".join( 112 | [text async for text in TextReceiveStream(process.stderr)] 113 | ) 114 | if not stderr and lines: 115 | stderr = f"{lines[-1]}\n" 116 | msg = ( 117 | f"Command failed with exit code {process.returncode}:\n" f"{stderr}" 118 | ) 119 | raise RuntimeError(msg) 120 | finally: 121 | if os.path.exists(tmp.name): 122 | os.remove(tmp.name) 123 | 124 | line = lines[-1] if lines else "" 125 | return lines if return_all else line 126 | 127 | 128 | class ShellProcess(JobRun): 129 | """ 130 | A class representing a shell process. 131 | """ 132 | 133 | def __init__(self, shell_operation: "ShellOperation", process: Process): 134 | self._shell_operation = shell_operation 135 | self._process = process 136 | self._output = [] 137 | 138 | @property 139 | def pid(self) -> int: 140 | """ 141 | The PID of the process. 142 | 143 | Returns: 144 | The PID of the process. 145 | """ 146 | return self._process.pid 147 | 148 | @property 149 | def return_code(self) -> Optional[int]: 150 | """ 151 | The return code of the process. 152 | 153 | Returns: 154 | The return code of the process, or `None` if the process is still running. 155 | """ 156 | return self._process.returncode 157 | 158 | async def _capture_output(self, source): 159 | """ 160 | Capture output from source. 161 | """ 162 | async for output in TextReceiveStream(source): 163 | text = output.rstrip() 164 | if self._shell_operation.stream_output: 165 | self.logger.info(f"PID {self.pid} stream output:{os.linesep}{text}") 166 | self._output.extend(text.split(os.linesep)) 167 | 168 | @sync_compatible 169 | async def wait_for_completion(self) -> None: 170 | """ 171 | Wait for the shell command to complete after a process is triggered. 172 | """ 173 | self.logger.debug(f"Waiting for PID {self.pid} to complete.") 174 | 175 | await asyncio.gather( 176 | self._capture_output(self._process.stdout), 177 | self._capture_output(self._process.stderr), 178 | ) 179 | await self._process.wait() 180 | 181 | if self.return_code != 0: 182 | raise RuntimeError( 183 | f"PID {self.pid} failed with return code {self.return_code}." 
184 | ) 185 | self.logger.info( 186 | f"PID {self.pid} completed with return code {self.return_code}." 187 | ) 188 | 189 | @sync_compatible 190 | async def fetch_result(self) -> List[str]: 191 | """ 192 | Retrieve the output of the shell operation. 193 | 194 | Returns: 195 | The lines output from the shell operation as a list. 196 | """ 197 | if self._process.returncode is None: 198 | self.logger.info("Process is still running, result may be incomplete.") 199 | return self._output 200 | 201 | 202 | class ShellOperation(JobBlock): 203 | """ 204 | A block representing a shell operation, containing multiple commands. 205 | 206 | For long-lasting operations, use the trigger method and utilize the block as a 207 | context manager for automatic closure of processes when context is exited. 208 | If not, manually call the close method to close processes. 209 | 210 | For short-lasting operations, use the run method. Context is automatically managed 211 | with this method. 212 | 213 | Attributes: 214 | commands: A list of commands to execute sequentially. 215 | stream_output: Whether to stream output. 216 | env: A dictionary of environment variables to set for the shell operation. 217 | working_dir: The working directory context the commands 218 | will be executed within. 219 | shell: The shell to use to execute the commands. 220 | extension: The extension to use for the temporary file. 221 | if unset defaults to `.ps1` on Windows and `.sh` on other platforms. 222 | 223 | Examples: 224 | Load a configured block: 225 | ```python 226 | from prefect_shell import ShellOperation 227 | 228 | shell_operation = ShellOperation.load("BLOCK_NAME") 229 | ``` 230 | """ 231 | 232 | _block_type_name = "Shell Operation" 233 | _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/0b47a017e1b40381de770c17647c49cdf6388d1c-250x250.png" # noqa: E501 234 | _documentation_url = "https://prefecthq.github.io/prefect-shell/commands/#prefect_shell.commands.ShellOperation" # noqa: E501 235 | 236 | commands: List[str] = Field( 237 | default=..., description="A list of commands to execute sequentially." 238 | ) 239 | stream_output: bool = Field(default=True, description="Whether to stream output.") 240 | env: Dict[str, str] = Field( 241 | default_factory=dict, 242 | title="Environment Variables", 243 | description="Environment variables to use for the subprocess.", 244 | ) 245 | working_dir: DirectoryPath = Field( 246 | default=None, 247 | title="Working Directory", 248 | description=( 249 | "The absolute path to the working directory " 250 | "the command will be executed within." 251 | ), 252 | ) 253 | shell: str = Field( 254 | default=None, 255 | description=( 256 | "The shell to run the command with; if unset, " 257 | "defaults to `powershell` on Windows and `bash` on other platforms." 258 | ), 259 | ) 260 | extension: Optional[str] = Field( 261 | default=None, 262 | description=( 263 | "The extension to use for the temporary file; if unset, " 264 | "defaults to `.ps1` on Windows and `.sh` on other platforms." 265 | ), 266 | ) 267 | 268 | _exit_stack: AsyncExitStack = PrivateAttr( 269 | default_factory=AsyncExitStack, 270 | ) 271 | 272 | @contextmanager 273 | def _prep_trigger_command(self) -> Generator[str, None, None]: 274 | """ 275 | Write the commands to a temporary file, handling all the details of 276 | creating the file and cleaning it up afterwards. Then, return the command 277 | to run the temporary file. 
278 | """ 279 | try: 280 | extension = self.extension or (".ps1" if sys.platform == "win32" else ".sh") 281 | temp_file = tempfile.NamedTemporaryFile( 282 | prefix="prefect-", 283 | suffix=extension, 284 | delete=False, 285 | ) 286 | 287 | joined_commands = os.linesep.join(self.commands) 288 | self.logger.debug( 289 | f"Writing the following commands to " 290 | f"{temp_file.name!r}:{os.linesep}{joined_commands}" 291 | ) 292 | temp_file.write(joined_commands.encode()) 293 | 294 | if self.shell is None and sys.platform == "win32" or extension == ".ps1": 295 | shell = "powershell" 296 | elif self.shell is None: 297 | shell = "bash" 298 | else: 299 | shell = self.shell.lower() 300 | 301 | if shell == "powershell": 302 | # if powershell, set exit code to that of command 303 | temp_file.write("\r\nExit $LastExitCode".encode()) 304 | temp_file.close() 305 | 306 | trigger_command = [shell, temp_file.name] 307 | yield trigger_command 308 | finally: 309 | if os.path.exists(temp_file.name): 310 | os.remove(temp_file.name) 311 | 312 | def _compile_kwargs(self, **open_kwargs: Dict[str, Any]) -> Dict[str, Any]: 313 | """ 314 | Helper method to compile the kwargs for `open_process` so it's not repeated 315 | across the run and trigger methods. 316 | """ 317 | trigger_command = self._exit_stack.enter_context(self._prep_trigger_command()) 318 | input_env = os.environ.copy() 319 | input_env.update(self.env) 320 | input_open_kwargs = dict( 321 | command=trigger_command, 322 | stdout=subprocess.PIPE, 323 | stderr=subprocess.PIPE, 324 | env=input_env, 325 | cwd=self.working_dir, 326 | **open_kwargs, 327 | ) 328 | return input_open_kwargs 329 | 330 | @sync_compatible 331 | async def trigger(self, **open_kwargs: Dict[str, Any]) -> ShellProcess: 332 | """ 333 | Triggers a shell command and returns the shell command run object 334 | to track the execution of the run. This method is ideal for long-lasting 335 | shell commands; for short-lasting shell commands, it is recommended 336 | to use the `run` method instead. 337 | 338 | Args: 339 | **open_kwargs: Additional keyword arguments to pass to `open_process`. 340 | 341 | Returns: 342 | A `ShellProcess` object. 343 | 344 | Examples: 345 | Sleep for 5 seconds and then print "Hello, world!": 346 | ```python 347 | from prefect_shell import ShellOperation 348 | 349 | with ShellOperation( 350 | commands=["sleep 5", "echo 'Hello, world!'"], 351 | ) as shell_operation: 352 | shell_process = shell_operation.trigger() 353 | shell_process.wait_for_completion() 354 | shell_output = shell_process.fetch_result() 355 | ``` 356 | """ 357 | input_open_kwargs = self._compile_kwargs(**open_kwargs) 358 | process = await self._exit_stack.enter_async_context( 359 | open_process(**input_open_kwargs) 360 | ) 361 | num_commands = len(self.commands) 362 | self.logger.info( 363 | f"PID {process.pid} triggered with {num_commands} commands running " 364 | f"inside the {(self.working_dir or '.')!r} directory." 365 | ) 366 | return ShellProcess(shell_operation=self, process=process) 367 | 368 | @sync_compatible 369 | async def run(self, **open_kwargs: Dict[str, Any]) -> List[str]: 370 | """ 371 | Runs a shell command, but unlike the trigger method, 372 | additionally waits and fetches the result directly, automatically managing 373 | the context. This method is ideal for short-lasting shell commands; 374 | for long-lasting shell commands, it is 375 | recommended to use the `trigger` method instead. 376 | 377 | Args: 378 | **open_kwargs: Additional keyword arguments to pass to `open_process`. 
379 | 380 | Returns: 381 | The lines output from the shell command as a list. 382 | 383 | Examples: 384 | Sleep for 5 seconds and then print "Hello, world!": 385 | ```python 386 | from prefect_shell import ShellOperation 387 | 388 | shell_output = ShellOperation( 389 | commands=["sleep 5", "echo 'Hello, world!'"] 390 | ).run() 391 | ``` 392 | """ 393 | input_open_kwargs = self._compile_kwargs(**open_kwargs) 394 | async with open_process(**input_open_kwargs) as process: 395 | shell_process = ShellProcess(shell_operation=self, process=process) 396 | num_commands = len(self.commands) 397 | self.logger.info( 398 | f"PID {process.pid} triggered with {num_commands} commands running " 399 | f"inside the {(self.working_dir or '.')!r} directory." 400 | ) 401 | await shell_process.wait_for_completion() 402 | result = await shell_process.fetch_result() 403 | 404 | return result 405 | 406 | @sync_compatible 407 | async def close(self): 408 | """ 409 | Close the job block. 410 | """ 411 | await self._exit_stack.aclose() 412 | self.logger.info("Successfully closed all open processes.") 413 | 414 | async def aclose(self): 415 | """ 416 | Asynchronous version of the close method. 417 | """ 418 | await self.close() 419 | 420 | async def __aenter__(self) -> "ShellOperation": 421 | """ 422 | Asynchronous version of the enter method. 423 | """ 424 | return self 425 | 426 | async def __aexit__(self, *exc_info): 427 | """ 428 | Asynchronous version of the exit method. 429 | """ 430 | await self.close() 431 | 432 | def __enter__(self) -> "ShellOperation": 433 | """ 434 | Enter the context of the job block. 435 | """ 436 | return self 437 | 438 | def __exit__(self, *exc_info): 439 | """ 440 | Exit the context of the job block. 441 | """ 442 | self.close() 443 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | black 3 | flake8 4 | mypy 5 | mkdocs 6 | mkdocs-material 7 | mkdocstrings[python] 8 | isort 9 | pre-commit 10 | pytest-asyncio 11 | mock; python_version < '3.8' 12 | mkdocs-gen-files 13 | interrogate 14 | coverage 15 | pillow -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | prefect>=2.13.5 2 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = .git,__pycache__,build,dist 3 | per-file-ignores = 4 | setup.py:E501 5 | # Match black line-length 6 | max-line-length = 88 7 | extend-ignore = 8 | E203, 9 | 10 | [isort] 11 | skip = __init__.py 12 | profile = black 13 | skip_gitignore = True 14 | multi_line_output = 3 15 | 16 | [versioneer] 17 | VCS = git 18 | style = pep440 19 | versionfile_source = prefect_shell/_version.py 20 | versionfile_build = prefect_shell/_version.py 21 | tag_prefix = v 22 | parentdir_prefix = 23 | 24 | [tool:interrogate] 25 | ignore-init-module = True 26 | ignore_init_method = True 27 | exclude = prefect_shell/_version.py, tests, setup.py, versioneer.py, docs, site 28 | fail-under = 95 29 | omit-covered-files = True 30 | 31 | [coverage:run] 32 | omit = tests/*, prefect_shell/_version.py 33 | 34 | [coverage:report] 35 | fail_under = 80 36 | show_missing = True 37 | 38 | [tool:pytest] 39 | asyncio_mode = auto 40 | 
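The `[versioneer]` table above is what ties `prefect_shell/_version.py` into packaging: git tags prefixed with `v` are rendered in the `pep440` style. A minimal sketch (not part of the repository) for inspecting the computed version from a git checkout with an editable install; the exact string depends on your checkout's tags:

```python
# Sketch only: inspect what versioneer computes for the current checkout.
from prefect_shell._version import get_versions

info = get_versions()
# Keys mirror the dicts returned by render() and the error paths in _version.py above.
print(info["version"])          # a bare tag on a tagged commit; "TAG+N.gHEX[.dirty]" between tags
print(info["full-revisionid"])  # full commit SHA, or None if it could not be determined
print(info["dirty"], info["error"], info["date"])
```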
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | import versioneer 4 | 5 | with open("requirements.txt") as install_requires_file: 6 | install_requires = install_requires_file.read().strip().split("\n") 7 | 8 | with open("requirements-dev.txt") as dev_requires_file: 9 | dev_requires = dev_requires_file.read().strip().split("\n") 10 | 11 | with open("README.md") as readme_file: 12 | readme = readme_file.read() 13 | 14 | setup( 15 | name="prefect-shell", 16 | description="Prefect tasks and subflows for interacting with shell commands.", 17 | license="Apache License 2.0", 18 | author="Prefect Technologies, Inc.", 19 | author_email="help@prefect.io", 20 | keywords="prefect", 21 | url="https://github.com/PrefectHQ/prefect-shell", 22 | long_description=readme, 23 | long_description_content_type="text/markdown", 24 | version=versioneer.get_version(), 25 | cmdclass=versioneer.get_cmdclass(), 26 | packages=find_packages(exclude=("tests", "docs")), 27 | python_requires=">=3.7", 28 | install_requires=install_requires, 29 | extras_require={"dev": dev_requires}, 30 | entry_points={ 31 | "prefect.collections": [ 32 | "prefect_shell = prefect_shell", 33 | ] 34 | }, 35 | classifiers=[ 36 | "Natural Language :: English", 37 | "Intended Audience :: Developers", 38 | "Intended Audience :: System Administrators", 39 | "License :: OSI Approved :: Apache Software License", 40 | "Programming Language :: Python :: 3 :: Only", 41 | "Programming Language :: Python :: 3.7", 42 | "Programming Language :: Python :: 3.8", 43 | "Programming Language :: Python :: 3.9", 44 | "Programming Language :: Python :: 3.10", 45 | "Topic :: Software Development :: Libraries", 46 | ], 47 | ) 48 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | sys.path.append(".") 4 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture(scope="function") 7 | def prefect_caplog(caplog): 8 | logger = logging.getLogger("prefect") 9 | 10 | # TODO: Determine a better pattern for this and expose for all tests 11 | logger.propagate = True 12 | 13 | try: 14 | yield caplog 15 | finally: 16 | logger.propagate = False 17 | 18 | 19 | @pytest.fixture(scope="function") 20 | def prefect_task_runs_caplog(prefect_caplog): 21 | logger = logging.getLogger("prefect.task_runs") 22 | 23 | # TODO: Determine a better pattern for this and expose for all tests 24 | logger.propagate = True 25 | 26 | try: 27 | yield prefect_caplog 28 | finally: 29 | logger.propagate = False 30 | -------------------------------------------------------------------------------- /tests/test_block_standards.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from prefect.blocks.core import Block 3 | from prefect.testing.standard_test_suites import BlockStandardTestSuite 4 | from prefect.utilities.dispatch import get_registry_for_type 5 | from prefect.utilities.importtools import to_qualified_name 6 | 7 | 8 | def find_module_blocks(): 9 | blocks = get_registry_for_type(Block) 10 | module_blocks = [ 11 | block 12 | for 
block in blocks.values() 13 | if to_qualified_name(block).startswith("prefect_shell") 14 | ] 15 | return module_blocks 16 | 17 | 18 | @pytest.mark.parametrize("block", find_module_blocks()) 19 | class TestAllBlocksAdhereToStandards(BlockStandardTestSuite): 20 | @pytest.fixture 21 | def block(self, block): 22 | return block 23 | -------------------------------------------------------------------------------- /tests/test_commands.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from pathlib import Path 5 | 6 | import pytest 7 | from prefect import flow 8 | from prefect.testing.utilities import AsyncMock 9 | 10 | from prefect_shell.commands import ShellOperation, shell_run_command 11 | 12 | if sys.platform == "win32": 13 | pytest.skip(reason="see test_commands_windows.py", allow_module_level=True) 14 | 15 | 16 | def test_shell_run_command_error(prefect_task_runs_caplog): 17 | @flow 18 | def test_flow(): 19 | return shell_run_command(command="ls this/is/invalid") 20 | 21 | match = "No such file or directory" 22 | with pytest.raises(RuntimeError, match=match): 23 | test_flow() 24 | 25 | assert len(prefect_task_runs_caplog.records) == 7 26 | 27 | 28 | def test_shell_run_command(prefect_task_runs_caplog): 29 | prefect_task_runs_caplog.set_level(logging.INFO) 30 | echo_msg = "_THIS_ IS WORKING!!!!" 31 | 32 | @flow 33 | def test_flow(): 34 | return shell_run_command(command=f"echo {echo_msg}") 35 | 36 | assert test_flow() == echo_msg 37 | assert echo_msg in prefect_task_runs_caplog.text 38 | 39 | 40 | def test_shell_run_command_stream_level(prefect_task_runs_caplog): 41 | prefect_task_runs_caplog.set_level(logging.WARNING) 42 | echo_msg = "_THIS_ IS WORKING!!!!" 43 | 44 | @flow 45 | def test_flow(): 46 | return shell_run_command( 47 | command=f"echo {echo_msg}", 48 | stream_level=logging.WARNING, 49 | ) 50 | 51 | assert test_flow() == echo_msg 52 | assert echo_msg in prefect_task_runs_caplog.text 53 | 54 | 55 | def test_shell_run_command_helper_command(): 56 | @flow 57 | def test_flow(): 58 | return shell_run_command(command="pwd", helper_command="cd $HOME") 59 | 60 | assert test_flow() == os.path.expandvars("$HOME") 61 | 62 | 63 | def test_shell_run_command_cwd(): 64 | @flow 65 | def test_flow(): 66 | return shell_run_command(command="pwd", cwd=Path.home()) 67 | 68 | assert test_flow() == os.fspath(Path.home()) 69 | 70 | 71 | def test_shell_run_command_return_all(): 72 | @flow 73 | def test_flow(): 74 | return shell_run_command(command="echo work! 
&& echo yes!", return_all=True) 75 | 76 | assert test_flow() == ["work!", "yes!"] 77 | 78 | 79 | def test_shell_run_command_no_output(): 80 | @flow 81 | def test_flow(): 82 | return shell_run_command(command="sleep 1") 83 | 84 | assert test_flow() == "" 85 | 86 | 87 | def test_shell_run_command_uses_current_env(): 88 | @flow 89 | def test_flow(): 90 | return shell_run_command(command="echo $HOME") 91 | 92 | assert test_flow() == os.environ["HOME"] 93 | 94 | 95 | def test_shell_run_command_update_current_env(): 96 | @flow 97 | def test_flow(): 98 | return shell_run_command( 99 | command="echo $HOME && echo $TEST_VAR", 100 | env={"TEST_VAR": "test value"}, 101 | return_all=True, 102 | ) 103 | 104 | result = test_flow() 105 | assert result[0] == os.environ["HOME"] 106 | assert result[1] == "test value" 107 | 108 | 109 | class AsyncIter: 110 | def __init__(self, items): 111 | self.items = items 112 | 113 | async def __aiter__(self): 114 | for item in self.items: 115 | yield item 116 | 117 | 118 | @pytest.mark.parametrize("shell", [None, "bash", "zsh"]) 119 | def test_shell_run_command_override_shell(shell, monkeypatch): 120 | open_process_mock = AsyncMock() 121 | stdout_mock = AsyncMock() 122 | stdout_mock.receive.side_effect = lambda: b"received" 123 | open_process_mock.return_value.__aenter__.return_value = AsyncMock( 124 | stdout=stdout_mock 125 | ) 126 | open_process_mock.return_value.__aenter__.return_value.returncode = 0 127 | monkeypatch.setattr("anyio.open_process", open_process_mock) 128 | monkeypatch.setattr("prefect_shell.commands.TextReceiveStream", AsyncIter) 129 | 130 | @flow 131 | def test_flow(): 132 | return shell_run_command( 133 | command="echo 'testing'", 134 | shell=shell, 135 | ) 136 | 137 | test_flow() 138 | assert open_process_mock.call_args_list[0][0][0][0] == shell or "bash" 139 | 140 | 141 | class TestShellOperation: 142 | async def execute(self, op, method): 143 | if method == "run": 144 | return await op.run() 145 | elif method == "trigger": 146 | proc = await op.trigger() 147 | await proc.wait_for_completion() 148 | return await proc.fetch_result() 149 | 150 | @pytest.mark.parametrize("method", ["run", "trigger"]) 151 | async def test_error(self, method): 152 | op = ShellOperation(commands=["ls this/is/invalid"]) 153 | with pytest.raises(RuntimeError, match="return code"): 154 | await self.execute(op, method) 155 | 156 | @pytest.mark.parametrize("method", ["run", "trigger"]) 157 | async def test_output(self, prefect_task_runs_caplog, method): 158 | op = ShellOperation(commands=["echo 'testing\nthe output'", "echo good"]) 159 | assert await self.execute(op, method) == ["testing", "the output", "good"] 160 | records = prefect_task_runs_caplog.records 161 | assert len(records) == 3 162 | assert "triggered with 2 commands running" in records[0].message 163 | assert "stream output:\ntesting\nthe output\ngood" in records[1].message 164 | assert "completed with return code 0" in records[2].message 165 | 166 | @pytest.mark.parametrize("method", ["run", "trigger"]) 167 | async def test_stream_output(self, prefect_task_runs_caplog, method): 168 | # If stream_output is False, there should be output, 169 | # but no logs from the shell process 170 | op = ShellOperation( 171 | commands=["echo 'testing\nthe output'", "echo good"], stream_output=False 172 | ) 173 | assert await self.execute(op, method) == ["testing", "the output", "good"] 174 | records = prefect_task_runs_caplog.records 175 | assert len(records) == 2 176 | assert "triggered with 2 commands running" in 
records[0].message 177 | assert "completed with return code 0" in records[1].message 178 | 179 | @pytest.mark.parametrize("method", ["run", "trigger"]) 180 | async def test_current_env(self, method): 181 | op = ShellOperation(commands=["echo $HOME"]) 182 | assert await self.execute(op, method) == [os.environ["HOME"]] 183 | 184 | @pytest.mark.parametrize("method", ["run", "trigger"]) 185 | async def test_updated_env(self, method): 186 | op = ShellOperation(commands=["echo $HOME"], env={"HOME": "test_home"}) 187 | assert await self.execute(op, method) == ["test_home"] 188 | 189 | @pytest.mark.parametrize("method", ["run", "trigger"]) 190 | async def test_cwd(self, method): 191 | op = ShellOperation(commands=["pwd"], working_dir=Path.home()) 192 | assert await self.execute(op, method) == [os.fspath(Path.home())] 193 | 194 | @pytest.mark.parametrize("method", ["run", "trigger"]) 195 | @pytest.mark.parametrize("shell", [None, "bash", "zsh", "BASH", "ZSH"]) 196 | async def test_updated_shell(self, monkeypatch, method, shell): 197 | open_process_mock = AsyncMock(name="open_process") 198 | stdout_mock = AsyncMock(name="stdout_mock") 199 | stdout_mock.receive.side_effect = lambda: b"received" 200 | open_process_mock.return_value.__aenter__.return_value = AsyncMock( 201 | stdout=stdout_mock 202 | ) 203 | open_process_mock.return_value.returncode = 0 204 | monkeypatch.setattr("anyio.open_process", open_process_mock) 205 | monkeypatch.setattr("prefect_shell.commands.TextReceiveStream", AsyncIter) 206 | 207 | op = ShellOperation(commands=["pwd"], shell=shell, working_dir=Path.home()) 208 | await self.execute(op, method) 209 | assert open_process_mock.call_args_list[0][0][0][0] == (shell or "bash").lower() 210 | 211 | @pytest.mark.parametrize("method", ["run", "trigger"]) 212 | async def test_select_powershell(self, monkeypatch, method): 213 | open_process_mock = AsyncMock(name="open_process") 214 | stdout_mock = AsyncMock(name="stdout_mock") 215 | stdout_mock.receive.side_effect = lambda: b"received" 216 | open_process_mock.return_value.__aenter__.return_value = AsyncMock( 217 | stdout=stdout_mock 218 | ) 219 | open_process_mock.return_value.returncode = 0 220 | monkeypatch.setattr("anyio.open_process", open_process_mock) 221 | monkeypatch.setattr("prefect_shell.commands.TextReceiveStream", AsyncIter) 222 | 223 | await self.execute( 224 | ShellOperation(commands=["echo 'hey'"], extension=".ps1"), method 225 | ) 226 | assert open_process_mock.call_args_list[0][0][0][0] == "powershell" 227 | 228 | async def test_context_manager(self): 229 | async with ShellOperation(commands=["echo 'testing'"]) as op: 230 | proc = await op.trigger() 231 | await proc.wait_for_completion() 232 | await proc.fetch_result() == ["testing"] 233 | 234 | def test_async_context_manager(self): 235 | with ShellOperation(commands=["echo 'testing'"]) as op: 236 | proc = op.trigger() 237 | proc.wait_for_completion() 238 | proc.fetch_result() == ["testing"] 239 | -------------------------------------------------------------------------------- /tests/test_commands_windows.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import logging 3 | import os 4 | import sys 5 | from pathlib import Path 6 | 7 | import pytest 8 | from prefect import flow 9 | from prefect.testing.utilities import AsyncMock 10 | 11 | from prefect_shell.commands import ShellOperation, shell_run_command 12 | 13 | if sys.platform != "win32": 14 | pytest.skip(reason="see test_commands.py", allow_module_level=True) 15 
| 16 | 17 | def test_shell_run_command_error_windows(prefect_task_runs_caplog): 18 | @flow 19 | def test_flow(): 20 | return shell_run_command(command="throw", return_all=True, shell="powershell") 21 | 22 | with pytest.raises(RuntimeError, match="Exception"): 23 | test_flow() 24 | 25 | assert len(prefect_task_runs_caplog.records) == 7 26 | 27 | 28 | def test_shell_run_command_windows(prefect_task_runs_caplog): 29 | prefect_task_runs_caplog.set_level(logging.INFO) 30 | echo_msg = "WORKING" 31 | 32 | @flow 33 | def test_flow(): 34 | msg = shell_run_command( 35 | command=f"echo {echo_msg}", return_all=True, shell="powershell" 36 | ) 37 | return msg 38 | 39 | print(prefect_task_runs_caplog.text) 40 | 41 | assert " ".join(test_flow()) == echo_msg 42 | for record in prefect_task_runs_caplog.records: 43 | if "WORKING" in record.msg: 44 | break # it's in the records 45 | else: 46 | raise AssertionError 47 | 48 | 49 | def test_shell_run_command_stream_level_windows(prefect_task_runs_caplog): 50 | prefect_task_runs_caplog.set_level(logging.WARNING) 51 | echo_msg = "WORKING" 52 | 53 | @flow 54 | def test_flow(): 55 | msg = shell_run_command( 56 | command=f"echo {echo_msg}", 57 | stream_level=logging.WARNING, 58 | return_all=True, 59 | shell="powershell", 60 | ) 61 | return msg 62 | 63 | print(prefect_task_runs_caplog.text) 64 | 65 | assert " ".join(test_flow()) == echo_msg 66 | for record in prefect_task_runs_caplog.records: 67 | if "WORKING" in record.msg: 68 | break # it's in the records 69 | else: 70 | raise AssertionError 71 | 72 | 73 | def test_shell_run_command_helper_command_windows(): 74 | @flow 75 | def test_flow(): 76 | return shell_run_command( 77 | command="Get-Location", 78 | helper_command="cd $env:USERPROFILE", 79 | shell="powershell", 80 | return_all=True, 81 | ) 82 | 83 | assert os.path.expandvars("$USERPROFILE") in test_flow() 84 | 85 | 86 | def test_shell_run_command_cwd(): 87 | @flow 88 | def test_flow(): 89 | return shell_run_command( 90 | command="echo 'work!'; Get-Location", 91 | shell="powershell", 92 | cwd=Path.home(), 93 | return_all=True, 94 | ) 95 | 96 | assert os.fspath(Path.home()) in test_flow() 97 | 98 | 99 | def test_shell_run_command_return_all(): 100 | @flow 101 | def test_flow(): 102 | return shell_run_command( 103 | command="echo 'work!'; echo 'yes!'", return_all=True, shell="powershell" 104 | ) 105 | 106 | result = test_flow() 107 | assert result[0].rstrip() == "work!" 108 | assert result[1].rstrip() == "yes!" 
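The PowerShell-specific behaviour these tests rely on comes from `shell_run_command` appending `Exit $LastExitCode` to the temporary `.ps1` script, so a failing command surfaces as a `RuntimeError`. A minimal usage sketch outside the test suite (assuming a Windows host with PowerShell on the PATH; `Get-PSDrive` is a standard cmdlet):

```python
from prefect import flow
from prefect_shell import shell_run_command


@flow
def disk_report():
    try:
        # Non-zero exit codes propagate as RuntimeError thanks to `Exit $LastExitCode`.
        return shell_run_command(
            command="Get-PSDrive C", shell="powershell", return_all=True
        )
    except RuntimeError as exc:
        print(f"PowerShell command failed: {exc}")
        return []
```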
109 | 110 | 111 | def test_shell_run_command_no_output_windows(): 112 | @flow 113 | def test_flow(): 114 | return shell_run_command(command="sleep 1", shell="powershell") 115 | 116 | assert test_flow() == "" 117 | 118 | 119 | def test_shell_run_command_uses_current_env_windows(): 120 | @flow 121 | def test_flow(): 122 | return shell_run_command( 123 | command="echo $env:USERPROFILE", return_all=True, shell="powershell" 124 | ) 125 | 126 | result = test_flow() 127 | assert result[0].rstrip() == os.environ["USERPROFILE"] 128 | 129 | 130 | def test_shell_run_command_update_current_env_windows(): 131 | @flow 132 | def test_flow(): 133 | return shell_run_command( 134 | command="echo $env:USERPROFILE ; echo $env:TEST_VAR", 135 | helper_command="$env:TEST_VAR = 'test value'", 136 | env={"TEST_VAR": "test value"}, 137 | return_all=True, 138 | shell="powershell", 139 | ) 140 | 141 | result = test_flow() 142 | assert os.environ["USERPROFILE"] in " ".join(result) 143 | assert "test value" in result 144 | 145 | 146 | def test_shell_run_command_ensure_suffix_ps1(): 147 | @flow 148 | def test_flow(): 149 | return shell_run_command(command="1 + 1", shell="powershell", extension=".zzz") 150 | 151 | result = test_flow() 152 | assert result == "2" 153 | 154 | 155 | def test_shell_run_command_ensure_tmp_file_removed(): 156 | @flow 157 | def test_flow(): 158 | return shell_run_command( 159 | command="echo 'clean up after yourself!'", shell="powershell" 160 | ) 161 | 162 | test_flow() 163 | temp_dir = os.environ["TEMP"] 164 | assert len(glob.glob(f"{temp_dir}\\prefect-*.ps1")) == 0 165 | 166 | 167 | def test_shell_run_command_throw_exception_on_nonzero_exit_code(): 168 | @flow 169 | def test_flow(): 170 | return shell_run_command( 171 | command="ping ???", shell="powershell" # ping ??? 
returns exit code 1 172 | ) 173 | 174 | with pytest.raises(RuntimeError, match=r"Command failed with exit code 1"): 175 | test_flow() 176 | 177 | 178 | class AsyncIter: 179 | def __init__(self, items): 180 | self.items = items 181 | 182 | async def __aiter__(self): 183 | for item in self.items: 184 | yield item 185 | 186 | 187 | @pytest.mark.parametrize("shell", [None, "powershell", "powershell.exe"]) 188 | def test_shell_run_command_override_shell(shell, monkeypatch): 189 | open_process_mock = AsyncMock() 190 | stdout_mock = AsyncMock() 191 | stdout_mock.receive.side_effect = lambda: b"received" 192 | open_process_mock.return_value.__aenter__.return_value = AsyncMock( 193 | stdout=stdout_mock 194 | ) 195 | open_process_mock.return_value.__aenter__.return_value.returncode = 0 196 | monkeypatch.setattr("anyio.open_process", open_process_mock) 197 | monkeypatch.setattr("prefect_shell.commands.TextReceiveStream", AsyncIter) 198 | 199 | @flow 200 | def test_flow(): 201 | return shell_run_command( 202 | command="echo 'testing'", 203 | shell=shell, 204 | ) 205 | 206 | test_flow() 207 | assert open_process_mock.call_args_list[0][0][0][0] == (shell or "powershell") 208 | 209 | 210 | class TestShellOperation: 211 | async def execute(self, op, method): 212 | if method == "run": 213 | return await op.run() 214 | elif method == "trigger": 215 | proc = await op.trigger() 216 | await proc.wait_for_completion() 217 | return await proc.fetch_result() 218 | 219 | def test_echo(self): 220 | op = ShellOperation(commands=["echo Hello"]) 221 | assert op.run() == ["Hello"] 222 | 223 | @pytest.mark.parametrize("method", ["run", "trigger"]) 224 | async def test_error(self, method): 225 | op = ShellOperation(commands=["throw"]) 226 | with pytest.raises(RuntimeError, match="return code"): 227 | await self.execute(op, method) 228 | 229 | @pytest.mark.parametrize("method", ["run", "trigger"]) 230 | async def test_output(self, prefect_task_runs_caplog, method): 231 | op = ShellOperation(commands=["echo 'testing'"]) 232 | assert await self.execute(op, method) == ["testing"] 233 | records = prefect_task_runs_caplog.records 234 | assert len(records) == 3 235 | assert "triggered with 1 commands running" in records[0].message 236 | assert "testing" in records[1].message 237 | assert "completed with return code 0" in records[2].message 238 | 239 | @pytest.mark.parametrize("method", ["run", "trigger"]) 240 | async def test_current_env(self, method): 241 | op = ShellOperation(commands=["echo $env:USERPROFILE"]) 242 | assert await self.execute(op, method) == [os.environ["USERPROFILE"]] 243 | 244 | @pytest.mark.parametrize("method", ["run", "trigger"]) 245 | async def test_updated_env(self, method): 246 | op = ShellOperation( 247 | commands=["echo $env:TEST_VAR"], env={"TEST_VAR": "test value"} 248 | ) 249 | assert await self.execute(op, method) == ["test value"] 250 | 251 | @pytest.mark.parametrize("method", ["run", "trigger"]) 252 | async def test_cwd(self, method): 253 | op = ShellOperation(commands=["Get-Location"], working_dir=Path.home()) 254 | assert os.fspath(Path.home()) in (await self.execute(op, method)) 255 | 256 | async def test_async_context_manager(self): 257 | async with ShellOperation(commands=["echo 'testing'"]) as op: 258 | proc = await op.trigger() 259 | await proc.wait_for_completion() 260 | assert await proc.fetch_result() == ["testing"] 261 | 262 | def test_context_manager(self): 263 | with ShellOperation(commands=["echo 'testing'"]) as op: 264 | proc = op.trigger() 265 | proc.wait_for_completion() 266 |
proc.fetch_result() == ["testing", ""] 267 | -------------------------------------------------------------------------------- /versioneer.py: -------------------------------------------------------------------------------- 1 | # Version: 0.21 2 | 3 | """The Versioneer - like a rocketeer, but for versions. 4 | 5 | The Versioneer 6 | ============== 7 | 8 | * like a rocketeer, but for versions! 9 | * https://github.com/python-versioneer/python-versioneer 10 | * Brian Warner 11 | * License: Public Domain 12 | * Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3 13 | * [![Latest Version][pypi-image]][pypi-url] 14 | * [![Build Status][travis-image]][travis-url] 15 | 16 | This is a tool for managing a recorded version number in distutils-based 17 | python projects. The goal is to remove the tedious and error-prone "update 18 | the embedded version string" step from your release process. Making a new 19 | release should be as easy as recording a new tag in your version-control 20 | system, and maybe making new tarballs. 21 | 22 | 23 | ## Quick Install 24 | 25 | * `pip install versioneer` to somewhere in your $PATH 26 | * add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) 27 | * run `versioneer install` in your source tree, commit the results 28 | * Verify version information with `python setup.py version` 29 | 30 | ## Version Identifiers 31 | 32 | Source trees come from a variety of places: 33 | 34 | * a version-control system checkout (mostly used by developers) 35 | * a nightly tarball, produced by build automation 36 | * a snapshot tarball, produced by a web-based VCS browser, like github's 37 | "tarball from tag" feature 38 | * a release tarball, produced by "setup.py sdist", distributed through PyPI 39 | 40 | Within each source tree, the version identifier (either a string or a number, 41 | this tool is format-agnostic) can come from a variety of places: 42 | 43 | * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows 44 | about recent "tags" and an absolute revision-id 45 | * the name of the directory into which the tarball was unpacked 46 | * an expanded VCS keyword ($Id$, etc) 47 | * a `_version.py` created by some earlier build step 48 | 49 | For released software, the version identifier is closely related to a VCS 50 | tag. Some projects use tag names that include more than just the version 51 | string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool 52 | needs to strip the tag prefix to extract the version identifier. For 53 | unreleased software (between tags), the version identifier should provide 54 | enough information to help developers recreate the same tree, while also 55 | giving them an idea of roughly how old the tree is (after version 1.2, before 56 | version 1.3). Many VCS systems can report a description that captures this, 57 | for example `git describe --tags --dirty --always` reports things like 58 | "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 59 | 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has 60 | uncommitted changes). 61 | 62 | The version identifier is used for multiple purposes: 63 | 64 | * to allow the module to self-identify its version: `myproject.__version__` 65 | * to choose a name and prefix for a 'setup.py sdist' tarball 66 | 67 | ## Theory of Operation 68 | 69 | Versioneer works by adding a special `_version.py` file into your source 70 | tree, where your `__init__.py` can import it. 
This `_version.py` knows how to 71 | dynamically ask the VCS tool for version information at import time. 72 | 73 | `_version.py` also contains `$Revision$` markers, and the installation 74 | process marks `_version.py` to have this marker rewritten with a tag name 75 | during the `git archive` command. As a result, generated tarballs will 76 | contain enough information to get the proper version. 77 | 78 | To allow `setup.py` to compute a version too, a `versioneer.py` is added to 79 | the top level of your source tree, next to `setup.py` and the `setup.cfg` 80 | that configures it. This overrides several distutils/setuptools commands to 81 | compute the version when invoked, and changes `setup.py build` and `setup.py 82 | sdist` to replace `_version.py` with a small static file that contains just 83 | the generated version data. 84 | 85 | ## Installation 86 | 87 | See [INSTALL.md](./INSTALL.md) for detailed installation instructions. 88 | 89 | ## Version-String Flavors 90 | 91 | Code which uses Versioneer can learn about its version string at runtime by 92 | importing `_version` from your main `__init__.py` file and running the 93 | `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can 94 | import the top-level `versioneer.py` and run `get_versions()`. 95 | 96 | Both functions return a dictionary with different flavors of version 97 | information: 98 | 99 | * `['version']`: A condensed version string, rendered using the selected 100 | style. This is the most commonly used value for the project's version 101 | string. The default "pep440" style yields strings like `0.11`, 102 | `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section 103 | below for alternative styles. 104 | 105 | * `['full-revisionid']`: detailed revision identifier. For Git, this is the 106 | full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". 107 | 108 | * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the 109 | commit date in ISO 8601 format. This will be None if the date is not 110 | available. 111 | 112 | * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that 113 | this is only accurate if run in a VCS checkout, otherwise it is likely to 114 | be False or None 115 | 116 | * `['error']`: if the version string could not be computed, this will be set 117 | to a string describing the problem, otherwise it will be None. It may be 118 | useful to throw an exception in setup.py if this is set, to avoid e.g. 119 | creating tarballs with a version string of "unknown". 120 | 121 | Some variants are more useful than others. Including `full-revisionid` in a 122 | bug report should allow developers to reconstruct the exact code being tested 123 | (or indicate the presence of local changes that should be shared with the 124 | developers). `version` is suitable for display in an "about" box or a CLI 125 | `--version` output: it can be easily compared against release notes and lists 126 | of bugs fixed in various releases. 127 | 128 | The installer adds the following text to your `__init__.py` to place a basic 129 | version in `YOURPROJECT.__version__`: 130 | 131 | from ._version import get_versions 132 | __version__ = get_versions()['version'] 133 | del get_versions 134 | 135 | ## Styles 136 | 137 | The setup.cfg `style=` configuration controls how the VCS information is 138 | rendered into a version string. 
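As a concrete point of reference, a minimal `[versioneer]` section in setup.cfg
might look like the following sketch (the values are illustrative, using the
hypothetical `myproject` package from above, not any particular project's real
configuration):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix = v
    parentdir_prefix = myproject-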
139 | 140 | The default style, "pep440", produces a PEP440-compliant string, equal to the 141 | un-prefixed tag name for actual releases, and containing an additional "local 142 | version" section with more detail for in-between builds. For Git, this is 143 | TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags 144 | --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the 145 | tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and 146 | that this commit is two revisions ("+2") beyond the "0.11" tag. For released 147 | software (exactly equal to a known tag), the identifier will only contain the 148 | stripped tag, e.g. "0.11". 149 | 150 | Other styles are available. See [details.md](details.md) in the Versioneer 151 | source tree for descriptions. 152 | 153 | ## Debugging 154 | 155 | Versioneer tries to avoid fatal errors: if something goes wrong, it will tend 156 | to return a version of "0+unknown". To investigate the problem, run `setup.py 157 | version`, which will run the version-lookup code in a verbose mode, and will 158 | display the full contents of `get_versions()` (including the `error` string, 159 | which may help identify what went wrong). 160 | 161 | ## Known Limitations 162 | 163 | Some situations are known to cause problems for Versioneer. This details the 164 | most significant ones. More can be found on Github 165 | [issues page](https://github.com/python-versioneer/python-versioneer/issues). 166 | 167 | ### Subprojects 168 | 169 | Versioneer has limited support for source trees in which `setup.py` is not in 170 | the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are 171 | two common reasons why `setup.py` might not be in the root: 172 | 173 | * Source trees which contain multiple subprojects, such as 174 | [Buildbot](https://github.com/buildbot/buildbot), which contains both 175 | "master" and "slave" subprojects, each with their own `setup.py`, 176 | `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI 177 | distributions (and upload multiple independently-installable tarballs). 178 | * Source trees whose main purpose is to contain a C library, but which also 179 | provide bindings to Python (and perhaps other languages) in subdirectories. 180 | 181 | Versioneer will look for `.git` in parent directories, and most operations 182 | should get the right version string. However `pip` and `setuptools` have bugs 183 | and implementation details which frequently cause `pip install .` from a 184 | subproject directory to fail to find a correct version string (so it usually 185 | defaults to `0+unknown`). 186 | 187 | `pip install --editable .` should work correctly. `setup.py install` might 188 | work too. 189 | 190 | Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in 191 | some later version. 192 | 193 | [Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking 194 | this issue. The discussion in 195 | [PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the 196 | issue from the Versioneer side in more detail. 197 | [pip PR#3176](https://github.com/pypa/pip/pull/3176) and 198 | [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve 199 | pip to let Versioneer work correctly. 200 | 201 | Versioneer-0.16 and earlier only looked for a `.git` directory next to the 202 | `setup.cfg`, so subprojects were completely unsupported with those releases. 
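As a rough illustration of the behaviour described above (the package and
directory names are hypothetical):

    # installing from a subproject directory often leaves the version unresolved
    cd subproject/
    pip install .
    python -c "import myproject; print(myproject.__version__)"   # may print 0+unknown

    # an editable install from the same directory usually reports the expected version
    pip install --editable .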
203 | 204 | ### Editable installs with setuptools <= 18.5 205 | 206 | `setup.py develop` and `pip install --editable .` allow you to install a 207 | project into a virtualenv once, then continue editing the source code (and 208 | test) without re-installing after every change. 209 | 210 | "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a 211 | convenient way to specify executable scripts that should be installed along 212 | with the python package. 213 | 214 | These both work as expected when using modern setuptools. When using 215 | setuptools-18.5 or earlier, however, certain operations will cause 216 | `pkg_resources.DistributionNotFound` errors when running the entrypoint 217 | script, which must be resolved by re-installing the package. This happens 218 | when the install happens with one version, then the egg_info data is 219 | regenerated while a different version is checked out. Many setup.py commands 220 | cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into 221 | a different virtualenv), so this can be surprising. 222 | 223 | [Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes 224 | this one, but upgrading to a newer version of setuptools should probably 225 | resolve it. 226 | 227 | 228 | ## Updating Versioneer 229 | 230 | To upgrade your project to a new release of Versioneer, do the following: 231 | 232 | * install the new Versioneer (`pip install -U versioneer` or equivalent) 233 | * edit `setup.cfg`, if necessary, to include any new configuration settings 234 | indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. 235 | * re-run `versioneer install` in your source tree, to replace 236 | `SRC/_version.py` 237 | * commit any changed files 238 | 239 | ## Future Directions 240 | 241 | This tool is designed to make it easily extended to other version-control 242 | systems: all VCS-specific components are in separate directories like 243 | src/git/ . The top-level `versioneer.py` script is assembled from these 244 | components by running make-versioneer.py . In the future, make-versioneer.py 245 | will take a VCS name as an argument, and will construct a version of 246 | `versioneer.py` that is specific to the given VCS. It might also take the 247 | configuration arguments that are currently provided manually during 248 | installation by editing setup.py . Alternatively, it might go the other 249 | direction and include code from all supported VCS systems, reducing the 250 | number of intermediate scripts. 251 | 252 | ## Similar projects 253 | 254 | * [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time 255 | dependency 256 | * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of 257 | versioneer 258 | * [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools 259 | plugin 260 | 261 | ## License 262 | 263 | To make Versioneer easier to embed, all its code is dedicated to the public 264 | domain. The `_version.py` that it creates is also in the public domain. 265 | Specifically, both are released under the Creative Commons "Public Domain 266 | Dedication" license (CC0-1.0), as described in 267 | https://creativecommons.org/publicdomain/zero/1.0/ . 
268 | 269 | [pypi-image]: https://img.shields.io/pypi/v/versioneer.svg 270 | [pypi-url]: https://pypi.python.org/pypi/versioneer/ 271 | [travis-image]: 272 | https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg 273 | [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer 274 | 275 | """ 276 | # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring 277 | # pylint:disable=missing-class-docstring,too-many-branches,too-many-statements 278 | # pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error 279 | # pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with 280 | # pylint:disable=attribute-defined-outside-init,too-many-arguments 281 | 282 | import configparser 283 | import errno 284 | import json 285 | import os 286 | import re 287 | import subprocess 288 | import sys 289 | from typing import Callable, Dict 290 | 291 | 292 | class VersioneerConfig: 293 | """Container for Versioneer configuration parameters.""" 294 | 295 | 296 | def get_root(): 297 | """Get the project root directory. 298 | 299 | We require that all commands are run from the project root, i.e. the 300 | directory that contains setup.py, setup.cfg, and versioneer.py . 301 | """ 302 | root = os.path.realpath(os.path.abspath(os.getcwd())) 303 | setup_py = os.path.join(root, "setup.py") 304 | versioneer_py = os.path.join(root, "versioneer.py") 305 | if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): 306 | # allow 'python path/to/setup.py COMMAND' 307 | root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) 308 | setup_py = os.path.join(root, "setup.py") 309 | versioneer_py = os.path.join(root, "versioneer.py") 310 | if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): 311 | err = ( 312 | "Versioneer was unable to run the project root directory. " 313 | "Versioneer requires setup.py to be executed from " 314 | "its immediate directory (like 'python setup.py COMMAND'), " 315 | "or in a way that lets it use sys.argv[0] to find the root " 316 | "(like 'python path/to/setup.py COMMAND')." 317 | ) 318 | raise VersioneerBadRootError(err) 319 | try: 320 | # Certain runtime workflows (setup.py install/develop in a setuptools 321 | # tree) execute all dependencies in a single python process, so 322 | # "versioneer" may be imported multiple times, and python's shared 323 | # module-import table will cache the first one. So we can't use 324 | # os.path.dirname(__file__), as that will find whichever 325 | # versioneer.py was first imported, even in later projects. 326 | my_path = os.path.realpath(os.path.abspath(__file__)) 327 | me_dir = os.path.normcase(os.path.splitext(my_path)[0]) 328 | vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) 329 | if me_dir != vsr_dir: 330 | print( 331 | "Warning: build in %s is using versioneer.py from %s" 332 | % (os.path.dirname(my_path), versioneer_py) 333 | ) 334 | except NameError: 335 | pass 336 | return root 337 | 338 | 339 | def get_config_from_root(root): 340 | """Read the project setup.cfg file to determine Versioneer config.""" 341 | # This might raise OSError (if setup.cfg is missing), or 342 | # configparser.NoSectionError (if it lacks a [versioneer] section), or 343 | # configparser.NoOptionError (if it lacks "VCS="). See the docstring at 344 | # the top of versioneer.py for instructions on writing your setup.cfg . 
345 | setup_cfg = os.path.join(root, "setup.cfg") 346 | parser = configparser.ConfigParser() 347 | with open(setup_cfg, "r") as cfg_file: 348 | parser.read_file(cfg_file) 349 | VCS = parser.get("versioneer", "VCS") # mandatory 350 | 351 | # Dict-like interface for non-mandatory entries 352 | section = parser["versioneer"] 353 | 354 | cfg = VersioneerConfig() 355 | cfg.VCS = VCS 356 | cfg.style = section.get("style", "") 357 | cfg.versionfile_source = section.get("versionfile_source") 358 | cfg.versionfile_build = section.get("versionfile_build") 359 | cfg.tag_prefix = section.get("tag_prefix") 360 | if cfg.tag_prefix in ("''", '""'): 361 | cfg.tag_prefix = "" 362 | cfg.parentdir_prefix = section.get("parentdir_prefix") 363 | cfg.verbose = section.get("verbose") 364 | return cfg 365 | 366 | 367 | class NotThisMethod(Exception): 368 | """Exception raised if a method is not valid for the current scenario.""" 369 | 370 | 371 | # these dictionaries contain VCS-specific tools 372 | LONG_VERSION_PY: Dict[str, str] = {} 373 | HANDLERS: Dict[str, Dict[str, Callable]] = {} 374 | 375 | 376 | def register_vcs_handler(vcs, method): # decorator 377 | """Create decorator to mark a method as the handler of a VCS.""" 378 | 379 | def decorate(f): 380 | """Store f in HANDLERS[vcs][method].""" 381 | HANDLERS.setdefault(vcs, {})[method] = f 382 | return f 383 | 384 | return decorate 385 | 386 | 387 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): 388 | """Call the given command(s).""" 389 | assert isinstance(commands, list) 390 | process = None 391 | for command in commands: 392 | try: 393 | dispcmd = str([command] + args) 394 | # remember shell=False, so use git.cmd on windows, not just git 395 | process = subprocess.Popen( 396 | [command] + args, 397 | cwd=cwd, 398 | env=env, 399 | stdout=subprocess.PIPE, 400 | stderr=(subprocess.PIPE if hide_stderr else None), 401 | ) 402 | break 403 | except OSError: 404 | e = sys.exc_info()[1] 405 | if e.errno == errno.ENOENT: 406 | continue 407 | if verbose: 408 | print("unable to run %s" % dispcmd) 409 | print(e) 410 | return None, None 411 | else: 412 | if verbose: 413 | print("unable to find command, tried %s" % (commands,)) 414 | return None, None 415 | stdout = process.communicate()[0].strip().decode() 416 | if process.returncode != 0: 417 | if verbose: 418 | print("unable to run %s (error)" % dispcmd) 419 | print("stdout was %s" % stdout) 420 | return None, process.returncode 421 | return stdout, process.returncode 422 | 423 | 424 | LONG_VERSION_PY[ 425 | "git" 426 | ] = r''' 427 | # This file helps to compute a version number in source trees obtained from 428 | # git-archive tarball (such as those provided by githubs download-from-tag 429 | # feature). Distribution tarballs (built by setup.py sdist) and build 430 | # directories (produced by setup.py build) will contain a much shorter file 431 | # that just contains the computed version number. 432 | 433 | # This file is released into the public domain. Generated by 434 | # versioneer-0.21 (https://github.com/python-versioneer/python-versioneer) 435 | 436 | """Git implementation of _version.py.""" 437 | 438 | import errno 439 | import os 440 | import re 441 | import subprocess 442 | import sys 443 | from typing import Callable, Dict 444 | 445 | 446 | def get_keywords(): 447 | """Get the keywords needed to look up the version information.""" 448 | # these strings will be replaced by git during git-archive. 
449 | # setup.py/versioneer.py will grep for the variable names, so they must 450 | # each be defined on a line of their own. _version.py will just call 451 | # get_keywords(). 452 | git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" 453 | git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" 454 | git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" 455 | keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} 456 | return keywords 457 | 458 | 459 | class VersioneerConfig: 460 | """Container for Versioneer configuration parameters.""" 461 | 462 | 463 | def get_config(): 464 | """Create, populate and return the VersioneerConfig() object.""" 465 | # these strings are filled in when 'setup.py versioneer' creates 466 | # _version.py 467 | cfg = VersioneerConfig() 468 | cfg.VCS = "git" 469 | cfg.style = "%(STYLE)s" 470 | cfg.tag_prefix = "%(TAG_PREFIX)s" 471 | cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" 472 | cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" 473 | cfg.verbose = False 474 | return cfg 475 | 476 | 477 | class NotThisMethod(Exception): 478 | """Exception raised if a method is not valid for the current scenario.""" 479 | 480 | 481 | LONG_VERSION_PY: Dict[str, str] = {} 482 | HANDLERS: Dict[str, Dict[str, Callable]] = {} 483 | 484 | 485 | def register_vcs_handler(vcs, method): # decorator 486 | """Create decorator to mark a method as the handler of a VCS.""" 487 | def decorate(f): 488 | """Store f in HANDLERS[vcs][method].""" 489 | if vcs not in HANDLERS: 490 | HANDLERS[vcs] = {} 491 | HANDLERS[vcs][method] = f 492 | return f 493 | return decorate 494 | 495 | 496 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, 497 | env=None): 498 | """Call the given command(s).""" 499 | assert isinstance(commands, list) 500 | process = None 501 | for command in commands: 502 | try: 503 | dispcmd = str([command] + args) 504 | # remember shell=False, so use git.cmd on windows, not just git 505 | process = subprocess.Popen([command] + args, cwd=cwd, env=env, 506 | stdout=subprocess.PIPE, 507 | stderr=(subprocess.PIPE if hide_stderr 508 | else None)) 509 | break 510 | except OSError: 511 | e = sys.exc_info()[1] 512 | if e.errno == errno.ENOENT: 513 | continue 514 | if verbose: 515 | print("unable to run %%s" %% dispcmd) 516 | print(e) 517 | return None, None 518 | else: 519 | if verbose: 520 | print("unable to find command, tried %%s" %% (commands,)) 521 | return None, None 522 | stdout = process.communicate()[0].strip().decode() 523 | if process.returncode != 0: 524 | if verbose: 525 | print("unable to run %%s (error)" %% dispcmd) 526 | print("stdout was %%s" %% stdout) 527 | return None, process.returncode 528 | return stdout, process.returncode 529 | 530 | 531 | def versions_from_parentdir(parentdir_prefix, root, verbose): 532 | """Try to determine the version from the parent directory name. 533 | 534 | Source tarballs conventionally unpack into a directory that includes both 535 | the project name and a version string. 
We will also support searching up 536 | two directory levels for an appropriately named parent directory 537 | """ 538 | rootdirs = [] 539 | 540 | for _ in range(3): 541 | dirname = os.path.basename(root) 542 | if dirname.startswith(parentdir_prefix): 543 | return {"version": dirname[len(parentdir_prefix):], 544 | "full-revisionid": None, 545 | "dirty": False, "error": None, "date": None} 546 | rootdirs.append(root) 547 | root = os.path.dirname(root) # up a level 548 | 549 | if verbose: 550 | print("Tried directories %%s but none started with prefix %%s" %% 551 | (str(rootdirs), parentdir_prefix)) 552 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 553 | 554 | 555 | @register_vcs_handler("git", "get_keywords") 556 | def git_get_keywords(versionfile_abs): 557 | """Extract version information from the given file.""" 558 | # the code embedded in _version.py can just fetch the value of these 559 | # keywords. When used from setup.py, we don't want to import _version.py, 560 | # so we do it with a regexp instead. This function is not used from 561 | # _version.py. 562 | keywords = {} 563 | try: 564 | with open(versionfile_abs, "r") as fobj: 565 | for line in fobj: 566 | if line.strip().startswith("git_refnames ="): 567 | mo = re.search(r'=\s*"(.*)"', line) 568 | if mo: 569 | keywords["refnames"] = mo.group(1) 570 | if line.strip().startswith("git_full ="): 571 | mo = re.search(r'=\s*"(.*)"', line) 572 | if mo: 573 | keywords["full"] = mo.group(1) 574 | if line.strip().startswith("git_date ="): 575 | mo = re.search(r'=\s*"(.*)"', line) 576 | if mo: 577 | keywords["date"] = mo.group(1) 578 | except OSError: 579 | pass 580 | return keywords 581 | 582 | 583 | @register_vcs_handler("git", "keywords") 584 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 585 | """Get version information from git keywords.""" 586 | if "refnames" not in keywords: 587 | raise NotThisMethod("Short version file found") 588 | date = keywords.get("date") 589 | if date is not None: 590 | # Use only the last line. Previous lines may contain GPG signature 591 | # information. 592 | date = date.splitlines()[-1] 593 | 594 | # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant 595 | # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 596 | # -like" string, which we must then edit to make compliant), because 597 | # it's been around since git-1.5.3, and it's too difficult to 598 | # discover which version we're using, or to work around using an 599 | # older one. 600 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 601 | refnames = keywords["refnames"].strip() 602 | if refnames.startswith("$Format"): 603 | if verbose: 604 | print("keywords are unexpanded, not using") 605 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 606 | refs = {r.strip() for r in refnames.strip("()").split(",")} 607 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 608 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 609 | TAG = "tag: " 610 | tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} 611 | if not tags: 612 | # Either we're using git < 1.8.3, or there really are no tags. We use 613 | # a heuristic: assume all version tags have a digit. The old git %%d 614 | # expansion behaves like git log --decorate=short and strips out the 615 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 616 | # between branches and tags. 
By ignoring refnames without digits, we 617 | # filter out many common branch names like "release" and 618 | # "stabilization", as well as "HEAD" and "master". 619 | tags = {r for r in refs if re.search(r'\d', r)} 620 | if verbose: 621 | print("discarding '%%s', no digits" %% ",".join(refs - tags)) 622 | if verbose: 623 | print("likely tags: %%s" %% ",".join(sorted(tags))) 624 | for ref in sorted(tags): 625 | # sorting will prefer e.g. "2.0" over "2.0rc1" 626 | if ref.startswith(tag_prefix): 627 | r = ref[len(tag_prefix):] 628 | # Filter out refs that exactly match prefix or that don't start 629 | # with a number once the prefix is stripped (mostly a concern 630 | # when prefix is '') 631 | if not re.match(r'\d', r): 632 | continue 633 | if verbose: 634 | print("picking %%s" %% r) 635 | return {"version": r, 636 | "full-revisionid": keywords["full"].strip(), 637 | "dirty": False, "error": None, 638 | "date": date} 639 | # no suitable tags, so version is "0+unknown", but full hex is still there 640 | if verbose: 641 | print("no suitable tags, using unknown + full revision id") 642 | return {"version": "0+unknown", 643 | "full-revisionid": keywords["full"].strip(), 644 | "dirty": False, "error": "no suitable tags", "date": None} 645 | 646 | 647 | @register_vcs_handler("git", "pieces_from_vcs") 648 | def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): 649 | """Get version from 'git describe' in the root of the source tree. 650 | 651 | This only gets called if the git-archive 'subst' keywords were *not* 652 | expanded, and _version.py hasn't already been rewritten with a short 653 | version string, meaning we're inside a checked out source tree. 654 | """ 655 | GITS = ["git"] 656 | TAG_PREFIX_REGEX = "*" 657 | if sys.platform == "win32": 658 | GITS = ["git.cmd", "git.exe"] 659 | TAG_PREFIX_REGEX = r"\*" 660 | 661 | _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, 662 | hide_stderr=True) 663 | if rc != 0: 664 | if verbose: 665 | print("Directory %%s not under git control" %% root) 666 | raise NotThisMethod("'git rev-parse --git-dir' returned error") 667 | 668 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] 669 | # if there isn't one, this yields HEX[-dirty] (no NUM) 670 | describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", 671 | "--always", "--long", 672 | "--match", 673 | "%%s%%s" %% (tag_prefix, TAG_PREFIX_REGEX)], 674 | cwd=root) 675 | # --long was added in git-1.5.5 676 | if describe_out is None: 677 | raise NotThisMethod("'git describe' failed") 678 | describe_out = describe_out.strip() 679 | full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) 680 | if full_out is None: 681 | raise NotThisMethod("'git rev-parse' failed") 682 | full_out = full_out.strip() 683 | 684 | pieces = {} 685 | pieces["long"] = full_out 686 | pieces["short"] = full_out[:7] # maybe improved later 687 | pieces["error"] = None 688 | 689 | branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], 690 | cwd=root) 691 | # --abbrev-ref was added in git-1.6.3 692 | if rc != 0 or branch_name is None: 693 | raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") 694 | branch_name = branch_name.strip() 695 | 696 | if branch_name == "HEAD": 697 | # If we aren't exactly on a branch, pick a branch which represents 698 | # the current commit. If all else fails, we are on a branchless 699 | # commit. 
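        # Illustrative example: with a detached HEAD, `git branch --contains`
        # typically prints something like
        #   * (HEAD detached at 1076c97)
        #     master
        #     some-feature-branch
        # The "(HEAD detached ...)" line is dropped below and the two-character
        # prefix ("* " or "  ") is stripped from each remaining branch name.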
700 | branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) 701 | # --contains was added in git-1.5.4 702 | if rc != 0 or branches is None: 703 | raise NotThisMethod("'git branch --contains' returned error") 704 | branches = branches.split("\n") 705 | 706 | # Remove the first line if we're running detached 707 | if "(" in branches[0]: 708 | branches.pop(0) 709 | 710 | # Strip off the leading "* " from the list of branches. 711 | branches = [branch[2:] for branch in branches] 712 | if "master" in branches: 713 | branch_name = "master" 714 | elif not branches: 715 | branch_name = None 716 | else: 717 | # Pick the first branch that is returned. Good or bad. 718 | branch_name = branches[0] 719 | 720 | pieces["branch"] = branch_name 721 | 722 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 723 | # TAG might have hyphens. 724 | git_describe = describe_out 725 | 726 | # look for -dirty suffix 727 | dirty = git_describe.endswith("-dirty") 728 | pieces["dirty"] = dirty 729 | if dirty: 730 | git_describe = git_describe[:git_describe.rindex("-dirty")] 731 | 732 | # now we have TAG-NUM-gHEX or HEX 733 | 734 | if "-" in git_describe: 735 | # TAG-NUM-gHEX 736 | mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) 737 | if not mo: 738 | # unparsable. Maybe git-describe is misbehaving? 739 | pieces["error"] = ("unable to parse git-describe output: '%%s'" 740 | %% describe_out) 741 | return pieces 742 | 743 | # tag 744 | full_tag = mo.group(1) 745 | if not full_tag.startswith(tag_prefix): 746 | if verbose: 747 | fmt = "tag '%%s' doesn't start with prefix '%%s'" 748 | print(fmt %% (full_tag, tag_prefix)) 749 | pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" 750 | %% (full_tag, tag_prefix)) 751 | return pieces 752 | pieces["closest-tag"] = full_tag[len(tag_prefix):] 753 | 754 | # distance: number of commits since tag 755 | pieces["distance"] = int(mo.group(2)) 756 | 757 | # commit: short hex revision ID 758 | pieces["short"] = mo.group(3) 759 | 760 | else: 761 | # HEX: no tags 762 | pieces["closest-tag"] = None 763 | count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) 764 | pieces["distance"] = int(count_out) # total number of commits 765 | 766 | # commit date: see ISO-8601 comment in git_versions_from_keywords() 767 | date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() 768 | # Use only the last line. Previous lines may contain GPG signature 769 | # information. 770 | date = date.splitlines()[-1] 771 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 772 | 773 | return pieces 774 | 775 | 776 | def plus_or_dot(pieces): 777 | """Return a + if we don't already have one, else return a .""" 778 | if "+" in pieces.get("closest-tag", ""): 779 | return "." 780 | return "+" 781 | 782 | 783 | def render_pep440(pieces): 784 | """Build up version string, with post-release "local version identifier". 785 | 786 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 787 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 788 | 789 | Exceptions: 790 | 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] 791 | """ 792 | if pieces["closest-tag"]: 793 | rendered = pieces["closest-tag"] 794 | if pieces["distance"] or pieces["dirty"]: 795 | rendered += plus_or_dot(pieces) 796 | rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) 797 | if pieces["dirty"]: 798 | rendered += ".dirty" 799 | else: 800 | # exception #1 801 | rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], 802 | pieces["short"]) 803 | if pieces["dirty"]: 804 | rendered += ".dirty" 805 | return rendered 806 | 807 | 808 | def render_pep440_branch(pieces): 809 | """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . 810 | 811 | The ".dev0" means not master branch. Note that .dev0 sorts backwards 812 | (a feature branch will appear "older" than the master branch). 813 | 814 | Exceptions: 815 | 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] 816 | """ 817 | if pieces["closest-tag"]: 818 | rendered = pieces["closest-tag"] 819 | if pieces["distance"] or pieces["dirty"]: 820 | if pieces["branch"] != "master": 821 | rendered += ".dev0" 822 | rendered += plus_or_dot(pieces) 823 | rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) 824 | if pieces["dirty"]: 825 | rendered += ".dirty" 826 | else: 827 | # exception #1 828 | rendered = "0" 829 | if pieces["branch"] != "master": 830 | rendered += ".dev0" 831 | rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], 832 | pieces["short"]) 833 | if pieces["dirty"]: 834 | rendered += ".dirty" 835 | return rendered 836 | 837 | 838 | def pep440_split_post(ver): 839 | """Split pep440 version string at the post-release segment. 840 | 841 | Returns the release segments before the post-release and the 842 | post-release version number (or -1 if no post-release segment is present). 843 | """ 844 | vc = str.split(ver, ".post") 845 | return vc[0], int(vc[1] or 0) if len(vc) == 2 else None 846 | 847 | 848 | def render_pep440_pre(pieces): 849 | """TAG[.postN.devDISTANCE] -- No -dirty. 850 | 851 | Exceptions: 852 | 1: no tags. 0.post0.devDISTANCE 853 | """ 854 | if pieces["closest-tag"]: 855 | if pieces["distance"]: 856 | # update the post release segment 857 | tag_version, post_version = pep440_split_post(pieces["closest-tag"]) 858 | rendered = tag_version 859 | if post_version is not None: 860 | rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"]) 861 | else: 862 | rendered += ".post0.dev%%d" %% (pieces["distance"]) 863 | else: 864 | # no commits, use the tag as the version 865 | rendered = pieces["closest-tag"] 866 | else: 867 | # exception #1 868 | rendered = "0.post0.dev%%d" %% pieces["distance"] 869 | return rendered 870 | 871 | 872 | def render_pep440_post(pieces): 873 | """TAG[.postDISTANCE[.dev0]+gHEX] . 874 | 875 | The ".dev0" means dirty. Note that .dev0 sorts backwards 876 | (a dirty tree will appear "older" than the corresponding clean one), 877 | but you shouldn't be releasing software with -dirty anyways. 878 | 879 | Exceptions: 880 | 1: no tags. 
0.postDISTANCE[.dev0] 881 | """ 882 | if pieces["closest-tag"]: 883 | rendered = pieces["closest-tag"] 884 | if pieces["distance"] or pieces["dirty"]: 885 | rendered += ".post%%d" %% pieces["distance"] 886 | if pieces["dirty"]: 887 | rendered += ".dev0" 888 | rendered += plus_or_dot(pieces) 889 | rendered += "g%%s" %% pieces["short"] 890 | else: 891 | # exception #1 892 | rendered = "0.post%%d" %% pieces["distance"] 893 | if pieces["dirty"]: 894 | rendered += ".dev0" 895 | rendered += "+g%%s" %% pieces["short"] 896 | return rendered 897 | 898 | 899 | def render_pep440_post_branch(pieces): 900 | """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . 901 | 902 | The ".dev0" means not master branch. 903 | 904 | Exceptions: 905 | 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] 906 | """ 907 | if pieces["closest-tag"]: 908 | rendered = pieces["closest-tag"] 909 | if pieces["distance"] or pieces["dirty"]: 910 | rendered += ".post%%d" %% pieces["distance"] 911 | if pieces["branch"] != "master": 912 | rendered += ".dev0" 913 | rendered += plus_or_dot(pieces) 914 | rendered += "g%%s" %% pieces["short"] 915 | if pieces["dirty"]: 916 | rendered += ".dirty" 917 | else: 918 | # exception #1 919 | rendered = "0.post%%d" %% pieces["distance"] 920 | if pieces["branch"] != "master": 921 | rendered += ".dev0" 922 | rendered += "+g%%s" %% pieces["short"] 923 | if pieces["dirty"]: 924 | rendered += ".dirty" 925 | return rendered 926 | 927 | 928 | def render_pep440_old(pieces): 929 | """TAG[.postDISTANCE[.dev0]] . 930 | 931 | The ".dev0" means dirty. 932 | 933 | Exceptions: 934 | 1: no tags. 0.postDISTANCE[.dev0] 935 | """ 936 | if pieces["closest-tag"]: 937 | rendered = pieces["closest-tag"] 938 | if pieces["distance"] or pieces["dirty"]: 939 | rendered += ".post%%d" %% pieces["distance"] 940 | if pieces["dirty"]: 941 | rendered += ".dev0" 942 | else: 943 | # exception #1 944 | rendered = "0.post%%d" %% pieces["distance"] 945 | if pieces["dirty"]: 946 | rendered += ".dev0" 947 | return rendered 948 | 949 | 950 | def render_git_describe(pieces): 951 | """TAG[-DISTANCE-gHEX][-dirty]. 952 | 953 | Like 'git describe --tags --dirty --always'. 954 | 955 | Exceptions: 956 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 957 | """ 958 | if pieces["closest-tag"]: 959 | rendered = pieces["closest-tag"] 960 | if pieces["distance"]: 961 | rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) 962 | else: 963 | # exception #1 964 | rendered = pieces["short"] 965 | if pieces["dirty"]: 966 | rendered += "-dirty" 967 | return rendered 968 | 969 | 970 | def render_git_describe_long(pieces): 971 | """TAG-DISTANCE-gHEX[-dirty]. 972 | 973 | Like 'git describe --tags --dirty --always -long'. 974 | The distance/hash is unconditional. 975 | 976 | Exceptions: 977 | 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) 978 | """ 979 | if pieces["closest-tag"]: 980 | rendered = pieces["closest-tag"] 981 | rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) 982 | else: 983 | # exception #1 984 | rendered = pieces["short"] 985 | if pieces["dirty"]: 986 | rendered += "-dirty" 987 | return rendered 988 | 989 | 990 | def render(pieces, style): 991 | """Render the given version pieces into the requested style.""" 992 | if pieces["error"]: 993 | return {"version": "unknown", 994 | "full-revisionid": pieces.get("long"), 995 | "dirty": None, 996 | "error": pieces["error"], 997 | "date": None} 998 | 999 | if not style or style == "default": 1000 | style = "pep440" # the default 1001 | 1002 | if style == "pep440": 1003 | rendered = render_pep440(pieces) 1004 | elif style == "pep440-branch": 1005 | rendered = render_pep440_branch(pieces) 1006 | elif style == "pep440-pre": 1007 | rendered = render_pep440_pre(pieces) 1008 | elif style == "pep440-post": 1009 | rendered = render_pep440_post(pieces) 1010 | elif style == "pep440-post-branch": 1011 | rendered = render_pep440_post_branch(pieces) 1012 | elif style == "pep440-old": 1013 | rendered = render_pep440_old(pieces) 1014 | elif style == "git-describe": 1015 | rendered = render_git_describe(pieces) 1016 | elif style == "git-describe-long": 1017 | rendered = render_git_describe_long(pieces) 1018 | else: 1019 | raise ValueError("unknown style '%%s'" %% style) 1020 | 1021 | return {"version": rendered, "full-revisionid": pieces["long"], 1022 | "dirty": pieces["dirty"], "error": None, 1023 | "date": pieces.get("date")} 1024 | 1025 | 1026 | def get_versions(): 1027 | """Get version information or return default if unable to do so.""" 1028 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have 1029 | # __file__, we can work backwards from there to the root. Some 1030 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which 1031 | # case we can only use expanded keywords. 1032 | 1033 | cfg = get_config() 1034 | verbose = cfg.verbose 1035 | 1036 | try: 1037 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, 1038 | verbose) 1039 | except NotThisMethod: 1040 | pass 1041 | 1042 | try: 1043 | root = os.path.realpath(__file__) 1044 | # versionfile_source is the relative path from the top of the source 1045 | # tree (where the .git directory might live) to this file. Invert 1046 | # this to find the root from __file__. 1047 | for _ in cfg.versionfile_source.split('/'): 1048 | root = os.path.dirname(root) 1049 | except NameError: 1050 | return {"version": "0+unknown", "full-revisionid": None, 1051 | "dirty": None, 1052 | "error": "unable to find root of source tree", 1053 | "date": None} 1054 | 1055 | try: 1056 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) 1057 | return render(pieces, cfg.style) 1058 | except NotThisMethod: 1059 | pass 1060 | 1061 | try: 1062 | if cfg.parentdir_prefix: 1063 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 1064 | except NotThisMethod: 1065 | pass 1066 | 1067 | return {"version": "0+unknown", "full-revisionid": None, 1068 | "dirty": None, 1069 | "error": "unable to compute version", "date": None} 1070 | ''' 1071 | 1072 | 1073 | @register_vcs_handler("git", "get_keywords") 1074 | def git_get_keywords(versionfile_abs): 1075 | """Extract version information from the given file.""" 1076 | # the code embedded in _version.py can just fetch the value of these 1077 | # keywords. 
When used from setup.py, we don't want to import _version.py, 1078 | # so we do it with a regexp instead. This function is not used from 1079 | # _version.py. 1080 | keywords = {} 1081 | try: 1082 | with open(versionfile_abs, "r") as fobj: 1083 | for line in fobj: 1084 | if line.strip().startswith("git_refnames ="): 1085 | mo = re.search(r'=\s*"(.*)"', line) 1086 | if mo: 1087 | keywords["refnames"] = mo.group(1) 1088 | if line.strip().startswith("git_full ="): 1089 | mo = re.search(r'=\s*"(.*)"', line) 1090 | if mo: 1091 | keywords["full"] = mo.group(1) 1092 | if line.strip().startswith("git_date ="): 1093 | mo = re.search(r'=\s*"(.*)"', line) 1094 | if mo: 1095 | keywords["date"] = mo.group(1) 1096 | except OSError: 1097 | pass 1098 | return keywords 1099 | 1100 | 1101 | @register_vcs_handler("git", "keywords") 1102 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 1103 | """Get version information from git keywords.""" 1104 | if "refnames" not in keywords: 1105 | raise NotThisMethod("Short version file found") 1106 | date = keywords.get("date") 1107 | if date is not None: 1108 | # Use only the last line. Previous lines may contain GPG signature 1109 | # information. 1110 | date = date.splitlines()[-1] 1111 | 1112 | # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant 1113 | # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 1114 | # -like" string, which we must then edit to make compliant), because 1115 | # it's been around since git-1.5.3, and it's too difficult to 1116 | # discover which version we're using, or to work around using an 1117 | # older one. 1118 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 1119 | refnames = keywords["refnames"].strip() 1120 | if refnames.startswith("$Format"): 1121 | if verbose: 1122 | print("keywords are unexpanded, not using") 1123 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 1124 | refs = {r.strip() for r in refnames.strip("()").split(",")} 1125 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 1126 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 1127 | TAG = "tag: " 1128 | tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} 1129 | if not tags: 1130 | # Either we're using git < 1.8.3, or there really are no tags. We use 1131 | # a heuristic: assume all version tags have a digit. The old git %d 1132 | # expansion behaves like git log --decorate=short and strips out the 1133 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 1134 | # between branches and tags. By ignoring refnames without digits, we 1135 | # filter out many common branch names like "release" and 1136 | # "stabilization", as well as "HEAD" and "master". 1137 | tags = {r for r in refs if re.search(r"\d", r)} 1138 | if verbose: 1139 | print("discarding '%s', no digits" % ",".join(refs - tags)) 1140 | if verbose: 1141 | print("likely tags: %s" % ",".join(sorted(tags))) 1142 | for ref in sorted(tags): 1143 | # sorting will prefer e.g. 
"2.0" over "2.0rc1" 1144 | if ref.startswith(tag_prefix): 1145 | r = ref[len(tag_prefix) :] 1146 | # Filter out refs that exactly match prefix or that don't start 1147 | # with a number once the prefix is stripped (mostly a concern 1148 | # when prefix is '') 1149 | if not re.match(r"\d", r): 1150 | continue 1151 | if verbose: 1152 | print("picking %s" % r) 1153 | return { 1154 | "version": r, 1155 | "full-revisionid": keywords["full"].strip(), 1156 | "dirty": False, 1157 | "error": None, 1158 | "date": date, 1159 | } 1160 | # no suitable tags, so version is "0+unknown", but full hex is still there 1161 | if verbose: 1162 | print("no suitable tags, using unknown + full revision id") 1163 | return { 1164 | "version": "0+unknown", 1165 | "full-revisionid": keywords["full"].strip(), 1166 | "dirty": False, 1167 | "error": "no suitable tags", 1168 | "date": None, 1169 | } 1170 | 1171 | 1172 | @register_vcs_handler("git", "pieces_from_vcs") 1173 | def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): 1174 | """Get version from 'git describe' in the root of the source tree. 1175 | 1176 | This only gets called if the git-archive 'subst' keywords were *not* 1177 | expanded, and _version.py hasn't already been rewritten with a short 1178 | version string, meaning we're inside a checked out source tree. 1179 | """ 1180 | GITS = ["git"] 1181 | TAG_PREFIX_REGEX = "*" 1182 | if sys.platform == "win32": 1183 | GITS = ["git.cmd", "git.exe"] 1184 | TAG_PREFIX_REGEX = r"\*" 1185 | 1186 | _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) 1187 | if rc != 0: 1188 | if verbose: 1189 | print("Directory %s not under git control" % root) 1190 | raise NotThisMethod("'git rev-parse --git-dir' returned error") 1191 | 1192 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] 1193 | # if there isn't one, this yields HEX[-dirty] (no NUM) 1194 | describe_out, rc = runner( 1195 | GITS, 1196 | [ 1197 | "describe", 1198 | "--tags", 1199 | "--dirty", 1200 | "--always", 1201 | "--long", 1202 | "--match", 1203 | "%s%s" % (tag_prefix, TAG_PREFIX_REGEX), 1204 | ], 1205 | cwd=root, 1206 | ) 1207 | # --long was added in git-1.5.5 1208 | if describe_out is None: 1209 | raise NotThisMethod("'git describe' failed") 1210 | describe_out = describe_out.strip() 1211 | full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) 1212 | if full_out is None: 1213 | raise NotThisMethod("'git rev-parse' failed") 1214 | full_out = full_out.strip() 1215 | 1216 | pieces = {} 1217 | pieces["long"] = full_out 1218 | pieces["short"] = full_out[:7] # maybe improved later 1219 | pieces["error"] = None 1220 | 1221 | branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) 1222 | # --abbrev-ref was added in git-1.6.3 1223 | if rc != 0 or branch_name is None: 1224 | raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") 1225 | branch_name = branch_name.strip() 1226 | 1227 | if branch_name == "HEAD": 1228 | # If we aren't exactly on a branch, pick a branch which represents 1229 | # the current commit. If all else fails, we are on a branchless 1230 | # commit. 
1231 | branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) 1232 | # --contains was added in git-1.5.4 1233 | if rc != 0 or branches is None: 1234 | raise NotThisMethod("'git branch --contains' returned error") 1235 | branches = branches.split("\n") 1236 | 1237 | # Remove the first line if we're running detached 1238 | if "(" in branches[0]: 1239 | branches.pop(0) 1240 | 1241 | # Strip off the leading "* " from the list of branches. 1242 | branches = [branch[2:] for branch in branches] 1243 | if "master" in branches: 1244 | branch_name = "master" 1245 | elif not branches: 1246 | branch_name = None 1247 | else: 1248 | # Pick the first branch that is returned. Good or bad. 1249 | branch_name = branches[0] 1250 | 1251 | pieces["branch"] = branch_name 1252 | 1253 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 1254 | # TAG might have hyphens. 1255 | git_describe = describe_out 1256 | 1257 | # look for -dirty suffix 1258 | dirty = git_describe.endswith("-dirty") 1259 | pieces["dirty"] = dirty 1260 | if dirty: 1261 | git_describe = git_describe[: git_describe.rindex("-dirty")] 1262 | 1263 | # now we have TAG-NUM-gHEX or HEX 1264 | 1265 | if "-" in git_describe: 1266 | # TAG-NUM-gHEX 1267 | mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) 1268 | if not mo: 1269 | # unparsable. Maybe git-describe is misbehaving? 1270 | pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out 1271 | return pieces 1272 | 1273 | # tag 1274 | full_tag = mo.group(1) 1275 | if not full_tag.startswith(tag_prefix): 1276 | if verbose: 1277 | fmt = "tag '%s' doesn't start with prefix '%s'" 1278 | print(fmt % (full_tag, tag_prefix)) 1279 | pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( 1280 | full_tag, 1281 | tag_prefix, 1282 | ) 1283 | return pieces 1284 | pieces["closest-tag"] = full_tag[len(tag_prefix) :] 1285 | 1286 | # distance: number of commits since tag 1287 | pieces["distance"] = int(mo.group(2)) 1288 | 1289 | # commit: short hex revision ID 1290 | pieces["short"] = mo.group(3) 1291 | 1292 | else: 1293 | # HEX: no tags 1294 | pieces["closest-tag"] = None 1295 | count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) 1296 | pieces["distance"] = int(count_out) # total number of commits 1297 | 1298 | # commit date: see ISO-8601 comment in git_versions_from_keywords() 1299 | date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() 1300 | # Use only the last line. Previous lines may contain GPG signature 1301 | # information. 1302 | date = date.splitlines()[-1] 1303 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) 1304 | 1305 | return pieces 1306 | 1307 | 1308 | def do_vcs_install(manifest_in, versionfile_source, ipy): 1309 | """Git-specific installation logic for Versioneer. 1310 | 1311 | For Git, this means creating/changing .gitattributes to mark _version.py 1312 | for export-subst keyword substitution. 
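    For example, with `versionfile_source = myproject/_version.py` (an
    illustrative path), the appended line would be:

        myproject/_version.py export-subst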
1313 | """ 1314 | GITS = ["git"] 1315 | if sys.platform == "win32": 1316 | GITS = ["git.cmd", "git.exe"] 1317 | files = [manifest_in, versionfile_source] 1318 | if ipy: 1319 | files.append(ipy) 1320 | try: 1321 | my_path = __file__ 1322 | if my_path.endswith(".pyc") or my_path.endswith(".pyo"): 1323 | my_path = os.path.splitext(my_path)[0] + ".py" 1324 | versioneer_file = os.path.relpath(my_path) 1325 | except NameError: 1326 | versioneer_file = "versioneer.py" 1327 | files.append(versioneer_file) 1328 | present = False 1329 | try: 1330 | with open(".gitattributes", "r") as fobj: 1331 | for line in fobj: 1332 | if line.strip().startswith(versionfile_source): 1333 | if "export-subst" in line.strip().split()[1:]: 1334 | present = True 1335 | break 1336 | except OSError: 1337 | pass 1338 | if not present: 1339 | with open(".gitattributes", "a+") as fobj: 1340 | fobj.write(f"{versionfile_source} export-subst\n") 1341 | files.append(".gitattributes") 1342 | run_command(GITS, ["add", "--"] + files) 1343 | 1344 | 1345 | def versions_from_parentdir(parentdir_prefix, root, verbose): 1346 | """Try to determine the version from the parent directory name. 1347 | 1348 | Source tarballs conventionally unpack into a directory that includes both 1349 | the project name and a version string. We will also support searching up 1350 | two directory levels for an appropriately named parent directory 1351 | """ 1352 | rootdirs = [] 1353 | 1354 | for _ in range(3): 1355 | dirname = os.path.basename(root) 1356 | if dirname.startswith(parentdir_prefix): 1357 | return { 1358 | "version": dirname[len(parentdir_prefix) :], 1359 | "full-revisionid": None, 1360 | "dirty": False, 1361 | "error": None, 1362 | "date": None, 1363 | } 1364 | rootdirs.append(root) 1365 | root = os.path.dirname(root) # up a level 1366 | 1367 | if verbose: 1368 | print( 1369 | "Tried directories %s but none started with prefix %s" 1370 | % (str(rootdirs), parentdir_prefix) 1371 | ) 1372 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 1373 | 1374 | 1375 | SHORT_VERSION_PY = """ 1376 | # This file was generated by 'versioneer.py' (0.21) from 1377 | # revision-control system data, or from the parent directory name of an 1378 | # unpacked source archive. Distribution tarballs contain a pre-generated copy 1379 | # of this file. 
1380 | 1381 | import json 1382 | 1383 | version_json = ''' 1384 | %s 1385 | ''' # END VERSION_JSON 1386 | 1387 | 1388 | def get_versions(): 1389 | return json.loads(version_json) 1390 | """ 1391 | 1392 | 1393 | def versions_from_file(filename): 1394 | """Try to determine the version from _version.py if present.""" 1395 | try: 1396 | with open(filename) as f: 1397 | contents = f.read() 1398 | except OSError: 1399 | raise NotThisMethod("unable to read _version.py") 1400 | mo = re.search( 1401 | r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S 1402 | ) 1403 | if not mo: 1404 | mo = re.search( 1405 | r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S 1406 | ) 1407 | if not mo: 1408 | raise NotThisMethod("no version_json in _version.py") 1409 | return json.loads(mo.group(1)) 1410 | 1411 | 1412 | def write_to_version_file(filename, versions): 1413 | """Write the given version number to the given _version.py file.""" 1414 | os.unlink(filename) 1415 | contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) 1416 | with open(filename, "w") as f: 1417 | f.write(SHORT_VERSION_PY % contents) 1418 | 1419 | print("set %s to '%s'" % (filename, versions["version"])) 1420 | 1421 | 1422 | def plus_or_dot(pieces): 1423 | """Return a + if we don't already have one, else return a .""" 1424 | if "+" in pieces.get("closest-tag", ""): 1425 | return "." 1426 | return "+" 1427 | 1428 | 1429 | def render_pep440(pieces): 1430 | """Build up version string, with post-release "local version identifier". 1431 | 1432 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 1433 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 1434 | 1435 | Exceptions: 1436 | 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] 1437 | """ 1438 | if pieces["closest-tag"]: 1439 | rendered = pieces["closest-tag"] 1440 | if pieces["distance"] or pieces["dirty"]: 1441 | rendered += plus_or_dot(pieces) 1442 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 1443 | if pieces["dirty"]: 1444 | rendered += ".dirty" 1445 | else: 1446 | # exception #1 1447 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) 1448 | if pieces["dirty"]: 1449 | rendered += ".dirty" 1450 | return rendered 1451 | 1452 | 1453 | def render_pep440_branch(pieces): 1454 | """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . 1455 | 1456 | The ".dev0" means not master branch. Note that .dev0 sorts backwards 1457 | (a feature branch will appear "older" than the master branch). 1458 | 1459 | Exceptions: 1460 | 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] 1461 | """ 1462 | if pieces["closest-tag"]: 1463 | rendered = pieces["closest-tag"] 1464 | if pieces["distance"] or pieces["dirty"]: 1465 | if pieces["branch"] != "master": 1466 | rendered += ".dev0" 1467 | rendered += plus_or_dot(pieces) 1468 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 1469 | if pieces["dirty"]: 1470 | rendered += ".dirty" 1471 | else: 1472 | # exception #1 1473 | rendered = "0" 1474 | if pieces["branch"] != "master": 1475 | rendered += ".dev0" 1476 | rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) 1477 | if pieces["dirty"]: 1478 | rendered += ".dirty" 1479 | return rendered 1480 | 1481 | 1482 | def pep440_split_post(ver): 1483 | """Split pep440 version string at the post-release segment. 
1484 | 1485 | Returns the release segments before the post-release and the 1486 | post-release version number (or -1 if no post-release segment is present). 1487 | """ 1488 | vc = str.split(ver, ".post") 1489 | return vc[0], int(vc[1] or 0) if len(vc) == 2 else None 1490 | 1491 | 1492 | def render_pep440_pre(pieces): 1493 | """TAG[.postN.devDISTANCE] -- No -dirty. 1494 | 1495 | Exceptions: 1496 | 1: no tags. 0.post0.devDISTANCE 1497 | """ 1498 | if pieces["closest-tag"]: 1499 | if pieces["distance"]: 1500 | # update the post release segment 1501 | tag_version, post_version = pep440_split_post(pieces["closest-tag"]) 1502 | rendered = tag_version 1503 | if post_version is not None: 1504 | rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) 1505 | else: 1506 | rendered += ".post0.dev%d" % (pieces["distance"]) 1507 | else: 1508 | # no commits, use the tag as the version 1509 | rendered = pieces["closest-tag"] 1510 | else: 1511 | # exception #1 1512 | rendered = "0.post0.dev%d" % pieces["distance"] 1513 | return rendered 1514 | 1515 | 1516 | def render_pep440_post(pieces): 1517 | """TAG[.postDISTANCE[.dev0]+gHEX] . 1518 | 1519 | The ".dev0" means dirty. Note that .dev0 sorts backwards 1520 | (a dirty tree will appear "older" than the corresponding clean one), 1521 | but you shouldn't be releasing software with -dirty anyways. 1522 | 1523 | Exceptions: 1524 | 1: no tags. 0.postDISTANCE[.dev0] 1525 | """ 1526 | if pieces["closest-tag"]: 1527 | rendered = pieces["closest-tag"] 1528 | if pieces["distance"] or pieces["dirty"]: 1529 | rendered += ".post%d" % pieces["distance"] 1530 | if pieces["dirty"]: 1531 | rendered += ".dev0" 1532 | rendered += plus_or_dot(pieces) 1533 | rendered += "g%s" % pieces["short"] 1534 | else: 1535 | # exception #1 1536 | rendered = "0.post%d" % pieces["distance"] 1537 | if pieces["dirty"]: 1538 | rendered += ".dev0" 1539 | rendered += "+g%s" % pieces["short"] 1540 | return rendered 1541 | 1542 | 1543 | def render_pep440_post_branch(pieces): 1544 | """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . 1545 | 1546 | The ".dev0" means not master branch. 1547 | 1548 | Exceptions: 1549 | 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] 1550 | """ 1551 | if pieces["closest-tag"]: 1552 | rendered = pieces["closest-tag"] 1553 | if pieces["distance"] or pieces["dirty"]: 1554 | rendered += ".post%d" % pieces["distance"] 1555 | if pieces["branch"] != "master": 1556 | rendered += ".dev0" 1557 | rendered += plus_or_dot(pieces) 1558 | rendered += "g%s" % pieces["short"] 1559 | if pieces["dirty"]: 1560 | rendered += ".dirty" 1561 | else: 1562 | # exception #1 1563 | rendered = "0.post%d" % pieces["distance"] 1564 | if pieces["branch"] != "master": 1565 | rendered += ".dev0" 1566 | rendered += "+g%s" % pieces["short"] 1567 | if pieces["dirty"]: 1568 | rendered += ".dirty" 1569 | return rendered 1570 | 1571 | 1572 | def render_pep440_old(pieces): 1573 | """TAG[.postDISTANCE[.dev0]] . 1574 | 1575 | The ".dev0" means dirty. 1576 | 1577 | Exceptions: 1578 | 1: no tags. 0.postDISTANCE[.dev0] 1579 | """ 1580 | if pieces["closest-tag"]: 1581 | rendered = pieces["closest-tag"] 1582 | if pieces["distance"] or pieces["dirty"]: 1583 | rendered += ".post%d" % pieces["distance"] 1584 | if pieces["dirty"]: 1585 | rendered += ".dev0" 1586 | else: 1587 | # exception #1 1588 | rendered = "0.post%d" % pieces["distance"] 1589 | if pieces["dirty"]: 1590 | rendered += ".dev0" 1591 | return rendered 1592 | 1593 | 1594 | def render_git_describe(pieces): 1595 | """TAG[-DISTANCE-gHEX][-dirty]. 
1596 | 1597 | Like 'git describe --tags --dirty --always'. 1598 | 1599 | Exceptions: 1600 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 1601 | """ 1602 | if pieces["closest-tag"]: 1603 | rendered = pieces["closest-tag"] 1604 | if pieces["distance"]: 1605 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 1606 | else: 1607 | # exception #1 1608 | rendered = pieces["short"] 1609 | if pieces["dirty"]: 1610 | rendered += "-dirty" 1611 | return rendered 1612 | 1613 | 1614 | def render_git_describe_long(pieces): 1615 | """TAG-DISTANCE-gHEX[-dirty]. 1616 | 1617 | Like 'git describe --tags --dirty --always --long'. 1618 | The distance/hash is unconditional. 1619 | 1620 | Exceptions: 1621 | 1: no tags. HEX[-dirty] (note: no 'g' prefix) 1622 | """ 1623 | if pieces["closest-tag"]: 1624 | rendered = pieces["closest-tag"] 1625 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 1626 | else: 1627 | # exception #1 1628 | rendered = pieces["short"] 1629 | if pieces["dirty"]: 1630 | rendered += "-dirty" 1631 | return rendered 1632 | 1633 | 1634 | def render(pieces, style): 1635 | """Render the given version pieces into the requested style.""" 1636 | if pieces["error"]: 1637 | return { 1638 | "version": "unknown", 1639 | "full-revisionid": pieces.get("long"), 1640 | "dirty": None, 1641 | "error": pieces["error"], 1642 | "date": None, 1643 | } 1644 | 1645 | if not style or style == "default": 1646 | style = "pep440" # the default 1647 | 1648 | if style == "pep440": 1649 | rendered = render_pep440(pieces) 1650 | elif style == "pep440-branch": 1651 | rendered = render_pep440_branch(pieces) 1652 | elif style == "pep440-pre": 1653 | rendered = render_pep440_pre(pieces) 1654 | elif style == "pep440-post": 1655 | rendered = render_pep440_post(pieces) 1656 | elif style == "pep440-post-branch": 1657 | rendered = render_pep440_post_branch(pieces) 1658 | elif style == "pep440-old": 1659 | rendered = render_pep440_old(pieces) 1660 | elif style == "git-describe": 1661 | rendered = render_git_describe(pieces) 1662 | elif style == "git-describe-long": 1663 | rendered = render_git_describe_long(pieces) 1664 | else: 1665 | raise ValueError("unknown style '%s'" % style) 1666 | 1667 | return { 1668 | "version": rendered, 1669 | "full-revisionid": pieces["long"], 1670 | "dirty": pieces["dirty"], 1671 | "error": None, 1672 | "date": pieces.get("date"), 1673 | } 1674 | 1675 | 1676 | class VersioneerBadRootError(Exception): 1677 | """The project root directory is unknown or missing key files.""" 1678 | 1679 | 1680 | def get_versions(verbose=False): 1681 | """Get the project version from whatever source is available. 1682 | 1683 | Returns dict with the keys 'version', 'full-revisionid', 'dirty', 'error', and 'date'. 1684 | """ 1685 | if "versioneer" in sys.modules: 1686 | # see the discussion in cmdclass.py:get_cmdclass() 1687 | del sys.modules["versioneer"] 1688 | 1689 | root = get_root() 1690 | cfg = get_config_from_root(root) 1691 | 1692 | assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" 1693 | handlers = HANDLERS.get(cfg.VCS) 1694 | assert handlers, "unrecognized VCS '%s'" % cfg.VCS 1695 | verbose = verbose or cfg.verbose 1696 | assert ( 1697 | cfg.versionfile_source is not None 1698 | ), "please set versioneer.versionfile_source" 1699 | assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" 1700 | 1701 | versionfile_abs = os.path.join(root, cfg.versionfile_source) 1702 | 1703 | # extract version from first of: _version.py, VCS command (e.g. 'git 1704 | # describe'), parentdir.
This is meant to work for developers using a 1705 | # source checkout, for users of a tarball created by 'setup.py sdist', 1706 | # and for users of a tarball/zipball created by 'git archive' or github's 1707 | # download-from-tag feature or the equivalent in other VCSes. 1708 | 1709 | get_keywords_f = handlers.get("get_keywords") 1710 | from_keywords_f = handlers.get("keywords") 1711 | if get_keywords_f and from_keywords_f: 1712 | try: 1713 | keywords = get_keywords_f(versionfile_abs) 1714 | ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) 1715 | if verbose: 1716 | print("got version from expanded keyword %s" % ver) 1717 | return ver 1718 | except NotThisMethod: 1719 | pass 1720 | 1721 | try: 1722 | ver = versions_from_file(versionfile_abs) 1723 | if verbose: 1724 | print("got version from file %s %s" % (versionfile_abs, ver)) 1725 | return ver 1726 | except NotThisMethod: 1727 | pass 1728 | 1729 | from_vcs_f = handlers.get("pieces_from_vcs") 1730 | if from_vcs_f: 1731 | try: 1732 | pieces = from_vcs_f(cfg.tag_prefix, root, verbose) 1733 | ver = render(pieces, cfg.style) 1734 | if verbose: 1735 | print("got version from VCS %s" % ver) 1736 | return ver 1737 | except NotThisMethod: 1738 | pass 1739 | 1740 | try: 1741 | if cfg.parentdir_prefix: 1742 | ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 1743 | if verbose: 1744 | print("got version from parentdir %s" % ver) 1745 | return ver 1746 | except NotThisMethod: 1747 | pass 1748 | 1749 | if verbose: 1750 | print("unable to compute version") 1751 | 1752 | return { 1753 | "version": "0+unknown", 1754 | "full-revisionid": None, 1755 | "dirty": None, 1756 | "error": "unable to compute version", 1757 | "date": None, 1758 | } 1759 | 1760 | 1761 | def get_version(): 1762 | """Get the short version string for this project.""" 1763 | return get_versions()["version"] 1764 | 1765 | 1766 | def get_cmdclass(cmdclass=None): 1767 | """Get the custom setuptools/distutils subclasses used by Versioneer. 1768 | 1769 | If the package uses a different cmdclass (e.g. one from numpy), it 1770 | should be provided as an argument. 1771 | """ 1772 | if "versioneer" in sys.modules: 1773 | del sys.modules["versioneer"] 1774 | # this fixes the "python setup.py develop" case (also 'install' and 1775 | # 'easy_install .'), in which subdependencies of the main project are 1776 | # built (using setup.py bdist_egg) in the same python process. Assume 1777 | # a main project A and a dependency B, which use different versions 1778 | # of Versioneer. A's setup.py imports A's Versioneer, leaving it in 1779 | # sys.modules by the time B's setup.py is executed, causing B to run 1780 | # with the wrong versioneer. Setuptools wraps the sub-dep builds in a 1781 | # sandbox that restores sys.modules to its pre-build state, so the 1782 | # parent is protected against the child's "import versioneer". By 1783 | # removing ourselves from sys.modules here, before the child build 1784 | # happens, we protect the child from the parent's versioneer too.
1785 | # Also see https://github.com/python-versioneer/python-versioneer/issues/52 1786 | 1787 | cmds = {} if cmdclass is None else cmdclass.copy() 1788 | 1789 | # we add "version" to both distutils and setuptools 1790 | from distutils.core import Command 1791 | 1792 | class cmd_version(Command): 1793 | description = "report generated version string" 1794 | user_options = [] 1795 | boolean_options = [] 1796 | 1797 | def initialize_options(self): 1798 | pass 1799 | 1800 | def finalize_options(self): 1801 | pass 1802 | 1803 | def run(self): 1804 | vers = get_versions(verbose=True) 1805 | print("Version: %s" % vers["version"]) 1806 | print(" full-revisionid: %s" % vers.get("full-revisionid")) 1807 | print(" dirty: %s" % vers.get("dirty")) 1808 | print(" date: %s" % vers.get("date")) 1809 | if vers["error"]: 1810 | print(" error: %s" % vers["error"]) 1811 | 1812 | cmds["version"] = cmd_version 1813 | 1814 | # we override "build_py" in both distutils and setuptools 1815 | # 1816 | # most invocation pathways end up running build_py: 1817 | # distutils/build -> build_py 1818 | # distutils/install -> distutils/build ->.. 1819 | # setuptools/bdist_wheel -> distutils/install ->.. 1820 | # setuptools/bdist_egg -> distutils/install_lib -> build_py 1821 | # setuptools/install -> bdist_egg ->.. 1822 | # setuptools/develop -> ? 1823 | # pip install: 1824 | # copies source tree to a tempdir before running egg_info/etc 1825 | # if .git isn't copied too, 'git describe' will fail 1826 | # then does setup.py bdist_wheel, or sometimes setup.py install 1827 | # setup.py egg_info -> ? 1828 | 1829 | # we override different "build_py" commands for both environments 1830 | if "build_py" in cmds: 1831 | _build_py = cmds["build_py"] 1832 | elif "setuptools" in sys.modules: 1833 | from setuptools.command.build_py import build_py as _build_py 1834 | else: 1835 | from distutils.command.build_py import build_py as _build_py 1836 | 1837 | class cmd_build_py(_build_py): 1838 | def run(self): 1839 | root = get_root() 1840 | cfg = get_config_from_root(root) 1841 | versions = get_versions() 1842 | _build_py.run(self) 1843 | # now locate _version.py in the new build/ directory and replace 1844 | # it with an updated value 1845 | if cfg.versionfile_build: 1846 | target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) 1847 | print("UPDATING %s" % target_versionfile) 1848 | write_to_version_file(target_versionfile, versions) 1849 | 1850 | cmds["build_py"] = cmd_build_py 1851 | 1852 | if "build_ext" in cmds: 1853 | _build_ext = cmds["build_ext"] 1854 | elif "setuptools" in sys.modules: 1855 | from setuptools.command.build_ext import build_ext as _build_ext 1856 | else: 1857 | from distutils.command.build_ext import build_ext as _build_ext 1858 | 1859 | class cmd_build_ext(_build_ext): 1860 | def run(self): 1861 | root = get_root() 1862 | cfg = get_config_from_root(root) 1863 | versions = get_versions() 1864 | _build_ext.run(self) 1865 | if self.inplace: 1866 | # build_ext --inplace will only build extensions in 1867 | # build/lib<..> dir with no _version.py to write to. 1868 | # As in place builds will already have a _version.py 1869 | # in the module dir, we do not need to write one. 
1870 | return 1871 | # now locate _version.py in the new build/ directory and replace 1872 | # it with an updated value 1873 | target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) 1874 | print("UPDATING %s" % target_versionfile) 1875 | write_to_version_file(target_versionfile, versions) 1876 | 1877 | cmds["build_ext"] = cmd_build_ext 1878 | 1879 | if "cx_Freeze" in sys.modules: # cx_freeze enabled? 1880 | from cx_Freeze.dist import build_exe as _build_exe 1881 | 1882 | # nczeczulin reports that py2exe won't like the pep440-style string 1883 | # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. 1884 | # setup(console=[{ 1885 | # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION 1886 | # "product_version": versioneer.get_version(), 1887 | # ... 1888 | 1889 | class cmd_build_exe(_build_exe): 1890 | def run(self): 1891 | root = get_root() 1892 | cfg = get_config_from_root(root) 1893 | versions = get_versions() 1894 | target_versionfile = cfg.versionfile_source 1895 | print("UPDATING %s" % target_versionfile) 1896 | write_to_version_file(target_versionfile, versions) 1897 | 1898 | _build_exe.run(self) 1899 | os.unlink(target_versionfile) 1900 | with open(cfg.versionfile_source, "w") as f: 1901 | LONG = LONG_VERSION_PY[cfg.VCS] 1902 | f.write( 1903 | LONG 1904 | % { 1905 | "DOLLAR": "$", 1906 | "STYLE": cfg.style, 1907 | "TAG_PREFIX": cfg.tag_prefix, 1908 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1909 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1910 | } 1911 | ) 1912 | 1913 | cmds["build_exe"] = cmd_build_exe 1914 | del cmds["build_py"] 1915 | 1916 | if "py2exe" in sys.modules: # py2exe enabled? 1917 | from py2exe.distutils_buildexe import py2exe as _py2exe 1918 | 1919 | class cmd_py2exe(_py2exe): 1920 | def run(self): 1921 | root = get_root() 1922 | cfg = get_config_from_root(root) 1923 | versions = get_versions() 1924 | target_versionfile = cfg.versionfile_source 1925 | print("UPDATING %s" % target_versionfile) 1926 | write_to_version_file(target_versionfile, versions) 1927 | 1928 | _py2exe.run(self) 1929 | os.unlink(target_versionfile) 1930 | with open(cfg.versionfile_source, "w") as f: 1931 | LONG = LONG_VERSION_PY[cfg.VCS] 1932 | f.write( 1933 | LONG 1934 | % { 1935 | "DOLLAR": "$", 1936 | "STYLE": cfg.style, 1937 | "TAG_PREFIX": cfg.tag_prefix, 1938 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1939 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1940 | } 1941 | ) 1942 | 1943 | cmds["py2exe"] = cmd_py2exe 1944 | 1945 | # we override different "sdist" commands for both environments 1946 | if "sdist" in cmds: 1947 | _sdist = cmds["sdist"] 1948 | elif "setuptools" in sys.modules: 1949 | from setuptools.command.sdist import sdist as _sdist 1950 | else: 1951 | from distutils.command.sdist import sdist as _sdist 1952 | 1953 | class cmd_sdist(_sdist): 1954 | def run(self): 1955 | versions = get_versions() 1956 | self._versioneer_generated_versions = versions 1957 | # unless we update this, the command will keep using the old 1958 | # version 1959 | self.distribution.metadata.version = versions["version"] 1960 | return _sdist.run(self) 1961 | 1962 | def make_release_tree(self, base_dir, files): 1963 | root = get_root() 1964 | cfg = get_config_from_root(root) 1965 | _sdist.make_release_tree(self, base_dir, files) 1966 | # now locate _version.py in the new base_dir directory 1967 | # (remembering that it may be a hardlink) and replace it with an 1968 | # updated value 1969 | target_versionfile = os.path.join(base_dir, cfg.versionfile_source) 1970 | 
print("UPDATING %s" % target_versionfile) 1971 | write_to_version_file( 1972 | target_versionfile, self._versioneer_generated_versions 1973 | ) 1974 | 1975 | cmds["sdist"] = cmd_sdist 1976 | 1977 | return cmds 1978 | 1979 | 1980 | CONFIG_ERROR = """ 1981 | setup.cfg is missing the necessary Versioneer configuration. You need 1982 | a section like: 1983 | 1984 | [versioneer] 1985 | VCS = git 1986 | style = pep440 1987 | versionfile_source = src/myproject/_version.py 1988 | versionfile_build = myproject/_version.py 1989 | tag_prefix = 1990 | parentdir_prefix = myproject- 1991 | 1992 | You will also need to edit your setup.py to use the results: 1993 | 1994 | import versioneer 1995 | setup(version=versioneer.get_version(), 1996 | cmdclass=versioneer.get_cmdclass(), ...) 1997 | 1998 | Please read the docstring in ./versioneer.py for configuration instructions, 1999 | edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. 2000 | """ 2001 | 2002 | SAMPLE_CONFIG = """ 2003 | # See the docstring in versioneer.py for instructions. Note that you must 2004 | # re-run 'versioneer.py setup' after changing this section, and commit the 2005 | # resulting files. 2006 | 2007 | [versioneer] 2008 | #VCS = git 2009 | #style = pep440 2010 | #versionfile_source = 2011 | #versionfile_build = 2012 | #tag_prefix = 2013 | #parentdir_prefix = 2014 | 2015 | """ 2016 | 2017 | OLD_SNIPPET = """ 2018 | from ._version import get_versions 2019 | __version__ = get_versions()['version'] 2020 | del get_versions 2021 | """ 2022 | 2023 | INIT_PY_SNIPPET = """ 2024 | from . import {0} 2025 | __version__ = {0}.get_versions()['version'] 2026 | """ 2027 | 2028 | 2029 | def do_setup(): 2030 | """Do main VCS-independent setup function for installing Versioneer.""" 2031 | root = get_root() 2032 | try: 2033 | cfg = get_config_from_root(root) 2034 | except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: 2035 | if isinstance(e, (OSError, configparser.NoSectionError)): 2036 | print("Adding sample versioneer config to setup.cfg", file=sys.stderr) 2037 | with open(os.path.join(root, "setup.cfg"), "a") as f: 2038 | f.write(SAMPLE_CONFIG) 2039 | print(CONFIG_ERROR, file=sys.stderr) 2040 | return 1 2041 | 2042 | print(" creating %s" % cfg.versionfile_source) 2043 | with open(cfg.versionfile_source, "w") as f: 2044 | LONG = LONG_VERSION_PY[cfg.VCS] 2045 | f.write( 2046 | LONG 2047 | % { 2048 | "DOLLAR": "$", 2049 | "STYLE": cfg.style, 2050 | "TAG_PREFIX": cfg.tag_prefix, 2051 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 2052 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 2053 | } 2054 | ) 2055 | 2056 | ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") 2057 | if os.path.exists(ipy): 2058 | try: 2059 | with open(ipy, "r") as f: 2060 | old = f.read() 2061 | except OSError: 2062 | old = "" 2063 | module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] 2064 | snippet = INIT_PY_SNIPPET.format(module) 2065 | if OLD_SNIPPET in old: 2066 | print(" replacing boilerplate in %s" % ipy) 2067 | with open(ipy, "w") as f: 2068 | f.write(old.replace(OLD_SNIPPET, snippet)) 2069 | elif snippet not in old: 2070 | print(" appending to %s" % ipy) 2071 | with open(ipy, "a") as f: 2072 | f.write(snippet) 2073 | else: 2074 | print(" %s unmodified" % ipy) 2075 | else: 2076 | print(" %s doesn't exist, ok" % ipy) 2077 | ipy = None 2078 | 2079 | # Make sure both the top-level "versioneer.py" and versionfile_source 2080 | # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so 2081 
| # they'll be copied into source distributions. Pip won't be able to 2082 | # install the package without this. 2083 | manifest_in = os.path.join(root, "MANIFEST.in") 2084 | simple_includes = set() 2085 | try: 2086 | with open(manifest_in, "r") as f: 2087 | for line in f: 2088 | if line.startswith("include "): 2089 | for include in line.split()[1:]: 2090 | simple_includes.add(include) 2091 | except OSError: 2092 | pass 2093 | # That doesn't cover everything MANIFEST.in can do 2094 | # (http://docs.python.org/2/distutils/sourcedist.html#commands), so 2095 | # it might give some false negatives. Appending redundant 'include' 2096 | # lines is safe, though. 2097 | if "versioneer.py" not in simple_includes: 2098 | print(" appending 'versioneer.py' to MANIFEST.in") 2099 | with open(manifest_in, "a") as f: 2100 | f.write("include versioneer.py\n") 2101 | else: 2102 | print(" 'versioneer.py' already in MANIFEST.in") 2103 | if cfg.versionfile_source not in simple_includes: 2104 | print( 2105 | " appending versionfile_source ('%s') to MANIFEST.in" 2106 | % cfg.versionfile_source 2107 | ) 2108 | with open(manifest_in, "a") as f: 2109 | f.write("include %s\n" % cfg.versionfile_source) 2110 | else: 2111 | print(" versionfile_source already in MANIFEST.in") 2112 | 2113 | # Make VCS-specific changes. For git, this means creating/changing 2114 | # .gitattributes to mark _version.py for export-subst keyword 2115 | # substitution. 2116 | do_vcs_install(manifest_in, cfg.versionfile_source, ipy) 2117 | return 0 2118 | 2119 | 2120 | def scan_setup_py(): 2121 | """Validate the contents of setup.py against Versioneer's expectations.""" 2122 | found = set() 2123 | setters = False 2124 | errors = 0 2125 | with open("setup.py", "r") as f: 2126 | for line in f.readlines(): 2127 | if "import versioneer" in line: 2128 | found.add("import") 2129 | if "versioneer.get_cmdclass()" in line: 2130 | found.add("cmdclass") 2131 | if "versioneer.get_version()" in line: 2132 | found.add("get_version") 2133 | if "versioneer.VCS" in line: 2134 | setters = True 2135 | if "versioneer.versionfile_source" in line: 2136 | setters = True 2137 | if len(found) != 3: 2138 | print("") 2139 | print("Your setup.py appears to be missing some important items") 2140 | print("(but I might be wrong). Please make sure it has something") 2141 | print("roughly like the following:") 2142 | print("") 2143 | print(" import versioneer") 2144 | print(" setup( version=versioneer.get_version(),") 2145 | print(" cmdclass=versioneer.get_cmdclass(), ...)") 2146 | print("") 2147 | errors += 1 2148 | if setters: 2149 | print("You should remove lines like 'versioneer.VCS = ' and") 2150 | print("'versioneer.versionfile_source = ' . This configuration") 2151 | print("now lives in setup.cfg, and should be removed from setup.py") 2152 | print("") 2153 | errors += 1 2154 | return errors 2155 | 2156 | 2157 | if __name__ == "__main__": 2158 | cmd = sys.argv[1] 2159 | if cmd == "setup": 2160 | errors = do_setup() 2161 | errors += scan_setup_py() 2162 | if errors: 2163 | sys.exit(1) 2164 | --------------------------------------------------------------------------------