├── .flake8 ├── .github └── workflows │ ├── durabletask-azuremanaged.yml │ └── durabletask.yml ├── .gitignore ├── .vscode ├── launch.json └── settings.json ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── dev-requirements.txt ├── durabletask-azuremanaged ├── __init__.py ├── durabletask │ └── azuremanaged │ │ ├── __init__.py │ │ ├── client.py │ │ ├── internal │ │ ├── access_token_manager.py │ │ ├── durabletask_grpc_interceptor.py │ │ └── py.typed │ │ ├── py.typed │ │ └── worker.py └── pyproject.toml ├── durabletask ├── __init__.py ├── client.py ├── internal │ ├── PROTO_SOURCE_COMMIT_HASH │ ├── grpc_interceptor.py │ ├── helpers.py │ ├── orchestrator_service_pb2.py │ ├── orchestrator_service_pb2.pyi │ ├── orchestrator_service_pb2_grpc.py │ └── shared.py ├── task.py └── worker.py ├── examples ├── README.md ├── activity_sequence.py ├── dts │ ├── README.md │ ├── dts_activity_sequence.py │ ├── dts_fanout_fanin.py │ └── requirements.txt ├── fanout_fanin.py └── human_interaction.py ├── pyproject.toml ├── requirements.txt └── tests ├── __init__.py ├── durabletask-azuremanaged ├── __init__.py ├── test_dts_activity_sequence.py ├── test_dts_orchestration_e2e.py └── test_durabletask_grpc_interceptor.py └── durabletask ├── test_activity_executor.py ├── test_client.py ├── test_concurrency_options.py ├── test_orchestration_e2e.py ├── test_orchestration_executor.py ├── test_worker_concurrency_loop.py └── test_worker_concurrency_loop_async.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E501,C901 3 | exclude = 4 | .git 5 | *_pb2* 6 | __pycache__ -------------------------------------------------------------------------------- /.github/workflows/durabletask-azuremanaged.yml: -------------------------------------------------------------------------------- 1 | name: Durable Task Scheduler SDK (durabletask-azuremanaged) 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | tags: 8 | - "azuremanaged-v*" # Only run for tags starting with "azuremanaged-v" 9 | pull_request: 10 | branches: 11 | - "main" 12 | 13 | jobs: 14 | lint: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python 3.13 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: 3.13 22 | - name: Install dependencies 23 | working-directory: durabletask-azuremanaged 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install setuptools wheel tox 27 | pip install flake8 28 | - name: Run flake8 Linter 29 | working-directory: durabletask-azuremanaged 30 | run: flake8 . 31 | - name: "Run flake8 Linter: tests" 32 | working-directory: tests/durabletask-azuremanaged 33 | run: flake8 .
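# The job below starts the DTS emulator in a container and runs the end-to-end test suite on each supported Python version; publishing only happens after it succeeds.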
34 | 35 | run-docker-tests: 36 | strategy: 37 | fail-fast: false 38 | matrix: 39 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 40 | env: 41 | EMULATOR_VERSION: "latest" 42 | needs: lint 43 | runs-on: ubuntu-latest 44 | steps: 45 | - name: Checkout repository 46 | uses: actions/checkout@v4 47 | 48 | - name: Set up Python ${{ matrix.python-version }} 49 | uses: actions/setup-python@v5 50 | with: 51 | python-version: ${{ matrix.python-version }} 52 | 53 | - name: Pull Docker image 54 | run: docker pull mcr.microsoft.com/dts/dts-emulator:$EMULATOR_VERSION 55 | 56 | - name: Run Docker container 57 | run: | 58 | docker run --name dtsemulator -d -p 8080:8080 mcr.microsoft.com/dts/dts-emulator:$EMULATOR_VERSION 59 | 60 | - name: Wait for container to be ready 61 | run: sleep 10 # Adjust if your service needs more time to start 62 | 63 | - name: Set environment variables 64 | run: | 65 | echo "TASKHUB=default" >> $GITHUB_ENV 66 | echo "ENDPOINT=http://localhost:8080" >> $GITHUB_ENV 67 | 68 | - name: Install durabletask dependencies 69 | run: | 70 | python -m pip install --upgrade pip 71 | pip install flake8 pytest 72 | pip install -r requirements.txt 73 | 74 | - name: Install durabletask-azuremanaged dependencies 75 | working-directory: examples/dts 76 | run: | 77 | python -m pip install --upgrade pip 78 | pip install -r requirements.txt 79 | 80 | - name: Run the tests 81 | working-directory: tests/durabletask-azuremanaged 82 | run: | 83 | pytest -m "dts" --verbose 84 | 85 | publish: 86 | if: startsWith(github.ref, 'refs/tags/azuremanaged-v') # Only run if a matching tag is pushed 87 | needs: run-docker-tests 88 | runs-on: ubuntu-latest 89 | steps: 90 | - name: Checkout code 91 | uses: actions/checkout@v4 92 | 93 | - name: Extract version from tag 94 | run: echo "VERSION=${GITHUB_REF#refs/tags/azuremanaged-v}" >> $GITHUB_ENV # Extract version from the tag 95 | 96 | - name: Set up Python 97 | uses: actions/setup-python@v5 98 | with: 99 | python-version: "3.13" # Adjust Python version as needed 100 | 101 | - name: Install dependencies 102 | run: | 103 | python -m pip install --upgrade pip 104 | pip install build twine 105 | 106 | - name: Build package from directory durabletask-azuremanaged 107 | working-directory: durabletask-azuremanaged 108 | run: | 109 | python -m build 110 | 111 | - name: Check package 112 | working-directory: durabletask-azuremanaged 113 | run: | 114 | twine check dist/* 115 | 116 | - name: Publish package to PyPI 117 | env: 118 | TWINE_USERNAME: __token__ 119 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN_AZUREMANAGED }} # Store your PyPI API token in GitHub Secrets 120 | working-directory: durabletask-azuremanaged 121 | run: | 122 | twine upload dist/* -------------------------------------------------------------------------------- /.github/workflows/durabletask.yml: -------------------------------------------------------------------------------- 1 | name: Durable Task SDK (durabletask) 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | tags: 8 | - "v*" # Only run for tags starting with "v" 9 | pull_request: 10 | branches: 11 | - "main" 12 | 13 | jobs: 14 | lint-and-unit-tests: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python 3.13 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: 3.13 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install setuptools wheel tox 26 | pip install flake8 27 | - name: Run flake8 Linter 28 | working-directory: durabletask 29 | run: flake8 . 30 | - name: "Run flake8 linter: tests" 31 | working-directory: tests/durabletask 32 | run: flake8 .
33 | - name: "Run flake8 linter: examples" 34 | working-directory: examples 35 | run: flake8 . 36 | 37 | run-tests: 38 | strategy: 39 | fail-fast: false 40 | matrix: 41 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 42 | needs: lint-and-unit-tests 43 | runs-on: ubuntu-latest 44 | steps: 45 | - name: Checkout repository 46 | uses: actions/checkout@v4 47 | - name: Set up Python ${{ matrix.python-version }} 48 | uses: actions/setup-python@v5 49 | with: 50 | python-version: ${{ matrix.python-version }} 51 | - name: Install durabletask dependencies and the library itself 52 | run: | 53 | python -m pip install --upgrade pip 54 | pip install flake8 pytest 55 | pip install -r requirements.txt 56 | pip install . 57 | - name: Pytest unit tests 58 | working-directory: tests/durabletask 59 | run: | 60 | pytest -m "not e2e and not dts" --verbose 61 | # Sidecar for running e2e tests requires Go SDK 62 | - name: Install Go SDK 63 | uses: actions/setup-go@v5 64 | with: 65 | go-version: 'stable' 66 | # Install and run the durabletask-go sidecar for running e2e tests 67 | - name: Pytest e2e tests 68 | working-directory: tests/durabletask 69 | run: | 70 | go install github.com/microsoft/durabletask-go@main 71 | durabletask-go --port 4001 & 72 | pytest -m "e2e and not dts" --verbose 73 | 74 | publish: 75 | if: startsWith(github.ref, 'refs/tags/v') # Only run if a matching tag is pushed 76 | needs: run-tests 77 | runs-on: ubuntu-latest 78 | steps: 79 | - name: Checkout code 80 | uses: actions/checkout@v4 81 | 82 | - name: Extract version from tag 83 | run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV # Extract version from the tag 84 | 85 | - name: Set up Python 86 | uses: actions/setup-python@v5 87 | with: 88 | python-version: "3.13" # Adjust Python version as needed 89 | 90 | - name: Install dependencies 91 | run: | 92 | python -m pip install --upgrade pip 93 | pip install build twine 94 | 95 | - name: Build package from root directory 96 | run: | 97 | python -m build 98 | 99 | - name: Check package 100 | run: | 101 | twine check dist/* 102 | 103 | - name: Publish package to PyPI 104 | env: 105 | TWINE_USERNAME: __token__ 106 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} # Store your PyPI API token in GitHub Secrets 107 | run: | 108 | twine upload dist/* -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # IDEs 132 | .idea 133 | 134 | coverage.lcov -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Python: Debug Tests", 6 | "type": "python", 7 | "request": "launch", 8 | "program": "${file}", 9 | "cwd": "${fileDirname}", 10 | "purpose": [ 11 | "debug-test" 12 | ], 13 | "env": { 14 | // pytest-cov breaks debugging, so we have to disable it during debug sessions 15 | "PYTEST_ADDOPTS": "--no-cov", 16 | "PYTHONPATH": "${workspaceFolder}" 17 | }, 18 | "console": "integratedTerminal", 19 | "justMyCode": false 20 | } 21 | ] 22 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "[python]": { 3 | "editor.defaultFormatter": "ms-python.autopep8", 4 | "editor.formatOnSave": true, 5 | "editor.codeActionsOnSave": { 6 | "source.organizeImports": "explicit" 7 | }, 8 | "editor.rulers": [ 9 | 119 10 | ], 11 | }, 12 | "autopep8.args": [ 13 | "--max-line-length=119" 14 | ], 15 | "python.analysis.typeCheckingMode": "basic", 16 | "python.testing.pytestArgs": [ 17 | "-v", 18 | "--cov=durabletask/", 19 | "--cov-report=lcov", 20 | "tests/" 21 | ], 22 | "python.testing.unittestEnabled": false, 23 | "python.testing.pytestEnabled": true, 24 | "coverage-gutters.showLineCoverage": true, 25 | "coverage-gutters.coverageFileNames": [ 26 | "coverage.lcov", 27 | "lcov.info", 28 | "cov.xml", 29 | "coverage.xml", 30 | "jacoco.xml", 31 | "coverage.cobertura.xml" 32 | ], 33 | 
"makefile.configureOnOpen": false 34 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## v0.3.0 9 | 10 | ### New 11 | 12 | - Added `ConcurrencyOptions` class for fine-grained concurrency control with separate limits for activities and orchestrations. The thread pool worker count can also be configured. 13 | 14 | ### Fixed 15 | 16 | - Fixed an issue where a worker could not recover after its connection was interrupted or severed 17 | 18 | ## v0.2.1 19 | 20 | ### New 21 | 22 | - Added `set_custom_status` orchestrator API ([#31](https://github.com/microsoft/durabletask-python/pull/31)) - contributed by [@famarting](https://github.com/famarting) 23 | - Added `purge_orchestration` client API ([#34](https://github.com/microsoft/durabletask-python/pull/34)) - contributed by [@famarting](https://github.com/famarting) 24 | - Added new `durabletask-azuremanaged` package for use with the [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/durable-task-scheduler) - by [@RyanLettieri](https://github.com/RyanLettieri) 25 | 26 | ### Changes 27 | 28 | - Protos are compiled with gRPC 1.62.3 / protobuf 3.25.X instead of the latest release. This ensures compatibility with a wider range of grpcio versions for better compatibility with other packages / libraries ([#36](https://github.com/microsoft/durabletask-python/pull/36)) - by [@berndverst](https://github.com/berndverst) 29 | - Http and grpc protocols and their secure variants are stripped from the host name parameter if provided. 
Secure mode is enabled if the protocol provided is https or grpcs ([#38](https://github.com/microsoft/durabletask-python/pull/38) - by [@berndverst](https://github.com/berndverst)) 30 | - Improve ProtoGen by downloading proto file directly instead of using submodule ([#39](https://github.com/microsoft/durabletask-python/pull/39) - by [@berndverst](https://github.com/berndverst)) 31 | 32 | ### Updates 33 | 34 | - Updated `durabletask-protobuf` submodule reference to latest 35 | 36 | ## v0.1.1a1 37 | 38 | ### New 39 | 40 | - Add recursive flag in terminate_orchestration to support cascade terminate ([#27](https://github.com/microsoft/durabletask-python/pull/27)) - contributed by [@shivamkm07](https://github.com/shivamkm07) 41 | 42 | ## v0.1.0 43 | 44 | ### New 45 | 46 | - Retry policies for activities and sub-orchestrations ([#11](https://github.com/microsoft/durabletask-python/pull/11)) - contributed by [@DeepanshuA](https://github.com/DeepanshuA) 47 | 48 | ### Fixed 49 | 50 | - Fix try/except in orchestrator functions not being handled correctly ([#21](https://github.com/microsoft/durabletask-python/pull/21)) - by [@cgillum](https://github.com/cgillum) 51 | - Updated `durabletask-protobuf` submodule reference to latest distributed tracing commit - by [@cgillum](https://github.com/cgillum) 52 | 53 | ## v0.1.0a5 54 | 55 | ### New 56 | 57 | - Adds support for secure channels ([#18](https://github.com/microsoft/durabletask-python/pull/18)) - contributed by [@elena-kolevska](https://github.com/elena-kolevska) 58 | 59 | ### Fixed 60 | 61 | - Fix zero argument values sent to activities as None ([#13](https://github.com/microsoft/durabletask-python/pull/13)) - contributed by [@DeepanshuA](https://github.com/DeepanshuA) 62 | 63 | ## v0.1.0a3 64 | 65 | ### New 66 | 67 | - Add gRPC metadata option ([#16](https://github.com/microsoft/durabletask-python/pull/16)) - contributed by [@DeepanshuA](https://github.com/DeepanshuA) 68 | 69 | ### Changes 70 | 71 | - Removed Python 3.7 support due to EOL ([#14](https://github.com/microsoft/durabletask-python/pull/14)) - contributed by [@berndverst](https://github.com/berndverst) 72 | 73 | ## v0.1.0a2 74 | 75 | ### New 76 | 77 | - Continue-as-new ([#9](https://github.com/microsoft/durabletask-python/pull/9)) 78 | - Support for Python 3.7+ ([#10](https://github.com/microsoft/durabletask-python/pull/10)) - contributed by [@DeepanshuA](https://github.com/DeepanshuA) 79 | 80 | ## v0.1.0a1 81 | 82 | Initial release, which includes the following features: 83 | 84 | - Orchestrations and activities 85 | - Durable timers 86 | - Sub-orchestrations 87 | - Suspend, resume, and terminate client operations 88 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | init: 2 | pip3 install -r requirements.txt 3 | 4 | test-unit: 5 | pytest -m "not e2e" --verbose 6 | 7 | test-e2e: 8 | pytest -m e2e --verbose 9 | 10 | install: 11 | python3 -m pip install . 12 | 13 | gen-proto: 14 | curl -o durabletask/internal/orchestrator_service.proto https://raw.githubusercontent.com/microsoft/durabletask-protobuf/refs/heads/main/protos/orchestrator_service.proto 15 | curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/microsoft/durabletask-protobuf/commits?path=protos/orchestrator_service.proto&sha=main&per_page=1" | jq -r '.[0].sha' > durabletask/internal/PROTO_SOURCE_COMMIT_HASH 16 | python3 -m grpc_tools.protoc --proto_path=. --python_out=. --pyi_out=. --grpc_python_out=.
./durabletask/internal/orchestrator_service.proto 17 | rm durabletask/internal/*.proto 18 | 19 | .PHONY: init test-unit test-e2e gen-proto install 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Durable Task SDK for Python 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) 4 | [![Build Validation](https://github.com/microsoft/durabletask-python/actions/workflows/durabletask.yml/badge.svg)](https://github.com/microsoft/durabletask-python/actions/workflows/durabletask.yml) 5 | [![PyPI version](https://badge.fury.io/py/durabletask.svg)](https://badge.fury.io/py/durabletask) 6 | 7 | This repo contains a Python SDK for use with the [Azure Durable Task Scheduler](https://techcommunity.microsoft.com/blog/appsonazureblog/announcing-limited-early-access-of-the-durable-task-scheduler-for-azure-durable-/4286526) and the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go). With this SDK, you can define, schedule, and manage durable orchestrations using ordinary Python code. 8 | 9 | ⚠️ **This SDK is currently under active development and is not yet ready for production use.** ⚠️ 10 | 11 | > Note that this SDK is **not** currently compatible with [Azure Durable Functions](https://docs.microsoft.com/azure/azure-functions/durable/durable-functions-overview). If you are looking for a Python SDK for Azure Durable Functions, please see [this repo](https://github.com/Azure/azure-functions-durable-python). 12 | 13 | ## Supported patterns 14 | 15 | The following orchestration patterns are currently supported. 16 | 17 | ### Function chaining 18 | 19 | An orchestration can chain a sequence of function calls using the following syntax: 20 | 21 | ```python 22 | # simple activity function that returns a greeting 23 | def hello(ctx: task.ActivityContext, name: str) -> str: 24 | return f'Hello {name}!' 25 | 26 | # orchestrator function that sequences the activity calls 27 | def sequence(ctx: task.OrchestrationContext, _): 28 | result1 = yield ctx.call_activity(hello, input='Tokyo') 29 | result2 = yield ctx.call_activity(hello, input='Seattle') 30 | result3 = yield ctx.call_activity(hello, input='London') 31 | 32 | return [result1, result2, result3] 33 | ``` 34 | 35 | You can find the full sample [here](./examples/activity_sequence.py). 36 | 37 | ### Fan-out/fan-in 38 | 39 | An orchestration can fan-out a dynamic number of function calls in parallel and then fan-in the results using the following syntax: 40 | 41 | ```python 42 | # activity function for getting the list of work items 43 | def get_work_items(ctx: task.ActivityContext, _) -> List[str]: 44 | # ... 45 | 46 | # activity function for processing a single work item 47 | def process_work_item(ctx: task.ActivityContext, item: str) -> int: 48 | # ...
49 | 50 | # orchestrator function that fans-out the work items and then fans-in the results 51 | def orchestrator(ctx: task.OrchestrationContext, _): 52 | # the number of work-items is unknown in advance 53 | work_items = yield ctx.call_activity(get_work_items) 54 | 55 | # fan-out: schedule the work items in parallel and wait for all of them to complete 56 | tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items] 57 | results = yield task.when_all(tasks) 58 | 59 | # fan-in: summarize and return the results 60 | return {'work_items': work_items, 'results': results, 'total': sum(results)} 61 | ``` 62 | 63 | You can find the full sample [here](./examples/fanout_fanin.py). 64 | 65 | ### Human interaction and durable timers 66 | 67 | An orchestration can wait for a user-defined event, such as a human approval event, before proceeding to the next step. In addition, the orchestration can create a timer with an arbitrary duration that triggers some alternate action if the external event hasn't been received: 68 | 69 | ```python 70 | def purchase_order_workflow(ctx: task.OrchestrationContext, order: Order): 71 | """Orchestrator function that represents a purchase order workflow""" 72 | # Orders under $1000 are auto-approved 73 | if order.Cost < 1000: 74 | return "Auto-approved" 75 | 76 | # Orders of $1000 or more require manager approval 77 | yield ctx.call_activity(send_approval_request, input=order) 78 | 79 | # Approvals must be received within 24 hours or they will be canceled. 80 | approval_event = ctx.wait_for_external_event("approval_received") 81 | timeout_event = ctx.create_timer(timedelta(hours=24)) 82 | winner = yield task.when_any([approval_event, timeout_event]) 83 | if winner == timeout_event: 84 | return "Canceled" 85 | 86 | # The order was approved 87 | yield ctx.call_activity(place_order, input=order) 88 | approval_details = approval_event.get_result() 89 | return f"Approved by '{approval_details.approver}'" 90 | ``` 91 | 92 | As an aside, you'll also notice that the example orchestration above works with custom business objects. Support for custom business objects includes support for custom classes, custom data classes, and named tuples. Serialization and deserialization of these objects is handled automatically by the SDK. 93 | 94 | You can find the full sample [here](./examples/human_interaction.py). 95 | 96 | ## Feature overview 97 | 98 | The following features are currently supported: 99 | 100 | ### Orchestrations 101 | 102 | Orchestrations are implemented using ordinary Python functions that take an `OrchestrationContext` as their first parameter. The `OrchestrationContext` provides APIs for starting child orchestrations, scheduling activities, and waiting for external events, among other things. Orchestrations are fault-tolerant and durable, meaning that they can automatically recover from failures and rebuild their local execution state. Orchestrator functions must be deterministic, meaning that they must always produce the same output given the same input. 103 | 104 | ### Activities 105 | 106 | Activities are implemented using ordinary Python functions that take an `ActivityContext` as their first parameter. Activity functions are scheduled by orchestrations and have at-least-once execution guarantees, meaning that they will be executed at least once but may be executed multiple times in the event of a transient failure. Activity functions are where the real "work" of any orchestration is done.
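Putting these two concepts together, the following is a minimal sketch of hosting an orchestrator and an activity in a worker process and then scheduling an instance from a client. The worker registration and lifecycle calls (`add_orchestrator`, `add_activity`, `start`, and the context-manager usage) and the default local sidecar address are assumptions based on this repo's samples; adapt them to your environment:

```python
import durabletask.client as dt_client
import durabletask.task as task
import durabletask.worker as dt_worker


def say_hello(ctx: task.ActivityContext, name: str) -> str:
    return f'Hello {name}!'


def greeter(ctx: task.OrchestrationContext, _):
    greeting = yield ctx.call_activity(say_hello, input='world')
    return greeting


# Host the functions in a worker (assumes a sidecar listening on the default local address).
with dt_worker.TaskHubGrpcWorker() as worker:
    worker.add_orchestrator(greeter)
    worker.add_activity(say_hello)
    worker.start()

    # Schedule an instance and block until it finishes (or until 30 seconds elapse).
    client = dt_client.TaskHubGrpcClient()
    instance_id = client.schedule_new_orchestration(greeter)
    state = client.wait_for_orchestration_completion(instance_id, timeout=30)
    if state:
        print(state.serialized_output)
```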
107 | 108 | ### Durable timers 109 | 110 | Orchestrations can schedule durable timers using the `create_timer` API. These timers are durable, meaning that they will survive orchestrator restarts and will fire even if the orchestrator is not actively in memory. Durable timers can be of any duration, from milliseconds to months. 111 | 112 | ### Sub-orchestrations 113 | 114 | Orchestrations can start child orchestrations using the `call_sub_orchestrator` API. Child orchestrations are useful for encapsulating complex logic and for breaking up large orchestrations into smaller, more manageable pieces. 115 | 116 | ### External events 117 | 118 | Orchestrations can wait for external events using the `wait_for_external_event` API. External events are useful for implementing human interaction patterns, such as waiting for a user to approve an order before continuing. 119 | 120 | ### Continue-as-new (TODO) 121 | 122 | Orchestrations can be continued as new using the `continue_as_new` API. This API allows an orchestration to restart itself from scratch, optionally with a new input. 123 | 124 | ### Suspend, resume, and terminate 125 | 126 | Orchestrations can be suspended using the `suspend_orchestration` client API and will remain suspended until resumed using the `resume_orchestration` client API. A suspended orchestration will stop processing new events, but will continue to buffer any that happen to arrive until resumed, ensuring that no data is lost. An orchestration can also be terminated using the `terminate_orchestration` client API. Terminated orchestrations will stop processing new events and will discard any buffered events. 127 | 128 | ### Retry policies (TODO) 129 | 130 | Orchestrations can specify retry policies for activities and sub-orchestrations. These policies control how many times and how frequently an activity or sub-orchestration will be retried in the event of a transient error. 131 | 132 | ## Getting Started 133 | 134 | ### Prerequisites 135 | 136 | - Python 3.9 or newer 137 | - A Durable Task-compatible sidecar, like [Dapr Workflow](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) 138 | 139 | ### Installing the Durable Task Python client SDK 140 | 141 | Installation is currently only supported from source. Ensure pip, setuptools, and wheel are up-to-date. 142 | 143 | ```sh 144 | python3 -m pip install --upgrade pip setuptools wheel 145 | ``` 146 | 147 | To install this package from source, clone this repository and run the following command from the project root: 148 | 149 | ```sh 150 | python3 -m pip install . 151 | ``` 152 | 153 | ### Run the samples 154 | 155 | See the [examples](./examples) directory for a list of sample orchestrations and instructions on how to run them. 156 | 157 | ## Development 158 | 159 | The following sections provide more information about how to develop this project. Note that development commands require that `make` is installed on your local machine. If you're using Windows, you can install `make` using [Chocolatey](https://chocolatey.org/) or use WSL. 160 | 161 | ### Generating protobufs 162 | 163 | ```sh 164 | pip3 install -r dev-requirements.txt 165 | make gen-proto 166 | ``` 167 | 168 | This will download the `orchestrator_service.proto` from the `microsoft/durabletask-protobuf` repo and compile it using `grpcio-tools`. The version of the source proto file that was downloaded can be found in the file `durabletask/internal/PROTO_SOURCE_COMMIT_HASH`.
169 | 170 | ### Running unit tests 171 | 172 | Unit tests can be run using the following command from the project root. Unit tests _don't_ require a sidecar process to be running. 173 | 174 | ```sh 175 | make test-unit 176 | ``` 177 | 178 | ### Running E2E tests 179 | 180 | The E2E (end-to-end) tests require a sidecar process to be running. You can use the Dapr sidecar for this or run a Durable Task test sidecar using the following command: 181 | 182 | ```sh 183 | go install github.com/microsoft/durabletask-go@main 184 | durabletask-go --port 4001 185 | ``` 186 | 187 | To run the E2E tests, run the following command from the project root: 188 | 189 | ```sh 190 | make test-e2e 191 | ``` 192 | 193 | ## Contributing 194 | 195 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 196 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 197 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. 198 | 199 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 200 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 201 | provided by the bot. You will only need to do this once across all repos using our CLA. 202 | 203 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 204 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 205 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 206 | 207 | ## Trademarks 208 | 209 | This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft 210 | trademarks or logos is subject to and must follow 211 | [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). 212 | Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 213 | Any use of third-party trademarks or logos is subject to those third parties' policies. 214 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # TODO: The maintainer of this repo has not yet edited this file 2 | 3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? 4 | 5 | - **No CSS support:** Fill out this template with information about how to file issues and get help. 6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps. 7 | - **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. 8 | 9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* 10 | 11 | # Support 12 | 13 | ## How to file issues and get help 14 | 15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 17 | feature request as a new Issue. 18 | 19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 22 | 23 | ## Microsoft Support Policy 24 | 25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 
26 | -------------------------------------------------------------------------------- /dev-requirements.txt: -------------------------------------------------------------------------------- 1 | grpcio-tools 2 | -------------------------------------------------------------------------------- /durabletask-azuremanaged/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/durabletask-python/a49f1e044817865267e77fff389e3b3fd4d7b29d/durabletask-azuremanaged/__init__.py -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/durabletask-python/a49f1e044817865267e77fff389e3b3fd4d7b29d/durabletask-azuremanaged/durabletask/azuremanaged/__init__.py -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/client.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | from typing import Optional 5 | 6 | from azure.core.credentials import TokenCredential 7 | 8 | from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import ( 9 | DTSDefaultClientInterceptorImpl, 10 | ) 11 | from durabletask.client import TaskHubGrpcClient 12 | 13 | 14 | # Client class used for Durable Task Scheduler (DTS) 15 | class DurableTaskSchedulerClient(TaskHubGrpcClient): 16 | def __init__(self, *, 17 | host_address: str, 18 | taskhub: str, 19 | token_credential: Optional[TokenCredential], 20 | secure_channel: bool = True): 21 | 22 | if not taskhub: 23 | raise ValueError("Taskhub value cannot be empty. Please provide a value for your taskhub") 24 | 25 | interceptors = [DTSDefaultClientInterceptorImpl(token_credential, taskhub)] 26 | 27 | # We pass in None for the metadata so we don't construct an additional interceptor in the parent class 28 | # Since the parent class doesn't use the metadata for anything else, we can set it to None 29 | super().__init__( 30 | host_address=host_address, 31 | secure_channel=secure_channel, 32 | metadata=None, 33 | interceptors=interceptors) 34 | -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/internal/access_token_manager.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
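# This module caches an Azure access token for the Durable Task Scheduler scope and
# refreshes it shortly before expiry; the DTS gRPC interceptor re-reads the token before
# each call so that long-lived channels keep a valid authorization header.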
3 | from datetime import datetime, timedelta, timezone 4 | from typing import Optional 5 | 6 | from azure.core.credentials import AccessToken, TokenCredential 7 | 8 | import durabletask.internal.shared as shared 9 | 10 | 11 | # By default, when there are 10 minutes left before the token expires, refresh the token 12 | class AccessTokenManager: 13 | 14 | _token: Optional[AccessToken] 15 | 16 | def __init__(self, token_credential: Optional[TokenCredential], refresh_interval_seconds: int = 600): 17 | self._scope = "https://durabletask.io/.default" 18 | self._refresh_interval_seconds = refresh_interval_seconds 19 | self._logger = shared.get_logger("token_manager") 20 | 21 | self._credential = token_credential 22 | 23 | if self._credential is not None: 24 | self._token = self._credential.get_token(self._scope) 25 | self.expiry_time = datetime.fromtimestamp(self._token.expires_on, tz=timezone.utc) 26 | else: 27 | self._token = None 28 | self.expiry_time = None 29 | 30 | def get_access_token(self) -> Optional[AccessToken]: 31 | if self._token is None or self.is_token_expired(): 32 | self.refresh_token() 33 | return self._token 34 | 35 | # Checks if the token is expired, or if it will expire in the next "refresh_interval_seconds" seconds. 36 | # For example, if the token is created to have a lifespan of 2 hours, and the refresh buffer is set to 30 minutes, 37 | # we will grab a new token when there are 30 minutes left in the lifespan of the token 38 | def is_token_expired(self) -> bool: 39 | if self.expiry_time is None: 40 | return True 41 | return datetime.now(timezone.utc) >= (self.expiry_time - timedelta(seconds=self._refresh_interval_seconds)) 42 | 43 | def refresh_token(self): 44 | if self._credential is not None: 45 | self._token = self._credential.get_token(self._scope) 46 | 47 | # Convert UNIX timestamp to timezone-aware datetime 48 | self.expiry_time = datetime.fromtimestamp(self._token.expires_on, tz=timezone.utc) 49 | self._logger.debug(f"Token refreshed. Expires at: {self.expiry_time}") 50 | -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/internal/durabletask_grpc_interceptor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
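# This module defines the DTS gRPC client interceptor. It extends DefaultClientInterceptorImpl
# with DTS-specific metadata: the task hub name, an x-user-agent header carrying the SDK
# version, and (when a credential is supplied) a bearer token kept fresh by AccessTokenManager.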
3 | 4 | from importlib.metadata import version 5 | from typing import Optional 6 | 7 | import grpc 8 | from azure.core.credentials import TokenCredential 9 | 10 | from durabletask.azuremanaged.internal.access_token_manager import AccessTokenManager 11 | from durabletask.internal.grpc_interceptor import ( 12 | DefaultClientInterceptorImpl, 13 | _ClientCallDetails, 14 | ) 15 | 16 | 17 | class DTSDefaultClientInterceptorImpl(DefaultClientInterceptorImpl): 18 | """The class implements the UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, 19 | StreamUnaryClientInterceptor and StreamStreamClientInterceptor interfaces from grpc to 20 | add additional headers to all calls as needed.""" 21 | 22 | def __init__(self, token_credential: Optional[TokenCredential], taskhub_name: str): 23 | try: 24 | # Get the version of the azuremanaged package 25 | sdk_version = version('durabletask-azuremanaged') 26 | except Exception: 27 | # Fallback if version cannot be determined 28 | sdk_version = "unknown" 29 | user_agent = f"durabletask-python/{sdk_version}" 30 | self._metadata = [ 31 | ("taskhub", taskhub_name), 32 | ("x-user-agent", user_agent)] # 'user-agent' is a reserved header in grpc, so we use 'x-user-agent' instead 33 | super().__init__(self._metadata) 34 | 35 | if token_credential is not None: 36 | self._token_credential = token_credential 37 | self._token_manager = AccessTokenManager(token_credential=self._token_credential) 38 | access_token = self._token_manager.get_access_token() 39 | if access_token is not None: 40 | self._metadata.append(("authorization", f"Bearer {access_token.token}")) 41 | 42 | def _intercept_call( 43 | self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: 44 | """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC 45 | call details.""" 46 | # Refresh the auth token if it is present and needed 47 | if self._metadata is not None: 48 | for i, (key, _) in enumerate(self._metadata): 49 | if key.lower() == "authorization": # Ensure case-insensitive comparison 50 | new_token = self._token_manager.get_access_token() # Get the new token 51 | if new_token is not None: 52 | self._metadata[i] = ("authorization", f"Bearer {new_token.token}") # Update the token 53 | 54 | return super()._intercept_call(client_call_details) 55 | -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/durabletask-python/a49f1e044817865267e77fff389e3b3fd4d7b29d/durabletask-azuremanaged/durabletask/azuremanaged/internal/py.typed -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/durabletask-python/a49f1e044817865267e77fff389e3b3fd4d7b29d/durabletask-azuremanaged/durabletask/azuremanaged/py.typed -------------------------------------------------------------------------------- /durabletask-azuremanaged/durabletask/azuremanaged/worker.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
3 | 4 | from typing import Optional 5 | 6 | from azure.core.credentials import TokenCredential 7 | 8 | from durabletask.azuremanaged.internal.durabletask_grpc_interceptor import \ 9 | DTSDefaultClientInterceptorImpl 10 | from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker 11 | 12 | 13 | # Worker class used for Durable Task Scheduler (DTS) 14 | class DurableTaskSchedulerWorker(TaskHubGrpcWorker): 15 | """A worker implementation for Azure Durable Task Scheduler (DTS). 16 | 17 | This class extends TaskHubGrpcWorker to provide integration with Azure's 18 | Durable Task Scheduler service. It handles authentication via Azure credentials 19 | and configures the necessary gRPC interceptors for DTS communication. 20 | 21 | Args: 22 | host_address (str): The gRPC endpoint address of the DTS service. 23 | taskhub (str): The name of the task hub. Cannot be empty. 24 | token_credential (Optional[TokenCredential]): Azure credential for authentication. 25 | If None, anonymous authentication will be used. 26 | secure_channel (bool, optional): Whether to use a secure gRPC channel (TLS). 27 | Defaults to True. 28 | concurrency_options (Optional[ConcurrencyOptions], optional): Configuration 29 | for controlling worker concurrency limits. If None, default concurrency 30 | settings will be used. 31 | 32 | Raises: 33 | ValueError: If taskhub is empty or None. 34 | 35 | Example: 36 | >>> from azure.identity import DefaultAzureCredential 37 | >>> from durabletask.azuremanaged import DurableTaskSchedulerWorker 38 | >>> from durabletask.worker import ConcurrencyOptions 39 | >>> 40 | >>> credential = DefaultAzureCredential() 41 | >>> concurrency = ConcurrencyOptions(max_concurrent_activities=10) 42 | >>> worker = DurableTaskSchedulerWorker( 43 | ... host_address="my-dts-service.azure.com:443", 44 | ... taskhub="my-task-hub", 45 | ... token_credential=credential, 46 | ... concurrency_options=concurrency 47 | ... ) 48 | 49 | Note: 50 | This worker automatically configures DTS-specific gRPC interceptors 51 | for authentication and task hub routing. The parent class metadata 52 | parameter is set to None since authentication is handled by the 53 | DTS interceptor. 54 | """ 55 | def __init__(self, *, 56 | host_address: str, 57 | taskhub: str, 58 | token_credential: Optional[TokenCredential], 59 | secure_channel: bool = True, 60 | concurrency_options: Optional[ConcurrencyOptions] = None): 61 | 62 | if not taskhub: 63 | raise ValueError("The taskhub value cannot be empty.") 64 | 65 | interceptors = [DTSDefaultClientInterceptorImpl(token_credential, taskhub)] 66 | 67 | # We pass in None for the metadata so we don't construct an additional interceptor in the parent class 68 | # Since the parent class doesn't use the metadata for anything else, we can set it to None 69 | super().__init__( 70 | host_address=host_address, 71 | secure_channel=secure_channel, 72 | metadata=None, 73 | interceptors=interceptors, 74 | concurrency_options=concurrency_options) 75 | -------------------------------------------------------------------------------- /durabletask-azuremanaged/pyproject.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
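# This distribution ships the durabletask.azuremanaged namespace package and layers on top
# of the core durabletask package, which is declared as a dependency below.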
3 | 4 | # For more information on pyproject.toml, see https://peps.python.org/pep-0621/ 5 | 6 | [build-system] 7 | requires = ["setuptools", "wheel"] 8 | build-backend = "setuptools.build_meta" 9 | 10 | [project] 11 | name = "durabletask.azuremanaged" 12 | version = "0.2.0" 13 | description = "Durable Task Python SDK provider implementation for the Azure Durable Task Scheduler" 14 | keywords = [ 15 | "durable", 16 | "task", 17 | "workflow", 18 | "azure" 19 | ] 20 | classifiers = [ 21 | "Development Status :: 3 - Alpha", 22 | "Programming Language :: Python :: 3", 23 | "License :: OSI Approved :: MIT License", 24 | ] 25 | requires-python = ">=3.9" 26 | license = {file = "LICENSE"} 27 | readme = "README.md" 28 | dependencies = [ 29 | "durabletask>=0.3.0", 30 | "azure-identity>=1.19.0" 31 | ] 32 | 33 | [project.urls] 34 | repository = "https://github.com/microsoft/durabletask-python" 35 | changelog = "https://github.com/microsoft/durabletask-python/blob/main/CHANGELOG.md" 36 | 37 | [tool.setuptools.packages.find] 38 | include = ["durabletask.azuremanaged", "durabletask.azuremanaged.*"] 39 | 40 | [tool.pytest.ini_options] 41 | minversion = "6.0" 42 | -------------------------------------------------------------------------------- /durabletask/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | """Durable Task SDK for Python""" 5 | 6 | from durabletask.worker import ConcurrencyOptions 7 | 8 | __all__ = ["ConcurrencyOptions"] 9 | 10 | PACKAGE_NAME = "durabletask" 11 | -------------------------------------------------------------------------------- /durabletask/client.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
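# This module implements the task hub client: it wraps the TaskHubSidecarService gRPC stub
# with typed helpers for scheduling, querying, raising events to, and managing orchestration
# instances.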
3 | 4 | import logging 5 | import uuid 6 | from dataclasses import dataclass 7 | from datetime import datetime 8 | from enum import Enum 9 | from typing import Any, Optional, Sequence, TypeVar, Union 10 | 11 | import grpc 12 | from google.protobuf import wrappers_pb2 13 | 14 | import durabletask.internal.helpers as helpers 15 | import durabletask.internal.orchestrator_service_pb2 as pb 16 | import durabletask.internal.orchestrator_service_pb2_grpc as stubs 17 | import durabletask.internal.shared as shared 18 | from durabletask import task 19 | from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl 20 | 21 | TInput = TypeVar('TInput') 22 | TOutput = TypeVar('TOutput') 23 | 24 | 25 | class OrchestrationStatus(Enum): 26 | """The status of an orchestration instance.""" 27 | RUNNING = pb.ORCHESTRATION_STATUS_RUNNING 28 | COMPLETED = pb.ORCHESTRATION_STATUS_COMPLETED 29 | FAILED = pb.ORCHESTRATION_STATUS_FAILED 30 | TERMINATED = pb.ORCHESTRATION_STATUS_TERMINATED 31 | CONTINUED_AS_NEW = pb.ORCHESTRATION_STATUS_CONTINUED_AS_NEW 32 | PENDING = pb.ORCHESTRATION_STATUS_PENDING 33 | SUSPENDED = pb.ORCHESTRATION_STATUS_SUSPENDED 34 | 35 | def __str__(self): 36 | return helpers.get_orchestration_status_str(self.value) 37 | 38 | 39 | @dataclass 40 | class OrchestrationState: 41 | instance_id: str 42 | name: str 43 | runtime_status: OrchestrationStatus 44 | created_at: datetime 45 | last_updated_at: datetime 46 | serialized_input: Optional[str] 47 | serialized_output: Optional[str] 48 | serialized_custom_status: Optional[str] 49 | failure_details: Optional[task.FailureDetails] 50 | 51 | def raise_if_failed(self): 52 | if self.failure_details is not None: 53 | raise OrchestrationFailedError( 54 | f"Orchestration '{self.instance_id}' failed: {self.failure_details.message}", 55 | self.failure_details) 56 | 57 | 58 | class OrchestrationFailedError(Exception): 59 | def __init__(self, message: str, failure_details: task.FailureDetails): 60 | super().__init__(message) 61 | self._failure_details = failure_details 62 | 63 | @property 64 | def failure_details(self): 65 | return self._failure_details 66 | 67 | 68 | def new_orchestration_state(instance_id: str, res: pb.GetInstanceResponse) -> Optional[OrchestrationState]: 69 | if not res.exists: 70 | return None 71 | 72 | state = res.orchestrationState 73 | 74 | failure_details = None 75 | if state.failureDetails.errorMessage != '' or state.failureDetails.errorType != '': 76 | failure_details = task.FailureDetails( 77 | state.failureDetails.errorMessage, 78 | state.failureDetails.errorType, 79 | state.failureDetails.stackTrace.value if not helpers.is_empty(state.failureDetails.stackTrace) else None) 80 | 81 | return OrchestrationState( 82 | instance_id, 83 | state.name, 84 | OrchestrationStatus(state.orchestrationStatus), 85 | state.createdTimestamp.ToDatetime(), 86 | state.lastUpdatedTimestamp.ToDatetime(), 87 | state.input.value if not helpers.is_empty(state.input) else None, 88 | state.output.value if not helpers.is_empty(state.output) else None, 89 | state.customStatus.value if not helpers.is_empty(state.customStatus) else None, 90 | failure_details) 91 | 92 | 93 | class TaskHubGrpcClient: 94 | 95 | def __init__(self, *, 96 | host_address: Optional[str] = None, 97 | metadata: Optional[list[tuple[str, str]]] = None, 98 | log_handler: Optional[logging.Handler] = None, 99 | log_formatter: Optional[logging.Formatter] = None, 100 | secure_channel: bool = False, 101 | interceptors: Optional[Sequence[shared.ClientInterceptor]] = None): 102 | 103 
| # If the caller provided metadata, we need to create a new interceptor for it and 104 | # add it to the list of interceptors. 105 | if interceptors is not None: 106 | interceptors = list(interceptors) 107 | if metadata is not None: 108 | interceptors.append(DefaultClientInterceptorImpl(metadata)) 109 | elif metadata is not None: 110 | interceptors = [DefaultClientInterceptorImpl(metadata)] 111 | else: 112 | interceptors = None 113 | 114 | channel = shared.get_grpc_channel( 115 | host_address=host_address, 116 | secure_channel=secure_channel, 117 | interceptors=interceptors 118 | ) 119 | self._stub = stubs.TaskHubSidecarServiceStub(channel) 120 | self._logger = shared.get_logger("client", log_handler, log_formatter) 121 | 122 | def schedule_new_orchestration(self, orchestrator: Union[task.Orchestrator[TInput, TOutput], str], *, 123 | input: Optional[TInput] = None, 124 | instance_id: Optional[str] = None, 125 | start_at: Optional[datetime] = None, 126 | reuse_id_policy: Optional[pb.OrchestrationIdReusePolicy] = None) -> str: 127 | 128 | name = orchestrator if isinstance(orchestrator, str) else task.get_name(orchestrator) 129 | 130 | req = pb.CreateInstanceRequest( 131 | name=name, 132 | instanceId=instance_id if instance_id else uuid.uuid4().hex, 133 | input=wrappers_pb2.StringValue(value=shared.to_json(input)) if input is not None else None, 134 | scheduledStartTimestamp=helpers.new_timestamp(start_at) if start_at else None, 135 | version=wrappers_pb2.StringValue(value=""), 136 | orchestrationIdReusePolicy=reuse_id_policy, 137 | ) 138 | 139 | self._logger.info(f"Starting new '{name}' instance with ID = '{req.instanceId}'.") 140 | res: pb.CreateInstanceResponse = self._stub.StartInstance(req) 141 | return res.instanceId 142 | 143 | def get_orchestration_state(self, instance_id: str, *, fetch_payloads: bool = True) -> Optional[OrchestrationState]: 144 | req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) 145 | res: pb.GetInstanceResponse = self._stub.GetInstance(req) 146 | return new_orchestration_state(req.instanceId, res) 147 | 148 | def wait_for_orchestration_start(self, instance_id: str, *, 149 | fetch_payloads: bool = False, 150 | timeout: int = 60) -> Optional[OrchestrationState]: 151 | req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) 152 | try: 153 | self._logger.info(f"Waiting up to {timeout}s for instance '{instance_id}' to start.") 154 | res: pb.GetInstanceResponse = self._stub.WaitForInstanceStart(req, timeout=timeout) 155 | return new_orchestration_state(req.instanceId, res) 156 | except grpc.RpcError as rpc_error: 157 | if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED: # type: ignore 158 | # Replace gRPC error with the built-in TimeoutError 159 | raise TimeoutError("Timed-out waiting for the orchestration to start") 160 | else: 161 | raise 162 | 163 | def wait_for_orchestration_completion(self, instance_id: str, *, 164 | fetch_payloads: bool = True, 165 | timeout: int = 60) -> Optional[OrchestrationState]: 166 | req = pb.GetInstanceRequest(instanceId=instance_id, getInputsAndOutputs=fetch_payloads) 167 | try: 168 | self._logger.info(f"Waiting {timeout}s for instance '{instance_id}' to complete.") 169 | res: pb.GetInstanceResponse = self._stub.WaitForInstanceCompletion(req, timeout=timeout) 170 | state = new_orchestration_state(req.instanceId, res) 171 | if not state: 172 | return None 173 | 174 | if state.runtime_status == OrchestrationStatus.FAILED and state.failure_details is not None: 175 | 
details = state.failure_details
176 |                 self._logger.info(f"Instance '{instance_id}' failed: [{details.error_type}] {details.message}")
177 |             elif state.runtime_status == OrchestrationStatus.TERMINATED:
178 |                 self._logger.info(f"Instance '{instance_id}' was terminated.")
179 |             elif state.runtime_status == OrchestrationStatus.COMPLETED:
180 |                 self._logger.info(f"Instance '{instance_id}' completed.")
181 | 
182 |             return state
183 |         except grpc.RpcError as rpc_error:
184 |             if rpc_error.code() == grpc.StatusCode.DEADLINE_EXCEEDED:  # type: ignore
185 |                 # Replace gRPC error with the built-in TimeoutError
186 |                 raise TimeoutError("Timed-out waiting for the orchestration to complete")
187 |             else:
188 |                 raise
189 | 
190 |     def raise_orchestration_event(self, instance_id: str, event_name: str, *,
191 |                                   data: Optional[Any] = None):
192 |         req = pb.RaiseEventRequest(
193 |             instanceId=instance_id,
194 |             name=event_name,
195 |             input=wrappers_pb2.StringValue(value=shared.to_json(data)) if data is not None else None)
196 | 
197 |         self._logger.info(f"Raising event '{event_name}' for instance '{instance_id}'.")
198 |         self._stub.RaiseEvent(req)
199 | 
200 |     def terminate_orchestration(self, instance_id: str, *,
201 |                                 output: Optional[Any] = None,
202 |                                 recursive: bool = True):
203 |         req = pb.TerminateRequest(
204 |             instanceId=instance_id,
205 |             output=wrappers_pb2.StringValue(value=shared.to_json(output)) if output is not None else None,
206 |             recursive=recursive)
207 | 
208 |         self._logger.info(f"Terminating instance '{instance_id}'.")
209 |         self._stub.TerminateInstance(req)
210 | 
211 |     def suspend_orchestration(self, instance_id: str):
212 |         req = pb.SuspendRequest(instanceId=instance_id)
213 |         self._logger.info(f"Suspending instance '{instance_id}'.")
214 |         self._stub.SuspendInstance(req)
215 | 
216 |     def resume_orchestration(self, instance_id: str):
217 |         req = pb.ResumeRequest(instanceId=instance_id)
218 |         self._logger.info(f"Resuming instance '{instance_id}'.")
219 |         self._stub.ResumeInstance(req)
220 | 
221 |     def purge_orchestration(self, instance_id: str, recursive: bool = True):
222 |         req = pb.PurgeInstancesRequest(instanceId=instance_id, recursive=recursive)
223 |         self._logger.info(f"Purging instance '{instance_id}'.")
224 |         self._stub.PurgeInstances(req)
225 | 
--------------------------------------------------------------------------------
/durabletask/internal/PROTO_SOURCE_COMMIT_HASH:
--------------------------------------------------------------------------------
1 | 443b333f4f65a438dc9eb4f090560d232afec4b7
2 | 
--------------------------------------------------------------------------------
/durabletask/internal/grpc_interceptor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation.
2 | # Licensed under the MIT License.
3 | 
4 | from collections import namedtuple
5 | 
6 | import grpc
7 | 
8 | 
9 | class _ClientCallDetails(
10 |         namedtuple(
11 |             '_ClientCallDetails',
12 |             ['method', 'timeout', 'metadata', 'credentials', 'wait_for_ready', 'compression']),
13 |         grpc.ClientCallDetails):
14 |     """This is an implementation of the ClientCallDetails interface needed for interceptors.
15 |     This class takes six named values and inherits ClientCallDetails from the grpc package.
16 |     It encloses the values that describe an RPC to be invoked.
17 | """ 18 | pass 19 | 20 | 21 | class DefaultClientInterceptorImpl ( 22 | grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, 23 | grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): 24 | """The class implements a UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor, 25 | StreamUnaryClientInterceptor and StreamStreamClientInterceptor from grpc to add an 26 | interceptor to add additional headers to all calls as needed.""" 27 | 28 | def __init__(self, metadata: list[tuple[str, str]]): 29 | super().__init__() 30 | self._metadata = metadata 31 | 32 | def _intercept_call( 33 | self, client_call_details: _ClientCallDetails) -> grpc.ClientCallDetails: 34 | """Internal intercept_call implementation which adds metadata to grpc metadata in the RPC 35 | call details.""" 36 | if self._metadata is None: 37 | return client_call_details 38 | 39 | if client_call_details.metadata is not None: 40 | metadata = list(client_call_details.metadata) 41 | else: 42 | metadata = [] 43 | 44 | metadata.extend(self._metadata) 45 | client_call_details = _ClientCallDetails( 46 | client_call_details.method, client_call_details.timeout, metadata, 47 | client_call_details.credentials, client_call_details.wait_for_ready, client_call_details.compression) 48 | 49 | return client_call_details 50 | 51 | def intercept_unary_unary(self, continuation, client_call_details, request): 52 | new_client_call_details = self._intercept_call(client_call_details) 53 | return continuation(new_client_call_details, request) 54 | 55 | def intercept_unary_stream(self, continuation, client_call_details, request): 56 | new_client_call_details = self._intercept_call(client_call_details) 57 | return continuation(new_client_call_details, request) 58 | 59 | def intercept_stream_unary(self, continuation, client_call_details, request): 60 | new_client_call_details = self._intercept_call(client_call_details) 61 | return continuation(new_client_call_details, request) 62 | 63 | def intercept_stream_stream(self, continuation, client_call_details, request): 64 | new_client_call_details = self._intercept_call(client_call_details) 65 | return continuation(new_client_call_details, request) 66 | -------------------------------------------------------------------------------- /durabletask/internal/helpers.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
3 | 4 | import traceback 5 | from datetime import datetime 6 | from typing import Optional 7 | 8 | from google.protobuf import timestamp_pb2, wrappers_pb2 9 | 10 | import durabletask.internal.orchestrator_service_pb2 as pb 11 | 12 | # TODO: The new_xxx_event methods are only used by test code and should be moved elsewhere 13 | 14 | 15 | def new_orchestrator_started_event(timestamp: Optional[datetime] = None) -> pb.HistoryEvent: 16 | ts = timestamp_pb2.Timestamp() 17 | if timestamp is not None: 18 | ts.FromDatetime(timestamp) 19 | return pb.HistoryEvent(eventId=-1, timestamp=ts, orchestratorStarted=pb.OrchestratorStartedEvent()) 20 | 21 | 22 | def new_execution_started_event(name: str, instance_id: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: 23 | return pb.HistoryEvent( 24 | eventId=-1, 25 | timestamp=timestamp_pb2.Timestamp(), 26 | executionStarted=pb.ExecutionStartedEvent( 27 | name=name, 28 | input=get_string_value(encoded_input), 29 | orchestrationInstance=pb.OrchestrationInstance(instanceId=instance_id))) 30 | 31 | 32 | def new_timer_created_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent: 33 | ts = timestamp_pb2.Timestamp() 34 | ts.FromDatetime(fire_at) 35 | return pb.HistoryEvent( 36 | eventId=timer_id, 37 | timestamp=timestamp_pb2.Timestamp(), 38 | timerCreated=pb.TimerCreatedEvent(fireAt=ts) 39 | ) 40 | 41 | 42 | def new_timer_fired_event(timer_id: int, fire_at: datetime) -> pb.HistoryEvent: 43 | ts = timestamp_pb2.Timestamp() 44 | ts.FromDatetime(fire_at) 45 | return pb.HistoryEvent( 46 | eventId=-1, 47 | timestamp=timestamp_pb2.Timestamp(), 48 | timerFired=pb.TimerFiredEvent(fireAt=ts, timerId=timer_id) 49 | ) 50 | 51 | 52 | def new_task_scheduled_event(event_id: int, name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: 53 | return pb.HistoryEvent( 54 | eventId=event_id, 55 | timestamp=timestamp_pb2.Timestamp(), 56 | taskScheduled=pb.TaskScheduledEvent(name=name, input=get_string_value(encoded_input)) 57 | ) 58 | 59 | 60 | def new_task_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: 61 | return pb.HistoryEvent( 62 | eventId=-1, 63 | timestamp=timestamp_pb2.Timestamp(), 64 | taskCompleted=pb.TaskCompletedEvent(taskScheduledId=event_id, result=get_string_value(encoded_output)) 65 | ) 66 | 67 | 68 | def new_task_failed_event(event_id: int, ex: Exception) -> pb.HistoryEvent: 69 | return pb.HistoryEvent( 70 | eventId=-1, 71 | timestamp=timestamp_pb2.Timestamp(), 72 | taskFailed=pb.TaskFailedEvent(taskScheduledId=event_id, failureDetails=new_failure_details(ex)) 73 | ) 74 | 75 | 76 | def new_sub_orchestration_created_event( 77 | event_id: int, 78 | name: str, 79 | instance_id: str, 80 | encoded_input: Optional[str] = None) -> pb.HistoryEvent: 81 | return pb.HistoryEvent( 82 | eventId=event_id, 83 | timestamp=timestamp_pb2.Timestamp(), 84 | subOrchestrationInstanceCreated=pb.SubOrchestrationInstanceCreatedEvent( 85 | name=name, 86 | input=get_string_value(encoded_input), 87 | instanceId=instance_id) 88 | ) 89 | 90 | 91 | def new_sub_orchestration_completed_event(event_id: int, encoded_output: Optional[str] = None) -> pb.HistoryEvent: 92 | return pb.HistoryEvent( 93 | eventId=-1, 94 | timestamp=timestamp_pb2.Timestamp(), 95 | subOrchestrationInstanceCompleted=pb.SubOrchestrationInstanceCompletedEvent( 96 | result=get_string_value(encoded_output), 97 | taskScheduledId=event_id) 98 | ) 99 | 100 | 101 | def new_sub_orchestration_failed_event(event_id: int, ex: Exception) -> pb.HistoryEvent: 102 | return 
pb.HistoryEvent( 103 | eventId=-1, 104 | timestamp=timestamp_pb2.Timestamp(), 105 | subOrchestrationInstanceFailed=pb.SubOrchestrationInstanceFailedEvent( 106 | failureDetails=new_failure_details(ex), 107 | taskScheduledId=event_id) 108 | ) 109 | 110 | 111 | def new_failure_details(ex: Exception) -> pb.TaskFailureDetails: 112 | return pb.TaskFailureDetails( 113 | errorType=type(ex).__name__, 114 | errorMessage=str(ex), 115 | stackTrace=wrappers_pb2.StringValue(value=''.join(traceback.format_tb(ex.__traceback__))) 116 | ) 117 | 118 | 119 | def new_event_raised_event(name: str, encoded_input: Optional[str] = None) -> pb.HistoryEvent: 120 | return pb.HistoryEvent( 121 | eventId=-1, 122 | timestamp=timestamp_pb2.Timestamp(), 123 | eventRaised=pb.EventRaisedEvent(name=name, input=get_string_value(encoded_input)) 124 | ) 125 | 126 | 127 | def new_suspend_event() -> pb.HistoryEvent: 128 | return pb.HistoryEvent( 129 | eventId=-1, 130 | timestamp=timestamp_pb2.Timestamp(), 131 | executionSuspended=pb.ExecutionSuspendedEvent() 132 | ) 133 | 134 | 135 | def new_resume_event() -> pb.HistoryEvent: 136 | return pb.HistoryEvent( 137 | eventId=-1, 138 | timestamp=timestamp_pb2.Timestamp(), 139 | executionResumed=pb.ExecutionResumedEvent() 140 | ) 141 | 142 | 143 | def new_terminated_event(*, encoded_output: Optional[str] = None) -> pb.HistoryEvent: 144 | return pb.HistoryEvent( 145 | eventId=-1, 146 | timestamp=timestamp_pb2.Timestamp(), 147 | executionTerminated=pb.ExecutionTerminatedEvent( 148 | input=get_string_value(encoded_output) 149 | ) 150 | ) 151 | 152 | 153 | def get_string_value(val: Optional[str]) -> Optional[wrappers_pb2.StringValue]: 154 | if val is None: 155 | return None 156 | else: 157 | return wrappers_pb2.StringValue(value=val) 158 | 159 | 160 | def new_complete_orchestration_action( 161 | id: int, 162 | status: pb.OrchestrationStatus, 163 | result: Optional[str] = None, 164 | failure_details: Optional[pb.TaskFailureDetails] = None, 165 | carryover_events: Optional[list[pb.HistoryEvent]] = None) -> pb.OrchestratorAction: 166 | completeOrchestrationAction = pb.CompleteOrchestrationAction( 167 | orchestrationStatus=status, 168 | result=get_string_value(result), 169 | failureDetails=failure_details, 170 | carryoverEvents=carryover_events) 171 | 172 | return pb.OrchestratorAction(id=id, completeOrchestration=completeOrchestrationAction) 173 | 174 | 175 | def new_create_timer_action(id: int, fire_at: datetime) -> pb.OrchestratorAction: 176 | timestamp = timestamp_pb2.Timestamp() 177 | timestamp.FromDatetime(fire_at) 178 | return pb.OrchestratorAction(id=id, createTimer=pb.CreateTimerAction(fireAt=timestamp)) 179 | 180 | 181 | def new_schedule_task_action(id: int, name: str, encoded_input: Optional[str]) -> pb.OrchestratorAction: 182 | return pb.OrchestratorAction(id=id, scheduleTask=pb.ScheduleTaskAction( 183 | name=name, 184 | input=get_string_value(encoded_input) 185 | )) 186 | 187 | 188 | def new_timestamp(dt: datetime) -> timestamp_pb2.Timestamp: 189 | ts = timestamp_pb2.Timestamp() 190 | ts.FromDatetime(dt) 191 | return ts 192 | 193 | 194 | def new_create_sub_orchestration_action( 195 | id: int, 196 | name: str, 197 | instance_id: Optional[str], 198 | encoded_input: Optional[str]) -> pb.OrchestratorAction: 199 | return pb.OrchestratorAction(id=id, createSubOrchestration=pb.CreateSubOrchestrationAction( 200 | name=name, 201 | instanceId=instance_id, 202 | input=get_string_value(encoded_input) 203 | )) 204 | 205 | 206 | def is_empty(v: wrappers_pb2.StringValue): 207 | return v is None or 
v.value == '' 208 | 209 | 210 | def get_orchestration_status_str(status: pb.OrchestrationStatus): 211 | try: 212 | const_name = pb.OrchestrationStatus.Name(status) 213 | if const_name.startswith('ORCHESTRATION_STATUS_'): 214 | return const_name[len('ORCHESTRATION_STATUS_'):] 215 | except Exception: 216 | return "UNKNOWN" 217 | -------------------------------------------------------------------------------- /durabletask/internal/orchestrator_service_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: durabletask/internal/orchestrator_service.proto 4 | # Protobuf Python Version: 4.25.1 5 | """Generated protocol buffer code.""" 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | from google.protobuf.internal import builder as _builder 10 | # @@protoc_insertion_point(imports) 11 | 12 | _sym_db = _symbol_database.Default() 13 | 14 | 15 | from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 16 | from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 17 | from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 18 | from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 19 | 20 | 21 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/durabletask/internal/orchestrator_service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bgoogle/protobuf/empty.proto\"^\n\x15OrchestrationInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xed\x01\n\x0f\x41\x63tivityRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0e\n\x06taskId\x18\x05 \x01(\x05\x12)\n\x12parentTraceContext\x18\x06 \x01(\x0b\x32\r.TraceContext\"\xaa\x01\n\x10\x41\x63tivityResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06taskId\x18\x02 \x01(\x05\x12,\n\x06result\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x17\n\x0f\x63ompletionToken\x18\x05 \x01(\t\"\xb2\x01\n\x12TaskFailureDetails\x12\x11\n\terrorType\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\t\x12\x30\n\nstackTrace\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x0cinnerFailure\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\x12\x16\n\x0eisNonRetriable\x18\x05 \x01(\x08\"\xbf\x01\n\x12ParentInstanceInfo\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12*\n\x04name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\"i\n\x0cTraceContext\x12\x13\n\x0btraceParent\x18\x01 \x01(\t\x12\x12\n\x06spanID\x18\x02 \x01(\tB\x02\x18\x01\x12\x30\n\ntraceState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x88\x03\n\x15\x45xecutionStartedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x15orchestrationInstance\x18\x04 \x01(\x0b\x32\x16.OrchestrationInstance\x12+\n\x0eparentInstance\x18\x05 \x01(\x0b\x32\x13.ParentInstanceInfo\x12;\n\x17scheduledStartTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x12parentTraceContext\x18\x07 \x01(\x0b\x32\r.TraceContext\x12\x39\n\x13orchestrationSpanID\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xa7\x01\n\x17\x45xecutionCompletedEvent\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x03 \x01(\x0b\x32\x13.TaskFailureDetails\"X\n\x18\x45xecutionTerminatedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x02 \x01(\x08\"\xa9\x01\n\x12TaskScheduledEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x04 \x01(\x0b\x32\r.TraceContext\"[\n\x12TaskCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"W\n\x0fTaskFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"\xcf\x01\n$SubOrchestrationInstanceCreatedEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x12parentTraceContext\x18\x05 \x01(\x0b\x32\r.TraceContext\"o\n&SubOrchestrationInstanceCompletedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"k\n#SubOrchestrationInstanceFailedEvent\x12\x17\n\x0ftaskScheduledId\x18\x01 \x01(\x05\x12+\n\x0e\x66\x61ilureDetails\x18\x02 \x01(\x0b\x32\x13.TaskFailureDetails\"?\n\x11TimerCreatedEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x0fTimerFiredEvent\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07timerId\x18\x02 \x01(\x05\"\x1a\n\x18OrchestratorStartedEvent\"\x1c\n\x1aOrchestratorCompletedEvent\"_\n\x0e\x45ventSentEvent\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"M\n\x10\x45ventRaisedEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x05input\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\":\n\x0cGenericEvent\x12*\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x11HistoryStateEvent\x12/\n\x12orchestrationState\x18\x01 \x01(\x0b\x32\x13.OrchestrationState\"A\n\x12\x43ontinueAsNewEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"F\n\x17\x45xecutionSuspendedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"D\n\x15\x45xecutionResumedEvent\x12+\n\x05input\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x86\t\n\x0cHistoryEvent\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x10\x65xecutionStarted\x18\x03 \x01(\x0b\x32\x16.ExecutionStartedEventH\x00\x12\x36\n\x12\x65xecutionCompleted\x18\x04 
\x01(\x0b\x32\x18.ExecutionCompletedEventH\x00\x12\x38\n\x13\x65xecutionTerminated\x18\x05 \x01(\x0b\x32\x19.ExecutionTerminatedEventH\x00\x12,\n\rtaskScheduled\x18\x06 \x01(\x0b\x32\x13.TaskScheduledEventH\x00\x12,\n\rtaskCompleted\x18\x07 \x01(\x0b\x32\x13.TaskCompletedEventH\x00\x12&\n\ntaskFailed\x18\x08 \x01(\x0b\x32\x10.TaskFailedEventH\x00\x12P\n\x1fsubOrchestrationInstanceCreated\x18\t \x01(\x0b\x32%.SubOrchestrationInstanceCreatedEventH\x00\x12T\n!subOrchestrationInstanceCompleted\x18\n \x01(\x0b\x32\'.SubOrchestrationInstanceCompletedEventH\x00\x12N\n\x1esubOrchestrationInstanceFailed\x18\x0b \x01(\x0b\x32$.SubOrchestrationInstanceFailedEventH\x00\x12*\n\x0ctimerCreated\x18\x0c \x01(\x0b\x32\x12.TimerCreatedEventH\x00\x12&\n\ntimerFired\x18\r \x01(\x0b\x32\x10.TimerFiredEventH\x00\x12\x38\n\x13orchestratorStarted\x18\x0e \x01(\x0b\x32\x19.OrchestratorStartedEventH\x00\x12<\n\x15orchestratorCompleted\x18\x0f \x01(\x0b\x32\x1b.OrchestratorCompletedEventH\x00\x12$\n\teventSent\x18\x10 \x01(\x0b\x32\x0f.EventSentEventH\x00\x12(\n\x0b\x65ventRaised\x18\x11 \x01(\x0b\x32\x11.EventRaisedEventH\x00\x12%\n\x0cgenericEvent\x18\x12 \x01(\x0b\x32\r.GenericEventH\x00\x12*\n\x0chistoryState\x18\x13 \x01(\x0b\x32\x12.HistoryStateEventH\x00\x12,\n\rcontinueAsNew\x18\x14 \x01(\x0b\x32\x13.ContinueAsNewEventH\x00\x12\x36\n\x12\x65xecutionSuspended\x18\x15 \x01(\x0b\x32\x18.ExecutionSuspendedEventH\x00\x12\x32\n\x10\x65xecutionResumed\x18\x16 \x01(\x0b\x32\x16.ExecutionResumedEventH\x00\x42\x0b\n\teventType\"~\n\x12ScheduleTaskAction\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x07version\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9c\x01\n\x1c\x43reateSubOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"?\n\x11\x43reateTimerAction\x12*\n\x06\x66ireAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"u\n\x0fSendEventAction\x12(\n\x08instance\x18\x01 \x01(\x0b\x32\x16.OrchestrationInstance\x12\x0c\n\x04name\x18\x02 \x01(\t\x12*\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb4\x02\n\x1b\x43ompleteOrchestrationAction\x12\x31\n\x13orchestrationStatus\x18\x01 \x01(\x0e\x32\x14.OrchestrationStatus\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x64\x65tails\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nnewVersion\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12&\n\x0f\x63\x61rryoverEvents\x18\x05 \x03(\x0b\x32\r.HistoryEvent\x12+\n\x0e\x66\x61ilureDetails\x18\x06 \x01(\x0b\x32\x13.TaskFailureDetails\"q\n\x1cTerminateOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x0f\n\x07recurse\x18\x03 \x01(\x08\"\xfa\x02\n\x12OrchestratorAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12+\n\x0cscheduleTask\x18\x02 \x01(\x0b\x32\x13.ScheduleTaskActionH\x00\x12?\n\x16\x63reateSubOrchestration\x18\x03 \x01(\x0b\x32\x1d.CreateSubOrchestrationActionH\x00\x12)\n\x0b\x63reateTimer\x18\x04 \x01(\x0b\x32\x12.CreateTimerActionH\x00\x12%\n\tsendEvent\x18\x05 \x01(\x0b\x32\x10.SendEventActionH\x00\x12=\n\x15\x63ompleteOrchestration\x18\x06 \x01(\x0b\x32\x1c.CompleteOrchestrationActionH\x00\x12?\n\x16terminateOrchestration\x18\x07 
\x01(\x0b\x32\x1d.TerminateOrchestrationActionH\x00\x42\x18\n\x16orchestratorActionType\"\xda\x01\n\x13OrchestratorRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65xecutionId\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12!\n\npastEvents\x18\x03 \x03(\x0b\x32\r.HistoryEvent\x12 \n\tnewEvents\x18\x04 \x03(\x0b\x32\r.HistoryEvent\x12\x37\n\x10\x65ntityParameters\x18\x05 \x01(\x0b\x32\x1d.OrchestratorEntityParameters\"\x9d\x01\n\x14OrchestratorResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12$\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x13.OrchestratorAction\x12\x32\n\x0c\x63ustomStatus\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x17\n\x0f\x63ompletionToken\x18\x04 \x01(\t\"\xa3\x03\n\x15\x43reateInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1aorchestrationIdReusePolicy\x18\x06 \x01(\x0b\x32\x1b.OrchestrationIdReusePolicy\x12\x31\n\x0b\x65xecutionId\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x04tags\x18\x08 \x03(\x0b\x32 .CreateInstanceRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"w\n\x1aOrchestrationIdReusePolicy\x12-\n\x0foperationStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12*\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x1a.CreateOrchestrationAction\",\n\x16\x43reateInstanceResponse\x12\x12\n\ninstanceId\x18\x01 \x01(\t\"E\n\x12GetInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1b\n\x13getInputsAndOutputs\x18\x02 \x01(\x08\"V\n\x13GetInstanceResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12/\n\x12orchestrationState\x18\x02 \x01(\x0b\x32\x13.OrchestrationState\"Y\n\x15RewindInstanceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x18\n\x16RewindInstanceResponse\"\xa4\x05\n\x12OrchestrationState\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x13orchestrationStatus\x18\x04 \x01(\x0e\x32\x14.OrchestrationStatus\x12;\n\x17scheduledStartTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x34\n\x10\x63reatedTimestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x14lastUpdatedTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x05input\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06output\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63ustomStatus\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x0b \x01(\x0b\x32\x13.TaskFailureDetails\x12\x31\n\x0b\x65xecutionId\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x12\x63ompletedTimestamp\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x10parentInstanceId\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\"b\n\x11RaiseEventRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x14\n\x12RaiseEventResponse\"g\n\x10TerminateRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06output\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trecursive\x18\x03 
\x01(\x08\"\x13\n\x11TerminateResponse\"R\n\x0eSuspendRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x11\n\x0fSuspendResponse\"Q\n\rResumeRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12,\n\x06reason\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x10\n\x0eResumeResponse\"6\n\x15QueryInstancesRequest\x12\x1d\n\x05query\x18\x01 \x01(\x0b\x32\x0e.InstanceQuery\"\x82\x03\n\rInstanceQuery\x12+\n\rruntimeStatus\x18\x01 \x03(\x0e\x32\x14.OrchestrationStatus\x12\x33\n\x0f\x63reatedTimeFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0ctaskHubNames\x18\x04 \x03(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10maxInstanceCount\x18\x05 \x01(\x05\x12\x37\n\x11\x63ontinuationToken\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10instanceIdPrefix\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1d\n\x15\x66\x65tchInputsAndOutputs\x18\x08 \x01(\x08\"\x82\x01\n\x16QueryInstancesResponse\x12/\n\x12orchestrationState\x18\x01 \x03(\x0b\x32\x13.OrchestrationState\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x80\x01\n\x15PurgeInstancesRequest\x12\x14\n\ninstanceId\x18\x01 \x01(\tH\x00\x12\x33\n\x13purgeInstanceFilter\x18\x02 \x01(\x0b\x32\x14.PurgeInstanceFilterH\x00\x12\x11\n\trecursive\x18\x03 \x01(\x08\x42\t\n\x07request\"\xaa\x01\n\x13PurgeInstanceFilter\x12\x33\n\x0f\x63reatedTimeFrom\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rcreatedTimeTo\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\rruntimeStatus\x18\x03 \x03(\x0e\x32\x14.OrchestrationStatus\"6\n\x16PurgeInstancesResponse\x12\x1c\n\x14\x64\x65letedInstanceCount\x18\x01 \x01(\x05\"0\n\x14\x43reateTaskHubRequest\x12\x18\n\x10recreateIfExists\x18\x01 \x01(\x08\"\x17\n\x15\x43reateTaskHubResponse\"\x16\n\x14\x44\x65leteTaskHubRequest\"\x17\n\x15\x44\x65leteTaskHubResponse\"\xaa\x01\n\x13SignalEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\trequestId\x18\x04 \x01(\t\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x16\n\x14SignalEntityResponse\"<\n\x10GetEntityRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x14\n\x0cincludeState\x18\x02 \x01(\x08\"D\n\x11GetEntityResponse\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\x12\x1f\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x0f.EntityMetadata\"\xcb\x02\n\x0b\x45ntityQuery\x12:\n\x14instanceIdStartsWith\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x10lastModifiedFrom\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\x0elastModifiedTo\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cincludeState\x18\x04 \x01(\x08\x12\x18\n\x10includeTransient\x18\x05 \x01(\x08\x12-\n\x08pageSize\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x37\n\x11\x63ontinuationToken\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"3\n\x14QueryEntitiesRequest\x12\x1b\n\x05query\x18\x01 \x01(\x0b\x32\x0c.EntityQuery\"s\n\x15QueryEntitiesResponse\x12!\n\x08\x65ntities\x18\x01 \x03(\x0b\x32\x0f.EntityMetadata\x12\x37\n\x11\x63ontinuationToken\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xdb\x01\n\x0e\x45ntityMetadata\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x34\n\x10lastModifiedTime\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x18\n\x10\x62\x61\x63klogQueueSize\x18\x03 \x01(\x05\x12.\n\x08lockedBy\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fserializedState\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x8f\x01\n\x19\x43leanEntityStorageRequest\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1b\n\x13removeEmptyEntities\x18\x02 \x01(\x08\x12\x1c\n\x14releaseOrphanedLocks\x18\x03 \x01(\x08\"\x92\x01\n\x1a\x43leanEntityStorageResponse\x12\x37\n\x11\x63ontinuationToken\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x1c\n\x14\x65mptyEntitiesRemoved\x18\x02 \x01(\x05\x12\x1d\n\x15orphanedLocksReleased\x18\x03 \x01(\x05\"]\n\x1cOrchestratorEntityParameters\x12=\n\x1a\x65ntityMessageReorderWindow\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x82\x01\n\x12\x45ntityBatchRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x31\n\x0b\x65ntityState\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12%\n\noperations\x18\x03 \x03(\x0b\x32\x11.OperationRequest\"\xb9\x01\n\x11\x45ntityBatchResult\x12!\n\x07results\x18\x01 \x03(\x0b\x32\x10.OperationResult\x12!\n\x07\x61\x63tions\x18\x02 \x03(\x0b\x32\x10.OperationAction\x12\x31\n\x0b\x65ntityState\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x0e\x66\x61ilureDetails\x18\x04 \x01(\x0b\x32\x13.TaskFailureDetails\"e\n\x10OperationRequest\x12\x11\n\toperation\x18\x01 \x01(\t\x12\x11\n\trequestId\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"w\n\x0fOperationResult\x12*\n\x07success\x18\x01 \x01(\x0b\x32\x17.OperationResultSuccessH\x00\x12*\n\x07\x66\x61ilure\x18\x02 \x01(\x0b\x32\x17.OperationResultFailureH\x00\x42\x0c\n\nresultType\"F\n\x16OperationResultSuccess\x12,\n\x06result\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"E\n\x16OperationResultFailure\x12+\n\x0e\x66\x61ilureDetails\x18\x01 \x01(\x0b\x32\x13.TaskFailureDetails\"\x9c\x01\n\x0fOperationAction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\'\n\nsendSignal\x18\x02 \x01(\x0b\x32\x11.SendSignalActionH\x00\x12=\n\x15startNewOrchestration\x18\x03 \x01(\x0b\x32\x1c.StartNewOrchestrationActionH\x00\x42\x15\n\x13operationActionType\"\x94\x01\n\x10SendSignalAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05input\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xce\x01\n\x1bStartNewOrchestrationAction\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\x07version\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05input\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rscheduledTime\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"j\n\x13GetWorkItemsRequest\x12+\n#maxConcurrentOrchestrationWorkItems\x18\x01 \x01(\x05\x12&\n\x1emaxConcurrentActivityWorkItems\x18\x02 \x01(\x05\"\xe1\x01\n\x08WorkItem\x12\x33\n\x13orchestratorRequest\x18\x01 \x01(\x0b\x32\x14.OrchestratorRequestH\x00\x12+\n\x0f\x61\x63tivityRequest\x18\x02 \x01(\x0b\x32\x10.ActivityRequestH\x00\x12,\n\rentityRequest\x18\x03 \x01(\x0b\x32\x13.EntityBatchRequestH\x00\x12!\n\nhealthPing\x18\x04 \x01(\x0b\x32\x0b.HealthPingH\x00\x12\x17\n\x0f\x63ompletionToken\x18\n \x01(\tB\t\n\x07request\"\x16\n\x14\x43ompleteTaskResponse\"\x0c\n\nHealthPing*\xb5\x02\n\x13OrchestrationStatus\x12 
\n\x1cORCHESTRATION_STATUS_RUNNING\x10\x00\x12\"\n\x1eORCHESTRATION_STATUS_COMPLETED\x10\x01\x12)\n%ORCHESTRATION_STATUS_CONTINUED_AS_NEW\x10\x02\x12\x1f\n\x1bORCHESTRATION_STATUS_FAILED\x10\x03\x12!\n\x1dORCHESTRATION_STATUS_CANCELED\x10\x04\x12#\n\x1fORCHESTRATION_STATUS_TERMINATED\x10\x05\x12 \n\x1cORCHESTRATION_STATUS_PENDING\x10\x06\x12\"\n\x1eORCHESTRATION_STATUS_SUSPENDED\x10\x07*A\n\x19\x43reateOrchestrationAction\x12\t\n\x05\x45RROR\x10\x00\x12\n\n\x06IGNORE\x10\x01\x12\r\n\tTERMINATE\x10\x02\x32\xfc\n\n\x15TaskHubSidecarService\x12\x37\n\x05Hello\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12@\n\rStartInstance\x12\x16.CreateInstanceRequest\x1a\x17.CreateInstanceResponse\x12\x38\n\x0bGetInstance\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x41\n\x0eRewindInstance\x12\x16.RewindInstanceRequest\x1a\x17.RewindInstanceResponse\x12\x41\n\x14WaitForInstanceStart\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x46\n\x19WaitForInstanceCompletion\x12\x13.GetInstanceRequest\x1a\x14.GetInstanceResponse\x12\x35\n\nRaiseEvent\x12\x12.RaiseEventRequest\x1a\x13.RaiseEventResponse\x12:\n\x11TerminateInstance\x12\x11.TerminateRequest\x1a\x12.TerminateResponse\x12\x34\n\x0fSuspendInstance\x12\x0f.SuspendRequest\x1a\x10.SuspendResponse\x12\x31\n\x0eResumeInstance\x12\x0e.ResumeRequest\x1a\x0f.ResumeResponse\x12\x41\n\x0eQueryInstances\x12\x16.QueryInstancesRequest\x1a\x17.QueryInstancesResponse\x12\x41\n\x0ePurgeInstances\x12\x16.PurgeInstancesRequest\x1a\x17.PurgeInstancesResponse\x12\x31\n\x0cGetWorkItems\x12\x14.GetWorkItemsRequest\x1a\t.WorkItem0\x01\x12@\n\x14\x43ompleteActivityTask\x12\x11.ActivityResponse\x1a\x15.CompleteTaskResponse\x12H\n\x18\x43ompleteOrchestratorTask\x12\x15.OrchestratorResponse\x1a\x15.CompleteTaskResponse\x12?\n\x12\x43ompleteEntityTask\x12\x12.EntityBatchResult\x1a\x15.CompleteTaskResponse\x12>\n\rCreateTaskHub\x12\x15.CreateTaskHubRequest\x1a\x16.CreateTaskHubResponse\x12>\n\rDeleteTaskHub\x12\x15.DeleteTaskHubRequest\x1a\x16.DeleteTaskHubResponse\x12;\n\x0cSignalEntity\x12\x14.SignalEntityRequest\x1a\x15.SignalEntityResponse\x12\x32\n\tGetEntity\x12\x11.GetEntityRequest\x1a\x12.GetEntityResponse\x12>\n\rQueryEntities\x12\x15.QueryEntitiesRequest\x1a\x16.QueryEntitiesResponse\x12M\n\x12\x43leanEntityStorage\x12\x1a.CleanEntityStorageRequest\x1a\x1b.CleanEntityStorageResponseBf\n1com.microsoft.durabletask.implementation.protobufZ\x10/internal/protos\xaa\x02\x1eMicrosoft.DurableTask.Protobufb\x06proto3') 22 | 23 | _globals = globals() 24 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 25 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'durabletask.internal.orchestrator_service_pb2', _globals) 26 | if _descriptor._USE_C_DESCRIPTORS == False: 27 | _globals['DESCRIPTOR']._options = None 28 | _globals['DESCRIPTOR']._serialized_options = b'\n1com.microsoft.durabletask.implementation.protobufZ\020/internal/protos\252\002\036Microsoft.DurableTask.Protobuf' 29 | _globals['_TRACECONTEXT'].fields_by_name['spanID']._options = None 30 | _globals['_TRACECONTEXT'].fields_by_name['spanID']._serialized_options = b'\030\001' 31 | _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._options = None 32 | _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_options = b'8\001' 33 | _globals['_ORCHESTRATIONSTATUS']._serialized_start=12232 34 | _globals['_ORCHESTRATIONSTATUS']._serialized_end=12541 35 | _globals['_CREATEORCHESTRATIONACTION']._serialized_start=12543 36 | _globals['_CREATEORCHESTRATIONACTION']._serialized_end=12608 37 
| _globals['_ORCHESTRATIONINSTANCE']._serialized_start=177 38 | _globals['_ORCHESTRATIONINSTANCE']._serialized_end=271 39 | _globals['_ACTIVITYREQUEST']._serialized_start=274 40 | _globals['_ACTIVITYREQUEST']._serialized_end=511 41 | _globals['_ACTIVITYRESPONSE']._serialized_start=514 42 | _globals['_ACTIVITYRESPONSE']._serialized_end=684 43 | _globals['_TASKFAILUREDETAILS']._serialized_start=687 44 | _globals['_TASKFAILUREDETAILS']._serialized_end=865 45 | _globals['_PARENTINSTANCEINFO']._serialized_start=868 46 | _globals['_PARENTINSTANCEINFO']._serialized_end=1059 47 | _globals['_TRACECONTEXT']._serialized_start=1061 48 | _globals['_TRACECONTEXT']._serialized_end=1166 49 | _globals['_EXECUTIONSTARTEDEVENT']._serialized_start=1169 50 | _globals['_EXECUTIONSTARTEDEVENT']._serialized_end=1561 51 | _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_start=1564 52 | _globals['_EXECUTIONCOMPLETEDEVENT']._serialized_end=1731 53 | _globals['_EXECUTIONTERMINATEDEVENT']._serialized_start=1733 54 | _globals['_EXECUTIONTERMINATEDEVENT']._serialized_end=1821 55 | _globals['_TASKSCHEDULEDEVENT']._serialized_start=1824 56 | _globals['_TASKSCHEDULEDEVENT']._serialized_end=1993 57 | _globals['_TASKCOMPLETEDEVENT']._serialized_start=1995 58 | _globals['_TASKCOMPLETEDEVENT']._serialized_end=2086 59 | _globals['_TASKFAILEDEVENT']._serialized_start=2088 60 | _globals['_TASKFAILEDEVENT']._serialized_end=2175 61 | _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_start=2178 62 | _globals['_SUBORCHESTRATIONINSTANCECREATEDEVENT']._serialized_end=2385 63 | _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_start=2387 64 | _globals['_SUBORCHESTRATIONINSTANCECOMPLETEDEVENT']._serialized_end=2498 65 | _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_start=2500 66 | _globals['_SUBORCHESTRATIONINSTANCEFAILEDEVENT']._serialized_end=2607 67 | _globals['_TIMERCREATEDEVENT']._serialized_start=2609 68 | _globals['_TIMERCREATEDEVENT']._serialized_end=2672 69 | _globals['_TIMERFIREDEVENT']._serialized_start=2674 70 | _globals['_TIMERFIREDEVENT']._serialized_end=2752 71 | _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_start=2754 72 | _globals['_ORCHESTRATORSTARTEDEVENT']._serialized_end=2780 73 | _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_start=2782 74 | _globals['_ORCHESTRATORCOMPLETEDEVENT']._serialized_end=2810 75 | _globals['_EVENTSENTEVENT']._serialized_start=2812 76 | _globals['_EVENTSENTEVENT']._serialized_end=2907 77 | _globals['_EVENTRAISEDEVENT']._serialized_start=2909 78 | _globals['_EVENTRAISEDEVENT']._serialized_end=2986 79 | _globals['_GENERICEVENT']._serialized_start=2988 80 | _globals['_GENERICEVENT']._serialized_end=3046 81 | _globals['_HISTORYSTATEEVENT']._serialized_start=3048 82 | _globals['_HISTORYSTATEEVENT']._serialized_end=3116 83 | _globals['_CONTINUEASNEWEVENT']._serialized_start=3118 84 | _globals['_CONTINUEASNEWEVENT']._serialized_end=3183 85 | _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_start=3185 86 | _globals['_EXECUTIONSUSPENDEDEVENT']._serialized_end=3255 87 | _globals['_EXECUTIONRESUMEDEVENT']._serialized_start=3257 88 | _globals['_EXECUTIONRESUMEDEVENT']._serialized_end=3325 89 | _globals['_HISTORYEVENT']._serialized_start=3328 90 | _globals['_HISTORYEVENT']._serialized_end=4486 91 | _globals['_SCHEDULETASKACTION']._serialized_start=4488 92 | _globals['_SCHEDULETASKACTION']._serialized_end=4614 93 | _globals['_CREATESUBORCHESTRATIONACTION']._serialized_start=4617 94 | _globals['_CREATESUBORCHESTRATIONACTION']._serialized_end=4773 95 | 
_globals['_CREATETIMERACTION']._serialized_start=4775 96 | _globals['_CREATETIMERACTION']._serialized_end=4838 97 | _globals['_SENDEVENTACTION']._serialized_start=4840 98 | _globals['_SENDEVENTACTION']._serialized_end=4957 99 | _globals['_COMPLETEORCHESTRATIONACTION']._serialized_start=4960 100 | _globals['_COMPLETEORCHESTRATIONACTION']._serialized_end=5268 101 | _globals['_TERMINATEORCHESTRATIONACTION']._serialized_start=5270 102 | _globals['_TERMINATEORCHESTRATIONACTION']._serialized_end=5383 103 | _globals['_ORCHESTRATORACTION']._serialized_start=5386 104 | _globals['_ORCHESTRATORACTION']._serialized_end=5764 105 | _globals['_ORCHESTRATORREQUEST']._serialized_start=5767 106 | _globals['_ORCHESTRATORREQUEST']._serialized_end=5985 107 | _globals['_ORCHESTRATORRESPONSE']._serialized_start=5988 108 | _globals['_ORCHESTRATORRESPONSE']._serialized_end=6145 109 | _globals['_CREATEINSTANCEREQUEST']._serialized_start=6148 110 | _globals['_CREATEINSTANCEREQUEST']._serialized_end=6567 111 | _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_start=6524 112 | _globals['_CREATEINSTANCEREQUEST_TAGSENTRY']._serialized_end=6567 113 | _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_start=6569 114 | _globals['_ORCHESTRATIONIDREUSEPOLICY']._serialized_end=6688 115 | _globals['_CREATEINSTANCERESPONSE']._serialized_start=6690 116 | _globals['_CREATEINSTANCERESPONSE']._serialized_end=6734 117 | _globals['_GETINSTANCEREQUEST']._serialized_start=6736 118 | _globals['_GETINSTANCEREQUEST']._serialized_end=6805 119 | _globals['_GETINSTANCERESPONSE']._serialized_start=6807 120 | _globals['_GETINSTANCERESPONSE']._serialized_end=6893 121 | _globals['_REWINDINSTANCEREQUEST']._serialized_start=6895 122 | _globals['_REWINDINSTANCEREQUEST']._serialized_end=6984 123 | _globals['_REWINDINSTANCERESPONSE']._serialized_start=6986 124 | _globals['_REWINDINSTANCERESPONSE']._serialized_end=7010 125 | _globals['_ORCHESTRATIONSTATE']._serialized_start=7013 126 | _globals['_ORCHESTRATIONSTATE']._serialized_end=7689 127 | _globals['_RAISEEVENTREQUEST']._serialized_start=7691 128 | _globals['_RAISEEVENTREQUEST']._serialized_end=7789 129 | _globals['_RAISEEVENTRESPONSE']._serialized_start=7791 130 | _globals['_RAISEEVENTRESPONSE']._serialized_end=7811 131 | _globals['_TERMINATEREQUEST']._serialized_start=7813 132 | _globals['_TERMINATEREQUEST']._serialized_end=7916 133 | _globals['_TERMINATERESPONSE']._serialized_start=7918 134 | _globals['_TERMINATERESPONSE']._serialized_end=7937 135 | _globals['_SUSPENDREQUEST']._serialized_start=7939 136 | _globals['_SUSPENDREQUEST']._serialized_end=8021 137 | _globals['_SUSPENDRESPONSE']._serialized_start=8023 138 | _globals['_SUSPENDRESPONSE']._serialized_end=8040 139 | _globals['_RESUMEREQUEST']._serialized_start=8042 140 | _globals['_RESUMEREQUEST']._serialized_end=8123 141 | _globals['_RESUMERESPONSE']._serialized_start=8125 142 | _globals['_RESUMERESPONSE']._serialized_end=8141 143 | _globals['_QUERYINSTANCESREQUEST']._serialized_start=8143 144 | _globals['_QUERYINSTANCESREQUEST']._serialized_end=8197 145 | _globals['_INSTANCEQUERY']._serialized_start=8200 146 | _globals['_INSTANCEQUERY']._serialized_end=8586 147 | _globals['_QUERYINSTANCESRESPONSE']._serialized_start=8589 148 | _globals['_QUERYINSTANCESRESPONSE']._serialized_end=8719 149 | _globals['_PURGEINSTANCESREQUEST']._serialized_start=8722 150 | _globals['_PURGEINSTANCESREQUEST']._serialized_end=8850 151 | _globals['_PURGEINSTANCEFILTER']._serialized_start=8853 152 | _globals['_PURGEINSTANCEFILTER']._serialized_end=9023 153 
| _globals['_PURGEINSTANCESRESPONSE']._serialized_start=9025 154 | _globals['_PURGEINSTANCESRESPONSE']._serialized_end=9079 155 | _globals['_CREATETASKHUBREQUEST']._serialized_start=9081 156 | _globals['_CREATETASKHUBREQUEST']._serialized_end=9129 157 | _globals['_CREATETASKHUBRESPONSE']._serialized_start=9131 158 | _globals['_CREATETASKHUBRESPONSE']._serialized_end=9154 159 | _globals['_DELETETASKHUBREQUEST']._serialized_start=9156 160 | _globals['_DELETETASKHUBREQUEST']._serialized_end=9178 161 | _globals['_DELETETASKHUBRESPONSE']._serialized_start=9180 162 | _globals['_DELETETASKHUBRESPONSE']._serialized_end=9203 163 | _globals['_SIGNALENTITYREQUEST']._serialized_start=9206 164 | _globals['_SIGNALENTITYREQUEST']._serialized_end=9376 165 | _globals['_SIGNALENTITYRESPONSE']._serialized_start=9378 166 | _globals['_SIGNALENTITYRESPONSE']._serialized_end=9400 167 | _globals['_GETENTITYREQUEST']._serialized_start=9402 168 | _globals['_GETENTITYREQUEST']._serialized_end=9462 169 | _globals['_GETENTITYRESPONSE']._serialized_start=9464 170 | _globals['_GETENTITYRESPONSE']._serialized_end=9532 171 | _globals['_ENTITYQUERY']._serialized_start=9535 172 | _globals['_ENTITYQUERY']._serialized_end=9866 173 | _globals['_QUERYENTITIESREQUEST']._serialized_start=9868 174 | _globals['_QUERYENTITIESREQUEST']._serialized_end=9919 175 | _globals['_QUERYENTITIESRESPONSE']._serialized_start=9921 176 | _globals['_QUERYENTITIESRESPONSE']._serialized_end=10036 177 | _globals['_ENTITYMETADATA']._serialized_start=10039 178 | _globals['_ENTITYMETADATA']._serialized_end=10258 179 | _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_start=10261 180 | _globals['_CLEANENTITYSTORAGEREQUEST']._serialized_end=10404 181 | _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_start=10407 182 | _globals['_CLEANENTITYSTORAGERESPONSE']._serialized_end=10553 183 | _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_start=10555 184 | _globals['_ORCHESTRATORENTITYPARAMETERS']._serialized_end=10648 185 | _globals['_ENTITYBATCHREQUEST']._serialized_start=10651 186 | _globals['_ENTITYBATCHREQUEST']._serialized_end=10781 187 | _globals['_ENTITYBATCHRESULT']._serialized_start=10784 188 | _globals['_ENTITYBATCHRESULT']._serialized_end=10969 189 | _globals['_OPERATIONREQUEST']._serialized_start=10971 190 | _globals['_OPERATIONREQUEST']._serialized_end=11072 191 | _globals['_OPERATIONRESULT']._serialized_start=11074 192 | _globals['_OPERATIONRESULT']._serialized_end=11193 193 | _globals['_OPERATIONRESULTSUCCESS']._serialized_start=11195 194 | _globals['_OPERATIONRESULTSUCCESS']._serialized_end=11265 195 | _globals['_OPERATIONRESULTFAILURE']._serialized_start=11267 196 | _globals['_OPERATIONRESULTFAILURE']._serialized_end=11336 197 | _globals['_OPERATIONACTION']._serialized_start=11339 198 | _globals['_OPERATIONACTION']._serialized_end=11495 199 | _globals['_SENDSIGNALACTION']._serialized_start=11498 200 | _globals['_SENDSIGNALACTION']._serialized_end=11646 201 | _globals['_STARTNEWORCHESTRATIONACTION']._serialized_start=11649 202 | _globals['_STARTNEWORCHESTRATIONACTION']._serialized_end=11855 203 | _globals['_GETWORKITEMSREQUEST']._serialized_start=11857 204 | _globals['_GETWORKITEMSREQUEST']._serialized_end=11963 205 | _globals['_WORKITEM']._serialized_start=11966 206 | _globals['_WORKITEM']._serialized_end=12191 207 | _globals['_COMPLETETASKRESPONSE']._serialized_start=12193 208 | _globals['_COMPLETETASKRESPONSE']._serialized_end=12215 209 | _globals['_HEALTHPING']._serialized_start=12217 210 | 
_globals['_HEALTHPING']._serialized_end=12229 211 | _globals['_TASKHUBSIDECARSERVICE']._serialized_start=12611 212 | _globals['_TASKHUBSIDECARSERVICE']._serialized_end=14015 213 | # @@protoc_insertion_point(module_scope) 214 | -------------------------------------------------------------------------------- /durabletask/internal/shared.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import dataclasses 5 | import json 6 | import logging 7 | from types import SimpleNamespace 8 | from typing import Any, Optional, Sequence, Union 9 | 10 | import grpc 11 | 12 | ClientInterceptor = Union[ 13 | grpc.UnaryUnaryClientInterceptor, 14 | grpc.UnaryStreamClientInterceptor, 15 | grpc.StreamUnaryClientInterceptor, 16 | grpc.StreamStreamClientInterceptor 17 | ] 18 | 19 | # Field name used to indicate that an object was automatically serialized 20 | # and should be deserialized as a SimpleNamespace 21 | AUTO_SERIALIZED = "__durabletask_autoobject__" 22 | 23 | SECURE_PROTOCOLS = ["https://", "grpcs://"] 24 | INSECURE_PROTOCOLS = ["http://", "grpc://"] 25 | 26 | 27 | def get_default_host_address() -> str: 28 | return "localhost:4001" 29 | 30 | 31 | def get_grpc_channel( 32 | host_address: Optional[str], 33 | secure_channel: bool = False, 34 | interceptors: Optional[Sequence[ClientInterceptor]] = None) -> grpc.Channel: 35 | 36 | if host_address is None: 37 | host_address = get_default_host_address() 38 | 39 | for protocol in SECURE_PROTOCOLS: 40 | if host_address.lower().startswith(protocol): 41 | secure_channel = True 42 | # remove the protocol from the host name 43 | host_address = host_address[len(protocol):] 44 | break 45 | 46 | for protocol in INSECURE_PROTOCOLS: 47 | if host_address.lower().startswith(protocol): 48 | secure_channel = False 49 | # remove the protocol from the host name 50 | host_address = host_address[len(protocol):] 51 | break 52 | 53 | # Create the base channel 54 | if secure_channel: 55 | channel = grpc.secure_channel(host_address, grpc.ssl_channel_credentials()) 56 | else: 57 | channel = grpc.insecure_channel(host_address) 58 | 59 | # Apply interceptors ONLY if they exist 60 | if interceptors: 61 | channel = grpc.intercept_channel(channel, *interceptors) 62 | return channel 63 | 64 | 65 | def get_logger( 66 | name_suffix: str, 67 | log_handler: Optional[logging.Handler] = None, 68 | log_formatter: Optional[logging.Formatter] = None) -> logging.Logger: 69 | logger = logging.Logger(f"durabletask-{name_suffix}") 70 | 71 | # Add a default log handler if none is provided 72 | if log_handler is None: 73 | log_handler = logging.StreamHandler() 74 | log_handler.setLevel(logging.INFO) 75 | logger.handlers.append(log_handler) 76 | 77 | # Set a default log formatter to our handler if none is provided 78 | if log_formatter is None: 79 | log_formatter = logging.Formatter( 80 | fmt="%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s", 81 | datefmt='%Y-%m-%d %H:%M:%S') 82 | log_handler.setFormatter(log_formatter) 83 | return logger 84 | 85 | 86 | def to_json(obj): 87 | return json.dumps(obj, cls=InternalJSONEncoder) 88 | 89 | 90 | def from_json(json_str): 91 | return json.loads(json_str, cls=InternalJSONDecoder) 92 | 93 | 94 | class InternalJSONEncoder(json.JSONEncoder): 95 | """JSON encoder that supports serializing specific Python types.""" 96 | 97 | def encode(self, obj: Any) -> str: 98 | # if the object is a namedtuple, convert it to a dict with the 
AUTO_SERIALIZED key added 99 | if isinstance(obj, tuple) and hasattr(obj, "_fields") and hasattr(obj, "_asdict"): 100 | d = obj._asdict() # type: ignore 101 | d[AUTO_SERIALIZED] = True 102 | obj = d 103 | return super().encode(obj) 104 | 105 | def default(self, obj): 106 | if dataclasses.is_dataclass(obj): 107 | # Dataclasses are not serializable by default, so we convert them to a dict and mark them for 108 | # automatic deserialization by the receiver 109 | d = dataclasses.asdict(obj) # type: ignore 110 | d[AUTO_SERIALIZED] = True 111 | return d 112 | elif isinstance(obj, SimpleNamespace): 113 | # Most commonly used for serializing custom objects that were previously serialized using our encoder 114 | d = vars(obj) 115 | d[AUTO_SERIALIZED] = True 116 | return d 117 | # This will typically raise a TypeError 118 | return json.JSONEncoder.default(self, obj) 119 | 120 | 121 | class InternalJSONDecoder(json.JSONDecoder): 122 | def __init__(self, *args, **kwargs): 123 | super().__init__(object_hook=self.dict_to_object, *args, **kwargs) 124 | 125 | def dict_to_object(self, d: dict[str, Any]): 126 | # If the object was serialized by the InternalJSONEncoder, deserialize it as a SimpleNamespace 127 | if d.pop(AUTO_SERIALIZED, False): 128 | return SimpleNamespace(**d) 129 | return d 130 | -------------------------------------------------------------------------------- /durabletask/task.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | # See https://peps.python.org/pep-0563/ 5 | from __future__ import annotations 6 | 7 | import math 8 | from abc import ABC, abstractmethod 9 | from datetime import datetime, timedelta 10 | from typing import Any, Callable, Generator, Generic, Optional, TypeVar, Union 11 | 12 | import durabletask.internal.helpers as pbh 13 | import durabletask.internal.orchestrator_service_pb2 as pb 14 | 15 | T = TypeVar('T') 16 | TInput = TypeVar('TInput') 17 | TOutput = TypeVar('TOutput') 18 | 19 | 20 | class OrchestrationContext(ABC): 21 | 22 | @property 23 | @abstractmethod 24 | def instance_id(self) -> str: 25 | """Get the ID of the current orchestration instance. 26 | 27 | The instance ID is generated and fixed when the orchestrator function 28 | is scheduled. It can be either auto-generated, in which case it is 29 | formatted as a UUID, or it can be user-specified with any format. 30 | 31 | Returns 32 | ------- 33 | str 34 | The ID of the current orchestration instance. 35 | """ 36 | pass 37 | 38 | @property 39 | @abstractmethod 40 | def current_utc_datetime(self) -> datetime: 41 | """Get the current date/time as UTC. 42 | 43 | This date/time value is derived from the orchestration history. It 44 | always returns the same value at specific points in the orchestrator 45 | function code, making it deterministic and safe for replay. 46 | 47 | Returns 48 | ------- 49 | datetime 50 | The current timestamp in a way that is safe for use by orchestrator functions 51 | """ 52 | pass 53 | 54 | @property 55 | @abstractmethod 56 | def is_replaying(self) -> bool: 57 | """Get the value indicating whether the orchestrator is replaying from history. 58 | 59 | This property is useful when there is logic that needs to run only when 60 | the orchestrator function is _not_ replaying. For example, certain 61 | types of application logging may become too noisy when duplicated as 62 | part of orchestrator function replay. 
The orchestrator code could check
63 |         to see whether the function is being replayed and then issue the log
64 |         statements when this value is `false`.
65 | 
66 |         Returns
67 |         -------
68 |         bool
69 |             Value indicating whether the orchestrator function is currently replaying.
70 |         """
71 |         pass
72 | 
73 |     @abstractmethod
74 |     def set_custom_status(self, custom_status: Any) -> None:
75 |         """Set the orchestration instance's custom status.
76 | 
77 |         Parameters
78 |         ----------
79 |         custom_status: Any
80 |             A JSON-serializable custom status value to set.
81 |         """
82 |         pass
83 | 
84 |     @abstractmethod
85 |     def create_timer(self, fire_at: Union[datetime, timedelta]) -> Task:
86 |         """Create a Timer Task to fire at the specified deadline.
87 | 
88 |         Parameters
89 |         ----------
90 |         fire_at: datetime.datetime | datetime.timedelta
91 |             The time for the timer to trigger or a time delta from now.
92 | 
93 |         Returns
94 |         -------
95 |         Task
96 |             A Durable Timer Task that schedules the timer to wake up the orchestrator.
97 |         """
98 |         pass
99 | 
100 |     @abstractmethod
101 |     def call_activity(self, activity: Union[Activity[TInput, TOutput], str], *,
102 |                       input: Optional[TInput] = None,
103 |                       retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]:
104 |         """Schedule an activity for execution.
105 | 
106 |         Parameters
107 |         ----------
108 |         activity: Union[Activity[TInput, TOutput], str]
109 |             A reference to the activity function to call.
110 |         input: Optional[TInput]
111 |             The JSON-serializable input (or None) to pass to the activity.
112 |         retry_policy: Optional[RetryPolicy]
113 |             The retry policy to use for this activity call.
114 | 
115 |         Returns
116 |         -------
117 |         Task
118 |             A Durable Task that completes when the called activity function completes or fails.
119 |         """
120 |         pass
121 | 
122 |     @abstractmethod
123 |     def call_sub_orchestrator(self, orchestrator: Orchestrator[TInput, TOutput], *,
124 |                               input: Optional[TInput] = None,
125 |                               instance_id: Optional[str] = None,
126 |                               retry_policy: Optional[RetryPolicy] = None) -> Task[TOutput]:
127 |         """Schedule a sub-orchestrator function for execution.
128 | 
129 |         Parameters
130 |         ----------
131 |         orchestrator: Orchestrator[TInput, TOutput]
132 |             A reference to the orchestrator function to call.
133 |         input: Optional[TInput]
134 |             The optional JSON-serializable input to pass to the orchestrator function.
135 |         instance_id: Optional[str]
136 |             A unique ID to use for the sub-orchestration instance. If not specified, a
137 |             random UUID will be used.
138 |         retry_policy: Optional[RetryPolicy]
139 |             The retry policy to use for this sub-orchestrator call.
140 | 
141 |         Returns
142 |         -------
143 |         Task
144 |             A Durable Task that completes when the called sub-orchestrator completes or fails.
145 |         """
146 |         pass
147 | 
148 |     # TODO: Add a timeout parameter, which allows the task to be canceled if the event is
149 |     # not received within the specified timeout. This requires support for task cancellation.
150 |     @abstractmethod
151 |     def wait_for_external_event(self, name: str) -> Task:
152 |         """Wait asynchronously for an event to be raised with the name `name`.
153 | 
154 |         Parameters
155 |         ----------
156 |         name : str
157 |             The name of the event that the task is waiting for.
158 | 
159 |         Returns
160 |         -------
161 |         Task
162 |             A Durable Task that completes when the event is received.
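
        Examples
        --------
        A minimal, illustrative sketch (the event name and payload are
        hypothetical, and `ctx` is assumed to be the orchestration context
        passed to the orchestrator). Inside an orchestrator generator the
        returned task is typically consumed with a `yield` expression, while a
        client delivers the event using
        `TaskHubGrpcClient.raise_orchestration_event`:

            approval_data = yield ctx.wait_for_external_event("approval_granted")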
163 | """ 164 | pass 165 | 166 | @abstractmethod 167 | def continue_as_new(self, new_input: Any, *, save_events: bool = False) -> None: 168 | """Continue the orchestration execution as a new instance. 169 | 170 | Parameters 171 | ---------- 172 | new_input : Any 173 | The new input to use for the new orchestration instance. 174 | save_events : bool 175 | A flag indicating whether to add any unprocessed external events in the new orchestration history. 176 | """ 177 | pass 178 | 179 | 180 | class FailureDetails: 181 | def __init__(self, message: str, error_type: str, stack_trace: Optional[str]): 182 | self._message = message 183 | self._error_type = error_type 184 | self._stack_trace = stack_trace 185 | 186 | @property 187 | def message(self) -> str: 188 | return self._message 189 | 190 | @property 191 | def error_type(self) -> str: 192 | return self._error_type 193 | 194 | @property 195 | def stack_trace(self) -> Optional[str]: 196 | return self._stack_trace 197 | 198 | 199 | class TaskFailedError(Exception): 200 | """Exception type for all orchestration task failures.""" 201 | 202 | def __init__(self, message: str, details: pb.TaskFailureDetails): 203 | super().__init__(message) 204 | self._details = FailureDetails( 205 | details.errorMessage, 206 | details.errorType, 207 | details.stackTrace.value if not pbh.is_empty(details.stackTrace) else None) 208 | 209 | @property 210 | def details(self) -> FailureDetails: 211 | return self._details 212 | 213 | 214 | class NonDeterminismError(Exception): 215 | pass 216 | 217 | 218 | class OrchestrationStateError(Exception): 219 | pass 220 | 221 | 222 | class Task(ABC, Generic[T]): 223 | """Abstract base class for asynchronous tasks in a durable orchestration.""" 224 | _result: T 225 | _exception: Optional[TaskFailedError] 226 | _parent: Optional[CompositeTask[T]] 227 | 228 | def __init__(self) -> None: 229 | super().__init__() 230 | self._is_complete = False 231 | self._exception = None 232 | self._parent = None 233 | 234 | @property 235 | def is_complete(self) -> bool: 236 | """Returns True if the task has completed, False otherwise.""" 237 | return self._is_complete 238 | 239 | @property 240 | def is_failed(self) -> bool: 241 | """Returns True if the task has failed, False otherwise.""" 242 | return self._exception is not None 243 | 244 | def get_result(self) -> T: 245 | """Returns the result of the task.""" 246 | if not self._is_complete: 247 | raise ValueError('The task has not completed.') 248 | elif self._exception is not None: 249 | raise self._exception 250 | return self._result 251 | 252 | def get_exception(self) -> TaskFailedError: 253 | """Returns the exception that caused the task to fail.""" 254 | if self._exception is None: 255 | raise ValueError('The task has not failed.') 256 | return self._exception 257 | 258 | 259 | class CompositeTask(Task[T]): 260 | """A task that is composed of other tasks.""" 261 | _tasks: list[Task] 262 | 263 | def __init__(self, tasks: list[Task]): 264 | super().__init__() 265 | self._tasks = tasks 266 | self._completed_tasks = 0 267 | self._failed_tasks = 0 268 | for task in tasks: 269 | task._parent = self 270 | if task.is_complete: 271 | self.on_child_completed(task) 272 | 273 | def get_tasks(self) -> list[Task]: 274 | return self._tasks 275 | 276 | @abstractmethod 277 | def on_child_completed(self, task: Task[T]): 278 | pass 279 | 280 | 281 | class WhenAllTask(CompositeTask[list[T]]): 282 | """A task that completes when all of its child tasks complete.""" 283 | 284 | def __init__(self, tasks: 
list[Task[T]]): 285 | super().__init__(tasks) 286 | self._completed_tasks = 0 287 | self._failed_tasks = 0 288 | 289 | @property 290 | def pending_tasks(self) -> int: 291 | """Returns the number of tasks that have not yet completed.""" 292 | return len(self._tasks) - self._completed_tasks 293 | 294 | def on_child_completed(self, task: Task[T]): 295 | if self.is_complete: 296 | raise ValueError('The task has already completed.') 297 | self._completed_tasks += 1 298 | if task.is_failed and self._exception is None: 299 | self._exception = task.get_exception() 300 | self._is_complete = True 301 | if self._completed_tasks == len(self._tasks): 302 | # The order of the result MUST match the order of the tasks provided to the constructor. 303 | self._result = [task.get_result() for task in self._tasks] 304 | self._is_complete = True 305 | 306 | def get_completed_tasks(self) -> int: 307 | return self._completed_tasks 308 | 309 | 310 | class CompletableTask(Task[T]): 311 | 312 | def __init__(self): 313 | super().__init__() 314 | self._retryable_parent = None 315 | 316 | def complete(self, result: T): 317 | if self._is_complete: 318 | raise ValueError('The task has already completed.') 319 | self._result = result 320 | self._is_complete = True 321 | if self._parent is not None: 322 | self._parent.on_child_completed(self) 323 | 324 | def fail(self, message: str, details: pb.TaskFailureDetails): 325 | if self._is_complete: 326 | raise ValueError('The task has already completed.') 327 | self._exception = TaskFailedError(message, details) 328 | self._is_complete = True 329 | if self._parent is not None: 330 | self._parent.on_child_completed(self) 331 | 332 | 333 | class RetryableTask(CompletableTask[T]): 334 | """A task that can be retried according to a retry policy.""" 335 | 336 | def __init__(self, retry_policy: RetryPolicy, action: pb.OrchestratorAction, 337 | start_time: datetime, is_sub_orch: bool) -> None: 338 | super().__init__() 339 | self._action = action 340 | self._retry_policy = retry_policy 341 | self._attempt_count = 1 342 | self._start_time = start_time 343 | self._is_sub_orch = is_sub_orch 344 | 345 | def increment_attempt_count(self) -> None: 346 | self._attempt_count += 1 347 | 348 | def compute_next_delay(self) -> Optional[timedelta]: 349 | if self._attempt_count >= self._retry_policy.max_number_of_attempts: 350 | return None 351 | 352 | retry_expiration: datetime = datetime.max 353 | if self._retry_policy.retry_timeout is not None and self._retry_policy.retry_timeout != datetime.max: 354 | retry_expiration = self._start_time + self._retry_policy.retry_timeout 355 | 356 | if self._retry_policy.backoff_coefficient is None: 357 | backoff_coefficient = 1.0 358 | else: 359 | backoff_coefficient = self._retry_policy.backoff_coefficient 360 | 361 | if datetime.utcnow() < retry_expiration: 362 | next_delay_f = math.pow(backoff_coefficient, self._attempt_count - 1) * self._retry_policy.first_retry_interval.total_seconds() 363 | 364 | if self._retry_policy.max_retry_interval is not None: 365 | next_delay_f = min(next_delay_f, self._retry_policy.max_retry_interval.total_seconds()) 366 | return timedelta(seconds=next_delay_f) 367 | 368 | return None 369 | 370 | 371 | class TimerTask(CompletableTask[T]): 372 | 373 | def __init__(self) -> None: 374 | super().__init__() 375 | 376 | def set_retryable_parent(self, retryable_task: RetryableTask): 377 | self._retryable_parent = retryable_task 378 | 379 | 380 | class WhenAnyTask(CompositeTask[Task]): 381 | """A task that completes when any of its child 
tasks complete."""
382 | 
383 |     def __init__(self, tasks: list[Task]):
384 |         super().__init__(tasks)
385 | 
386 |     def on_child_completed(self, task: Task):
387 |         # The first task to complete is the result of the WhenAnyTask.
388 |         if not self.is_complete:
389 |             self._is_complete = True
390 |             self._result = task
391 | 
392 | 
393 | def when_all(tasks: list[Task[T]]) -> WhenAllTask[T]:
394 |     """Returns a task that completes when all of the provided tasks complete or when one of the tasks fails."""
395 |     return WhenAllTask(tasks)
396 | 
397 | 
398 | def when_any(tasks: list[Task]) -> WhenAnyTask:
399 |     """Returns a task that completes when any one of the provided tasks completes or fails."""
400 |     return WhenAnyTask(tasks)
401 | 
402 | 
403 | class ActivityContext:
404 |     def __init__(self, orchestration_id: str, task_id: int):
405 |         self._orchestration_id = orchestration_id
406 |         self._task_id = task_id
407 | 
408 |     @property
409 |     def orchestration_id(self) -> str:
410 |         """Get the ID of the orchestration instance that scheduled this activity.
411 | 
412 |         Returns
413 |         -------
414 |         str
415 |             The ID of the current orchestration instance.
416 |         """
417 |         return self._orchestration_id
418 | 
419 |     @property
420 |     def task_id(self) -> int:
421 |         """Get the task ID associated with this activity invocation.
422 | 
423 |         The task ID is an auto-incrementing integer that is unique within
424 |         the scope of the orchestration instance. It can be used to distinguish
425 |         between multiple activity invocations that are part of the same
426 |         orchestration instance.
427 | 
428 |         Returns
429 |         -------
430 |         int
431 |             The task ID of the current activity invocation.
432 |         """
433 |         return self._task_id
434 | 
435 | 
436 | # Orchestrators are generators that yield tasks and receive/return any type
437 | Orchestrator = Callable[[OrchestrationContext, TInput], Union[Generator[Task, Any, Any], TOutput]]
438 | 
439 | # Activities are simple functions that can be scheduled by orchestrators
440 | Activity = Callable[[ActivityContext, TInput], TOutput]
441 | 
442 | 
443 | class RetryPolicy:
444 |     """Represents the retry policy for an orchestration or activity function."""
445 | 
446 |     def __init__(self, *,
447 |                  first_retry_interval: timedelta,
448 |                  max_number_of_attempts: int,
449 |                  backoff_coefficient: Optional[float] = 1.0,
450 |                  max_retry_interval: Optional[timedelta] = None,
451 |                  retry_timeout: Optional[timedelta] = None):
452 |         """Creates a new RetryPolicy instance.
453 | 
454 |         Parameters
455 |         ----------
456 |         first_retry_interval : timedelta
457 |             The retry interval to use for the first retry attempt.
458 |         max_number_of_attempts : int
459 |             The maximum number of retry attempts.
460 |         backoff_coefficient : Optional[float]
461 |             The backoff coefficient to use for calculating the next retry interval.
462 |         max_retry_interval : Optional[timedelta]
463 |             The maximum retry interval to use for any retry attempt.
464 |         retry_timeout : Optional[timedelta]
465 |             The maximum amount of time to spend retrying the operation.
466 | """ 467 | # validate inputs 468 | if first_retry_interval < timedelta(seconds=0): 469 | raise ValueError('first_retry_interval must be >= 0') 470 | if max_number_of_attempts < 1: 471 | raise ValueError('max_number_of_attempts must be >= 1') 472 | if backoff_coefficient is not None and backoff_coefficient < 1: 473 | raise ValueError('backoff_coefficient must be >= 1') 474 | if max_retry_interval is not None and max_retry_interval < timedelta(seconds=0): 475 | raise ValueError('max_retry_interval must be >= 0') 476 | if retry_timeout is not None and retry_timeout < timedelta(seconds=0): 477 | raise ValueError('retry_timeout must be >= 0') 478 | 479 | self._first_retry_interval = first_retry_interval 480 | self._max_number_of_attempts = max_number_of_attempts 481 | self._backoff_coefficient = backoff_coefficient 482 | self._max_retry_interval = max_retry_interval 483 | self._retry_timeout = retry_timeout 484 | 485 | @property 486 | def first_retry_interval(self) -> timedelta: 487 | """The retry interval to use for the first retry attempt.""" 488 | return self._first_retry_interval 489 | 490 | @property 491 | def max_number_of_attempts(self) -> int: 492 | """The maximum number of retry attempts.""" 493 | return self._max_number_of_attempts 494 | 495 | @property 496 | def backoff_coefficient(self) -> Optional[float]: 497 | """The backoff coefficient to use for calculating the next retry interval.""" 498 | return self._backoff_coefficient 499 | 500 | @property 501 | def max_retry_interval(self) -> Optional[timedelta]: 502 | """The maximum retry interval to use for any retry attempt.""" 503 | return self._max_retry_interval 504 | 505 | @property 506 | def retry_timeout(self) -> Optional[timedelta]: 507 | """The maximum amount of time to spend retrying the operation.""" 508 | return self._retry_timeout 509 | 510 | 511 | def get_name(fn: Callable) -> str: 512 | """Returns the name of the provided function""" 513 | name = fn.__name__ 514 | if name == '': 515 | raise ValueError('Cannot infer a name from a lambda function. Please provide a name explicitly.') 516 | 517 | return name 518 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | This directory contains examples of how to author durable orchestrations using the Durable Task Python SDK. 4 | 5 | ## Prerequisites 6 | 7 | All the examples assume that you have a Durable Task-compatible sidecar running locally. There are two options for this: 8 | 9 | 1. Install the latest version of the [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/), which contains and exposes an embedded version of the Durable Task engine. The setup process (which requires Docker) will configure the workflow engine to store state in a local Redis container. 10 | 11 | 2. Clone and run the [Durable Task Sidecar](https://github.com/microsoft/durabletask-go) project locally (requires Go 1.18 or higher). Orchestration state will be stored in a local sqlite database. 12 | 13 | ## Running the examples 14 | 15 | With one of the sidecars running, you can simply execute any of the examples in this directory using `python3`: 16 | 17 | ```sh 18 | python3 ./activity_sequence.py 19 | ``` 20 | 21 | In some cases, the sample may require command-line parameters or user inputs. In these cases, the sample will print out instructions on how to proceed. 
22 | 
23 | ## List of examples
24 | 
25 | - [Activity sequence](./activity_sequence.py): Orchestration that schedules three activity calls in a sequence.
26 | - [Fan-out/fan-in](./fanout_fanin.py): Orchestration that schedules a dynamic number of activity calls in parallel, waits for all of them to complete, and then performs an aggregation on the results.
27 | - [Human interaction](./human_interaction.py): Orchestration that waits for a human to approve an order before continuing.
28 | 
-------------------------------------------------------------------------------- /examples/activity_sequence.py: --------------------------------------------------------------------------------
1 | """End-to-end sample that demonstrates how to configure an orchestrator
2 | that calls an activity function in a sequence and prints the outputs."""
3 | from durabletask import client, task, worker
4 | 
5 | 
6 | def hello(ctx: task.ActivityContext, name: str) -> str:
7 |     """Activity function that returns a greeting"""
8 |     return f'Hello {name}!'
9 | 
10 | 
11 | def sequence(ctx: task.OrchestrationContext, _):
12 |     """Orchestrator function that calls the 'hello' activity function in a sequence"""
13 |     # call "hello" activity function in a sequence
14 |     result1 = yield ctx.call_activity(hello, input='Tokyo')
15 |     result2 = yield ctx.call_activity(hello, input='Seattle')
16 |     result3 = yield ctx.call_activity(hello, input='London')
17 | 
18 |     # return an array of results
19 |     return [result1, result2, result3]
20 | 
21 | 
22 | # configure and start the worker
23 | with worker.TaskHubGrpcWorker() as w:
24 |     w.add_orchestrator(sequence)
25 |     w.add_activity(hello)
26 |     w.start()
27 | 
28 |     # create a client, start an orchestration, and wait for it to finish
29 |     c = client.TaskHubGrpcClient()
30 |     instance_id = c.schedule_new_orchestration(sequence)
31 |     state = c.wait_for_orchestration_completion(instance_id, timeout=10)
32 |     if state and state.runtime_status == client.OrchestrationStatus.COMPLETED:
33 |         print(f'Orchestration completed! Result: {state.serialized_output}')
34 |     elif state:
35 |         print(f'Orchestration failed: {state.failure_details}')
36 | 
-------------------------------------------------------------------------------- /examples/dts/README.md: --------------------------------------------------------------------------------
1 | # Examples
2 | 
3 | This directory contains examples of how to author durable orchestrations using the Durable Task Python SDK in conjunction with the Durable Task Scheduler (DTS). Please note that the installation instructions provided below will use the version of DTS directly from your branch rather than installing through PyPI.
4 | 
5 | ## Prerequisites
6 | 
7 | There are two separate ways to run an example:
8 | 1. Using the emulator.
9 | 2. Using a real scheduler and taskhub.
10 | 
11 | By default, all the examples assume that you have a Durable Task Scheduler taskhub created.
12 | 
13 | ## Running with a scheduler and taskhub resource
14 | The simplest way to create a taskhub is by using the az CLI commands:
15 | 
16 | 1. Create a scheduler:
17 |     az durabletask scheduler create --resource-group <resource-group> --name <scheduler-name> --location <location> --ip-allowlist "[0.0.0.0/0]" --sku-capacity 1 --sku-name "Dedicated" --tags "{}"
18 | 
19 | 1. Create your taskhub
20 | 
21 | ```bash
22 | az durabletask taskhub create --resource-group <resource-group> --scheduler-name <scheduler-name> --name <taskhub-name>
23 | ```
24 | 
25 | 1. Retrieve the endpoint for the scheduler. This can be done by locating the taskhub in the portal.
26 | 
27 | 1.
Set the appropriate environment variables for the TASKHUB and ENDPOINT
28 | 
29 | ```bash
30 | export TASKHUB=<taskhub-name>
31 | export ENDPOINT=<scheduler-endpoint>
32 | ```
33 | 
34 | 1. Since the samples rely on azure-identity, ensure the package is installed and up-to-date
35 | 
36 | ```bash
37 | python3 -m pip install azure-identity
38 | ```
39 | 
40 | 1. Install the correct packages from the top level of this repository, i.e. durabletask-python/
41 | 
42 | ```bash
43 | python3 -m pip install .
44 | ```
45 | 
46 | 1. Install the DTS-specific packages from the durabletask-python/durabletask-azuremanaged directory
47 | 
48 | ```bash
49 | pip3 install -e .
50 | ```
51 | 
52 | 1. Grant yourself the `Durable Task Data Contributor` role over your scheduler
53 | 
54 | ## Running with the emulator
55 | The emulator is a simulation of a scheduler and taskhub. It is the 'backend' of the durabletask-azuremanaged system packaged up into an easy-to-use Docker container. For these steps, it is assumed that you are using port 8080.
56 | 
57 | In order to use the emulator for the examples, perform the following steps:
58 | 1. Install Docker if it is not already installed.
59 | 
60 | 2. Pull down the Docker image for the emulator:
61 | `docker pull mcr.microsoft.com/dts/dts-emulator:v0.0.4`
62 | 
63 | 3. Run the emulator and wait a few seconds for the container to be ready:
64 | `docker run --name dtsemulator -d -p 8080:8080 mcr.microsoft.com/dts/dts-emulator:v0.0.4`
65 | 
66 | 4. Set the environment variables that are referenced and used in the examples:
67 |     1. If you are using Windows PowerShell:
68 |         `$env:TASKHUB="default"`
69 |         `$env:ENDPOINT="http://localhost:8080"`
70 |     2. If you are using bash:
71 |         `export TASKHUB=default`
72 |         `export ENDPOINT=http://localhost:8080`
73 | 
74 | 5. Finally, edit the examples to change the `token_credential` input of both the `DurableTaskSchedulerWorker` and `DurableTaskSchedulerClient` to a value of `None`
75 | 
76 | 
77 | ## Running the examples
78 | 
79 | Now, you can simply execute any of the examples in this directory using `python3`:
80 | 
81 | ```sh
82 | python3 dts_activity_sequence.py
83 | ```
84 | 
-------------------------------------------------------------------------------- /examples/dts/dts_activity_sequence.py: --------------------------------------------------------------------------------
1 | """End-to-end sample that demonstrates how to configure an orchestrator
2 | that calls an activity function in a sequence and prints the outputs."""
3 | import os
4 | 
5 | from azure.identity import DefaultAzureCredential
6 | 
7 | from durabletask import client, task
8 | from durabletask.azuremanaged.client import DurableTaskSchedulerClient
9 | from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker
10 | 
11 | 
12 | def hello(ctx: task.ActivityContext, name: str) -> str:
13 |     """Activity function that returns a greeting"""
14 |     return f'Hello {name}!'
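# Added commentary: per the README steps above, when running against the local
# emulator you would pass token_credential=None to the DurableTaskSchedulerWorker
# and DurableTaskSchedulerClient constructed below, instead of the
# DefaultAzureCredential used for a real scheduler.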
15 | 
16 | 
17 | def sequence(ctx: task.OrchestrationContext, _):
18 |     """Orchestrator function that calls the 'hello' activity function in a sequence"""
19 |     # call "hello" activity function in a sequence
20 |     result1 = yield ctx.call_activity(hello, input='Tokyo')
21 |     result2 = yield ctx.call_activity(hello, input='Seattle')
22 |     result3 = yield ctx.call_activity(hello, input='London')
23 | 
24 |     # return an array of results
25 |     return [result1, result2, result3]
26 | 
27 | 
28 | # Read the environment variable
29 | taskhub_name = os.getenv("TASKHUB")
30 | 
31 | # Check if the variable exists
32 | if taskhub_name:
33 |     print(f"The value of TASKHUB is: {taskhub_name}")
34 | else:
35 |     print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use")
36 |     print("If you are using Windows PowerShell, run the following: $env:TASKHUB=\"<taskhub name>\"")
37 |     print("If you are using bash, run the following: export TASKHUB=\"<taskhub name>\"")
38 |     exit()
39 | 
40 | # Read the environment variable
41 | endpoint = os.getenv("ENDPOINT")
42 | 
43 | # Check if the variable exists
44 | if endpoint:
45 |     print(f"The value of ENDPOINT is: {endpoint}")
46 | else:
47 |     print("ENDPOINT is not set. Please set the ENDPOINT environment variable to the endpoint of the scheduler")
48 |     print("If you are using Windows PowerShell, run the following: $env:ENDPOINT=\"<scheduler endpoint>\"")
49 |     print("If you are using bash, run the following: export ENDPOINT=\"<scheduler endpoint>\"")
50 |     exit()
51 | 
52 | # Note that any azure-identity credential type and configuration can be used here as DTS supports various credential
53 | # types such as Managed Identities
54 | credential = DefaultAzureCredential()
55 | 
56 | # configure and start the worker
57 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True,
58 |                                 taskhub=taskhub_name, token_credential=credential) as w:
59 |     w.add_orchestrator(sequence)
60 |     w.add_activity(hello)
61 |     w.start()
62 | 
63 |     # Construct the client and run the orchestrations
64 |     c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True,
65 |                                    taskhub=taskhub_name, token_credential=credential)
66 |     instance_id = c.schedule_new_orchestration(sequence)
67 |     state = c.wait_for_orchestration_completion(instance_id, timeout=60)
68 |     if state and state.runtime_status == client.OrchestrationStatus.COMPLETED:
69 |         print(f'Orchestration completed!
Result: {state.serialized_output}')
70 |     elif state:
71 |         print(f'Orchestration failed: {state.failure_details}')
72 | 
-------------------------------------------------------------------------------- /examples/dts/dts_fanout_fanin.py: --------------------------------------------------------------------------------
1 | """End-to-end sample that demonstrates how to configure an orchestrator
2 | that calls a dynamic number of activity functions in parallel, waits for them all
3 | to complete, and prints an aggregate summary of the outputs."""
4 | import os
5 | import random
6 | import time
7 | 
8 | from azure.identity import DefaultAzureCredential
9 | 
10 | from durabletask import client, task
11 | from durabletask.azuremanaged.client import DurableTaskSchedulerClient
12 | from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker
13 | 
14 | 
15 | def get_work_items(ctx: task.ActivityContext, _) -> list[str]:
16 |     """Activity function that returns a list of work items"""
17 |     # return a random number of work items
18 |     count = random.randint(2, 10)
19 |     print(f'generating {count} work items...')
20 |     return [f'work item {i}' for i in range(count)]
21 | 
22 | 
23 | def process_work_item(ctx: task.ActivityContext, item: str) -> int:
24 |     """Activity function that returns a result for a given work item"""
25 |     print(f'processing work item: {item}')
26 | 
27 |     # simulate some work that takes a variable amount of time
28 |     time.sleep(random.random() * 5)
29 | 
30 |     # return a result for the given work item, which is also a random number in this case
31 |     return random.randint(0, 10)
32 | 
33 | 
34 | def orchestrator(ctx: task.OrchestrationContext, _):
35 |     """Orchestrator function that calls the 'get_work_items' and 'process_work_item'
36 |     activity functions in parallel, waits for them all to complete, and prints
37 |     an aggregate summary of the outputs"""
38 | 
39 |     work_items: list[str] = yield ctx.call_activity(get_work_items)
40 | 
41 |     # execute the work-items in parallel and wait for them all to return
42 |     tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items]
43 |     results: list[int] = yield task.when_all(tasks)
44 | 
45 |     # return an aggregate summary of the results
46 |     return {
47 |         'work_items': work_items,
48 |         'results': results,
49 |         'total': sum(results),
50 |     }
51 | 
52 | 
53 | # Read the environment variable
54 | taskhub_name = os.getenv("TASKHUB")
55 | 
56 | # Check if the variable exists
57 | if taskhub_name:
58 |     print(f"The value of TASKHUB is: {taskhub_name}")
59 | else:
60 |     print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use")
61 |     print("If you are using Windows PowerShell, run the following: $env:TASKHUB=\"<taskhub name>\"")
62 |     print("If you are using bash, run the following: export TASKHUB=\"<taskhub name>\"")
63 |     exit()
64 | 
65 | # Read the environment variable
66 | endpoint = os.getenv("ENDPOINT")
67 | 
68 | # Check if the variable exists
69 | if endpoint:
70 |     print(f"The value of ENDPOINT is: {endpoint}")
71 | else:
72 |     print("ENDPOINT is not set.
Please set the ENDPOINT environment variable to the endpoint of the scheduler")
73 |     print("If you are using Windows PowerShell, run the following: $env:ENDPOINT=\"<scheduler endpoint>\"")
74 |     print("If you are using bash, run the following: export ENDPOINT=\"<scheduler endpoint>\"")
75 |     exit()
76 | 
77 | credential = DefaultAzureCredential()
78 | 
79 | # configure and start the worker
80 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True,
81 |                                 taskhub=taskhub_name, token_credential=credential) as w:
82 |     w.add_orchestrator(orchestrator)
83 |     w.add_activity(process_work_item)
84 |     w.add_activity(get_work_items)
85 |     w.start()
86 | 
87 |     # create a client, start an orchestration, and wait for it to finish
88 |     c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True,
89 |                                    taskhub=taskhub_name, token_credential=credential)
90 |     instance_id = c.schedule_new_orchestration(orchestrator)
91 |     state = c.wait_for_orchestration_completion(instance_id, timeout=30)
92 |     if state and state.runtime_status == client.OrchestrationStatus.COMPLETED:
93 |         print(f'Orchestration completed! Result: {state.serialized_output}')
94 |     elif state:
95 |         print(f'Orchestration failed: {state.failure_details}')
96 |     exit()
97 | 
-------------------------------------------------------------------------------- /examples/dts/requirements.txt: --------------------------------------------------------------------------------
1 | autopep8
2 | grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible
3 | protobuf
4 | azure-identity
5 | durabletask-azuremanaged
6 | durabletask
-------------------------------------------------------------------------------- /examples/fanout_fanin.py: --------------------------------------------------------------------------------
1 | """End-to-end sample that demonstrates how to configure an orchestrator
2 | that calls a dynamic number of activity functions in parallel, waits for them all
3 | to complete, and prints an aggregate summary of the outputs."""
4 | import random
5 | import time
6 | 
7 | from durabletask import client, task, worker
8 | 
9 | 
10 | def get_work_items(ctx: task.ActivityContext, _) -> list[str]:
11 |     """Activity function that returns a list of work items"""
12 |     # return a random number of work items
13 |     count = random.randint(2, 10)
14 |     print(f'generating {count} work items...')
15 |     return [f'work item {i}' for i in range(count)]
16 | 
17 | 
18 | def process_work_item(ctx: task.ActivityContext, item: str) -> int:
19 |     """Activity function that returns a result for a given work item"""
20 |     print(f'processing work item: {item}')
21 | 
22 |     # simulate some work that takes a variable amount of time
23 |     time.sleep(random.random() * 5)
24 | 
25 |     # return a result for the given work item, which is also a random number in this case
26 |     return random.randint(0, 10)
27 | 
28 | 
29 | def orchestrator(ctx: task.OrchestrationContext, _):
30 |     """Orchestrator function that calls the 'get_work_items' and 'process_work_item'
31 |     activity functions in parallel, waits for them all to complete, and prints
32 |     an aggregate summary of the outputs"""
33 | 
34 |     work_items: list[str] = yield ctx.call_activity(get_work_items)
35 | 
36 |     # execute the work-items in parallel and wait for them all to return
37 |     tasks = [ctx.call_activity(process_work_item, input=item) for item in work_items]
38 |     results: list[int] = yield task.when_all(tasks)
39 | 
40 |     # return an aggregate summary of the results
41 |     return {
42 |         'work_items': work_items,
43 |         'results': results,
44 |         'total': sum(results),
45 |     }
46 | 
47 | 
48 | # configure and start the worker
49 | with worker.TaskHubGrpcWorker() as w:
50 |     w.add_orchestrator(orchestrator)
51 |     w.add_activity(process_work_item)
52 |     w.add_activity(get_work_items)
53 |     w.start()
54 | 
55 |     # create a client, start an orchestration, and wait for it to finish
56 |     c = client.TaskHubGrpcClient()
57 |     instance_id = c.schedule_new_orchestration(orchestrator)
58 |     state = c.wait_for_orchestration_completion(instance_id, timeout=30)
59 |     if state and state.runtime_status == client.OrchestrationStatus.COMPLETED:
60 |         print(f'Orchestration completed! Result: {state.serialized_output}')
61 |     elif state:
62 |         print(f'Orchestration failed: {state.failure_details}')
63 | 
-------------------------------------------------------------------------------- /examples/human_interaction.py: --------------------------------------------------------------------------------
1 | """End-to-end sample that demonstrates how to configure an orchestrator
2 | that waits for an "approval" event before proceeding to the next step. If
3 | the approval isn't received within a specified timeout, the order that is
4 | represented by the orchestration is automatically cancelled."""
5 | 
6 | import threading
7 | import time
8 | from collections import namedtuple
9 | from dataclasses import dataclass
10 | from datetime import timedelta
11 | 
12 | from durabletask import client, task, worker
13 | 
14 | 
15 | @dataclass
16 | class Order:
17 |     """Represents a purchase order"""
18 |     Cost: float
19 |     Product: str
20 |     Quantity: int
21 | 
22 |     def __str__(self):
23 |         return f'{self.Product} ({self.Quantity})'
24 | 
25 | 
26 | def send_approval_request(_: task.ActivityContext, order: Order) -> None:
27 |     """Activity function that sends an approval request to the manager"""
28 |     time.sleep(5)
29 |     print(f'*** Sending approval request for order: {order}')
30 | 
31 | 
32 | def place_order(_: task.ActivityContext, order: Order) -> None:
33 |     """Activity function that places an order"""
34 |     print(f'*** Placing order: {order}')
35 | 
36 | 
37 | def purchase_order_workflow(ctx: task.OrchestrationContext, order: Order):
38 |     """Orchestrator function that represents a purchase order workflow"""
39 |     # Orders under $1000 are auto-approved
40 |     if order.Cost < 1000:
41 |         return "Auto-approved"
42 | 
43 |     # Orders of $1000 or more require manager approval
44 |     yield ctx.call_activity(send_approval_request, input=order)
45 | 
46 |     # Approvals must be received within 24 hours or they will be canceled.
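    # Added commentary: this races the external approval event against a durable
    # timer. task.when_any returns the first child task to complete, so comparing
    # the winner against timeout_event below tells the orchestrator whether the
    # approval arrived in time.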
47 | approval_event = ctx.wait_for_external_event("approval_received") 48 | timeout_event = ctx.create_timer(timedelta(hours=24)) 49 | winner = yield task.when_any([approval_event, timeout_event]) 50 | if winner == timeout_event: 51 | return "Cancelled" 52 | 53 | # The order was approved 54 | yield ctx.call_activity(place_order, input=order) 55 | approval_details = approval_event.get_result() 56 | return f"Approved by '{approval_details.approver}'" 57 | 58 | 59 | if __name__ == "__main__": 60 | import argparse 61 | 62 | parser = argparse.ArgumentParser(description="Order purchasing workflow demo.") 63 | parser.add_argument("--cost", type=int, default=2000, help="Cost of the order") 64 | parser.add_argument("--approver", type=str, default="Me", help="Approver name") 65 | parser.add_argument("--timeout", type=int, default=60, help="Timeout in seconds") 66 | args = parser.parse_args() 67 | 68 | # configure and start the worker 69 | with worker.TaskHubGrpcWorker() as w: 70 | w.add_orchestrator(purchase_order_workflow) 71 | w.add_activity(send_approval_request) 72 | w.add_activity(place_order) 73 | w.start() 74 | 75 | c = client.TaskHubGrpcClient() 76 | 77 | # Start a purchase order workflow using the user input 78 | order = Order(args.cost, "MyProduct", 1) 79 | instance_id = c.schedule_new_orchestration(purchase_order_workflow, input=order) 80 | 81 | def prompt_for_approval(): 82 | input("Press [ENTER] to approve the order...\n") 83 | approval_event = namedtuple("Approval", ["approver"])(args.approver) 84 | c.raise_orchestration_event(instance_id, "approval_received", data=approval_event) 85 | 86 | # Prompt the user for approval on a background thread 87 | threading.Thread(target=prompt_for_approval, daemon=True).start() 88 | 89 | # Wait for the orchestration to complete 90 | try: 91 | state = c.wait_for_orchestration_completion(instance_id, timeout=args.timeout + 2) 92 | if not state: 93 | print("Workflow not found!") # not expected 94 | elif state.runtime_status == client.OrchestrationStatus.COMPLETED: 95 | print(f'Orchestration completed! Result: {state.serialized_output}') 96 | else: 97 | state.raise_if_failed() # raises an exception 98 | except TimeoutError: 99 | print("*** Orchestration timed out!") 100 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
3 | 4 | # For more information on pyproject.toml, see https://peps.python.org/pep-0621/ 5 | 6 | [build-system] 7 | requires = ["setuptools", "wheel"] 8 | build-backend = "setuptools.build_meta" 9 | 10 | [project] 11 | name = "durabletask" 12 | version = "0.3.0" 13 | description = "A Durable Task Client SDK for Python" 14 | keywords = [ 15 | "durable", 16 | "task", 17 | "workflow" 18 | ] 19 | classifiers = [ 20 | "Development Status :: 3 - Alpha", 21 | "Programming Language :: Python :: 3", 22 | "License :: OSI Approved :: MIT License", 23 | ] 24 | requires-python = ">=3.9" 25 | license = {file = "LICENSE"} 26 | readme = "README.md" 27 | dependencies = [ 28 | "grpcio", 29 | "protobuf", 30 | "asyncio" 31 | ] 32 | 33 | [project.urls] 34 | repository = "https://github.com/microsoft/durabletask-python" 35 | changelog = "https://github.com/microsoft/durabletask-python/blob/main/CHANGELOG.md" 36 | 37 | [tool.setuptools.packages.find] 38 | include = ["durabletask", "durabletask.*"] 39 | 40 | [tool.pytest.ini_options] 41 | minversion = "6.0" 42 | testpaths = ["tests"] 43 | markers = [ 44 | "e2e: mark a test as an end-to-end test that requires a running sidecar" 45 | ] 46 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | autopep8 2 | grpcio>=1.60.0 # 1.60.0 is the version introducing protobuf 1.25.X support, newer versions are backwards compatible 3 | protobuf 4 | pytest 5 | pytest-cov 6 | azure-identity 7 | asyncio -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/durabletask-python/a49f1e044817865267e77fff389e3b3fd4d7b29d/tests/__init__.py -------------------------------------------------------------------------------- /tests/durabletask-azuremanaged/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/durabletask-python/a49f1e044817865267e77fff389e3b3fd4d7b29d/tests/durabletask-azuremanaged/__init__.py -------------------------------------------------------------------------------- /tests/durabletask-azuremanaged/test_dts_activity_sequence.py: -------------------------------------------------------------------------------- 1 | """End-to-end sample that demonstrates how to configure an orchestrator 2 | that calls an activity function in a sequence and prints the outputs.""" 3 | import os 4 | 5 | import pytest 6 | 7 | from durabletask import client, task 8 | from durabletask.azuremanaged.client import DurableTaskSchedulerClient 9 | from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker 10 | 11 | pytestmark = pytest.mark.dts 12 | 13 | 14 | def hello(ctx: task.ActivityContext, name: str) -> str: 15 | """Activity function that returns a greeting""" 16 | return f'Hello {name}!' 
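# Added commentary: the TASKHUB/ENDPOINT checks below run at module import time,
# so these environment variables must be set for pytest even to collect this file;
# the module-level pytestmark above additionally gates execution behind the "dts"
# marker.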
17 | 
18 | 
19 | def sequence(ctx: task.OrchestrationContext, _):
20 |     """Orchestrator function that calls the 'hello' activity function in a sequence"""
21 |     # call "hello" activity function in a sequence
22 |     result1 = yield ctx.call_activity(hello, input='Tokyo')
23 |     result2 = yield ctx.call_activity(hello, input='Seattle')
24 |     result3 = yield ctx.call_activity(hello, input='London')
25 | 
26 |     # return an array of results
27 |     return [result1, result2, result3]
28 | 
29 | 
30 | # Read the environment variable
31 | taskhub_name = os.getenv("TASKHUB")
32 | 
33 | # Check if the variable exists
34 | if taskhub_name:
35 |     print(f"The value of TASKHUB is: {taskhub_name}")
36 | else:
37 |     print("TASKHUB is not set. Please set the TASKHUB environment variable to the name of the taskhub you wish to use")
38 |     print("If you are using Windows PowerShell, run the following: $env:TASKHUB=\"<taskhub name>\"")
39 |     print("If you are using bash, run the following: export TASKHUB=\"<taskhub name>\"")
40 |     exit()
41 | 
42 | # Read the environment variable
43 | endpoint = os.getenv("ENDPOINT")
44 | 
45 | # Check if the variable exists
46 | if endpoint:
47 |     print(f"The value of ENDPOINT is: {endpoint}")
48 | else:
49 |     print("ENDPOINT is not set. Please set the ENDPOINT environment variable to the endpoint of the scheduler")
50 |     print("If you are using Windows PowerShell, run the following: $env:ENDPOINT=\"<scheduler endpoint>\"")
51 |     print("If you are using bash, run the following: export ENDPOINT=\"<scheduler endpoint>\"")
52 |     exit()
53 | 
54 | # configure and start the worker
55 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True,
56 |                                 taskhub=taskhub_name, token_credential=None) as w:
57 |     w.add_orchestrator(sequence)
58 |     w.add_activity(hello)
59 |     w.start()
60 | 
61 |     # Construct the client and run the orchestrations
62 |     c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True,
63 |                                    taskhub=taskhub_name, token_credential=None)
64 |     instance_id = c.schedule_new_orchestration(sequence)
65 |     state = c.wait_for_orchestration_completion(instance_id, timeout=60)
66 |     if state and state.runtime_status == client.OrchestrationStatus.COMPLETED:
67 |         print(f'Orchestration completed! Result: {state.serialized_output}')
68 |     elif state:
69 |         print(f'Orchestration failed: {state.failure_details}')
70 | 
-------------------------------------------------------------------------------- /tests/durabletask-azuremanaged/test_dts_orchestration_e2e.py: --------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation.
2 | # Licensed under the MIT License.
3 | 
4 | import json
5 | import os
6 | import threading
7 | from datetime import timedelta
8 | 
9 | import pytest
10 | 
11 | from durabletask import client, task
12 | from durabletask.azuremanaged.client import DurableTaskSchedulerClient
13 | from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker
14 | 
15 | # NOTE: These tests assume a sidecar process is running. Example command:
16 | # docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator
17 | pytestmark = pytest.mark.dts
18 | 
19 | # Read the environment variables
20 | taskhub_name = os.getenv("TASKHUB", "default")
21 | endpoint = os.getenv("ENDPOINT", "http://localhost:8080")
22 | 
23 | 
24 | def test_empty_orchestration():
25 | 
26 |     invoked = False
27 | 
28 |     def empty_orchestrator(ctx: task.OrchestrationContext, _):
29 |         nonlocal invoked  # don't do this in a real app!
30 | invoked = True 31 | 32 | # Start a worker, which will connect to the sidecar in a background thread 33 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 34 | taskhub=taskhub_name, token_credential=None) as w: 35 | w.add_orchestrator(empty_orchestrator) 36 | w.start() 37 | 38 | c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 39 | taskhub=taskhub_name, token_credential=None) 40 | id = c.schedule_new_orchestration(empty_orchestrator) 41 | state = c.wait_for_orchestration_completion(id, timeout=30) 42 | 43 | assert invoked 44 | assert state is not None 45 | assert state.name == task.get_name(empty_orchestrator) 46 | assert state.instance_id == id 47 | assert state.failure_details is None 48 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 49 | assert state.serialized_input is None 50 | assert state.serialized_output is None 51 | assert state.serialized_custom_status is None 52 | 53 | 54 | def test_activity_sequence(): 55 | 56 | def plus_one(_: task.ActivityContext, input: int) -> int: 57 | return input + 1 58 | 59 | def sequence(ctx: task.OrchestrationContext, start_val: int): 60 | numbers = [start_val] 61 | current = start_val 62 | for _ in range(10): 63 | current = yield ctx.call_activity(plus_one, input=current) 64 | numbers.append(current) 65 | return numbers 66 | 67 | # Start a worker, which will connect to the sidecar in a background thread 68 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 69 | taskhub=taskhub_name, token_credential=None) as w: 70 | w.add_orchestrator(sequence) 71 | w.add_activity(plus_one) 72 | w.start() 73 | 74 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 75 | taskhub=taskhub_name, token_credential=None) 76 | id = task_hub_client.schedule_new_orchestration(sequence, input=1) 77 | state = task_hub_client.wait_for_orchestration_completion( 78 | id, timeout=30) 79 | 80 | assert state is not None 81 | assert state.name == task.get_name(sequence) 82 | assert state.instance_id == id 83 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 84 | assert state.failure_details is None 85 | assert state.serialized_input == json.dumps(1) 86 | assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) 87 | assert state.serialized_custom_status is None 88 | 89 | 90 | def test_activity_error_handling(): 91 | 92 | def throw(_: task.ActivityContext, input: int) -> int: 93 | raise RuntimeError("Kah-BOOOOM!!!") 94 | 95 | compensation_counter = 0 96 | 97 | def increment_counter(ctx, _): 98 | nonlocal compensation_counter 99 | compensation_counter += 1 100 | 101 | def orchestrator(ctx: task.OrchestrationContext, input: int): 102 | error_msg = "" 103 | try: 104 | yield ctx.call_activity(throw, input=input) 105 | except task.TaskFailedError as e: 106 | error_msg = e.details.message 107 | 108 | # compensating actions 109 | yield ctx.call_activity(increment_counter) 110 | yield ctx.call_activity(increment_counter) 111 | 112 | return error_msg 113 | 114 | # Start a worker, which will connect to the sidecar in a background thread 115 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 116 | taskhub=taskhub_name, token_credential=None) as w: 117 | w.add_orchestrator(orchestrator) 118 | w.add_activity(throw) 119 | w.add_activity(increment_counter) 120 | w.start() 121 | 122 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 123 | 
taskhub=taskhub_name, token_credential=None) 124 | id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) 125 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 126 | 127 | assert state is not None 128 | assert state.name == task.get_name(orchestrator) 129 | assert state.instance_id == id 130 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 131 | assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") 132 | assert state.failure_details is None 133 | assert state.serialized_custom_status is None 134 | assert compensation_counter == 2 135 | 136 | 137 | def test_sub_orchestration_fan_out(): 138 | threadLock = threading.Lock() 139 | activity_counter = 0 140 | 141 | def increment(ctx, _): 142 | with threadLock: 143 | nonlocal activity_counter 144 | activity_counter += 1 145 | 146 | def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): 147 | for _ in range(activity_count): 148 | yield ctx.call_activity(increment) 149 | 150 | def parent_orchestrator(ctx: task.OrchestrationContext, count: int): 151 | # Fan out to multiple sub-orchestrations 152 | tasks = [] 153 | for _ in range(count): 154 | tasks.append(ctx.call_sub_orchestrator( 155 | orchestrator_child, input=3)) 156 | # Wait for all sub-orchestrations to complete 157 | yield task.when_all(tasks) 158 | 159 | # Start a worker, which will connect to the sidecar in a background thread 160 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 161 | taskhub=taskhub_name, token_credential=None) as w: 162 | w.add_activity(increment) 163 | w.add_orchestrator(orchestrator_child) 164 | w.add_orchestrator(parent_orchestrator) 165 | w.start() 166 | 167 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 168 | taskhub=taskhub_name, token_credential=None) 169 | id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) 170 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 171 | 172 | assert state is not None 173 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 174 | assert state.failure_details is None 175 | assert activity_counter == 30 176 | 177 | 178 | def test_wait_for_multiple_external_events(): 179 | def orchestrator(ctx: task.OrchestrationContext, _): 180 | a = yield ctx.wait_for_external_event('A') 181 | b = yield ctx.wait_for_external_event('B') 182 | c = yield ctx.wait_for_external_event('C') 183 | return [a, b, c] 184 | 185 | # Start a worker, which will connect to the sidecar in a background thread 186 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 187 | taskhub=taskhub_name, token_credential=None) as w: 188 | w.add_orchestrator(orchestrator) 189 | w.start() 190 | 191 | # Start the orchestration and immediately raise events to it. 
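    # Added commentary: events raised before the orchestrator reaches the matching
    # wait_for_external_event call are buffered and delivered in order, which is why
    # raising A, B, and C immediately below still satisfies the orchestrator's three
    # sequential waits.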
192 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 193 | taskhub=taskhub_name, token_credential=None) 194 | id = task_hub_client.schedule_new_orchestration(orchestrator) 195 | task_hub_client.raise_orchestration_event(id, 'A', data='a') 196 | task_hub_client.raise_orchestration_event(id, 'B', data='b') 197 | task_hub_client.raise_orchestration_event(id, 'C', data='c') 198 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 199 | 200 | assert state is not None 201 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 202 | assert state.serialized_output == json.dumps(['a', 'b', 'c']) 203 | 204 | 205 | # @pytest.mark.parametrize("raise_event", [True, False]) 206 | # def test_wait_for_external_event_timeout(raise_event: bool): 207 | # def orchestrator(ctx: task.OrchestrationContext, _): 208 | # approval: task.Task[bool] = ctx.wait_for_external_event('Approval') 209 | # timeout = ctx.create_timer(timedelta(seconds=3)) 210 | # winner = yield task.when_any([approval, timeout]) 211 | # if winner == approval: 212 | # return "approved" 213 | # else: 214 | # return "timed out" 215 | 216 | # # Start a worker, which will connect to the sidecar in a background thread 217 | # with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 218 | # taskhub=taskhub_name, token_credential=None) as w: 219 | # w.add_orchestrator(orchestrator) 220 | # w.start() 221 | 222 | # # Start the orchestration and immediately raise events to it. 223 | # task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 224 | # taskhub=taskhub_name, token_credential=None) 225 | # id = task_hub_client.schedule_new_orchestration(orchestrator) 226 | # if raise_event: 227 | # task_hub_client.raise_orchestration_event(id, 'Approval') 228 | # state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 229 | 230 | # assert state is not None 231 | # assert state.runtime_status == client.OrchestrationStatus.COMPLETED 232 | # if raise_event: 233 | # assert state.serialized_output == json.dumps("approved") 234 | # else: 235 | # assert state.serialized_output == json.dumps("timed out") 236 | 237 | 238 | # def test_suspend_and_resume(): 239 | # def orchestrator(ctx: task.OrchestrationContext, _): 240 | # result = yield ctx.wait_for_external_event("my_event") 241 | # return result 242 | 243 | # # Start a worker, which will connect to the sidecar in a background thread 244 | # with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 245 | # taskhub=taskhub_name, token_credential=None) as w: 246 | # w.add_orchestrator(orchestrator) 247 | # w.start() 248 | 249 | # task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 250 | # taskhub=taskhub_name, token_credential=None) 251 | # id = task_hub_client.schedule_new_orchestration(orchestrator) 252 | # state = task_hub_client.wait_for_orchestration_start(id, timeout=30) 253 | # assert state is not None 254 | 255 | # # Suspend the orchestration and wait for it to go into the SUSPENDED state 256 | # task_hub_client.suspend_orchestration(id) 257 | # counter = 0 258 | # while state.runtime_status == client.OrchestrationStatus.RUNNING and counter < 1200: 259 | # time.sleep(0.1) 260 | # state = task_hub_client.get_orchestration_state(id) 261 | # assert state is not None 262 | # counter+=1 263 | # assert state.runtime_status == client.OrchestrationStatus.SUSPENDED 264 | 265 | # # Raise an event to the orchestration and 
confirm that it does NOT complete 266 | # task_hub_client.raise_orchestration_event(id, "my_event", data=42) 267 | # try: 268 | # state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) 269 | # assert False, "Orchestration should not have completed" 270 | # except TimeoutError: 271 | # pass 272 | 273 | # # Resume the orchestration and wait for it to complete 274 | # task_hub_client.resume_orchestration(id) 275 | # state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 276 | # assert state is not None 277 | # assert state.runtime_status == client.OrchestrationStatus.COMPLETED 278 | # assert state.serialized_output == json.dumps(42) 279 | 280 | 281 | def test_terminate(): 282 | def orchestrator(ctx: task.OrchestrationContext, _): 283 | result = yield ctx.wait_for_external_event("my_event") 284 | return result 285 | 286 | # Start a worker, which will connect to the sidecar in a background thread 287 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 288 | taskhub=taskhub_name, token_credential=None) as w: 289 | w.add_orchestrator(orchestrator) 290 | w.start() 291 | 292 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 293 | taskhub=taskhub_name, token_credential=None) 294 | id = task_hub_client.schedule_new_orchestration(orchestrator) 295 | state = task_hub_client.wait_for_orchestration_start(id, timeout=30) 296 | assert state is not None 297 | assert state.runtime_status == client.OrchestrationStatus.RUNNING 298 | 299 | task_hub_client.terminate_orchestration(id, output="some reason for termination") 300 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 301 | assert state is not None 302 | assert state.runtime_status == client.OrchestrationStatus.TERMINATED 303 | assert state.serialized_output == json.dumps("some reason for termination") 304 | 305 | 306 | def test_terminate_recursive(): 307 | def root(ctx: task.OrchestrationContext, _): 308 | result = yield ctx.call_sub_orchestrator(child) 309 | return result 310 | 311 | def child(ctx: task.OrchestrationContext, _): 312 | result = yield ctx.wait_for_external_event("my_event") 313 | return result 314 | 315 | # Start a worker, which will connect to the sidecar in a background thread 316 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 317 | taskhub=taskhub_name, token_credential=None) as w: 318 | w.add_orchestrator(root) 319 | w.add_orchestrator(child) 320 | w.start() 321 | 322 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 323 | taskhub=taskhub_name, token_credential=None) 324 | id = task_hub_client.schedule_new_orchestration(root) 325 | state = task_hub_client.wait_for_orchestration_start(id, timeout=30) 326 | assert state is not None 327 | assert state.runtime_status == client.OrchestrationStatus.RUNNING 328 | 329 | # Terminate root orchestration(recursive set to True by default) 330 | task_hub_client.terminate_orchestration(id, output="some reason for termination") 331 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 332 | assert state is not None 333 | assert state.runtime_status == client.OrchestrationStatus.TERMINATED 334 | 335 | # Verify that child orchestration is also terminated 336 | task_hub_client.wait_for_orchestration_completion(id, timeout=30) 337 | assert state is not None 338 | assert state.runtime_status == client.OrchestrationStatus.TERMINATED 339 | 340 | task_hub_client.purge_orchestration(id) 341 
| state = task_hub_client.get_orchestration_state(id) 342 | assert state is None 343 | 344 | 345 | # def test_continue_as_new(): 346 | # all_results = [] 347 | 348 | # def orchestrator(ctx: task.OrchestrationContext, input: int): 349 | # result = yield ctx.wait_for_external_event("my_event") 350 | # if not ctx.is_replaying: 351 | # # NOTE: Real orchestrations should never interact with nonlocal variables like this. 352 | # nonlocal all_results 353 | # all_results.append(result) 354 | 355 | # if len(all_results) <= 4: 356 | # ctx.continue_as_new(max(all_results), save_events=True) 357 | # else: 358 | # return all_results 359 | 360 | # # Start a worker, which will connect to the sidecar in a background thread 361 | # with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 362 | # taskhub=taskhub_name, token_credential=None) as w: 363 | # w.add_orchestrator(orchestrator) 364 | # w.start() 365 | 366 | # task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 367 | # taskhub=taskhub_name, token_credential=None) 368 | # id = task_hub_client.schedule_new_orchestration(orchestrator, input=0) 369 | # task_hub_client.raise_orchestration_event(id, "my_event", data=1) 370 | # task_hub_client.raise_orchestration_event(id, "my_event", data=2) 371 | # task_hub_client.raise_orchestration_event(id, "my_event", data=3) 372 | # task_hub_client.raise_orchestration_event(id, "my_event", data=4) 373 | # task_hub_client.raise_orchestration_event(id, "my_event", data=5) 374 | 375 | # state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 376 | # assert state is not None 377 | # assert state.runtime_status == client.OrchestrationStatus.COMPLETED 378 | # assert state.serialized_output == json.dumps(all_results) 379 | # assert state.serialized_input == json.dumps(4) 380 | # assert all_results == [1, 2, 3, 4, 5] 381 | 382 | 383 | # NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet 384 | # support orchestration ID reuse. This gap is being tracked here: 385 | # https://github.com/microsoft/durabletask-go/issues/42 386 | def test_retry_policies(): 387 | # This test verifies that the retry policies are working as expected. 388 | # It does this by creating an orchestration that calls a sub-orchestrator, 389 | # which in turn calls an activity that always fails. 390 | # In this test, the retry policies are added, and the orchestration 391 | # should still fail. But, number of times the sub-orchestrator and activity 392 | # is called should increase as per the retry policies. 393 | 394 | child_orch_counter = 0 395 | throw_activity_counter = 0 396 | 397 | # Second setup: With retry policies 398 | retry_policy = task.RetryPolicy( 399 | first_retry_interval=timedelta(seconds=1), 400 | max_number_of_attempts=3, 401 | backoff_coefficient=1, 402 | max_retry_interval=timedelta(seconds=10), 403 | retry_timeout=timedelta(seconds=30)) 404 | 405 | def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _): 406 | yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy) 407 | 408 | def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _): 409 | nonlocal child_orch_counter 410 | if not ctx.is_replaying: 411 | # NOTE: Real orchestrations should never interact with nonlocal variables like this. 412 | # This is done only for testing purposes. 
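            # Added commentary on the expected counts: the parent retries the child
            # up to 3 attempts, and each child attempt retries the activity up to 3
            # attempts, so the asserts below expect 3 child executions and
            # 3 * 3 = 9 activity executions.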
413 |             child_orch_counter += 1
414 |         yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy)
415 | 
416 |     def throw_activity_with_retry(ctx: task.ActivityContext, _):
417 |         nonlocal throw_activity_counter
418 |         throw_activity_counter += 1
419 |         raise RuntimeError("Kah-BOOOOM!!!")
420 | 
421 |     with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True,
422 |                                     taskhub=taskhub_name, token_credential=None) as w:
423 |         w.add_orchestrator(parent_orchestrator_with_retry)
424 |         w.add_orchestrator(child_orchestrator_with_retry)
425 |         w.add_activity(throw_activity_with_retry)
426 |         w.start()
427 | 
428 |         task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True,
429 |                                                      taskhub=taskhub_name, token_credential=None)
430 |         id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry)
431 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
432 |         assert state is not None
433 |         assert state.runtime_status == client.OrchestrationStatus.FAILED
434 |         assert state.failure_details is not None
435 |         assert state.failure_details.error_type == "TaskFailedError"
436 |         assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:")
437 |         assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
438 |         assert state.failure_details.stack_trace is not None
439 |         assert throw_activity_counter == 9
440 |         assert child_orch_counter == 3
441 | 
442 | 
443 | def test_retry_timeout():
444 |     # This test verifies that the retry timeout is working as expected.
445 |     # Max number of attempts is 5 and the retry timeout is 14 seconds.
446 |     # The cumulative delay before the 5th attempt would be 1 + 2 + 4 + 8 = 15 seconds,
447 |     # which exceeds the 14-second timeout, so the 5th attempt should not be made and the orchestration should fail.
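    # Hedged sanity check of the arithmetic above (plain Python, not an SDK call):
    # derive the expected attempt count, assuming each delay doubles from
    # first_retry_interval and is capped at max_retry_interval, and that no retry
    # is scheduled once the cumulative delay would exceed retry_timeout.
    expected_attempts, elapsed, delay = 1, 0.0, 1.0
    while elapsed + delay <= 14:  # retry_timeout, in seconds
        elapsed += delay
        expected_attempts += 1
        delay = min(delay * 2, 10)  # backoff_coefficient=2, capped at max_retry_interval
    assert expected_attempts == 4  # matches the throw_activity_counter assertion below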
448 | throw_activity_counter = 0 449 | retry_policy = task.RetryPolicy( 450 | first_retry_interval=timedelta(seconds=1), 451 | max_number_of_attempts=5, 452 | backoff_coefficient=2, 453 | max_retry_interval=timedelta(seconds=10), 454 | retry_timeout=timedelta(seconds=14)) 455 | 456 | def mock_orchestrator(ctx: task.OrchestrationContext, _): 457 | yield ctx.call_activity(throw_activity, retry_policy=retry_policy) 458 | 459 | def throw_activity(ctx: task.ActivityContext, _): 460 | nonlocal throw_activity_counter 461 | throw_activity_counter += 1 462 | raise RuntimeError("Kah-BOOOOM!!!") 463 | 464 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 465 | taskhub=taskhub_name, token_credential=None) as w: 466 | w.add_orchestrator(mock_orchestrator) 467 | w.add_activity(throw_activity) 468 | w.start() 469 | 470 | task_hub_client = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 471 | taskhub=taskhub_name, token_credential=None) 472 | id = task_hub_client.schedule_new_orchestration(mock_orchestrator) 473 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 474 | assert state is not None 475 | assert state.runtime_status == client.OrchestrationStatus.FAILED 476 | assert state.failure_details is not None 477 | assert state.failure_details.error_type == "TaskFailedError" 478 | assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!") 479 | assert state.failure_details.stack_trace is not None 480 | assert throw_activity_counter == 4 481 | 482 | 483 | def test_custom_status(): 484 | 485 | def empty_orchestrator(ctx: task.OrchestrationContext, _): 486 | ctx.set_custom_status("foobaz") 487 | 488 | # Start a worker, which will connect to the sidecar in a background thread 489 | with DurableTaskSchedulerWorker(host_address=endpoint, secure_channel=True, 490 | taskhub=taskhub_name, token_credential=None) as w: 491 | w.add_orchestrator(empty_orchestrator) 492 | w.start() 493 | 494 | c = DurableTaskSchedulerClient(host_address=endpoint, secure_channel=True, 495 | taskhub=taskhub_name, token_credential=None) 496 | id = c.schedule_new_orchestration(empty_orchestrator) 497 | state = c.wait_for_orchestration_completion(id, timeout=30) 498 | 499 | assert state is not None 500 | assert state.name == task.get_name(empty_orchestrator) 501 | assert state.instance_id == id 502 | assert state.failure_details is None 503 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 504 | assert state.serialized_input is None 505 | assert state.serialized_output is None 506 | assert state.serialized_custom_status == "\"foobaz\"" 507 | -------------------------------------------------------------------------------- /tests/durabletask-azuremanaged/test_durabletask_grpc_interceptor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
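
# These tests start a real in-process gRPC server on a random free port and verify
# that the client-side interceptor attaches the expected "x-user-agent" and
# "taskhub" metadata to every outgoing request.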
3 | 4 | import unittest 5 | from concurrent import futures 6 | from importlib.metadata import version 7 | 8 | import grpc 9 | 10 | from durabletask.azuremanaged.client import DurableTaskSchedulerClient 11 | from durabletask.internal import orchestrator_service_pb2 as pb 12 | from durabletask.internal import orchestrator_service_pb2_grpc as stubs 13 | 14 | 15 | class MockTaskHubSidecarServiceServicer(stubs.TaskHubSidecarServiceServicer): 16 | """Mock implementation of the TaskHubSidecarService for testing.""" 17 | 18 | def __init__(self): 19 | self.captured_metadata = {} 20 | self.requests_received = 0 21 | 22 | def GetInstance(self, request, context): 23 | """Implementation of GetInstance that captures the metadata.""" 24 | # Store all metadata key-value pairs from the context 25 | for key, value in context.invocation_metadata(): 26 | self.captured_metadata[key] = value 27 | 28 | self.requests_received += 1 29 | 30 | # Return a mock response 31 | response = pb.GetInstanceResponse(exists=False) 32 | return response 33 | 34 | 35 | class TestDurableTaskGrpcInterceptor(unittest.TestCase): 36 | """Tests for the DTSDefaultClientInterceptorImpl class.""" 37 | 38 | @classmethod 39 | def setUpClass(cls): 40 | # Start a real gRPC server on a free port 41 | cls.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) 42 | cls.port = cls.server.add_insecure_port('[::]:0') # Bind to a random free port 43 | cls.server_address = f"localhost:{cls.port}" 44 | 45 | # Add our mock service implementation to the server 46 | cls.mock_servicer = MockTaskHubSidecarServiceServicer() 47 | stubs.add_TaskHubSidecarServiceServicer_to_server(cls.mock_servicer, cls.server) 48 | 49 | # Start the server in a background thread 50 | cls.server.start() 51 | 52 | @classmethod 53 | def tearDownClass(cls): 54 | cls.server.stop(grace=None) 55 | 56 | def test_user_agent_metadata_passed_in_request(self): 57 | """Test that the user agent metadata is correctly passed in gRPC requests.""" 58 | # Create a client that connects to our mock server 59 | # Note: secure_channel is False and token_credential is None as specified 60 | task_hub_client = DurableTaskSchedulerClient( 61 | host_address=self.server_address, 62 | secure_channel=False, 63 | taskhub="test-taskhub", 64 | token_credential=None 65 | ) 66 | 67 | # Make a client call that will trigger our interceptor 68 | task_hub_client.get_orchestration_state("test-instance-id") 69 | 70 | # Verify the request was received by our mock server 71 | self.assertEqual(1, self.mock_servicer.requests_received, "Expected one request to be received") 72 | 73 | # Check if our custom x-user-agent header was correctly set 74 | self.assertIn("x-user-agent", self.mock_servicer.captured_metadata, "x-user-agent header not found") 75 | 76 | # Get what we expect our user agent to be 77 | try: 78 | expected_version = version('durabletask-azuremanaged') 79 | except Exception: 80 | expected_version = "unknown" 81 | 82 | expected_user_agent = f"durabletask-python/{expected_version}" 83 | self.assertEqual( 84 | expected_user_agent, 85 | self.mock_servicer.captured_metadata["x-user-agent"], 86 | f"Expected x-user-agent header to be '{expected_user_agent}'" 87 | ) 88 | 89 | # Check if the taskhub header was correctly set 90 | self.assertIn("taskhub", self.mock_servicer.captured_metadata, "taskhub header not found") 91 | self.assertEqual("test-taskhub", self.mock_servicer.captured_metadata["taskhub"]) 92 | 93 | # Verify the standard gRPC user-agent is different from our custom one 94 | # Note: gRPC 
automatically adds its own "user-agent" header 95 | self.assertIn("user-agent", self.mock_servicer.captured_metadata, "gRPC user-agent header not found") 96 | self.assertNotEqual( 97 | self.mock_servicer.captured_metadata["user-agent"], 98 | self.mock_servicer.captured_metadata["x-user-agent"], 99 | "gRPC user-agent should be different from our custom x-user-agent" 100 | ) 101 | 102 | 103 | if __name__ == "__main__": 104 | unittest.main() 105 | -------------------------------------------------------------------------------- /tests/durabletask/test_activity_executor.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import json 5 | import logging 6 | from typing import Any, Optional, Tuple 7 | 8 | from durabletask import task, worker 9 | 10 | logging.basicConfig( 11 | format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s', 12 | datefmt='%Y-%m-%d %H:%M:%S', 13 | level=logging.DEBUG) 14 | TEST_LOGGER = logging.getLogger("tests") 15 | TEST_INSTANCE_ID = 'abc123' 16 | TEST_TASK_ID = 42 17 | 18 | 19 | def test_activity_inputs(): 20 | """Validates activity function input population""" 21 | def test_activity(ctx: task.ActivityContext, test_input: Any): 22 | # return all activity inputs back as the output 23 | return test_input, ctx.orchestration_id, ctx.task_id 24 | 25 | activity_input = "Hello, 世界!" 26 | executor, name = _get_activity_executor(test_activity) 27 | result = executor.execute(TEST_INSTANCE_ID, name, TEST_TASK_ID, json.dumps(activity_input)) 28 | assert result is not None 29 | 30 | result_input, result_orchestration_id, result_task_id = json.loads(result) 31 | assert activity_input == result_input 32 | assert TEST_INSTANCE_ID == result_orchestration_id 33 | assert TEST_TASK_ID == result_task_id 34 | 35 | 36 | def test_activity_not_registered(): 37 | 38 | def test_activity(ctx: task.ActivityContext, _): 39 | pass # not used 40 | 41 | executor, _ = _get_activity_executor(test_activity) 42 | 43 | caught_exception: Optional[Exception] = None 44 | try: 45 | executor.execute(TEST_INSTANCE_ID, "Bogus", TEST_TASK_ID, None) 46 | except Exception as ex: 47 | caught_exception = ex 48 | 49 | assert type(caught_exception) is worker.ActivityNotRegisteredError 50 | assert "Bogus" in str(caught_exception) 51 | 52 | 53 | def _get_activity_executor(fn: task.Activity) -> Tuple[worker._ActivityExecutor, str]: 54 | registry = worker._Registry() 55 | name = registry.add_activity(fn) 56 | executor = worker._ActivityExecutor(registry, TEST_LOGGER) 57 | return executor, name 58 | -------------------------------------------------------------------------------- /tests/durabletask/test_client.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import ANY, patch 2 | 3 | from durabletask.internal.grpc_interceptor import DefaultClientInterceptorImpl 4 | from durabletask.internal.shared import (get_default_host_address, 5 | get_grpc_channel) 6 | 7 | HOST_ADDRESS = 'localhost:50051' 8 | METADATA = [('key1', 'value1'), ('key2', 'value2')] 9 | INTERCEPTORS = [DefaultClientInterceptorImpl(METADATA)] 10 | 11 | 12 | def test_get_grpc_channel_insecure(): 13 | with patch('grpc.insecure_channel') as mock_channel: 14 | get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) 15 | mock_channel.assert_called_once_with(HOST_ADDRESS) 16 | 17 | 18 | def test_get_grpc_channel_secure(): 19 | with patch('grpc.secure_channel') as 
mock_channel, patch( 20 | 'grpc.ssl_channel_credentials') as mock_credentials: 21 | get_grpc_channel(HOST_ADDRESS, True, interceptors=INTERCEPTORS) 22 | mock_channel.assert_called_once_with(HOST_ADDRESS, mock_credentials.return_value) 23 | 24 | 25 | def test_get_grpc_channel_default_host_address(): 26 | with patch('grpc.insecure_channel') as mock_channel: 27 | get_grpc_channel(None, False, interceptors=INTERCEPTORS) 28 | mock_channel.assert_called_once_with(get_default_host_address()) 29 | 30 | 31 | def test_get_grpc_channel_with_metadata(): 32 | with patch('grpc.insecure_channel') as mock_channel, patch( 33 | 'grpc.intercept_channel') as mock_intercept_channel: 34 | get_grpc_channel(HOST_ADDRESS, False, interceptors=INTERCEPTORS) 35 | mock_channel.assert_called_once_with(HOST_ADDRESS) 36 | mock_intercept_channel.assert_called_once() 37 | 38 | # Capture and check the arguments passed to intercept_channel() 39 | args, kwargs = mock_intercept_channel.call_args 40 | assert args[0] == mock_channel.return_value 41 | assert isinstance(args[1], DefaultClientInterceptorImpl) 42 | assert args[1]._metadata == METADATA 43 | 44 | 45 | def test_grpc_channel_with_host_name_protocol_stripping(): 46 | with patch('grpc.insecure_channel') as mock_insecure_channel, patch( 47 | 'grpc.secure_channel') as mock_secure_channel: 48 | 49 | host_name = "myserver.com:1234" 50 | 51 | prefix = "grpc://" 52 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 53 | mock_insecure_channel.assert_called_with(host_name) 54 | 55 | prefix = "http://" 56 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 57 | mock_insecure_channel.assert_called_with(host_name) 58 | 59 | prefix = "HTTP://" 60 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 61 | mock_insecure_channel.assert_called_with(host_name) 62 | 63 | prefix = "GRPC://" 64 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 65 | mock_insecure_channel.assert_called_with(host_name) 66 | 67 | prefix = "" 68 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 69 | mock_insecure_channel.assert_called_with(host_name) 70 | 71 | prefix = "grpcs://" 72 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 73 | mock_secure_channel.assert_called_with(host_name, ANY) 74 | 75 | prefix = "https://" 76 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 77 | mock_secure_channel.assert_called_with(host_name, ANY) 78 | 79 | prefix = "HTTPS://" 80 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 81 | mock_secure_channel.assert_called_with(host_name, ANY) 82 | 83 | prefix = "GRPCS://" 84 | get_grpc_channel(prefix + host_name, interceptors=INTERCEPTORS) 85 | mock_secure_channel.assert_called_with(host_name, ANY) 86 | 87 | prefix = "" 88 | get_grpc_channel(prefix + host_name, True, interceptors=INTERCEPTORS) 89 | mock_secure_channel.assert_called_with(host_name, ANY) 90 | -------------------------------------------------------------------------------- /tests/durabletask/test_concurrency_options.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
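
# The expected defaults asserted below encode the sizing formulas these tests
# assume the SDK uses: 100 * cpu_count concurrent work items per work type, and
# cpu_count + 4 thread pool workers (the same heuristic that
# concurrent.futures.ThreadPoolExecutor applies for small core counts).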
3 | 4 | import os 5 | 6 | from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker 7 | 8 | 9 | def test_default_concurrency_options(): 10 | """Test that default concurrency options work correctly.""" 11 | options = ConcurrencyOptions() 12 | processor_count = os.cpu_count() or 1 13 | expected_default = 100 * processor_count 14 | expected_workers = processor_count + 4 15 | 16 | assert options.maximum_concurrent_activity_work_items == expected_default 17 | assert options.maximum_concurrent_orchestration_work_items == expected_default 18 | assert options.maximum_thread_pool_workers == expected_workers 19 | 20 | 21 | def test_custom_concurrency_options(): 22 | """Test that custom concurrency options work correctly.""" 23 | options = ConcurrencyOptions( 24 | maximum_concurrent_activity_work_items=50, 25 | maximum_concurrent_orchestration_work_items=25, 26 | maximum_thread_pool_workers=30, 27 | ) 28 | 29 | assert options.maximum_concurrent_activity_work_items == 50 30 | assert options.maximum_concurrent_orchestration_work_items == 25 31 | assert options.maximum_thread_pool_workers == 30 32 | 33 | 34 | def test_partial_custom_options(): 35 | """Test that partially specified options use defaults for unspecified values.""" 36 | processor_count = os.cpu_count() or 1 37 | expected_default = 100 * processor_count 38 | expected_workers = processor_count + 4 39 | 40 | options = ConcurrencyOptions( 41 | maximum_concurrent_activity_work_items=30 42 | ) 43 | 44 | assert options.maximum_concurrent_activity_work_items == 30 45 | assert options.maximum_concurrent_orchestration_work_items == expected_default 46 | assert options.maximum_thread_pool_workers == expected_workers 47 | 48 | 49 | def test_worker_with_concurrency_options(): 50 | """Test that TaskHubGrpcWorker accepts concurrency options.""" 51 | options = ConcurrencyOptions( 52 | maximum_concurrent_activity_work_items=10, 53 | maximum_concurrent_orchestration_work_items=20, 54 | maximum_thread_pool_workers=15, 55 | ) 56 | 57 | worker = TaskHubGrpcWorker(concurrency_options=options) 58 | 59 | assert worker.concurrency_options == options 60 | 61 | 62 | def test_worker_default_options(): 63 | """Test that TaskHubGrpcWorker uses default options when no parameters are provided.""" 64 | worker = TaskHubGrpcWorker() 65 | 66 | processor_count = os.cpu_count() or 1 67 | expected_default = 100 * processor_count 68 | expected_workers = processor_count + 4 69 | 70 | assert ( 71 | worker.concurrency_options.maximum_concurrent_activity_work_items == expected_default 72 | ) 73 | assert ( 74 | worker.concurrency_options.maximum_concurrent_orchestration_work_items == expected_default 75 | ) 76 | assert worker.concurrency_options.maximum_thread_pool_workers == expected_workers 77 | 78 | 79 | def test_concurrency_options_property_access(): 80 | """Test that the concurrency_options property works correctly.""" 81 | options = ConcurrencyOptions( 82 | maximum_concurrent_activity_work_items=15, 83 | maximum_concurrent_orchestration_work_items=25, 84 | maximum_thread_pool_workers=30, 85 | ) 86 | 87 | worker = TaskHubGrpcWorker(concurrency_options=options) 88 | retrieved_options = worker.concurrency_options 89 | 90 | # Should be the same object 91 | assert retrieved_options is options 92 | 93 | # Should have correct values 94 | assert retrieved_options.maximum_concurrent_activity_work_items == 15 95 | assert retrieved_options.maximum_concurrent_orchestration_work_items == 25 96 | assert retrieved_options.maximum_thread_pool_workers == 30 97 | 
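
# Illustrative usage sketch (commented out; not part of the test suite, and the
# sizing below is a hypothetical example rather than SDK guidance):
#
#     import os
#     options = ConcurrencyOptions(
#         maximum_concurrent_activity_work_items=4 * (os.cpu_count() or 1),
#         maximum_concurrent_orchestration_work_items=2 * (os.cpu_count() or 1),
#         maximum_thread_pool_workers=(os.cpu_count() or 1) + 4,
#     )
#     with TaskHubGrpcWorker(concurrency_options=options) as w:
#         ...  # register orchestrators/activities, then call w.start()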
-------------------------------------------------------------------------------- /tests/durabletask/test_orchestration_e2e.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import json 5 | import threading 6 | import time 7 | from datetime import timedelta 8 | 9 | import pytest 10 | 11 | from durabletask import client, task, worker 12 | 13 | # NOTE: These tests assume a sidecar process is running. Example command: 14 | # docker run --name durabletask-sidecar -p 4001:4001 --env 'DURABLETASK_SIDECAR_LOGLEVEL=Debug' --rm cgillum/durabletask-sidecar:latest start --backend Emulator 15 | pytestmark = pytest.mark.e2e 16 | 17 | 18 | def test_empty_orchestration(): 19 | 20 | invoked = False 21 | 22 | def empty_orchestrator(ctx: task.OrchestrationContext, _): 23 | nonlocal invoked # don't do this in a real app! 24 | invoked = True 25 | 26 | # Start a worker, which will connect to the sidecar in a background thread 27 | with worker.TaskHubGrpcWorker() as w: 28 | w.add_orchestrator(empty_orchestrator) 29 | w.start() 30 | 31 | c = client.TaskHubGrpcClient() 32 | id = c.schedule_new_orchestration(empty_orchestrator) 33 | state = c.wait_for_orchestration_completion(id, timeout=30) 34 | 35 | assert invoked 36 | assert state is not None 37 | assert state.name == task.get_name(empty_orchestrator) 38 | assert state.instance_id == id 39 | assert state.failure_details is None 40 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 41 | assert state.serialized_input is None 42 | assert state.serialized_output is None 43 | assert state.serialized_custom_status is None 44 | 45 | 46 | def test_activity_sequence(): 47 | 48 | def plus_one(_: task.ActivityContext, input: int) -> int: 49 | return input + 1 50 | 51 | def sequence(ctx: task.OrchestrationContext, start_val: int): 52 | numbers = [start_val] 53 | current = start_val 54 | for _ in range(10): 55 | current = yield ctx.call_activity(plus_one, input=current) 56 | numbers.append(current) 57 | return numbers 58 | 59 | # Start a worker, which will connect to the sidecar in a background thread 60 | with worker.TaskHubGrpcWorker() as w: 61 | w.add_orchestrator(sequence) 62 | w.add_activity(plus_one) 63 | w.start() 64 | 65 | task_hub_client = client.TaskHubGrpcClient() 66 | id = task_hub_client.schedule_new_orchestration(sequence, input=1) 67 | state = task_hub_client.wait_for_orchestration_completion( 68 | id, timeout=30) 69 | 70 | assert state is not None 71 | assert state.name == task.get_name(sequence) 72 | assert state.instance_id == id 73 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 74 | assert state.failure_details is None 75 | assert state.serialized_input == json.dumps(1) 76 | assert state.serialized_output == json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) 77 | assert state.serialized_custom_status is None 78 | 79 | 80 | def test_activity_error_handling(): 81 | 82 | def throw(_: task.ActivityContext, input: int) -> int: 83 | raise RuntimeError("Kah-BOOOOM!!!") 84 | 85 | compensation_counter = 0 86 | 87 | def increment_counter(ctx, _): 88 | nonlocal compensation_counter 89 | compensation_counter += 1 90 | 91 | def orchestrator(ctx: task.OrchestrationContext, input: int): 92 | error_msg = "" 93 | try: 94 | yield ctx.call_activity(throw, input=input) 95 | except task.TaskFailedError as e: 96 | error_msg = e.details.message 97 | 98 | # compensating actions 99 | yield 
ctx.call_activity(increment_counter) 100 | yield ctx.call_activity(increment_counter) 101 | 102 | return error_msg 103 | 104 | # Start a worker, which will connect to the sidecar in a background thread 105 | with worker.TaskHubGrpcWorker() as w: 106 | w.add_orchestrator(orchestrator) 107 | w.add_activity(throw) 108 | w.add_activity(increment_counter) 109 | w.start() 110 | 111 | task_hub_client = client.TaskHubGrpcClient() 112 | id = task_hub_client.schedule_new_orchestration(orchestrator, input=1) 113 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 114 | 115 | assert state is not None 116 | assert state.name == task.get_name(orchestrator) 117 | assert state.instance_id == id 118 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 119 | assert state.serialized_output == json.dumps("Kah-BOOOOM!!!") 120 | assert state.failure_details is None 121 | assert state.serialized_custom_status is None 122 | assert compensation_counter == 2 123 | 124 | 125 | def test_sub_orchestration_fan_out(): 126 | threadLock = threading.Lock() 127 | activity_counter = 0 128 | 129 | def increment(ctx, _): 130 | with threadLock: 131 | nonlocal activity_counter 132 | activity_counter += 1 133 | 134 | def orchestrator_child(ctx: task.OrchestrationContext, activity_count: int): 135 | for _ in range(activity_count): 136 | yield ctx.call_activity(increment) 137 | 138 | def parent_orchestrator(ctx: task.OrchestrationContext, count: int): 139 | # Fan out to multiple sub-orchestrations 140 | tasks = [] 141 | for _ in range(count): 142 | tasks.append(ctx.call_sub_orchestrator( 143 | orchestrator_child, input=3)) 144 | # Wait for all sub-orchestrations to complete 145 | yield task.when_all(tasks) 146 | 147 | # Start a worker, which will connect to the sidecar in a background thread 148 | with worker.TaskHubGrpcWorker() as w: 149 | w.add_activity(increment) 150 | w.add_orchestrator(orchestrator_child) 151 | w.add_orchestrator(parent_orchestrator) 152 | w.start() 153 | 154 | task_hub_client = client.TaskHubGrpcClient() 155 | id = task_hub_client.schedule_new_orchestration(parent_orchestrator, input=10) 156 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 157 | 158 | assert state is not None 159 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 160 | assert state.failure_details is None 161 | assert activity_counter == 30 162 | 163 | 164 | def test_wait_for_multiple_external_events(): 165 | def orchestrator(ctx: task.OrchestrationContext, _): 166 | a = yield ctx.wait_for_external_event('A') 167 | b = yield ctx.wait_for_external_event('B') 168 | c = yield ctx.wait_for_external_event('C') 169 | return [a, b, c] 170 | 171 | # Start a worker, which will connect to the sidecar in a background thread 172 | with worker.TaskHubGrpcWorker() as w: 173 | w.add_orchestrator(orchestrator) 174 | w.start() 175 | 176 | # Start the orchestration and immediately raise events to it. 
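        # Raising the events right away is safe: events that arrive before the
        # orchestrator reaches the matching wait_for_external_event call are
        # buffered by the backend and delivered once the orchestrator asks for them.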
177 | task_hub_client = client.TaskHubGrpcClient() 178 | id = task_hub_client.schedule_new_orchestration(orchestrator) 179 | task_hub_client.raise_orchestration_event(id, 'A', data='a') 180 | task_hub_client.raise_orchestration_event(id, 'B', data='b') 181 | task_hub_client.raise_orchestration_event(id, 'C', data='c') 182 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 183 | 184 | assert state is not None 185 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 186 | assert state.serialized_output == json.dumps(['a', 'b', 'c']) 187 | 188 | 189 | @pytest.mark.parametrize("raise_event", [True, False]) 190 | def test_wait_for_external_event_timeout(raise_event: bool): 191 | def orchestrator(ctx: task.OrchestrationContext, _): 192 | approval: task.Task[bool] = ctx.wait_for_external_event('Approval') 193 | timeout = ctx.create_timer(timedelta(seconds=3)) 194 | winner = yield task.when_any([approval, timeout]) 195 | if winner == approval: 196 | return "approved" 197 | else: 198 | return "timed out" 199 | 200 | # Start a worker, which will connect to the sidecar in a background thread 201 | with worker.TaskHubGrpcWorker() as w: 202 | w.add_orchestrator(orchestrator) 203 | w.start() 204 | 205 | # Start the orchestration and immediately raise events to it. 206 | task_hub_client = client.TaskHubGrpcClient() 207 | id = task_hub_client.schedule_new_orchestration(orchestrator) 208 | if raise_event: 209 | task_hub_client.raise_orchestration_event(id, 'Approval') 210 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 211 | 212 | assert state is not None 213 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 214 | if raise_event: 215 | assert state.serialized_output == json.dumps("approved") 216 | else: 217 | assert state.serialized_output == json.dumps("timed out") 218 | 219 | 220 | def test_suspend_and_resume(): 221 | def orchestrator(ctx: task.OrchestrationContext, _): 222 | result = yield ctx.wait_for_external_event("my_event") 223 | return result 224 | 225 | # Start a worker, which will connect to the sidecar in a background thread 226 | with worker.TaskHubGrpcWorker() as w: 227 | w.add_orchestrator(orchestrator) 228 | w.start() 229 | 230 | task_hub_client = client.TaskHubGrpcClient() 231 | id = task_hub_client.schedule_new_orchestration(orchestrator) 232 | state = task_hub_client.wait_for_orchestration_start(id, timeout=30) 233 | assert state is not None 234 | 235 | # Suspend the orchestration and wait for it to go into the SUSPENDED state 236 | task_hub_client.suspend_orchestration(id) 237 | while state.runtime_status == client.OrchestrationStatus.RUNNING: 238 | time.sleep(0.1) 239 | state = task_hub_client.get_orchestration_state(id) 240 | assert state is not None 241 | assert state.runtime_status == client.OrchestrationStatus.SUSPENDED 242 | 243 | # Raise an event to the orchestration and confirm that it does NOT complete 244 | task_hub_client.raise_orchestration_event(id, "my_event", data=42) 245 | try: 246 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=3) 247 | assert False, "Orchestration should not have completed" 248 | except TimeoutError: 249 | pass 250 | 251 | # Resume the orchestration and wait for it to complete 252 | task_hub_client.resume_orchestration(id) 253 | state = task_hub_client.wait_for_orchestration_completion(id, timeout=30) 254 | assert state is not None 255 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 256 | assert state.serialized_output 
== json.dumps(42)
257 | 
258 | 
259 | def test_terminate():
260 |     def orchestrator(ctx: task.OrchestrationContext, _):
261 |         result = yield ctx.wait_for_external_event("my_event")
262 |         return result
263 | 
264 |     # Start a worker, which will connect to the sidecar in a background thread
265 |     with worker.TaskHubGrpcWorker() as w:
266 |         w.add_orchestrator(orchestrator)
267 |         w.start()
268 | 
269 |         task_hub_client = client.TaskHubGrpcClient()
270 |         id = task_hub_client.schedule_new_orchestration(orchestrator)
271 |         state = task_hub_client.wait_for_orchestration_start(id, timeout=30)
272 |         assert state is not None
273 |         assert state.runtime_status == client.OrchestrationStatus.RUNNING
274 | 
275 |         task_hub_client.terminate_orchestration(id, output="some reason for termination")
276 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
277 |         assert state is not None
278 |         assert state.runtime_status == client.OrchestrationStatus.TERMINATED
279 |         assert state.serialized_output == json.dumps("some reason for termination")
280 | 
281 | 
282 | def test_terminate_recursive():
283 |     def root(ctx: task.OrchestrationContext, _):
284 |         result = yield ctx.call_sub_orchestrator(child)
285 |         return result
286 | 
287 |     def child(ctx: task.OrchestrationContext, _):
288 |         result = yield ctx.wait_for_external_event("my_event")
289 |         return result
290 | 
291 |     # Start a worker, which will connect to the sidecar in a background thread
292 |     with worker.TaskHubGrpcWorker() as w:
293 |         w.add_orchestrator(root)
294 |         w.add_orchestrator(child)
295 |         w.start()
296 | 
297 |         task_hub_client = client.TaskHubGrpcClient()
298 |         id = task_hub_client.schedule_new_orchestration(root)
299 |         state = task_hub_client.wait_for_orchestration_start(id, timeout=30)
300 |         assert state is not None
301 |         assert state.runtime_status == client.OrchestrationStatus.RUNNING
302 | 
303 |         # Terminate the root orchestration (recursive is set to True by default)
304 |         task_hub_client.terminate_orchestration(id, output="some reason for termination")
305 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
306 |         assert state is not None
307 |         assert state.runtime_status == client.OrchestrationStatus.TERMINATED
308 | 
309 |         # Verify that the child orchestration is also terminated
310 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
311 |         assert state is not None
312 |         assert state.runtime_status == client.OrchestrationStatus.TERMINATED
313 | 
314 |         task_hub_client.purge_orchestration(id)
315 |         state = task_hub_client.get_orchestration_state(id)
316 |         assert state is None
317 | 
318 | 
319 | def test_continue_as_new():
320 |     all_results = []
321 | 
322 |     def orchestrator(ctx: task.OrchestrationContext, input: int):
323 |         result = yield ctx.wait_for_external_event("my_event")
324 |         if not ctx.is_replaying:
325 |             # NOTE: Real orchestrations should never interact with nonlocal variables like this.
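            # The ctx.is_replaying guard records each event exactly once, even
            # though continue_as_new causes this generator to be re-executed for
            # every new generation of the orchestration.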
326 |             nonlocal all_results  # noqa: F824
327 |             all_results.append(result)
328 | 
329 |         if len(all_results) <= 4:
330 |             ctx.continue_as_new(max(all_results), save_events=True)
331 |         else:
332 |             return all_results
333 | 
334 |     # Start a worker, which will connect to the sidecar in a background thread
335 |     with worker.TaskHubGrpcWorker() as w:
336 |         w.add_orchestrator(orchestrator)
337 |         w.start()
338 | 
339 |         task_hub_client = client.TaskHubGrpcClient()
340 |         id = task_hub_client.schedule_new_orchestration(orchestrator, input=0)
341 |         task_hub_client.raise_orchestration_event(id, "my_event", data=1)
342 |         task_hub_client.raise_orchestration_event(id, "my_event", data=2)
343 |         task_hub_client.raise_orchestration_event(id, "my_event", data=3)
344 |         task_hub_client.raise_orchestration_event(id, "my_event", data=4)
345 |         task_hub_client.raise_orchestration_event(id, "my_event", data=5)
346 | 
347 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
348 |         assert state is not None
349 |         assert state.runtime_status == client.OrchestrationStatus.COMPLETED
350 |         assert state.serialized_output == json.dumps(all_results)
351 |         assert state.serialized_input == json.dumps(4)
352 |         assert all_results == [1, 2, 3, 4, 5]
353 | 
354 | 
355 | # NOTE: This test fails when running against durabletask-go with sqlite because the sqlite backend does not yet
356 | # support orchestration ID reuse. This gap is being tracked here:
357 | # https://github.com/microsoft/durabletask-go/issues/42
358 | def test_retry_policies():
359 |     # This test verifies that the retry policies are working as expected.
360 |     # It does this by creating an orchestration that calls a sub-orchestrator,
361 |     # which in turn calls an activity that always fails.
362 |     # In this test, retry policies are added; the orchestration should still
363 |     # fail, but the number of times the sub-orchestrator and the activity
364 |     # are called should increase according to the retry policies.
365 | 
366 |     child_orch_counter = 0
367 |     throw_activity_counter = 0
368 | 
369 |     # Set up the retry policy shared by the sub-orchestration and activity calls
370 |     retry_policy = task.RetryPolicy(
371 |         first_retry_interval=timedelta(seconds=1),
372 |         max_number_of_attempts=3,
373 |         backoff_coefficient=1,
374 |         max_retry_interval=timedelta(seconds=10),
375 |         retry_timeout=timedelta(seconds=30))
376 | 
377 |     def parent_orchestrator_with_retry(ctx: task.OrchestrationContext, _):
378 |         yield ctx.call_sub_orchestrator(child_orchestrator_with_retry, retry_policy=retry_policy)
379 | 
380 |     def child_orchestrator_with_retry(ctx: task.OrchestrationContext, _):
381 |         nonlocal child_orch_counter
382 |         if not ctx.is_replaying:
383 |             # NOTE: Real orchestrations should never interact with nonlocal variables like this.
384 |             # This is done only for testing purposes.
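            # With max_number_of_attempts=3 on both the sub-orchestration and its
            # activity call, the child executes 3 times and the activity 3 times
            # per child execution, which is why the assertions below expect
            # counters of 3 and 9.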
385 |             child_orch_counter += 1
386 |         yield ctx.call_activity(throw_activity_with_retry, retry_policy=retry_policy)
387 | 
388 |     def throw_activity_with_retry(ctx: task.ActivityContext, _):
389 |         nonlocal throw_activity_counter
390 |         throw_activity_counter += 1
391 |         raise RuntimeError("Kah-BOOOOM!!!")
392 | 
393 |     with worker.TaskHubGrpcWorker() as w:
394 |         w.add_orchestrator(parent_orchestrator_with_retry)
395 |         w.add_orchestrator(child_orchestrator_with_retry)
396 |         w.add_activity(throw_activity_with_retry)
397 |         w.start()
398 | 
399 |         task_hub_client = client.TaskHubGrpcClient()
400 |         id = task_hub_client.schedule_new_orchestration(parent_orchestrator_with_retry)
401 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
402 |         assert state is not None
403 |         assert state.runtime_status == client.OrchestrationStatus.FAILED
404 |         assert state.failure_details is not None
405 |         assert state.failure_details.error_type == "TaskFailedError"
406 |         assert state.failure_details.message.startswith("Sub-orchestration task #1 failed:")
407 |         assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
408 |         assert state.failure_details.stack_trace is not None
409 |         assert throw_activity_counter == 9
410 |         assert child_orch_counter == 3
411 | 
412 | 
413 | def test_retry_timeout():
414 |     # This test verifies that the retry timeout is working as expected.
415 |     # Max number of attempts is 5 and the retry timeout is 14 seconds.
416 |     # The cumulative delay before the 5th attempt would be 1 + 2 + 4 + 8 = 15 seconds,
417 |     # which exceeds the 14-second timeout, so the 5th attempt should not be made and the orchestration should fail.
418 |     throw_activity_counter = 0
419 |     retry_policy = task.RetryPolicy(
420 |         first_retry_interval=timedelta(seconds=1),
421 |         max_number_of_attempts=5,
422 |         backoff_coefficient=2,
423 |         max_retry_interval=timedelta(seconds=10),
424 |         retry_timeout=timedelta(seconds=14))
425 | 
426 |     def mock_orchestrator(ctx: task.OrchestrationContext, _):
427 |         yield ctx.call_activity(throw_activity, retry_policy=retry_policy)
428 | 
429 |     def throw_activity(ctx: task.ActivityContext, _):
430 |         nonlocal throw_activity_counter
431 |         throw_activity_counter += 1
432 |         raise RuntimeError("Kah-BOOOOM!!!")
433 | 
434 |     with worker.TaskHubGrpcWorker() as w:
435 |         w.add_orchestrator(mock_orchestrator)
436 |         w.add_activity(throw_activity)
437 |         w.start()
438 | 
439 |         task_hub_client = client.TaskHubGrpcClient()
440 |         id = task_hub_client.schedule_new_orchestration(mock_orchestrator)
441 |         state = task_hub_client.wait_for_orchestration_completion(id, timeout=30)
442 |         assert state is not None
443 |         assert state.runtime_status == client.OrchestrationStatus.FAILED
444 |         assert state.failure_details is not None
445 |         assert state.failure_details.error_type == "TaskFailedError"
446 |         assert state.failure_details.message.endswith("Activity task #1 failed: Kah-BOOOOM!!!")
447 |         assert state.failure_details.stack_trace is not None
448 |         assert throw_activity_counter == 4
449 | 
450 | 
451 | def test_custom_status():
452 | 
453 |     def empty_orchestrator(ctx: task.OrchestrationContext, _):
454 |         ctx.set_custom_status("foobaz")
455 | 
456 |     # Start a worker, which will connect to the sidecar in a background thread
457 |     with worker.TaskHubGrpcWorker() as w:
458 |         w.add_orchestrator(empty_orchestrator)
459 |         w.start()
460 | 
461 |         c = client.TaskHubGrpcClient()
462 |         id = c.schedule_new_orchestration(empty_orchestrator)
463 |         state = c.wait_for_orchestration_completion(id, timeout=30)
464 | 
465 |         assert state is not None
466 |
assert state.name == task.get_name(empty_orchestrator) 467 | assert state.instance_id == id 468 | assert state.failure_details is None 469 | assert state.runtime_status == client.OrchestrationStatus.COMPLETED 470 | assert state.serialized_input is None 471 | assert state.serialized_output is None 472 | assert state.serialized_custom_status == "\"foobaz\"" 473 | -------------------------------------------------------------------------------- /tests/durabletask/test_worker_concurrency_loop.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import threading 3 | import time 4 | 5 | from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker 6 | 7 | 8 | class DummyStub: 9 | def __init__(self): 10 | self.completed = [] 11 | 12 | def CompleteOrchestratorTask(self, res): 13 | self.completed.append(('orchestrator', res)) 14 | 15 | def CompleteActivityTask(self, res): 16 | self.completed.append(('activity', res)) 17 | 18 | 19 | class DummyRequest: 20 | def __init__(self, kind, instance_id): 21 | self.kind = kind 22 | self.instanceId = instance_id 23 | self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) 24 | self.name = 'dummy' 25 | self.taskId = 1 26 | self.input = type('I', (), {'value': ''}) 27 | self.pastEvents = [] 28 | self.newEvents = [] 29 | 30 | def HasField(self, field): 31 | return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ 32 | (field == 'activityRequest' and self.kind == 'activity') 33 | 34 | def WhichOneof(self, _): 35 | return f'{self.kind}Request' 36 | 37 | 38 | class DummyCompletionToken: 39 | pass 40 | 41 | 42 | def test_worker_concurrency_loop_sync(): 43 | options = ConcurrencyOptions( 44 | maximum_concurrent_activity_work_items=2, 45 | maximum_concurrent_orchestration_work_items=1, 46 | maximum_thread_pool_workers=2, 47 | ) 48 | worker = TaskHubGrpcWorker(concurrency_options=options) 49 | stub = DummyStub() 50 | 51 | def dummy_orchestrator(req, stub, completionToken): 52 | time.sleep(0.1) 53 | stub.CompleteOrchestratorTask('ok') 54 | 55 | def dummy_activity(req, stub, completionToken): 56 | time.sleep(0.1) 57 | stub.CompleteActivityTask('ok') 58 | 59 | # Patch the worker's _execute_orchestrator and _execute_activity 60 | worker._execute_orchestrator = dummy_orchestrator 61 | worker._execute_activity = dummy_activity 62 | 63 | orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] 64 | activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] 65 | 66 | async def run_test(): 67 | # Start the worker manager's run loop in the background 68 | worker_task = asyncio.create_task(worker._async_worker_manager.run()) 69 | for req in orchestrator_requests: 70 | worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) 71 | for req in activity_requests: 72 | worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) 73 | await asyncio.sleep(1.0) 74 | orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') 75 | activity_count = sum(1 for t, _ in stub.completed if t == 'activity') 76 | assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" 77 | assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" 78 | worker._async_worker_manager._shutdown = True 79 | await worker_task 80 | asyncio.run(run_test()) 81 | 82 | 83 | # Dummy orchestrator and activity for sync 
context 84 | def dummy_orchestrator(ctx, input): 85 | # Simulate some work 86 | time.sleep(0.1) 87 | return "orchestrator-done" 88 | 89 | 90 | def dummy_activity(ctx, input): 91 | # Simulate some work 92 | time.sleep(0.1) 93 | return "activity-done" 94 | 95 | 96 | def test_worker_concurrency_sync(): 97 | # Use small concurrency to make test observable 98 | options = ConcurrencyOptions( 99 | maximum_concurrent_activity_work_items=2, 100 | maximum_concurrent_orchestration_work_items=2, 101 | maximum_thread_pool_workers=2, 102 | ) 103 | worker = TaskHubGrpcWorker(concurrency_options=options) 104 | worker.add_orchestrator(dummy_orchestrator) 105 | worker.add_activity(dummy_activity) 106 | 107 | # Simulate submitting work items to the queues directly (bypassing gRPC) 108 | # We'll use the internal _async_worker_manager for this test 109 | manager = worker._async_worker_manager 110 | results = [] 111 | lock = threading.Lock() 112 | 113 | def make_work(kind, idx): 114 | def fn(*args, **kwargs): 115 | time.sleep(0.1) 116 | with lock: 117 | results.append((kind, idx)) 118 | return f"{kind}-{idx}-done" 119 | return fn 120 | 121 | # Submit more work than concurrency allows 122 | for i in range(5): 123 | manager.submit_orchestration(make_work("orch", i)) 124 | manager.submit_activity(make_work("act", i)) 125 | 126 | # Run the manager loop in a thread (sync context) 127 | def run_manager(): 128 | asyncio.run(manager.run()) 129 | 130 | t = threading.Thread(target=run_manager) 131 | t.start() 132 | time.sleep(1.5) # Let work process 133 | manager.shutdown() 134 | # Unblock the consumers by putting dummy items in the queues 135 | manager.activity_queue.put_nowait((lambda: None, (), {})) 136 | manager.orchestration_queue.put_nowait((lambda: None, (), {})) 137 | t.join(timeout=2) 138 | 139 | # Check that all work items completed 140 | assert len(results) == 10 141 | -------------------------------------------------------------------------------- /tests/durabletask/test_worker_concurrency_loop_async.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from durabletask.worker import ConcurrencyOptions, TaskHubGrpcWorker 4 | 5 | 6 | class DummyStub: 7 | def __init__(self): 8 | self.completed = [] 9 | 10 | def CompleteOrchestratorTask(self, res): 11 | self.completed.append(('orchestrator', res)) 12 | 13 | def CompleteActivityTask(self, res): 14 | self.completed.append(('activity', res)) 15 | 16 | 17 | class DummyRequest: 18 | def __init__(self, kind, instance_id): 19 | self.kind = kind 20 | self.instanceId = instance_id 21 | self.orchestrationInstance = type('O', (), {'instanceId': instance_id}) 22 | self.name = 'dummy' 23 | self.taskId = 1 24 | self.input = type('I', (), {'value': ''}) 25 | self.pastEvents = [] 26 | self.newEvents = [] 27 | 28 | def HasField(self, field): 29 | return (field == 'orchestratorRequest' and self.kind == 'orchestrator') or \ 30 | (field == 'activityRequest' and self.kind == 'activity') 31 | 32 | def WhichOneof(self, _): 33 | return f'{self.kind}Request' 34 | 35 | 36 | class DummyCompletionToken: 37 | pass 38 | 39 | 40 | def test_worker_concurrency_loop_async(): 41 | options = ConcurrencyOptions( 42 | maximum_concurrent_activity_work_items=2, 43 | maximum_concurrent_orchestration_work_items=1, 44 | maximum_thread_pool_workers=2, 45 | ) 46 | grpc_worker = TaskHubGrpcWorker(concurrency_options=options) 47 | stub = DummyStub() 48 | 49 | async def dummy_orchestrator(req, stub, completionToken): 50 | await asyncio.sleep(0.1) 51 | 
stub.CompleteOrchestratorTask('ok') 52 | 53 | async def dummy_activity(req, stub, completionToken): 54 | await asyncio.sleep(0.1) 55 | stub.CompleteActivityTask('ok') 56 | 57 | # Patch the worker's _execute_orchestrator and _execute_activity 58 | grpc_worker._execute_orchestrator = dummy_orchestrator 59 | grpc_worker._execute_activity = dummy_activity 60 | 61 | orchestrator_requests = [DummyRequest('orchestrator', f'orch{i}') for i in range(3)] 62 | activity_requests = [DummyRequest('activity', f'act{i}') for i in range(4)] 63 | 64 | async def run_test(): 65 | # Clear stub state before each run 66 | stub.completed.clear() 67 | worker_task = asyncio.create_task(grpc_worker._async_worker_manager.run()) 68 | for req in orchestrator_requests: 69 | grpc_worker._async_worker_manager.submit_orchestration(dummy_orchestrator, req, stub, DummyCompletionToken()) 70 | for req in activity_requests: 71 | grpc_worker._async_worker_manager.submit_activity(dummy_activity, req, stub, DummyCompletionToken()) 72 | await asyncio.sleep(1.0) 73 | orchestrator_count = sum(1 for t, _ in stub.completed if t == 'orchestrator') 74 | activity_count = sum(1 for t, _ in stub.completed if t == 'activity') 75 | assert orchestrator_count == 3, f"Expected 3 orchestrator completions, got {orchestrator_count}" 76 | assert activity_count == 4, f"Expected 4 activity completions, got {activity_count}" 77 | grpc_worker._async_worker_manager._shutdown = True 78 | await worker_task 79 | asyncio.run(run_test()) 80 | asyncio.run(run_test()) 81 | --------------------------------------------------------------------------------