├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── ci.yml │ ├── create-release.yml │ ├── publish-docs.yml │ ├── push-to-buf.yml │ ├── trigger-api-go-delete-release.yml │ ├── trigger-api-go-publish-release.yml │ └── trigger-api-go-update.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── api-linter.yaml ├── buf.gen.yaml ├── buf.lock ├── buf.yaml ├── google ├── api │ ├── annotations.proto │ └── http.proto └── protobuf │ ├── any.proto │ ├── descriptor.proto │ ├── duration.proto │ ├── empty.proto │ ├── struct.proto │ ├── timestamp.proto │ └── wrappers.proto ├── openapi ├── openapiv2.json ├── openapiv3.yaml └── payload_description.txt └── temporal └── api ├── activity └── v1 │ └── message.proto ├── batch └── v1 │ └── message.proto ├── command └── v1 │ └── message.proto ├── common └── v1 │ └── message.proto ├── deployment └── v1 │ └── message.proto ├── enums └── v1 │ ├── batch_operation.proto │ ├── command_type.proto │ ├── common.proto │ ├── deployment.proto │ ├── event_type.proto │ ├── failed_cause.proto │ ├── namespace.proto │ ├── nexus.proto │ ├── query.proto │ ├── reset.proto │ ├── schedule.proto │ ├── task_queue.proto │ ├── update.proto │ └── workflow.proto ├── errordetails └── v1 │ └── message.proto ├── export └── v1 │ └── message.proto ├── failure └── v1 │ └── message.proto ├── filter └── v1 │ └── message.proto ├── history └── v1 │ └── message.proto ├── namespace └── v1 │ └── message.proto ├── nexus └── v1 │ └── message.proto ├── operatorservice └── v1 │ ├── request_response.proto │ └── service.proto ├── protocol └── v1 │ └── message.proto ├── query └── v1 │ └── message.proto ├── replication └── v1 │ └── message.proto ├── rules └── v1 │ └── message.proto ├── schedule └── v1 │ └── message.proto ├── sdk └── v1 │ ├── enhanced_stack_trace.proto │ ├── task_complete_metadata.proto │ ├── user_metadata.proto │ └── workflow_metadata.proto ├── taskqueue └── v1 │ └── message.proto ├── update └── v1 │ └── message.proto ├── version └── v1 │ 
└── message.proto ├── worker └── v1 │ └── message.proto ├── workflow └── v1 │ └── message.proto └── workflowservice └── v1 ├── request_response.proto └── service.proto /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Syntax is here: 2 | # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax 3 | 4 | * @temporalio/server @temporalio/sdk 5 | temporal/api/sdk/ @temporalio/sdk 6 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | _**READ BEFORE MERGING:** All PRs require approval by both Server AND SDK teams before merging! This is why the number of required approvals is "2" and not "1"--two reviewers from the same team is NOT sufficient. If your PR is not approved by someone in BOTH teams, it may be summarily reverted._ 2 | 3 | 4 | **What changed?** 5 | 6 | 7 | 8 | **Why?** 9 | 10 | 11 | 12 | **Breaking changes** 13 | 14 | 15 | 16 | **Server PR** 17 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | pull_request: 4 | permissions: 5 | contents: read 6 | jobs: 7 | ci: 8 | name: ci 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions/setup-go@v4 13 | with: 14 | go-version: '^1.21' 15 | - uses: arduino/setup-protoc@v2 16 | - name: 'Setup jq' 17 | uses: dcarbone/install-jq-action@v2 18 | - run: make ci-build 19 | - name: Fail if the repo is dirty 20 | run: | 21 | if [[ -n $(git status --porcelain) ]]; then 22 | echo "Detected uncommitted changes." 
23 | git status 24 | git diff 25 | exit 1 26 | fi 27 | -------------------------------------------------------------------------------- /.github/workflows/create-release.yml: -------------------------------------------------------------------------------- 1 | name: "Create release" 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | branch: 7 | description: "Branch to be tagged" 8 | required: true 9 | default: master 10 | tag: 11 | description: "Tag for new version (v1.23.4)" 12 | required: true 13 | base_tag: 14 | description: "Base tag to generate commit list for release notes" 15 | required: true 16 | skip_sdk_check: 17 | description: "Skip sdk-go compatibility check" 18 | type: boolean 19 | 20 | jobs: 21 | prepare-inputs: 22 | name: "Prepare inputs" 23 | runs-on: ubuntu-latest 24 | outputs: 25 | api_commit_sha: ${{ steps.pin_commits.outputs.api_commit_sha }} 26 | api_go_commit_sha: ${{ steps.pin_commits.outputs.api_go_commit_sha }} 27 | steps: 28 | - name: Checkout api 29 | uses: actions/checkout@v4 30 | with: 31 | ref: ${{ github.event.inputs.branch }} 32 | fetch-depth: 0 33 | fetch-tags: true 34 | path: api 35 | 36 | - name: Checkout api-go 37 | uses: actions/checkout@v4 38 | with: 39 | repository: temporalio/api-go 40 | ref: ${{ github.event.inputs.branch }} 41 | submodules: true 42 | path: api-go 43 | 44 | - name: Validate inputs 45 | env: 46 | BRANCH: ${{ github.event.inputs.branch }} 47 | TAG: ${{ github.event.inputs.tag }} 48 | BASE_TAG: ${{ github.event.inputs.base_tag }} 49 | working-directory: ./api 50 | run: | 51 | if ! 
[[ "${TAG}" =~ ^v.* ]]; then 52 | echo "::error::Tag is not prefixed with 'v'" 53 | exit 1 54 | fi 55 | 56 | if [[ -n "$(git tag -l "$TAG")" ]]; then 57 | echo "::error::Tag already exists" 58 | exit 1 59 | fi 60 | 61 | if [[ -z "$BASE_TAG" || -z "$(git tag -l "$BASE_TAG")" ]]; then 62 | echo "::error::Base tag not specified or does not exist" 63 | exit 1 64 | fi 65 | 66 | - name: Pin commits sha 67 | id: pin_commits 68 | env: 69 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 70 | BRANCH: ${{ github.event.inputs.branch }} 71 | run: | 72 | API_COMMIT_SHA=$(git -C ./api rev-parse HEAD) 73 | API_GO_COMMIT_SHA=$(git -C ./api-go rev-parse HEAD) 74 | API_GO_API_COMMIT_SHA=$(git -C ./api-go rev-parse HEAD:proto/api) 75 | if [[ "${API_GO_API_COMMIT_SHA}" != "${API_COMMIT_SHA}" ]]; then 76 | echo "::error::api-go ref ${API_GO_COMMIT_SHA} does not reference api ref ${API_COMMIT_SHA}, api-go repo might not be up-to-date." 77 | exit 1 78 | fi 79 | echo "api_commit_sha=$API_COMMIT_SHA" >> "$GITHUB_OUTPUT" 80 | echo "api_go_commit_sha=$API_GO_COMMIT_SHA" >> "$GITHUB_OUTPUT" 81 | 82 | check-compatibility-sdk-go: 83 | needs: prepare-inputs 84 | if: ${{ github.event.inputs.skip_sdk_check == false || github.event.inputs.skip_sdk_check == 'false' }} 85 | uses: temporalio/api-go/.github/workflows/check-sdk-compat.yml@master 86 | with: 87 | sdk_ref: latest 88 | api_ref: ${{ needs.prepare-inputs.outputs.api_go_commit_sha }} 89 | 90 | create-release: 91 | name: "Create release" 92 | needs: [prepare-inputs, check-compatibility-sdk-go] 93 | if: | 94 | !cancelled() && 95 | needs.prepare-inputs.result == 'success' && 96 | contains(fromJSON('["success", "skipped"]'), needs.check-compatibility-sdk-go.result) 97 | runs-on: ubuntu-latest 98 | 99 | steps: 100 | - name: Generate token 101 | id: generate_token 102 | uses: actions/create-github-app-token@v1 103 | with: 104 | app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} 105 | private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} 106 | owner: ${{ 
github.repository_owner }} 107 | 108 | - name: Checkout 109 | uses: actions/checkout@v4 110 | with: 111 | ref: ${{ needs.prepare-inputs.outputs.api_commit_sha }} 112 | token: ${{ steps.generate_token.outputs.token }} 113 | 114 | - name: Create release 115 | env: 116 | GH_TOKEN: ${{ steps.generate_token.outputs.token }} 117 | REF: ${{ needs.prepare-inputs.outputs.api_commit_sha }} 118 | TAG: ${{ github.event.inputs.tag }} 119 | BASE_TAG: ${{ github.event.inputs.base_tag }} 120 | run: | 121 | gh repo set-default ${{ github.repository }} 122 | gh release create "$TAG" --target "$REF" --latest --generate-notes --notes-start-tag "$BASE_TAG" --draft 123 | 124 | release-api-go: 125 | needs: [prepare-inputs, create-release] 126 | if: | 127 | !cancelled() && 128 | needs.create-release.result == 'success' 129 | uses: temporalio/api-go/.github/workflows/create-release.yml@master 130 | with: 131 | ref: ${{ needs.prepare-inputs.outputs.api_go_commit_sha }} 132 | tag: ${{ github.event.inputs.tag }} 133 | api_commit_sha: ${{ needs.prepare-inputs.outputs.api_commit_sha }} 134 | base_tag: ${{ github.event.inputs.base_tag }} 135 | secrets: inherit 136 | -------------------------------------------------------------------------------- /.github/workflows/publish-docs.yml: -------------------------------------------------------------------------------- 1 | name: Publish docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | env: 9 | VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} 10 | VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} 11 | 12 | jobs: 13 | publish: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: Checkout repo 18 | uses: actions/checkout@v4 19 | - name: Generate docs 20 | run: | 21 | docker run -v $(pwd)/docs:/out -v $(pwd)/:/protos pseudomuto/protoc-gen-doc --doc_opt=html,index.html $(find temporal/api -type f -name "*.proto") 22 | - name: Deploy 23 | run: npx vercel deploy docs/ --prod --token=${{ secrets.VERCEL_TOKEN }} 24 | 
-------------------------------------------------------------------------------- /.github/workflows/push-to-buf.yml: -------------------------------------------------------------------------------- 1 | name: Push to Buf Registry 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v**' 7 | branches: 8 | - master 9 | permissions: 10 | contents: read 11 | jobs: 12 | publish: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout repo 16 | uses: actions/checkout@v4 17 | - uses: bufbuild/buf-action@v1 18 | with: 19 | version: 1.49.0 20 | token: ${{ secrets.BUF_TEMPORALIO_TOKEN }} 21 | -------------------------------------------------------------------------------- /.github/workflows/trigger-api-go-delete-release.yml: -------------------------------------------------------------------------------- 1 | name: "Trigger api-go delete release" 2 | 3 | on: 4 | release: 5 | types: [deleted] 6 | 7 | jobs: 8 | trigger-api-go-delete-release: 9 | uses: temporalio/api-go/.github/workflows/delete-release.yml@master 10 | with: 11 | tag: ${{ github.event.release.tag_name }} 12 | api_commit_sha: ${{ github.event.release.target_commitish }} 13 | secrets: inherit 14 | -------------------------------------------------------------------------------- /.github/workflows/trigger-api-go-publish-release.yml: -------------------------------------------------------------------------------- 1 | name: "Trigger api-go publish release" 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | trigger-api-go-publish-release: 9 | uses: temporalio/api-go/.github/workflows/publish-release.yml@master 10 | with: 11 | tag: ${{ github.event.release.tag_name }} 12 | api_commit_sha: ${{ github.event.release.target_commitish }} 13 | secrets: inherit 14 | -------------------------------------------------------------------------------- /.github/workflows/trigger-api-go-update.yml: -------------------------------------------------------------------------------- 1 | name: 'Trigger api-go Update' 2 | 3 | on: 4 | 
push: 5 | branches: 6 | - master 7 | workflow_dispatch: 8 | inputs: 9 | branch: 10 | description: "Branch in api-go repo to trigger update protos (default: master)" 11 | required: true 12 | default: master 13 | 14 | jobs: 15 | notify: 16 | name: 'Trigger api-go update' 17 | runs-on: ubuntu-latest 18 | 19 | defaults: 20 | run: 21 | shell: bash 22 | 23 | steps: 24 | - name: Generate token 25 | id: generate_token 26 | uses: actions/create-github-app-token@v1 27 | with: 28 | app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} 29 | private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} 30 | owner: ${{ github.repository_owner }} 31 | repositories: api-go # generate a token with permissions to trigger GHA in api-go repo 32 | 33 | - name: Dispatch api-go Github Action 34 | env: 35 | GH_TOKEN: ${{ steps.generate_token.outputs.token }} 36 | EVENT_PUSH_BRANCH: ${{ github.event.ref }} 37 | EVENT_PUSH_COMMIT_AUTHOR: ${{ github.event.head_commit.author.name }} 38 | EVENT_PUSH_COMMIT_AUTHOR_EMAIL: ${{ github.event.head_commit.author.email }} 39 | EVENT_PUSH_COMMIT_MESSAGE: ${{ github.event.head_commit.message }} 40 | EVENT_WF_DISPATCH_BRANCH: ${{ github.event.inputs.branch }} 41 | run: | 42 | case "${{ github.event_name }}" in 43 | "push") 44 | BRANCH="${EVENT_PUSH_BRANCH#refs/heads/}" 45 | COMMIT_AUTHOR="${EVENT_PUSH_COMMIT_AUTHOR}" 46 | COMMIT_AUTHOR_EMAIL="${EVENT_PUSH_COMMIT_AUTHOR_EMAIL}" 47 | COMMIT_MESSAGE="${EVENT_PUSH_COMMIT_MESSAGE}" 48 | ;; 49 | 50 | "workflow_dispatch") 51 | BRANCH="${EVENT_WF_DISPATCH_BRANCH}" 52 | COMMIT_AUTHOR="Temporal Data" 53 | COMMIT_AUTHOR_EMAIL="commander-data@temporal.io" 54 | COMMIT_MESSAGE="Update proto" 55 | ;; 56 | esac 57 | 58 | gh workflow run update-proto.yml -R https://github.com/temporalio/api-go \ 59 | -r master \ 60 | -f branch="${BRANCH}" \ 61 | -f commit_author="${COMMIT_AUTHOR}" \ 62 | -f commit_author_email="${COMMIT_AUTHOR_EMAIL}" \ 63 | -f commit_message="${COMMIT_MESSAGE}" 64 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /.gen 3 | /.vscode 4 | /.stamp 5 | *~ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2025 Temporal Technologies Inc. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL=bash -o pipefail 2 | 3 | $(VERBOSE).SILENT: 4 | ############################# Main targets ############################# 5 | ci-build: install proto http-api-docs 6 | 7 | # Install dependencies. 
8 | install: grpc-install api-linter-install buf-install 9 | 10 | # Run all linters and compile proto files. 11 | proto: grpc http-api-docs 12 | ######################################################################## 13 | 14 | ##### Variables ###### 15 | ifndef GOPATH 16 | GOPATH := $(shell go env GOPATH) 17 | endif 18 | 19 | GOBIN := $(if $(shell go env GOBIN),$(shell go env GOBIN),$(GOPATH)/bin) 20 | PATH := $(GOBIN):$(PATH) 21 | STAMPDIR := .stamp 22 | 23 | COLOR := "\e[1;36m%s\e[0m\n" 24 | 25 | PROTO_ROOT := . 26 | PROTO_FILES = $(shell find temporal -name "*.proto") 27 | PROTO_DIRS = $(sort $(dir $(PROTO_FILES))) 28 | PROTO_OUT := .gen 29 | PROTO_IMPORTS = \ 30 | -I=$(PROTO_ROOT) 31 | PROTO_PATHS = paths=source_relative:$(PROTO_OUT) 32 | 33 | OAPI_OUT := openapi 34 | OAPI3_PATH := .components.schemas.Payload 35 | 36 | $(PROTO_OUT): 37 | mkdir $(PROTO_OUT) 38 | 39 | ##### Compile proto files for go ##### 40 | grpc: buf-lint api-linter buf-breaking clean go-grpc fix-path 41 | 42 | go-grpc: clean $(PROTO_OUT) 43 | printf $(COLOR) "Compile for go-gRPC..." 
44 | protogen \ 45 | --root=$(PROTO_ROOT) \ 46 | --output=$(PROTO_OUT) \ 47 | --exclude=internal \ 48 | --exclude=proto/api/google \ 49 | -I $(PROTO_ROOT) \ 50 | -p go-grpc_out=$(PROTO_PATHS) \ 51 | -p grpc-gateway_out=allow_patch_feature=false,$(PROTO_PATHS) \ 52 | -p doc_out=html,index.html,source_relative:$(PROTO_OUT) 53 | 54 | fix-path: 55 | mv -f $(PROTO_OUT)/temporal/api/* $(PROTO_OUT) && rm -rf $(PROTO_OUT)/temporal 56 | 57 | # We need to rewrite bits of this to support our shorthand payload format 58 | # We use both yq and jq here as they preserve comments and the ordering of the original 59 | # document 60 | http-api-docs: 61 | protoc -I $(PROTO_ROOT) \ 62 | --openapi_out=$(OAPI_OUT) \ 63 | --openapi_opt=enum_type=string \ 64 | --openapiv2_out=openapi \ 65 | --openapiv2_opt=allow_merge=true,merge_file_name=openapiv2,simple_operation_ids=true \ 66 | temporal/api/workflowservice/v1/* \ 67 | temporal/api/operatorservice/v1/* 68 | 69 | jq --rawfile desc $(OAPI_OUT)/payload_description.txt < $(OAPI_OUT)/openapiv2.swagger.json '.definitions.v1Payload={description: $$desc}' > $(OAPI_OUT)/v2.tmp 70 | mv -f $(OAPI_OUT)/v2.tmp $(OAPI_OUT)/openapiv2.json 71 | rm -f $(OAPI_OUT)/openapiv2.swagger.json 72 | DESC=$$(cat $(OAPI_OUT)/payload_description.txt) yq e -i '$(OAPI3_PATH).description = strenv(DESC) | del($(OAPI3_PATH).type) | del($(OAPI3_PATH).properties)' $(OAPI_OUT)/openapi.yaml 73 | yq e -i '(.paths[] | .[] | .operationId) |= sub("\w+_(.*)", "$$1")' $(OAPI_OUT)/openapi.yaml 74 | mv -f $(OAPI_OUT)/openapi.yaml $(OAPI_OUT)/openapiv3.yaml 75 | 76 | ##### Plugins & tools ##### 77 | grpc-install: 78 | @printf $(COLOR) "Install/update protoc and plugins..." 
79 | @go install go.temporal.io/api/cmd/protogen@master 80 | @go install google.golang.org/protobuf/cmd/protoc-gen-go@latest 81 | @go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest 82 | @go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@latest 83 | @go install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc@latest 84 | @go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@latest 85 | @go install github.com/google/gnostic/cmd/protoc-gen-openapi@latest 86 | @go install github.com/mikefarah/yq/v4@latest 87 | 88 | api-linter-install: 89 | printf $(COLOR) "Install/update api-linter..." 90 | go install github.com/googleapis/api-linter/cmd/api-linter@v1.32.3 91 | go install github.com/itchyny/gojq/cmd/gojq@v0.12.14 92 | 93 | buf-install: 94 | printf $(COLOR) "Install/update buf..." 95 | go install github.com/bufbuild/buf/cmd/buf@v1.27.0 96 | 97 | ##### Linters ##### 98 | api-linter: 99 | printf $(COLOR) "Run api-linter..." 100 | @api-linter --set-exit-status $(PROTO_IMPORTS) --config $(PROTO_ROOT)/api-linter.yaml --output-format json $(PROTO_FILES) | gojq -r 'map(select(.problems != []) | . as $$file | .problems[] | {rule: .rule_doc_uri, location: "\($$file.file_path):\(.location.start_position.line_number)"}) | group_by(.rule) | .[] | .[0].rule + ":\n" + (map("\t" + .location) | join("\n"))' 101 | 102 | $(STAMPDIR): 103 | mkdir $@ 104 | 105 | $(STAMPDIR)/buf-mod-prune: $(STAMPDIR) buf.yaml 106 | printf $(COLOR) "Pruning buf module" 107 | buf mod prune 108 | touch $@ 109 | 110 | buf-lint: $(STAMPDIR)/buf-mod-prune 111 | printf $(COLOR) "Run buf linter..." 112 | (cd $(PROTO_ROOT) && buf lint) 113 | 114 | buf-breaking: 115 | @printf $(COLOR) "Run buf breaking changes check against master branch..." 116 | @(cd $(PROTO_ROOT) && buf breaking --against 'https://github.com/temporalio/api.git#branch=master') 117 | 118 | ##### Clean ##### 119 | clean: 120 | printf $(COLOR) "Delete generated go files..." 
121 | rm -rf $(PROTO_OUT) $(BUF_DEPS) 122 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Temporal proto files 2 | 3 | This repository contains both the protobuf descriptors and OpenAPI documentation for the Temporal platform. 4 | 5 | ## How to use 6 | 7 | Install as git submodule to the project. 8 | 9 | ## Contribution 10 | 11 | Make your change to the temporal/proto files, and run `make` to update the openapi definitions. 12 | 13 | ## License 14 | 15 | MIT License, please see [LICENSE](LICENSE) for details. 16 | -------------------------------------------------------------------------------- /api-linter.yaml: -------------------------------------------------------------------------------- 1 | - included_paths: 2 | - "**/*.proto" 3 | disabled_rules: 4 | - "core::0122::name-suffix" # Allow fields to have a "_name" suffix -- https://linter.aip.dev/122/name-suffix 5 | - "core::0140::uri" # We use "URL" instead of "URI" in many places. -- https://linter.aip.dev/140/uri 6 | - "core::0192::has-comments" # Don't require comments on every field. 
-- https://linter.aip.dev/192/has-comments 7 | - "core::0203::required" # We don't use resource annotations -- https://linter.aip.dev/203/required 8 | - "core::0203::optional" # Same rationale as `core::0203::required` -- https://linter.aip.dev/203/optional 9 | 10 | - included_paths: 11 | - "**/message.proto" 12 | disabled_rules: 13 | - "core::0123::resource-annotation" # We don't require resource annotations on all messages -- https://linter.aip.dev/123/resource-annotation 14 | 15 | - included_paths: 16 | - "**/workflowservice/v1/request_response.proto" 17 | - "**/operatorservice/v1/request_response.proto" 18 | disabled_rules: 19 | - "core::0131::request-name-behavior" # We don't add non-HTTP annotations -- https://linter.aip.dev/131/request-name-behavior 20 | - "core::0131::request-name-reference" # We don't add non-HTTP annotations -- https://linter.aip.dev/131/request-name-reference 21 | - "core::0131::request-name-required" # Don't require the `name` field in RPCs -- https://linter.aip.dev/131/request-name-required 22 | - "core::0131::request-unknown-fields" # Allow things other than `name`, like `namespace`, in RPCs. This could've been `parent`, but that ship has sailed. -- https://linter.aip.dev/131/request-unknown-fields 23 | - "core::0132::request-parent-required" # Don't require the `parent` field in List RPCs -- https://linter.aip.dev/132/request-parent-required 24 | - "core::0132::request-unknown-fields" # Same rationale as `core::0131::request-unknown-fields`, but for List RPCs -- https://linter.aip.dev/132/request-unknown-fields 25 | - "core::0132::response-unknown-fields" # We have a lot of List APIs which have more than just X's in the response. 
-- https://linter.aip.dev/132/response-unknown-fields 26 | - "core::0133::request-parent-required" # Same rationale as `core::0132::request-parent-required`, but for Create RPCs -- https://linter.aip.dev/133/request-parent-required 27 | - "core::0133::request-resource-behavior" # We don't add non-HTTP annotations -- https://linter.aip.dev/133/request-resource-behavior 28 | - "core::0133::request-resource-field" # We don't add non-HTTP annotations -- https://linter.aip.dev/133/request-resource-field 29 | - "core::0133::request-unknown-fields" # Same rationale as `core::0131::request-unknown-fields`, but for Create RPCs -- https://linter.aip.dev/133/request-unknown-fields 30 | - "core::0134::request-mask-required" # We don't support an update mask in any of our APIs -- https://linter.aip.dev/134/request-mask-required 31 | - "core::0134::request-resource-required" # We don't add non-HTTP annotations -- https://linter.aip.dev/134/request-resource-required 32 | - "core::0134::request-unknown-fields" # Same rationale as `core::0131::request-unknown-fields`, but for Update RPCs -- https://linter.aip.dev/134/request-unknown-fields 33 | - "core::0135::request-name-behavior" # We don't add non-HTTP annotations -- https://linter.aip.dev/135/request-name-behavior 34 | - "core::0135::request-name-reference" # We don't add non-HTTP annotations -- https://linter.aip.dev/135/request-name-reference 35 | - "core::0135::request-name-required" # Allow objects to be identified with something other than `name` -- https://linter.aip.dev/135/request-name-required 36 | - "core::0135::request-unknown-fields" # Same rationale as `core::0131::request-unknown-fields`, but for Delete RPCs -- https://linter.aip.dev/135/request-unknown-fields 37 | - "core::0158::request-page-size-field" # Allow "maximum_page_size" instead of "page_size" in requests, and allow non-paginated List RPCs -- https://linter.aip.dev/158/response-next-page-token-field 38 | - "core::0158::request-page-token-field" # Allow 
"next_page_token" instead of "page_token" in requests, and allow non-paginated List RPCs -- https://linter.aip.dev/158/response-next-page-token-field 39 | - "core::0158::response-next-page-token-field" # Allow for page tokens to be byte arrays instead of strings, and allow non-paginated List RPCs -- https://linter.aip.dev/158/response-next-page-token-field 40 | - "core::0158::response-plural-first-field" # We have many APIs where we use "next_page_token" instead of "page_token" in the request. For some reason, that causes AIP to enforce some response-specific linter rules like this one. -- https://linter.aip.dev/158/response-plural-first-field 41 | - "core::0158::response-repeated-first-field" # Same rationale as `core::0158::response-plural-first-field` -- https://linter.aip.dev/158/response-repeated-first-field 42 | 43 | - included_paths: 44 | - "**/workflowservice/v1/service.proto" 45 | - "**/operatorservice/v1/service.proto" 46 | disabled_rules: 47 | - "core::0127::resource-name-extraction" # We extract specific fields in URL since the gRPC API predates the HTTP API -- https://linter.aip.dev/127/resource-name-extraction 48 | 49 | # We do not require specific "Get", "Create", "Update", or "Delete" RPC 50 | # rules just because we happen to use a known RPC name prefix 51 | - "core::0131" # https://linter.aip.dev/0131 52 | - "core::0133" # https://linter.aip.dev/0133 53 | - "core::0134" # https://linter.aip.dev/0134 54 | - "core::0135" # https://linter.aip.dev/0135 55 | 56 | - "core::0136::http-uri-suffix" # We don't require HTTP calls to be suffixed with the same name as the gRPC name -- https://linter.aip.dev/136/http-uri-suffix 57 | 58 | - included_paths: 59 | - "**/operatorservice/v1/service.proto" 60 | disabled_rules: 61 | - "core::0127::http-annotation" # Do not require HTTP annotations on OperatorService calls at this time -- https://linter.aip.dev/127/http-annotation 62 | 63 | - included_paths: 64 | - "google/**/*.proto" 65 | disabled_rules: 66 | - "all" 
67 | -------------------------------------------------------------------------------- /buf.gen.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | plugins: 3 | - plugin: buf.build/protocolbuffers/go:v1.31.0 4 | out: ./ 5 | opt: 6 | - paths=source_relative 7 | - plugin: buf.build/grpc/go:v1.3.0 8 | out: ./ 9 | opt: 10 | - paths=source_relative 11 | - plugin: buf.build/grpc-ecosystem/gateway:v2.18.0 12 | out: ./ 13 | opt: 14 | - paths=source_relative 15 | - allow_patch_feature=false 16 | - name: go-helpers 17 | out: ./ 18 | path: ["go", "run", "./protoc-gen-go-helpers"] 19 | opt: 20 | - paths=source_relative 21 | -------------------------------------------------------------------------------- /buf.lock: -------------------------------------------------------------------------------- 1 | # Generated by buf. DO NOT EDIT. 2 | version: v1 3 | deps: 4 | - remote: buf.build 5 | owner: googleapis 6 | repository: googleapis 7 | commit: 28151c0d0a1641bf938a7672c500e01d 8 | digest: shake256:49215edf8ef57f7863004539deff8834cfb2195113f0b890dd1f67815d9353e28e668019165b9d872395871eeafcbab3ccfdb2b5f11734d3cca95be9e8d139de 9 | - remote: buf.build 10 | owner: grpc-ecosystem 11 | repository: grpc-gateway 12 | commit: 048ae6ff94ca4476b3225904b1078fad 13 | digest: shake256:e5250bf2d999516c02206d757502b902e406f35c099d0e869dc3e4f923f6870fe0805a9974c27df0695462937eae90cd4d9db90bb9a03489412560baa74a87b6 14 | -------------------------------------------------------------------------------- /buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | name: buf.build/temporalio/api 3 | deps: 4 | - buf.build/grpc-ecosystem/grpc-gateway 5 | - buf.build/googleapis/googleapis 6 | build: 7 | excludes: 8 | # Buf won't accept a local dependency on the google protos but we need them 9 | # to run api-linter, so just tell buf it ignore it 10 | - google 11 | breaking: 12 | use: 13 | - WIRE_JSON 14 | 
ignore: 15 | - google 16 | # TODO (yuri) remove this 17 | - temporal/api/workflow/v1/message.proto 18 | lint: 19 | use: 20 | - DEFAULT 21 | ignore: 22 | - google 23 | -------------------------------------------------------------------------------- /google/api/annotations.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | syntax = "proto3"; 16 | 17 | package google.api; 18 | 19 | import "google/api/http.proto"; 20 | import "google/protobuf/descriptor.proto"; 21 | 22 | option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; 23 | option java_multiple_files = true; 24 | option java_outer_classname = "AnnotationsProto"; 25 | option java_package = "com.google.api"; 26 | option objc_class_prefix = "GAPI"; 27 | 28 | extend google.protobuf.MethodOptions { 29 | // See `HttpRule`. 30 | HttpRule http = 72295728; 31 | } 32 | -------------------------------------------------------------------------------- /google/protobuf/any.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 
3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option go_package = "google.golang.org/protobuf/types/known/anypb"; 36 | option java_package = "com.google.protobuf"; 37 | option java_outer_classname = "AnyProto"; 38 | option java_multiple_files = true; 39 | option objc_class_prefix = "GPB"; 40 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 41 | 42 | // `Any` contains an arbitrary serialized protocol buffer message along with a 43 | // URL that describes the type of the serialized message. 44 | // 45 | // Protobuf library provides support to pack/unpack Any values in the form 46 | // of utility functions or additional generated methods of the Any type. 47 | // 48 | // Example 1: Pack and unpack a message in C++. 49 | // 50 | // Foo foo = ...; 51 | // Any any; 52 | // any.PackFrom(foo); 53 | // ... 54 | // if (any.UnpackTo(&foo)) { 55 | // ... 56 | // } 57 | // 58 | // Example 2: Pack and unpack a message in Java. 59 | // 60 | // Foo foo = ...; 61 | // Any any = Any.pack(foo); 62 | // ... 63 | // if (any.is(Foo.class)) { 64 | // foo = any.unpack(Foo.class); 65 | // } 66 | // // or ... 67 | // if (any.isSameTypeAs(Foo.getDefaultInstance())) { 68 | // foo = any.unpack(Foo.getDefaultInstance()); 69 | // } 70 | // 71 | // Example 3: Pack and unpack a message in Python. 72 | // 73 | // foo = Foo(...) 74 | // any = Any() 75 | // any.Pack(foo) 76 | // ... 77 | // if any.Is(Foo.DESCRIPTOR): 78 | // any.Unpack(foo) 79 | // ... 80 | // 81 | // Example 4: Pack and unpack a message in Go 82 | // 83 | // foo := &pb.Foo{...} 84 | // any, err := anypb.New(foo) 85 | // if err != nil { 86 | // ... 87 | // } 88 | // ... 89 | // foo := &pb.Foo{} 90 | // if err := any.UnmarshalTo(foo); err != nil { 91 | // ... 
92 | //     } 93 | // 94 | // The pack methods provided by protobuf library will by default use 95 | // 'type.googleapis.com/full.type.name' as the type URL and the unpack 96 | // methods only use the fully qualified type name after the last '/' 97 | // in the type URL, for example "foo.bar.com/x/y.z" will yield type 98 | // name "y.z". 99 | // 100 | // JSON 101 | // ==== 102 | // The JSON representation of an `Any` value uses the regular 103 | // representation of the deserialized, embedded message, with an 104 | // additional field `@type` which contains the type URL. Example: 105 | // 106 | // package google.profile; 107 | // message Person { 108 | // string first_name = 1; 109 | // string last_name = 2; 110 | // } 111 | // 112 | // { 113 | // "@type": "type.googleapis.com/google.profile.Person", 114 | // "firstName": <string>, 115 | // "lastName": <string> 116 | // } 117 | // 118 | // If the embedded message type is well-known and has a custom JSON 119 | // representation, that representation will be embedded adding a field 120 | // `value` which holds the custom JSON in addition to the `@type` 121 | // field. Example (for message [google.protobuf.Duration][]): 122 | // 123 | // { 124 | // "@type": "type.googleapis.com/google.protobuf.Duration", 125 | // "value": "1.212s" 126 | // } 127 | // 128 | message Any { 129 | // A URL/resource name that uniquely identifies the type of the serialized 130 | // protocol buffer message. This string must contain at least 131 | // one "/" character. The last segment of the URL's path must represent 132 | // the fully qualified name of the type (as in 133 | // `path/google.protobuf.Duration`). The name should be in a canonical form 134 | // (e.g., leading "." is not accepted). 135 | // 136 | // In practice, teams usually precompile into the binary all types that they 137 | // expect it to use in the context of Any.
However, for URLs which use the 138 | // scheme `http`, `https`, or no scheme, one can optionally set up a type 139 | // server that maps type URLs to message definitions as follows: 140 | // 141 | // * If no scheme is provided, `https` is assumed. 142 | // * An HTTP GET on the URL must yield a [google.protobuf.Type][] 143 | // value in binary format, or produce an error. 144 | // * Applications are allowed to cache lookup results based on the 145 | // URL, or have them precompiled into a binary to avoid any 146 | // lookup. Therefore, binary compatibility needs to be preserved 147 | // on changes to types. (Use versioned type names to manage 148 | // breaking changes.) 149 | // 150 | // Note: this functionality is not currently available in the official 151 | // protobuf release, and it is not used for type URLs beginning with 152 | // type.googleapis.com. As of May 2023, there are no widely used type server 153 | // implementations and no plans to implement one. 154 | // 155 | // Schemes other than `http`, `https` (or the empty scheme) might be 156 | // used with implementation specific semantics. 157 | // 158 | string type_url = 1; 159 | 160 | // Must be a valid serialized protocol buffer of the above specified type. 161 | bytes value = 2; 162 | } -------------------------------------------------------------------------------- /google/protobuf/duration.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 
11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option cc_enable_arenas = true; 36 | option go_package = "google.golang.org/protobuf/types/known/durationpb"; 37 | option java_package = "com.google.protobuf"; 38 | option java_outer_classname = "DurationProto"; 39 | option java_multiple_files = true; 40 | option objc_class_prefix = "GPB"; 41 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 42 | 43 | // A Duration represents a signed, fixed-length span of time represented 44 | // as a count of seconds and fractions of seconds at nanosecond 45 | // resolution. It is independent of any calendar and concepts like "day" 46 | // or "month". 
It is related to Timestamp in that the difference between 47 | // two Timestamp values is a Duration and it can be added or subtracted 48 | // from a Timestamp. Range is approximately +-10,000 years. 49 | // 50 | // # Examples 51 | // 52 | // Example 1: Compute Duration from two Timestamps in pseudo code. 53 | // 54 | // Timestamp start = ...; 55 | // Timestamp end = ...; 56 | // Duration duration = ...; 57 | // 58 | // duration.seconds = end.seconds - start.seconds; 59 | // duration.nanos = end.nanos - start.nanos; 60 | // 61 | // if (duration.seconds < 0 && duration.nanos > 0) { 62 | // duration.seconds += 1; 63 | // duration.nanos -= 1000000000; 64 | // } else if (duration.seconds > 0 && duration.nanos < 0) { 65 | // duration.seconds -= 1; 66 | // duration.nanos += 1000000000; 67 | // } 68 | // 69 | // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 70 | // 71 | // Timestamp start = ...; 72 | // Duration duration = ...; 73 | // Timestamp end = ...; 74 | // 75 | // end.seconds = start.seconds + duration.seconds; 76 | // end.nanos = start.nanos + duration.nanos; 77 | // 78 | // if (end.nanos < 0) { 79 | // end.seconds -= 1; 80 | // end.nanos += 1000000000; 81 | // } else if (end.nanos >= 1000000000) { 82 | // end.seconds += 1; 83 | // end.nanos -= 1000000000; 84 | // } 85 | // 86 | // Example 3: Compute Duration from datetime.timedelta in Python. 87 | // 88 | // td = datetime.timedelta(days=3, minutes=10) 89 | // duration = Duration() 90 | // duration.FromTimedelta(td) 91 | // 92 | // # JSON Mapping 93 | // 94 | // In JSON format, the Duration type is encoded as a string rather than an 95 | // object, where the string ends in the suffix "s" (indicating seconds) and 96 | // is preceded by the number of seconds, with nanoseconds expressed as 97 | // fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be 98 | // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should 99 | // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 100 | // microsecond should be expressed in JSON format as "3.000001s". 101 | // 102 | message Duration { 103 | // Signed seconds of the span of time. Must be from -315,576,000,000 104 | // to +315,576,000,000 inclusive. Note: these bounds are computed from: 105 | // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years 106 | int64 seconds = 1; 107 | 108 | // Signed fractions of a second at nanosecond resolution of the span 109 | // of time. Durations less than one second are represented with a 0 110 | // `seconds` field and a positive or negative `nanos` field. For durations 111 | // of one second or more, a non-zero value for the `nanos` field must be 112 | // of the same sign as the `seconds` field. Must be from -999,999,999 113 | // to +999,999,999 inclusive. 114 | int32 nanos = 2; 115 | } -------------------------------------------------------------------------------- /google/protobuf/empty.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. 
nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option go_package = "google.golang.org/protobuf/types/known/emptypb"; 36 | option java_package = "com.google.protobuf"; 37 | option java_outer_classname = "EmptyProto"; 38 | option java_multiple_files = true; 39 | option objc_class_prefix = "GPB"; 40 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 41 | option cc_enable_arenas = true; 42 | 43 | // A generic empty message that you can re-use to avoid defining duplicated 44 | // empty messages in your APIs. A typical example is to use it as the request 45 | // or the response type of an API method. 
For instance: 46 | // 47 | // service Foo { 48 | // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); 49 | // } 50 | // 51 | message Empty {} 52 | -------------------------------------------------------------------------------- /google/protobuf/struct.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option cc_enable_arenas = true; 36 | option go_package = "google.golang.org/protobuf/types/known/structpb"; 37 | option java_package = "com.google.protobuf"; 38 | option java_outer_classname = "StructProto"; 39 | option java_multiple_files = true; 40 | option objc_class_prefix = "GPB"; 41 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 42 | 43 | // `Struct` represents a structured data value, consisting of fields 44 | // which map to dynamically typed values. In some languages, `Struct` 45 | // might be supported by a native representation. For example, in 46 | // scripting languages like JS a struct is represented as an 47 | // object. The details of that representation are described together 48 | // with the proto support for the language. 49 | // 50 | // The JSON representation for `Struct` is JSON object. 51 | message Struct { 52 | // Unordered map of dynamically typed values. 53 | map<string, Value> fields = 1; 54 | } 55 | 56 | // `Value` represents a dynamically typed value which can be either 57 | // null, a number, a string, a boolean, a recursive struct value, or a 58 | // list of values. A producer of value is expected to set one of these 59 | // variants. Absence of any variant indicates an error. 60 | // 61 | // The JSON representation for `Value` is JSON value. 62 | message Value { 63 | // The kind of value.
64 | oneof kind { 65 | // Represents a null value. 66 | NullValue null_value = 1; 67 | // Represents a double value. 68 | double number_value = 2; 69 | // Represents a string value. 70 | string string_value = 3; 71 | // Represents a boolean value. 72 | bool bool_value = 4; 73 | // Represents a structured value. 74 | Struct struct_value = 5; 75 | // Represents a repeated `Value`. 76 | ListValue list_value = 6; 77 | } 78 | } 79 | 80 | // `NullValue` is a singleton enumeration to represent the null value for the 81 | // `Value` type union. 82 | // 83 | // The JSON representation for `NullValue` is JSON `null`. 84 | enum NullValue { 85 | // Null value. 86 | NULL_VALUE = 0; 87 | } 88 | 89 | // `ListValue` is a wrapper around a repeated field of values. 90 | // 91 | // The JSON representation for `ListValue` is JSON array. 92 | message ListValue { 93 | // Repeated field of dynamically typed values. 94 | repeated Value values = 1; 95 | } 96 | -------------------------------------------------------------------------------- /google/protobuf/timestamp.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. 
nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option cc_enable_arenas = true; 36 | option go_package = "google.golang.org/protobuf/types/known/timestamppb"; 37 | option java_package = "com.google.protobuf"; 38 | option java_outer_classname = "TimestampProto"; 39 | option java_multiple_files = true; 40 | option objc_class_prefix = "GPB"; 41 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 42 | 43 | // A Timestamp represents a point in time independent of any time zone or local 44 | // calendar, encoded as a count of seconds and fractions of seconds at 45 | // nanosecond resolution. The count is relative to an epoch at UTC midnight on 46 | // January 1, 1970, in the proleptic Gregorian calendar which extends the 47 | // Gregorian calendar backwards to year one. 48 | // 49 | // All minutes are 60 seconds long. 
Leap seconds are "smeared" so that no leap 50 | // second table is needed for interpretation, using a [24-hour linear 51 | // smear](https://developers.google.com/time/smear). 52 | // 53 | // The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By 54 | // restricting to that range, we ensure that we can convert to and from [RFC 55 | // 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. 56 | // 57 | // # Examples 58 | // 59 | // Example 1: Compute Timestamp from POSIX `time()`. 60 | // 61 | // Timestamp timestamp; 62 | // timestamp.set_seconds(time(NULL)); 63 | // timestamp.set_nanos(0); 64 | // 65 | // Example 2: Compute Timestamp from POSIX `gettimeofday()`. 66 | // 67 | // struct timeval tv; 68 | // gettimeofday(&tv, NULL); 69 | // 70 | // Timestamp timestamp; 71 | // timestamp.set_seconds(tv.tv_sec); 72 | // timestamp.set_nanos(tv.tv_usec * 1000); 73 | // 74 | // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. 75 | // 76 | // FILETIME ft; 77 | // GetSystemTimeAsFileTime(&ft); 78 | // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; 79 | // 80 | // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z 81 | // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 82 | // Timestamp timestamp; 83 | // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); 84 | // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); 85 | // 86 | // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 87 | // 88 | // long millis = System.currentTimeMillis(); 89 | // 90 | // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) 91 | // .setNanos((int) ((millis % 1000) * 1000000)).build(); 92 | // 93 | // Example 5: Compute Timestamp from Java `Instant.now()`. 
94 | // 95 | // Instant now = Instant.now(); 96 | // 97 | // Timestamp timestamp = 98 | // Timestamp.newBuilder().setSeconds(now.getEpochSecond()) 99 | // .setNanos(now.getNano()).build(); 100 | // 101 | // Example 6: Compute Timestamp from current time in Python. 102 | // 103 | // timestamp = Timestamp() 104 | // timestamp.GetCurrentTime() 105 | // 106 | // # JSON Mapping 107 | // 108 | // In JSON format, the Timestamp type is encoded as a string in the 109 | // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the 110 | // format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" 111 | // where {year} is always expressed using four digits while {month}, {day}, 112 | // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional 113 | // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), 114 | // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone 115 | // is required. A proto3 JSON serializer should always use UTC (as indicated by 116 | // "Z") when printing the Timestamp type and a proto3 JSON parser should be 117 | // able to accept both UTC and other timezones (as indicated by an offset). 118 | // 119 | // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past 120 | // 01:30 UTC on January 15, 2017. 121 | // 122 | // In JavaScript, one can convert a Date object to this format using the 123 | // standard 124 | // [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) 125 | // method. In Python, a standard `datetime.datetime` object can be converted 126 | // to this format using 127 | // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with 128 | // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one can use 129 | // the Joda Time's [`ISODateTimeFormat.dateTime()`]( 130 | // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() 131 | // ) to obtain a formatter capable of generating timestamps in this format. 132 | // 133 | message Timestamp { 134 | // Represents seconds of UTC time since Unix epoch 135 | // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 136 | // 9999-12-31T23:59:59Z inclusive. 137 | int64 seconds = 1; 138 | 139 | // Non-negative fractions of a second at nanosecond resolution. Negative 140 | // second values with fractions must still have non-negative nanos values 141 | // that count forward in time. Must be from 0 to 999,999,999 142 | // inclusive. 143 | int32 nanos = 2; 144 | } -------------------------------------------------------------------------------- /google/protobuf/wrappers.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 
18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | // 31 | // Wrappers for primitive (non-message) types. These types are useful 32 | // for embedding primitives in the `google.protobuf.Any` type and for places 33 | // where we need to distinguish between the absence of a primitive 34 | // typed field and its default value. 35 | // 36 | // These wrappers have no meaningful use within repeated fields as they lack 37 | // the ability to detect presence on individual elements. 38 | // These wrappers have no meaningful use within a map or a oneof since 39 | // individual entries of a map or fields of a oneof can already detect presence. 40 | 41 | syntax = "proto3"; 42 | 43 | package google.protobuf; 44 | 45 | option cc_enable_arenas = true; 46 | option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; 47 | option java_package = "com.google.protobuf"; 48 | option java_outer_classname = "WrappersProto"; 49 | option java_multiple_files = true; 50 | option objc_class_prefix = "GPB"; 51 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 52 | 53 | // Wrapper message for `double`. 54 | // 55 | // The JSON representation for `DoubleValue` is JSON number. 
56 | message DoubleValue { 57 | // The double value. 58 | double value = 1; 59 | } 60 | 61 | // Wrapper message for `float`. 62 | // 63 | // The JSON representation for `FloatValue` is JSON number. 64 | message FloatValue { 65 | // The float value. 66 | float value = 1; 67 | } 68 | 69 | // Wrapper message for `int64`. 70 | // 71 | // The JSON representation for `Int64Value` is JSON string. 72 | message Int64Value { 73 | // The int64 value. 74 | int64 value = 1; 75 | } 76 | 77 | // Wrapper message for `uint64`. 78 | // 79 | // The JSON representation for `UInt64Value` is JSON string. 80 | message UInt64Value { 81 | // The uint64 value. 82 | uint64 value = 1; 83 | } 84 | 85 | // Wrapper message for `int32`. 86 | // 87 | // The JSON representation for `Int32Value` is JSON number. 88 | message Int32Value { 89 | // The int32 value. 90 | int32 value = 1; 91 | } 92 | 93 | // Wrapper message for `uint32`. 94 | // 95 | // The JSON representation for `UInt32Value` is JSON number. 96 | message UInt32Value { 97 | // The uint32 value. 98 | uint32 value = 1; 99 | } 100 | 101 | // Wrapper message for `bool`. 102 | // 103 | // The JSON representation for `BoolValue` is JSON `true` and `false`. 104 | message BoolValue { 105 | // The bool value. 106 | bool value = 1; 107 | } 108 | 109 | // Wrapper message for `string`. 110 | // 111 | // The JSON representation for `StringValue` is JSON string. 112 | message StringValue { 113 | // The string value. 114 | string value = 1; 115 | } 116 | 117 | // Wrapper message for `bytes`. 118 | // 119 | // The JSON representation for `BytesValue` is JSON string. 120 | message BytesValue { 121 | // The bytes value. 122 | bytes value = 1; 123 | } -------------------------------------------------------------------------------- /openapi/payload_description.txt: -------------------------------------------------------------------------------- 1 | Arbitrary payload data in an unconstrained format. 
2 | This may be activity input parameters, a workflow result, a memo, etc. 3 | -------------------------------------------------------------------------------- /temporal/api/activity/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.activity.v1; 4 | 5 | option go_package = "go.temporal.io/api/activity/v1;activity"; 6 | option java_package = "io.temporal.api.activity.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Activity::V1"; 10 | option csharp_namespace = "Temporalio.Api.Activity.V1"; 11 | 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/api/taskqueue/v1/message.proto"; 14 | 15 | import "google/protobuf/duration.proto"; 16 | 17 | message ActivityOptions { 18 | temporal.api.taskqueue.v1.TaskQueue task_queue = 1; 19 | 20 | // Indicates how long the caller is willing to wait for an activity completion. Limits how long 21 | // retries will be attempted. Either this or `start_to_close_timeout` must be specified. 22 | // 23 | // (-- api-linter: core::0140::prepositions=disabled 24 | // aip.dev/not-precedent: "to" is used to indicate interval. --) 25 | google.protobuf.Duration schedule_to_close_timeout = 2; 26 | // Limits time an activity task can stay in a task queue before a worker picks it up. This 27 | // timeout is always non retryable, as all a retry would achieve is to put it back into the same 28 | // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not 29 | // specified. 30 | // 31 | // (-- api-linter: core::0140::prepositions=disabled 32 | // aip.dev/not-precedent: "to" is used to indicate interval. --) 33 | google.protobuf.Duration schedule_to_start_timeout = 3; 34 | // Maximum time an activity is allowed to execute after being picked up by a worker. This 35 | // timeout is always retryable. 
Either this or `schedule_to_close_timeout` must be 36 | // specified. 37 | // 38 | // (-- api-linter: core::0140::prepositions=disabled 39 | // aip.dev/not-precedent: "to" is used to indicate interval. --) 40 | google.protobuf.Duration start_to_close_timeout = 4; 41 | // Maximum permitted time between successful worker heartbeats. 42 | google.protobuf.Duration heartbeat_timeout = 5; 43 | 44 | temporal.api.common.v1.RetryPolicy retry_policy = 6; 45 | } -------------------------------------------------------------------------------- /temporal/api/batch/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.batch.v1; 4 | 5 | option go_package = "go.temporal.io/api/batch/v1;batch"; 6 | option java_package = "io.temporal.api.batch.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Batch::V1"; 10 | option csharp_namespace = "Temporalio.Api.Batch.V1"; 11 | 12 | import "google/protobuf/duration.proto"; 13 | import "google/protobuf/field_mask.proto"; 14 | import "google/protobuf/timestamp.proto"; 15 | import "temporal/api/common/v1/message.proto"; 16 | import "temporal/api/enums/v1/batch_operation.proto"; 17 | import "temporal/api/enums/v1/reset.proto"; 18 | import "temporal/api/rules/v1/message.proto"; 19 | import "temporal/api/workflow/v1/message.proto"; 20 | 21 | message BatchOperationInfo { 22 | // Batch job ID 23 | string job_id = 1; 24 | // Batch operation state 25 | temporal.api.enums.v1.BatchOperationState state = 2; 26 | // Batch operation start time 27 | google.protobuf.Timestamp start_time = 3; 28 | // Batch operation close time 29 | google.protobuf.Timestamp close_time = 4; 30 | } 31 | 32 | // BatchOperationTermination sends terminate requests to batch workflows. 33 | // Keep the parameter in sync with temporal.api.workflowservice.v1.TerminateWorkflowExecutionRequest. 
34 | // Ignore first_execution_run_id because this is used for single workflow operation. 35 | message BatchOperationTermination { 36 | // Serialized value(s) to provide to the termination event 37 | temporal.api.common.v1.Payloads details = 1; 38 | // The identity of the worker/client 39 | string identity = 2; 40 | } 41 | 42 | // BatchOperationSignal sends signals to batch workflows. 43 | // Keep the parameter in sync with temporal.api.workflowservice.v1.SignalWorkflowExecutionRequest. 44 | message BatchOperationSignal { 45 | // The workflow author-defined name of the signal to send to the workflow 46 | string signal = 1; 47 | // Serialized value(s) to provide with the signal 48 | temporal.api.common.v1.Payloads input = 2; 49 | // Headers that are passed with the signal to the processing workflow. 50 | // These can include things like auth or tracing tokens. 51 | temporal.api.common.v1.Header header = 3; 52 | // The identity of the worker/client 53 | string identity = 4; 54 | } 55 | 56 | // BatchOperationCancellation sends cancel requests to batch workflows. 57 | // Keep the parameter in sync with temporal.api.workflowservice.v1.RequestCancelWorkflowExecutionRequest. 58 | // Ignore first_execution_run_id because this is used for single workflow operation. 59 | message BatchOperationCancellation { 60 | // The identity of the worker/client 61 | string identity = 1; 62 | } 63 | 64 | // BatchOperationDeletion sends deletion requests to batch workflows. 65 | // Keep the parameter in sync with temporal.api.workflowservice.v1.DeleteWorkflowExecutionRequest. 66 | message BatchOperationDeletion { 67 | // The identity of the worker/client 68 | string identity = 1; 69 | } 70 | 71 | // BatchOperationReset sends reset requests to batch workflows. 72 | // Keep the parameter in sync with temporal.api.workflowservice.v1.ResetWorkflowExecutionRequest. 73 | message BatchOperationReset { 74 | // The identity of the worker/client. 
75 | string identity = 3; 76 | 77 | // Describes what to reset to and how. If set, `reset_type` and `reset_reapply_type` are ignored. 78 | temporal.api.common.v1.ResetOptions options = 4; 79 | 80 | // Deprecated. Use `options`. 81 | temporal.api.enums.v1.ResetType reset_type = 1 [deprecated = true]; 82 | // Deprecated. Use `options`. 83 | temporal.api.enums.v1.ResetReapplyType reset_reapply_type = 2 [deprecated = true]; 84 | // Operations to perform after the workflow has been reset. These operations will be applied 85 | // to the *new* run of the workflow execution in the order they are provided. 86 | // All operations are applied to the workflow before the first new workflow task is generated 87 | repeated temporal.api.workflow.v1.PostResetOperation post_reset_operations = 5; 88 | } 89 | 90 | // BatchOperationUpdateWorkflowExecutionOptions sends UpdateWorkflowExecutionOptions requests to batch workflows. 91 | // Keep the parameters in sync with temporal.api.workflowservice.v1.UpdateWorkflowExecutionOptionsRequest. 92 | message BatchOperationUpdateWorkflowExecutionOptions { 93 | // The identity of the worker/client. 94 | string identity = 1; 95 | 96 | // Update Workflow options that were originally specified via StartWorkflowExecution. Partial updates are accepted and controlled by update_mask. 97 | temporal.api.workflow.v1.WorkflowExecutionOptions workflow_execution_options = 2; 98 | 99 | // Controls which fields from `workflow_execution_options` will be applied. 100 | // To unset a field, set it to null and use the update mask to indicate that it should be mutated. 101 | google.protobuf.FieldMask update_mask = 3; 102 | } 103 | 104 | // BatchOperationUnpauseActivities sends unpause requests to batch workflows. 105 | message BatchOperationUnpauseActivities { 106 | // The identity of the worker/client. 107 | string identity = 1; 108 | 109 | // The activity to unpause. If match_all is set to true, all activities will be unpaused. 
110 | oneof activity { 111 | string type = 2; 112 | bool match_all = 3; 113 | } 114 | 115 | // Providing this flag will also reset the number of attempts. 116 | bool reset_attempts = 4; 117 | 118 | // Providing this flag will also reset the heartbeat details. 119 | bool reset_heartbeat = 5; 120 | 121 | // If set, the activity will start at a random time within the specified jitter 122 | // duration, introducing variability to the start time. 123 | google.protobuf.Duration jitter = 6; 124 | } 125 | 126 | // BatchOperationTriggerWorkflowRule sends TriggerWorkflowRule requests to batch workflows. 127 | message BatchOperationTriggerWorkflowRule { 128 | // The identity of the worker/client. 129 | string identity = 1; 130 | 131 | oneof rule { 132 | // ID of existing rule. 133 | string id = 2; 134 | // Rule specification to be applied to the workflow without creating a new rule. 135 | temporal.api.rules.v1.WorkflowRuleSpec spec = 3; 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /temporal/api/common/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.common.v1; 4 | 5 | option go_package = "go.temporal.io/api/common/v1;common"; 6 | option java_package = "io.temporal.api.common.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Common::V1"; 10 | option csharp_namespace = "Temporalio.Api.Common.V1"; 11 | 12 | import "google/protobuf/duration.proto"; 13 | import "google/protobuf/empty.proto"; 14 | 15 | import "temporal/api/enums/v1/common.proto"; 16 | import "temporal/api/enums/v1/event_type.proto"; 17 | import "temporal/api/enums/v1/reset.proto"; 18 | 19 | message DataBlob { 20 | temporal.api.enums.v1.EncodingType encoding_type = 1; 21 | bytes data = 2; 22 | } 23 | 24 | // See `Payload` 25 | message Payloads { 26 | repeated Payload 
payloads = 1; 27 | } 28 | 29 | // Represents some binary (byte array) data (ex: activity input parameters or workflow result) with 30 | // metadata which describes this binary data (format, encoding, encryption, etc). Serialization 31 | // of the data may be user-defined. 32 | message Payload { 33 | map metadata = 1; 34 | bytes data = 2; 35 | } 36 | 37 | // A user-defined set of *indexed* fields that are used/exposed when listing/searching workflows. 38 | // The payload is not serialized in a user-defined way. 39 | message SearchAttributes { 40 | map indexed_fields = 1; 41 | } 42 | 43 | // A user-defined set of *unindexed* fields that are exposed when listing/searching workflows 44 | message Memo { 45 | map fields = 1; 46 | } 47 | 48 | // Contains metadata that can be attached to a variety of requests, like starting a workflow, and 49 | // can be propagated between, for example, workflows and activities. 50 | message Header { 51 | map fields = 1; 52 | } 53 | 54 | // Identifies a specific workflow within a namespace. Practically speaking, because run_id is a 55 | // uuid, a workflow execution is globally unique. Note that many commands allow specifying an empty 56 | // run id as a way of saying "target the latest run of the workflow". 57 | message WorkflowExecution { 58 | string workflow_id = 1; 59 | string run_id = 2; 60 | } 61 | 62 | // Represents the identifier used by a workflow author to define the workflow. Typically, the 63 | // name of a function. This is sometimes referred to as the workflow's "name" 64 | message WorkflowType { 65 | string name = 1; 66 | } 67 | 68 | // Represents the identifier used by a activity author to define the activity. Typically, the 69 | // name of a function. This is sometimes referred to as the activity's "name" 70 | message ActivityType { 71 | string name = 1; 72 | } 73 | 74 | // How retries ought to be handled, usable by both workflows and activities 75 | message RetryPolicy { 76 | // Interval of the first retry. 
If retryBackoffCoefficient is 1.0 then it is used for all retries. 77 | google.protobuf.Duration initial_interval = 1; 78 | // Coefficient used to calculate the next retry interval. 79 | // The next retry interval is previous interval multiplied by the coefficient. 80 | // Must be 1 or larger. 81 | double backoff_coefficient = 2; 82 | // Maximum interval between retries. Exponential backoff leads to interval increase. 83 | // This value is the cap of the increase. Default is 100x of the initial interval. 84 | google.protobuf.Duration maximum_interval = 3; 85 | // Maximum number of attempts. When exceeded the retries stop even if not expired yet. 86 | // 1 disables retries. 0 means unlimited (up to the timeouts) 87 | int32 maximum_attempts = 4; 88 | // Non-Retryable errors types. Will stop retrying if the error type matches this list. Note that 89 | // this is not a substring match, the error *type* (not message) must match exactly. 90 | repeated string non_retryable_error_types = 5; 91 | } 92 | 93 | // Metadata relevant for metering purposes 94 | message MeteringMetadata { 95 | // Count of local activities which have begun an execution attempt during this workflow task, 96 | // and whose first attempt occurred in some previous task. This is used for metering 97 | // purposes, and does not affect workflow state. 98 | // 99 | // (-- api-linter: core::0141::forbidden-types=disabled 100 | // aip.dev/not-precedent: Negative values make no sense to represent. --) 101 | uint32 nonfirst_local_activity_execution_attempts = 13; 102 | } 103 | 104 | // Deprecated. This message is replaced with `Deployment` and `VersioningBehavior`. 105 | // Identifies the version(s) of a worker that processed a task 106 | message WorkerVersionStamp { 107 | // An opaque whole-worker identifier. Replaces the deprecated `binary_checksum` field when this 108 | // message is included in requests which previously used that. 
109 | string build_id = 1; 110 | 111 | // If set, the worker is opting in to worker versioning. Otherwise, this is used only as a 112 | // marker for workflow reset points and the BuildIDs search attribute. 113 | bool use_versioning = 3; 114 | 115 | // Later, may include bundle id that could be used for WASM and/or JS dynamically loadable bundles. 116 | } 117 | 118 | // Identifies the version that a worker is compatible with when polling or identifying itself, 119 | // and whether or not this worker is opting into the build-id based versioning feature. This is 120 | // used by matching to determine which workers ought to receive what tasks. 121 | // Deprecated. Use WorkerDeploymentOptions instead. 122 | message WorkerVersionCapabilities { 123 | // An opaque whole-worker identifier 124 | string build_id = 1; 125 | 126 | // If set, the worker is opting in to worker versioning, and wishes to only receive appropriate 127 | // tasks. 128 | bool use_versioning = 2; 129 | 130 | // Must be sent if user has set a deployment series name (versioning-3). 131 | string deployment_series_name = 4; 132 | 133 | // Later, may include info like "I can process WASM and/or JS bundles" 134 | } 135 | 136 | // Describes where and how to reset a workflow, used for batch reset currently 137 | // and may be used for single-workflow reset later. 138 | message ResetOptions { 139 | // Which workflow task to reset to. 140 | oneof target { 141 | // Resets to the first workflow task completed or started event. 142 | google.protobuf.Empty first_workflow_task = 1; 143 | // Resets to the last workflow task completed or started event. 144 | google.protobuf.Empty last_workflow_task = 2; 145 | // The id of a specific `WORKFLOW_TASK_COMPLETED`,`WORKFLOW_TASK_TIMED_OUT`, `WORKFLOW_TASK_FAILED`, or 146 | // `WORKFLOW_TASK_STARTED` event to reset to. 147 | // Note that this option doesn't make sense when used as part of a batch request. 
148 | int64 workflow_task_id = 3; 149 | // Resets to the first workflow task processed by this build id. 150 | // If the workflow was not processed by the build id, or the workflow task can't be 151 | // determined, no reset will be performed. 152 | // Note that by default, this reset is allowed to be to a prior run in a chain of 153 | // continue-as-new. 154 | string build_id = 4; 155 | } 156 | 157 | // Deprecated. Use `options`. 158 | // Default: RESET_REAPPLY_TYPE_SIGNAL 159 | temporal.api.enums.v1.ResetReapplyType reset_reapply_type = 10 [deprecated = true]; 160 | 161 | // If true, limit the reset to only within the current run. (Applies to build_id targets and 162 | // possibly others in the future.) 163 | bool current_run_only = 11; 164 | 165 | // Event types not to be reapplied 166 | repeated temporal.api.enums.v1.ResetReapplyExcludeType reset_reapply_exclude_types = 12; 167 | } 168 | 169 | // Callback to attach to various events in the system, e.g. workflow run completion. 170 | message Callback { 171 | message Nexus { 172 | // Callback URL. 173 | string url = 1; 174 | // Header to attach to callback request. 175 | map header = 2; 176 | } 177 | 178 | // Callbacks to be delivered internally within the system. 179 | // This variant is not settable in the API and will be rejected by the service with an INVALID_ARGUMENT error. 180 | // The only reason that this is exposed is because callbacks are replicated across clusters via the 181 | // WorkflowExecutionStarted event, which is defined in the public API. 182 | message Internal { 183 | // Opaque internal data. 184 | bytes data = 1; 185 | } 186 | 187 | reserved 1; // For a generic callback mechanism to be added later. 188 | oneof variant { 189 | Nexus nexus = 2; 190 | Internal internal = 3; 191 | } 192 | 193 | // Links associated with the callback. It can be used to link to underlying resources of the 194 | // callback. 
195 | repeated Link links = 100; 196 | } 197 | 198 | // Link can be associated with history events. It might contain information about an external entity 199 | // related to the history event. For example, workflow A makes a Nexus call that starts workflow B: 200 | // in this case, a history event in workflow A could contain a Link to the workflow started event in 201 | // workflow B, and vice-versa. 202 | message Link { 203 | message WorkflowEvent { 204 | // EventReference is a direct reference to a history event through the event ID. 205 | message EventReference { 206 | int64 event_id = 1; 207 | temporal.api.enums.v1.EventType event_type = 2; 208 | } 209 | 210 | // RequestIdReference is a indirect reference to a history event through the request ID. 211 | message RequestIdReference { 212 | string request_id = 1; 213 | temporal.api.enums.v1.EventType event_type = 2; 214 | } 215 | 216 | string namespace = 1; 217 | string workflow_id = 2; 218 | string run_id = 3; 219 | 220 | // Additional information about the workflow event. 221 | // Eg: the caller workflow can send the history event details that made the Nexus call. 222 | oneof reference { 223 | EventReference event_ref = 100; 224 | RequestIdReference request_id_ref = 101; 225 | } 226 | } 227 | 228 | // A link to a built-in batch job. 229 | // Batch jobs can be used to perform operations on a set of workflows (e.g. terminate, signal, cancel, etc). 230 | // This link can be put on workflow history events generated by actions taken by a batch job. 231 | message BatchJob { 232 | string job_id = 1; 233 | } 234 | 235 | oneof variant { 236 | WorkflowEvent workflow_event = 1; 237 | BatchJob batch_job = 2; 238 | } 239 | } 240 | 241 | // Priority contains metadata that controls relative ordering of task processing 242 | // when tasks are backlogged in a queue. Initially, Priority will be used in 243 | // activity and workflow task queues, which are typically where backlogs exist. 
244 | // Other queues in the server (such as transfer and timer queues) and rate 245 | // limiting decisions do not use Priority, but may in the future. 246 | // 247 | // Priority is attached to workflows and activities. Activities and child 248 | // workflows inherit Priority from the workflow that created them, but may 249 | // override fields when they are started or modified. For each field of a 250 | // Priority on an activity/workflow, not present or equal to zero/empty string 251 | // means to inherit the value from the calling workflow, or if there is no 252 | // calling workflow, then use the default (documented below). 253 | // 254 | // Despite being named "Priority", this message will also contains fields that 255 | // control "fairness" mechanisms. 256 | // 257 | // The overall semantics of Priority are: 258 | // 1. First, consider "priority_key": lower number goes first. 259 | // (more will be added here later) 260 | message Priority { 261 | // Priority key is a positive integer from 1 to n, where smaller integers 262 | // correspond to higher priorities (tasks run sooner). In general, tasks in 263 | // a queue should be processed in close to priority order, although small 264 | // deviations are possible. 265 | // 266 | // The maximum priority value (minimum priority) is determined by server 267 | // configuration, and defaults to 5. 268 | // 269 | // The default priority is (min+max)/2. With the default max of 5 and min of 270 | // 1, that comes out to 3. 
271 | int32 priority_key = 1; 272 | } 273 | -------------------------------------------------------------------------------- /temporal/api/deployment/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.deployment.v1; 4 | 5 | option go_package = "go.temporal.io/api/deployment/v1;deployment"; 6 | option java_package = "io.temporal.api.deployment.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Deployment::V1"; 10 | option csharp_namespace = "Temporalio.Api.Deployment.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | 14 | import "temporal/api/enums/v1/deployment.proto"; 15 | import "temporal/api/enums/v1/task_queue.proto"; 16 | import "temporal/api/common/v1/message.proto"; 17 | 18 | // Worker Deployment options set in SDK that need to be sent to server in every poll. 19 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 20 | message WorkerDeploymentOptions { 21 | // Required. Worker Deployment name. 22 | string deployment_name = 1; 23 | // The Build ID of the worker. Required when `worker_versioning_mode==VERSIONED`, in which case, 24 | // the worker will be part of a Deployment Version. 25 | string build_id = 2; 26 | // Required. Versioning Mode for this worker. Must be the same for all workers with the 27 | // same `deployment_name` and `build_id` combination, across all Task Queues. 28 | // When `worker_versioning_mode==VERSIONED`, the worker will be part of a Deployment Version. 29 | temporal.api.enums.v1.WorkerVersioningMode worker_versioning_mode = 3; 30 | } 31 | 32 | // `Deployment` identifies a deployment of Temporal workers. The combination of deployment series 33 | // name + build ID serves as the identifier. User can use `WorkerDeploymentOptions` in their worker 34 | // programs to specify these values. 
35 | // Deprecated. 36 | message Deployment { 37 | // Different versions of the same worker service/application are related together by having a 38 | // shared series name. 39 | // Out of all deployments of a series, one can be designated as the current deployment, which 40 | // receives new workflow executions and new tasks of workflows with 41 | // `VERSIONING_BEHAVIOR_AUTO_UPGRADE` versioning behavior. 42 | string series_name = 1; 43 | // Build ID changes with each version of the worker when the worker program code and/or config 44 | // changes. 45 | string build_id = 2; 46 | } 47 | 48 | // `DeploymentInfo` holds information about a deployment. Deployment information is tracked 49 | // automatically by server as soon as the first poll from that deployment reaches the server. There 50 | // can be multiple task queue workers in a single deployment which are listed in this message. 51 | // Deprecated. 52 | message DeploymentInfo { 53 | Deployment deployment = 1; 54 | google.protobuf.Timestamp create_time = 2; 55 | repeated TaskQueueInfo task_queue_infos = 3; 56 | // A user-defined set of key-values. Can be updated as part of write operations to the 57 | // deployment, such as `SetCurrentDeployment`. 58 | map metadata = 4; 59 | // If this deployment is the current deployment of its deployment series. 60 | bool is_current = 5; 61 | 62 | message TaskQueueInfo { 63 | string name = 1; 64 | temporal.api.enums.v1.TaskQueueType type = 2; 65 | // When server saw the first poller for this task queue in this deployment. 66 | google.protobuf.Timestamp first_poller_time = 3; 67 | } 68 | } 69 | 70 | // Used as part of Deployment write APIs to update metadata attached to a deployment. 71 | // Deprecated. 72 | message UpdateDeploymentMetadata { 73 | map upsert_entries = 1; 74 | // List of keys to remove from the metadata. 
75 | repeated string remove_entries = 2; 76 | } 77 | 78 | // DeploymentListInfo is an abbreviated set of fields from DeploymentInfo that's returned in 79 | // ListDeployments. 80 | // Deprecated. 81 | message DeploymentListInfo { 82 | deployment.v1.Deployment deployment = 1; 83 | google.protobuf.Timestamp create_time = 2; 84 | // If this deployment is the current deployment of its deployment series. 85 | bool is_current = 3; 86 | } 87 | 88 | 89 | // A Worker Deployment Version (Version, for short) represents all workers of the same 90 | // code and config within a Deployment. Workers of the same Version are expected to 91 | // behave exactly the same so when executions move between them there are no 92 | // non-determinism issues. 93 | // Worker Deployment Versions are created in Temporal server automatically when 94 | // their first poller arrives to the server. 95 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 96 | message WorkerDeploymentVersionInfo { 97 | // Deprecated. Use `deployment_version`. 98 | string version = 1 [deprecated = true]; 99 | 100 | // The status of the Worker Deployment Version. 101 | temporal.api.enums.v1.WorkerDeploymentVersionStatus status = 14; 102 | 103 | // Required. 104 | WorkerDeploymentVersion deployment_version = 11; 105 | string deployment_name = 2; 106 | google.protobuf.Timestamp create_time = 3; 107 | 108 | // Last time `current_since_time`, `ramping_since_time, or `ramp_percentage` of this version changed. 109 | google.protobuf.Timestamp routing_changed_time = 4; 110 | 111 | // (-- api-linter: core::0140::prepositions=disabled 112 | // aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --) 113 | // Unset if not current. 114 | google.protobuf.Timestamp current_since_time = 5; 115 | 116 | // (-- api-linter: core::0140::prepositions=disabled 117 | // aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. 
--) 118 | // Unset if not ramping. Updated when the version first starts ramping, not on each ramp change. 119 | google.protobuf.Timestamp ramping_since_time = 6; 120 | 121 | // Timestamp when this version first became current or ramping. 122 | google.protobuf.Timestamp first_activation_time = 12; 123 | // Timestamp when this version last stopped being current or ramping. 124 | google.protobuf.Timestamp last_deactivation_time = 13; 125 | 126 | // Range: [0, 100]. Must be zero if the version is not ramping (i.e. `ramping_since_time` is nil). 127 | // Can be in the range [0, 100] if the version is ramping. 128 | float ramp_percentage = 7; 129 | 130 | // All the Task Queues that have ever polled from this Deployment version. 131 | repeated VersionTaskQueueInfo task_queue_infos = 8; 132 | message VersionTaskQueueInfo { 133 | string name = 1; 134 | temporal.api.enums.v1.TaskQueueType type = 2; 135 | } 136 | 137 | // Helps user determine when it is safe to decommission the workers of this 138 | // Version. Not present when version is current or ramping. 139 | // Current limitations: 140 | // - Not supported for Unversioned mode. 141 | // - Periodically refreshed, may have delays up to few minutes (consult the 142 | // last_checked_time value). 143 | // - Refreshed only when version is not current or ramping AND the status is not 144 | // "drained" yet. 145 | // - Once the status is changed to "drained", it is not changed until the Version 146 | // becomes Current or Ramping again, at which time the drainage info is cleared. 147 | // This means if the Version is "drained" but new workflows are sent to it via 148 | // Pinned Versioning Override, the status does not account for those Pinned-override 149 | // executions and remains "drained". 150 | VersionDrainageInfo drainage_info = 9; 151 | 152 | // Arbitrary user-provided metadata attached to this version. 
153 | VersionMetadata metadata = 10; 154 | } 155 | 156 | // Information about workflow drainage to help the user determine when it is safe 157 | // to decommission a Version. Not present while version is current or ramping. 158 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 159 | message VersionDrainageInfo { 160 | // Set to DRAINING when the version first stops accepting new executions (is no longer current or ramping). 161 | // Set to DRAINED when no more open pinned workflows exist on this version. 162 | enums.v1.VersionDrainageStatus status = 1; 163 | // Last time the drainage status changed. 164 | google.protobuf.Timestamp last_changed_time = 2; 165 | // Last time the system checked for drainage of this version. 166 | google.protobuf.Timestamp last_checked_time = 3; 167 | } 168 | 169 | // A Worker Deployment (Deployment, for short) represents all workers serving 170 | // a shared set of Task Queues. Typically, a Deployment represents one service or 171 | // application. 172 | // A Deployment contains multiple Deployment Versions, each representing a different 173 | // version of workers. (see documentation of WorkerDeploymentVersionInfo) 174 | // Deployment records are created in Temporal server automatically when their 175 | // first poller arrives to the server. 176 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 177 | message WorkerDeploymentInfo { 178 | // Identifies a Worker Deployment. Must be unique within the namespace. 179 | string name = 1; 180 | 181 | // Deployment Versions that are currently tracked in this Deployment. 
A DeploymentVersion will be 182 | // cleaned up automatically if all the following conditions meet: 183 | // - It does not receive new executions (is not current or ramping) 184 | // - It has no active pollers (see WorkerDeploymentVersionInfo.pollers_status) 185 | // - It is drained (see WorkerDeploymentVersionInfo.drainage_status) 186 | repeated WorkerDeploymentVersionSummary version_summaries = 2; 187 | 188 | google.protobuf.Timestamp create_time = 3; 189 | 190 | RoutingConfig routing_config = 4; 191 | 192 | // Identity of the last client who modified the configuration of this Deployment. Set to the 193 | // `identity` value sent by APIs such as `SetWorkerDeploymentCurrentVersion` and 194 | // `SetWorkerDeploymentRampingVersion`. 195 | string last_modifier_identity = 5; 196 | 197 | message WorkerDeploymentVersionSummary { 198 | // Deprecated. Use `deployment_version`. 199 | string version = 1 [deprecated = true]; 200 | 201 | // The status of the Worker Deployment Version. 202 | temporal.api.enums.v1.WorkerDeploymentVersionStatus status = 11; 203 | 204 | // Required. 205 | WorkerDeploymentVersion deployment_version = 4; 206 | google.protobuf.Timestamp create_time = 2; 207 | // Deprecated. Use `drainage_info` instead. 208 | enums.v1.VersionDrainageStatus drainage_status = 3; 209 | // Information about workflow drainage to help the user determine when it is safe 210 | // to decommission a Version. Not present while version is current or ramping 211 | VersionDrainageInfo drainage_info = 5; 212 | // Unset if not current. 213 | // (-- api-linter: core::0140::prepositions=disabled 214 | // aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --) 215 | google.protobuf.Timestamp current_since_time = 6; 216 | // Unset if not ramping. Updated when the version first starts ramping, not on each ramp change. 
217 | // (-- api-linter: core::0140::prepositions=disabled 218 | // aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --) 219 | google.protobuf.Timestamp ramping_since_time = 7; 220 | // Last time `current_since_time`, `ramping_since_time, or `ramp_percentage` of this version changed. 221 | google.protobuf.Timestamp routing_update_time = 8; 222 | // Timestamp when this version first became current or ramping. 223 | google.protobuf.Timestamp first_activation_time = 9; 224 | // Timestamp when this version last stopped being current or ramping. 225 | google.protobuf.Timestamp last_deactivation_time = 10; 226 | } 227 | } 228 | 229 | // A Worker Deployment Version (Version, for short) represents a 230 | // version of workers within a Worker Deployment. (see documentation of WorkerDeploymentVersionInfo) 231 | // Version records are created in Temporal server automatically when their 232 | // first poller arrives to the server. 233 | // Experimental. Worker Deployment Versions are experimental and might significantly change in the future. 234 | message WorkerDeploymentVersion { 235 | // A unique identifier for this Version within the Deployment it is a part of. 236 | // Not necessarily unique within the namespace. 237 | // The combination of `deployment_name` and `build_id` uniquely identifies this 238 | // Version within the namespace, because Deployment names are unique within a namespace. 239 | string build_id = 1; 240 | 241 | // Identifies the Worker Deployment this Version is part of. 242 | string deployment_name = 2; 243 | } 244 | 245 | message VersionMetadata { 246 | // Arbitrary key-values. 247 | map entries = 1; 248 | } 249 | 250 | message RoutingConfig { 251 | // Specifies which Deployment Version should receive new workflow executions and tasks of 252 | // existing unversioned or AutoUpgrade workflows. 
253 | // Nil value means no Version in this Deployment (except Ramping Version, if present) receives traffic other than tasks of previously Pinned workflows. In absence of a Current Version, remaining traffic after any ramp (if set) goes to unversioned workers (those with `UNVERSIONED` (or unspecified) `WorkerVersioningMode`.). 254 | // Note: Current Version is overridden by the Ramping Version for a portion of traffic when ramp percentage 255 | // is non-zero (see `ramping_deployment_version` and `ramping_version_percentage`). 256 | temporal.api.deployment.v1.WorkerDeploymentVersion current_deployment_version = 7; 257 | // Deprecated. Use `current_deployment_version`. 258 | string current_version = 1 [deprecated = true]; 259 | 260 | // When ramp percentage is non-zero, that portion of traffic is shifted from the Current Version to the Ramping Version. 261 | // Must always be different from `current_deployment_version` unless both are nil. 262 | // Nil value represents all the unversioned workers (those with `UNVERSIONED` (or unspecified) `WorkerVersioningMode`.) 263 | // Note that it is possible to ramp from one Version to another Version, or from unversioned 264 | // workers to a particular Version, or from a particular Version to unversioned workers. 265 | temporal.api.deployment.v1.WorkerDeploymentVersion ramping_deployment_version = 9; 266 | // Deprecated. Use `ramping_deployment_version`. 267 | string ramping_version = 2 [deprecated = true]; 268 | 269 | // Percentage of tasks that are routed to the Ramping Version instead of the Current Version. 270 | // Valid range: [0, 100]. A 100% value means the Ramping Version is receiving full traffic but 271 | // not yet "promoted" to be the Current Version, likely due to pending validations. 272 | // A 0% value means the Ramping Version is receiving no traffic. 273 | float ramping_version_percentage = 3; 274 | // Last time current version was changed. 
275 | google.protobuf.Timestamp current_version_changed_time = 4; 276 | // Last time ramping version was changed. Not updated if only the ramp percentage changes. 277 | google.protobuf.Timestamp ramping_version_changed_time = 5; 278 | // Last time ramping version percentage was changed. 279 | // If ramping version is changed, this is also updated, even if the percentage stays the same. 280 | google.protobuf.Timestamp ramping_version_percentage_changed_time = 6; 281 | } 282 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/batch_operation.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "BatchOperationProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum BatchOperationType { 13 | BATCH_OPERATION_TYPE_UNSPECIFIED = 0; 14 | BATCH_OPERATION_TYPE_TERMINATE = 1; 15 | BATCH_OPERATION_TYPE_CANCEL = 2; 16 | BATCH_OPERATION_TYPE_SIGNAL = 3; 17 | BATCH_OPERATION_TYPE_DELETE = 4; 18 | BATCH_OPERATION_TYPE_RESET = 5; 19 | BATCH_OPERATION_TYPE_UPDATE_EXECUTION_OPTIONS = 6; 20 | } 21 | 22 | enum BatchOperationState { 23 | BATCH_OPERATION_STATE_UNSPECIFIED = 0; 24 | BATCH_OPERATION_STATE_RUNNING = 1; 25 | BATCH_OPERATION_STATE_COMPLETED = 2; 26 | BATCH_OPERATION_STATE_FAILED = 3; 27 | } 28 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/command_type.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = 
"io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "CommandTypeProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Whenever this list of command types is changed do change the function shouldBufferEvent in mutableStateBuilder.go to make sure to do the correct event ordering. 13 | enum CommandType { 14 | COMMAND_TYPE_UNSPECIFIED = 0; 15 | COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK = 1; 16 | COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK = 2; 17 | COMMAND_TYPE_START_TIMER = 3; 18 | COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION = 4; 19 | COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION = 5; 20 | COMMAND_TYPE_CANCEL_TIMER = 6; 21 | COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION = 7; 22 | COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION = 8; 23 | COMMAND_TYPE_RECORD_MARKER = 9; 24 | COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION = 10; 25 | COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION = 11; 26 | COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION = 12; 27 | COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES = 13; 28 | COMMAND_TYPE_PROTOCOL_MESSAGE = 14; 29 | COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES = 16; 30 | COMMAND_TYPE_SCHEDULE_NEXUS_OPERATION = 17; 31 | COMMAND_TYPE_REQUEST_CANCEL_NEXUS_OPERATION = 18; 32 | } 33 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/common.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "CommonProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum EncodingType { 13 | ENCODING_TYPE_UNSPECIFIED = 0; 14 | ENCODING_TYPE_PROTO3 
= 1; 15 | ENCODING_TYPE_JSON = 2; 16 | } 17 | 18 | enum IndexedValueType { 19 | INDEXED_VALUE_TYPE_UNSPECIFIED = 0; 20 | INDEXED_VALUE_TYPE_TEXT = 1; 21 | INDEXED_VALUE_TYPE_KEYWORD = 2; 22 | INDEXED_VALUE_TYPE_INT = 3; 23 | INDEXED_VALUE_TYPE_DOUBLE = 4; 24 | INDEXED_VALUE_TYPE_BOOL = 5; 25 | INDEXED_VALUE_TYPE_DATETIME = 6; 26 | INDEXED_VALUE_TYPE_KEYWORD_LIST = 7; 27 | } 28 | 29 | enum Severity { 30 | SEVERITY_UNSPECIFIED = 0; 31 | SEVERITY_HIGH = 1; 32 | SEVERITY_MEDIUM = 2; 33 | SEVERITY_LOW = 3; 34 | } 35 | 36 | // State of a callback. 37 | enum CallbackState { 38 | // Default value, unspecified state. 39 | CALLBACK_STATE_UNSPECIFIED = 0; 40 | // Callback is standing by, waiting to be triggered. 41 | CALLBACK_STATE_STANDBY = 1; 42 | // Callback is in the queue waiting to be executed or is currently executing. 43 | CALLBACK_STATE_SCHEDULED = 2; 44 | // Callback has failed with a retryable error and is backing off before the next attempt. 45 | CALLBACK_STATE_BACKING_OFF = 3; 46 | // Callback has failed. 47 | CALLBACK_STATE_FAILED = 4; 48 | // Callback has succeeded. 49 | CALLBACK_STATE_SUCCEEDED = 5; 50 | // Callback is blocked (eg: by circuit breaker). 51 | CALLBACK_STATE_BLOCKED = 6; 52 | } 53 | 54 | // State of a pending Nexus operation. 55 | enum PendingNexusOperationState { 56 | // Default value, unspecified state. 57 | PENDING_NEXUS_OPERATION_STATE_UNSPECIFIED = 0; 58 | // Operation is in the queue waiting to be executed or is currently executing. 59 | PENDING_NEXUS_OPERATION_STATE_SCHEDULED = 1; 60 | // Operation has failed with a retryable error and is backing off before the next attempt. 61 | PENDING_NEXUS_OPERATION_STATE_BACKING_OFF = 2; 62 | // Operation was started and will complete asynchronously. 63 | PENDING_NEXUS_OPERATION_STATE_STARTED = 3; 64 | // Operation is blocked (eg: by circuit breaker). 65 | PENDING_NEXUS_OPERATION_STATE_BLOCKED = 4; 66 | } 67 | 68 | // State of a Nexus operation cancellation. 
69 | enum NexusOperationCancellationState { 70 | // Default value, unspecified state. 71 | NEXUS_OPERATION_CANCELLATION_STATE_UNSPECIFIED = 0; 72 | // Cancellation request is in the queue waiting to be executed or is currently executing. 73 | NEXUS_OPERATION_CANCELLATION_STATE_SCHEDULED = 1; 74 | // Cancellation request has failed with a retryable error and is backing off before the next attempt. 75 | NEXUS_OPERATION_CANCELLATION_STATE_BACKING_OFF = 2; 76 | // Cancellation request succeeded. 77 | NEXUS_OPERATION_CANCELLATION_STATE_SUCCEEDED = 3; 78 | // Cancellation request failed with a non-retryable error. 79 | NEXUS_OPERATION_CANCELLATION_STATE_FAILED = 4; 80 | // The associated operation timed out - exceeded the user supplied schedule-to-close timeout. 81 | NEXUS_OPERATION_CANCELLATION_STATE_TIMED_OUT = 5; 82 | // Cancellation request is blocked (eg: by circuit breaker). 83 | NEXUS_OPERATION_CANCELLATION_STATE_BLOCKED = 6; 84 | } 85 | 86 | enum WorkflowRuleActionScope { 87 | // Default value, unspecified scope. 88 | WORKFLOW_RULE_ACTION_SCOPE_UNSPECIFIED = 0; 89 | // The action will be applied to the entire workflow. 90 | WORKFLOW_RULE_ACTION_SCOPE_WORKFLOW = 1; 91 | // The action will be applied to a specific activity. 92 | WORKFLOW_RULE_ACTION_SCOPE_ACTIVITY = 2; 93 | } 94 | 95 | enum ApplicationErrorCategory { 96 | APPLICATION_ERROR_CATEGORY_UNSPECIFIED = 0; 97 | // Expected application error with little/no severity. 98 | APPLICATION_ERROR_CATEGORY_BENIGN = 1; 99 | } 100 | 101 | // (-- api-linter: core::0216::synonyms=disabled 102 | // aip.dev/not-precedent: It seems we have both state and status, and status is a better fit for workers. 
--) 103 | enum WorkerStatus { 104 | WORKER_STATUS_UNSPECIFIED = 0; 105 | WORKER_STATUS_RUNNING = 1; 106 | WORKER_STATUS_SHUTTING_DOWN = 2; 107 | WORKER_STATUS_SHUTDOWN = 3; 108 | } 109 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/deployment.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "DeploymentProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Specify the reachability level for a deployment so users can decide if it is time to 13 | // decommission the deployment. 14 | enum DeploymentReachability { 15 | // Reachability level is not specified. 16 | DEPLOYMENT_REACHABILITY_UNSPECIFIED = 0; 17 | // The deployment is reachable by new and/or open workflows. The deployment cannot be 18 | // decommissioned safely. 19 | DEPLOYMENT_REACHABILITY_REACHABLE = 1; 20 | // The deployment is not reachable by new or open workflows, but might be still needed by 21 | // Queries sent to closed workflows. The deployment can be decommissioned safely if user does 22 | // not query closed workflows. 23 | DEPLOYMENT_REACHABILITY_CLOSED_WORKFLOWS_ONLY = 2; 24 | // The deployment is not reachable by any workflow because all the workflows who needed this 25 | // deployment went out of retention period. The deployment can be decommissioned safely. 26 | DEPLOYMENT_REACHABILITY_UNREACHABLE = 3; 27 | } 28 | 29 | // (-- api-linter: core::0216::synonyms=disabled 30 | // aip.dev/not-precedent: Call this status because it is . 
--) 31 | // Specify the drainage status for a Worker Deployment Version so users can decide whether they 32 | // can safely decommission the version. 33 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 34 | enum VersionDrainageStatus { 35 | // Drainage Status is not specified. 36 | VERSION_DRAINAGE_STATUS_UNSPECIFIED = 0; 37 | // The Worker Deployment Version is not used by new workflows but is still used by 38 | // open pinned workflows. The version cannot be decommissioned safely. 39 | VERSION_DRAINAGE_STATUS_DRAINING = 1; 40 | // The Worker Deployment Version is not used by new or open workflows, but might be still needed by 41 | // Queries sent to closed workflows. The version can be decommissioned safely if user does 42 | // not query closed workflows. If the user does query closed workflows for some time x after 43 | // workflows are closed, they should decommission the version after it has been drained for that duration. 44 | VERSION_DRAINAGE_STATUS_DRAINED = 2; 45 | } 46 | 47 | // Versioning Mode of a worker is set by the app developer in the worker code, and specifies the 48 | // behavior of the system in the following related aspects: 49 | // - Whether or not Temporal Server considers this worker's version (Build ID) when dispatching 50 | // tasks to it. 51 | // - Whether or not the workflows processed by this worker are versioned using the worker's version. 52 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 53 | enum WorkerVersioningMode { 54 | WORKER_VERSIONING_MODE_UNSPECIFIED = 0; 55 | // Workers with this mode are not distinguished from each other for task routing, even if they 56 | // have different Build IDs. 57 | // Workflows processed by this worker will be unversioned and user needs to use Patching to keep 58 | // the new code compatible with prior versions. 
59 | // This mode is recommended to be used along with Rolling Upgrade deployment strategies. 60 | // Workers with this mode are represented by the special string `__unversioned__` in the APIs. 61 | WORKER_VERSIONING_MODE_UNVERSIONED = 1; 62 | // Workers with this mode are part of a Worker Deployment Version which is identified as 63 | // "<deployment_name>.<build_id>". Such workers are called "versioned" as opposed to 64 | // "unversioned". 65 | // Each Deployment Version is distinguished from other Versions for task routing and users can 66 | // configure Temporal Server to send tasks to a particular Version (see 67 | // `WorkerDeploymentInfo.routing_config`). This mode is the best option for Blue/Green and 68 | // Rainbow strategies (but typically not suitable for Rolling upgrades.) 69 | // Workflow Versioning Behaviors are enabled in this mode: each workflow type must choose 70 | // between the Pinned and AutoUpgrade behaviors. Depending on the chosen behavior, the user may 71 | // or may not need to use Patching to keep the new code compatible with prior versions. (see 72 | // VersioningBehavior enum.) 73 | WORKER_VERSIONING_MODE_VERSIONED = 2; 74 | } 75 | 76 | // (-- api-linter: core::0216::synonyms=disabled 77 | // aip.dev/not-precedent: Call this status because it is . --) 78 | // Specify the status of a Worker Deployment Version. 79 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 80 | enum WorkerDeploymentVersionStatus { 81 | WORKER_DEPLOYMENT_VERSION_STATUS_UNSPECIFIED = 0; 82 | // The Worker Deployment Version has been created inside the Worker Deployment but is not used by any 83 | // workflow executions. These Versions can still have workflows if they have an explicit Versioning Override targeting 84 | // this Version. Such Versioning Override could be set at workflow start time, or at a later time via `UpdateWorkflowExecutionOptions`. 
85 | WORKER_DEPLOYMENT_VERSION_STATUS_INACTIVE = 1; 86 | // The Worker Deployment Version is the current version of the Worker Deployment. All new workflow executions 87 | // and tasks of existing unversioned or AutoUpgrade workflows are routed to this version. 88 | WORKER_DEPLOYMENT_VERSION_STATUS_CURRENT = 2; 89 | // The Worker Deployment Version is the ramping version of the Worker Deployment. A subset of new Pinned workflow executions are 90 | // routed to this version. Moreover, a portion of existing unversioned or AutoUpgrade workflow executions are also routed to this version. 91 | WORKER_DEPLOYMENT_VERSION_STATUS_RAMPING = 3; 92 | // The Worker Deployment Version is not used by new workflows but is still used by 93 | // open pinned workflows. The version cannot be decommissioned safely. 94 | WORKER_DEPLOYMENT_VERSION_STATUS_DRAINING = 4; 95 | // The Worker Deployment Version is not used by new or open workflows, but might be still needed by 96 | // Queries sent to closed workflows. The version can be decommissioned safely if user does 97 | // not query closed workflows. If the user does query closed workflows for some time x after 98 | // workflows are closed, they should decommission the version after it has been drained for that duration. 
99 | WORKER_DEPLOYMENT_VERSION_STATUS_DRAINED = 5; 100 | } 101 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/event_type.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "EventTypeProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Whenever this list of events is changed do change the function shouldBufferEvent in mutableStateBuilder.go to make sure to do the correct event ordering 13 | enum EventType { 14 | // Place holder and should never appear in a Workflow execution history 15 | EVENT_TYPE_UNSPECIFIED = 0; 16 | // Workflow execution has been triggered/started 17 | // It contains Workflow execution inputs, as well as Workflow timeout configurations 18 | EVENT_TYPE_WORKFLOW_EXECUTION_STARTED = 1; 19 | // Workflow execution has successfully completed and contains Workflow execution results 20 | EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED = 2; 21 | // Workflow execution has unsuccessfully completed and contains the Workflow execution error 22 | EVENT_TYPE_WORKFLOW_EXECUTION_FAILED = 3; 23 | // Workflow execution has timed out by the Temporal Server 24 | // Usually due to the Workflow having not been completed within timeout settings 25 | EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT = 4; 26 | // Workflow Task has been scheduled and the SDK client should now be able to process any new history events 27 | EVENT_TYPE_WORKFLOW_TASK_SCHEDULED = 5; 28 | // Workflow Task has started and the SDK client has picked up the Workflow Task and is processing new history events 29 | EVENT_TYPE_WORKFLOW_TASK_STARTED = 6; 30 | // Workflow Task has completed 31 | // 
The SDK client picked up the Workflow Task and processed new history events 32 | // SDK client may or may not ask the Temporal Server to do additional work, such as: 33 | // EVENT_TYPE_ACTIVITY_TASK_SCHEDULED 34 | // EVENT_TYPE_TIMER_STARTED 35 | // EVENT_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES 36 | // EVENT_TYPE_MARKER_RECORDED 37 | // EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED 38 | // EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED 39 | // EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED 40 | // EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED 41 | // EVENT_TYPE_WORKFLOW_EXECUTION_FAILED 42 | // EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED 43 | // EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW 44 | EVENT_TYPE_WORKFLOW_TASK_COMPLETED = 7; 45 | // Workflow Task encountered a timeout 46 | // Either an SDK client with a local cache was not available at the time, or it took too long for the SDK client to process the task 47 | EVENT_TYPE_WORKFLOW_TASK_TIMED_OUT = 8; 48 | // Workflow Task encountered a failure 49 | // Usually this means that the Workflow was non-deterministic 50 | // However, the Workflow reset functionality also uses this event 51 | EVENT_TYPE_WORKFLOW_TASK_FAILED = 9; 52 | // Activity Task was scheduled 53 | // The SDK client should pick up this activity task and execute 54 | // This event type contains activity inputs, as well as activity timeout configurations 55 | EVENT_TYPE_ACTIVITY_TASK_SCHEDULED = 10; 56 | // Activity Task has started executing 57 | // The SDK client has picked up the Activity Task and is processing the Activity invocation 58 | EVENT_TYPE_ACTIVITY_TASK_STARTED = 11; 59 | // Activity Task has finished successfully 60 | // The SDK client has picked up and successfully completed the Activity Task 61 | // This event type contains Activity execution results 62 | EVENT_TYPE_ACTIVITY_TASK_COMPLETED = 12; 63 | // Activity Task has finished unsuccessfully 64 | // The SDK picked up the Activity Task but unsuccessfully 
completed it 65 | // This event type contains Activity execution errors 66 | EVENT_TYPE_ACTIVITY_TASK_FAILED = 13; 67 | // Activity has timed out according to the Temporal Server 68 | // Activity did not complete within the timeout settings 69 | EVENT_TYPE_ACTIVITY_TASK_TIMED_OUT = 14; 70 | // A request to cancel the Activity has occurred 71 | // The SDK client will be able to confirm cancellation of an Activity during an Activity heartbeat 72 | EVENT_TYPE_ACTIVITY_TASK_CANCEL_REQUESTED = 15; 73 | // Activity has been cancelled 74 | EVENT_TYPE_ACTIVITY_TASK_CANCELED = 16; 75 | // A timer has started 76 | EVENT_TYPE_TIMER_STARTED = 17; 77 | // A timer has fired 78 | EVENT_TYPE_TIMER_FIRED = 18; 79 | // A timer has been cancelled 80 | EVENT_TYPE_TIMER_CANCELED = 19; 81 | // A request has been made to cancel the Workflow execution 82 | EVENT_TYPE_WORKFLOW_EXECUTION_CANCEL_REQUESTED = 20; 83 | // SDK client has confirmed the cancellation request and the Workflow execution has been cancelled 84 | EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED = 21; 85 | // Workflow has requested that the Temporal Server try to cancel another Workflow 86 | EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED = 22; 87 | // Temporal Server could not cancel the targeted Workflow 88 | // This is usually because the target Workflow could not be found 89 | EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED = 23; 90 | // Temporal Server has successfully requested the cancellation of the target Workflow 91 | EVENT_TYPE_EXTERNAL_WORKFLOW_EXECUTION_CANCEL_REQUESTED = 24; 92 | // A marker has been recorded. 93 | // This event type is transparent to the Temporal Server 94 | // The Server will only store it and will not try to understand it. 
95 | EVENT_TYPE_MARKER_RECORDED = 25; 96 | // Workflow has received a Signal event 97 | // The event type contains the Signal name, as well as a Signal payload 98 | EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED = 26; 99 | // Workflow execution has been forcefully terminated 100 | // This is usually because the terminate Workflow API was called 101 | EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED = 27; 102 | // Workflow has successfully completed and a new Workflow has been started within the same transaction 103 | // Contains last Workflow execution results as well as new Workflow execution inputs 104 | EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW = 28; 105 | // Temporal Server will try to start a child Workflow 106 | EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED = 29; 107 | // Child Workflow execution cannot be started/triggered 108 | // Usually due to a child Workflow ID collision 109 | EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_FAILED = 30; 110 | // Child Workflow execution has successfully started/triggered 111 | EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_STARTED = 31; 112 | // Child Workflow execution has successfully completed 113 | EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_COMPLETED = 32; 114 | // Child Workflow execution has unsuccessfully completed 115 | EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_FAILED = 33; 116 | // Child Workflow execution has been cancelled 117 | EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_CANCELED = 34; 118 | // Child Workflow execution has timed out by the Temporal Server 119 | EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TIMED_OUT = 35; 120 | // Child Workflow execution has been terminated 121 | EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TERMINATED = 36; 122 | // Temporal Server will try to Signal the targeted Workflow 123 | // Contains the Signal name, as well as a Signal payload 124 | EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED = 37; 125 | // Temporal Server cannot Signal the targeted Workflow 126 | // Usually because the Workflow could not be found 127 | 
EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED = 38; 128 | // Temporal Server has successfully Signaled the targeted Workflow 129 | EVENT_TYPE_EXTERNAL_WORKFLOW_EXECUTION_SIGNALED = 39; 130 | // Workflow search attributes should be updated and synchronized with the visibility store 131 | EVENT_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES = 40; 132 | // An update was admitted. Note that not all admitted updates result in this 133 | // event. See UpdateAdmittedEventOrigin for situations in which this event 134 | // is created. 135 | EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_ADMITTED = 47; 136 | // An update was accepted (i.e. passed validation, perhaps because no validator was defined) 137 | EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_ACCEPTED = 41; 138 | // This event is never written to history. 139 | EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_REJECTED = 42; 140 | // An update completed 141 | EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_COMPLETED = 43; 142 | // Some property or properties of the workflow as a whole have changed by non-workflow code. 143 | // The distinction of external vs. command-based modification is important so the SDK can 144 | // maintain determinism when using the command-based approach. 145 | EVENT_TYPE_WORKFLOW_PROPERTIES_MODIFIED_EXTERNALLY = 44; 146 | // Some property or properties of an already-scheduled activity have changed by non-workflow code. 147 | // The distinction of external vs. command-based modification is important so the SDK can 148 | // maintain determinism when using the command-based approach. 149 | EVENT_TYPE_ACTIVITY_PROPERTIES_MODIFIED_EXTERNALLY = 45; 150 | // Workflow properties modified by user workflow code 151 | EVENT_TYPE_WORKFLOW_PROPERTIES_MODIFIED = 46; 152 | // A Nexus operation was scheduled using a ScheduleNexusOperation command. 153 | EVENT_TYPE_NEXUS_OPERATION_SCHEDULED = 48; 154 | // An asynchronous Nexus operation was started by a Nexus handler. 
155 | EVENT_TYPE_NEXUS_OPERATION_STARTED = 49; 156 | // A Nexus operation completed successfully. 157 | EVENT_TYPE_NEXUS_OPERATION_COMPLETED = 50; 158 | // A Nexus operation failed. 159 | EVENT_TYPE_NEXUS_OPERATION_FAILED = 51; 160 | // A Nexus operation completed as canceled. 161 | EVENT_TYPE_NEXUS_OPERATION_CANCELED = 52; 162 | // A Nexus operation timed out. 163 | EVENT_TYPE_NEXUS_OPERATION_TIMED_OUT = 53; 164 | // A Nexus operation was requested to be canceled using a RequestCancelNexusOperation command. 165 | EVENT_TYPE_NEXUS_OPERATION_CANCEL_REQUESTED = 54; 166 | // Workflow execution options updated by user. 167 | EVENT_TYPE_WORKFLOW_EXECUTION_OPTIONS_UPDATED = 55; 168 | // A cancellation request for a Nexus operation was successfully delivered to the Nexus handler. 169 | EVENT_TYPE_NEXUS_OPERATION_CANCEL_REQUEST_COMPLETED = 56; 170 | // A cancellation request for a Nexus operation resulted in an error. 171 | EVENT_TYPE_NEXUS_OPERATION_CANCEL_REQUEST_FAILED = 57; 172 | } 173 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/failed_cause.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "FailedCauseProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Workflow tasks can fail for various reasons. Note that some of these reasons can only originate 13 | // from the server, and some of them can only originate from the SDK/worker. 
14 | enum WorkflowTaskFailedCause { 15 | WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED = 0; 16 | // Between starting and completing the workflow task (with a workflow completion command), some 17 | // new command (like a signal) was processed into workflow history. The outstanding task will be 18 | // failed with this reason, and a worker must pick up a new task. 19 | WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND = 1; 20 | WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES = 2; 21 | WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES = 3; 22 | WORKFLOW_TASK_FAILED_CAUSE_BAD_START_TIMER_ATTRIBUTES = 4; 23 | WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES = 5; 24 | WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES = 6; 25 | WORKFLOW_TASK_FAILED_CAUSE_BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES = 7; 26 | WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES = 8; 27 | WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES = 9; 28 | WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES = 10; 29 | WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES = 11; 30 | WORKFLOW_TASK_FAILED_CAUSE_START_TIMER_DUPLICATE_ID = 12; 31 | // The worker wishes to fail the task and have the next one be generated on a normal, not sticky 32 | // queue. Generally workers should prefer to use the explicit `ResetStickyTaskQueue` RPC call. 
33 | WORKFLOW_TASK_FAILED_CAUSE_RESET_STICKY_TASK_QUEUE = 13; 34 | WORKFLOW_TASK_FAILED_CAUSE_WORKFLOW_WORKER_UNHANDLED_FAILURE = 14; 35 | WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES = 15; 36 | WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES = 16; 37 | WORKFLOW_TASK_FAILED_CAUSE_FORCE_CLOSE_COMMAND = 17; 38 | WORKFLOW_TASK_FAILED_CAUSE_FAILOVER_CLOSE_COMMAND = 18; 39 | WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_INPUT_SIZE = 19; 40 | WORKFLOW_TASK_FAILED_CAUSE_RESET_WORKFLOW = 20; 41 | WORKFLOW_TASK_FAILED_CAUSE_BAD_BINARY = 21; 42 | WORKFLOW_TASK_FAILED_CAUSE_SCHEDULE_ACTIVITY_DUPLICATE_ID = 22; 43 | WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES = 23; 44 | // The worker encountered a mismatch while replaying history between what was expected, and 45 | // what the workflow code actually did. 46 | WORKFLOW_TASK_FAILED_CAUSE_NON_DETERMINISTIC_ERROR = 24; 47 | WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES = 25; 48 | 49 | // We send the below error codes to users when their requests would violate a size constraint 50 | // of their workflow. We do this to ensure that the state of their workflow does not become too 51 | // large because that can cause severe performance degradation. You can modify the thresholds for 52 | // each of these errors within your dynamic config. 53 | // 54 | // Spawning a new child workflow would cause this workflow to exceed its limit of pending child 55 | // workflows. 56 | WORKFLOW_TASK_FAILED_CAUSE_PENDING_CHILD_WORKFLOWS_LIMIT_EXCEEDED = 26; 57 | // Starting a new activity would cause this workflow to exceed its limit of pending activities 58 | // that we track. 59 | WORKFLOW_TASK_FAILED_CAUSE_PENDING_ACTIVITIES_LIMIT_EXCEEDED = 27; 60 | // A workflow has a buffer of signals that have not yet reached their destination. We return this 61 | // error when sending a new signal would exceed the capacity of this buffer. 
62 | WORKFLOW_TASK_FAILED_CAUSE_PENDING_SIGNALS_LIMIT_EXCEEDED = 28; 63 | // Similarly, we have a buffer of pending requests to cancel other workflows. We return this error 64 | // when our capacity for pending cancel requests is already reached. 65 | WORKFLOW_TASK_FAILED_CAUSE_PENDING_REQUEST_CANCEL_LIMIT_EXCEEDED = 29; 66 | // Workflow execution update message (update.Acceptance, update.Rejection, or update.Response) 67 | // has wrong format, or missing required fields. 68 | WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE = 30; 69 | // Similar to WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, but for updates. 70 | WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_UPDATE = 31; 71 | 72 | // A workflow task completed with an invalid ScheduleNexusOperation command. 73 | WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES = 32; 74 | // A workflow task completed requesting to schedule a Nexus Operation exceeding the server configured limit. 75 | WORKFLOW_TASK_FAILED_CAUSE_PENDING_NEXUS_OPERATIONS_LIMIT_EXCEEDED = 33; 76 | // A workflow task completed with an invalid RequestCancelNexusOperation command. 77 | WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES = 34; 78 | // A workflow task completed requesting a feature that's disabled on the server (either system wide or - typically - 79 | // for the workflow's namespace). 80 | // Check the workflow task failure message for more information. 81 | WORKFLOW_TASK_FAILED_CAUSE_FEATURE_DISABLED = 35; 82 | // A workflow task failed because a grpc message was too large. 
83 | WORKFLOW_TASK_FAILED_CAUSE_GRPC_MESSAGE_TOO_LARGE = 36; 84 | } 85 | 86 | enum StartChildWorkflowExecutionFailedCause { 87 | START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; 88 | START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_WORKFLOW_ALREADY_EXISTS = 1; 89 | START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND = 2; 90 | } 91 | 92 | enum CancelExternalWorkflowExecutionFailedCause { 93 | CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; 94 | CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND = 1; 95 | CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND = 2; 96 | } 97 | 98 | enum SignalExternalWorkflowExecutionFailedCause { 99 | SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; 100 | SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND = 1; 101 | SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND = 2; 102 | // Signal count limit is per workflow and controlled by server dynamic config "history.maximumSignalsPerExecution" 103 | SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_SIGNAL_COUNT_LIMIT_EXCEEDED = 3; 104 | } 105 | 106 | enum ResourceExhaustedCause { 107 | RESOURCE_EXHAUSTED_CAUSE_UNSPECIFIED = 0; 108 | // Caller exceeds request per second limit. 109 | RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT = 1; 110 | // Caller exceeds max concurrent request limit. 111 | RESOURCE_EXHAUSTED_CAUSE_CONCURRENT_LIMIT = 2; 112 | // System overloaded. 113 | RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED = 3; 114 | // Namespace exceeds persistence rate limit. 115 | RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_LIMIT = 4; 116 | // Workflow is busy 117 | RESOURCE_EXHAUSTED_CAUSE_BUSY_WORKFLOW = 5; 118 | // Caller exceeds action per second limit. 119 | RESOURCE_EXHAUSTED_CAUSE_APS_LIMIT = 6; 120 | // Persistence storage limit exceeded. 121 | RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_STORAGE_LIMIT = 7; 122 | // Circuit breaker is open/half-open. 
123 | RESOURCE_EXHAUSTED_CAUSE_CIRCUIT_BREAKER_OPEN = 8; 124 | // Namespace exceeds operations rate limit. 125 | RESOURCE_EXHAUSTED_CAUSE_OPS_LIMIT = 9; 126 | } 127 | 128 | enum ResourceExhaustedScope { 129 | RESOURCE_EXHAUSTED_SCOPE_UNSPECIFIED = 0; 130 | // Exhausted resource is a namespace-level resource. 131 | RESOURCE_EXHAUSTED_SCOPE_NAMESPACE = 1; 132 | // Exhausted resource is a system-level resource. 133 | RESOURCE_EXHAUSTED_SCOPE_SYSTEM = 2; 134 | } 135 | 136 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/namespace.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "NamespaceProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum NamespaceState { 13 | NAMESPACE_STATE_UNSPECIFIED = 0; 14 | NAMESPACE_STATE_REGISTERED = 1; 15 | NAMESPACE_STATE_DEPRECATED = 2; 16 | NAMESPACE_STATE_DELETED = 3; 17 | } 18 | 19 | enum ArchivalState { 20 | ARCHIVAL_STATE_UNSPECIFIED = 0; 21 | ARCHIVAL_STATE_DISABLED = 1; 22 | ARCHIVAL_STATE_ENABLED = 2; 23 | } 24 | 25 | enum ReplicationState { 26 | REPLICATION_STATE_UNSPECIFIED = 0; 27 | REPLICATION_STATE_NORMAL = 1; 28 | REPLICATION_STATE_HANDOVER = 2; 29 | } 30 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/nexus.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = 
"NexusProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // NexusHandlerErrorRetryBehavior allows nexus handlers to explicitly set the retry behavior of a HandlerError. If not 13 | // specified, retry behavior is determined from the error type. For example internal errors are not retryable by default 14 | // unless specified otherwise. 15 | enum NexusHandlerErrorRetryBehavior { 16 | NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_UNSPECIFIED = 0; 17 | // A handler error is explicitly marked as retryable. 18 | NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_RETRYABLE = 1; 19 | // A handler error is explicitly marked as non-retryable. 20 | NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_NON_RETRYABLE = 2; 21 | } 22 | 23 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/query.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "QueryProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum QueryResultType { 13 | QUERY_RESULT_TYPE_UNSPECIFIED = 0; 14 | QUERY_RESULT_TYPE_ANSWERED = 1; 15 | QUERY_RESULT_TYPE_FAILED = 2; 16 | } 17 | 18 | enum QueryRejectCondition { 19 | QUERY_REJECT_CONDITION_UNSPECIFIED = 0; 20 | // None indicates that query should not be rejected. 21 | QUERY_REJECT_CONDITION_NONE = 1; 22 | // NotOpen indicates that query should be rejected if workflow is not open. 23 | QUERY_REJECT_CONDITION_NOT_OPEN = 2; 24 | // NotCompletedCleanly indicates that query should be rejected if workflow did not complete cleanly. 
25 | QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY = 3; 26 | } 27 | 28 | 29 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/reset.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "ResetProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Event types to exclude when reapplying events beyond the reset point. 13 | enum ResetReapplyExcludeType { 14 | RESET_REAPPLY_EXCLUDE_TYPE_UNSPECIFIED = 0; 15 | // Exclude signals when reapplying events beyond the reset point. 16 | RESET_REAPPLY_EXCLUDE_TYPE_SIGNAL = 1; 17 | // Exclude updates when reapplying events beyond the reset point. 18 | RESET_REAPPLY_EXCLUDE_TYPE_UPDATE = 2; 19 | // Exclude nexus events when reapplying events beyond the reset point. 20 | RESET_REAPPLY_EXCLUDE_TYPE_NEXUS = 3; 21 | // Deprecated, unimplemented option. 22 | RESET_REAPPLY_EXCLUDE_TYPE_CANCEL_REQUEST = 4 [deprecated=true]; 23 | } 24 | 25 | // Deprecated: applications should use ResetReapplyExcludeType to specify 26 | // exclusions from this set, and new event types should be added to ResetReapplyExcludeType 27 | // instead of here. 28 | enum ResetReapplyType { 29 | RESET_REAPPLY_TYPE_UNSPECIFIED = 0; 30 | // Signals are reapplied when workflow is reset. 31 | RESET_REAPPLY_TYPE_SIGNAL = 1; 32 | // No events are reapplied when workflow is reset. 33 | RESET_REAPPLY_TYPE_NONE = 2; 34 | // All eligible events are reapplied when workflow is reset. 35 | RESET_REAPPLY_TYPE_ALL_ELIGIBLE = 3; 36 | } 37 | 38 | // Deprecated, see temporal.api.common.v1.ResetOptions. 
39 | enum ResetType { 40 | RESET_TYPE_UNSPECIFIED = 0; 41 | // Resets to event of the first workflow task completed, or if it does not exist, the event after task scheduled. 42 | RESET_TYPE_FIRST_WORKFLOW_TASK = 1; 43 | // Resets to event of the last workflow task completed, or if it does not exist, the event after task scheduled. 44 | RESET_TYPE_LAST_WORKFLOW_TASK = 2; 45 | } 46 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/schedule.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "ScheduleProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | 13 | // ScheduleOverlapPolicy controls what happens when a workflow would be started 14 | // by a schedule, and is already running. 15 | enum ScheduleOverlapPolicy { 16 | SCHEDULE_OVERLAP_POLICY_UNSPECIFIED = 0; 17 | // SCHEDULE_OVERLAP_POLICY_SKIP (default) means don't start anything. When the 18 | // workflow completes, the next scheduled event after that time will be considered. 19 | SCHEDULE_OVERLAP_POLICY_SKIP = 1; 20 | // SCHEDULE_OVERLAP_POLICY_BUFFER_ONE means start the workflow again as soon as the 21 | // current one completes, but only buffer one start in this way. If another start is 22 | // supposed to happen when the workflow is running, and one is already buffered, then 23 | // only the first one will be started after the running workflow finishes. 24 | SCHEDULE_OVERLAP_POLICY_BUFFER_ONE = 2; 25 | // SCHEDULE_OVERLAP_POLICY_BUFFER_ALL means buffer up any number of starts to all 26 | // happen sequentially, immediately after the running workflow completes. 
27 | SCHEDULE_OVERLAP_POLICY_BUFFER_ALL = 3; 28 | // SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER means that if there is another workflow 29 | // running, cancel it, and start the new one after the old one completes cancellation. 30 | SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER = 4; 31 | // SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER means that if there is another workflow 32 | // running, terminate it and start the new one immediately. 33 | SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER = 5; 34 | // SCHEDULE_OVERLAP_POLICY_ALLOW_ALL means start any number of concurrent workflows. 35 | // Note that with this policy, last completion result and last failure will not be 36 | // available since workflows are not sequential. 37 | SCHEDULE_OVERLAP_POLICY_ALLOW_ALL = 6; 38 | } 39 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/task_queue.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "TaskQueueProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum TaskQueueKind { 13 | TASK_QUEUE_KIND_UNSPECIFIED = 0; 14 | // Tasks from a normal workflow task queue always include complete workflow history 15 | // 16 | // The task queue specified by the user is always a normal task queue. There can be as many 17 | // workers as desired for a single normal task queue. All those workers may pick up tasks from 18 | // that queue. 19 | TASK_QUEUE_KIND_NORMAL = 1; 20 | // A sticky queue only includes new history since the last workflow task, and they are 21 | // per-worker. 22 | // 23 | // Sticky queues are created dynamically by each worker during their start up. 
They only exist 24 | // for the lifetime of the worker process. Tasks in a sticky task queue are only available to 25 | // the worker that created the sticky queue. 26 | // 27 | // Sticky queues are only for workflow tasks. There are no sticky task queues for activities. 28 | TASK_QUEUE_KIND_STICKY = 2; 29 | } 30 | 31 | enum TaskQueueType { 32 | TASK_QUEUE_TYPE_UNSPECIFIED = 0; 33 | // Workflow type of task queue. 34 | TASK_QUEUE_TYPE_WORKFLOW = 1; 35 | // Activity type of task queue. 36 | TASK_QUEUE_TYPE_ACTIVITY = 2; 37 | // Task queue type for dispatching Nexus requests. 38 | TASK_QUEUE_TYPE_NEXUS = 3; 39 | } 40 | 41 | // Specifies which category of tasks may reach a worker on a versioned task queue. 42 | // Used both in a reachability query and its response. 43 | // Deprecated. 44 | enum TaskReachability { 45 | TASK_REACHABILITY_UNSPECIFIED = 0; 46 | // There's a possibility for a worker to receive new workflow tasks. Workers should *not* be retired. 47 | TASK_REACHABILITY_NEW_WORKFLOWS = 1; 48 | // There's a possibility for a worker to receive existing workflow and activity tasks from existing workflows. Workers 49 | // should *not* be retired. 50 | // This enum value does not distinguish between open and closed workflows. 51 | TASK_REACHABILITY_EXISTING_WORKFLOWS = 2; 52 | // There's a possibility for a worker to receive existing workflow and activity tasks from open workflows. Workers 53 | // should *not* be retired. 54 | TASK_REACHABILITY_OPEN_WORKFLOWS = 3; 55 | // There's a possibility for a worker to receive existing workflow tasks from closed workflows. Workers may be 56 | // retired depending on application requirements. For example, if there's no need to query closed workflows. 57 | TASK_REACHABILITY_CLOSED_WORKFLOWS = 4; 58 | } 59 | 60 | // Specifies which category of tasks may reach a versioned worker of a certain Build ID. 
61 | // 62 | // Task Reachability is eventually consistent; there may be a delay (up to a few minutes) until it 63 | // converges to the most accurate value but it is designed in a way to take the more conservative 64 | // side until it converges. For example REACHABLE is more conservative than CLOSED_WORKFLOWS_ONLY. 65 | // 66 | // Note: future activities who inherit their workflow's Build ID but not its Task Queue will not be 67 | // accounted for reachability as server cannot know if they'll happen as they do not use 68 | // assignment rules of their Task Queue. Same goes for Child Workflows or Continue-As-New Workflows 69 | // who inherit the parent/previous workflow's Build ID but not its Task Queue. In those cases, make 70 | // sure to query reachability for the parent/previous workflow's Task Queue as well. 71 | enum BuildIdTaskReachability { 72 | // Task reachability is not reported 73 | BUILD_ID_TASK_REACHABILITY_UNSPECIFIED = 0; 74 | // Build ID may be used by new workflows or activities (based on versioning rules), or there MAY 75 | // be open workflows or backlogged activities assigned to it. 76 | BUILD_ID_TASK_REACHABILITY_REACHABLE = 1; 77 | // Build ID does not have open workflows and is not reachable by new workflows, 78 | // but MAY have closed workflows within the namespace retention period. 79 | // Not applicable to activity-only task queues. 80 | BUILD_ID_TASK_REACHABILITY_CLOSED_WORKFLOWS_ONLY = 2; 81 | // Build ID is not used for new executions, nor has it been used by any existing execution 82 | // within the retention period. 83 | BUILD_ID_TASK_REACHABILITY_UNREACHABLE = 3; 84 | } 85 | 86 | enum DescribeTaskQueueMode { 87 | // Unspecified means legacy behavior. 88 | DESCRIBE_TASK_QUEUE_MODE_UNSPECIFIED = 0; 89 | // Enhanced mode reports aggregated results for all partitions, supports Build IDs, and reports richer info. 
90 | DESCRIBE_TASK_QUEUE_MODE_ENHANCED = 1; 91 | } 92 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/update.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "UpdateProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // UpdateWorkflowExecutionLifecycleStage is specified by clients invoking 13 | // Workflow Updates and used to indicate to the server how long the 14 | // client wishes to wait for a return value from the API. If any value other 15 | // than UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED is sent by the 16 | // client then the API will complete before the Update is finished and will 17 | // return a handle to the running Update so that it can later be polled for 18 | // completion. 19 | // If specified stage wasn't reached before server timeout, server returns 20 | // actual stage reached. 21 | enum UpdateWorkflowExecutionLifecycleStage { 22 | // An unspecified value for this enum. 23 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED = 0; 24 | // The API call will not return until the Update request has been admitted 25 | // by the server - it may be the case that due to considerations like load 26 | // or resource limits that an Update is made to wait before the server will 27 | // indicate that it has been received and will be processed. This value 28 | // does not wait for any sort of acknowledgement from a worker. 29 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED = 1; 30 | // The API call will not return until the Update has passed validation on a worker. 
31 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED = 2; 32 | // The API call will not return until the Update has executed to completion 33 | // on a worker and has either been rejected or returned a value or an error. 34 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED = 3; 35 | } 36 | 37 | // Records why a WorkflowExecutionUpdateAdmittedEvent was written to history. 38 | // Note that not all admitted Updates result in this event. 39 | enum UpdateAdmittedEventOrigin { 40 | UPDATE_ADMITTED_EVENT_ORIGIN_UNSPECIFIED = 0; 41 | // The UpdateAdmitted event was created when reapplying events during reset 42 | // or replication. I.e. an accepted Update on one branch of Workflow history 43 | // was converted into an admitted Update on a different branch. 44 | UPDATE_ADMITTED_EVENT_ORIGIN_REAPPLY = 1; 45 | } 46 | -------------------------------------------------------------------------------- /temporal/api/enums/v1/workflow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "WorkflowProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Defines whether to allow re-using a workflow id from a previously *closed* workflow. 13 | // If the request is denied, the server returns a `WorkflowExecutionAlreadyStartedFailure` error. 14 | // 15 | // See `WorkflowIdConflictPolicy` for handling workflow id duplication with a *running* workflow. 16 | enum WorkflowIdReusePolicy { 17 | WORKFLOW_ID_REUSE_POLICY_UNSPECIFIED = 0; 18 | // Allow starting a workflow execution using the same workflow id. 
19 | WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE = 1; 20 | // Allow starting a workflow execution using the same workflow id, only when the last 21 | // execution's final state is one of [terminated, cancelled, timed out, failed]. 22 | WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY = 2; 23 | // Do not permit re-use of the workflow id for this workflow. Future start workflow requests 24 | // could potentially change the policy, allowing re-use of the workflow id. 25 | WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE = 3; 26 | // This option belongs in WorkflowIdConflictPolicy but is here for backwards compatibility. 27 | // If specified, it acts like ALLOW_DUPLICATE, but also the WorkflowId*Conflict*Policy on 28 | // the request is treated as WORKFLOW_ID_CONFLICT_POLICY_TERMINATE_EXISTING. 29 | // If no running workflow, then the behavior is the same as ALLOW_DUPLICATE. 30 | WORKFLOW_ID_REUSE_POLICY_TERMINATE_IF_RUNNING = 4; 31 | } 32 | 33 | // Defines what to do when trying to start a workflow with the same workflow id as a *running* workflow. 34 | // Note that it is *never* valid to have two actively running instances of the same workflow id. 35 | // 36 | // See `WorkflowIdReusePolicy` for handling workflow id duplication with a *closed* workflow. 37 | enum WorkflowIdConflictPolicy { 38 | WORKFLOW_ID_CONFLICT_POLICY_UNSPECIFIED = 0; 39 | // Don't start a new workflow; instead return `WorkflowExecutionAlreadyStartedFailure`. 40 | WORKFLOW_ID_CONFLICT_POLICY_FAIL = 1; 41 | // Don't start a new workflow; instead return a workflow handle for the running workflow. 42 | WORKFLOW_ID_CONFLICT_POLICY_USE_EXISTING = 2; 43 | // Terminate the running workflow before starting a new one. 
44 | WORKFLOW_ID_CONFLICT_POLICY_TERMINATE_EXISTING = 3; 45 | } 46 | 47 | // Defines how child workflows will react to their parent completing 48 | enum ParentClosePolicy { 49 | PARENT_CLOSE_POLICY_UNSPECIFIED = 0; 50 | // The child workflow will also terminate 51 | PARENT_CLOSE_POLICY_TERMINATE = 1; 52 | // The child workflow will do nothing 53 | PARENT_CLOSE_POLICY_ABANDON = 2; 54 | // Cancellation will be requested of the child workflow 55 | PARENT_CLOSE_POLICY_REQUEST_CANCEL = 3; 56 | } 57 | 58 | enum ContinueAsNewInitiator { 59 | CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED = 0; 60 | // The workflow itself requested to continue as new 61 | CONTINUE_AS_NEW_INITIATOR_WORKFLOW = 1; 62 | // The workflow continued as new because it is retrying 63 | CONTINUE_AS_NEW_INITIATOR_RETRY = 2; 64 | // The workflow continued as new because cron has triggered a new execution 65 | CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE = 3; 66 | } 67 | 68 | // (-- api-linter: core::0216::synonyms=disabled 69 | // aip.dev/not-precedent: There is WorkflowExecutionState already in another package. --) 70 | enum WorkflowExecutionStatus { 71 | WORKFLOW_EXECUTION_STATUS_UNSPECIFIED = 0; 72 | // Value 1 is hardcoded in SQL persistence. 
73 | WORKFLOW_EXECUTION_STATUS_RUNNING = 1; 74 | WORKFLOW_EXECUTION_STATUS_COMPLETED = 2; 75 | WORKFLOW_EXECUTION_STATUS_FAILED = 3; 76 | WORKFLOW_EXECUTION_STATUS_CANCELED = 4; 77 | WORKFLOW_EXECUTION_STATUS_TERMINATED = 5; 78 | WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW = 6; 79 | WORKFLOW_EXECUTION_STATUS_TIMED_OUT = 7; 80 | } 81 | 82 | enum PendingActivityState { 83 | PENDING_ACTIVITY_STATE_UNSPECIFIED = 0; 84 | PENDING_ACTIVITY_STATE_SCHEDULED = 1; 85 | PENDING_ACTIVITY_STATE_STARTED = 2; 86 | PENDING_ACTIVITY_STATE_CANCEL_REQUESTED = 3; 87 | // PAUSED means activity is paused on the server, and is not running in the worker 88 | PENDING_ACTIVITY_STATE_PAUSED = 4; 89 | // PAUSE_REQUESTED means activity is currently running on the worker, but paused on the server 90 | PENDING_ACTIVITY_STATE_PAUSE_REQUESTED = 5; 91 | } 92 | 93 | enum PendingWorkflowTaskState { 94 | PENDING_WORKFLOW_TASK_STATE_UNSPECIFIED = 0; 95 | PENDING_WORKFLOW_TASK_STATE_SCHEDULED = 1; 96 | PENDING_WORKFLOW_TASK_STATE_STARTED = 2; 97 | } 98 | 99 | enum HistoryEventFilterType { 100 | HISTORY_EVENT_FILTER_TYPE_UNSPECIFIED = 0; 101 | HISTORY_EVENT_FILTER_TYPE_ALL_EVENT = 1; 102 | HISTORY_EVENT_FILTER_TYPE_CLOSE_EVENT = 2; 103 | } 104 | 105 | enum RetryState { 106 | RETRY_STATE_UNSPECIFIED = 0; 107 | RETRY_STATE_IN_PROGRESS = 1; 108 | RETRY_STATE_NON_RETRYABLE_FAILURE = 2; 109 | RETRY_STATE_TIMEOUT = 3; 110 | RETRY_STATE_MAXIMUM_ATTEMPTS_REACHED = 4; 111 | RETRY_STATE_RETRY_POLICY_NOT_SET = 5; 112 | RETRY_STATE_INTERNAL_SERVER_ERROR = 6; 113 | RETRY_STATE_CANCEL_REQUESTED = 7; 114 | } 115 | 116 | enum TimeoutType { 117 | TIMEOUT_TYPE_UNSPECIFIED = 0; 118 | TIMEOUT_TYPE_START_TO_CLOSE = 1; 119 | TIMEOUT_TYPE_SCHEDULE_TO_START = 2; 120 | TIMEOUT_TYPE_SCHEDULE_TO_CLOSE = 3; 121 | TIMEOUT_TYPE_HEARTBEAT = 4; 122 | } 123 | 124 | // Versioning Behavior specifies if and how a workflow execution moves between Worker Deployment 125 | // Versions. 
The Versioning Behavior of a workflow execution is typically specified by the worker 126 | // who completes the first task of the execution, but is also overridable manually for new and 127 | // existing workflows (see VersioningOverride). 128 | // Experimental. Worker Deployments are experimental and might significantly change in the future. 129 | enum VersioningBehavior { 130 | // Workflow execution does not have a Versioning Behavior and is called Unversioned. This is the 131 | // legacy behavior. An Unversioned workflow's task can go to any Unversioned worker (see 132 | // `WorkerVersioningMode`.) 133 | // User needs to use Patching to keep the new code compatible with prior versions when dealing 134 | // with Unversioned workflows. 135 | VERSIONING_BEHAVIOR_UNSPECIFIED = 0; 136 | // Workflow will start on the Current Deployment Version of its Task Queue, and then 137 | // will be pinned to that same Deployment Version until completion (the Version that 138 | // this Workflow is pinned to is specified in `versioning_info.version`). 139 | // This behavior eliminates most of the compatibility concerns users face when changing their code. 140 | // Patching is not needed when pinned workflows' code changes. 141 | // Can be overridden explicitly via `UpdateWorkflowExecutionOptions` API to move the 142 | // execution to another Deployment Version. 143 | // Activities of `PINNED` workflows are sent to the same Deployment Version. Exception to this 144 | // would be when the activity Task Queue workers are not present in the workflow's Deployment 145 | // Version, in which case the activity will be sent to the Current Deployment Version of its own 146 | // task queue. 147 | VERSIONING_BEHAVIOR_PINNED = 1; 148 | // Workflow will automatically move to the Current Deployment Version of its Task Queue when the 149 | // next workflow task is dispatched. 
150 | // AutoUpgrade behavior is suitable for long-running workflows as it allows them to move to the 151 | // latest Deployment Version, but the user still needs to use Patching to keep the new code 152 | // compatible with prior versions for changed workflow types. 153 | // Activities of `AUTO_UPGRADE` workflows are sent to the Deployment Version of the workflow 154 | // execution (as specified in versioning_info.version based on the last completed 155 | // workflow task). Exception to this would be when the activity Task Queue workers are not 156 | // present in the workflow's Deployment Version, in which case, the activity will be sent to a 157 | // different Deployment Version according to the Current Deployment Version of its own task 158 | // queue. 159 | // Workflows stuck on a backlogged activity will still auto-upgrade if the Current Deployment 160 | // Version of their Task Queue changes, without having to wait for the backlogged activity to 161 | // complete on the old Version. 
162 | VERSIONING_BEHAVIOR_AUTO_UPGRADE = 2; 163 | } 164 | -------------------------------------------------------------------------------- /temporal/api/errordetails/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | // These error details are supplied in google.rpc.Status#details as described in "Google APIs, Error Model" (https://cloud.google.com/apis/design/errors#error_model) 4 | // and extend standard Error Details defined in https://github.com/googleapis/googleapis/blob/master/google/rpc/error_details.proto 5 | 6 | package temporal.api.errordetails.v1; 7 | 8 | option go_package = "go.temporal.io/api/errordetails/v1;errordetails"; 9 | option java_package = "io.temporal.api.errordetails.v1"; 10 | option java_multiple_files = true; 11 | option java_outer_classname = "MessageProto"; 12 | option ruby_package = "Temporalio::Api::ErrorDetails::V1"; 13 | option csharp_namespace = "Temporalio.Api.ErrorDetails.V1"; 14 | 15 | import "google/protobuf/any.proto"; 16 | import "temporal/api/common/v1/message.proto"; 17 | 18 | import "temporal/api/enums/v1/failed_cause.proto"; 19 | import "temporal/api/enums/v1/namespace.proto"; 20 | import "temporal/api/failure/v1/message.proto"; 21 | 22 | message NotFoundFailure { 23 | string current_cluster = 1; 24 | string active_cluster = 2; 25 | } 26 | 27 | message WorkflowExecutionAlreadyStartedFailure { 28 | string start_request_id = 1; 29 | string run_id = 2; 30 | } 31 | 32 | message NamespaceNotActiveFailure { 33 | string namespace = 1; 34 | string current_cluster = 2; 35 | string active_cluster = 3; 36 | } 37 | 38 | // NamespaceUnavailableFailure is returned by the service when a request addresses a namespace that is unavailable. For 39 | // example, when a namespace is in the process of failing over between clusters. 40 | // This is a transient error that should be automatically retried by clients. 
41 | message NamespaceUnavailableFailure { 42 | string namespace = 1; 43 | } 44 | 45 | message NamespaceInvalidStateFailure { 46 | string namespace = 1; 47 | // Current state of the requested namespace. 48 | temporal.api.enums.v1.NamespaceState state = 2; 49 | // Allowed namespace states for requested operation. 50 | // For example NAMESPACE_STATE_DELETED is forbidden for most operations but allowed for DescribeNamespace. 51 | repeated temporal.api.enums.v1.NamespaceState allowed_states = 3; 52 | } 53 | 54 | message NamespaceNotFoundFailure { 55 | string namespace = 1; 56 | } 57 | 58 | message NamespaceAlreadyExistsFailure { 59 | } 60 | 61 | message ClientVersionNotSupportedFailure { 62 | string client_version = 1; 63 | string client_name = 2; 64 | string supported_versions = 3; 65 | } 66 | 67 | message ServerVersionNotSupportedFailure { 68 | string server_version = 1; 69 | string client_supported_server_versions = 2; 70 | } 71 | 72 | message CancellationAlreadyRequestedFailure { 73 | } 74 | 75 | message QueryFailedFailure { 76 | // The full reason for this query failure. May not be available if the response is generated by an old 77 | // SDK. This field can be encoded by the SDK's failure converter to support E2E encryption of messages and stack 78 | // traces. 79 | temporal.api.failure.v1.Failure failure = 1; 80 | } 81 | 82 | message PermissionDeniedFailure { 83 | string reason = 1; 84 | } 85 | 86 | message ResourceExhaustedFailure { 87 | temporal.api.enums.v1.ResourceExhaustedCause cause = 1; 88 | temporal.api.enums.v1.ResourceExhaustedScope scope = 2; 89 | } 90 | 91 | message SystemWorkflowFailure { 92 | // WorkflowId and RunId of the Temporal system workflow performing the underlying operation. 93 | // Looking up the info of the system workflow run may help identify the issue causing the failure. 94 | temporal.api.common.v1.WorkflowExecution workflow_execution = 1; 95 | // Serialized error returned by the system workflow performing the underlying operation. 
96 | string workflow_error = 2; 97 | } 98 | 99 | message WorkflowNotReadyFailure { 100 | } 101 | 102 | message NewerBuildExistsFailure { 103 | // The current default compatible build ID which will receive tasks 104 | string default_build_id = 1; 105 | } 106 | 107 | message MultiOperationExecutionFailure { 108 | // One status for each requested operation from the failed MultiOperation. The failed 109 | // operation(s) have the same error details as if it was executed separately. All other operations have the 110 | // status code `Aborted` and `MultiOperationExecutionAborted` is added to the details field. 111 | repeated OperationStatus statuses = 1; 112 | 113 | // NOTE: `OperationStatus` is modelled after 114 | // [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto). 115 | // 116 | // (-- api-linter: core::0146::any=disabled 117 | // aip.dev/not-precedent: details are meant to hold arbitrary payloads. --) 118 | message OperationStatus { 119 | int32 code = 1; 120 | string message = 2; 121 | repeated google.protobuf.Any details = 3; 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /temporal/api/export/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.export.v1; 4 | 5 | option go_package = "go.temporal.io/api/export/v1;export"; 6 | option java_package = "io.temporal.api.export.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Export::V1"; 10 | option csharp_namespace = "Temporalio.Api.Export.V1"; 11 | 12 | import "temporal/api/history/v1/message.proto"; 13 | 14 | message WorkflowExecution { 15 | temporal.api.history.v1.History history = 1; 16 | } 17 | 18 | // WorkflowExecutions is used by the Cloud Export feature to deserialize 19 | // the exported file. 
It encapsulates a collection of workflow execution information. 20 | message WorkflowExecutions { 21 | repeated WorkflowExecution items = 1; 22 | } 23 | 24 | -------------------------------------------------------------------------------- /temporal/api/failure/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.failure.v1; 4 | 5 | option go_package = "go.temporal.io/api/failure/v1;failure"; 6 | option java_package = "io.temporal.api.failure.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Failure::V1"; 10 | option csharp_namespace = "Temporalio.Api.Failure.V1"; 11 | 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/api/enums/v1/workflow.proto"; 14 | import "temporal/api/enums/v1/nexus.proto"; 15 | import "temporal/api/enums/v1/common.proto"; 16 | 17 | import "google/protobuf/duration.proto"; 18 | 19 | message ApplicationFailureInfo { 20 | string type = 1; 21 | bool non_retryable = 2; 22 | temporal.api.common.v1.Payloads details = 3; 23 | // next_retry_delay can be used by the client to override the activity 24 | // retry interval calculated by the retry policy. Retry attempts will 25 | // still be subject to the maximum retries limit and total time limit 26 | // defined by the policy. 
27 | google.protobuf.Duration next_retry_delay = 4; 28 | temporal.api.enums.v1.ApplicationErrorCategory category = 5; 29 | } 30 | 31 | message TimeoutFailureInfo { 32 | temporal.api.enums.v1.TimeoutType timeout_type = 1; 33 | temporal.api.common.v1.Payloads last_heartbeat_details = 2; 34 | } 35 | 36 | message CanceledFailureInfo { 37 | temporal.api.common.v1.Payloads details = 1; 38 | } 39 | 40 | message TerminatedFailureInfo { 41 | } 42 | 43 | message ServerFailureInfo { 44 | bool non_retryable = 1; 45 | } 46 | 47 | message ResetWorkflowFailureInfo { 48 | temporal.api.common.v1.Payloads last_heartbeat_details = 1; 49 | } 50 | 51 | message ActivityFailureInfo { 52 | int64 scheduled_event_id = 1; 53 | int64 started_event_id = 2; 54 | string identity = 3; 55 | temporal.api.common.v1.ActivityType activity_type = 4; 56 | string activity_id = 5; 57 | temporal.api.enums.v1.RetryState retry_state = 6; 58 | } 59 | 60 | message ChildWorkflowExecutionFailureInfo { 61 | string namespace = 1; 62 | temporal.api.common.v1.WorkflowExecution workflow_execution = 2; 63 | temporal.api.common.v1.WorkflowType workflow_type = 3; 64 | int64 initiated_event_id = 4; 65 | int64 started_event_id = 5; 66 | temporal.api.enums.v1.RetryState retry_state = 6; 67 | } 68 | 69 | message NexusOperationFailureInfo { 70 | // The NexusOperationScheduled event ID. 71 | int64 scheduled_event_id = 1; 72 | // Endpoint name. 73 | string endpoint = 2; 74 | // Service name. 75 | string service = 3; 76 | // Operation name. 77 | string operation = 4; 78 | // Operation ID - may be empty if the operation completed synchronously. 79 | // 80 | // Deprecated. Renamed to operation_token. 81 | string operation_id = 5 [deprecated = true]; 82 | // Operation token - may be empty if the operation completed synchronously. 
83 | string operation_token = 6; 84 | } 85 | 86 | message NexusHandlerFailureInfo { 87 | // The Nexus error type as defined in the spec: 88 | // https://github.com/nexus-rpc/api/blob/main/SPEC.md#predefined-handler-errors. 89 | string type = 1; 90 | // Retry behavior, defaults to the retry behavior of the error type as defined in the spec. 91 | temporal.api.enums.v1.NexusHandlerErrorRetryBehavior retry_behavior = 2; 92 | } 93 | 94 | message Failure { 95 | string message = 1; 96 | // The source this Failure originated in, e.g. TypeScriptSDK / JavaSDK 97 | // In some SDKs this is used to rehydrate the stack trace into an exception object. 98 | string source = 2; 99 | string stack_trace = 3; 100 | // Alternative way to supply `message` and `stack_trace` and possibly other attributes, used for encryption of 101 | // errors originating in user code which might contain sensitive information. 102 | // The `encoded_attributes` Payload could represent any serializable object, e.g. JSON object or a `Failure` proto 103 | // message. 104 | // 105 | // SDK authors: 106 | // - The SDK should provide a default `encodeFailureAttributes` and `decodeFailureAttributes` implementation that: 107 | // - Uses a JSON object to represent `{ message, stack_trace }`. 108 | // - Overwrites the original message with "Encoded failure" to indicate that more information could be extracted. 109 | // - Overwrites the original stack_trace with an empty string. 110 | // - The resulting JSON object is converted to Payload using the default PayloadConverter and should be processed 111 | // by the user-provided PayloadCodec 112 | // 113 | // - If there's demand, we could allow overriding the default SDK implementation to encode other opaque Failure attributes. 
114 | // (-- api-linter: core::0203::optional=disabled --) 115 | temporal.api.common.v1.Payload encoded_attributes = 20; 116 | Failure cause = 4; 117 | oneof failure_info { 118 | ApplicationFailureInfo application_failure_info = 5; 119 | TimeoutFailureInfo timeout_failure_info = 6; 120 | CanceledFailureInfo canceled_failure_info = 7; 121 | TerminatedFailureInfo terminated_failure_info = 8; 122 | ServerFailureInfo server_failure_info = 9; 123 | ResetWorkflowFailureInfo reset_workflow_failure_info = 10; 124 | ActivityFailureInfo activity_failure_info = 11; 125 | ChildWorkflowExecutionFailureInfo child_workflow_execution_failure_info = 12; 126 | NexusOperationFailureInfo nexus_operation_execution_failure_info = 13; 127 | NexusHandlerFailureInfo nexus_handler_failure_info = 14; 128 | } 129 | } 130 | 131 | message MultiOperationExecutionAborted {} 132 | -------------------------------------------------------------------------------- /temporal/api/filter/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.filter.v1; 4 | 5 | option go_package = "go.temporal.io/api/filter/v1;filter"; 6 | option java_package = "io.temporal.api.filter.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Filter::V1"; 10 | option csharp_namespace = "Temporalio.Api.Filter.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | 14 | import "temporal/api/enums/v1/workflow.proto"; 15 | 16 | message WorkflowExecutionFilter { 17 | string workflow_id = 1; 18 | string run_id = 2; 19 | } 20 | 21 | message WorkflowTypeFilter { 22 | string name = 1; 23 | } 24 | 25 | message StartTimeFilter { 26 | google.protobuf.Timestamp earliest_time = 1; 27 | google.protobuf.Timestamp latest_time = 2; 28 | } 29 | 30 | message StatusFilter { 31 | temporal.api.enums.v1.WorkflowExecutionStatus status = 1; 32 | } 33 | 
-------------------------------------------------------------------------------- /temporal/api/namespace/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.namespace.v1; 4 | 5 | option go_package = "go.temporal.io/api/namespace/v1;namespace"; 6 | option java_package = "io.temporal.api.namespace.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Namespace::V1"; 10 | option csharp_namespace = "Temporalio.Api.Namespace.V1"; 11 | 12 | import "google/protobuf/duration.proto"; 13 | import "google/protobuf/timestamp.proto"; 14 | 15 | import "temporal/api/enums/v1/namespace.proto"; 16 | 17 | 18 | message NamespaceInfo { 19 | string name = 1; 20 | temporal.api.enums.v1.NamespaceState state = 2; 21 | string description = 3; 22 | string owner_email = 4; 23 | // A key-value map for any customized purpose. 24 | map<string, string> data = 5; 25 | string id = 6; 26 | // All capabilities the namespace supports. 27 | Capabilities capabilities = 7; 28 | 29 | // Namespace capability details. Should contain what features are enabled in a namespace. 30 | message Capabilities { 31 | // True if the namespace supports eager workflow start. 32 | bool eager_workflow_start = 1; 33 | // True if the namespace supports sync update 34 | bool sync_update = 2; 35 | // True if the namespace supports async update 36 | bool async_update = 3; 37 | } 38 | 39 | // Whether scheduled workflows are supported on this namespace. This is only needed 40 | // temporarily while the feature is experimental, so we can give it a high tag. 41 | bool supports_schedules = 100; 42 | } 43 | 44 | message NamespaceConfig { 45 | google.protobuf.Duration workflow_execution_retention_ttl = 1; 46 | BadBinaries bad_binaries = 2; 47 | // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used.
48 | temporal.api.enums.v1.ArchivalState history_archival_state = 3; 49 | string history_archival_uri = 4; 50 | // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. 51 | temporal.api.enums.v1.ArchivalState visibility_archival_state = 5; 52 | string visibility_archival_uri = 6; 53 | // Map from field name to alias. 54 | map<string, string> custom_search_attribute_aliases = 7; 55 | } 56 | 57 | message BadBinaries { 58 | map<string, BadBinaryInfo> binaries = 1; 59 | } 60 | 61 | message BadBinaryInfo { 62 | string reason = 1; 63 | string operator = 2; 64 | google.protobuf.Timestamp create_time = 3; 65 | } 66 | 67 | message UpdateNamespaceInfo { 68 | string description = 1; 69 | string owner_email = 2; 70 | // A key-value map for any customized purpose. 71 | // If data already exists on the namespace, 72 | // this will merge with the existing key values. 73 | map<string, string> data = 3; 74 | // New namespace state, server will reject if transition is not allowed. 75 | // Allowed transitions are: 76 | // Registered -> [ Deleted | Deprecated | Handover ] 77 | // Handover -> [ Registered ] 78 | // Default is NAMESPACE_STATE_UNSPECIFIED which is do not change state. 79 | temporal.api.enums.v1.NamespaceState state = 4; 80 | } 81 | 82 | message NamespaceFilter { 83 | // By default namespaces in NAMESPACE_STATE_DELETED state are not included. 84 | // Setting include_deleted to true will include deleted namespaces. 85 | // Note: Namespace is in NAMESPACE_STATE_DELETED state when it was deleted from the system but associated data is not deleted yet.
86 | bool include_deleted = 1; 87 | } 88 | -------------------------------------------------------------------------------- /temporal/api/nexus/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.nexus.v1; 4 | 5 | option go_package = "go.temporal.io/api/nexus/v1;nexus"; 6 | option java_package = "io.temporal.api.nexus.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Nexus::V1"; 10 | option csharp_namespace = "Temporalio.Api.Nexus.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | import "temporal/api/common/v1/message.proto"; 14 | import "temporal/api/enums/v1/nexus.proto"; 15 | 16 | // A general purpose failure message. 17 | // See: https://github.com/nexus-rpc/api/blob/main/SPEC.md#failure 18 | message Failure { 19 | string message = 1; 20 | map metadata = 2; 21 | // UTF-8 encoded JSON serializable details. 22 | bytes details = 3; 23 | } 24 | 25 | message HandlerError { 26 | // See https://github.com/nexus-rpc/api/blob/main/SPEC.md#predefined-handler-errors. 27 | string error_type = 1; 28 | Failure failure = 2; 29 | // Retry behavior, defaults to the retry behavior of the error type as defined in the spec. 30 | temporal.api.enums.v1.NexusHandlerErrorRetryBehavior retry_behavior = 3; 31 | } 32 | 33 | message UnsuccessfulOperationError { 34 | // See https://github.com/nexus-rpc/api/blob/main/SPEC.md#operationinfo. 35 | string operation_state = 1; 36 | Failure failure = 2; 37 | } 38 | 39 | message Link { 40 | // See https://github.com/nexus-rpc/api/blob/main/SPEC.md#links. 41 | string url = 1; 42 | string type = 2; 43 | } 44 | 45 | // A request to start an operation. 46 | message StartOperationRequest { 47 | // Name of service to start the operation in. 48 | string service = 1; 49 | // Type of operation to start. 
50 | string operation = 2; 51 | // A request ID that can be used as an idempotency key. 52 | string request_id = 3; 53 | // Callback URL to call upon completion if the started operation is async. 54 | string callback = 4; 55 | // Full request body from the incoming HTTP request. 56 | temporal.api.common.v1.Payload payload = 5; 57 | // Header that is expected to be attached to the callback request when the operation completes. 58 | map<string, string> callback_header = 6; 59 | // Links contain caller information and can be attached to the operations started by the handler. 60 | repeated Link links = 7; 61 | } 62 | 63 | // A request to cancel an operation. 64 | message CancelOperationRequest { 65 | // Service name. 66 | string service = 1; 67 | // Type of operation to cancel. 68 | string operation = 2; 69 | // Operation ID as originally generated by a Handler. 70 | // 71 | // Deprecated. Renamed to operation_token. 72 | string operation_id = 3 [deprecated = true]; 73 | 74 | // Operation token as originally generated by a Handler. 75 | string operation_token = 4; 76 | } 77 | 78 | // A Nexus request. 79 | message Request { 80 | // Headers extracted from the original request in the Temporal frontend. 81 | // When using Nexus over HTTP, this includes the request's HTTP headers ignoring multiple values. 82 | map<string, string> header = 1; 83 | 84 | // The timestamp when the request was scheduled in the frontend. 85 | // (-- api-linter: core::0142::time-field-names=disabled 86 | // aip.dev/not-precedent: Not following linter rules. --) 87 | google.protobuf.Timestamp scheduled_time = 2; 88 | 89 | oneof variant { 90 | StartOperationRequest start_operation = 3; 91 | CancelOperationRequest cancel_operation = 4; 92 | } 93 | } 94 | 95 | // Response variant for StartOperationRequest. 96 | message StartOperationResponse { 97 | // An operation completed successfully.
98 | message Sync { 99 | temporal.api.common.v1.Payload payload = 1; 100 | repeated Link links = 2; 101 | } 102 | 103 | // The operation will complete asynchronously. 104 | // The returned ID can be used to reference this operation. 105 | message Async { 106 | // Deprecated. Renamed to operation_token. 107 | string operation_id = 1 [deprecated = true]; 108 | repeated Link links = 2; 109 | string operation_token = 3; 110 | } 111 | 112 | oneof variant { 113 | Sync sync_success = 1; 114 | Async async_success = 2; 115 | // The operation completed unsuccessfully (failed or canceled). 116 | UnsuccessfulOperationError operation_error = 3; 117 | } 118 | } 119 | 120 | // Response variant for CancelOperationRequest. 121 | message CancelOperationResponse { 122 | } 123 | 124 | // A response indicating that the handler has successfully processed a request. 125 | message Response { 126 | // Variant must correlate to the corresponding Request's variant. 127 | oneof variant { 128 | StartOperationResponse start_operation = 1; 129 | CancelOperationResponse cancel_operation = 2; 130 | } 131 | } 132 | 133 | // A cluster-global binding from an endpoint ID to a target for dispatching incoming Nexus requests. 134 | message Endpoint { 135 | // Data version for this endpoint, incremented for every update issued via the UpdateNexusEndpoint API. 136 | int64 version = 1; 137 | // Unique server-generated endpoint ID. 138 | string id = 2; 139 | // Spec for the endpoint. 140 | EndpointSpec spec = 3; 141 | 142 | // The date and time when the endpoint was created. 143 | // (-- api-linter: core::0142::time-field-names=disabled 144 | // aip.dev/not-precedent: Not following linter rules. --) 145 | google.protobuf.Timestamp created_time = 4; 146 | 147 | // The date and time when the endpoint was last modified. 148 | // Will not be set if the endpoint has never been modified. 149 | // (-- api-linter: core::0142::time-field-names=disabled 150 | // aip.dev/not-precedent: Not following linter rules. 
--) 151 | google.protobuf.Timestamp last_modified_time = 5; 152 | 153 | // Server exposed URL prefix for invocation of operations on this endpoint. 154 | // This doesn't include the protocol, hostname or port as the server does not know how it should be accessed 155 | // publicly. The URL is stable in the face of endpoint renames. 156 | string url_prefix = 6; 157 | } 158 | 159 | // Contains mutable fields for an Endpoint. 160 | message EndpointSpec { 161 | // Endpoint name, unique for this cluster. Must match `[a-zA-Z_][a-zA-Z0-9_]*`. 162 | // Renaming an endpoint breaks all workflow callers that reference this endpoint, causing operations to fail. 163 | string name = 1; 164 | 165 | // Markdown description serialized as a single JSON string. 166 | // If the Payload is encrypted, the UI and CLI may decrypt with the configured codec server endpoint. 167 | // By default, the server enforces a limit of 20,000 bytes for this entire payload. 168 | temporal.api.common.v1.Payload description = 2; 169 | 170 | // Target to route requests to. 171 | EndpointTarget target = 3; 172 | } 173 | 174 | // Target to route requests to. 175 | message EndpointTarget { 176 | // Target a worker polling on a Nexus task queue in a specific namespace. 177 | message Worker { 178 | // Namespace to route requests to. 179 | string namespace = 1; 180 | // Nexus task queue to route requests to. 181 | string task_queue = 2; 182 | } 183 | 184 | // Target an external server by URL. 185 | // At a later point, this will support providing credentials, in the meantime, an http.RoundTripper can be injected 186 | // into the server to modify the request. 187 | message External { 188 | // URL to call. 
189 | string url = 1; 190 | } 191 | 192 | oneof variant { 193 | Worker worker = 1; 194 | External external = 2; 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /temporal/api/operatorservice/v1/request_response.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.operatorservice.v1; 4 | 5 | option go_package = "go.temporal.io/api/operatorservice/v1;operatorservice"; 6 | option java_package = "io.temporal.api.operatorservice.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "RequestResponseProto"; 9 | option ruby_package = "Temporalio::Api::OperatorService::V1"; 10 | option csharp_namespace = "Temporalio.Api.OperatorService.V1"; 11 | 12 | import "temporal/api/enums/v1/common.proto"; 13 | import "temporal/api/nexus/v1/message.proto"; 14 | import "google/protobuf/duration.proto"; 15 | 16 | // (-- Search Attribute --) 17 | 18 | message AddSearchAttributesRequest { 19 | // Mapping between search attribute name and its IndexedValueType. 20 | map search_attributes = 1; 21 | string namespace = 2; 22 | } 23 | 24 | message AddSearchAttributesResponse { 25 | } 26 | 27 | message RemoveSearchAttributesRequest { 28 | // Search attribute names to delete. 29 | repeated string search_attributes = 1; 30 | string namespace = 2; 31 | } 32 | 33 | message RemoveSearchAttributesResponse { 34 | } 35 | 36 | message ListSearchAttributesRequest { 37 | string namespace = 1; 38 | } 39 | 40 | message ListSearchAttributesResponse { 41 | // Mapping between custom (user-registered) search attribute name to its IndexedValueType. 42 | map custom_attributes = 1; 43 | // Mapping between system (predefined) search attribute name to its IndexedValueType. 44 | map system_attributes = 2; 45 | // Mapping from the attribute name to the visibility storage native type. 
46 | map storage_schema = 3; 47 | } 48 | 49 | message DeleteNamespaceRequest { 50 | // Only one of namespace or namespace_id must be specified to identify namespace. 51 | string namespace = 1; 52 | string namespace_id = 2; 53 | // If provided, the deletion of namespace info will be delayed for the given duration (0 means no delay). 54 | // If not provided, the default delay configured in the cluster will be used. 55 | google.protobuf.Duration namespace_delete_delay = 3; 56 | } 57 | 58 | message DeleteNamespaceResponse { 59 | // Temporary namespace name that is used during reclaim resources step. 60 | string deleted_namespace = 1; 61 | } 62 | 63 | message AddOrUpdateRemoteClusterRequest { 64 | // Frontend Address is a cross cluster accessible address for gRPC traffic. This field is required. 65 | string frontend_address = 1; 66 | // Flag to enable / disable the cross cluster connection. 67 | bool enable_remote_cluster_connection = 2; 68 | // Frontend HTTP Address is a cross cluster accessible address for HTTP traffic. This field is optional. If not provided 69 | // on update, the existing HTTP address will be removed. 70 | string frontend_http_address = 3; 71 | } 72 | 73 | message AddOrUpdateRemoteClusterResponse { 74 | } 75 | 76 | message RemoveRemoteClusterRequest { 77 | // Remote cluster name to be removed. 78 | string cluster_name = 1; 79 | } 80 | 81 | message RemoveRemoteClusterResponse { 82 | } 83 | 84 | message ListClustersRequest { 85 | int32 page_size = 1; 86 | bytes next_page_token = 2; 87 | } 88 | 89 | message ListClustersResponse { 90 | // List of all cluster information 91 | repeated ClusterMetadata clusters = 1; 92 | bytes next_page_token = 4; 93 | } 94 | 95 | message ClusterMetadata { 96 | // Name of the cluster name. 97 | string cluster_name = 1; 98 | // Id of the cluster. 99 | string cluster_id = 2; 100 | // gRPC address. 101 | string address = 3; 102 | // HTTP address, if one exists. 
103 | string http_address = 7; 104 | // A unique failover version across all connected clusters. 105 | int64 initial_failover_version = 4; 106 | // History service shard number. 107 | int32 history_shard_count = 5; 108 | // A flag to indicate if a connection is active. 109 | bool is_connection_enabled = 6; 110 | } 111 | 112 | message GetNexusEndpointRequest { 113 | // Server-generated unique endpoint ID. 114 | string id = 1; 115 | } 116 | 117 | message GetNexusEndpointResponse { 118 | temporal.api.nexus.v1.Endpoint endpoint = 1; 119 | } 120 | 121 | message CreateNexusEndpointRequest { 122 | // Endpoint definition to create. 123 | temporal.api.nexus.v1.EndpointSpec spec = 1; 124 | } 125 | 126 | message CreateNexusEndpointResponse { 127 | // Data post acceptance. Can be used to issue additional updates to this record. 128 | temporal.api.nexus.v1.Endpoint endpoint = 1; 129 | } 130 | 131 | message UpdateNexusEndpointRequest { 132 | // Server-generated unique endpoint ID. 133 | string id = 1; 134 | // Data version for this endpoint. Must match current version. 135 | int64 version = 2; 136 | 137 | temporal.api.nexus.v1.EndpointSpec spec = 3; 138 | } 139 | 140 | message UpdateNexusEndpointResponse { 141 | // Data post acceptance. Can be used to issue additional updates to this record. 142 | temporal.api.nexus.v1.Endpoint endpoint = 1; 143 | } 144 | 145 | message DeleteNexusEndpointRequest { 146 | // Server-generated unique endpoint ID. 147 | string id = 1; 148 | // Data version for this endpoint. Must match current version. 149 | int64 version = 2; 150 | } 151 | 152 | message DeleteNexusEndpointResponse { 153 | } 154 | 155 | message ListNexusEndpointsRequest { 156 | int32 page_size = 1; 157 | // To get the next page, pass in `ListNexusEndpointsResponse.next_page_token` from the previous page's 158 | // response, the token will be empty if there's no other page. 
159 | // Note: the last page may be empty if the total number of endpoints registered is a multiple of the page size. 160 | bytes next_page_token = 2; 161 | // Name of the incoming endpoint to filter on - optional. Specifying this will result in zero or one results. 162 | // (-- api-linter: core::203::field-behavior-required=disabled 163 | // aip.dev/not-precedent: Not following linter rules. --) 164 | string name = 3; 165 | } 166 | 167 | message ListNexusEndpointsResponse { 168 | // Token for getting the next page. 169 | bytes next_page_token = 1; 170 | repeated temporal.api.nexus.v1.Endpoint endpoints = 2; 171 | } 172 | -------------------------------------------------------------------------------- /temporal/api/operatorservice/v1/service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.operatorservice.v1; 4 | 5 | option go_package = "go.temporal.io/api/operatorservice/v1;operatorservice"; 6 | option java_package = "io.temporal.api.operatorservice.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "ServiceProto"; 9 | option ruby_package = "Temporalio::Api::OperatorService::V1"; 10 | option csharp_namespace = "Temporalio.Api.OperatorService.V1"; 11 | 12 | 13 | import "temporal/api/operatorservice/v1/request_response.proto"; 14 | import "google/api/annotations.proto"; 15 | 16 | // OperatorService API defines how Temporal SDKs and other clients interact with the Temporal server 17 | // to perform administrative functions like registering a search attribute or a namespace. 18 | // APIs in this file could be not compatible with Temporal Cloud, hence it's usage in SDKs should be limited by 19 | // designated APIs that clearly state that they shouldn't be used by the main Application (Workflows & Activities) framework. 20 | service OperatorService { 21 | // (-- Search Attribute --) 22 | 23 | // AddSearchAttributes add custom search attributes. 
24 | // 25 | // Returns ALREADY_EXISTS status code if a Search Attribute with any of the specified names already exists 26 | // Returns INTERNAL status code with temporal.api.errordetails.v1.SystemWorkflowFailure in Error Details if registration process fails, 27 | rpc AddSearchAttributes (AddSearchAttributesRequest) returns (AddSearchAttributesResponse) { 28 | } 29 | 30 | // RemoveSearchAttributes removes custom search attributes. 31 | // 32 | // Returns NOT_FOUND status code if a Search Attribute with any of the specified names is not registered 33 | rpc RemoveSearchAttributes (RemoveSearchAttributesRequest) returns (RemoveSearchAttributesResponse) { 34 | } 35 | 36 | // ListSearchAttributes returns comprehensive information about search attributes. 37 | rpc ListSearchAttributes (ListSearchAttributesRequest) returns (ListSearchAttributesResponse) { 38 | option (google.api.http) = { 39 | get: "/cluster/namespaces/{namespace}/search-attributes" 40 | additional_bindings { 41 | get: "/api/v1/namespaces/{namespace}/search-attributes" 42 | } 43 | }; 44 | } 45 | 46 | // DeleteNamespace synchronously deletes a namespace and asynchronously reclaims all namespace resources. 47 | rpc DeleteNamespace (DeleteNamespaceRequest) returns (DeleteNamespaceResponse) { 48 | } 49 | 50 | // AddOrUpdateRemoteCluster adds or updates remote cluster. 51 | rpc AddOrUpdateRemoteCluster(AddOrUpdateRemoteClusterRequest) returns (AddOrUpdateRemoteClusterResponse) { 52 | } 53 | 54 | // RemoveRemoteCluster removes remote cluster. 55 | rpc RemoveRemoteCluster(RemoveRemoteClusterRequest) returns (RemoveRemoteClusterResponse) { 56 | } 57 | 58 | // ListClusters returns information about Temporal clusters. 59 | rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { 60 | } 61 | 62 | // Get a registered Nexus endpoint by ID. The returned version can be used for optimistic updates. 
63 | rpc GetNexusEndpoint(GetNexusEndpointRequest) returns (GetNexusEndpointResponse) { 64 | option (google.api.http) = { 65 | get: "/cluster/nexus/endpoints/{id}" 66 | additional_bindings { 67 | get: "/api/v1/nexus/endpoints/{id}" 68 | } 69 | }; 70 | } 71 | 72 | // Create a Nexus endpoint. This will fail if an endpoint with the same name is already registered with a status of 73 | // ALREADY_EXISTS. 74 | // Returns the created endpoint with its initial version. You may use this version for subsequent updates. 75 | rpc CreateNexusEndpoint(CreateNexusEndpointRequest) returns (CreateNexusEndpointResponse) { 76 | option (google.api.http) = { 77 | post: "/cluster/nexus/endpoints" 78 | body: "*" 79 | additional_bindings { 80 | post: "/api/v1/nexus/endpoints" 81 | body: "*" 82 | } 83 | }; 84 | } 85 | 86 | // Optimistically update a Nexus endpoint based on provided version as obtained via the `GetNexusEndpoint` or 87 | // `ListNexusEndpointResponse` APIs. This will fail with a status of FAILED_PRECONDITION if the version does not 88 | // match. 89 | // Returns the updated endpoint with its updated version. You may use this version for subsequent updates. You don't 90 | // need to increment the version yourself. The server will increment the version for you after each update. 91 | rpc UpdateNexusEndpoint(UpdateNexusEndpointRequest) returns (UpdateNexusEndpointResponse) { 92 | option (google.api.http) = { 93 | post: "/cluster/nexus/endpoints/{id}/update" 94 | body: "*" 95 | additional_bindings { 96 | post: "/api/v1/nexus/endpoints/{id}/update" 97 | body: "*" 98 | } 99 | }; 100 | } 101 | 102 | // Delete an incoming Nexus service by ID. 
103 | rpc DeleteNexusEndpoint(DeleteNexusEndpointRequest) returns (DeleteNexusEndpointResponse) { 104 | option (google.api.http) = { 105 | delete: "/cluster/nexus/endpoints/{id}" 106 | additional_bindings { 107 | delete: "/api/v1/nexus/endpoints/{id}" 108 | } 109 | }; 110 | } 111 | 112 | // List all Nexus endpoints for the cluster, sorted by ID in ascending order. Set page_token in the request to the 113 | // next_page_token field of the previous response to get the next page of results. An empty next_page_token 114 | // indicates that there are no more results. During pagination, a newly added service with an ID lexicographically 115 | // earlier than the previous page's last endpoint's ID may be missed. 116 | rpc ListNexusEndpoints(ListNexusEndpointsRequest) returns (ListNexusEndpointsResponse) { 117 | option (google.api.http) = { 118 | get: "/cluster/nexus/endpoints" 119 | additional_bindings { 120 | get: "/api/v1/nexus/endpoints" 121 | } 122 | }; 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /temporal/api/protocol/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.protocol.v1; 4 | 5 | option go_package = "go.temporal.io/api/protocol/v1;protocol"; 6 | option java_package = "io.temporal.api.protocol.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Protocol::V1"; 10 | option csharp_namespace = "Temporalio.Api.Protocol.V1"; 11 | 12 | import "google/protobuf/any.proto"; 13 | 14 | // (-- api-linter: core::0146::any=disabled 15 | // aip.dev/not-precedent: We want runtime extensibility for the body field --) 16 | message Message { 17 | // An ID for this specific message. 18 | string id = 1; 19 | 20 | // Identifies the specific instance of a protocol to which this message 21 | // belongs. 
22 | string protocol_instance_id = 2; 23 | 24 | // The event ID or command ID after which this message can be delivered. The 25 | // effects of history up to and including this event ID should be visible to 26 | // the code that handles this message. Omit to opt out of sequencing. 27 | oneof sequencing_id { 28 | int64 event_id = 3; 29 | int64 command_index = 4; 30 | }; 31 | 32 | // The opaque data carried by this message. The protocol type can be 33 | // extracted from the package name of the message carried inside the Any. 34 | google.protobuf.Any body = 5; 35 | } 36 | -------------------------------------------------------------------------------- /temporal/api/query/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.query.v1; 4 | 5 | option go_package = "go.temporal.io/api/query/v1;query"; 6 | option java_package = "io.temporal.api.query.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Query::V1"; 10 | option csharp_namespace = "Temporalio.Api.Query.V1"; 11 | 12 | import "temporal/api/enums/v1/query.proto"; 13 | import "temporal/api/enums/v1/workflow.proto"; 14 | import "temporal/api/common/v1/message.proto"; 15 | import "temporal/api/failure/v1/message.proto"; 16 | 17 | // See https://docs.temporal.io/docs/concepts/queries/ 18 | message WorkflowQuery { 19 | // The workflow-author-defined identifier of the query. Typically a function name. 20 | string query_type = 1; 21 | // Serialized arguments that will be provided to the query handler. 22 | temporal.api.common.v1.Payloads query_args = 2; 23 | // Headers that were passed by the caller of the query and copied by temporal 24 | // server into the workflow task. 25 | temporal.api.common.v1.Header header = 3; 26 | } 27 | 28 | // Answer to a `WorkflowQuery` 29 | message WorkflowQueryResult { 30 | // Did the query succeed or fail? 
31 | temporal.api.enums.v1.QueryResultType result_type = 1; 32 | // Set when the query succeeds with the results. 33 | // Mutually exclusive with `error_message` and `failure`. 34 | temporal.api.common.v1.Payloads answer = 2; 35 | // Mutually exclusive with `answer`. Set when the query fails. 36 | // See also the newer `failure` field. 37 | string error_message = 3; 38 | // The full reason for this query failure. This field is newer than `error_message` and can be encoded by the SDK's 39 | // failure converter to support E2E encryption of messages and stack traces. 40 | // Mutually exclusive with `answer`. Set when the query fails. 41 | temporal.api.failure.v1.Failure failure = 4; 42 | } 43 | 44 | message QueryRejected { 45 | temporal.api.enums.v1.WorkflowExecutionStatus status = 1; 46 | } 47 | -------------------------------------------------------------------------------- /temporal/api/replication/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.replication.v1; 4 | 5 | option go_package = "go.temporal.io/api/replication/v1;replication"; 6 | option java_package = "io.temporal.api.replication.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Replication::V1"; 10 | option csharp_namespace = "Temporalio.Api.Replication.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | 14 | import "temporal/api/enums/v1/namespace.proto"; 15 | 16 | message ClusterReplicationConfig { 17 | string cluster_name = 1; 18 | } 19 | 20 | message NamespaceReplicationConfig { 21 | string active_cluster_name = 1; 22 | repeated ClusterReplicationConfig clusters = 2; 23 | temporal.api.enums.v1.ReplicationState state = 3; 24 | } 25 | 26 | // Represents a historical replication status of a Namespace 27 | message FailoverStatus { 28 | // Timestamp when the Cluster switched to the following 
failover_version 29 | google.protobuf.Timestamp failover_time = 1; 30 | int64 failover_version = 2; 31 | } 32 | -------------------------------------------------------------------------------- /temporal/api/rules/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.rules.v1; 4 | 5 | option go_package = "go.temporal.io/api/rules/v1;rules"; 6 | option java_package = "io.temporal.api.rules.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Rules::V1"; 10 | option csharp_namespace = "Temporalio.Api.Rules.V1"; 11 | 12 | 13 | import "google/protobuf/timestamp.proto"; 14 | 15 | message WorkflowRuleAction { 16 | message ActionActivityPause { 17 | } 18 | 19 | // Supported actions. 20 | oneof variant { 21 | ActionActivityPause activity_pause = 1; 22 | } 23 | } 24 | 25 | message WorkflowRuleSpec { 26 | // The id of the new workflow rule. Must be unique within the namespace. 27 | // Can be set by the user, and can have business meaning. 28 | string id = 1; 29 | 30 | // Activity trigger will be triggered when an activity is about to start. 31 | message ActivityStartingTrigger { 32 | // Activity predicate is a SQL-like string filter parameter. 33 | // It is used to match against workflow data. 34 | // The following activity attributes are supported as part of the predicate: 35 | // - ActivityType: An Activity Type is the mapping of a name to an Activity Definition. 36 | // - ActivityId: The ID of the activity. 37 | // - ActivityAttempt: The number of attempts of the activity. 38 | // - BackoffInterval: The current amount of time between scheduled attempts of the activity. 39 | // - ActivityStatus: The status of the activity. Can be one of "Scheduled", "Started", "Paused". 40 | // - TaskQueue: The name of the task queue the workflow specified that the activity should run on.
41 | // Activity predicate supports the following operators: 42 | // * =, !=, >, >=, <, <= 43 | // * AND, OR, () 44 | // * BETWEEN ... AND 45 | // STARTS_WITH 46 | string predicate = 1; 47 | } 48 | 49 | // Specifies how the rule should be triggered and evaluated. 50 | // Currently, only "activity start" type is supported. 51 | oneof trigger { 52 | ActivityStartingTrigger activity_start = 2; 53 | } 54 | 55 | // Restricted Visibility query. 56 | // This query is used to filter workflows in this namespace to which this rule should apply. 57 | // It is applied to any running workflow each time a triggering event occurs, before the trigger predicate is evaluated. 58 | // The following workflow attributes are supported: 59 | // - WorkflowType 60 | // - WorkflowId 61 | // - StartTime 62 | // - ExecutionStatus 63 | string visibility_query = 3; 64 | 65 | // WorkflowRuleAction to be taken when the rule is triggered and predicate is matched. 66 | repeated WorkflowRuleAction actions = 4; 67 | 68 | // Expiration time of the rule. After this time, the rule will be deleted. 69 | // Can be empty if the rule should never expire. 70 | google.protobuf.Timestamp expiration_time = 5; 71 | } 72 | 73 | // WorkflowRule describes a rule that can be applied to any workflow in this namespace. 74 | message WorkflowRule { 75 | // Rule creation time. 76 | google.protobuf.Timestamp create_time = 1; 77 | 78 | // Rule specification 79 | WorkflowRuleSpec spec = 2; 80 | 81 | // Identity of the actor that created the rule 82 | // (-- api-linter: core::0140::prepositions=disabled 83 | // aip.dev/not-precedent: It is better to reflect the intent this way, we will also have updated_by. --) 84 | // (-- api-linter: core::0142::time-field-names=disabled 85 | // aip.dev/not-precedent: Same as above. All other options sound clumsy --) 86 | string created_by_identity = 3; 87 | 88 | // Rule description.
89 | string description = 4; 90 | } 91 | -------------------------------------------------------------------------------- /temporal/api/sdk/v1/enhanced_stack_trace.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "EnhancedStackTraceProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | // Internal structure used to create worker stack traces with references to code. 13 | message EnhancedStackTrace { 14 | // Information pertaining to the SDK that the trace has been captured from. 15 | StackTraceSDKInfo sdk = 1; 16 | 17 | // Mapping of file path to file contents. 18 | map<string, StackTraceFileSlice> sources = 2; 19 | 20 | // Collection of stacks captured. 21 | repeated StackTrace stacks = 3; 22 | } 23 | 24 | // Information pertaining to the SDK that the trace has been captured from. 25 | // (-- api-linter: core::0123::resource-annotation=disabled 26 | // aip.dev/not-precedent: Naming SDK version is optional. --) 27 | message StackTraceSDKInfo { 28 | // Name of the SDK 29 | string name = 1; 30 | 31 | // Version string of the SDK 32 | string version = 2; 33 | } 34 | 35 | // "Slice" of a file starting at line_offset -- a line offset and code fragment corresponding to the worker's stack. 36 | message StackTraceFileSlice { 37 | // Only used (possibly) to trim the file without breaking syntax highlighting. This is not optional, unlike 38 | // the `line` property of a `StackTraceFileLocation`. 39 | // (-- api-linter: core::0141::forbidden-types=disabled 40 | // aip.dev/not-precedent: These really shouldn't have negative values. --) 41 | uint32 line_offset = 1; 42 | 43 | // Slice of a file with the respective OS-specific line terminator.
44 | string content = 2; 45 | } 46 | 47 | // More specific location details of a file: its path, precise line and column numbers if applicable, and function name if available. 48 | // In essence, a pointer to a location in a file 49 | message StackTraceFileLocation { 50 | // Path to source file (absolute or relative). 51 | // If the paths are relative, ensure that they are all relative to the same root. 52 | string file_path = 1; 53 | 54 | // Optional; If possible, SDK should send this -- this is required for displaying the code location. 55 | // If not provided, set to -1. 56 | int32 line = 2; 57 | 58 | // Optional; if possible, SDK should send this. 59 | // If not provided, set to -1. 60 | int32 column = 3; 61 | 62 | // Function name this line belongs to, if applicable. 63 | // Used for falling back to stack trace view. 64 | string function_name = 4; 65 | 66 | // Flag to communicate whether a location should be hidden by default in the stack view. 67 | bool internal_code = 5; 68 | } 69 | 70 | // Collection of FileLocation messages from a single stack. 71 | message StackTrace { 72 | // Collection of `FileLocation`s, each for a stack frame that comprise a stack trace. 73 | repeated StackTraceFileLocation locations = 1; 74 | } 75 | -------------------------------------------------------------------------------- /temporal/api/sdk/v1/task_complete_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "TaskCompleteMetadataProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | message WorkflowTaskCompletedMetadata { 13 | // Internal flags used by the core SDK. 
SDKs using flags must comply with the following behavior: 14 | // 15 | // During replay: 16 | // * If a flag is not recognized (value is too high or not defined), it must fail the workflow 17 | // task. 18 | // * If a flag is recognized, it is stored in a set of used flags for the run. Code checks for 19 | // that flag during and after this WFT are allowed to assume that the flag is present. 20 | // * If a code check for a flag does not find the flag in the set of used flags, it must take 21 | // the branch corresponding to the absence of that flag. 22 | // 23 | // During non-replay execution of new WFTs: 24 | // * The SDK is free to use all flags it knows about. It must record any newly-used (i.e., not 25 | // previously recorded) flags when completing the WFT. 26 | // 27 | // SDKs which are too old to even know about this field at all are considered to produce 28 | // undefined behavior if they replay workflows which used this mechanism. 29 | // 30 | // (-- api-linter: core::0141::forbidden-types=disabled 31 | // aip.dev/not-precedent: These really shouldn't have negative values. --) 32 | repeated uint32 core_used_flags = 1; 33 | 34 | // Flags used by the SDK lang. No attempt is made to distinguish between different SDK languages 35 | // here as processing a workflow with a different language than the one which authored it is 36 | // already undefined behavior. See `core_used_flags` for more. 37 | // 38 | // (-- api-linter: core::0141::forbidden-types=disabled 39 | // aip.dev/not-precedent: These really shouldn't have negative values. --) 40 | repeated uint32 lang_used_flags = 2; 41 | 42 | // Name of the SDK that processed the task. This is usually something like "temporal-go" and is 43 | // usually the same as client-name gRPC header. This should only be set if its value changed 44 | // since the last time recorded on the workflow (or be set on the first task).
45 | // 46 | // (-- api-linter: core::0122::name-suffix=disabled 47 | // aip.dev/not-precedent: We're ok with a name suffix here. --) 48 | string sdk_name = 3; 49 | 50 | // Version of the SDK that processed the task. This is usually something like "1.20.0" and is 51 | // usually the same as client-version gRPC header. This should only be set if its value changed 52 | // since the last time recorded on the workflow (or be set on the first task). 53 | string sdk_version = 4; 54 | } -------------------------------------------------------------------------------- /temporal/api/sdk/v1/user_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "UserMetadataProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | 13 | import "temporal/api/common/v1/message.proto"; 14 | 15 | // Information a user can set, often for use by user interfaces. 16 | message UserMetadata { 17 | // Short-form text that provides a summary. This payload should be a "json/plain"-encoded payload 18 | // that is a single JSON string for use in user interfaces. User interface formatting may not 19 | // apply to this text when used in "title" situations. The payload data section is limited to 400 20 | // bytes by default. 21 | temporal.api.common.v1.Payload summary = 1; 22 | 23 | // Long-form text that provides details. This payload should be a "json/plain"-encoded payload 24 | // that is a single JSON string for use in user interfaces. User interface formatting may apply to 25 | // this text in common use. The payload data section is limited to 20000 bytes by default. 
26 | temporal.api.common.v1.Payload details = 2; 27 | } -------------------------------------------------------------------------------- /temporal/api/sdk/v1/workflow_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "WorkflowMetadataProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | // The name of the query to retrieve this information is `__temporal_workflow_metadata`. 13 | message WorkflowMetadata { 14 | // Metadata provided at declaration or creation time. 15 | WorkflowDefinition definition = 1; 16 | // Current long-form details of the workflow's state. This is used by user interfaces to show 17 | // long-form text. This text may be formatted by the user interface. 18 | string current_details = 2; 19 | } 20 | 21 | // (-- api-linter: core::0203::optional=disabled --) 22 | message WorkflowDefinition { 23 | // A name scoped by the task queue that maps to this workflow definition. 24 | // If missing, this workflow is a dynamic workflow. 25 | string type = 1; 26 | 27 | // Query definitions, sorted by name. 28 | repeated WorkflowInteractionDefinition query_definitions = 2; 29 | 30 | // Signal definitions, sorted by name. 31 | repeated WorkflowInteractionDefinition signal_definitions = 3; 32 | 33 | // Update definitions, sorted by name. 34 | repeated WorkflowInteractionDefinition update_definitions = 4; 35 | } 36 | 37 | // (-- api-linter: core::0123::resource-annotation=disabled 38 | // aip.dev/not-precedent: The `name` field is optional. --) 39 | // (-- api-linter: core::0203::optional=disabled --) 40 | message WorkflowInteractionDefinition { 41 | // An optional name for the handler. 
If missing, it represents 42 | // a dynamic handler that processes any interactions not handled by others. 43 | // There is at most one dynamic handler per workflow and interaction kind. 44 | string name = 1; 45 | // An optional interaction description provided by the application. 46 | // By convention, external tools may interpret its first part, 47 | // i.e., ending with a line break, as a summary of the description. 48 | string description = 2; 49 | } 50 | -------------------------------------------------------------------------------- /temporal/api/update/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.update.v1; 4 | 5 | option go_package = "go.temporal.io/api/update/v1;update"; 6 | option java_package = "io.temporal.api.update.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Update::V1"; 10 | option csharp_namespace = "Temporalio.Api.Update.V1"; 11 | 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/api/enums/v1/update.proto"; 14 | import "temporal/api/failure/v1/message.proto"; 15 | 16 | // Specifies client's intent to wait for Update results. 17 | message WaitPolicy { 18 | // Indicates the Update lifecycle stage that the Update must reach before 19 | // API call is returned. 20 | // NOTE: This field works together with API call timeout which is limited by 21 | // server timeout (maximum wait time). If server timeout is expired before 22 | // user specified timeout, API call returns even if specified stage is not reached. 23 | temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage lifecycle_stage = 1; 24 | } 25 | 26 | // The data needed by a client to refer to a previously invoked Workflow Update. 
27 | message UpdateRef { 28 | temporal.api.common.v1.WorkflowExecution workflow_execution = 1; 29 | string update_id = 2; 30 | } 31 | 32 | // The outcome of a Workflow Update: success or failure. 33 | message Outcome { 34 | oneof value { 35 | temporal.api.common.v1.Payloads success = 1; 36 | temporal.api.failure.v1.Failure failure = 2; 37 | } 38 | } 39 | 40 | // Metadata about a Workflow Update. 41 | message Meta { 42 | // An ID with workflow-scoped uniqueness for this Update. 43 | string update_id = 1; 44 | 45 | // A string identifying the agent that requested this Update. 46 | string identity = 2; 47 | } 48 | 49 | message Input { 50 | // Headers that are passed with the Update from the requesting entity. 51 | // These can include things like auth or tracing tokens. 52 | temporal.api.common.v1.Header header = 1; 53 | 54 | // The name of the Update handler to invoke on the target Workflow. 55 | string name = 2; 56 | 57 | // The arguments to pass to the named Update handler. 58 | temporal.api.common.v1.Payloads args = 3; 59 | } 60 | 61 | // The client request that triggers a Workflow Update. 62 | message Request { 63 | Meta meta = 1; 64 | Input input = 2; 65 | } 66 | 67 | // An Update protocol message indicating that a Workflow Update has been rejected. 68 | message Rejection { 69 | string rejected_request_message_id = 1; 70 | int64 rejected_request_sequencing_event_id = 2; 71 | Request rejected_request = 3; 72 | temporal.api.failure.v1.Failure failure = 4; 73 | } 74 | 75 | // An Update protocol message indicating that a Workflow Update has 76 | // been accepted (i.e. passed the worker-side validation phase). 77 | message Acceptance { 78 | string accepted_request_message_id = 1; 79 | int64 accepted_request_sequencing_event_id = 2; 80 | Request accepted_request = 3; 81 | } 82 | 83 | // An Update protocol message indicating that a Workflow Update has 84 | // completed with the contained outcome. 
85 | message Response { 86 | Meta meta = 1; 87 | Outcome outcome = 2; 88 | } 89 | -------------------------------------------------------------------------------- /temporal/api/version/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.version.v1; 4 | 5 | option go_package = "go.temporal.io/api/version/v1;version"; 6 | option java_package = "io.temporal.api.version.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Version::V1"; 10 | option csharp_namespace = "Temporalio.Api.Version.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | import "temporal/api/enums/v1/common.proto"; 14 | 15 | // ReleaseInfo contains information about specific version of temporal. 16 | message ReleaseInfo { 17 | string version = 1; 18 | google.protobuf.Timestamp release_time = 2; 19 | string notes = 3; 20 | } 21 | 22 | // Alert contains notification and severity. 23 | message Alert { 24 | string message = 1; 25 | temporal.api.enums.v1.Severity severity = 2; 26 | } 27 | 28 | // VersionInfo contains details about current and recommended release versions as well as alerts and upgrade instructions. 
29 | message VersionInfo { 30 | ReleaseInfo current = 1; 31 | ReleaseInfo recommended = 2; 32 | string instructions = 3; 33 | repeated Alert alerts = 4; 34 | google.protobuf.Timestamp last_update_time = 5; 35 | } 36 | 37 | -------------------------------------------------------------------------------- /temporal/api/worker/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.worker.v1; 4 | 5 | option go_package = "go.temporal.io/api/worker/v1;worker"; 6 | option java_package = "io.temporal.api.worker.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Worker::V1"; 10 | option csharp_namespace = "Temporalio.Api.Worker.V1"; 11 | 12 | import "google/protobuf/duration.proto"; 13 | import "google/protobuf/timestamp.proto"; 14 | import "temporal/api/deployment/v1/message.proto"; 15 | import "temporal/api/enums/v1/common.proto"; 16 | 17 | message WorkerPollerInfo { 18 | // Number of polling RPCs that are currently in flight. 19 | int32 current_pollers = 1; 20 | 21 | google.protobuf.Timestamp last_successful_poll_time = 2; 22 | 23 | // Set true if the number of concurrent pollers is auto-scaled 24 | bool is_autoscaling = 3; 25 | } 26 | 27 | message WorkerSlotsInfo { 28 | // Number of slots available to the worker for specific tasks. 29 | // May be -1 if the upper bound is not known. 30 | int32 current_available_slots = 1; 31 | // Number of slots used by the worker for specific tasks. 32 | int32 current_used_slots = 2; 33 | 34 | // Kind of the slot supplier, which is used to determine how the slots are allocated. 35 | // Possible values: "Fixed | ResourceBased | Custom String" 36 | string slot_supplier_kind = 3; 37 | 38 | // Total number of tasks processed (completed both successfully and unsuccessfully, or any other way) 39 | // by the worker since the worker started.
This is a cumulative counter. 40 | int32 total_processed_tasks = 4; 41 | // Total number of failed tasks processed by the worker so far. 42 | int32 total_failed_tasks = 5; 43 | 44 | // Number of tasks processed since the last heartbeat from the worker. 45 | // This counter is reset to 0 each time the worker sends a heartbeat. 46 | // Contains both successful and failed tasks. 47 | int32 last_interval_processed_tasks = 6; 48 | // Number of failed tasks processed since the last heartbeat from the worker. 49 | int32 last_interval_failure_tasks = 7; 50 | } 51 | 52 | // Holds everything needed to identify the worker host/process context 53 | message WorkerHostInfo { 54 | // Worker host identifier. 55 | string host_name = 1; 56 | 57 | // Worker process identifier, should be unique for the host. 58 | string process_id = 2; 59 | 60 | // System used CPU as a float in the range [0.0, 1.0] where 1.0 is defined as all 61 | // cores on the host pegged. 62 | float current_host_cpu_usage = 3; 63 | // System used memory as a float in the range [0.0, 1.0] where 1.0 is defined as 64 | // all available memory on the host is used. 65 | float current_host_mem_usage = 4; 66 | } 67 | 68 | // Worker info message, contains information about the worker and its current state. 69 | // All information is provided by the worker itself. 70 | // (-- api-linter: core::0140::prepositions=disabled 71 | // aip.dev/not-precedent: Removing those words makes names less clear. --) 72 | message WorkerHeartbeat { 73 | // Worker identifier, should be unique for the namespace. 74 | // It is distinct from worker identity, which is not necessarily namespace-unique. 75 | string worker_instance_key = 1; 76 | 77 | // Worker identity, set by the client, may not be unique. 78 | // Usually host_name+(user group name)+process_id, but can be overwritten by the user. 79 | string worker_identity = 2; 80 | 81 | 82 | // Worker host information.
83 | WorkerHostInfo host_info = 3; 84 | 85 | // Task queue this worker is polling for tasks. 86 | string task_queue = 4; 87 | 88 | temporal.api.deployment.v1.WorkerDeploymentVersion deployment_version = 5; 89 | 90 | string sdk_name = 6; 91 | string sdk_version = 7; 92 | 93 | // Worker status. Defined by SDK. 94 | temporal.api.enums.v1.WorkerStatus status = 8; 95 | 96 | // Worker start time. 97 | // It can be used to determine worker uptime. (current time - start time) 98 | google.protobuf.Timestamp start_time = 9; 99 | 100 | // Timestamp of this heartbeat, coming from the worker. Worker should set it to "now". 101 | // Note that this timestamp comes directly from the worker and is subject to workers' clock skew. 102 | google.protobuf.Timestamp heartbeat_time = 10; 103 | // Elapsed time since the last heartbeat from the worker. 104 | google.protobuf.Duration elapsed_since_last_heartbeat = 11; 105 | 106 | WorkerSlotsInfo workflow_task_slots_info = 12; 107 | WorkerSlotsInfo activity_task_slots_info = 13; 108 | WorkerSlotsInfo nexus_task_slots_info = 14; 109 | WorkerSlotsInfo local_activity_slots_info = 15; 110 | 111 | WorkerPollerInfo workflow_poller_info = 16; 112 | WorkerPollerInfo workflow_sticky_poller_info = 17; 113 | WorkerPollerInfo activity_poller_info = 18; 114 | WorkerPollerInfo nexus_poller_info = 19; 115 | 116 | // A Workflow Task found a cached Workflow Execution to run against. 117 | int32 total_sticky_cache_hit = 20; 118 | // A Workflow Task did not find a cached Workflow execution to run against. 119 | int32 total_sticky_cache_miss = 21; 120 | // Current cache size, expressed in number of Workflow Executions. 121 | int32 current_sticky_cache_size = 22; 122 | } 123 | 124 | message WorkerInfo { 125 | WorkerHeartbeat worker_heartbeat = 1; 126 | } --------------------------------------------------------------------------------