├── .cargo ├── config.toml └── multi-worker-manual-test ├── .github └── workflows │ ├── heavy.yml │ └── per-pr.yml ├── .gitignore ├── AGENTS.md ├── ARCHITECTURE.md ├── CODEOWNERS ├── Cargo.toml ├── LICENSE.txt ├── README.md ├── arch_docs ├── diagrams │ ├── README.md │ ├── sticky_queues.puml │ └── workflow_internals.svg ├── sticky_queues.md └── workflow_task_chunking.md ├── cargo-tokio-console.sh ├── client ├── Cargo.toml └── src │ ├── lib.rs │ ├── metrics.rs │ ├── proxy.rs │ ├── raw.rs │ ├── retry.rs │ ├── worker_registry │ └── mod.rs │ └── workflow_handle │ └── mod.rs ├── core-api ├── Cargo.toml └── src │ ├── errors.rs │ ├── lib.rs │ ├── telemetry.rs │ ├── telemetry │ └── metrics.rs │ └── worker.rs ├── core ├── Cargo.toml ├── benches │ └── workflow_replay.rs └── src │ ├── abstractions.rs │ ├── abstractions │ └── take_cell.rs │ ├── core_tests │ ├── activity_tasks.rs │ ├── child_workflows.rs │ ├── determinism.rs │ ├── local_activities.rs │ ├── mod.rs │ ├── queries.rs │ ├── replay_flag.rs │ ├── updates.rs │ ├── workers.rs │ ├── workflow_cancels.rs │ └── workflow_tasks.rs │ ├── debug_client.rs │ ├── ephemeral_server │ └── mod.rs │ ├── internal_flags.rs │ ├── lib.rs │ ├── pollers │ ├── mod.rs │ └── poll_buffer.rs │ ├── protosext │ ├── mod.rs │ └── protocol_messages.rs │ ├── replay │ └── mod.rs │ ├── retry_logic.rs │ ├── telemetry │ ├── log_export.rs │ ├── metrics.rs │ ├── mod.rs │ ├── otel.rs │ └── prometheus_server.rs │ ├── test_help │ └── mod.rs │ └── worker │ ├── activities.rs │ ├── activities │ ├── activity_heartbeat_manager.rs │ └── local_activities.rs │ ├── client.rs │ ├── client │ └── mocks.rs │ ├── mod.rs │ ├── nexus.rs │ ├── slot_provider.rs │ ├── slot_supplier.rs │ ├── tuner.rs │ ├── tuner │ ├── fixed_size.rs │ └── resource_based.rs │ └── workflow │ ├── driven_workflow.rs │ ├── history_update.rs │ ├── machines │ ├── activity_state_machine.rs │ ├── cancel_external_state_machine.rs │ ├── cancel_nexus_op_state_machine.rs │ ├── cancel_workflow_state_machine.rs │ 
├── child_workflow_state_machine.rs │ ├── complete_workflow_state_machine.rs │ ├── continue_as_new_workflow_state_machine.rs │ ├── fail_workflow_state_machine.rs │ ├── local_activity_state_machine.rs │ ├── mod.rs │ ├── modify_workflow_properties_state_machine.rs │ ├── nexus_operation_state_machine.rs │ ├── patch_state_machine.rs │ ├── signal_external_state_machine.rs │ ├── timer_state_machine.rs │ ├── transition_coverage.rs │ ├── update_state_machine.rs │ ├── upsert_search_attributes_state_machine.rs │ ├── workflow_machines.rs │ ├── workflow_machines │ │ └── local_acts.rs │ └── workflow_task_state_machine.rs │ ├── managed_run.rs │ ├── mod.rs │ ├── run_cache.rs │ ├── wft_extraction.rs │ ├── wft_poller.rs │ └── workflow_stream.rs ├── docker ├── docker-compose-ci.yaml ├── docker-compose-telem.yaml └── docker-compose.yaml ├── etc ├── deps.svg ├── dynamic-config.yaml ├── otel-collector-ci.yaml ├── otel-collector-config.yaml ├── prometheus.yaml └── regen-depgraph.sh ├── fsm ├── Cargo.toml ├── LICENSE.txt ├── README.md ├── rustfsm_procmacro │ ├── Cargo.toml │ ├── LICENSE.txt │ ├── src │ │ └── lib.rs │ └── tests │ │ ├── progress.rs │ │ └── trybuild │ │ ├── dupe_transitions_fail.rs │ │ ├── dupe_transitions_fail.stderr │ │ ├── dynamic_dest_pass.rs │ │ ├── forgot_name_fail.rs │ │ ├── forgot_name_fail.stderr │ │ ├── handler_arg_pass.rs │ │ ├── handler_pass.rs │ │ ├── medium_complex_pass.rs │ │ ├── no_handle_conversions_require_into_fail.rs │ │ ├── no_handle_conversions_require_into_fail.stderr │ │ ├── simple_pass.rs │ │ ├── struct_event_variant_fail.rs │ │ ├── struct_event_variant_fail.stderr │ │ ├── tuple_more_item_event_variant_fail.rs │ │ ├── tuple_more_item_event_variant_fail.stderr │ │ ├── tuple_zero_item_event_variant_fail.rs │ │ └── tuple_zero_item_event_variant_fail.stderr ├── rustfsm_trait │ ├── Cargo.toml │ ├── LICENSE.txt │ └── src │ │ └── lib.rs └── src │ └── lib.rs ├── histories ├── ends_empty_wft_complete.bin ├── 
evict_while_la_running_no_interference-16_history.bin ├── evict_while_la_running_no_interference-23_history.bin ├── evict_while_la_running_no_interference-85_history.bin ├── fail_wf_task.bin ├── long_local_activity_with_update-0_history.bin ├── long_local_activity_with_update-1_history.bin ├── long_local_activity_with_update-2_history.bin ├── long_local_activity_with_update-3_history.bin ├── old_change_marker_format.bin └── timer_workflow_history.bin ├── integ-with-otel.sh ├── rustfmt.toml ├── sdk-core-protos ├── Cargo.toml ├── build.rs ├── protos │ ├── api_cloud_upstream │ │ ├── .github │ │ │ └── workflows │ │ │ │ ├── build.yaml │ │ │ │ └── push-to-buf.yml │ │ ├── .gitignore │ │ ├── CODEOWNERS │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README.md │ │ ├── VERSION │ │ ├── buf.gen.yaml │ │ ├── buf.lock │ │ ├── buf.yaml │ │ └── temporal │ │ │ └── api │ │ │ └── cloud │ │ │ ├── account │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── cloudservice │ │ │ └── v1 │ │ │ │ ├── request_response.proto │ │ │ │ └── service.proto │ │ │ ├── identity │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── namespace │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── nexus │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── operation │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── region │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── resource │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── sink │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ └── usage │ │ │ └── v1 │ │ │ └── message.proto │ ├── api_upstream │ │ ├── .github │ │ │ ├── CODEOWNERS │ │ │ ├── PULL_REQUEST_TEMPLATE.md │ │ │ └── workflows │ │ │ │ ├── ci.yml │ │ │ │ ├── create-release.yml │ │ │ │ ├── publish-docs.yml │ │ │ │ ├── push-to-buf.yml │ │ │ │ ├── trigger-api-go-delete-release.yml │ │ │ │ ├── trigger-api-go-publish-release.yml │ │ │ │ └── trigger-api-go-update.yml │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── README.md │ │ ├── api-linter.yaml │ │ ├── buf.gen.yaml │ │ ├── buf.lock │ │ ├── buf.yaml │ │ ├── google │ │ │ 
├── api │ │ │ │ ├── annotations.proto │ │ │ │ └── http.proto │ │ │ └── protobuf │ │ │ │ ├── any.proto │ │ │ │ ├── descriptor.proto │ │ │ │ ├── duration.proto │ │ │ │ ├── empty.proto │ │ │ │ ├── struct.proto │ │ │ │ ├── timestamp.proto │ │ │ │ └── wrappers.proto │ │ ├── openapi │ │ │ ├── openapiv2.json │ │ │ ├── openapiv3.yaml │ │ │ └── payload_description.txt │ │ └── temporal │ │ │ └── api │ │ │ ├── activity │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── batch │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── command │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── common │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── deployment │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── enums │ │ │ └── v1 │ │ │ │ ├── batch_operation.proto │ │ │ │ ├── command_type.proto │ │ │ │ ├── common.proto │ │ │ │ ├── deployment.proto │ │ │ │ ├── event_type.proto │ │ │ │ ├── failed_cause.proto │ │ │ │ ├── namespace.proto │ │ │ │ ├── nexus.proto │ │ │ │ ├── query.proto │ │ │ │ ├── reset.proto │ │ │ │ ├── schedule.proto │ │ │ │ ├── task_queue.proto │ │ │ │ ├── update.proto │ │ │ │ └── workflow.proto │ │ │ ├── errordetails │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── export │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── failure │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── filter │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── history │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── namespace │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── nexus │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── operatorservice │ │ │ └── v1 │ │ │ │ ├── request_response.proto │ │ │ │ └── service.proto │ │ │ ├── protocol │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── query │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── replication │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── rules │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── schedule │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── sdk │ │ │ └── v1 │ │ │ │ ├── enhanced_stack_trace.proto │ │ │ │ ├── task_complete_metadata.proto │ │ │ │ 
├── user_metadata.proto │ │ │ │ └── workflow_metadata.proto │ │ │ ├── taskqueue │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── update │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── version │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ ├── workflow │ │ │ └── v1 │ │ │ │ └── message.proto │ │ │ └── workflowservice │ │ │ └── v1 │ │ │ ├── request_response.proto │ │ │ └── service.proto │ ├── google │ │ └── rpc │ │ │ └── status.proto │ ├── grpc │ │ └── health │ │ │ └── v1 │ │ │ └── health.proto │ ├── local │ │ └── temporal │ │ │ └── sdk │ │ │ └── core │ │ │ ├── activity_result │ │ │ └── activity_result.proto │ │ │ ├── activity_task │ │ │ └── activity_task.proto │ │ │ ├── child_workflow │ │ │ └── child_workflow.proto │ │ │ ├── common │ │ │ └── common.proto │ │ │ ├── core_interface.proto │ │ │ ├── external_data │ │ │ └── external_data.proto │ │ │ ├── nexus │ │ │ └── nexus.proto │ │ │ ├── workflow_activation │ │ │ └── workflow_activation.proto │ │ │ ├── workflow_commands │ │ │ └── workflow_commands.proto │ │ │ └── workflow_completion │ │ │ └── workflow_completion.proto │ └── testsrv_upstream │ │ ├── Makefile │ │ ├── api-linter.yaml │ │ ├── buf.yaml │ │ └── temporal │ │ └── api │ │ └── testservice │ │ └── v1 │ │ ├── request_response.proto │ │ └── service.proto └── src │ ├── constants.rs │ ├── history_builder.rs │ ├── history_info.rs │ ├── lib.rs │ ├── task_token.rs │ └── utilities.rs ├── sdk ├── Cargo.toml └── src │ ├── activity_context.rs │ ├── app_data.rs │ ├── interceptors.rs │ ├── lib.rs │ ├── workflow_context.rs │ ├── workflow_context │ └── options.rs │ └── workflow_future.rs ├── test-utils ├── Cargo.toml └── src │ ├── canned_histories.rs │ ├── histfetch.rs │ ├── interceptors.rs │ ├── lib.rs │ └── workflows.rs └── tests ├── cloud_tests.rs ├── fuzzy_workflow.rs ├── global_metric_tests.rs ├── heavy_tests.rs ├── integ_tests ├── activity_functions.rs ├── client_tests.rs ├── ephemeral_server_tests.rs ├── heartbeat_tests.rs ├── metrics_tests.rs ├── polling_tests.rs ├── 
queries_tests.rs ├── update_tests.rs ├── visibility_tests.rs ├── worker_tests.rs ├── worker_versioning_tests.rs ├── workflow_tests.rs └── workflow_tests │ ├── activities.rs │ ├── appdata_propagation.rs │ ├── cancel_external.rs │ ├── cancel_wf.rs │ ├── child_workflows.rs │ ├── continue_as_new.rs │ ├── determinism.rs │ ├── eager.rs │ ├── local_activities.rs │ ├── modify_wf_properties.rs │ ├── nexus.rs │ ├── patches.rs │ ├── priority.rs │ ├── replay.rs │ ├── resets.rs │ ├── signals.rs │ ├── stickyness.rs │ ├── timers.rs │ └── upsert_search_attrs.rs ├── main.rs ├── manual_tests.rs └── runner.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [env] 2 | # This temporarily overrides the version of the CLI used for integration tests, locally and in CI 3 | # TODO: Comment out once priority is released 4 | CLI_VERSION_OVERRIDE = "v1.3.1-priority.0" 5 | 6 | [alias] 7 | integ-test = ["test", "--features", "temporal-sdk-core-protos/serde_serialize", "--package", "temporal-sdk-core", "--test", "integ_runner", "--"] 8 | lint = ["clippy", "--workspace", "--examples", "--all-features", 9 | "--test", "integ_tests", "--test", "heavy_tests", "--test", "manual_tests", 10 | "--", "--D", "warnings"] 11 | test-lint = ["clippy", "--all", "--all-features", "--examples", "--workspace", 12 | "--tests", "--", "--D", "warnings"] 13 | -------------------------------------------------------------------------------- /.cargo/multi-worker-manual-test: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Use this script during manual testing against a server to run multiple instances 4 | # of a test, thus acting like multiple workers are running concurrently 5 | 6 | # Usage: multi-worker-heavy-test 7 | 8 | # You may want to set env vars for targeting cloud first, ex (fish syntax): 9 | # set -gx TEMPORAL_SERVICE_ADDRESS "https://sj-poller-test.e2e.tmprl-test.cloud:7233" 10 | # set -gx 
TEMPORAL_USE_TLS 1 11 | # set -gx TEMPORAL_NAMESPACE sj-poller-test.e2e 12 | 13 | cargo integ-test -c "--release" -t manual_tests --just-build 14 | 15 | parallel --line-buffer --process-slot-var=PAR_JOBNUM -j $1 cargo integ-test -c "--release" -s external -t manual_tests -- --nocapture $2 ::: $(seq 1 $1) -------------------------------------------------------------------------------- /.github/workflows/heavy.yml: -------------------------------------------------------------------------------- 1 | name: Heavy Tests 2 | 3 | on: # rebuild any PRs and main branch changes 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | build-and-test: 15 | timeout-minutes: 20 16 | runs-on: ubuntu-latest-16-cores 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | submodules: recursive 21 | - uses: dtolnay/rust-toolchain@stable 22 | - name: Install protoc 23 | uses: arduino/setup-protoc@v3 24 | with: 25 | # TODO: Upgrade proto once https://github.com/arduino/setup-protoc/issues/99 is fixed 26 | version: '23.x' 27 | repo-token: ${{ secrets.GITHUB_TOKEN }} 28 | - run: cargo integ-test -c "--release" -t heavy_tests -- --test-threads 1 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Build files 2 | /target/ 3 | 4 | # sdk-core is a library we allow consumers pin specific versions. 
5 | Cargo.lock 6 | 7 | /.idea/ 8 | *.iml 9 | *~ 10 | .aider* 11 | 12 | # Ignore generated protobuf files 13 | src/protos/*.rs 14 | !src/protos/mod.rs 15 | 16 | # Coverage 17 | /tarpaulin-report.html 18 | /machine_coverage/ 19 | /bindings/ 20 | /core/machine_coverage/ 21 | 22 | # Keep secrets here 23 | /.cloud_certs/ 24 | cloud_envs.fish 25 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | # Contributor Guidance for `sdk-core` 2 | 3 | This repository provides a Rust workspace for the Temporal Core SDK and related crates. Use this 4 | document as your quick reference when submitting pull requests. 5 | 6 | ## Where Things Are 7 | 8 | - `core/` – implementation of the core SDK 9 | - `client/` – clients for communicating with Temporal clusters 10 | - `core-api/` – API definitions exposed by core 11 | - `sdk/` – pre-alpha Rust SDK built on top of core (used mainly for tests) 12 | - `sdk-core-protos/` – protobuf definitions shared across crates 13 | - `fsm/` – state machine implementation and macros 14 | - `test-utils/` – helpers and binaries for tests 15 | - `tests/` – integration, heavy, and manual tests 16 | - `arch_docs/` – architectural design documents 17 | - Contributor guide: `README.md` 18 | 19 | ## Repo Specific Utilities 20 | 21 | - `.cargo/config.toml` defines useful cargo aliases: 22 | - `cargo lint` – run clippy on workspace crates 23 | - `cargo test-lint` – run clippy on tests 24 | - `cargo integ-test` – run the integration test runner 25 | - `cargo-tokio-console.sh` – run any cargo command with the `tokio-console` feature 26 | - `integ-with-otel.sh` – run integration tests with OpenTelemetry enabled 27 | - `.cargo/multi-worker-manual-test` – helper script for spawning multiple workers during manual 28 | testing 29 | 30 | ## Building and Testing 31 | 32 | The following commands are enforced for each pull request (see 
`README.md`): 33 | 34 | ```bash 35 | cargo build # build all crates 36 | cargo test # run unit tests 37 | cargo integ-test # integration tests (starts ephemeral server by default) 38 | cargo test --test heavy_tests # load tests -- agents do not need to run this and should not 39 | ``` 40 | 41 | Additional checks: 42 | 43 | ```bash 44 | cargo fmt --all # format code 45 | cargo clippy --all -- -D warnings # lint 46 | ``` 47 | 48 | Documentation can be generated with `cargo doc`. 49 | 50 | ## Expectations for Pull Requests 51 | 52 | - Format and lint your code before submitting. 53 | - Ensure all tests pass locally. Integration tests may require a running Temporal server or the 54 | ephemeral server started by `cargo integ-test`. 55 | - Keep commit messages short and in the imperative mood. 56 | - Provide a clear PR description outlining what changed and why. 57 | - Reviewers expect new features or fixes to include corresponding tests when applicable. 58 | 59 | ## Review Checklist 60 | 61 | Reviewers will look for: 62 | 63 | - All builds, tests, and lints passing in CI 64 | - New tests covering behavior changes 65 | - Clear and concise code following existing style (see `README.md` for error handling guidance) 66 | - Documentation updates for any public API changes 67 | 68 | ## Notes 69 | 70 | - Fetch workflow histories with `cargo run --bin histfetch [run_id]` (binary lives in 71 | `test-utils`). 72 | - Protobuf files under `sdk-core-protos/protos/api_upstream` are a git subtree; see `README.md` for 73 | update instructions. 
74 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Primary owners 2 | 3 | * @temporalio/sdk 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["core", "client", "core-api", "fsm", "test-utils", "sdk-core-protos", "sdk"] 3 | resolver = "2" 4 | 5 | [workspace.package] 6 | license = "MIT" 7 | license-file = "LICENSE.txt" 8 | 9 | [workspace.dependencies] 10 | derive_builder = "0.20" 11 | derive_more = { version = "2.0", features = ["constructor", "display", "from", "into", "debug"] } 12 | thiserror = "2" 13 | tonic = "0.12" 14 | tonic-build = "0.12" 15 | opentelemetry = { version = "0.29", features = ["metrics"] } 16 | prost = "0.13" 17 | prost-types = "0.13" 18 | 19 | [workspace.lints.rust] 20 | unreachable_pub = "warn" 21 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2021 Temporal Technologies, Inc. All Rights Reserved 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /arch_docs/diagrams/README.md: -------------------------------------------------------------------------------- 1 | This directory contains diagrams used in the arch documents. 2 | 3 | They can be embedded into those documents two ways: 4 | 5 | * by the technique described here using the plantuml proxy service: 6 | https://stackoverflow.com/questions/32203610/how-to-integrate-uml-diagrams-into-gitlab-or-github 7 | * By pasting them into https://www.planttext.com/ and then embedding the resulting SVG link 8 | 9 | The first technique has the problem that the PR won't show changes, since it's always from master. 10 | The second has the problem that you have to update it by hand. Pick your poison. 
11 | -------------------------------------------------------------------------------- /arch_docs/diagrams/sticky_queues.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | title "Sticky Task Queue Interactions" 4 | 5 | participant "Worker 1" as w1 6 | participant "Server" as fe 7 | queue "some_tq" as gq 8 | queue "some_tq_worker_1_xxx" as sq 9 | 10 | w1 -> fe : Poll on ""some_tq"" 11 | fe <- gq : grab next task 12 | fe --> w1 : Workflow Task (entire history) 13 | 14 | w1 -> w1 : Process task 15 | w1 -> fe : Task complete, use task queue ""some_tq_worker_1_xxx"" 16 | 17 | fe -> fe : An event, like a timer firing 18 | fe -> sq ** : Enqueue task 19 | 20 | loop Processing workflow tasks on a specific queue 21 | w1 -> fe : Poll on ""some_tq_worker_1_xxx"" 22 | fe <- sq : grab next task 23 | fe --> w1 : Workflow Task (only new events) 24 | w1 -> w1 : Process task 25 | w1 -> fe : Task complete, use task queue ""some_tq_worker_1_xxx"" 26 | fe -> fe : An event, like a timer firing 27 | fe -> sq : Enqueue task 28 | end 29 | 30 | w1 -> w1 : Evict workflow from cache 31 | note right : Eviction happens of this workflow for some reason 32 | 33 | w1 -> fe : ResetSticky (includes which workflow execution) 34 | 35 | fe -> fe : An event, like a timer firing 36 | fe -> gq : Enqueue task 37 | note left : We go back to the shared queue 38 | 39 | 40 | @enduml -------------------------------------------------------------------------------- /arch_docs/sticky_queues.md: -------------------------------------------------------------------------------- 1 | # Sticky (and Non-Sticky) Task Queues 2 | 3 | A temporal worker, whether polling for workflows or activities, does so by polling a specific 4 | "Task Queue". [Here](https://docs.temporal.io/docs/concepts/task-queues) is some basic documentation 5 | on them. Task queues are designed to be lightweight, it's OK to have very large numbers of them. 
6 | 7 | Any time there is a new activity task or workflow task that needs to be performed by a worker, it'll 8 | be queued in a specific task queue. 9 | 10 | ## Non-Sticky Queues 11 | Before explaining sticky queues, let's examine how things work when they are *not* enabled. 12 | 13 | In our example, let's say there are two workers polling the same task queue (`shared_q`): 14 | 15 | ```text 16 | ┌───────┬───►shared_q 17 | w1│ │ ┌───────┐ 18 | ┌──┤ │ │ t1 │ 19 | │ │ │ ├───────┤ 20 | └──┘ │ │ t2 │ 21 | │ ├───────┤ 22 | w2 │ │ t3 │ 23 | ┌──┬───────┘ ├───────┤ 24 | │ │ │ .. │ 25 | └──┘ └───────┘ 26 | ``` 27 | 28 | Both workers poll the same queue, and either may take the next task from it. When they do, they will 29 | be delivered the *entire* history of that workflow. They will recreate workflow state by replaying 30 | history from the start, and then issue the appropriate workflow task completion. The cycle continues 31 | until the workflow executions are finished. 32 | 33 | ## Sticky Queues 34 | Sticky queues exist to avoid needing to ship the entire workflow history to workers for every task, 35 | and avoid the need to replay history from the start. 36 | 37 | To accomplish this, workers maintain workflow state in a cache. When they reply with a workflow 38 | task completion, they specify a task queue name specific to them. This queue name is unique to 39 | the specific instance of the worker and will be different across restarts even of the same binary on 40 | the same machine. The worker will poll on both the shared queue, and its specific task queue. 41 | 42 | Also unlike normal task queues, sticky task queues have a schedule-to-start timeout associated with 43 | them. More on why this is needed later. 
44 | 45 | The interactions, at a high level, look like this: 46 | ![](https://www.planttext.com/api/plantuml/svg/jLEnRjim4Dtv5GTDSH0FNeEYI8SCtT8aG4Q3eKyI8OedyKwM_FSzKNOJkqK13rbvxxrxx-dqm6AJ36qmHhm4XE95l6iEy6gvWLy33WW_es2oJZn5BepfbE2TxsmKADueDPXWKu1b63VdmnTCUqnvLABfirZ1rE9M-lnQzHUlsp7hRJVRQPeoX7jZnWsilwi4tCCJXG0KeVYZKnWTV5khbewhPDyXuYGWwd-Uh9Mf_7kOdPQ1nYNP3KRn2Q7sB9GEgzEI37rAv91vqVYqF3CTjLr0mJiO63C4ZXd-7K8RcsqS9Nv4mBtkleFW6mGBubljh_J9n-e8v1vkRnNx61VXRCC4ekxaJB4mdlBCOvuxiS0TEbzwjpZwRs-N9fSI-ReIAOQ38iSb4w--iCJhExme4EFkx2C_xhsJZnRBH2quwseqaGGX-QgM4qml7shRTHYrw194h-OJanBCfY6XPOfdv_gCZ7ByesvyT67OeL9hx-eF0PpG3VEErTMdKlqLCviFMCxUtn0gWdVh6X1IrmXSsuIxutaOyw2bwB__6m00)] 47 | 48 | After reviewing the diagram you may wonder what happens if the worker dies without being able 49 | to send the `ResetSticky` request. This is where the timeout for sticky queues matters. If the 50 | worker does not poll from the queue during the specified duration (default `5s`), then the task 51 | in the queue will time out and will be rescheduled onto the non-sticky queue. 52 | -------------------------------------------------------------------------------- /arch_docs/workflow_task_chunking.md: -------------------------------------------------------------------------------- 1 | # Workflow Task Chunking 2 | 3 | One source of complexity in Core is the chunking of history into "logical" Workflow Tasks. 4 | 5 | Workflow tasks (WFTs) always take the following form in event history: 6 | 7 | * \[Preceding Events\] (optional) 8 | * WFT Scheduled 9 | * WFT Started 10 | * WFT Completed 11 | * \[Commands\] (optional) 12 | 13 | In the typical case, the "logical" WFT consists of all the commands from the last workflow task, 14 | any events generated in the interrim, and the scheduled/started preamble. 
So: 15 | 16 | * WFT Completed 17 | * \[Commands\] (optional) 18 | * \[Events\] (optional) 19 | * WFT Scheduled 20 | * WFT Started 21 | 22 | Commands and events are both "optional" in the sense that: 23 | 24 | Workflow code, after being woken up, might not do anything, and thus generate no new commands 25 | 26 | There may be no events for more nuanced reasons: 27 | 28 | 1. The workflow might have been running a long-running local activity. In such cases, the workflow 29 | must "workflow task heartbeat" in order to avoid timing out the workflow task. This means 30 | completing the WFT with no commands while the LA is ongoing. 31 | 2. The workflow might have received an update, which does not come as an event in history, but 32 | rather as a "protocol message" attached to the task. 33 | 3. Server can forcibly generate a new WFT with some obscure APIs 34 | 35 | Core does not consider such empty WFT sequences as worthy of waking lang (on replay - as a new 36 | task, they always will), since nothing meaningful has happened. Thus, they are grouped together 37 | as part of a "logical" WFT with the last WFT that had any real work in it. 38 | 39 | ## Possible issues as of this writing (5/25) 40 | 41 | The "new WFT force-issued by server" case would, currently, not cause a wakeup on replay for the 42 | reasons discussed above. In some obscure edge cases (inspecting workflow clock) this could cause 43 | NDE. 44 | 45 | ### Possible solutions 46 | 47 | * Core can attach a flag on WFT completes in order to be explicit that that WFT may be skipped on 48 | replay. IE: During WFT heartbeating for LAs. 49 | * We could legislate that server should never send empty WFTs. 
Seemingly the only case of this 50 | is 51 | the [obscure api](https://github.com/temporalio/temporal/blob/d189737aa2ed1b07c221abb9fbdd28ecf68f0492/proto/internal/temporal/server/api/adminservice/v1/service.proto#L151) 52 | -------------------------------------------------------------------------------- /cargo-tokio-console.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export RUSTFLAGS="--cfg tokio_unstable" 4 | 5 | cargo "$1" --features "tokio-console" "${@:2}" 6 | -------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "temporal-client" 3 | version = "0.1.0" 4 | edition = "2024" 5 | authors = ["Spencer Judge "] 6 | license-file = { workspace = true } 7 | description = "Clients for interacting with Temporal Clusters" 8 | homepage = "https://temporal.io/" 9 | repository = "https://github.com/temporalio/sdk-core" 10 | keywords = ["temporal", "workflow"] 11 | categories = ["development-tools"] 12 | 13 | [features] 14 | telemetry = ["dep:opentelemetry"] 15 | 16 | [dependencies] 17 | anyhow = "1.0" 18 | async-trait = "0.1" 19 | backoff = "0.4" 20 | base64 = "0.22" 21 | derive_builder = { workspace = true } 22 | derive_more = { workspace = true } 23 | futures-util = { version = "0.3", default-features = false } 24 | futures-retry = "0.6.0" 25 | http = "1.1.0" 26 | http-body-util = "0.1" 27 | hyper = { version = "1.4.1" } 28 | hyper-util = "0.1.6" 29 | opentelemetry = { workspace = true, features = ["metrics"], optional = true } 30 | parking_lot = "0.12" 31 | slotmap = "1.0" 32 | thiserror = { workspace = true } 33 | tokio = "1.1" 34 | tonic = { workspace = true, features = ["tls", "tls-roots"] } 35 | tower = { version = "0.5", features = ["util"] } 36 | tracing = "0.1" 37 | url = "2.2" 38 | uuid = { version = "1.1", features = ["v4"] } 39 | 40 | 
[dependencies.temporal-sdk-core-protos] 41 | path = "../sdk-core-protos" 42 | 43 | [dependencies.temporal-sdk-core-api] 44 | path = "../core-api" 45 | 46 | [dev-dependencies] 47 | assert_matches = "1" 48 | mockall = "0.13" 49 | rstest = "0.25" 50 | 51 | [lints] 52 | workspace = true 53 | -------------------------------------------------------------------------------- /client/src/proxy.rs: -------------------------------------------------------------------------------- 1 | use base64::prelude::*; 2 | use http_body_util::Empty; 3 | use hyper::{body::Bytes, header}; 4 | use hyper_util::{ 5 | client::legacy::Client, 6 | rt::{TokioExecutor, TokioIo}, 7 | }; 8 | use std::{ 9 | future::Future, 10 | pin::Pin, 11 | task::{Context, Poll}, 12 | }; 13 | use tokio::net::TcpStream; 14 | use tonic::transport::{Channel, Endpoint}; 15 | use tower::{Service, service_fn}; 16 | 17 | /// Options for HTTP CONNECT proxy. 18 | #[derive(Clone, Debug)] 19 | pub struct HttpConnectProxyOptions { 20 | /// The host:port to proxy through. 21 | pub target_addr: String, 22 | /// Optional HTTP basic auth for the proxy as user/pass tuple. 23 | pub basic_auth: Option<(String, String)>, 24 | } 25 | 26 | impl HttpConnectProxyOptions { 27 | /// Create a channel from the given endpoint that uses the HTTP CONNECT proxy. 
28 | pub async fn connect_endpoint( 29 | &self, 30 | endpoint: &Endpoint, 31 | ) -> Result { 32 | let proxy_options = self.clone(); 33 | let svc_fn = service_fn(move |uri: tonic::transport::Uri| { 34 | let proxy_options = proxy_options.clone(); 35 | async move { proxy_options.connect(uri).await } 36 | }); 37 | endpoint.connect_with_connector(svc_fn).await 38 | } 39 | 40 | async fn connect( 41 | &self, 42 | uri: tonic::transport::Uri, 43 | ) -> anyhow::Result { 44 | debug!("Connecting to {} via proxy at {}", uri, self.target_addr); 45 | // Create CONNECT request 46 | let mut req_build = hyper::Request::builder().method("CONNECT").uri(uri); 47 | if let Some((user, pass)) = &self.basic_auth { 48 | let creds = BASE64_STANDARD.encode(format!("{}:{}", user, pass)); 49 | req_build = req_build.header(header::PROXY_AUTHORIZATION, format!("Basic {}", creds)); 50 | } 51 | let req = req_build.body(Empty::::new())?; 52 | 53 | // We have to create a client with a specific connector because Hyper is 54 | // not letting us change the HTTP/2 authority 55 | let client = Client::builder(TokioExecutor::new()) 56 | .build(OverrideAddrConnector(self.target_addr.clone())); 57 | 58 | // Send request 59 | let res = client.request(req).await?; 60 | if res.status().is_success() { 61 | Ok(hyper::upgrade::on(res).await?) 
62 | } else { 63 | Err(anyhow::anyhow!( 64 | "CONNECT call failed with status: {}", 65 | res.status() 66 | )) 67 | } 68 | } 69 | } 70 | 71 | #[derive(Clone)] 72 | struct OverrideAddrConnector(String); 73 | 74 | impl Service for OverrideAddrConnector { 75 | type Response = TokioIo; 76 | 77 | type Error = anyhow::Error; 78 | 79 | type Future = Pin> + Send>>; 80 | 81 | fn poll_ready(&mut self, _ctx: &mut Context<'_>) -> Poll> { 82 | Poll::Ready(Ok(())) 83 | } 84 | 85 | fn call(&mut self, _uri: hyper::Uri) -> Self::Future { 86 | let target_addr = self.0.clone(); 87 | let fut = async move { Ok(TokioIo::new(TcpStream::connect(target_addr).await?)) }; 88 | Box::pin(fut) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /core-api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "temporal-sdk-core-api" 3 | version = "0.1.0" 4 | edition = "2024" 5 | authors = ["Spencer Judge "] 6 | license-file = { workspace = true } 7 | description = "Interface definitions for the Temporal Core SDK" 8 | homepage = "https://temporal.io/" 9 | repository = "https://github.com/temporalio/sdk-core" 10 | keywords = ["temporal", "workflow"] 11 | categories = ["development-tools"] 12 | 13 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 14 | 15 | [features] 16 | otel_impls = ["dep:opentelemetry"] 17 | 18 | [dependencies] 19 | async-trait = "0.1" 20 | derive_builder = { workspace = true } 21 | derive_more = { workspace = true } 22 | opentelemetry = { workspace = true, optional = true } 23 | prost = { workspace = true } 24 | serde_json = "1.0" 25 | thiserror = { workspace = true } 26 | tonic = { workspace = true } 27 | tracing-core = "0.1" 28 | url = "2.3" 29 | 30 | [dependencies.temporal-sdk-core-protos] 31 | path = "../sdk-core-protos" 32 | version = "0.1" 33 | 34 | [lints] 35 | workspace = true 36 | 
-------------------------------------------------------------------------------- /core-api/src/errors.rs: -------------------------------------------------------------------------------- 1 | //! Error types exposed by public APIs 2 | 3 | use temporal_sdk_core_protos::coresdk::activity_result::ActivityExecutionResult; 4 | 5 | /// Errors thrown by [crate::Worker::validate] 6 | #[derive(thiserror::Error, Debug)] 7 | pub enum WorkerValidationError { 8 | /// The namespace provided to the worker does not exist on the server. 9 | #[error("Namespace {namespace} was not found or otherwise could not be described: {source:?}")] 10 | NamespaceDescribeError { 11 | source: tonic::Status, 12 | namespace: String, 13 | }, 14 | } 15 | 16 | /// Errors thrown by [crate::Worker] polling methods 17 | #[derive(thiserror::Error, Debug)] 18 | pub enum PollError { 19 | /// [crate::Worker::shutdown] was called, and there are no more tasks to be handled from this 20 | /// poll function. Lang must call [crate::Worker::complete_workflow_activation], 21 | /// [crate::Worker::complete_activity_task], or 22 | /// [crate::Worker::complete_nexus_task] for any remaining tasks, and then may exit. 23 | #[error("Core is shut down and there are no more tasks of this kind")] 24 | ShutDown, 25 | /// Unhandled error when calling the temporal server. Core will attempt to retry any non-fatal 26 | /// errors, so lang should consider this fatal. 27 | #[error("Unhandled grpc error when polling: {0:?}")] 28 | TonicError(#[from] tonic::Status), 29 | } 30 | 31 | /// Errors thrown by [crate::Worker::complete_workflow_activation] 32 | #[derive(thiserror::Error, Debug)] 33 | #[allow(clippy::large_enum_variant)] 34 | pub enum CompleteWfError { 35 | /// Lang SDK sent us a malformed workflow completion. This likely means a bug in the lang sdk. 
36 | #[error("Lang SDK sent us a malformed workflow completion for run ({run_id}): {reason}")] 37 | MalformedWorkflowCompletion { 38 | /// Reason the completion was malformed 39 | reason: String, 40 | /// The run associated with the completion 41 | run_id: String, 42 | }, 43 | } 44 | 45 | /// Errors thrown by [crate::Worker::complete_activity_task] 46 | #[derive(thiserror::Error, Debug)] 47 | pub enum CompleteActivityError { 48 | /// Lang SDK sent us a malformed activity completion. This likely means a bug in the lang sdk. 49 | #[error("Lang SDK sent us a malformed activity completion ({reason}): {completion:?}")] 50 | MalformedActivityCompletion { 51 | /// Reason the completion was malformed 52 | reason: String, 53 | /// The completion, which may not be included to avoid unnecessary copies. 54 | completion: Option, 55 | }, 56 | } 57 | 58 | /// Errors thrown by [crate::Worker::complete_nexus_task] 59 | #[derive(thiserror::Error, Debug)] 60 | pub enum CompleteNexusError { 61 | /// Lang SDK sent us a malformed nexus completion. This likely means a bug in the lang sdk. 62 | #[error("Lang SDK sent us a malformed nexus completion: {reason}")] 63 | MalformedNexusCompletion { 64 | /// Reason the completion was malformed 65 | reason: String, 66 | }, 67 | /// Nexus has not been enabled on this worker. If a user registers any Nexus handlers, the 68 | #[error("Nexus is not enabled on this worker")] 69 | NexusNotEnabled, 70 | } 71 | 72 | /// Errors we can encounter during workflow processing which we may treat as either WFT failures 73 | /// or whole-workflow failures depending on user preference. 
74 | #[derive(Clone, Debug, Eq, PartialEq, Hash)] 75 | pub enum WorkflowErrorType { 76 | /// A nondeterminism error 77 | Nondeterminism, 78 | } 79 | -------------------------------------------------------------------------------- /core/benches/workflow_replay.rs: -------------------------------------------------------------------------------- 1 | use criterion::{Criterion, criterion_group, criterion_main}; 2 | use futures_util::StreamExt; 3 | use std::time::Duration; 4 | use temporal_sdk::{WfContext, WorkflowFunction}; 5 | use temporal_sdk_core::replay::HistoryForReplay; 6 | use temporal_sdk_core_protos::DEFAULT_WORKFLOW_TYPE; 7 | use temporal_sdk_core_test_utils::{canned_histories, replay_sdk_worker}; 8 | 9 | pub fn criterion_benchmark(c: &mut Criterion) { 10 | let tokio_runtime = tokio::runtime::Builder::new_current_thread() 11 | .enable_time() 12 | .build() 13 | .unwrap(); 14 | let _g = tokio_runtime.enter(); 15 | 16 | let num_timers = 10; 17 | let t = canned_histories::long_sequential_timers(num_timers as usize); 18 | let hist = HistoryForReplay::new( 19 | t.get_full_history_info().unwrap().into(), 20 | "whatever".to_string(), 21 | ); 22 | 23 | c.bench_function("Small history replay", |b| { 24 | b.iter(|| { 25 | tokio_runtime.block_on(async { 26 | let func = timers_wf(num_timers); 27 | let mut worker = replay_sdk_worker([hist.clone()]); 28 | worker.register_wf(DEFAULT_WORKFLOW_TYPE, func); 29 | worker.run().await.unwrap(); 30 | }) 31 | }) 32 | }); 33 | 34 | let num_tasks = 50; 35 | let t = canned_histories::lots_of_big_signals(num_tasks); 36 | let hist = HistoryForReplay::new( 37 | t.get_full_history_info().unwrap().into(), 38 | "whatever".to_string(), 39 | ); 40 | 41 | c.bench_function("Large payloads history replay", |b| { 42 | b.iter(|| { 43 | tokio_runtime.block_on(async { 44 | let func = big_signals_wf(num_tasks); 45 | let mut worker = replay_sdk_worker([hist.clone()]); 46 | worker.register_wf(DEFAULT_WORKFLOW_TYPE, func); 47 | worker.run().await.unwrap(); 
48 | }) 49 | }) 50 | }); 51 | } 52 | 53 | criterion_group!(benches, criterion_benchmark); 54 | criterion_main!(benches); 55 | 56 | fn timers_wf(num_timers: u32) -> WorkflowFunction { 57 | WorkflowFunction::new(move |ctx: WfContext| async move { 58 | for _ in 1..=num_timers { 59 | ctx.timer(Duration::from_secs(1)).await; 60 | } 61 | Ok(().into()) 62 | }) 63 | } 64 | 65 | fn big_signals_wf(num_tasks: usize) -> WorkflowFunction { 66 | WorkflowFunction::new(move |ctx: WfContext| async move { 67 | let mut sigs = ctx.make_signal_channel("bigsig"); 68 | for _ in 1..=num_tasks { 69 | for _ in 1..=5 { 70 | let _ = sigs.next().await.unwrap(); 71 | } 72 | } 73 | 74 | Ok(().into()) 75 | }) 76 | } 77 | -------------------------------------------------------------------------------- /core/src/abstractions/take_cell.rs: -------------------------------------------------------------------------------- 1 | use parking_lot::Mutex; 2 | use std::sync::atomic::{AtomicBool, Ordering}; 3 | 4 | /// Implements something a bit like a `OnceLock`, but starts already initialized and allows you 5 | /// to take everything out of it only once in a thread-safe way. This isn't optimized for super 6 | /// fast-path usage. 
7 | pub(crate) struct TakeCell { 8 | taken: AtomicBool, 9 | data: Mutex>, 10 | } 11 | 12 | impl TakeCell { 13 | pub(crate) fn new(val: T) -> Self { 14 | Self { 15 | taken: AtomicBool::new(false), 16 | data: Mutex::new(Some(val)), 17 | } 18 | } 19 | 20 | /// If the cell has not already been taken from, takes the value and returns it 21 | pub(crate) fn take_once(&self) -> Option { 22 | if self.taken.load(Ordering::Acquire) { 23 | return None; 24 | } 25 | self.taken.store(true, Ordering::Release); 26 | self.data.lock().take() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /core/src/core_tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod activity_tasks; 2 | mod child_workflows; 3 | mod determinism; 4 | mod local_activities; 5 | mod queries; 6 | mod replay_flag; 7 | mod updates; 8 | mod workers; 9 | mod workflow_cancels; 10 | mod workflow_tasks; 11 | 12 | use crate::{ 13 | Worker, 14 | errors::PollError, 15 | test_help::{MockPollCfg, build_mock_pollers, canned_histories, mock_worker, test_worker_cfg}, 16 | worker::client::mocks::{mock_manual_workflow_client, mock_workflow_client}, 17 | }; 18 | use futures_util::FutureExt; 19 | use std::{sync::LazyLock, time::Duration}; 20 | use temporal_sdk_core_api::{Worker as WorkerTrait, worker::PollerBehavior}; 21 | use temporal_sdk_core_protos::coresdk::workflow_completion::WorkflowActivationCompletion; 22 | use tokio::{sync::Barrier, time::sleep}; 23 | 24 | #[tokio::test] 25 | async fn after_shutdown_server_is_not_polled() { 26 | let t = canned_histories::single_timer("fake_timer"); 27 | let mh = MockPollCfg::from_resp_batches("fake_wf_id", t, [1], mock_workflow_client()); 28 | let mut mock = build_mock_pollers(mh); 29 | // Just so we don't have to deal w/ cache overflow 30 | mock.worker_cfg(|cfg| cfg.max_cached_workflows = 1); 31 | let worker = mock_worker(mock); 32 | 33 | let res = 
worker.poll_workflow_activation().await.unwrap(); 34 | assert_eq!(res.jobs.len(), 1); 35 | worker 36 | .complete_workflow_activation(WorkflowActivationCompletion::empty(res.run_id)) 37 | .await 38 | .unwrap(); 39 | worker.shutdown().await; 40 | assert_matches!( 41 | worker.poll_workflow_activation().await.unwrap_err(), 42 | PollError::ShutDown 43 | ); 44 | worker.finalize_shutdown().await; 45 | } 46 | 47 | // Better than cloning a billion arcs... 48 | static BARR: LazyLock = LazyLock::new(|| Barrier::new(3)); 49 | 50 | #[tokio::test] 51 | async fn shutdown_interrupts_both_polls() { 52 | let mut mock_client = mock_manual_workflow_client(); 53 | mock_client 54 | .expect_poll_activity_task() 55 | .times(1) 56 | .returning(move |_, _| { 57 | async move { 58 | BARR.wait().await; 59 | sleep(Duration::from_secs(1)).await; 60 | Ok(Default::default()) 61 | } 62 | .boxed() 63 | }); 64 | mock_client 65 | .expect_poll_workflow_task() 66 | .times(1) 67 | .returning(move |_, _| { 68 | async move { 69 | BARR.wait().await; 70 | sleep(Duration::from_secs(1)).await; 71 | Ok(Default::default()) 72 | } 73 | .boxed() 74 | }); 75 | 76 | let worker = Worker::new_test( 77 | test_worker_cfg() 78 | // Need only 1 concurrent pollers for mock expectations to work here 79 | .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) 80 | .activity_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)) 81 | .build() 82 | .unwrap(), 83 | mock_client, 84 | ); 85 | tokio::join! 
{ 86 | async { 87 | assert_matches!(worker.poll_activity_task().await.unwrap_err(), 88 | PollError::ShutDown); 89 | }, 90 | async { 91 | assert_matches!(worker.poll_workflow_activation().await.unwrap_err(), 92 | PollError::ShutDown); 93 | }, 94 | async { 95 | // Give polling a bit to get stuck, then shutdown 96 | BARR.wait().await; 97 | worker.shutdown().await; 98 | } 99 | }; 100 | } 101 | -------------------------------------------------------------------------------- /core/src/debug_client.rs: -------------------------------------------------------------------------------- 1 | //! Defines an http client that is used for the VSCode debug plugin and any other associated 2 | //! machinery. 3 | 4 | use anyhow::Context; 5 | use hyper::http::HeaderValue; 6 | use prost::Message; 7 | use reqwest::{self, header::HeaderMap}; 8 | use std::time::Duration; 9 | use temporal_sdk_core_protos::temporal::api::history::v1::History; 10 | use url::Url; 11 | 12 | /// A client for interacting with the VSCode debug plugin 13 | #[derive(Clone)] 14 | pub struct DebugClient { 15 | /// URL for the local instance of the debugger server 16 | debugger_url: Url, 17 | client: reqwest::Client, 18 | } 19 | 20 | #[derive(Clone, serde::Serialize)] 21 | struct WFTStartedMsg { 22 | event_id: i64, 23 | } 24 | 25 | impl DebugClient { 26 | /// Create a new instance of a DebugClient with the specified url and client name/version 27 | /// strings. 
28 | pub fn new( 29 | url: String, 30 | client_name: &str, 31 | client_version: &str, 32 | ) -> Result { 33 | let mut client = reqwest::ClientBuilder::new(); 34 | client = client.default_headers({ 35 | let mut hm = HeaderMap::new(); 36 | hm.insert("temporal-client-name", HeaderValue::from_str(client_name)?); 37 | hm.insert( 38 | "temporal-client-version", 39 | HeaderValue::from_str(client_version)?, 40 | ); 41 | hm 42 | }); 43 | let client = client.build()?; 44 | Ok(DebugClient { 45 | debugger_url: Url::parse(&url).context( 46 | "debugger url malformed, is the TEMPORAL_DEBUGGER_PLUGIN_URL env var correct?", 47 | )?, 48 | client, 49 | }) 50 | } 51 | 52 | /// Get the history from the instance of the debug plugin server 53 | pub async fn get_history(&self) -> Result { 54 | let url = self.debugger_url.join("history")?; 55 | let resp = self.client.get(url).send().await?; 56 | 57 | let bytes = resp.bytes().await?; 58 | Ok(History::decode(bytes)?) 59 | } 60 | 61 | /// Post to current-wft-started to tell the debug plugin which event we've most recently made it 62 | /// to 63 | pub async fn post_wft_started( 64 | &self, 65 | event_id: &i64, 66 | ) -> Result { 67 | let url = self.debugger_url.join("current-wft-started")?; 68 | Ok(self 69 | .client 70 | .get(url) 71 | .timeout(Duration::from_secs(5)) 72 | .json(&WFTStartedMsg { 73 | event_id: *event_id, 74 | }) 75 | .send() 76 | .await?) 
77 | } 78 | } 79 | -------------------------------------------------------------------------------- /core/src/telemetry/prometheus_server.rs: -------------------------------------------------------------------------------- 1 | use http_body_util::Full; 2 | use hyper::{Method, Request, Response, body::Bytes, header::CONTENT_TYPE, service::service_fn}; 3 | use hyper_util::{ 4 | rt::{TokioExecutor, TokioIo}, 5 | server::conn::auto, 6 | }; 7 | use opentelemetry_prometheus::PrometheusExporter; 8 | use prometheus::{Encoder, Registry, TextEncoder}; 9 | use std::net::{SocketAddr, TcpListener}; 10 | use temporal_sdk_core_api::telemetry::PrometheusExporterOptions; 11 | use tokio::io; 12 | 13 | /// Exposes prometheus metrics for scraping 14 | pub(super) struct PromServer { 15 | listener: TcpListener, 16 | registry: Registry, 17 | } 18 | 19 | impl PromServer { 20 | pub(super) fn new( 21 | opts: &PrometheusExporterOptions, 22 | ) -> Result<(Self, PrometheusExporter), anyhow::Error> { 23 | let registry = Registry::new(); 24 | let exporter = opentelemetry_prometheus::exporter() 25 | .without_scope_info() 26 | .with_registry(registry.clone()); 27 | let exporter = if !opts.counters_total_suffix { 28 | exporter.without_counter_suffixes() 29 | } else { 30 | exporter 31 | }; 32 | let exporter = if !opts.unit_suffix { 33 | exporter.without_units() 34 | } else { 35 | exporter 36 | }; 37 | Ok(( 38 | Self { 39 | listener: TcpListener::bind(opts.socket_addr)?, 40 | registry, 41 | }, 42 | exporter.build()?, 43 | )) 44 | } 45 | 46 | pub(super) async fn run(self) -> Result<(), anyhow::Error> { 47 | // Spin up hyper server to serve metrics for scraping. We use hyper since we already depend 48 | // on it via Tonic. 
49 | self.listener.set_nonblocking(true)?; 50 | let listener = tokio::net::TcpListener::from_std(self.listener)?; 51 | loop { 52 | let (stream, _) = listener.accept().await?; 53 | let io = TokioIo::new(stream); 54 | let regclone = self.registry.clone(); 55 | tokio::task::spawn(async move { 56 | let server = auto::Builder::new(TokioExecutor::new()); 57 | if let Err(e) = server 58 | .serve_connection( 59 | io, 60 | service_fn(move |req| metrics_req(req, regclone.clone())), 61 | ) 62 | .await 63 | { 64 | warn!("Error serving metrics connection: {:?}", e); 65 | } 66 | }); 67 | } 68 | } 69 | 70 | pub(super) fn bound_addr(&self) -> io::Result { 71 | self.listener.local_addr() 72 | } 73 | } 74 | 75 | /// Serves prometheus metrics in the expected format for scraping 76 | async fn metrics_req( 77 | req: Request, 78 | registry: Registry, 79 | ) -> Result>, hyper::Error> { 80 | let response = match (req.method(), req.uri().path()) { 81 | (&Method::GET, "/metrics") => { 82 | let mut buffer = vec![]; 83 | let encoder = TextEncoder::new(); 84 | let metric_families = registry.gather(); 85 | encoder.encode(&metric_families, &mut buffer).unwrap(); 86 | 87 | Response::builder() 88 | .status(200) 89 | .header(CONTENT_TYPE, encoder.format_type()) 90 | .body(buffer.into()) 91 | .unwrap() 92 | } 93 | _ => Response::builder() 94 | .status(404) 95 | .body(vec![].into()) 96 | .expect("Can't fail to construct empty resp"), 97 | }; 98 | Ok(response) 99 | } 100 | -------------------------------------------------------------------------------- /core/src/worker/slot_supplier.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /core/src/worker/tuner/fixed_size.rs: -------------------------------------------------------------------------------- 1 | use std::{marker::PhantomData, sync::Arc}; 2 | use temporal_sdk_core_api::worker::{ 3 | SlotKind, SlotMarkUsedContext, 
SlotReleaseContext, SlotReservationContext, SlotSupplier, 4 | SlotSupplierPermit, 5 | }; 6 | use tokio::sync::Semaphore; 7 | 8 | /// Implements [SlotSupplier] with a fixed number of slots 9 | pub struct FixedSizeSlotSupplier { 10 | sem: Arc, 11 | _pd: PhantomData, 12 | } 13 | 14 | impl FixedSizeSlotSupplier { 15 | /// Create a slot supplier which will only hand out at most the provided number of slots 16 | pub fn new(size: usize) -> Self { 17 | Self { 18 | sem: Arc::new(Semaphore::new(size)), 19 | _pd: Default::default(), 20 | } 21 | } 22 | } 23 | 24 | #[async_trait::async_trait] 25 | impl SlotSupplier for FixedSizeSlotSupplier 26 | where 27 | SK: SlotKind + Send + Sync, 28 | { 29 | type SlotKind = SK; 30 | 31 | async fn reserve_slot(&self, _: &dyn SlotReservationContext) -> SlotSupplierPermit { 32 | let perm = self 33 | .sem 34 | .clone() 35 | .acquire_owned() 36 | .await 37 | .expect("inner semaphore is never closed"); 38 | SlotSupplierPermit::with_user_data(perm) 39 | } 40 | 41 | fn try_reserve_slot(&self, _: &dyn SlotReservationContext) -> Option { 42 | let perm = self.sem.clone().try_acquire_owned(); 43 | perm.ok().map(SlotSupplierPermit::with_user_data) 44 | } 45 | 46 | fn mark_slot_used(&self, _ctx: &dyn SlotMarkUsedContext) {} 47 | 48 | fn release_slot(&self, _ctx: &dyn SlotReleaseContext) {} 49 | 50 | fn available_slots(&self) -> Option { 51 | Some(self.sem.available_permits()) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /docker/docker-compose-ci.yaml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | services: 4 | otel-collector: 5 | image: otel/opentelemetry-collector:latest 6 | command: [ '--config=/etc/otel-collector-ci.yaml' ] 7 | volumes: 8 | - ../etc/otel-collector-ci.yaml:/etc/otel-collector-ci.yaml 9 | ports: 10 | # - "1888:1888" # pprof extension 11 | # It's useful to be able to manually inspect metrics during dev 12 | - '8888:8888' # 
Prometheus metrics exposed by the collector 13 | - '8889:8889' # Prometheus exporter metrics 14 | # - "13133:13133" # health_check extension 15 | - '4317:4317' # OTLP gRPC receiver 16 | - '4318:4318' # OTLP HTTP receiver 17 | # - "55679:55679" # zpages extension 18 | 19 | prometheus: 20 | container_name: prometheus 21 | image: prom/prometheus:latest 22 | volumes: 23 | - ../etc/prometheus.yaml:/etc/prometheus/prometheus.yml 24 | ports: 25 | - '9090:9090' 26 | -------------------------------------------------------------------------------- /docker/docker-compose-telem.yaml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | services: 4 | jaeger: 5 | image: jaegertracing/all-in-one:latest 6 | ports: 7 | - '16686:16686' 8 | - '14268' 9 | - '14250' 10 | 11 | otel-collector: 12 | image: otel/opentelemetry-collector:latest 13 | command: [ '--config=/etc/otel-collector-config.yaml' ] 14 | volumes: 15 | - ../etc/otel-collector-config.yaml:/etc/otel-collector-config.yaml 16 | ports: 17 | # - "1888:1888" # pprof extension 18 | # It's useful to be able to manually inspect metrics during dev 19 | - '8888:8888' # Prometheus metrics exposed by the collector 20 | - '8889:8889' # Prometheus exporter metrics 21 | # - "13133:13133" # health_check extension 22 | - '4317:4317' # OTLP gRPC receiver 23 | # - "55679:55679" # zpages extension 24 | depends_on: 25 | - jaeger 26 | 27 | prometheus: 28 | network_mode: host 29 | container_name: prometheus 30 | image: prom/prometheus:latest 31 | volumes: 32 | - ../etc/prometheus.yaml:/etc/prometheus/prometheus.yml 33 | ports: 34 | - '9090:9090' 35 | -------------------------------------------------------------------------------- /docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | services: 4 | cassandra: 5 | image: cassandra:3.11 6 | logging: 7 | driver: none 8 | # ports: 9 | # - '9042:9042' 10 | 11 | 
temporal: 12 | image: temporalio/auto-setup:1.22.0 13 | ports: 14 | - "7233:7233" 15 | - "7234:7234" 16 | # - "7235:7235" 17 | # - "7239:7239" 18 | # - "6933:6933" 19 | # - "6934:6934" 20 | # - "6935:6935" 21 | # - "6939:6939" 22 | environment: 23 | - "CASSANDRA_SEEDS=cassandra" 24 | - "DYNAMIC_CONFIG_FILE_PATH=/etc/dynamic-config.yaml" 25 | volumes: 26 | - ../../etc/dynamic-config.yaml:/etc/dynamic-config.yaml 27 | depends_on: 28 | - cassandra 29 | 30 | temporal-web: 31 | image: temporalio/ui:2.8.0 32 | logging: 33 | driver: none 34 | ports: 35 | - "8080:8080" 36 | environment: 37 | - TEMPORAL_ADDRESS=temporal:7233 38 | - TEMPORAL_CORS_ORIGINS=http://localhost:3000 39 | depends_on: 40 | - temporal 41 | -------------------------------------------------------------------------------- /etc/dynamic-config.yaml: -------------------------------------------------------------------------------- 1 | system.enableActivityLocalDispatch: 2 | - value: true 3 | system.enableEagerWorkflowStart: 4 | - value: true 5 | frontend.workerVersioningWorkflowAPIs: 6 | - value: true 7 | frontend.workerVersioningDataAPIs: 8 | - value: true 9 | frontend.enableUpdateWorkflowExecution: 10 | - value: true 11 | frontend.enableUpdateWorkflowExecutionAsyncAccepted: 12 | - value: true 13 | -------------------------------------------------------------------------------- /etc/otel-collector-ci.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | endpoint: 0.0.0.0:4317 6 | http: 7 | endpoint: 0.0.0.0:4318 8 | 9 | exporters: 10 | prometheus: 11 | endpoint: '0.0.0.0:8889' 12 | namespace: temporal_sdk 13 | 14 | debug: 15 | 16 | processors: 17 | batch: 18 | 19 | extensions: 20 | health_check: 21 | pprof: 22 | endpoint: :1888 23 | zpages: 24 | endpoint: :55679 25 | 26 | service: 27 | extensions: [ pprof, zpages, health_check ] 28 | pipelines: 29 | traces: 30 | receivers: [ otlp ] 31 | processors: [ batch ] 32 | 
exporters: [ debug, ] 33 | metrics: 34 | receivers: [ otlp ] 35 | processors: [ batch ] 36 | exporters: [ debug, prometheus ] 37 | -------------------------------------------------------------------------------- /etc/otel-collector-config.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | endpoint: 0.0.0.0:4317 6 | http: 7 | endpoint: 0.0.0.0:4318 8 | 9 | exporters: 10 | prometheus: 11 | endpoint: '0.0.0.0:8889' 12 | namespace: temporal_sdk 13 | 14 | debug: 15 | 16 | otlp/jaeger: 17 | endpoint: jaeger:14250 18 | 19 | processors: 20 | batch: 21 | 22 | extensions: 23 | health_check: 24 | pprof: 25 | endpoint: :1888 26 | zpages: 27 | endpoint: :55679 28 | 29 | service: 30 | extensions: [ pprof, zpages, health_check ] 31 | pipelines: 32 | traces: 33 | receivers: [ otlp ] 34 | processors: [ batch ] 35 | exporters: [ debug, otlp/jaeger ] 36 | metrics: 37 | receivers: [ otlp ] 38 | processors: [ batch ] 39 | exporters: [ debug, prometheus ] 40 | -------------------------------------------------------------------------------- /etc/prometheus.yaml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: 'otel-collector' 3 | scrape_interval: 1s 4 | static_configs: 5 | - targets: [ 'otel-collector:8889' ] 6 | - targets: [ 'otel-collector:8888' ] 7 | - job_name: 'direct-prom' 8 | scrape_interval: 1s 9 | static_configs: 10 | - targets: [ 'localhost:9999' ] 11 | -------------------------------------------------------------------------------- /etc/regen-depgraph.sh: -------------------------------------------------------------------------------- 1 | # Run this from the repo root 2 | cargo depgraph \ 3 | --focus temporal-sdk,temporal-sdk-core-protos,temporal-client,temporal-sdk-core-api,temporal-sdk-core,rustfsm \ 4 | --dev-deps \ 5 | | dot -Tsvg > etc/deps.svg 
-------------------------------------------------------------------------------- /fsm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rustfsm" 3 | version = "0.1.0" 4 | authors = ["Spencer Judge "] 5 | edition = "2024" 6 | license-file = "LICENSE.txt" 7 | description = "Define state machines that can accept events and produce commands" 8 | homepage = "https://temporal.io/" 9 | repository = "https://github.com/temporalio/sdk-core" 10 | keywords = ["state-machine", "fsm"] 11 | categories = ["data-structures"] 12 | 13 | [dependencies] 14 | rustfsm_procmacro = { version = "0.1", path = "rustfsm_procmacro" } 15 | rustfsm_trait = { version = "0.1", path = "rustfsm_trait" } 16 | 17 | [package.metadata.workspaces] 18 | independent = true 19 | 20 | [lints] 21 | workspace = true 22 | -------------------------------------------------------------------------------- /fsm/LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2021 Temporal Technologies, Inc. All Rights Reserved 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /fsm/README.md: -------------------------------------------------------------------------------- 1 | A procmacro and trait for implementing state machines in Rust 2 | 3 | We should move this to it's own repo once we're done iterating. -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rustfsm_procmacro" 3 | version = "0.1.0" 4 | authors = ["Spencer Judge "] 5 | edition = "2024" 6 | license-file = "LICENSE.txt" 7 | description = "Procmacro sub-crate of the `rustfsm` crate" 8 | 9 | [lib] 10 | proc-macro = true 11 | 12 | [[test]] 13 | name = "tests" 14 | path = "tests/progress.rs" 15 | 16 | [dependencies] 17 | derive_more = { workspace = true } 18 | proc-macro2 = "1.0" 19 | syn = { version = "2.0", features = ["default", "extra-traits"] } 20 | quote = "1.0" 21 | rustfsm_trait = { version = "0.1", path = "../rustfsm_trait" } 22 | 23 | [dev-dependencies] 24 | trybuild = { version = "1.0", features = ["diff"] } 25 | 26 | [package.metadata.workspaces] 27 | independent = true 28 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2021 Temporal Technologies, Inc. 
All Rights Reserved 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/progress.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | #[test] 4 | fn tests() { 5 | let t = trybuild::TestCases::new(); 6 | t.pass("tests/trybuild/*_pass.rs"); 7 | t.compile_fail("tests/trybuild/*_fail.rs"); 8 | } 9 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | 5 | fsm! 
{ 6 | name SimpleMachine; command SimpleMachineCommand; error Infallible; 7 | 8 | One --(A)--> Two; 9 | One --(A)--> Two; 10 | } 11 | 12 | #[derive(Default, Clone)] 13 | pub struct One {} 14 | 15 | #[derive(Default, Clone)] 16 | pub struct Two {} 17 | 18 | fn main() {} 19 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/dupe_transitions_fail.stderr: -------------------------------------------------------------------------------- 1 | error: Duplicate transitions are not allowed! 2 | --> $DIR/dupe_transitions_fail.rs:5:1 3 | | 4 | 5 | / fsm! { 5 | 6 | | name SimpleMachine; command SimpleMachineCommand; error Infallible; 6 | 7 | | 7 | 8 | | One --(A)--> Two; 8 | 9 | | One --(A)--> Two; 9 | 10 | | } 10 | | |_^ 11 | | 12 | = note: this error originates in the macro `fsm` (in Nightly builds, run with -Z macro-backtrace for more info) 13 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/dynamic_dest_pass.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | extern crate rustfsm_trait as rustfsm; 4 | 5 | use rustfsm_procmacro::fsm; 6 | use rustfsm_trait::TransitionResult; 7 | use std::convert::Infallible; 8 | 9 | fsm! 
{ 10 | name SimpleMachine; command SimpleMachineCommand; error Infallible; 11 | 12 | One --(A(String), foo)--> Two; 13 | One --(A(String), foo)--> Three; 14 | 15 | Two --(B(String), bar)--> One; 16 | Two --(B(String), bar)--> Two; 17 | Two --(B(String), bar)--> Three; 18 | } 19 | 20 | #[derive(Default, Clone)] 21 | pub struct One {} 22 | impl One { 23 | fn foo(self, _: String) -> SimpleMachineTransition { 24 | TransitionResult::ok(vec![], Two {}.into()) 25 | } 26 | } 27 | 28 | #[derive(Default, Clone)] 29 | pub struct Two {} 30 | impl Two { 31 | fn bar(self, _: String) -> SimpleMachineTransition { 32 | TransitionResult::ok(vec![], Three {}.into()) 33 | } 34 | } 35 | 36 | #[derive(Default, Clone)] 37 | pub struct Three {} 38 | 39 | pub enum SimpleMachineCommand {} 40 | 41 | fn main() {} 42 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | 5 | fsm! 
{ 6 | One --(A)--> Two 7 | } 8 | 9 | #[derive(Default, Clone)] 10 | pub struct One {} 11 | #[derive(Default, Clone)] 12 | pub struct Two {} 13 | 14 | fn main() {} 15 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/forgot_name_fail.stderr: -------------------------------------------------------------------------------- 1 | error: expected `name` 2 | --> $DIR/forgot_name_fail.rs:6:5 3 | | 4 | 6 | One --(A)--> Two 5 | | ^^^ 6 | 7 | error: The fsm definition should begin with `name MachineName; command CommandType; error ErrorType;` optionally followed by `shared_state SharedStateType;` 8 | --> $DIR/forgot_name_fail.rs:6:5 9 | | 10 | 6 | One --(A)--> Two 11 | | ^^^ 12 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/handler_arg_pass.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | use rustfsm_trait::TransitionResult; 5 | use std::convert::Infallible; 6 | 7 | fsm! 
{ 8 | name Simple; command SimpleCommand; error Infallible; 9 | 10 | One --(A(String), on_a)--> Two 11 | } 12 | 13 | #[derive(Default, Clone)] 14 | pub struct One {} 15 | impl One { 16 | fn on_a(self, _: String) -> SimpleTransition { 17 | SimpleTransition::ok(vec![], Two {}) 18 | } 19 | } 20 | 21 | #[derive(Default, Clone)] 22 | pub struct Two {} 23 | 24 | pub enum SimpleCommand {} 25 | 26 | fn main() { 27 | // state enum exists with both states 28 | let _ = SimpleState::One(One {}); 29 | let _ = SimpleState::Two(Two {}); 30 | // Avoid dead code warning 31 | let _ = SimpleEvents::A("yo".to_owned()); 32 | } 33 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/handler_pass.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | use rustfsm_trait::TransitionResult; 5 | use std::convert::Infallible; 6 | 7 | fsm! { 8 | name Simple; command SimpleCommand; error Infallible; 9 | 10 | One --(A, on_a)--> Two 11 | } 12 | 13 | #[derive(Default, Clone)] 14 | pub struct One {} 15 | impl One { 16 | fn on_a(self) -> SimpleTransition { 17 | SimpleTransition::ok(vec![], Two {}) 18 | } 19 | } 20 | 21 | #[derive(Default, Clone)] 22 | pub struct Two {} 23 | 24 | pub enum SimpleCommand {} 25 | 26 | fn main() { 27 | // state enum exists with both states 28 | let _ = SimpleState::One(One {}); 29 | let _ = SimpleState::Two(Two {}); 30 | let _ = SimpleEvents::A; 31 | } 32 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/medium_complex_pass.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | extern crate rustfsm_trait as rustfsm; 4 | 5 | use rustfsm_procmacro::fsm; 6 | use rustfsm_trait::TransitionResult; 7 | use std::convert::Infallible; 8 | 9 | fsm! 
{ 10 | name SimpleMachine; command SimpleMachineCommand; error Infallible; 11 | 12 | One --(A(String), foo)--> Two; 13 | One --(B)--> Two; 14 | Two --(B)--> One; 15 | Two --(C, baz)--> One 16 | } 17 | 18 | #[derive(Default, Clone)] 19 | pub struct One {} 20 | impl One { 21 | fn foo(self, _: String) -> SimpleMachineTransition { 22 | TransitionResult::default() 23 | } 24 | } 25 | impl From for One { 26 | fn from(_: Two) -> Self { 27 | One {} 28 | } 29 | } 30 | 31 | #[derive(Default, Clone)] 32 | pub struct Two {} 33 | impl Two { 34 | fn baz(self) -> SimpleMachineTransition { 35 | TransitionResult::default() 36 | } 37 | } 38 | impl From for Two { 39 | fn from(_: One) -> Self { 40 | Two {} 41 | } 42 | } 43 | 44 | pub enum SimpleMachineCommand {} 45 | 46 | fn main() {} 47 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | use rustfsm_trait::TransitionResult; 5 | use std::convert::Infallible; 6 | 7 | fsm! 
{ 8 | name SimpleMachine; command SimpleMachineCommand; error Infallible; 9 | 10 | One --(A)--> Two; 11 | Two --(B)--> One; 12 | } 13 | 14 | #[derive(Default, Clone)] 15 | pub struct One {} 16 | 17 | #[derive(Default, Clone)] 18 | pub struct Two {} 19 | // We implement one of them because trait bound satisfaction error output is not deterministically 20 | // ordered 21 | impl From for Two { 22 | fn from(_: One) -> Self { 23 | Two {} 24 | } 25 | } 26 | 27 | enum SimpleMachineCommand {} 28 | 29 | fn main() {} 30 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/no_handle_conversions_require_into_fail.stderr: -------------------------------------------------------------------------------- 1 | error[E0277]: the trait bound `One: From` is not satisfied 2 | --> tests/trybuild/no_handle_conversions_require_into_fail.rs:11:5 3 | | 4 | 11 | Two --(B)--> One; 5 | | ^^^ the trait `From` is not implemented for `One` 6 | | 7 | = note: required for `Two` to implement `Into` 8 | note: required by a bound in `TransitionResult::::from` 9 | --> $WORKSPACE/fsm/rustfsm_trait/src/lib.rs 10 | | 11 | | pub fn from(current_state: CurrentState) -> Self 12 | | ---- required by a bound in this associated function 13 | | where 14 | | CurrentState: Into, 15 | | ^^^^^^^^ required by this bound in `TransitionResult::::from` 16 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/simple_pass.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | use rustfsm_trait::TransitionResult; 5 | use std::convert::Infallible; 6 | 7 | fsm! 
{ 8 | name SimpleMachine; command SimpleMachineCommand; error Infallible; 9 | 10 | One --(A)--> Two 11 | } 12 | 13 | #[derive(Default, Clone)] 14 | pub struct One {} 15 | 16 | #[derive(Default, Clone)] 17 | pub struct Two {} 18 | impl From for Two { 19 | fn from(_: One) -> Self { 20 | Two {} 21 | } 22 | } 23 | 24 | pub enum SimpleMachineCommand {} 25 | 26 | fn main() { 27 | // state enum exists with both states 28 | let _ = SimpleMachineState::One(One {}); 29 | let _ = SimpleMachineState::Two(Two {}); 30 | // Event enum exists 31 | let _ = SimpleMachineEvents::A; 32 | } 33 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | 5 | fsm! { 6 | name Simple; command SimpleCommand; error Infallible; 7 | 8 | One --(A{foo: String}, on_a)--> Two 9 | } 10 | 11 | #[derive(Default, Clone)] 12 | pub struct One {} 13 | #[derive(Default, Clone)] 14 | pub struct Two {} 15 | 16 | pub enum SimpleCommand {} 17 | 18 | fn main() {} 19 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/struct_event_variant_fail.stderr: -------------------------------------------------------------------------------- 1 | error: Struct variants are not supported for events 2 | --> $DIR/struct_event_variant_fail.rs:8:12 3 | | 4 | 8 | One --(A{foo: String}, on_a)--> Two 5 | | ^ 6 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | 5 | fsm! 
{ 6 | name Simple; command SimpleCmd; error Infallible; 7 | 8 | One --(A(Foo, Bar), on_a)--> Two 9 | } 10 | 11 | fn main() {} 12 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/tuple_more_item_event_variant_fail.stderr: -------------------------------------------------------------------------------- 1 | error: Only tuple variants with exactly one item are supported for events 2 | --> $DIR/tuple_more_item_event_variant_fail.rs:8:12 3 | | 4 | 8 | One --(A(Foo, Bar), on_a)--> Two 5 | | ^ 6 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.rs: -------------------------------------------------------------------------------- 1 | extern crate rustfsm_trait as rustfsm; 2 | 3 | use rustfsm_procmacro::fsm; 4 | 5 | fsm! { 6 | name Simple; command SimpleCmd; error Infallible; 7 | 8 | One --(A(), on_a)--> Two 9 | } 10 | 11 | fn main() {} 12 | -------------------------------------------------------------------------------- /fsm/rustfsm_procmacro/tests/trybuild/tuple_zero_item_event_variant_fail.stderr: -------------------------------------------------------------------------------- 1 | error: Only tuple variants with exactly one item are supported for events 2 | --> $DIR/tuple_zero_item_event_variant_fail.rs:8:12 3 | | 4 | 8 | One --(A(), on_a)--> Two 5 | | ^ 6 | -------------------------------------------------------------------------------- /fsm/rustfsm_trait/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rustfsm_trait" 3 | version = "0.1.0" 4 | authors = ["Spencer Judge "] 5 | edition = "2024" 6 | license-file = "LICENSE.txt" 7 | description = "Trait sub-crate of the `rustfsm` crate" 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | 13 | 
[package.metadata.workspaces] 14 | independent = true 15 | -------------------------------------------------------------------------------- /fsm/rustfsm_trait/LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2021 Temporal Technologies, Inc. All Rights Reserved 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /fsm/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use rustfsm_procmacro::fsm; 2 | pub use rustfsm_trait::{MachineError, StateMachine, TransitionResult}; 3 | -------------------------------------------------------------------------------- /histories/ends_empty_wft_complete.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/ends_empty_wft_complete.bin -------------------------------------------------------------------------------- /histories/evict_while_la_running_no_interference-16_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/evict_while_la_running_no_interference-16_history.bin -------------------------------------------------------------------------------- /histories/evict_while_la_running_no_interference-23_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/evict_while_la_running_no_interference-23_history.bin -------------------------------------------------------------------------------- /histories/evict_while_la_running_no_interference-85_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/evict_while_la_running_no_interference-85_history.bin -------------------------------------------------------------------------------- /histories/fail_wf_task.bin: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/fail_wf_task.bin -------------------------------------------------------------------------------- /histories/long_local_activity_with_update-0_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/long_local_activity_with_update-0_history.bin -------------------------------------------------------------------------------- /histories/long_local_activity_with_update-1_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/long_local_activity_with_update-1_history.bin -------------------------------------------------------------------------------- /histories/long_local_activity_with_update-2_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/long_local_activity_with_update-2_history.bin -------------------------------------------------------------------------------- /histories/long_local_activity_with_update-3_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/long_local_activity_with_update-3_history.bin -------------------------------------------------------------------------------- /histories/old_change_marker_format.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/old_change_marker_format.bin 
-------------------------------------------------------------------------------- /histories/timer_workflow_history.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/temporalio/sdk-core/8913cdb27d18f2297fa3f76cc10911874bf89df2/histories/timer_workflow_history.bin -------------------------------------------------------------------------------- /integ-with-otel.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Run integ tests with OTel collector export enabled 4 | export TEMPORAL_INTEG_OTEL_URL="grpc://localhost:4317" 5 | export TEMPORAL_TRACING_FILTER="temporal_sdk_core=DEBUG" 6 | 7 | cargo integ-test "${@:1}" 8 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | imports_granularity="Crate" -------------------------------------------------------------------------------- /sdk-core-protos/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "temporal-sdk-core-protos" 3 | version = "0.1.0" 4 | edition = "2024" 5 | authors = ["Spencer Judge "] 6 | license-file = { workspace = true } 7 | description = "Protobuf definitions for Temporal SDKs Core/Lang interface" 8 | homepage = "https://temporal.io/" 9 | repository = "https://github.com/temporalio/sdk-core" 10 | keywords = ["temporal", "workflow"] 11 | categories = ["development-tools"] 12 | 13 | [features] 14 | history_builders = ["uuid", "rand"] 15 | serde_serialize = [] 16 | 17 | [dependencies] 18 | anyhow = "1.0" 19 | base64 = "0.22" 20 | derive_more = { workspace = true } 21 | prost = { workspace = true } 22 | prost-wkt = "0.6" 23 | prost-wkt-types = "0.6" 24 | rand = { version = "0.9", optional = true } 25 | serde = { version = "1.0", features = ["derive"] } 26 | serde_json = "1.0" 27 | 
thiserror = { workspace = true } 28 | tonic = { workspace = true } 29 | uuid = { version = "1.1", features = ["v4"], optional = true } 30 | 31 | [build-dependencies] 32 | tonic-build = { workspace = true } 33 | prost-build = "0.13" 34 | prost-wkt-build = "0.6" 35 | 36 | [lints] 37 | workspace = true 38 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | on: push 3 | jobs: 4 | build: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - uses: actions/setup-go@v4 9 | with: 10 | go-version: '1.21' 11 | check-latest: true 12 | - name: Install Protoc 13 | uses: arduino/setup-protoc@v2 14 | - name: Validate and build go bindings from the proto files 15 | run: make ci-build 16 | env: 17 | BUF_INPUT_HTTPS_USERNAME: ${{github.actor}} 18 | BUF_INPUT_HTTPS_PASSWORD: ${{github.token}} 19 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/.github/workflows/push-to-buf.yml: -------------------------------------------------------------------------------- 1 | name: Push to Buf Registry 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v**' 7 | branches: 8 | - main 9 | permissions: 10 | contents: read 11 | jobs: 12 | publish: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout repo 16 | uses: actions/checkout@v4 17 | - uses: bufbuild/buf-action@v1 18 | with: 19 | version: 1.49.0 20 | token: ${{ secrets.BUF_TEMPORALIO_TOKEN }} 21 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /.gen 3 | /.vscode 4 | -------------------------------------------------------------------------------- 
/sdk-core-protos/protos/api_cloud_upstream/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in 2 | # the repo. Unless a later match takes precedence, 3 | # @temporalio/saas will be requested for review when 4 | # someone opens a pull request. 5 | 6 | * @temporalio/saas 7 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2023 Temporal Technologies Inc. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/Makefile: -------------------------------------------------------------------------------- 1 | $(VERBOSE).SILENT: 2 | ############################# Main targets ############################# 3 | ci-build: install proto 4 | 5 | # Install dependencies. 6 | install: buf-install grpc-install openapiv2-install 7 | 8 | # Run all linters and compile proto files. 9 | proto: grpc 10 | ######################################################################## 11 | 12 | ##### Variables ###### 13 | ifndef GOPATH 14 | GOPATH := $(shell go env GOPATH) 15 | endif 16 | 17 | GOBIN := $(if $(shell go env GOBIN),$(shell go env GOBIN),$(GOPATH)/bin) 18 | SHELL := PATH=$(GOBIN):$(PATH) /bin/sh 19 | 20 | COLOR := "\e[1;36m%s\e[0m\n" 21 | 22 | PROTO_OUT := .gen 23 | $(PROTO_OUT): 24 | mkdir $(PROTO_OUT) 25 | 26 | ##### Compile proto files for go ##### 27 | grpc: buf-lint buf-breaking go-grpc 28 | 29 | go-grpc: clean $(PROTO_OUT) 30 | printf $(COLOR) "Compile for go-gRPC..." 31 | buf generate --output $(PROTO_OUT) 32 | 33 | ##### Plugins & tools ##### 34 | buf-install: 35 | printf $(COLOR) "Install/update buf..." 36 | go install github.com/bufbuild/buf/cmd/buf@v1.25.1 37 | 38 | grpc-install: 39 | printf $(COLOR) "Install/update go and grpc protoc gen ..." 40 | go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.31 41 | go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3 42 | 43 | openapiv2-install: 44 | printf $(COLOR) "Install/update openapiv2 protoc gen..." 45 | go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.16.2 46 | 47 | ##### Linters ##### 48 | buf-lint: 49 | printf $(COLOR) "Run buf linter..." 50 | buf lint 51 | 52 | buf-breaking: 53 | @printf $(COLOR) "Run buf breaking changes check against main branch..." 
54 | buf breaking --against 'https://github.com/temporalio/api-cloud.git#branch=main' 55 | 56 | ##### Clean ##### 57 | clean: 58 | printf $(COLOR) "Delete generated go files..." 59 | rm -rf $(PROTO_OUT) 60 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/README.md: -------------------------------------------------------------------------------- 1 | # Temporal Cloud Operations API (Public Preview) 2 | 3 | > aka the Cloud Ops API 4 | > These apis (proto files) are currently offered as a Public Preview. While they are production worthy, they are subject to change. Please reach out to Temporal Support if you have questions. 5 | 6 | ## How to use 7 | 8 | To use the Cloud Ops API in your project, preform the following 4 steps: 9 | 1. Copy over the protobuf files under [temporal](temporal) directory to your desired project directory 10 | 2. Use [gRPC](https://grpc.io/docs/) to compile and generate code in your desired programming language, typically handled as a part of your code build process 11 | 3. Create a client connection in your code using a Temporal Cloud API Key (see [Samples](#samples) below) 12 | 4. Use the Cloud Operations API services to automate Cloud Operations, such as creating users or namespaces 13 | 14 | ### API Version 15 | 16 | The client is expected to pass in a `temporal-cloud-api-version` header with the api version identifier with every request it makes to the apis. The backend will use the version to safely mutate resources. The `temporal:versioning:min_version` label specifies the minimum version of the API that supports the field. 
17 | 18 | Current Version `v0.4.0` 19 | 20 | ### URL 21 | 22 | The grpc URL the clients should connect to: 23 | ``` 24 | saas-api.tmprl.cloud:443 25 | ``` 26 | 27 | ## Samples 28 | 29 | Refer to the [cloud-samples-go](https://github.com/temporalio/cloud-samples-go/blob/main/cmd/worker/README.md) sample repository for how to use the cloud ops api in Go. 30 | > This sample demonstrates how to automate Temporal Cloud operations using Temporal Workflows that make Cloud Ops API requests within Workflow Activities ([Worker Sample README](https://github.com/temporalio/cloud-samples-go/tree/main/cmd/worker)). 31 | > See [here](https://github.com/temporalio/cloud-samples-go/blob/60d5cbca8696c87fb184efc56f5ae117561213d2/client/api/client.go#L16) for a quick reference showing you how to connect to Temporal Cloud with an API Key for the Cloud Ops API in Go. 32 | 33 | Refer to the [temporal-cloud-api-client-typescript](https://github.com/steveandroulakis/temporal-cloud-api-client-typescript) sample repository for how to use the cloud ops api in Typescript. 34 | Refer to the [temporal-cloud-api-client-java](https://github.com/steveandroulakis/temporal-cloud-api-client-java) sample repository for how to use the cloud ops api in Java. 35 | Refer to the [temporal-cloud-api-client-kotlin](https://github.com/steveandroulakis/temporal-cloud-api-client-kotlin) sample repository for how to use the cloud ops api in Kotlin. 36 | > The Java, Typescript, and Kotlin sample apps all provide a simple HTML UI that demonstrates how to use the Cloud Ops API to CRUD Namespaces and Users. 
37 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/VERSION: -------------------------------------------------------------------------------- 1 | v0.5.1 2 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/buf.gen.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | plugins: 3 | - plugin: go 4 | out: go 5 | opt: paths=source_relative 6 | - plugin: go-grpc 7 | out: go 8 | opt: 9 | - paths=source_relative 10 | - plugin: openapiv2 11 | out: openapiv2 12 | opt: 13 | - output_format=yaml 14 | - allow_delete_body 15 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/buf.lock: -------------------------------------------------------------------------------- 1 | # Generated by buf. DO NOT EDIT. 2 | version: v1 3 | deps: 4 | - remote: buf.build 5 | owner: googleapis 6 | repository: googleapis 7 | commit: e93e34f48be043dab55be31b4b47f458 8 | digest: shake256:93dbe51c27606999eef918360df509485a4d272e79aaed6d0016940379a9b06d316fc5228b7b50cca94bb310f34c5fc5955ce7474f655f0d0a224c4121dda3c1 9 | - remote: buf.build 10 | owner: temporalio 11 | repository: api 12 | commit: 95c35fbcc7f647cbb0facec6fb60aca8 13 | digest: shake256:f40de31043fe8dbf433395ebd2c7fef6a395582a856da1476cf5bb8ec32c7091a2c21208590effa59715bcceceec8ab2a6331919eb260d72b1091d9c76fd535b 14 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | name: buf.build/temporalio/cloud-api 3 | deps: 4 | - buf.build/googleapis/googleapis 5 | - buf.build/temporalio/api:v1.43.0 6 | breaking: 7 | use: 8 | - FILE 9 | lint: 10 | use: 11 | - DEFAULT 12 | 
-------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/account/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.account.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/account/v1;account"; 6 | option java_package = "io.temporal.api.cloud.account.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Account::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Account.V1"; 11 | 12 | import "temporal/api/cloud/resource/v1/message.proto"; 13 | 14 | message MetricsSpec { 15 | // The ca cert(s) in PEM format that clients connecting to the metrics endpoint can use for authentication. 16 | // This must only be one value, but the CA can have a chain. 17 | bytes accepted_client_ca = 2; 18 | } 19 | 20 | message AccountSpec { 21 | // The metrics specification for this account. 22 | // If not specified, metrics will not be enabled. 23 | MetricsSpec metrics = 1; 24 | } 25 | 26 | message Metrics { 27 | // The prometheus metrics endpoint uri. 28 | // This is only populated when the metrics is enabled in the metrics specification. 29 | string uri = 1; 30 | } 31 | 32 | message Account { 33 | // The id of the account. 34 | string id = 1; 35 | // The account specification. 36 | AccountSpec spec = 2; 37 | // The current version of the account specification. 38 | // The next update operation will have to include this version. 39 | string resource_version = 3; 40 | // The current state of the account. 41 | temporal.api.cloud.resource.v1.ResourceState state = 4; 42 | // The id of the async operation that is updating the account, if any. 43 | string async_operation_id = 5; 44 | // Information related to metrics. 
45 | Metrics metrics = 6; 46 | } 47 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/nexus/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.nexus.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/nexus/v1;nexus"; 6 | option java_package = "io.temporal.api.cloud.nexus.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Nexus::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Nexus.V1"; 11 | 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/api/cloud/resource/v1/message.proto"; 14 | import "google/protobuf/timestamp.proto"; 15 | 16 | message EndpointSpec { 17 | // The name of the endpoint. Must be unique within an account. 18 | // The name must match `^[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9]$`. 19 | // This field is mutable. 20 | string name = 1; 21 | 22 | // Indicates where the endpoint should forward received nexus requests to. 23 | EndpointTargetSpec target_spec = 2; 24 | 25 | // The set of policies (e.g. authorization) for the endpoint. Each request's caller 26 | // must match with at least one of the specs to be accepted by the endpoint. 27 | // This field is mutable. 28 | repeated EndpointPolicySpec policy_specs = 3; 29 | 30 | // Deprecated: Not supported after v0.4.0 api version. Use description instead. 31 | // temporal:versioning:max_version=v0.4.0 32 | string description_deprecated = 4 [deprecated = true]; 33 | 34 | // The markdown description of the endpoint - optional. 35 | // temporal:versioning:min_version=v0.4.0 36 | temporal.api.common.v1.Payload description = 5; 37 | } 38 | 39 | message EndpointTargetSpec { 40 | oneof variant { 41 | // A target spec for routing nexus requests to a specific cloud namespace worker. 
42 | WorkerTargetSpec worker_target_spec = 1; 43 | } 44 | } 45 | 46 | message WorkerTargetSpec { 47 | // The target cloud namespace to route requests to. Namespace must be in same account as the endpoint. This field is mutable. 48 | string namespace_id = 1; 49 | 50 | // The task queue on the cloud namespace to route requests to. This field is mutable. 51 | string task_queue = 2; 52 | } 53 | 54 | message EndpointPolicySpec { 55 | oneof variant { 56 | // A policy spec that allows one caller namespace to access the endpoint. 57 | AllowedCloudNamespacePolicySpec allowed_cloud_namespace_policy_spec = 1; 58 | } 59 | } 60 | 61 | message AllowedCloudNamespacePolicySpec { 62 | // The namespace that is allowed to call into this endpoint. Calling namespace must be in same account as the endpoint. 63 | string namespace_id = 1; 64 | } 65 | 66 | // An endpoint that receives and then routes Nexus requests 67 | message Endpoint { 68 | // The id of the endpoint. This is generated by the server and is immutable. 69 | string id = 1; 70 | 71 | // The current version of the endpoint specification. 72 | // The next update operation must include this version. 73 | string resource_version = 2; 74 | 75 | // The endpoint specification. 76 | EndpointSpec spec = 3; 77 | 78 | // The current state of the endpoint. 79 | // For any failed state, reach out to Temporal Cloud support for remediation. 80 | temporal.api.cloud.resource.v1.ResourceState state = 4; 81 | 82 | // The id of any ongoing async operation that is creating, updating, or deleting the endpoint, if any. 83 | string async_operation_id = 5; 84 | 85 | // The date and time when the endpoint was created. 86 | google.protobuf.Timestamp created_time = 6; 87 | 88 | // The date and time when the endpoint was last modified. 
89 | google.protobuf.Timestamp last_modified_time = 7; 90 | } 91 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/operation/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.operation.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/operation/v1;operation"; 6 | option java_package = "io.temporal.api.cloud.operation.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Operation::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Operation.V1"; 11 | 12 | import "google/protobuf/duration.proto"; 13 | import "google/protobuf/timestamp.proto"; 14 | import "google/protobuf/any.proto"; 15 | 16 | message AsyncOperation { 17 | // The operation id. 18 | string id = 1; 19 | // The current state of this operation. 20 | // Possible values are: pending, in_progress, failed, cancelled, fulfilled. 21 | // Deprecated: Not supported after v0.3.0 api version. Use state instead. 22 | // temporal:versioning:max_version=v0.3.0 23 | string state_deprecated = 2 [deprecated = true]; 24 | // The current state of this operation. 25 | // temporal:versioning:min_version=v0.3.0 26 | // temporal:enums:replaces=state_deprecated 27 | State state = 9; 28 | // The recommended duration to check back for an update in the operation's state. 29 | google.protobuf.Duration check_duration = 3; 30 | // The type of operation being performed. 31 | string operation_type = 4; 32 | // The input to the operation being performed. 33 | // 34 | // (-- api-linter: core::0146::any=disabled --) 35 | google.protobuf.Any operation_input = 5; 36 | // If the operation failed, the reason for the failure. 37 | string failure_reason = 6; 38 | // The date and time when the operation initiated. 
39 | google.protobuf.Timestamp started_time = 7; 40 | // The date and time when the operation completed. 41 | google.protobuf.Timestamp finished_time = 8; 42 | 43 | enum State { 44 | STATE_UNSPECIFIED = 0; 45 | STATE_PENDING = 1; // The operation is pending. 46 | STATE_IN_PROGRESS = 2; // The operation is in progress. 47 | STATE_FAILED = 3; // The operation failed, check failure_reason for more details. 48 | STATE_CANCELLED = 4; // The operation was cancelled. 49 | STATE_FULFILLED = 5; // The operation was fulfilled. 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/region/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.region.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/region/v1;region"; 6 | option java_package = "io.temporal.api.cloud.region.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Region::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Region.V1"; 11 | 12 | message Region { 13 | // The id of the temporal cloud region. 14 | string id = 1; 15 | // The name of the cloud provider that's hosting the region. 16 | // Currently only "aws" is supported. 17 | // Deprecated: Not supported after v0.3.0 api version. Use cloud_provider instead. 18 | // temporal:versioning:max_version=v0.3.0 19 | string cloud_provider_deprecated = 2 [deprecated = true]; 20 | // The cloud provider that's hosting the region. 21 | // temporal:versioning:min_version=v0.3.0 22 | // temporal:enums:replaces=cloud_provider_deprecated 23 | CloudProvider cloud_provider = 5; 24 | // The region identifier as defined by the cloud provider. 25 | string cloud_provider_region = 3; 26 | // The human readable location of the region. 
27 | string location = 4; 28 | 29 | // The cloud provider that's hosting the region. 30 | enum CloudProvider { 31 | CLOUD_PROVIDER_UNSPECIFIED = 0; 32 | CLOUD_PROVIDER_AWS = 1; 33 | CLOUD_PROVIDER_GCP = 2; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/resource/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.resource.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/resource/v1;resource"; 6 | option java_package = "io.temporal.api.cloud.resource.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Resource::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Resource.V1"; 11 | 12 | 13 | enum ResourceState { 14 | RESOURCE_STATE_UNSPECIFIED = 0; 15 | RESOURCE_STATE_ACTIVATING = 1; // The resource is being activated. 16 | RESOURCE_STATE_ACTIVATION_FAILED = 2; // The resource failed to activate. This is an error state. Reach out to support for remediation. 17 | RESOURCE_STATE_ACTIVE = 3; // The resource is active and ready to use. 18 | RESOURCE_STATE_UPDATING = 4; // The resource is being updated. 19 | RESOURCE_STATE_UPDATE_FAILED = 5; // The resource failed to update. This is an error state. Reach out to support for remediation. 20 | RESOURCE_STATE_DELETING = 6; // The resource is being deleted. 21 | RESOURCE_STATE_DELETE_FAILED = 7; // The resource failed to delete. This is an error state. Reach out to support for remediation. 22 | RESOURCE_STATE_DELETED = 8; // The resource has been deleted. 23 | RESOURCE_STATE_SUSPENDED = 9; // The resource is suspended and not available for use. Reach out to support for remediation. 24 | RESOURCE_STATE_EXPIRED = 10; // The resource has expired and is no longer available for use. 
25 | } 26 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/sink/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.sink.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/sink/v1;sink"; 6 | option java_package = "io.temporal.api.cloud.sink.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Sink::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Sink.V1"; 11 | 12 | message S3Spec { 13 | // The IAM role that Temporal Cloud assumes for writing records to the customer's S3 bucket. 14 | string role_name = 1; 15 | 16 | // The name of the destination S3 bucket where Temporal will send data. 17 | string bucket_name = 2; 18 | 19 | // The region where the S3 bucket is located. 20 | string region = 3; 21 | 22 | // The AWS Key Management Service (KMS) ARN used for encryption. 23 | string kms_arn = 4; 24 | 25 | // The AWS account ID associated with the S3 bucket and the assumed role. 26 | string aws_account_id = 5; 27 | } 28 | 29 | message GCSSpec { 30 | // The customer service account ID that Temporal Cloud impersonates for writing records to the customer's GCS bucket. 31 | string sa_id = 1; 32 | 33 | // The name of the destination GCS bucket where Temporal will send data. 34 | string bucket_name = 2; 35 | 36 | // The GCP project ID associated with the GCS bucket and service account. 
37 | string gcp_project_id = 3; 38 | 39 | // The region of the gcs bucket 40 | string region = 4; 41 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_cloud_upstream/temporal/api/cloud/usage/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.cloud.usage.v1; 4 | 5 | option go_package = "go.temporal.io/api/cloud/usage/v1;usage"; 6 | option java_package = "io.temporal.api.cloud.usage.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Cloud::Usage::V1"; 10 | option csharp_namespace = "Temporalio.Api.Cloud.Usage.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | 14 | message Summary { 15 | // Start of UTC day for now (inclusive) 16 | google.protobuf.Timestamp start_time = 1; 17 | // End of UTC day for now (exclusive) 18 | google.protobuf.Timestamp end_time = 2; 19 | // Records grouped by namespace 20 | repeated RecordGroup record_groups = 3; 21 | // True if data for given time window is not fully available yet (e.g. delays) 22 | // When true, records for the given time range could still be added/updated in the future (until false) 23 | bool incomplete = 4; 24 | } 25 | 26 | message RecordGroup { 27 | // GroupBy keys and their values for this record group. Multiple fields are combined with logical AND. 
28 | repeated GroupBy group_bys = 1; 29 | repeated Record records = 2; 30 | } 31 | 32 | message GroupBy { 33 | GroupByKey key = 1; 34 | string value = 2; 35 | } 36 | 37 | message Record { 38 | RecordType type = 1; 39 | RecordUnit unit = 2; 40 | double value = 3; 41 | } 42 | 43 | enum RecordType { 44 | RECORD_TYPE_UNSPECIFIED = 0; 45 | RECORD_TYPE_ACTIONS = 1; 46 | RECORD_TYPE_ACTIVE_STORAGE = 2; 47 | RECORD_TYPE_RETAINED_STORAGE = 3; 48 | } 49 | 50 | enum RecordUnit { 51 | RECORD_UNIT_UNSPECIFIED = 0; 52 | RECORD_UNIT_NUMBER = 1; 53 | RECORD_UNIT_BYTE_SECONDS = 2; 54 | } 55 | 56 | enum GroupByKey { 57 | GROUP_BY_KEY_UNSPECIFIED = 0; 58 | GROUP_BY_KEY_NAMESPACE = 1; 59 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Syntax is here: 2 | # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax 3 | 4 | * @temporalio/server @temporalio/sdk 5 | api/temporal/api/sdk/* @temporalio/sdk 6 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | _**READ BEFORE MERGING:** All PRs require approval by both Server AND SDK teams before merging! This is why the number of required approvals is "2" and not "1"--two reviewers from the same team is NOT sufficient. 
If your PR is not approved by someone in BOTH teams, it may be summarily reverted._ 2 | 3 | 4 | **What changed?** 5 | 6 | 7 | 8 | **Why?** 9 | 10 | 11 | 12 | **Breaking changes** 13 | 14 | 15 | 16 | **Server PR** 17 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | pull_request: 4 | permissions: 5 | contents: read 6 | jobs: 7 | ci: 8 | name: ci 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions/setup-go@v4 13 | with: 14 | go-version: '^1.21' 15 | - uses: arduino/setup-protoc@v2 16 | - name: 'Setup jq' 17 | uses: dcarbone/install-jq-action@v2 18 | - run: make ci-build 19 | - name: Fail if the repo is dirty 20 | run: | 21 | if [[ -n $(git status --porcelain) ]]; then 22 | echo "Detected uncommitted changes." 23 | git status 24 | git diff 25 | exit 1 26 | fi 27 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/workflows/publish-docs.yml: -------------------------------------------------------------------------------- 1 | name: Publish docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | env: 9 | VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} 10 | VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} 11 | 12 | jobs: 13 | publish: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: Checkout repo 18 | uses: actions/checkout@v3 19 | - name: Generate docs 20 | run: | 21 | docker run -v $(pwd)/docs:/out -v $(pwd)/:/protos pseudomuto/protoc-gen-doc --doc_opt=html,index.html $(find temporal/api -type f -name "*.proto") 22 | - name: Deploy 23 | run: npx vercel deploy docs/ --prod --token=${{ secrets.VERCEL_TOKEN }} 24 | -------------------------------------------------------------------------------- 
/sdk-core-protos/protos/api_upstream/.github/workflows/push-to-buf.yml: -------------------------------------------------------------------------------- 1 | name: Push to Buf Registry 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v**' 7 | branches: 8 | - master 9 | permissions: 10 | contents: read 11 | jobs: 12 | publish: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout repo 16 | uses: actions/checkout@v4 17 | - uses: bufbuild/buf-action@v1 18 | with: 19 | version: 1.49.0 20 | token: ${{ secrets.BUF_TEMPORALIO_TOKEN }} 21 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/workflows/trigger-api-go-delete-release.yml: -------------------------------------------------------------------------------- 1 | name: "Trigger api-go delete release" 2 | 3 | on: 4 | release: 5 | types: [deleted] 6 | 7 | jobs: 8 | trigger-api-go-delete-release: 9 | uses: temporalio/api-go/.github/workflows/delete-release.yml@master 10 | with: 11 | tag: ${{ github.event.release.tag_name }} 12 | api_commit_sha: ${{ github.event.release.target_commitish }} 13 | secrets: inherit 14 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/workflows/trigger-api-go-publish-release.yml: -------------------------------------------------------------------------------- 1 | name: "Trigger api-go publish release" 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | trigger-api-go-publish-release: 9 | uses: temporalio/api-go/.github/workflows/publish-release.yml@master 10 | with: 11 | tag: ${{ github.event.release.tag_name }} 12 | api_commit_sha: ${{ github.event.release.target_commitish }} 13 | secrets: inherit 14 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.github/workflows/trigger-api-go-update.yml: 
-------------------------------------------------------------------------------- 1 | name: 'Trigger api-go Update' 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | workflow_dispatch: 8 | inputs: 9 | branch: 10 | description: "Branch in api-go repo to trigger update protos (default: master)" 11 | required: true 12 | default: master 13 | 14 | jobs: 15 | notify: 16 | name: 'Trigger api-go update' 17 | runs-on: ubuntu-latest 18 | 19 | defaults: 20 | run: 21 | shell: bash 22 | 23 | steps: 24 | - name: Generate token 25 | id: generate_token 26 | uses: actions/create-github-app-token@v1 27 | with: 28 | app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} 29 | private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} 30 | owner: ${{ github.repository_owner }} 31 | repositories: api-go # generate a token with permissions to trigger GHA in api-go repo 32 | 33 | - name: Dispatch api-go Github Action 34 | env: 35 | GH_TOKEN: ${{ steps.generate_token.outputs.token }} 36 | EVENT_PUSH_BRANCH: ${{ github.event.ref }} 37 | EVENT_PUSH_COMMIT_AUTHOR: ${{ github.event.head_commit.author.name }} 38 | EVENT_PUSH_COMMIT_AUTHOR_EMAIL: ${{ github.event.head_commit.author.email }} 39 | EVENT_PUSH_COMMIT_MESSAGE: ${{ github.event.head_commit.message }} 40 | EVENT_WF_DISPATCH_BRANCH: ${{ github.event.inputs.branch }} 41 | run: | 42 | case "${{ github.event_name }}" in 43 | "push") 44 | BRANCH="${EVENT_PUSH_BRANCH#refs/heads/}" 45 | COMMIT_AUTHOR="${EVENT_PUSH_COMMIT_AUTHOR}" 46 | COMMIT_AUTHOR_EMAIL="${EVENT_PUSH_COMMIT_AUTHOR_EMAIL}" 47 | COMMIT_MESSAGE="${EVENT_PUSH_COMMIT_MESSAGE}" 48 | ;; 49 | 50 | "workflow_dispatch") 51 | BRANCH="${EVENT_WF_DISPATCH_BRANCH}" 52 | COMMIT_AUTHOR="Temporal Data" 53 | COMMIT_AUTHOR_EMAIL="commander-data@temporal.io" 54 | COMMIT_MESSAGE="Update proto" 55 | ;; 56 | esac 57 | 58 | gh workflow run update-proto.yml -R https://github.com/temporalio/api-go \ 59 | -r master \ 60 | -f branch="${BRANCH}" \ 61 | -f commit_author="${COMMIT_AUTHOR}" \ 62 | -f 
commit_author_email="${COMMIT_AUTHOR_EMAIL}" \ 63 | -f commit_message="${COMMIT_MESSAGE}" 64 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /.gen 3 | /.vscode 4 | /.stamp 5 | *~ -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2025 Temporal Technologies Inc. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
-------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/README.md: -------------------------------------------------------------------------------- 1 | # Temporal proto files 2 | 3 | This repository contains both the protobuf descriptors and OpenAPI documentation for the Temporal platform. 4 | 5 | ## How to use 6 | 7 | Install as git submodule to the project. 8 | 9 | ## License 10 | 11 | MIT License, please see [LICENSE](LICENSE) for details. 12 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/buf.gen.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | plugins: 3 | - plugin: buf.build/protocolbuffers/go:v1.31.0 4 | out: ./ 5 | opt: 6 | - paths=source_relative 7 | - plugin: buf.build/grpc/go:v1.3.0 8 | out: ./ 9 | opt: 10 | - paths=source_relative 11 | - plugin: buf.build/grpc-ecosystem/gateway:v2.18.0 12 | out: ./ 13 | opt: 14 | - paths=source_relative 15 | - allow_patch_feature=false 16 | - name: go-helpers 17 | out: ./ 18 | path: ["go", "run", "./protoc-gen-go-helpers"] 19 | opt: 20 | - paths=source_relative 21 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/buf.lock: -------------------------------------------------------------------------------- 1 | # Generated by buf. DO NOT EDIT. 
2 | version: v1 3 | deps: 4 | - remote: buf.build 5 | owner: googleapis 6 | repository: googleapis 7 | commit: 28151c0d0a1641bf938a7672c500e01d 8 | digest: shake256:49215edf8ef57f7863004539deff8834cfb2195113f0b890dd1f67815d9353e28e668019165b9d872395871eeafcbab3ccfdb2b5f11734d3cca95be9e8d139de 9 | - remote: buf.build 10 | owner: grpc-ecosystem 11 | repository: grpc-gateway 12 | commit: 048ae6ff94ca4476b3225904b1078fad 13 | digest: shake256:e5250bf2d999516c02206d757502b902e406f35c099d0e869dc3e4f923f6870fe0805a9974c27df0695462937eae90cd4d9db90bb9a03489412560baa74a87b6 14 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | name: buf.build/temporalio/api 3 | deps: 4 | - buf.build/grpc-ecosystem/grpc-gateway 5 | - buf.build/googleapis/googleapis 6 | build: 7 | excludes: 8 | # Buf won't accept a local dependency on the google protos but we need them 9 | # to run api-linter, so just tell buf it ignore it 10 | - google 11 | breaking: 12 | use: 13 | - WIRE_JSON 14 | ignore: 15 | - google 16 | # TODO (yuri) remove this 17 | - temporal/api/workflow/v1/message.proto 18 | lint: 19 | use: 20 | - DEFAULT 21 | ignore: 22 | - google 23 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/google/api/annotations.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | syntax = "proto3"; 16 | 17 | package google.api; 18 | 19 | import "google/api/http.proto"; 20 | import "google/protobuf/descriptor.proto"; 21 | 22 | option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; 23 | option java_multiple_files = true; 24 | option java_outer_classname = "AnnotationsProto"; 25 | option java_package = "com.google.api"; 26 | option objc_class_prefix = "GAPI"; 27 | 28 | extend google.protobuf.MethodOptions { 29 | // See `HttpRule`. 30 | HttpRule http = 72295728; 31 | } 32 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/google/protobuf/empty.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. 
nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option go_package = "google.golang.org/protobuf/types/known/emptypb"; 36 | option java_package = "com.google.protobuf"; 37 | option java_outer_classname = "EmptyProto"; 38 | option java_multiple_files = true; 39 | option objc_class_prefix = "GPB"; 40 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 41 | option cc_enable_arenas = true; 42 | 43 | // A generic empty message that you can re-use to avoid defining duplicated 44 | // empty messages in your APIs. A typical example is to use it as the request 45 | // or the response type of an API method. 
For instance: 46 | // 47 | // service Foo { 48 | // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); 49 | // } 50 | // 51 | message Empty {} 52 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/openapi/payload_description.txt: -------------------------------------------------------------------------------- 1 | Arbitrary payload data in an unconstrained format. 2 | This may be activity input parameters, a workflow result, a memo, etc. 3 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/activity/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.activity.v1; 4 | 5 | option go_package = "go.temporal.io/api/activity/v1;activity"; 6 | option java_package = "io.temporal.api.activity.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Activity::V1"; 10 | option csharp_namespace = "Temporalio.Api.Activity.V1"; 11 | 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/api/taskqueue/v1/message.proto"; 14 | 15 | import "google/protobuf/duration.proto"; 16 | 17 | message ActivityOptions { 18 | temporal.api.taskqueue.v1.TaskQueue task_queue = 1; 19 | 20 | // Indicates how long the caller is willing to wait for an activity completion. Limits how long 21 | // retries will be attempted. Either this or `start_to_close_timeout` must be specified. 22 | // 23 | // (-- api-linter: core::0140::prepositions=disabled 24 | // aip.dev/not-precedent: "to" is used to indicate interval. --) 25 | google.protobuf.Duration schedule_to_close_timeout = 2; 26 | // Limits time an activity task can stay in a task queue before a worker picks it up. 
This 27 | // timeout is always non retryable, as all a retry would achieve is to put it back into the same 28 | // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not 29 | // specified. 30 | // 31 | // (-- api-linter: core::0140::prepositions=disabled 32 | // aip.dev/not-precedent: "to" is used to indicate interval. --) 33 | google.protobuf.Duration schedule_to_start_timeout = 3; 34 | // Maximum time an activity is allowed to execute after being picked up by a worker. This 35 | // timeout is always retryable. Either this or `schedule_to_close_timeout` must be 36 | // specified. 37 | // 38 | // (-- api-linter: core::0140::prepositions=disabled 39 | // aip.dev/not-precedent: "to" is used to indicate interval. --) 40 | google.protobuf.Duration start_to_close_timeout = 4; 41 | // Maximum permitted time between successful worker heartbeats. 42 | google.protobuf.Duration heartbeat_timeout = 5; 43 | 44 | temporal.api.common.v1.RetryPolicy retry_policy = 6; 45 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/batch_operation.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "BatchOperationProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum BatchOperationType { 13 | BATCH_OPERATION_TYPE_UNSPECIFIED = 0; 14 | BATCH_OPERATION_TYPE_TERMINATE = 1; 15 | BATCH_OPERATION_TYPE_CANCEL = 2; 16 | BATCH_OPERATION_TYPE_SIGNAL = 3; 17 | BATCH_OPERATION_TYPE_DELETE = 4; 18 | BATCH_OPERATION_TYPE_RESET = 5; 19 | BATCH_OPERATION_TYPE_UPDATE_EXECUTION_OPTIONS = 6; 20 | } 21 | 22 | enum 
BatchOperationState { 23 | BATCH_OPERATION_STATE_UNSPECIFIED = 0; 24 | BATCH_OPERATION_STATE_RUNNING = 1; 25 | BATCH_OPERATION_STATE_COMPLETED = 2; 26 | BATCH_OPERATION_STATE_FAILED = 3; 27 | } 28 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/command_type.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "CommandTypeProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Whenever this list of command types is changed do change the function shouldBufferEvent in mutableStateBuilder.go to make sure to do the correct event ordering. 
13 | enum CommandType { 14 | COMMAND_TYPE_UNSPECIFIED = 0; 15 | COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK = 1; 16 | COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK = 2; 17 | COMMAND_TYPE_START_TIMER = 3; 18 | COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION = 4; 19 | COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION = 5; 20 | COMMAND_TYPE_CANCEL_TIMER = 6; 21 | COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION = 7; 22 | COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION = 8; 23 | COMMAND_TYPE_RECORD_MARKER = 9; 24 | COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION = 10; 25 | COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION = 11; 26 | COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION = 12; 27 | COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES = 13; 28 | COMMAND_TYPE_PROTOCOL_MESSAGE = 14; 29 | COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES = 16; 30 | COMMAND_TYPE_SCHEDULE_NEXUS_OPERATION = 17; 31 | COMMAND_TYPE_REQUEST_CANCEL_NEXUS_OPERATION = 18; 32 | } 33 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/namespace.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "NamespaceProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum NamespaceState { 13 | NAMESPACE_STATE_UNSPECIFIED = 0; 14 | NAMESPACE_STATE_REGISTERED = 1; 15 | NAMESPACE_STATE_DEPRECATED = 2; 16 | NAMESPACE_STATE_DELETED = 3; 17 | } 18 | 19 | enum ArchivalState { 20 | ARCHIVAL_STATE_UNSPECIFIED = 0; 21 | ARCHIVAL_STATE_DISABLED = 1; 22 | ARCHIVAL_STATE_ENABLED = 2; 23 | } 24 | 25 | enum ReplicationState { 26 | REPLICATION_STATE_UNSPECIFIED = 0; 27 | REPLICATION_STATE_NORMAL = 1; 28 | 
REPLICATION_STATE_HANDOVER = 2; 29 | } 30 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/nexus.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "NexusProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // NexusHandlerErrorRetryBehavior allows nexus handlers to explicity set the retry behavior of a HandlerError. If not 13 | // specified, retry behavior is determined from the error type. For example internal errors are not retryable by default 14 | // unless specified otherwise. 15 | enum NexusHandlerErrorRetryBehavior { 16 | NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_UNSPECIFIED = 0; 17 | // A handler error is explicitly marked as retryable. 18 | NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_RETRYABLE = 1; 19 | // A handler error is explicitly marked as non-retryable. 
20 | NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_NON_RETRYABLE = 2; 21 | } 22 | 23 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/query.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "QueryProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | enum QueryResultType { 13 | QUERY_RESULT_TYPE_UNSPECIFIED = 0; 14 | QUERY_RESULT_TYPE_ANSWERED = 1; 15 | QUERY_RESULT_TYPE_FAILED = 2; 16 | } 17 | 18 | enum QueryRejectCondition { 19 | QUERY_REJECT_CONDITION_UNSPECIFIED = 0; 20 | // None indicates that query should not be rejected. 21 | QUERY_REJECT_CONDITION_NONE = 1; 22 | // NotOpen indicates that query should be rejected if workflow is not open. 23 | QUERY_REJECT_CONDITION_NOT_OPEN = 2; 24 | // NotCompletedCleanly indicates that query should be rejected if workflow did not complete cleanly. 
25 | QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY = 3; 26 | } 27 | 28 | 29 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "ResetProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // Event types to exclude when reapplying events beyond the reset point. 13 | enum ResetReapplyExcludeType { 14 | RESET_REAPPLY_EXCLUDE_TYPE_UNSPECIFIED = 0; 15 | // Exclude signals when reapplying events beyond the reset point. 16 | RESET_REAPPLY_EXCLUDE_TYPE_SIGNAL = 1; 17 | // Exclude updates when reapplying events beyond the reset point. 18 | RESET_REAPPLY_EXCLUDE_TYPE_UPDATE = 2; 19 | // Exclude nexus events when reapplying events beyond the reset point. 20 | RESET_REAPPLY_EXCLUDE_TYPE_NEXUS = 3; 21 | // Deprecated, unimplemented option. 22 | RESET_REAPPLY_EXCLUDE_TYPE_CANCEL_REQUEST = 4 [deprecated=true]; 23 | } 24 | 25 | // Event types to include when reapplying events. Deprecated: applications 26 | // should use ResetReapplyExcludeType to specify exclusions from this set, and 27 | // new event types should be added to ResetReapplyExcludeType instead of here. 28 | enum ResetReapplyType { 29 | RESET_REAPPLY_TYPE_UNSPECIFIED = 0; 30 | // Signals are reapplied when workflow is reset. 31 | RESET_REAPPLY_TYPE_SIGNAL = 1; 32 | // No events are reapplied when workflow is reset. 33 | RESET_REAPPLY_TYPE_NONE = 2; 34 | // All eligible events are reapplied when workflow is reset. 35 | RESET_REAPPLY_TYPE_ALL_ELIGIBLE = 3; 36 | } 37 | 38 | // Reset type options. 
Deprecated, see temporal.api.common.v1.ResetOptions. 39 | enum ResetType { 40 | RESET_TYPE_UNSPECIFIED = 0; 41 | // Resets to event of the first workflow task completed, or if it does not exist, the event after task scheduled. 42 | RESET_TYPE_FIRST_WORKFLOW_TASK = 1; 43 | // Resets to event of the last workflow task completed, or if it does not exist, the event after task scheduled. 44 | RESET_TYPE_LAST_WORKFLOW_TASK = 2; 45 | } 46 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/schedule.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "ScheduleProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | 13 | // ScheduleOverlapPolicy controls what happens when a workflow would be started 14 | // by a schedule, and is already running. 15 | enum ScheduleOverlapPolicy { 16 | SCHEDULE_OVERLAP_POLICY_UNSPECIFIED = 0; 17 | // SCHEDULE_OVERLAP_POLICY_SKIP (default) means don't start anything. When the 18 | // workflow completes, the next scheduled event after that time will be considered. 19 | SCHEDULE_OVERLAP_POLICY_SKIP = 1; 20 | // SCHEDULE_OVERLAP_POLICY_BUFFER_ONE means start the workflow again soon as the 21 | // current one completes, but only buffer one start in this way. If another start is 22 | // supposed to happen when the workflow is running, and one is already buffered, then 23 | // only the first one will be started after the running workflow finishes. 
24 | SCHEDULE_OVERLAP_POLICY_BUFFER_ONE = 2; 25 | // SCHEDULE_OVERLAP_POLICY_BUFFER_ALL means buffer up any number of starts to all 26 | // happen sequentially, immediately after the running workflow completes. 27 | SCHEDULE_OVERLAP_POLICY_BUFFER_ALL = 3; 28 | // SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER means that if there is another workflow 29 | // running, cancel it, and start the new one after the old one completes cancellation. 30 | SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER = 4; 31 | // SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER means that if there is another workflow 32 | // running, terminate it and start the new one immediately. 33 | SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER = 5; 34 | // SCHEDULE_OVERLAP_POLICY_ALLOW_ALL means start any number of concurrent workflows. 35 | // Note that with this policy, last completion result and last failure will not be 36 | // available since workflows are not sequential. 37 | SCHEDULE_OVERLAP_POLICY_ALLOW_ALL = 6; 38 | } 39 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/update.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.enums.v1; 4 | 5 | option go_package = "go.temporal.io/api/enums/v1;enums"; 6 | option java_package = "io.temporal.api.enums.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "UpdateProto"; 9 | option ruby_package = "Temporalio::Api::Enums::V1"; 10 | option csharp_namespace = "Temporalio.Api.Enums.V1"; 11 | 12 | // UpdateWorkflowExecutionLifecycleStage is specified by clients invoking 13 | // Workflow Updates and used to indicate to the server how long the 14 | // client wishes to wait for a return value from the API. 
If any value other 15 | // than UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED is sent by the 16 | // client then the API will complete before the Update is finished and will 17 | // return a handle to the running Update so that it can later be polled for 18 | // completion. 19 | // If specified stage wasn't reached before server timeout, server returns 20 | // actual stage reached. 21 | enum UpdateWorkflowExecutionLifecycleStage { 22 | // An unspecified value for this enum. 23 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED = 0; 24 | // The API call will not return until the Update request has been admitted 25 | // by the server - it may be the case that due to a considerations like load 26 | // or resource limits that an Update is made to wait before the server will 27 | // indicate that it has been received and will be processed. This value 28 | // does not wait for any sort of acknowledgement from a worker. 29 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED = 1; 30 | // The API call will not return until the Update has passed validation on a worker. 31 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED = 2; 32 | // The API call will not return until the Update has executed to completion 33 | // on a worker and has either been rejected or returned a value or an error. 34 | UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED = 3; 35 | } 36 | 37 | // Records why a WorkflowExecutionUpdateAdmittedEvent was written to history. 38 | // Note that not all admitted Updates result in this event. 39 | enum UpdateAdmittedEventOrigin { 40 | UPDATE_ADMITTED_EVENT_ORIGIN_UNSPECIFIED = 0; 41 | // The UpdateAdmitted event was created when reapplying events during reset 42 | // or replication. I.e. an accepted Update on one branch of Workflow history 43 | // was converted into an admitted Update on a different branch. 
44 | UPDATE_ADMITTED_EVENT_ORIGIN_REAPPLY = 1; 45 | } 46 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/export/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.export.v1; 4 | 5 | option go_package = "go.temporal.io/api/export/v1;export"; 6 | option java_package = "io.temporal.api.export.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Export::V1"; 10 | option csharp_namespace = "Temporalio.Api.Export.V1"; 11 | 12 | import "temporal/api/history/v1/message.proto"; 13 | 14 | message WorkflowExecution { 15 | temporal.api.history.v1.History history = 1; 16 | } 17 | 18 | // WorkflowExecutions is used by the Cloud Export feature to deserialize 19 | // the exported file. It encapsulates a collection of workflow execution information. 
20 | message WorkflowExecutions { 21 | repeated WorkflowExecution items = 1; 22 | } 23 | 24 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/filter/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.filter.v1; 4 | 5 | option go_package = "go.temporal.io/api/filter/v1;filter"; 6 | option java_package = "io.temporal.api.filter.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Filter::V1"; 10 | option csharp_namespace = "Temporalio.Api.Filter.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | 14 | import "temporal/api/enums/v1/workflow.proto"; 15 | 16 | message WorkflowExecutionFilter { 17 | string workflow_id = 1; 18 | string run_id = 2; 19 | } 20 | 21 | message WorkflowTypeFilter { 22 | string name = 1; 23 | } 24 | 25 | message StartTimeFilter { 26 | google.protobuf.Timestamp earliest_time = 1; 27 | google.protobuf.Timestamp latest_time = 2; 28 | } 29 | 30 | message StatusFilter { 31 | temporal.api.enums.v1.WorkflowExecutionStatus status = 1; 32 | } 33 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/namespace/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.namespace.v1; 4 | 5 | option go_package = "go.temporal.io/api/namespace/v1;namespace"; 6 | option java_package = "io.temporal.api.namespace.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Namespace::V1"; 10 | option csharp_namespace = "Temporalio.Api.Namespace.V1"; 11 | 12 | import "google/protobuf/duration.proto"; 13 | import "google/protobuf/timestamp.proto"; 14 
| 15 | import "temporal/api/enums/v1/namespace.proto"; 16 | 17 | 18 | message NamespaceInfo { 19 | string name = 1; 20 | temporal.api.enums.v1.NamespaceState state = 2; 21 | string description = 3; 22 | string owner_email = 4; 23 | // A key-value map for any customized purpose. 24 | map data = 5; 25 | string id = 6; 26 | // All capabilities the namespace supports. 27 | Capabilities capabilities = 7; 28 | 29 | // Namespace capability details. Should contain what features are enabled in a namespace. 30 | message Capabilities { 31 | // True if the namespace supports eager workflow start. 32 | bool eager_workflow_start = 1; 33 | // True if the namespace supports sync update 34 | bool sync_update = 2; 35 | // True if the namespace supports async update 36 | bool async_update = 3; 37 | } 38 | 39 | // Whether scheduled workflows are supported on this namespace. This is only needed 40 | // temporarily while the feature is experimental, so we can give it a high tag. 41 | bool supports_schedules = 100; 42 | } 43 | 44 | message NamespaceConfig { 45 | google.protobuf.Duration workflow_execution_retention_ttl = 1; 46 | BadBinaries bad_binaries = 2; 47 | // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. 48 | temporal.api.enums.v1.ArchivalState history_archival_state = 3; 49 | string history_archival_uri = 4; 50 | // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. 51 | temporal.api.enums.v1.ArchivalState visibility_archival_state = 5; 52 | string visibility_archival_uri = 6; 53 | // Map from field name to alias. 
map<string, string> custom_search_attribute_aliases = 7; 55 | } 56 | 57 | message BadBinaries { 58 | map<string, BadBinaryInfo> binaries = 1;
86 | bool include_deleted = 1; 87 | } 88 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/protocol/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.protocol.v1; 4 | 5 | option go_package = "go.temporal.io/api/protocol/v1;protocol"; 6 | option java_package = "io.temporal.api.protocol.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Protocol::V1"; 10 | option csharp_namespace = "Temporalio.Api.Protocol.V1"; 11 | 12 | import "google/protobuf/any.proto"; 13 | 14 | // (-- api-linter: core::0146::any=disabled 15 | // aip.dev/not-precedent: We want runtime extensibility for the body field --) 16 | message Message { 17 | // An ID for this specific message. 18 | string id = 1; 19 | 20 | // Identifies the specific instance of a protocol to which this message 21 | // belongs. 22 | string protocol_instance_id = 2; 23 | 24 | // The event ID or command ID after which this message can be delivered. The 25 | // effects of history up to and including this event ID should be visible to 26 | // the code that handles this message. Omit to opt out of sequencing. 27 | oneof sequencing_id { 28 | int64 event_id = 3; 29 | int64 command_index = 4; 30 | }; 31 | 32 | // The opaque data carried by this message. The protocol type can be 33 | // extracted from the package name of the message carried inside the Any. 
34 | google.protobuf.Any body = 5; 35 | } 36 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/query/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.query.v1; 4 | 5 | option go_package = "go.temporal.io/api/query/v1;query"; 6 | option java_package = "io.temporal.api.query.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Query::V1"; 10 | option csharp_namespace = "Temporalio.Api.Query.V1"; 11 | 12 | import "temporal/api/enums/v1/query.proto"; 13 | import "temporal/api/enums/v1/workflow.proto"; 14 | import "temporal/api/common/v1/message.proto"; 15 | import "temporal/api/failure/v1/message.proto"; 16 | 17 | // See https://docs.temporal.io/docs/concepts/queries/ 18 | message WorkflowQuery { 19 | // The workflow-author-defined identifier of the query. Typically a function name. 20 | string query_type = 1; 21 | // Serialized arguments that will be provided to the query handler. 22 | temporal.api.common.v1.Payloads query_args = 2; 23 | // Headers that were passed by the caller of the query and copied by temporal 24 | // server into the workflow task. 25 | temporal.api.common.v1.Header header = 3; 26 | } 27 | 28 | // Answer to a `WorkflowQuery` 29 | message WorkflowQueryResult { 30 | // Did the query succeed or fail? 31 | temporal.api.enums.v1.QueryResultType result_type = 1; 32 | // Set when the query succeeds with the results. 33 | // Mutually exclusive with `error_message` and `failure`. 34 | temporal.api.common.v1.Payloads answer = 2; 35 | // Mutually exclusive with `answer`. Set when the query fails. 36 | // See also the newer `failure` field. 37 | string error_message = 3; 38 | // The full reason for this query failure. 
This field is newer than `error_message` and can be encoded by the SDK's 39 | // failure converter to support E2E encryption of messages and stack traces. 40 | // Mutually exclusive with `answer`. Set when the query fails. 41 | temporal.api.failure.v1.Failure failure = 4; 42 | } 43 | 44 | message QueryRejected { 45 | temporal.api.enums.v1.WorkflowExecutionStatus status = 1; 46 | } 47 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/replication/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.replication.v1; 4 | 5 | option go_package = "go.temporal.io/api/replication/v1;replication"; 6 | option java_package = "io.temporal.api.replication.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Replication::V1"; 10 | option csharp_namespace = "Temporalio.Api.Replication.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | 14 | import "temporal/api/enums/v1/namespace.proto"; 15 | 16 | message ClusterReplicationConfig { 17 | string cluster_name = 1; 18 | } 19 | 20 | message NamespaceReplicationConfig { 21 | string active_cluster_name = 1; 22 | repeated ClusterReplicationConfig clusters = 2; 23 | temporal.api.enums.v1.ReplicationState state = 3; 24 | } 25 | 26 | // Represents a historical replication status of a Namespace 27 | message FailoverStatus { 28 | // Timestamp when the Cluster switched to the following failover_version 29 | google.protobuf.Timestamp failover_time = 1; 30 | int64 failover_version = 2; 31 | } 32 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/rules/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package 
temporal.api.rules.v1; 4 | 5 | option go_package = "go.temporal.io/api/rules/v1;rules"; 6 | option java_package = "io.temporal.api.rules.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Rules::V1"; 10 | option csharp_namespace = "Temporalio.Api.Rules.V1"; 11 | 12 | 13 | import "google/protobuf/timestamp.proto"; 14 | 15 | message WorkflowRuleAction { 16 | message ActionActivityPause { 17 | } 18 | 19 | // Supported actions. 20 | oneof variant { 21 | ActionActivityPause activity_pause = 1; 22 | } 23 | } 24 | 25 | message WorkflowRuleSpec { 26 | // The id of the new workflow rule. Must be unique within the namespace. 27 | // Can be set by the user, and can have business meaning. 28 | string id = 1; 29 | 30 | // Activity trigger will be triggered when an activity is about to start. 31 | message ActivityStartingTrigger { 32 | // Activity predicate is a SQL-like string filter parameter. 33 | // It is used to match against workflow data. 34 | // The following activity attributes are supported as part of the predicate: 35 | // - ActivityType: An Activity Type is the mapping of a name to an Activity Definition.. 36 | // - ActivityId: The ID of the activity. 37 | // - ActivityAttempt: The number attempts of the activity. 38 | // - BackoffInterval: The current amount of time between scheduled attempts of the activity. 39 | // - ActivityStatus: The status of the activity. Can be one of "Scheduled", "Started", "Paused". 40 | // - TaskQueue: The name of the task queue the workflow specified that the activity should run on. 41 | // Activity predicate support the following operators: 42 | // * =, !=, >, >=, <, <= 43 | // * AND, OR, () 44 | // * BETWEEN ... AND 45 | // STARTS_WITH 46 | string predicate = 1; 47 | } 48 | 49 | // Specifies how the rule should be triggered and evaluated. 50 | // Currently, only "activity start" type is supported. 
51 | oneof trigger { 52 | ActivityStartingTrigger activity_start = 2; 53 | } 54 | 55 | // Restricted Visibility query. 56 | // This query is used to filter workflows in this namespace to which this rule should apply. 57 | // It is applied to any running workflow each time a triggering event occurs, before the trigger predicate is evaluated. 58 | // The following workflow attributes are supported: 59 | // - WorkflowType 60 | // - WorkflowId 61 | // - StartTime 62 | // - ExecutionStatus 63 | string visibility_query = 3; 64 | 65 | // WorkflowRuleAction to be taken when the rule is triggered and predicate is matched. 66 | repeated WorkflowRuleAction actions = 4; 67 | 68 | // Expiration time of the rule. After this time, the rule will be deleted. 69 | // Can be empty if the rule should never expire. 70 | google.protobuf.Timestamp expiration_time = 5; 71 | } 72 | 73 | // WorkflowRule describes a rule that can be applied to any workflow in this namespace. 74 | message WorkflowRule { 75 | // Rule creation time. 76 | google.protobuf.Timestamp create_time = 1; 77 | 78 | // Rule specification 79 | WorkflowRuleSpec spec = 2; 80 | 81 | // Identity of the actor that created the rule 82 | // (-- api-linter: core::0140::prepositions=disabled 83 | // aip.dev/not-precedent: It is better reflect the intent this way, we will also have updated_by. --) 84 | // (-- api-linter: core::0142::time-field-names=disabled 85 | // aip.dev/not-precedent: Same as above. All other options sounds clumsy --) 86 | string created_by_identity = 3; 87 | 88 | // Rule description. 
89 | string description = 4; 90 | } 91 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/enhanced_stack_trace.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "EnhancedStackTraceProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | // Internal structure used to create worker stack traces with references to code. 13 | message EnhancedStackTrace { 14 | // Information pertaining to the SDK that the trace has been captured from. 15 | StackTraceSDKInfo sdk = 1; 16 | 17 | // Mapping of file path to file contents. 18 | map sources = 2; 19 | 20 | // Collection of stacks captured. 21 | repeated StackTrace stacks = 3; 22 | } 23 | 24 | // Information pertaining to the SDK that the trace has been captured from. 25 | // (-- api-linter: core::0123::resource-annotation=disabled 26 | // aip.dev/not-precedent: Naming SDK version is optional. --) 27 | message StackTraceSDKInfo { 28 | // Name of the SDK 29 | string name = 1; 30 | 31 | // Version string of the SDK 32 | string version = 2; 33 | } 34 | 35 | // "Slice" of a file starting at line_offset -- a line offset and code fragment corresponding to the worker's stack. 36 | message StackTraceFileSlice { 37 | // Only used (possibly) to trim the file without breaking syntax highlighting. This is not optional, unlike 38 | // the `line` property of a `StackTraceFileLocation`. 39 | // (-- api-linter: core::0141::forbidden-types=disabled 40 | // aip.dev/not-precedent: These really shouldn't have negative values. 
--) 41 | uint32 line_offset = 1; 42 | 43 | // Slice of a file with the respective OS-specific line terminator. 44 | string content = 2; 45 | } 46 | 47 | // More specific location details of a file: its path, precise line and column numbers if applicable, and function name if available. 48 | // In essence, a pointer to a location in a file 49 | message StackTraceFileLocation { 50 | // Path to source file (absolute or relative). 51 | // If the paths are relative, ensure that they are all relative to the same root. 52 | string file_path = 1; 53 | 54 | // Optional; If possible, SDK should send this -- this is required for displaying the code location. 55 | // If not provided, set to -1. 56 | int32 line = 2; 57 | 58 | // Optional; if possible, SDK should send this. 59 | // If not provided, set to -1. 60 | int32 column = 3; 61 | 62 | // Function name this line belongs to, if applicable. 63 | // Used for falling back to stack trace view. 64 | string function_name = 4; 65 | 66 | // Flag to communicate whether a location should be hidden by default in the stack view. 67 | bool internal_code = 5; 68 | } 69 | 70 | // Collection of FileLocation messages from a single stack. 71 | message StackTrace { 72 | // Collection of `FileLocation`s, each for a stack frame that comprise a stack trace. 
73 | repeated StackTraceFileLocation locations = 1; 74 | } 75 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "TaskCompleteMetadataProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | message WorkflowTaskCompletedMetadata { 13 | // Internal flags used by the core SDK. SDKs using flags must comply with the following behavior: 14 | // 15 | // During replay: 16 | // * If a flag is not recognized (value is too high or not defined), it must fail the workflow 17 | // task. 18 | // * If a flag is recognized, it is stored in a set of used flags for the run. Code checks for 19 | // that flag during and after this WFT are allowed to assume that the flag is present. 20 | // * If a code check for a flag does not find the flag in the set of used flags, it must take 21 | // the branch corresponding to the absence of that flag. 22 | // 23 | // During non-replay execution of new WFTs: 24 | // * The SDK is free to use all flags it knows about. It must record any newly-used (IE: not 25 | // previously recorded) flags when completing the WFT. 26 | // 27 | // SDKs which are too old to even know about this field at all are considered to produce 28 | // undefined behavior if they replay workflows which used this mechanism. 29 | // 30 | // (-- api-linter: core::0141::forbidden-types=disabled 31 | // aip.dev/not-precedent: These really shouldn't have negative values. --) 32 | repeated uint32 core_used_flags = 1; 33 | 34 | // Flags used by the SDK lang. 
No attempt is made to distinguish between different SDK languages 35 | // here as processing a workflow with a different language than the one which authored it is 36 | // already undefined behavior. See `core_used_patches` for more. 37 | // 38 | // (-- api-linter: core::0141::forbidden-types=disabled 39 | // aip.dev/not-precedent: These really shouldn't have negative values. --) 40 | repeated uint32 lang_used_flags = 2; 41 | 42 | // Name of the SDK that processed the task. This is usually something like "temporal-go" and is 43 | // usually the same as client-name gRPC header. This should only be set if its value changed 44 | // since the last time recorded on the workflow (or be set on the first task). 45 | // 46 | // (-- api-linter: core::0122::name-suffix=disabled 47 | // aip.dev/not-precedent: We're ok with a name suffix here. --) 48 | string sdk_name = 3; 49 | 50 | // Version of the SDK that processed the task. This is usually something like "1.20.0" and is 51 | // usually the same as client-version gRPC header. This should only be set if its value changed 52 | // since the last time recorded on the workflow (or be set on the first task). 53 | string sdk_version = 4; 54 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/user_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "UserMetadataProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | 13 | import "temporal/api/common/v1/message.proto"; 14 | 15 | // Information a user can set, often for use by user interfaces. 
16 | message UserMetadata { 17 | // Short-form text that provides a summary. This payload should be a "json/plain"-encoded payload 18 | // that is a single JSON string for use in user interfaces. User interface formatting may not 19 | // apply to this text when used in "title" situations. The payload data section is limited to 400 20 | // bytes by default. 21 | temporal.api.common.v1.Payload summary = 1; 22 | 23 | // Long-form text that provides details. This payload should be a "json/plain"-encoded payload 24 | // that is a single JSON string for use in user interfaces. User interface formatting may apply to 25 | // this text in common use. The payload data section is limited to 20000 bytes by default. 26 | temporal.api.common.v1.Payload details = 2; 27 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/workflow_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.sdk.v1; 4 | 5 | option go_package = "go.temporal.io/api/sdk/v1;sdk"; 6 | option java_package = "io.temporal.api.sdk.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "WorkflowMetadataProto"; 9 | option ruby_package = "Temporalio::Api::Sdk::V1"; 10 | option csharp_namespace = "Temporalio.Api.Sdk.V1"; 11 | 12 | // The name of the query to retrieve this information is `__temporal_workflow_metadata`. 13 | message WorkflowMetadata { 14 | // Metadata provided at declaration or creation time. 15 | WorkflowDefinition definition = 1; 16 | // Current long-form details of the workflow's state. This is used by user interfaces to show 17 | // long-form text. This text may be formatted by the user interface. 
18 | string current_details = 2; 19 | } 20 | 21 | // (-- api-linter: core::0203::optional=disabled --) 22 | message WorkflowDefinition { 23 | // A name scoped by the task queue that maps to this workflow definition. 24 | // If missing, this workflow is a dynamic workflow. 25 | string type = 1; 26 | 27 | // Query definitions, sorted by name. 28 | repeated WorkflowInteractionDefinition query_definitions = 2; 29 | 30 | // Signal definitions, sorted by name. 31 | repeated WorkflowInteractionDefinition signal_definitions = 3; 32 | 33 | // Update definitions, sorted by name. 34 | repeated WorkflowInteractionDefinition update_definitions = 4; 35 | } 36 | 37 | // (-- api-linter: core::0123::resource-annotation=disabled 38 | // aip.dev/not-precedent: The `name` field is optional. --) 39 | // (-- api-linter: core::0203::optional=disabled --) 40 | message WorkflowInteractionDefinition { 41 | // An optional name for the handler. If missing, it represents 42 | // a dynamic handler that processes any interactions not handled by others. 43 | // There is at most one dynamic handler per workflow and interaction kind. 44 | string name = 1; 45 | // An optional interaction description provided by the application. 46 | // By convention, external tools may interpret its first part, 47 | // i.e., ending with a line break, as a summary of the description. 
48 | string description = 2; 49 | } 50 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/update/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.update.v1; 4 | 5 | option go_package = "go.temporal.io/api/update/v1;update"; 6 | option java_package = "io.temporal.api.update.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Update::V1"; 10 | option csharp_namespace = "Temporalio.Api.Update.V1"; 11 | 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/api/enums/v1/update.proto"; 14 | import "temporal/api/failure/v1/message.proto"; 15 | 16 | // Specifies client's intent to wait for Update results. 17 | message WaitPolicy { 18 | // Indicates the Update lifecycle stage that the Update must reach before 19 | // API call is returned. 20 | // NOTE: This field works together with API call timeout which is limited by 21 | // server timeout (maximum wait time). If server timeout is expired before 22 | // user specified timeout, API call returns even if specified stage is not reached. 23 | temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage lifecycle_stage = 1; 24 | } 25 | 26 | // The data needed by a client to refer to a previously invoked Workflow Update. 27 | message UpdateRef { 28 | temporal.api.common.v1.WorkflowExecution workflow_execution = 1; 29 | string update_id = 2; 30 | } 31 | 32 | // The outcome of a Workflow Update: success or failure. 33 | message Outcome { 34 | oneof value { 35 | temporal.api.common.v1.Payloads success = 1; 36 | temporal.api.failure.v1.Failure failure = 2; 37 | } 38 | } 39 | 40 | // Metadata about a Workflow Update. 41 | message Meta { 42 | // An ID with workflow-scoped uniqueness for this Update. 
43 | string update_id = 1; 44 | 45 | // A string identifying the agent that requested this Update. 46 | string identity = 2; 47 | } 48 | 49 | message Input { 50 | // Headers that are passed with the Update from the requesting entity. 51 | // These can include things like auth or tracing tokens. 52 | temporal.api.common.v1.Header header = 1; 53 | 54 | // The name of the Update handler to invoke on the target Workflow. 55 | string name = 2; 56 | 57 | // The arguments to pass to the named Update handler. 58 | temporal.api.common.v1.Payloads args = 3; 59 | } 60 | 61 | // The client request that triggers a Workflow Update. 62 | message Request { 63 | Meta meta = 1; 64 | Input input = 2; 65 | } 66 | 67 | // An Update protocol message indicating that a Workflow Update has been rejected. 68 | message Rejection { 69 | string rejected_request_message_id = 1; 70 | int64 rejected_request_sequencing_event_id = 2; 71 | Request rejected_request = 3; 72 | temporal.api.failure.v1.Failure failure = 4; 73 | } 74 | 75 | // An Update protocol message indicating that a Workflow Update has 76 | // been accepted (i.e. passed the worker-side validation phase). 77 | message Acceptance { 78 | string accepted_request_message_id = 1; 79 | int64 accepted_request_sequencing_event_id = 2; 80 | Request accepted_request = 3; 81 | } 82 | 83 | // An Update protocol message indicating that a Workflow Update has 84 | // completed with the contained outcome. 
85 | message Response { 86 | Meta meta = 1; 87 | Outcome outcome = 2; 88 | } 89 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/api_upstream/temporal/api/version/v1/message.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package temporal.api.version.v1; 4 | 5 | option go_package = "go.temporal.io/api/version/v1;version"; 6 | option java_package = "io.temporal.api.version.v1"; 7 | option java_multiple_files = true; 8 | option java_outer_classname = "MessageProto"; 9 | option ruby_package = "Temporalio::Api::Version::V1"; 10 | option csharp_namespace = "Temporalio.Api.Version.V1"; 11 | 12 | import "google/protobuf/timestamp.proto"; 13 | import "temporal/api/enums/v1/common.proto"; 14 | 15 | // ReleaseInfo contains information about specific version of temporal. 16 | message ReleaseInfo { 17 | string version = 1; 18 | google.protobuf.Timestamp release_time = 2; 19 | string notes = 3; 20 | } 21 | 22 | // Alert contains notification and severity. 23 | message Alert { 24 | string message = 1; 25 | temporal.api.enums.v1.Severity severity = 2; 26 | } 27 | 28 | // VersionInfo contains details about current and recommended release versions as well as alerts and upgrade instructions. 29 | message VersionInfo { 30 | ReleaseInfo current = 1; 31 | ReleaseInfo recommended = 2; 32 | string instructions = 3; 33 | repeated Alert alerts = 4; 34 | google.protobuf.Timestamp last_update_time = 5; 35 | } 36 | 37 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/google/rpc/status.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // The canonical version of this proto can be found at 16 | // https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto 17 | 18 | syntax = "proto3"; 19 | 20 | package google.rpc; 21 | 22 | import "google/protobuf/any.proto"; 23 | 24 | option cc_enable_arenas = true; 25 | option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; 26 | option java_multiple_files = true; 27 | option java_outer_classname = "StatusProto"; 28 | option java_package = "com.google.rpc"; 29 | option objc_class_prefix = "RPC"; 30 | 31 | // The `Status` type defines a logical error model that is suitable for 32 | // different programming environments, including REST APIs and RPC APIs. It is 33 | // used by [gRPC](https://github.com/grpc). Each `Status` message contains 34 | // three pieces of data: error code, error message, and error details. 35 | // 36 | // You can find out more about this error model and how to work with it in the 37 | // [API Design Guide](https://cloud.google.com/apis/design/errors). 38 | message Status { 39 | // The status code, which should be an enum value of 40 | // [google.rpc.Code][google.rpc.Code]. 41 | int32 code = 1; 42 | 43 | // A developer-facing error message, which should be in English. Any 44 | // user-facing error message should be localized and sent in the 45 | // [google.rpc.Status.details][google.rpc.Status.details] field, or localized 46 | // by the client. 47 | string message = 2; 48 | 49 | // A list of messages that carry the error details. 
There is a common set of 50 | // message types for APIs to use. 51 | repeated google.protobuf.Any details = 3; 52 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/grpc/health/v1/health.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The gRPC Authors 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // The canonical version of this proto can be found at 16 | // https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto 17 | 18 | syntax = "proto3"; 19 | 20 | package grpc.health.v1; 21 | 22 | option csharp_namespace = "Grpc.Health.V1"; 23 | option go_package = "google.golang.org/grpc/health/grpc_health_v1"; 24 | option java_multiple_files = true; 25 | option java_outer_classname = "HealthProto"; 26 | option java_package = "io.grpc.health.v1"; 27 | 28 | message HealthCheckRequest { 29 | string service = 1; 30 | } 31 | 32 | message HealthCheckResponse { 33 | enum ServingStatus { 34 | UNKNOWN = 0; 35 | SERVING = 1; 36 | NOT_SERVING = 2; 37 | SERVICE_UNKNOWN = 3; // Used only by the Watch method. 38 | } 39 | ServingStatus status = 1; 40 | } 41 | 42 | service Health { 43 | // If the requested service is unknown, the call will fail with status 44 | // NOT_FOUND. 
45 | rpc Check(HealthCheckRequest) returns (HealthCheckResponse); 46 | 47 | // Performs a watch for the serving status of the requested service. 48 | // The server will immediately send back a message indicating the current 49 | // serving status. It will then subsequently send a new message whenever 50 | // the service's serving status changes. 51 | // 52 | // If the requested service is unknown when the call is received, the 53 | // server will send a message setting the serving status to 54 | // SERVICE_UNKNOWN but will *not* terminate the call. If at some 55 | // future point, the serving status of the service becomes known, the 56 | // server will send a new message with the service's serving status. 57 | // 58 | // If the call terminates with status UNIMPLEMENTED, then clients 59 | // should assume this method is not supported and should not retry the 60 | // call. If the call terminates with any other status (including OK), 61 | // clients should retry the call with appropriate exponential backoff. 
62 | rpc Watch(HealthCheckRequest) returns (stream HealthCheckResponse); 63 | } 64 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/local/temporal/sdk/core/activity_result/activity_result.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coresdk.activity_result; 4 | option ruby_package = "Temporalio::Internal::Bridge::Api::ActivityResult"; 5 | 6 | import "google/protobuf/duration.proto"; 7 | import "google/protobuf/timestamp.proto"; 8 | import "temporal/api/common/v1/message.proto"; 9 | import "temporal/api/failure/v1/message.proto"; 10 | 11 | // Used to report activity completions to core 12 | message ActivityExecutionResult { 13 | oneof status { 14 | Success completed = 1; 15 | Failure failed = 2; 16 | Cancellation cancelled = 3; 17 | WillCompleteAsync will_complete_async = 4; 18 | } 19 | } 20 | 21 | // Used to report activity resolutions to lang. IE: This is what the activities are resolved with 22 | // in the workflow. 23 | message ActivityResolution { 24 | oneof status { 25 | Success completed = 1; 26 | Failure failed = 2; 27 | Cancellation cancelled = 3; 28 | DoBackoff backoff = 4; 29 | } 30 | } 31 | 32 | // Used to report successful completion either when executing or resolving 33 | message Success { 34 | temporal.api.common.v1.Payload result = 1; 35 | } 36 | 37 | // Used to report activity failure either when executing or resolving 38 | message Failure { 39 | temporal.api.failure.v1.Failure failure = 1; 40 | } 41 | 42 | /* 43 | * Used to report cancellation from both Core and Lang. 44 | * When Lang reports a cancelled activity, it must put a CancelledFailure in the failure field. 45 | * When Core reports a cancelled activity, it must put an ActivityFailure with CancelledFailure 46 | * as the cause in the failure field. 
47 | */ 48 | message Cancellation { 49 | temporal.api.failure.v1.Failure failure = 1; 50 | } 51 | 52 | /* 53 | * Used in ActivityExecutionResult to notify Core that this Activity will complete asynchronously. 54 | * Core will forget about this Activity and free up resources used to track this Activity. 55 | */ 56 | message WillCompleteAsync { 57 | } 58 | 59 | /* 60 | * Issued when a local activity needs to retry but also wants to back off more than would be 61 | * reasonable to WFT heartbeat for. Lang is expected to schedule a timer for the duration 62 | * and then start a local activity of the same type & same inputs with the provided attempt number 63 | * after the timer has elapsed. 64 | * 65 | * This exists because Core does not have a concept of starting commands by itself, they originate 66 | * from lang. So expecting lang to start the timer / next pass of the activity fits more smoothly. 67 | */ 68 | message DoBackoff { 69 | // The attempt number that lang should provide when scheduling the retry. If the LA failed 70 | // on attempt 4 and we told lang to back off with a timer, this number will be 5. 71 | uint32 attempt = 1; 72 | google.protobuf.Duration backoff_duration = 2; 73 | // The time the first attempt of this local activity was scheduled. Must be passed with attempt 74 | // to the retry LA. 
75 | google.protobuf.Timestamp original_schedule_time = 3; 76 | } 77 | 78 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/local/temporal/sdk/core/child_workflow/child_workflow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coresdk.child_workflow; 4 | option ruby_package = "Temporalio::Internal::Bridge::Api::ChildWorkflow"; 5 | 6 | import "temporal/api/common/v1/message.proto"; 7 | import "temporal/api/failure/v1/message.proto"; 8 | import "temporal/sdk/core/common/common.proto"; 9 | 10 | // Used by core to resolve child workflow executions. 11 | message ChildWorkflowResult { 12 | oneof status { 13 | Success completed = 1; 14 | Failure failed = 2; 15 | Cancellation cancelled = 3; 16 | } 17 | } 18 | 19 | // Used in ChildWorkflowResult to report successful completion. 20 | message Success { 21 | temporal.api.common.v1.Payload result = 1; 22 | } 23 | 24 | // Used in ChildWorkflowResult to report non successful outcomes such as 25 | // application failures, timeouts, terminations, and cancellations. 26 | message Failure { 27 | temporal.api.failure.v1.Failure failure = 1; 28 | } 29 | 30 | // Used in ChildWorkflowResult to report cancellation. 31 | // Failure should be ChildWorkflowFailure with a CanceledFailure cause. 32 | message Cancellation { 33 | temporal.api.failure.v1.Failure failure = 1; 34 | } 35 | 36 | // Used by the service to determine the fate of a child workflow 37 | // in case its parent is closed. 38 | enum ParentClosePolicy { 39 | // Let's the server set the default. 40 | PARENT_CLOSE_POLICY_UNSPECIFIED = 0; 41 | // Terminate means terminating the child workflow. 42 | PARENT_CLOSE_POLICY_TERMINATE = 1; 43 | // Abandon means not doing anything on the child workflow. 44 | PARENT_CLOSE_POLICY_ABANDON = 2; 45 | // Cancel means requesting cancellation on the child workflow. 
46 | PARENT_CLOSE_POLICY_REQUEST_CANCEL = 3; 47 | } 48 | 49 | // Possible causes of failure to start a child workflow 50 | enum StartChildWorkflowExecutionFailedCause { 51 | START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; 52 | START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_WORKFLOW_ALREADY_EXISTS = 1; 53 | } 54 | 55 | // Controls at which point to report back to lang when a child workflow is cancelled 56 | enum ChildWorkflowCancellationType { 57 | // Do not request cancellation of the child workflow if already scheduled 58 | ABANDON = 0; 59 | // Initiate a cancellation request and immediately report cancellation to the parent. 60 | TRY_CANCEL = 1; 61 | // Wait for child cancellation completion. 62 | WAIT_CANCELLATION_COMPLETED = 2; 63 | // Request cancellation of the child and wait for confirmation that the request was received. 64 | WAIT_CANCELLATION_REQUESTED = 3; 65 | } 66 | 67 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/local/temporal/sdk/core/common/common.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coresdk.common; 4 | option ruby_package = "Temporalio::Internal::Bridge::Api::Common"; 5 | 6 | import "google/protobuf/duration.proto"; 7 | 8 | // Identifying information about a particular workflow execution, including namespace 9 | message NamespacedWorkflowExecution { 10 | // Namespace the workflow run is located in 11 | string namespace = 1; 12 | // Can never be empty 13 | string workflow_id = 2; 14 | // May be empty if the most recent run of the workflow with the given ID is being targeted 15 | string run_id = 3; 16 | } 17 | 18 | // An indication of user's intent concerning what Build ID versioning approach should be used for 19 | // a specific command 20 | enum VersioningIntent { 21 | // Indicates that core should choose the most sensible default behavior for the type of 22 | // command, accounting for 
whether the command will be run on the same task queue as the current 23 | // worker. 24 | UNSPECIFIED = 0; 25 | // Indicates that the command should run on a worker with compatible version if possible. It may 26 | // not be possible if the target task queue does not also have knowledge of the current worker's 27 | // build ID. 28 | COMPATIBLE = 1; 29 | // Indicates that the command should run on the target task queue's current overall-default 30 | // build ID. 31 | DEFAULT = 2; 32 | } 33 | 34 | message WorkerDeploymentVersion { 35 | string deployment_name = 1; 36 | string build_id = 2; 37 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/local/temporal/sdk/core/core_interface.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coresdk; 4 | option ruby_package = "Temporalio::Internal::Bridge::Api::CoreInterface"; 5 | 6 | // Note: Intellij will think the Google imports don't work because of the slightly odd nature of 7 | // the include paths. You can make it work by going to the "Protobuf Support" settings section 8 | // and adding the "api_upstream" subdir as an include path. 
9 | import "google/protobuf/duration.proto"; 10 | import "google/protobuf/empty.proto"; 11 | import "google/protobuf/timestamp.proto"; 12 | import "temporal/api/common/v1/message.proto"; 13 | import "temporal/sdk/core/activity_result/activity_result.proto"; 14 | import "temporal/sdk/core/activity_task/activity_task.proto"; 15 | import "temporal/sdk/core/common/common.proto"; 16 | import "temporal/sdk/core/external_data/external_data.proto"; 17 | import "temporal/sdk/core/workflow_activation/workflow_activation.proto"; 18 | import "temporal/sdk/core/workflow_commands/workflow_commands.proto"; 19 | import "temporal/sdk/core/workflow_completion/workflow_completion.proto"; 20 | 21 | // A request as given to `record_activity_heartbeat` 22 | message ActivityHeartbeat { 23 | bytes task_token = 1; 24 | repeated temporal.api.common.v1.Payload details = 2; 25 | } 26 | 27 | // A request as given to `complete_activity_task` 28 | message ActivityTaskCompletion { 29 | bytes task_token = 1; 30 | activity_result.ActivityExecutionResult result = 2; 31 | } 32 | 33 | // Info about workflow task slot usage 34 | message WorkflowSlotInfo { 35 | string workflow_type = 1; 36 | bool is_sticky = 2; 37 | } 38 | 39 | // Info about activity task slot usage 40 | message ActivitySlotInfo { 41 | string activity_type = 1; 42 | } 43 | 44 | // Info about local activity slot usage 45 | message LocalActivitySlotInfo { 46 | string activity_type = 1; 47 | } 48 | 49 | // Info about nexus task slot usage 50 | message NexusSlotInfo { 51 | string service = 1; 52 | string operation = 2; 53 | } 54 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/local/temporal/sdk/core/external_data/external_data.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coresdk.external_data; 4 | option ruby_package = "Temporalio::Internal::Bridge::Api::ExternalData"; 5 | 6 | import 
"google/protobuf/duration.proto"; 7 | import "google/protobuf/timestamp.proto"; 8 | 9 | // This file defines data that Core might write externally. The first motivating case being 10 | // storing data in markers in event history. Defining such data as protos provides an easy way 11 | // for consumers which would like to just depend on the proto package to make sense of marker data. 12 | 13 | message LocalActivityMarkerData { 14 | uint32 seq = 1; 15 | // The number of attempts at execution before we recorded this result. Typically starts at 1, 16 | // but it is possible to start at a higher number when backing off using a timer. 17 | uint32 attempt = 2; 18 | string activity_id = 3; 19 | string activity_type = 4; 20 | // You can think of this as "perceived completion time". It is the time the local activity thought 21 | // it was when it completed. Which could be different from wall-clock time because of workflow 22 | // replay. It's the WFT start time + the LA's runtime 23 | google.protobuf.Timestamp complete_time = 5; 24 | // If set, this local activity conceptually is retrying after the specified backoff. 25 | // Implementation wise, they are really two different LA machines, but with the same type & input. 26 | // The retry starts with an attempt number > 1. 27 | google.protobuf.Duration backoff = 6; 28 | // The time the LA was originally scheduled (wall clock time). This is used to track 29 | // schedule-to-close timeouts when timer-based backoffs are used 30 | google.protobuf.Timestamp original_schedule_time = 7; 31 | } 32 | 33 | message PatchedMarkerData { 34 | // The patch id 35 | string id = 1; 36 | // Whether or not the patch is marked deprecated. 
37 | bool deprecated = 2; 38 | } -------------------------------------------------------------------------------- /sdk-core-protos/protos/local/temporal/sdk/core/workflow_completion/workflow_completion.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coresdk.workflow_completion; 4 | option ruby_package = "Temporalio::Internal::Bridge::Api::WorkflowCompletion"; 5 | 6 | import "temporal/api/failure/v1/message.proto"; 7 | import "temporal/api/enums/v1/failed_cause.proto"; 8 | import "temporal/api/enums/v1/workflow.proto"; 9 | import "temporal/sdk/core/common/common.proto"; 10 | import "temporal/sdk/core/workflow_commands/workflow_commands.proto"; 11 | 12 | // Result of a single workflow activation, reported from lang to core 13 | message WorkflowActivationCompletion { 14 | // The run id from the workflow activation you are completing 15 | string run_id = 1; 16 | oneof status { 17 | Success successful = 2; 18 | Failure failed = 3; 19 | } 20 | } 21 | 22 | // Successful workflow activation with a list of commands generated by the workflow execution 23 | message Success { 24 | // A list of commands to send back to the temporal server 25 | repeated workflow_commands.WorkflowCommand commands = 1; 26 | // Any internal flags which the lang SDK used in the processing of this activation 27 | repeated uint32 used_internal_flags = 6; 28 | // The versioning behavior this workflow is currently using 29 | temporal.api.enums.v1.VersioningBehavior versioning_behavior = 7; 30 | } 31 | 32 | // Failure to activate or execute a workflow 33 | message Failure { 34 | temporal.api.failure.v1.Failure failure = 1; 35 | // Forces overriding the WFT failure cause 36 | temporal.api.enums.v1.WorkflowTaskFailedCause force_cause = 2; 37 | } 38 | 39 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/testsrv_upstream/Makefile: 
-------------------------------------------------------------------------------- 1 | $(VERBOSE).SILENT: 2 | ############################# Main targets ############################# 3 | ci-build: install proto 4 | 5 | # Install dependencies. 6 | install: grpc-install api-linter-install buf-install 7 | 8 | # Run all linters and compile proto files. 9 | proto: grpc 10 | ######################################################################## 11 | 12 | ##### Variables ###### 13 | ifndef GOPATH 14 | GOPATH := $(shell go env GOPATH) 15 | endif 16 | 17 | GOBIN := $(if $(shell go env GOBIN),$(shell go env GOBIN),$(GOPATH)/bin) 18 | SHELL := PATH=$(GOBIN):$(PATH) /bin/sh 19 | 20 | COLOR := "\e[1;36m%s\e[0m\n" 21 | 22 | PROTO_ROOT := . 23 | PROTO_FILES = $(shell find $(PROTO_ROOT) -name "*.proto") 24 | PROTO_DIRS = $(sort $(dir $(PROTO_FILES))) 25 | PROTO_OUT := .gen 26 | PROTO_IMPORTS := -I=$(PROTO_ROOT) 27 | 28 | $(PROTO_OUT): 29 | mkdir $(PROTO_OUT) 30 | 31 | ##### Compile proto files for go ##### 32 | grpc: buf-lint api-linter buf-breaking fix-path 33 | 34 | go-grpc: clean $(PROTO_OUT) 35 | printf $(COLOR) "Compile for go-gRPC..." 36 | $(foreach PROTO_DIR,$(PROTO_DIRS),protoc $(PROTO_IMPORTS) --go_out=plugins=grpc,paths=source_relative:$(PROTO_OUT) $(PROTO_DIR)*.proto;) 37 | 38 | fix-path: 39 | mv -f $(PROTO_OUT)/temporal/api/* $(PROTO_OUT) && rm -rf $(PROTO_OUT)/temporal 40 | 41 | ##### Plugins & tools ##### 42 | grpc-install: 43 | printf $(COLOR) "Install/update gRPC plugins..." 44 | go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1.0 45 | 46 | go-protobuf-install: 47 | go install github.com/golang/protobuf/protoc-gen-go@v1.4.3 48 | 49 | api-linter-install: 50 | printf $(COLOR) "Install/update api-linter..." 51 | go install github.com/googleapis/api-linter/cmd/api-linter@v1.22.0 52 | 53 | buf-install: 54 | printf $(COLOR) "Install/update buf..." 
55 | go install github.com/bufbuild/buf/cmd/buf@v0.43.2 56 | 57 | ##### Linters ##### 58 | api-linter: 59 | printf $(COLOR) "Run api-linter..." 60 | api-linter --set-exit-status $(PROTO_IMPORTS) --config $(PROTO_ROOT)/api-linter.yaml $(PROTO_FILES) 61 | 62 | buf-lint: 63 | printf $(COLOR) "Run buf linter..." 64 | (cd $(PROTO_ROOT) && buf lint) 65 | 66 | buf-breaking: 67 | # @printf $(COLOR) "Run buf breaking changes check against master branch..." 68 | # @(cd $(PROTO_ROOT) && buf breaking --against '../../../../.git#branch=master') 69 | 70 | ##### Clean ##### 71 | clean: 72 | printf $(COLOR) "Delete generated go files..." 73 | rm -rf $(PROTO_OUT) 74 | -------------------------------------------------------------------------------- /sdk-core-protos/protos/testsrv_upstream/api-linter.yaml: -------------------------------------------------------------------------------- 1 | - included_paths: 2 | - '**/*.proto' 3 | disabled_rules: 4 | - 'core::0192::has-comments' 5 | 6 | - included_paths: 7 | - '**/message.proto' 8 | disabled_rules: 9 | - 'core::0122::name-suffix' 10 | - 'core::0123::resource-annotation' 11 | 12 | - included_paths: 13 | - '**/testservice/v1/request_response.proto' 14 | disabled_rules: 15 | - 'core::0122::name-suffix' 16 | - 'core::0131::request-name-required' 17 | - 'core::0131::request-unknown-fields' 18 | - 'core::0132::request-parent-required' 19 | - 'core::0132::request-unknown-fields' 20 | - 'core::0132::response-unknown-fields' 21 | - 'core::0134::request-unknown-fields' 22 | - 'core::0158::request-page-size-field' 23 | - 'core::0158::request-page-token-field' 24 | - 'core::0158::response-next-page-token-field' 25 | - 'core::0158::response-plural-first-field' 26 | - 'core::0158::response-repeated-first-field' 27 | 28 | - included_paths: 29 | - '**/testservice/v1/service.proto' 30 | disabled_rules: 31 | - 'core::0127::http-annotation' 32 | - 'core::0131::method-signature' 33 | - 'core::0131::response-message-name' 34 | 
-------------------------------------------------------------------------------- /sdk-core-protos/protos/testsrv_upstream/buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1beta1 2 | build: 3 | roots: 4 | - . 5 | lint: 6 | ignore: 7 | - dependencies 8 | use: 9 | - DEFAULT 10 | breaking: 11 | ignore: 12 | use: 13 | - PACKAGE -------------------------------------------------------------------------------- /sdk-core-protos/protos/testsrv_upstream/temporal/api/testservice/v1/request_response.proto: -------------------------------------------------------------------------------- 1 | // The MIT License 2 | // 3 | // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 4 | // 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | // 12 | // The above copyright notice and this permission notice shall be included in 13 | // all copies or substantial portions of the Software. 14 | // 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | // THE SOFTWARE. 
22 | 23 | syntax = "proto3"; 24 | 25 | package temporal.api.testservice.v1; 26 | 27 | option go_package = "go.temporal.io/api/testservice/v1;testservice"; 28 | option java_package = "io.temporal.api.testservice.v1"; 29 | option java_multiple_files = true; 30 | option java_outer_classname = "RequestResponseProto"; 31 | option ruby_package = "Temporalio::Api::TestService::V1"; 32 | option csharp_namespace = "Temporalio.Api.TestService.V1"; 33 | 34 | import "google/protobuf/duration.proto"; 35 | import "google/protobuf/timestamp.proto"; 36 | 37 | message LockTimeSkippingRequest { 38 | } 39 | 40 | message LockTimeSkippingResponse { 41 | } 42 | 43 | message UnlockTimeSkippingRequest { 44 | } 45 | 46 | message UnlockTimeSkippingResponse { 47 | } 48 | 49 | message SleepUntilRequest { 50 | google.protobuf.Timestamp timestamp = 1; 51 | } 52 | 53 | message SleepRequest { 54 | google.protobuf.Duration duration = 1; 55 | } 56 | 57 | message SleepResponse { 58 | } 59 | 60 | message GetCurrentTimeResponse { 61 | google.protobuf.Timestamp time = 1; 62 | } -------------------------------------------------------------------------------- /sdk-core-protos/src/constants.rs: -------------------------------------------------------------------------------- 1 | //! 
Contains various constants that are used by core when storing/serializing data 2 | 3 | /// Used as `marker_name` field when recording patch markers 4 | pub const PATCH_MARKER_NAME: &str = "core_patch"; 5 | 6 | /// Used as `marker_name` field when recording local activity markers 7 | pub const LOCAL_ACTIVITY_MARKER_NAME: &str = "core_local_activity"; 8 | -------------------------------------------------------------------------------- /sdk-core-protos/src/task_token.rs: -------------------------------------------------------------------------------- 1 | use base64::{Engine, prelude::BASE64_STANDARD}; 2 | use std::{ 3 | borrow::Borrow, 4 | fmt::{Debug, Display, Formatter}, 5 | }; 6 | 7 | static LOCAL_ACT_TASK_TOKEN_PREFIX: &[u8] = b"local_act_"; 8 | 9 | #[derive( 10 | Hash, 11 | Eq, 12 | PartialEq, 13 | Clone, 14 | derive_more::From, 15 | derive_more::Into, 16 | serde::Serialize, 17 | serde::Deserialize, 18 | )] 19 | /// Type-safe wrapper for task token bytes 20 | pub struct TaskToken(pub Vec); 21 | 22 | impl TaskToken { 23 | /// Task tokens for local activities are always prefixed with a special sigil so they can 24 | /// be identified easily 25 | pub fn new_local_activity_token(unique_data: impl IntoIterator) -> Self { 26 | let mut bytes = LOCAL_ACT_TASK_TOKEN_PREFIX.to_vec(); 27 | bytes.extend(unique_data); 28 | TaskToken(bytes) 29 | } 30 | 31 | /// Returns true if the task token is for a local activity 32 | pub fn is_local_activity_task(&self) -> bool { 33 | self.0.starts_with(LOCAL_ACT_TASK_TOKEN_PREFIX) 34 | } 35 | } 36 | 37 | impl Display for TaskToken { 38 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 39 | f.write_str(&fmt_tt(&self.0)) 40 | } 41 | } 42 | 43 | impl Debug for TaskToken { 44 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 45 | f.write_str(&format!("TaskToken({})", fmt_tt(&self.0))) 46 | } 47 | } 48 | 49 | impl Borrow<[u8]> for TaskToken { 50 | fn borrow(&self) -> &[u8] { 51 | self.0.as_slice() 52 | } 53 | } 54 | 55 | 
pub(crate) fn fmt_tt(tt: &[u8]) -> String { 56 | BASE64_STANDARD.encode(tt) 57 | } 58 | -------------------------------------------------------------------------------- /sdk-core-protos/src/utilities.rs: -------------------------------------------------------------------------------- 1 | use prost::{EncodeError, Message}; 2 | 3 | pub trait TryIntoOrNone { 4 | /// Turn an option of something into an option of another thing, trying to convert along the way 5 | /// and returning `None` if that conversion fails 6 | fn try_into_or_none(self) -> Option; 7 | } 8 | 9 | impl TryIntoOrNone for Option 10 | where 11 | F: TryInto, 12 | { 13 | fn try_into_or_none(self) -> Option { 14 | self.map(TryInto::try_into).transpose().ok().flatten() 15 | } 16 | } 17 | 18 | /// Use to encode an message into a proto `Any`. 19 | /// 20 | /// Delete this once `prost_wkt_types` supports `prost` `0.12.x` which has built-in any packing. 21 | pub fn pack_any( 22 | type_url: String, 23 | msg: &T, 24 | ) -> Result { 25 | let mut value = Vec::new(); 26 | Message::encode(msg, &mut value)?; 27 | Ok(prost_wkt_types::Any { type_url, value }) 28 | } 29 | -------------------------------------------------------------------------------- /sdk/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "temporal-sdk" 3 | version = "0.1.0-alpha.1" 4 | edition = "2024" 5 | authors = ["Spencer Judge "] 6 | license-file = { workspace = true } 7 | description = "Temporal Rust SDK" 8 | homepage = "https://temporal.io/" 9 | repository = "https://github.com/temporalio/sdk-core" 10 | keywords = ["temporal", "workflow"] 11 | categories = ["development-tools"] 12 | 13 | [dependencies] 14 | async-trait = "0.1" 15 | anyhow = "1.0" 16 | derive_more = { workspace = true } 17 | futures-util = { version = "0.3", default-features = false } 18 | parking_lot = { version = "0.12", features = ["send_guard"] } 19 | prost-types = { version = "0.6", package = "prost-wkt-types" 
} 20 | serde = "1.0" 21 | tokio = { version = "1.26", features = ["rt", "rt-multi-thread", "parking_lot", "time", "fs"] } 22 | tokio-util = { version = "0.7" } 23 | tokio-stream = "0.1" 24 | tracing = "0.1" 25 | 26 | [dependencies.temporal-sdk-core] 27 | path = "../core" 28 | version = "0.1" 29 | default-features = false 30 | 31 | [dependencies.temporal-sdk-core-protos] 32 | path = "../sdk-core-protos" 33 | version = "0.1" 34 | 35 | [dependencies.temporal-sdk-core-api] 36 | path = "../core-api" 37 | version = "0.1" 38 | 39 | [dependencies.temporal-client] 40 | path = "../client" 41 | version = "0.1" 42 | 43 | [lints] 44 | workspace = true 45 | -------------------------------------------------------------------------------- /sdk/src/app_data.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | any::{Any, TypeId}, 3 | collections::HashMap, 4 | fmt, 5 | }; 6 | 7 | /// A Wrapper Type for workflow and activity app data 8 | #[derive(Default)] 9 | pub(crate) struct AppData { 10 | map: HashMap>, 11 | } 12 | 13 | impl AppData { 14 | /// Insert an item, overwritting duplicates 15 | pub(crate) fn insert(&mut self, val: T) -> Option { 16 | self.map 17 | .insert(TypeId::of::(), Box::new(val)) 18 | .and_then(downcast_owned) 19 | } 20 | 21 | /// Get a reference to a type in the map 22 | pub(crate) fn get(&self) -> Option<&T> { 23 | self.map 24 | .get(&TypeId::of::()) 25 | .and_then(|boxed| boxed.downcast_ref()) 26 | } 27 | } 28 | 29 | impl fmt::Debug for AppData { 30 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 31 | f.debug_struct("AppData").finish() 32 | } 33 | } 34 | 35 | fn downcast_owned(boxed: Box) -> Option { 36 | boxed.downcast().ok().map(|boxed| *boxed) 37 | } 38 | -------------------------------------------------------------------------------- /test-utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "temporal-sdk-core-test-utils" 3 | 
version = "0.1.0" 4 | authors = ["Spencer Judge "] 5 | edition = "2024" 6 | license-file = { workspace = true } 7 | 8 | [[bin]] 9 | name = "histfetch" 10 | path = "src/histfetch.rs" 11 | 12 | [features] 13 | default = ["ephemeral-server"] 14 | ephemeral-server = ["temporal-sdk-core/ephemeral-server"] 15 | 16 | [dependencies] 17 | anyhow = "1.0" 18 | assert_matches = "1" 19 | async-trait = "0.1" 20 | futures-util = { version = "0.3", default-features = false } 21 | parking_lot = "0.12" 22 | prost = { workspace = true } 23 | rand = "0.9" 24 | temporal-client = { path = "../client" } 25 | temporal-sdk = { path = "../sdk" } 26 | temporal-sdk-core = { path = "../core" } 27 | temporal-sdk-core-api = { path = "../core-api" } 28 | tokio = "1.1" 29 | tracing = "0.1" 30 | url = "2.2" 31 | 32 | [dependencies.temporal-sdk-core-protos] 33 | path = "../sdk-core-protos" 34 | version = "0.1" 35 | 36 | [lints] 37 | workspace = true 38 | -------------------------------------------------------------------------------- /test-utils/src/histfetch.rs: -------------------------------------------------------------------------------- 1 | //! Use this binary to fetch histories as proto-encoded binary. The first argument must be a 2 | //! workflow ID. A run id may optionally be provided as the second arg. The history is written to 3 | //! `{workflow_id}_history.bin`. 4 | //! 5 | //! We can use `clap` if this needs more arguments / other stuff later on. 
6 | 7 | use prost::Message; 8 | use temporal_client::WorkflowClientTrait; 9 | use temporal_sdk_core_test_utils::get_integ_server_options; 10 | 11 | #[tokio::main] 12 | async fn main() -> Result<(), anyhow::Error> { 13 | let gw_opts = get_integ_server_options(); 14 | let client = gw_opts.connect("default", None).await?; 15 | let wf_id = std::env::args() 16 | .nth(1) 17 | .expect("must provide workflow id as only argument"); 18 | let run_id = std::env::args().nth(2); 19 | let hist = client 20 | .get_workflow_execution_history(wf_id.clone(), run_id, vec![]) 21 | .await? 22 | .history 23 | .expect("history field must be populated"); 24 | // Serialize history to file 25 | let byteified = hist.encode_to_vec(); 26 | tokio::fs::write(format!("{wf_id}_history.bin"), &byteified).await?; 27 | Ok(()) 28 | } 29 | -------------------------------------------------------------------------------- /test-utils/src/interceptors.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use parking_lot::Mutex; 3 | use std::{ 4 | collections::VecDeque, 5 | sync::atomic::{AtomicBool, Ordering}, 6 | }; 7 | use temporal_sdk::interceptors::WorkerInterceptor; 8 | use temporal_sdk_core_protos::coresdk::workflow_activation::WorkflowActivation; 9 | 10 | #[derive(Default)] 11 | pub struct ActivationAssertionsInterceptor { 12 | #[allow(clippy::type_complexity)] 13 | assertions: Mutex>>, 14 | used: AtomicBool, 15 | } 16 | 17 | impl ActivationAssertionsInterceptor { 18 | pub fn skip_one(&mut self) -> &mut Self { 19 | self.assertions.lock().push_back(Box::new(|_| {})); 20 | self 21 | } 22 | 23 | pub fn then(&mut self, assert: impl FnOnce(&WorkflowActivation) + 'static) -> &mut Self { 24 | self.assertions.lock().push_back(Box::new(assert)); 25 | self 26 | } 27 | } 28 | 29 | #[async_trait::async_trait(?Send)] 30 | impl WorkerInterceptor for ActivationAssertionsInterceptor { 31 | async fn on_workflow_activation(&self, act: &WorkflowActivation) -> 
Result<(), Error> { 32 | self.used.store(true, Ordering::Relaxed); 33 | if let Some(fun) = self.assertions.lock().pop_front() { 34 | fun(act); 35 | } 36 | Ok(()) 37 | } 38 | } 39 | 40 | impl Drop for ActivationAssertionsInterceptor { 41 | fn drop(&mut self) { 42 | if !self.used.load(Ordering::Relaxed) { 43 | panic!("Activation assertions interceptor was never used!") 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /test-utils/src/workflows.rs: -------------------------------------------------------------------------------- 1 | use crate::prost_dur; 2 | use std::time::Duration; 3 | use temporal_sdk::{ActivityOptions, LocalActivityOptions, WfContext, WorkflowResult}; 4 | use temporal_sdk_core_protos::{coresdk::AsJsonPayloadExt, temporal::api::common::v1::RetryPolicy}; 5 | 6 | pub async fn la_problem_workflow(ctx: WfContext) -> WorkflowResult<()> { 7 | ctx.local_activity(LocalActivityOptions { 8 | activity_type: "delay".to_string(), 9 | input: "hi".as_json_payload().expect("serializes fine"), 10 | retry_policy: RetryPolicy { 11 | initial_interval: Some(prost_dur!(from_micros(15))), 12 | backoff_coefficient: 1_000., 13 | maximum_interval: Some(prost_dur!(from_millis(1500))), 14 | maximum_attempts: 4, 15 | non_retryable_error_types: vec![], 16 | }, 17 | timer_backoff_threshold: Some(Duration::from_secs(1)), 18 | ..Default::default() 19 | }) 20 | .await; 21 | ctx.activity(ActivityOptions { 22 | activity_type: "delay".to_string(), 23 | start_to_close_timeout: Some(Duration::from_secs(20)), 24 | input: "hi!".as_json_payload().expect("serializes fine"), 25 | ..Default::default() 26 | }) 27 | .await; 28 | Ok(().into()) 29 | } 30 | -------------------------------------------------------------------------------- /tests/cloud_tests.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::str::FromStr; 3 | use std::sync::atomic::AtomicBool; 4 | use 
std::sync::atomic::Ordering::Relaxed; 5 | use temporal_client::{ 6 | Client, ClientOptionsBuilder, ClientTlsConfig, RetryClient, TlsConfig, WorkflowClientTrait, 7 | }; 8 | use temporal_sdk::WfContext; 9 | use temporal_sdk_core_protos::temporal::api::enums::v1::EventType; 10 | use temporal_sdk_core_protos::temporal::api::enums::v1::WorkflowTaskFailedCause::WorkflowWorkerUnhandledFailure; 11 | use temporal_sdk_core_protos::temporal::api::history::v1::history_event::Attributes::WorkflowTaskFailedEventAttributes; 12 | use temporal_sdk_core_test_utils::CoreWfStarter; 13 | use url::Url; 14 | 15 | async fn get_client(client_name: &str) -> RetryClient { 16 | let cloud_addr = env::var("TEMPORAL_CLOUD_ADDRESS").unwrap(); 17 | let cloud_key = env::var("TEMPORAL_CLIENT_KEY").unwrap(); 18 | 19 | let client_cert = env::var("TEMPORAL_CLIENT_CERT") 20 | .expect("TEMPORAL_CLIENT_CERT must be set") 21 | .replace("\\n", "\n") 22 | .into_bytes(); 23 | let client_private_key = cloud_key.replace("\\n", "\n").into_bytes(); 24 | let sgo = ClientOptionsBuilder::default() 25 | .target_url(Url::from_str(&cloud_addr).unwrap()) 26 | .client_name(client_name) 27 | .client_version("clientver") 28 | .tls_cfg(TlsConfig { 29 | client_tls_config: Some(ClientTlsConfig { 30 | client_cert, 31 | client_private_key, 32 | }), 33 | ..Default::default() 34 | }) 35 | .build() 36 | .unwrap(); 37 | sgo.connect( 38 | env::var("TEMPORAL_NAMESPACE").expect("TEMPORAL_NAMESPACE must be set"), 39 | None, 40 | ) 41 | .await 42 | .unwrap() 43 | } 44 | 45 | #[tokio::test] 46 | async fn tls_test() { 47 | let con = get_client("tls_tester").await; 48 | con.list_workflow_executions(100, vec![], "".to_string()) 49 | .await 50 | .unwrap(); 51 | } 52 | 53 | #[tokio::test] 54 | async fn grpc_message_too_large_test() { 55 | let wf_name = "oversize_grpc_message"; 56 | let mut starter = 57 | CoreWfStarter::new_with_client(wf_name, get_client("grpc_message_too_large").await); 58 | starter.worker_config.no_remote_activities(true); 
59 | let mut core = starter.worker().await; 60 | 61 | static OVERSIZE_GRPC_MESSAGE_RUN: AtomicBool = AtomicBool::new(false); 62 | core.register_wf(wf_name.to_owned(), |_ctx: WfContext| async move { 63 | if OVERSIZE_GRPC_MESSAGE_RUN.load(Relaxed) { 64 | Ok(vec![].into()) 65 | } else { 66 | OVERSIZE_GRPC_MESSAGE_RUN.store(true, Relaxed); 67 | let result: Vec = vec![0; 5000000]; 68 | Ok(result.into()) 69 | } 70 | }); 71 | starter.start_with_worker(wf_name, &mut core).await; 72 | core.run_until_done().await.unwrap(); 73 | 74 | assert!(starter.get_history().await.events.iter().any(|e| { 75 | e.event_type == EventType::WorkflowTaskFailed as i32 76 | && if let WorkflowTaskFailedEventAttributes(attr) = e.attributes.as_ref().unwrap() { 77 | // TODO tim: Change to custom cause 78 | attr.cause == WorkflowWorkerUnhandledFailure as i32 79 | && attr.failure.as_ref().unwrap().message == "GRPC Message too large" 80 | } else { 81 | false 82 | } 83 | })) 84 | } 85 | -------------------------------------------------------------------------------- /tests/integ_tests/activity_functions.rs: -------------------------------------------------------------------------------- 1 | use temporal_sdk::{ActContext, ActivityError}; 2 | 3 | pub(crate) async fn echo(_ctx: ActContext, e: String) -> Result { 4 | Ok(e) 5 | } 6 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/appdata_propagation.rs: -------------------------------------------------------------------------------- 1 | use assert_matches::assert_matches; 2 | use std::time::Duration; 3 | use temporal_client::{WfClientExt, WorkflowExecutionResult, WorkflowOptions}; 4 | use temporal_sdk::{ActContext, ActivityOptions, WfContext, WorkflowResult}; 5 | use temporal_sdk_core_protos::coresdk::AsJsonPayloadExt; 6 | use temporal_sdk_core_test_utils::CoreWfStarter; 7 | 8 | const TEST_APPDATA_MESSAGE: &str = "custom app data, yay"; 9 | 10 | struct Data { 11 | message: String, 12 | } 13 
| 14 | pub(crate) async fn appdata_activity_wf(ctx: WfContext) -> WorkflowResult<()> { 15 | ctx.activity(ActivityOptions { 16 | activity_type: "echo_activity".to_string(), 17 | start_to_close_timeout: Some(Duration::from_secs(5)), 18 | input: "hi!".as_json_payload().expect("serializes fine"), 19 | ..Default::default() 20 | }) 21 | .await; 22 | Ok(().into()) 23 | } 24 | 25 | #[tokio::test] 26 | async fn appdata_access_in_activities_and_workflows() { 27 | let wf_name = "appdata_activity"; 28 | let mut starter = CoreWfStarter::new(wf_name); 29 | let mut worker = starter.worker().await; 30 | worker.inner_mut().insert_app_data(Data { 31 | message: TEST_APPDATA_MESSAGE.to_owned(), 32 | }); 33 | 34 | let client = starter.get_client().await; 35 | worker.register_wf(wf_name.to_owned(), appdata_activity_wf); 36 | worker.register_activity( 37 | "echo_activity", 38 | |ctx: ActContext, echo_me: String| async move { 39 | let data = ctx.app_data::().expect("appdata exists. qed"); 40 | assert_eq!(data.message, TEST_APPDATA_MESSAGE.to_owned()); 41 | Ok(echo_me) 42 | }, 43 | ); 44 | 45 | let run_id = worker 46 | .submit_wf( 47 | wf_name.to_owned(), 48 | wf_name.to_owned(), 49 | vec![], 50 | WorkflowOptions::default(), 51 | ) 52 | .await 53 | .unwrap(); 54 | worker.run_until_done().await.unwrap(); 55 | let handle = client.get_untyped_workflow_handle(wf_name, run_id); 56 | let res = handle 57 | .get_workflow_result(Default::default()) 58 | .await 59 | .unwrap(); 60 | assert_matches!(res, WorkflowExecutionResult::Succeeded(_)); 61 | } 62 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/cancel_external.rs: -------------------------------------------------------------------------------- 1 | use temporal_client::{GetWorkflowResultOpts, WfClientExt, WorkflowOptions}; 2 | use temporal_sdk::{WfContext, WorkflowResult}; 3 | use temporal_sdk_core_protos::coresdk::{FromJsonPayloadExt, common::NamespacedWorkflowExecution}; 4 | 
use temporal_sdk_core_test_utils::CoreWfStarter; 5 | 6 | const RECEIVER_WFID: &str = "sends-cancel-receiver"; 7 | 8 | async fn cancel_sender(ctx: WfContext) -> WorkflowResult<()> { 9 | let run_id = std::str::from_utf8(&ctx.get_args()[0].data)?.to_owned(); 10 | let sigres = ctx 11 | .cancel_external( 12 | NamespacedWorkflowExecution { 13 | workflow_id: RECEIVER_WFID.to_string(), 14 | run_id, 15 | namespace: ctx.namespace().to_string(), 16 | }, 17 | "cancel-reason".to_string(), 18 | ) 19 | .await; 20 | if ctx.get_args().get(1).is_some() { 21 | // We expect failure 22 | assert!(sigres.is_err()); 23 | } else { 24 | sigres.unwrap(); 25 | } 26 | Ok(().into()) 27 | } 28 | 29 | async fn cancel_receiver(ctx: WfContext) -> WorkflowResult { 30 | let r = ctx.cancelled().await; 31 | Ok(r.into()) 32 | } 33 | 34 | #[tokio::test] 35 | async fn sends_cancel_to_other_wf() { 36 | let mut starter = CoreWfStarter::new("sends_cancel_to_other_wf"); 37 | starter.worker_config.no_remote_activities(true); 38 | let mut worker = starter.worker().await; 39 | worker.register_wf("sender", cancel_sender); 40 | worker.register_wf("receiver", cancel_receiver); 41 | 42 | let receiver_run_id = worker 43 | .submit_wf( 44 | RECEIVER_WFID, 45 | "receiver", 46 | vec![], 47 | WorkflowOptions::default(), 48 | ) 49 | .await 50 | .unwrap(); 51 | worker 52 | .submit_wf( 53 | "sends-cancel-sender", 54 | "sender", 55 | vec![receiver_run_id.clone().into()], 56 | WorkflowOptions::default(), 57 | ) 58 | .await 59 | .unwrap(); 60 | worker.run_until_done().await.unwrap(); 61 | let h = starter 62 | .get_client() 63 | .await 64 | .get_untyped_workflow_handle(RECEIVER_WFID, receiver_run_id); 65 | let res = String::from_json_payload( 66 | &h.get_workflow_result(GetWorkflowResultOpts::default()) 67 | .await 68 | .unwrap() 69 | .unwrap_success()[0], 70 | ) 71 | .unwrap(); 72 | assert!(res.contains("Cancel requested by workflow")); 73 | assert!(res.contains("cancel-reason")); 74 | } 75 | 
-------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/cancel_wf.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | use temporal_client::WorkflowClientTrait; 3 | use temporal_sdk::{WfContext, WfExitValue, WorkflowResult}; 4 | use temporal_sdk_core_protos::temporal::api::enums::v1::WorkflowExecutionStatus; 5 | use temporal_sdk_core_test_utils::CoreWfStarter; 6 | 7 | async fn cancelled_wf(ctx: WfContext) -> WorkflowResult<()> { 8 | let mut reason = "".to_string(); 9 | let cancelled = tokio::select! { 10 | _ = ctx.timer(Duration::from_secs(500)) => false, 11 | r = ctx.cancelled() => { 12 | reason = r; 13 | true 14 | } 15 | }; 16 | 17 | assert_eq!(reason, "Dieee"); 18 | 19 | if cancelled { 20 | Ok(WfExitValue::Cancelled) 21 | } else { 22 | panic!("Should have been cancelled") 23 | } 24 | } 25 | 26 | #[tokio::test] 27 | async fn cancel_during_timer() { 28 | let wf_name = "cancel_during_timer"; 29 | let mut starter = CoreWfStarter::new(wf_name); 30 | starter.worker_config.no_remote_activities(true); 31 | let mut worker = starter.worker().await; 32 | let client = starter.get_client().await; 33 | worker.register_wf(wf_name.to_string(), cancelled_wf); 34 | starter.start_with_worker(wf_name, &mut worker).await; 35 | let wf_id = starter.get_task_queue().to_string(); 36 | 37 | let canceller = async { 38 | tokio::time::sleep(Duration::from_millis(500)).await; 39 | // Cancel the workflow externally 40 | client 41 | .cancel_workflow_execution(wf_id.clone(), None, "Dieee".to_string(), None) 42 | .await 43 | .unwrap(); 44 | }; 45 | 46 | let (_, res) = tokio::join!(canceller, worker.run_until_done()); 47 | res.unwrap(); 48 | let desc = client 49 | .describe_workflow_execution(wf_id, None) 50 | .await 51 | .unwrap(); 52 | 53 | assert_eq!( 54 | desc.workflow_execution_info.unwrap().status, 55 | WorkflowExecutionStatus::Canceled as i32 56 | ); 57 | } 58 | 
-------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/continue_as_new.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | use temporal_client::WorkflowOptions; 3 | use temporal_sdk::{WfContext, WfExitValue, WorkflowResult}; 4 | use temporal_sdk_core_protos::coresdk::workflow_commands::ContinueAsNewWorkflowExecution; 5 | use temporal_sdk_core_test_utils::CoreWfStarter; 6 | 7 | async fn continue_as_new_wf(ctx: WfContext) -> WorkflowResult<()> { 8 | let run_ct = ctx.get_args()[0].data[0]; 9 | ctx.timer(Duration::from_millis(500)).await; 10 | Ok(if run_ct < 5 { 11 | WfExitValue::continue_as_new(ContinueAsNewWorkflowExecution { 12 | arguments: vec![[run_ct + 1].into()], 13 | ..Default::default() 14 | }) 15 | } else { 16 | ().into() 17 | }) 18 | } 19 | 20 | #[tokio::test] 21 | async fn continue_as_new_happy_path() { 22 | let wf_name = "continue_as_new_happy_path"; 23 | let mut starter = CoreWfStarter::new(wf_name); 24 | starter.worker_config.no_remote_activities(true); 25 | let mut worker = starter.worker().await; 26 | worker.register_wf(wf_name.to_string(), continue_as_new_wf); 27 | 28 | worker 29 | .submit_wf( 30 | wf_name.to_string(), 31 | wf_name.to_string(), 32 | vec![[1].into()], 33 | WorkflowOptions::default(), 34 | ) 35 | .await 36 | .unwrap(); 37 | worker.run_until_done().await.unwrap(); 38 | } 39 | 40 | #[tokio::test] 41 | async fn continue_as_new_multiple_concurrent() { 42 | let wf_name = "continue_as_new_multiple_concurrent"; 43 | let mut starter = CoreWfStarter::new(wf_name); 44 | starter 45 | .worker_config 46 | .no_remote_activities(true) 47 | .max_cached_workflows(5_usize) 48 | .max_outstanding_workflow_tasks(5_usize); 49 | let mut worker = starter.worker().await; 50 | worker.register_wf(wf_name.to_string(), continue_as_new_wf); 51 | 52 | let wf_names = (1..=20).map(|i| format!("{wf_name}-{i}")); 53 | for name in wf_names.clone() 
{ 54 | worker 55 | .submit_wf( 56 | name.to_string(), 57 | wf_name.to_string(), 58 | vec![[1].into()], 59 | WorkflowOptions::default(), 60 | ) 61 | .await 62 | .unwrap(); 63 | } 64 | worker.run_until_done().await.unwrap(); 65 | } 66 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/determinism.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | sync::atomic::{AtomicBool, AtomicUsize, Ordering}, 3 | time::Duration, 4 | }; 5 | use temporal_sdk::{ActContext, ActivityOptions, WfContext, WorkflowResult}; 6 | use temporal_sdk_core_protos::coresdk::AsJsonPayloadExt; 7 | use temporal_sdk_core_test_utils::{CoreWfStarter, WorkflowHandleExt}; 8 | 9 | static RUN_CT: AtomicUsize = AtomicUsize::new(1); 10 | 11 | pub(crate) async fn timer_wf_nondeterministic(ctx: WfContext) -> WorkflowResult<()> { 12 | let run_ct = RUN_CT.fetch_add(1, Ordering::Relaxed); 13 | 14 | match run_ct { 15 | 1 | 3 => { 16 | // If we have not run yet or are on the third attempt, schedule a timer 17 | ctx.timer(Duration::from_secs(1)).await; 18 | if run_ct == 1 { 19 | // on first attempt we need to blow up after the timer fires so we will replay 20 | panic!("dying on purpose"); 21 | } 22 | } 23 | 2 => { 24 | // On the second attempt we should cause a nondeterminism error 25 | ctx.activity(ActivityOptions { 26 | activity_type: "whatever".to_string(), 27 | ..Default::default() 28 | }) 29 | .await; 30 | } 31 | _ => panic!("Ran too many times"), 32 | } 33 | Ok(().into()) 34 | } 35 | 36 | #[tokio::test] 37 | async fn test_determinism_error_then_recovers() { 38 | let wf_name = "test_determinism_error_then_recovers"; 39 | let mut starter = CoreWfStarter::new(wf_name); 40 | starter.worker_config.no_remote_activities(true); 41 | let mut worker = starter.worker().await; 42 | 43 | worker.register_wf(wf_name.to_owned(), timer_wf_nondeterministic); 44 | starter.start_with_worker(wf_name, &mut 
worker).await; 45 | worker.run_until_done().await.unwrap(); 46 | // 4 because we still add on the 3rd and final attempt 47 | assert_eq!(RUN_CT.load(Ordering::Relaxed), 4); 48 | } 49 | 50 | #[tokio::test] 51 | async fn task_fail_causes_replay_unset_too_soon() { 52 | let wf_name = "task_fail_causes_replay_unset_too_soon"; 53 | let mut starter = CoreWfStarter::new(wf_name); 54 | let mut worker = starter.worker().await; 55 | 56 | static DID_FAIL: AtomicBool = AtomicBool::new(false); 57 | worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| async move { 58 | if DID_FAIL.load(Ordering::Relaxed) { 59 | assert!(ctx.is_replaying()); 60 | } 61 | ctx.activity(ActivityOptions { 62 | activity_type: "echo".to_string(), 63 | input: "hi!".as_json_payload().expect("serializes fine"), 64 | start_to_close_timeout: Some(Duration::from_secs(2)), 65 | ..Default::default() 66 | }) 67 | .await; 68 | if !DID_FAIL.load(Ordering::Relaxed) { 69 | DID_FAIL.store(true, Ordering::Relaxed); 70 | panic!("Die on purpose"); 71 | } 72 | Ok(().into()) 73 | }); 74 | worker.register_activity("echo", |_ctx: ActContext, echo_me: String| async move { 75 | Ok(echo_me) 76 | }); 77 | 78 | let handle = starter.start_with_worker(wf_name, &mut worker).await; 79 | 80 | worker.run_until_done().await.unwrap(); 81 | handle 82 | .fetch_history_and_replay(worker.inner_mut()) 83 | .await 84 | .unwrap(); 85 | } 86 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/eager.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | use temporal_client::WorkflowClientTrait; 3 | use temporal_sdk::{WfContext, WorkflowResult}; 4 | use temporal_sdk_core_test_utils::{CoreWfStarter, NAMESPACE, get_integ_server_options}; 5 | 6 | pub(crate) async fn eager_wf(_context: WfContext) -> WorkflowResult<()> { 7 | Ok(().into()) 8 | } 9 | 10 | #[tokio::test] 11 | async fn eager_wf_start() { 12 | let wf_name = 
"eager_wf_start"; 13 | let mut starter = CoreWfStarter::new(wf_name); 14 | starter.workflow_options.enable_eager_workflow_start = true; 15 | // hang the test if eager task dispatch failed 16 | starter.workflow_options.task_timeout = Some(Duration::from_secs(1500)); 17 | starter.worker_config.no_remote_activities(true); 18 | let mut worker = starter.worker().await; 19 | worker.register_wf(wf_name.to_owned(), eager_wf); 20 | starter.eager_start_with_worker(wf_name, &mut worker).await; 21 | worker.run_until_done().await.unwrap(); 22 | } 23 | 24 | #[tokio::test] 25 | async fn eager_wf_start_different_clients() { 26 | let wf_name = "eager_wf_start"; 27 | let mut starter = CoreWfStarter::new(wf_name); 28 | starter.workflow_options.enable_eager_workflow_start = true; 29 | // hang the test if wf task needs retry 30 | starter.workflow_options.task_timeout = Some(Duration::from_secs(1500)); 31 | starter.worker_config.no_remote_activities(true); 32 | let mut worker = starter.worker().await; 33 | worker.register_wf(wf_name.to_owned(), eager_wf); 34 | 35 | let client = get_integ_server_options() 36 | .connect(NAMESPACE.to_string(), None) 37 | .await 38 | .expect("Should connect"); 39 | let w = starter.get_worker().await; 40 | let res = client 41 | .start_workflow( 42 | vec![], 43 | w.get_config().task_queue.clone(), // task_queue 44 | wf_name.to_string(), // workflow_id 45 | wf_name.to_string(), // workflow_type 46 | None, 47 | starter.workflow_options.clone(), 48 | ) 49 | .await 50 | .unwrap(); 51 | // different clients means no eager_wf_start. 
52 | assert!(res.eager_workflow_task.is_none()); 53 | 54 | //wf task delivered through default path 55 | worker.expect_workflow_completion(wf_name, Some(res.run_id)); 56 | worker.run_until_done().await.unwrap(); 57 | } 58 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/modify_wf_properties.rs: -------------------------------------------------------------------------------- 1 | use temporal_client::WorkflowClientTrait; 2 | use temporal_sdk::{WfContext, WorkflowResult}; 3 | use temporal_sdk_core_protos::coresdk::{AsJsonPayloadExt, FromJsonPayloadExt}; 4 | use temporal_sdk_core_test_utils::CoreWfStarter; 5 | use uuid::Uuid; 6 | 7 | static FIELD_A: &str = "cat_name"; 8 | static FIELD_B: &str = "cute_level"; 9 | 10 | async fn memo_upserter(ctx: WfContext) -> WorkflowResult<()> { 11 | ctx.upsert_memo([ 12 | (FIELD_A.to_string(), "enchi".as_json_payload().unwrap()), 13 | (FIELD_B.to_string(), 9001.as_json_payload().unwrap()), 14 | ]); 15 | Ok(().into()) 16 | } 17 | 18 | #[tokio::test] 19 | async fn sends_modify_wf_props() { 20 | let wf_name = "can_upsert_memo"; 21 | let wf_id = Uuid::new_v4(); 22 | let mut starter = CoreWfStarter::new(wf_name); 23 | starter.worker_config.no_remote_activities(true); 24 | let mut worker = starter.worker().await; 25 | 26 | worker.register_wf(wf_name, memo_upserter); 27 | let run_id = worker 28 | .submit_wf(wf_id.to_string(), wf_name, vec![], Default::default()) 29 | .await 30 | .unwrap(); 31 | worker.run_until_done().await.unwrap(); 32 | 33 | let memo = starter 34 | .get_client() 35 | .await 36 | .describe_workflow_execution(wf_id.to_string(), Some(run_id)) 37 | .await 38 | .unwrap() 39 | .workflow_execution_info 40 | .unwrap() 41 | .memo 42 | .unwrap() 43 | .fields; 44 | let catname = memo.get(FIELD_A).unwrap(); 45 | let cuteness = memo.get(FIELD_B).unwrap(); 46 | for payload in [catname, cuteness] { 47 | assert!(payload.is_json_payload()); 48 | } 49 | assert_eq!("enchi", 
String::from_json_payload(catname).unwrap()); 50 | assert_eq!(9001, usize::from_json_payload(cuteness).unwrap()); 51 | } 52 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/stickyness.rs: -------------------------------------------------------------------------------- 1 | use crate::integ_tests::workflow_tests::timers::timer_wf; 2 | use std::{ 3 | sync::atomic::{AtomicBool, AtomicUsize, Ordering}, 4 | time::Duration, 5 | }; 6 | use temporal_client::WorkflowOptions; 7 | use temporal_sdk::{WfContext, WorkflowResult}; 8 | use temporal_sdk_core_api::worker::PollerBehavior; 9 | use temporal_sdk_core_test_utils::CoreWfStarter; 10 | use tokio::sync::Barrier; 11 | 12 | #[tokio::test] 13 | async fn timer_workflow_not_sticky() { 14 | let wf_name = "timer_wf_not_sticky"; 15 | let mut starter = CoreWfStarter::new(wf_name); 16 | starter 17 | .worker_config 18 | .no_remote_activities(true) 19 | .max_cached_workflows(0_usize); 20 | let mut worker = starter.worker().await; 21 | worker.register_wf(wf_name.to_owned(), timer_wf); 22 | 23 | starter.start_with_worker(wf_name, &mut worker).await; 24 | worker.run_until_done().await.unwrap(); 25 | } 26 | 27 | static TIMED_OUT_ONCE: AtomicBool = AtomicBool::new(false); 28 | static RUN_CT: AtomicUsize = AtomicUsize::new(0); 29 | async fn timer_timeout_wf(ctx: WfContext) -> WorkflowResult<()> { 30 | RUN_CT.fetch_add(1, Ordering::SeqCst); 31 | let t = ctx.timer(Duration::from_secs(1)); 32 | if !TIMED_OUT_ONCE.load(Ordering::SeqCst) { 33 | ctx.force_task_fail(anyhow::anyhow!("I AM SLAIN!")); 34 | TIMED_OUT_ONCE.store(true, Ordering::SeqCst); 35 | } 36 | t.await; 37 | Ok(().into()) 38 | } 39 | 40 | #[tokio::test] 41 | async fn timer_workflow_timeout_on_sticky() { 42 | // This test intentionally times out a workflow task in order to make the next task be scheduled 43 | // on a not-sticky queue 44 | let wf_name = "timer_workflow_timeout_on_sticky"; 45 | let mut starter = 
CoreWfStarter::new(wf_name); 46 | starter.worker_config.no_remote_activities(true); 47 | starter.workflow_options.task_timeout = Some(Duration::from_secs(2)); 48 | let mut worker = starter.worker().await; 49 | worker.register_wf(wf_name.to_owned(), timer_timeout_wf); 50 | 51 | starter.start_with_worker(wf_name, &mut worker).await; 52 | worker.run_until_done().await.unwrap(); 53 | // If it didn't run twice it didn't time out 54 | assert_eq!(RUN_CT.load(Ordering::SeqCst), 2); 55 | } 56 | 57 | #[tokio::test] 58 | async fn cache_miss_ok() { 59 | let wf_name = "cache_miss_ok"; 60 | let mut starter = CoreWfStarter::new(wf_name); 61 | starter 62 | .worker_config 63 | .no_remote_activities(true) 64 | .max_outstanding_workflow_tasks(2_usize) 65 | .max_cached_workflows(0_usize) 66 | .workflow_task_poller_behavior(PollerBehavior::SimpleMaximum(1_usize)); 67 | let mut worker = starter.worker().await; 68 | 69 | let barr: &'static Barrier = Box::leak(Box::new(Barrier::new(2))); 70 | worker.register_wf(wf_name.to_owned(), move |ctx: WfContext| async move { 71 | barr.wait().await; 72 | ctx.timer(Duration::from_secs(1)).await; 73 | Ok(().into()) 74 | }); 75 | 76 | let run_id = worker 77 | .submit_wf( 78 | wf_name.to_owned(), 79 | wf_name.to_owned(), 80 | vec![], 81 | WorkflowOptions::default(), 82 | ) 83 | .await 84 | .unwrap(); 85 | let core = starter.get_worker().await; 86 | let (r1, _) = tokio::join!(worker.run_until_done(), async move { 87 | barr.wait().await; 88 | core.request_workflow_eviction(&run_id); 89 | // We need to signal the barrier again since the wf gets evicted and will hit it again 90 | barr.wait().await; 91 | }); 92 | r1.unwrap(); 93 | } 94 | -------------------------------------------------------------------------------- /tests/integ_tests/workflow_tests/upsert_search_attrs.rs: -------------------------------------------------------------------------------- 1 | use assert_matches::assert_matches; 2 | use std::{collections::HashMap, time::Duration}; 3 | use 
temporal_client::{ 4 | GetWorkflowResultOpts, WfClientExt, WorkflowClientTrait, WorkflowExecutionResult, 5 | WorkflowOptions, 6 | }; 7 | use temporal_sdk::{WfContext, WfExitValue, WorkflowResult}; 8 | use temporal_sdk_core_protos::coresdk::{AsJsonPayloadExt, FromJsonPayloadExt}; 9 | use temporal_sdk_core_test_utils::{CoreWfStarter, SEARCH_ATTR_INT, SEARCH_ATTR_TXT}; 10 | use uuid::Uuid; 11 | 12 | async fn search_attr_updater(ctx: WfContext) -> WorkflowResult<()> { 13 | let mut int_val = ctx 14 | .search_attributes() 15 | .indexed_fields 16 | .get(SEARCH_ATTR_INT) 17 | .cloned() 18 | .unwrap_or_default(); 19 | let orig_val = int_val.data[0]; 20 | int_val.data[0] += 1; 21 | ctx.upsert_search_attributes([ 22 | (SEARCH_ATTR_TXT.to_string(), "goodbye".as_json_payload()?), 23 | (SEARCH_ATTR_INT.to_string(), int_val), 24 | ]); 25 | // 49 is ascii 1 26 | if orig_val == 49 { 27 | Ok(WfExitValue::ContinueAsNew(Box::default())) 28 | } else { 29 | Ok(().into()) 30 | } 31 | } 32 | 33 | #[tokio::test] 34 | async fn sends_upsert() { 35 | let wf_name = "sends_upsert_search_attrs"; 36 | let wf_id = Uuid::new_v4(); 37 | let mut starter = CoreWfStarter::new(wf_name); 38 | starter.worker_config.no_remote_activities(true); 39 | let mut worker = starter.worker().await; 40 | 41 | worker.register_wf(wf_name, search_attr_updater); 42 | worker 43 | .submit_wf( 44 | wf_id.to_string(), 45 | wf_name, 46 | vec![], 47 | WorkflowOptions { 48 | search_attributes: Some(HashMap::from([ 49 | ( 50 | SEARCH_ATTR_TXT.to_string(), 51 | "hello".as_json_payload().unwrap(), 52 | ), 53 | (SEARCH_ATTR_INT.to_string(), 1.as_json_payload().unwrap()), 54 | ])), 55 | execution_timeout: Some(Duration::from_secs(4)), 56 | ..Default::default() 57 | }, 58 | ) 59 | .await 60 | .unwrap(); 61 | worker.run_until_done().await.unwrap(); 62 | 63 | let client = starter.get_client().await; 64 | let search_attrs = client 65 | .describe_workflow_execution(wf_id.to_string(), None) 66 | .await 67 | .unwrap() 68 | 
.workflow_execution_info 69 | .unwrap() 70 | .search_attributes 71 | .unwrap() 72 | .indexed_fields; 73 | let txt_attr_payload = search_attrs.get(SEARCH_ATTR_TXT).unwrap(); 74 | let int_attr_payload = search_attrs.get(SEARCH_ATTR_INT).unwrap(); 75 | for payload in [txt_attr_payload, int_attr_payload] { 76 | assert!(payload.is_json_payload()); 77 | } 78 | assert_eq!( 79 | "goodbye", 80 | String::from_json_payload(txt_attr_payload).unwrap() 81 | ); 82 | assert_eq!(3, usize::from_json_payload(int_attr_payload).unwrap()); 83 | let handle = client.get_untyped_workflow_handle(wf_id.to_string(), ""); 84 | let res = handle 85 | .get_workflow_result(GetWorkflowResultOpts::default()) 86 | .await 87 | .unwrap(); 88 | assert_matches!(res, WorkflowExecutionResult::Succeeded(_)); 89 | } 90 | -------------------------------------------------------------------------------- /tests/main.rs: -------------------------------------------------------------------------------- 1 | //! Integration tests 2 | 3 | #[macro_use] 4 | extern crate rstest; 5 | #[macro_use] 6 | extern crate temporal_sdk_core_test_utils; 7 | 8 | #[cfg(test)] 9 | mod integ_tests { 10 | mod activity_functions; 11 | mod client_tests; 12 | mod ephemeral_server_tests; 13 | mod heartbeat_tests; 14 | mod metrics_tests; 15 | mod polling_tests; 16 | mod queries_tests; 17 | mod update_tests; 18 | mod visibility_tests; 19 | mod worker_tests; 20 | mod worker_versioning_tests; 21 | mod workflow_tests; 22 | 23 | use std::time::Duration; 24 | use temporal_client::{NamespacedClient, WorkflowService}; 25 | use temporal_sdk_core::{CoreRuntime, init_worker}; 26 | use temporal_sdk_core_api::worker::WorkerConfigBuilder; 27 | use temporal_sdk_core_protos::temporal::api::{ 28 | nexus::v1::{EndpointSpec, EndpointTarget, endpoint_target}, 29 | operatorservice::v1::CreateNexusEndpointRequest, 30 | workflowservice::v1::ListNamespacesRequest, 31 | }; 32 | use temporal_sdk_core_test_utils::{ 33 | CoreWfStarter, get_integ_server_options, 
get_integ_telem_options, rand_6_chars, 34 | }; 35 | 36 | // Create a worker like a bridge would (unwraps aside) 37 | #[tokio::test] 38 | #[ignore] // Really a compile time check more than anything 39 | async fn lang_bridge_example() { 40 | let opts = get_integ_server_options(); 41 | let runtime = CoreRuntime::new_assume_tokio(get_integ_telem_options()).unwrap(); 42 | let mut retrying_client = opts 43 | .connect_no_namespace(runtime.telemetry().get_temporal_metric_meter()) 44 | .await 45 | .unwrap(); 46 | 47 | let _worker = init_worker( 48 | &runtime, 49 | WorkerConfigBuilder::default() 50 | .namespace("default") 51 | .task_queue("Wheee!") 52 | .build() 53 | .unwrap(), 54 | // clone the client if you intend to use it later. Strip off the retry wrapper since 55 | // worker will assert its own 56 | retrying_client.clone(), 57 | ); 58 | 59 | // Do things with worker or client 60 | let _ = retrying_client 61 | .list_namespaces(ListNamespacesRequest::default()) 62 | .await; 63 | } 64 | 65 | pub(crate) async fn mk_nexus_endpoint(starter: &mut CoreWfStarter) -> String { 66 | let client = starter.get_client().await; 67 | let endpoint = format!("mycoolendpoint-{}", rand_6_chars()); 68 | let mut op_client = client.get_client().inner().operator_svc().clone(); 69 | op_client 70 | .create_nexus_endpoint(CreateNexusEndpointRequest { 71 | spec: Some(EndpointSpec { 72 | name: endpoint.to_owned(), 73 | description: None, 74 | target: Some(EndpointTarget { 75 | variant: Some(endpoint_target::Variant::Worker(endpoint_target::Worker { 76 | namespace: client.namespace().to_owned(), 77 | task_queue: starter.get_task_queue().to_owned(), 78 | })), 79 | }), 80 | }), 81 | }) 82 | .await 83 | .unwrap(); 84 | // Endpoint creation can (as of server 1.25.2 at least) return before they are actually usable. 85 | tokio::time::sleep(Duration::from_millis(800)).await; 86 | endpoint 87 | } 88 | } 89 | --------------------------------------------------------------------------------