├── .env ├── .gitignore ├── README.md ├── compose-postgres.yml ├── compose-services-replication.yml ├── compose-services.yml ├── deployment ├── envoy │ └── envoy.yaml ├── grafana │ ├── dashboards │ │ └── replay-worker.json │ ├── grafana.ini │ ├── old_dashboards │ │ ├── dockermetrics.json │ │ ├── postgres.json │ │ ├── sdk.json │ │ └── temporal.json │ └── provisioning │ │ ├── alerting │ │ └── alerts.yaml │ │ ├── dashboards │ │ └── all.yml │ │ └── datasources │ │ └── all.yml ├── haproxy │ └── haproxy.cfg ├── loki │ └── local-config.yaml ├── nginx │ └── nginx.conf ├── otel │ └── otel-config.yaml └── prometheus │ └── config.yml ├── dynamicconfig ├── development-c1.yaml ├── development-c2.yaml └── development.yaml ├── fluentd ├── Dockerfile └── conf │ └── fluent.conf ├── script └── setup.sh └── template ├── my_config_template.yaml ├── replication_template_c1.yaml └── replication_template_c2.yaml /.env: -------------------------------------------------------------------------------- 1 | POSTGRES_DEFAULT_PORT=5432 2 | POSTGRES_USER=temporal 3 | POSTGRES_PWD=temporal 4 | TEMPORAL_SERVER_IMG=1.26.2 5 | TEMPORAL_ADMINTOOLS_IMG=1.26.2-tctl-1.18.1-cli-1.2.0 6 | TEMPORAL_UI_IMG=2.34.0 7 | GRAFANA_IMG=9.5.2 8 | PROMETHEUS_IMG=v2.37.0 9 | PORTAINER_IMG=2.16.2-alpine 10 | JAEGER_IMG=1.37 11 | OTEL_IMG=0.47.0 12 | KIBANA_IMG=7.13.1 13 | LOKI_IMG=latest 14 | NGINX_IMG=1.22.1 15 | HAPROXY_IMG=2.7.3 16 | MYSQL_IMG=8 17 | ENVOY_IMG=dev-706fe7871ab5fe631406db1e0fe5af1c4d0eb1b8 18 | POSTGRES_EXPORTER_IMG=v0.13.2 19 | CADVISOR_IMG=v0.47.2 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Eclipse 2 | .project 3 | .classpath 4 | .settings/ 5 | bin/ 6 | target/ 7 | 8 | # IntelliJ 9 | .idea 10 | *.ipr 11 | *.iml 12 | *.iws 13 | 14 | # NetBeans 15 | nb-configuration.xml 16 | 17 | # Visual Studio Code 18 | .vscode 19 | .factorypath 20 | 21 | # OSX 22 | .DS_Store 23 | 24 | # Vim 25 | *.swp 26 | *.swo 27 | 28 | # patch 29 | *.orig 30 | *.rej 31 | 32 | # Local environment -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Table of Contents 2 | 3 | - [About](#about) 4 | - [Deploying your Temporal service on Docker](#deploying-your-service) 5 | - [Some usueful Docker commands](#some-useful-docker-commands) 6 | - [Troubleshoot](#troubleshoot) 7 | - [Extra](#extra) 8 | - [Multi Cluster Replication setup](#multi-cluster-replication-setup) 9 | - [Swarm: Deploy on single node Swarm](#deploying-on-single-node-swarm) 10 | - [Compose: Temporalite](#deploying-temporalite) 11 | 12 | ## About 13 | 14 | This repo includes some experiments on self-deploying Temporal server via Docker 15 | Compose and Swarm. 16 | 17 | It can serve as reference to community for a number of Docker related 18 | deployment questions. 19 | For this repo we use PostgreSQL for persistence for temporal db. We set up 20 | advanced visibility with Postgres DB (but options with OpenSearch / ElasticSearch are possible) for temporal_visibility. 21 | It also shows how to set up internal frontend service and use by worker service (even tho we do not set up yet 22 | custom authorizer/claims mapper). 23 | It also shows how to set up server grpc tracing with otel collector and can be visualized with Jaeger. 
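Since the deployment is split across several compose files, with image versions pinned in `.env`, it can be handy to render the merged, fully-resolved configuration before starting anything. A minimal sketch (run from the repo root so the `.env` file is picked up automatically):

```
# Print the merged compose config with the ${...} variables from .env substituted.
# Nothing is started by this command, it is just a sanity check.
docker compose -f compose-postgres.yml -f compose-services.yml config | less
```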
24 | 25 | In addition it has an out-of-the-box sample of setting up multi-cluster replication, and some CLI commands 26 | to get it up and running locally. 27 | 28 | ## Deploying your service 29 | 30 | This setup is closer to a production environment, as it deploys each Temporal server role 31 | (frontend, matching, history, worker) in individual containers. 32 | The setup runs two instances (containers) each for the frontend, matching and history hosts. 33 | The service is set up with 2K shards, so ~1K per history host. 34 | 35 | Each server role container exposes server metrics under its own port. 36 | Note that bashing into the admin-tools image also gives you access to tctl as well as the temporal-tools for different 37 | databases. 38 | 39 | ### How to start 40 | 41 | First we need to install the Loki Docker driver plugin (you only have to do this once): 42 | 43 | docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions 44 | 45 | Check if the plugin is installed: 46 | 47 | docker plugin ls 48 | 49 | (you should see the Loki Logging Driver plugin listed) 50 | 51 | Then in the main repo dir run: 52 | 53 | docker network create temporal-network 54 | docker compose -f compose-postgres.yml -f compose-services.yml up --detach 55 | 56 | ## Check if it works 57 | 58 | ### If you are running tctl locally 59 | 60 | tctl cl h 61 | 62 | should return "SERVING" 63 | 64 | ### If you don't have tctl locally 65 | 66 | Bash into the admin-tools container and run tctl from there (you can do this from your machine if you have tctl installed too): 67 | 68 | docker container ls --format "table {{.ID}}\t{{.Image}}\t{{.Names}}" 69 | 70 | Copy the id of the temporal-admin-tools container, then: 71 | 72 | docker exec -it <admin-tools-container-id> bash 73 | tctl cl h 74 | 75 | You should see the response: 76 | 77 | temporal.api.workflowservice.v1.WorkflowService: SERVING 78 | 79 | Note: if you set up HAProxy instead of the default Envoy and don't see "SERVING" but rather "context deadline exceeded" errors, 80 | restart the "temporal-haproxy" container. Not yet sure why, but HAProxy is 81 | doing something weird. Once you restart it, the admin-tools container should be able to finish 82 | its setup-server script and things should work fine. If anyone can figure out the issue 83 | please submit a PR. Thanks. 84 | 85 | We start Postgres from a separate compose file, but you don't have to; you can combine them if you want. 86 | 87 | By the way, if you want to docker exec into the Postgres container, do: 88 | 89 | docker exec -it temporal-postgresql psql -U temporal 90 | \l 91 | 92 | which should show the temporal and temporal_visibility dbs. 93 | 94 | (You can do this via Portainer as well, this just shows the "long way") 95 | 96 | In addition, let's check out the membership rings in the cluster: 97 | 98 | tctl adm cl d 99 | 100 | You should see two members each for the frontend, matching and history service rings, and one 101 | for the worker service (typically you don't need to scale the worker service). 102 |
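If you script the bring-up (for CI or throwaway demo environments), it can help to wait for the frontend to report healthy before creating namespaces or poking at the rings. A small sketch, assuming tctl is installed locally and the load balancer is published on 127.0.0.1:7233 as in this setup:

```
# Poll the load-balanced frontend until the gRPC health check reports SERVING.
until tctl --address 127.0.0.1:7233 cluster health 2>/dev/null | grep -q SERVING; do
  echo "temporal frontend not ready yet, retrying in 5s..."
  sleep 5
done
echo "frontend is SERVING"
```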
103 | ### Health check service containers 104 | 105 | * Frontend (via grpcurl) 106 | ``` 107 | grpcurl -plaintext -d '{"service": "temporal.api.workflowservice.v1.WorkflowService"}' 127.0.0.1:7233 grpc.health.v1.Health/Check 108 | ``` 109 | 110 | Again, you can just run `tctl cl h` too. 111 | 112 | * Frontend (via grpc-health-probe) 113 | 114 | ``` 115 | grpc-health-probe -addr=localhost:7233 -service=temporal.api.workflowservice.v1.WorkflowService 116 | ``` 117 | 118 | To check the two frontend services individually: 119 | 120 | ``` 121 | grpc-health-probe -addr=localhost:7236 -service=temporal.api.workflowservice.v1.WorkflowService 122 | grpc-health-probe -addr=localhost:7237 -service=temporal.api.workflowservice.v1.WorkflowService 123 | ``` 124 | 125 | * Internal Frontend (via grpc-health-probe) 126 | ``` 127 | grpc-health-probe -addr=localhost:7231 -service=temporal.api.workflowservice.v1.WorkflowService 128 | ``` 129 | 130 | * Matching (via grpc-health-probe) 131 | 132 | ``` 133 | grpc-health-probe -addr=localhost:7235 -service=temporal.api.workflowservice.v1.MatchingService 134 | grpc-health-probe -addr=localhost:7249 -service=temporal.api.workflowservice.v1.MatchingService 135 | ``` 136 | 137 | * History (via grpc-health-probe) 138 | 139 | ``` 140 | grpc-health-probe -addr=localhost:7234 -service=temporal.api.workflowservice.v1.HistoryService 141 | grpc-health-probe -addr=localhost:7238 -service=temporal.api.workflowservice.v1.HistoryService 142 | ``` 143 | 144 | ### HTTP API 145 | Added support for the HTTP API, which became available with the 1.22.0 server release. 146 | To test it you can, for example, run: 147 | 148 | ``` 149 | curl http://localhost:7243/api/v1/namespaces/default 150 | ``` 151 | 152 | once your service is up and running. For more info see [here](https://github.com/temporalio/api/blob/master/temporal/api/workflowservice/v1/service.proto).
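The same listener can serve other read-only calls. As a hedged example, assuming your server version maps ListWorkflowExecutions from the service proto linked above to `/api/v1/namespaces/{namespace}/workflows`:

```
# List running workflow executions in the default namespace over the HTTP API
# (goes through the same Envoy HTTP listener on port 7243).
curl -G "http://localhost:7243/api/v1/namespaces/default/workflows" \
  --data-urlencode "query=ExecutionStatus='Running'"
```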
153 | 154 | ### Links 155 | 156 | * Server metrics (raw) 157 | * [History Service1](http://localhost:8000/metrics) 158 | * [History Service2](http://localhost:8005/metrics) 159 | * [Matching Service1](http://localhost:8001/metrics) 160 | * [Matching Service2](http://localhost:8006/metrics) 161 | * [Frontend Service1](http://localhost:8002/metrics) 162 | * [Frontend Service2](http://localhost:8004/metrics) 163 | * [Internal Frontend](http://localhost:8007/metrics) 164 | * [Worker Service](http://localhost:8003/metrics) 165 | * [Prometheus targets (scrape points)](http://localhost:9090/targets) 166 | * [Grafana (includes server, sdk, docker, and postgres dashboards)](http://localhost:8085/) 167 | * No login required 168 | * In order to scrape Docker system metrics, add "metrics-addr":"127.0.0.1:9323" to your Docker daemon.json; on Mac this is located at ~/.docker/daemon.json 169 | * Go to "Explore" and select the Loki data source to run LogQL against server logs 170 | * [Web UI v2](http://localhost:8081/namespaces/default/workflows) 171 | * [Web UI v1](http://localhost:8088/) 172 | * [Portainer](http://localhost:9000/) 173 | * Note you will have to create a user the first time you log in 174 | * Yes, it forces a longer password, but whatever 175 | * [Jaeger](http://localhost:16686/) - includes server grpc traces 176 | * [PgAdmin](http://localhost:5050/) (username: pgadmin4@pgadmin.org, password: admin) 177 | * [cAdvisor](http://localhost:9092/docker) to monitor docker containers 178 | 179 | 180 | ### Custom docker template 181 | 182 | The Docker server image by default uses [this](https://github.com/temporalio/temporal/blob/master/docker/config_template.yaml) server config template. 183 | This is a base template that may not fit everyone's needs. You can define your own configuration template if you wish, 184 | and this is what we are doing via [my_config_template.yaml](template/my_config_template.yaml). 185 | With this you can customize the template as you wish; for example, you could configure env vars for namespace setup, like setting up 186 | S3 archival etc., which is not possible with the default template. 187 | 188 | ### Exclude metrics 189 | 190 | This demo also shows how to exclude certain metrics produced by Temporal services 191 | when scraping their metric endpoints; for example, [here](/deployment/prometheus/config.yml) we drop all metrics 192 | for the "temporal_system" namespace. 193 | 194 | ### Client access 195 | The Envoy / HAProxy / NGINX load balancer is exposed on 127.0.0.1:7233 (so all SDK samples should work w/o changes). It load balances the two 196 | Temporal frontend services defined in the docker compose. 197 | 198 | ### Envoy 199 | This sample uses Envoy load balancing by default. Check out the Envoy config file [here](/deployment/envoy/envoy.yaml) and make 200 | any necessary changes. Note this is just a demo, so you might want to 201 | update the values where needed for your prod env. 202 | Envoy pushes access logs to stdout, which Loki picks up, so you can run 203 | queries against them in Grafana. This includes the grpc status code, request/response sizes and more. 204 |
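Envoy's admin interface is published on port 9901 in compose-services.yml, which gives a quick way to confirm that requests really are being spread across both frontends. A small sketch using the cluster names from the Envoy config:

```
# After running a few tctl / temporal commands, both clusters should show a
# growing upstream request count.
curl -s http://localhost:9901/stats | grep -E 'cluster\.temporal_service2?\.upstream_rq_total'
```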
205 | ### HAProxy 206 | You can set up HAProxy load balancing if you want. It load balances 207 | our two frontend services. Check out the HAProxy config file [here](/deployment/haproxy/haproxy.cfg) and make 208 | any necessary changes. Note this is just a demo, so you might want to 209 | update the values where needed for your prod env. 210 | 211 | I have run into some issues with this HAProxy setup, specifically 212 | sometimes having to restart its container in order for admin-tools to be able to complete 213 | setup, as well as executions' first workflow task timing out. 214 | Pretty sure this has to do with some problem in the config; maybe someone could 215 | take a look and fix it. 216 | 217 | ### NGINX 218 | You can also configure NGINX and use it for load balancing. It load balances our two Temporal frontends. 219 | Check out the NGINX config file [here](/deployment/nginx/nginx.conf) and make any necessary adjustments. Remember, this is just a demo, and 220 | for production use you should make sure to update values where necessary. 221 | 222 | ## Some useful Docker commands 223 | docker-compose down --volumes 224 | docker system prune -a 225 | docker volume prune 226 | 227 | docker compose up --detach 228 | docker compose up --force-recreate 229 | 230 | docker stop $(docker ps -a -q) 231 | docker rm $(docker ps -a -q) 232 | 233 | ## Troubleshoot 234 | 235 | * "Not enough hosts to serve the request" 236 | * Can happen on startup when some Temporal service container did not start up properly; running the docker compose command again typically fixes this 237 | 238 | ## Extra 239 | Here are some extra configurations, try them out and please report any errors. 240 | 241 | ## Multi Cluster Replication Setup 242 | 243 | Clear your docker env (see the "Some useful Docker commands" section). 244 | 245 | Start the multi-cluster replication services: 246 | 247 | docker compose -f compose-services-replication.yml up --detach 248 | 249 | Get the container IPs of the two clusters: 250 | 251 | docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' temporalc1 252 | 253 | docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' temporalc2 254 | 255 | Write down these IPs for the temporalc1 and temporalc2 clusters; we reference them below as 256 | TEMPORALC1_CLUSTER_IP and TEMPORALC2_CLUSTER_IP respectively. 257 | 258 | Enable the connection from the temporalc1 cluster to the temporalc2 cluster: 259 | 260 | temporal operator cluster upsert --enable-connection --frontend-address "TEMPORALC2_CLUSTER_IP:2233" 261 | 262 | Enable the connection from the temporalc2 cluster to the temporalc1 cluster: 263 | 264 | temporal --address 127.0.0.1:2233 operator cluster upsert --enable-connection --frontend-address "TEMPORALC1_CLUSTER_IP:7233" 265 | 266 | Create a global namespace on the temporalc1 cluster and set both clusters as a reference: 267 | 268 | temporal operator namespace create abcnamespace --global --active-cluster c1 --cluster c1 --cluster c2 269 | 270 | Start an execution on "abcnamespace" now and look at the web UI for the temporalc1 and temporalc2 clusters; you can access them via 271 | 272 | http://localhost:8081/namespaces/abcnamespace/workflows 273 | http://localhost:8082/namespaces/abcnamespace/workflows 274 | 275 | ## Deploying on single node Swarm 276 | 277 | Init the Swarm capability if you haven't already: 278 | 279 | docker swarm init 280 | 281 | Create the overlay network: 282 | 283 | docker network create --scope=swarm --driver=overlay --attachable temporal-network 284 | 285 | (Optional) Create the visualizer service: 286 | 287 | docker service create \ 288 | --name=viz \ 289 | --publish=8050:8080/tcp \ 290 | --constraint=node.role==manager \ 291 | --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \ 292 | dockersamples/visualizer 293 | 294 | Create the PostgreSQL stack: 295 | 296 | docker stack deploy -c compose-postgres.yml
temporal-postgres 297 | 298 | Create the services stack 299 | 300 | docker stack deploy -c compose-services.yml temporal-services 301 | 302 | Check out your stacks 303 | 304 | docker stack ls 305 | 306 | Check out your services 307 | 308 | docker service ls 309 | 310 | Note they should all have mode "replicated" and 1 replica by default 311 | (if they don't show that right away wait a sec or two and run this command again) 312 | 313 | Inspect frontend service 314 | 315 | docker service inspect --pretty temporal-services_temporal-frontend 316 | 317 | ### Let's have some fun with Swarm 318 | 319 | 320 | Let's scale the history service to 2 321 | (you can do this for other services too if you want to play around) 322 | 323 | docker service scale temporal-services_temporal-history=2 324 | 325 | Run `docker service ls` again, you should see 2 replicas now for history node 326 | 327 | ### Todo 328 | 329 | Still trying to figure out how to access frontend 7233 port outside of swarm. 330 | It has something to do with port ingress and grpc but im not sure what yet. 331 | If anyone knows let me know :) 332 | 333 | Right now you would need to deploy your temporal client service to 334 | swarm and set target temporal-frontend:7233 to connect and run workflows. 335 | You can always bash into the admin-tools service and run tctl from there, 336 | via Portainer or in your terminal. 337 | 338 | ### Important links: 339 | 340 | * Server metrics (raw) 341 | * [History Service](http://localhost:8000/metrics) 342 | * [Matching Service](http://localhost:8001/metrics) 343 | * [Frontend Service](http://localhost:8002/metrics) 344 | * [Worker Service](http://localhost:8003/metrics) 345 | * [Prometheus targets (scrape points)](http://localhost:9090/targets) 346 | * [Grafana (includes server, sdk, docker, and postgres dashboards)](http://localhost:8085/) 347 | * no login required 348 | * In order to scrape docker system metrics add "metrics-addr":"127.0.0.1:9323" to your docker daemon.js, on Mac this is located at ~/.docker/daemon.json 349 | * [Web UI v2](http://localhost:8081/namespaces/default/workflows) 350 | * [Web UI v1](http://localhost:8088/) 351 | * [Portainer](http://localhost:9000/) 352 | * Note you will have to create an user the first time you log in 353 | * Yes it forces a longer password but whatever 354 | * [Swarm visualizer](http://localhost:8050/) 355 | 356 | To leave swarm mode after your done you can do: 357 | 358 | docker swarm leave -f 359 | -------------------------------------------------------------------------------- /compose-postgres.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | services: 3 | postgresql: 4 | container_name: temporal-postgresql 5 | command: postgres -c 'max_connections=200' 6 | environment: 7 | POSTGRES_PASSWORD: temporal 8 | POSTGRES_USER: temporal 9 | image: postgres:13 10 | ports: 11 | - 5432:5432 12 | postgres-exporter: 13 | container_name: postgres-exporter 14 | image: prometheuscommunity/postgres-exporter:${POSTGRES_EXPORTER_IMG} 15 | environment: 16 | - DATA_SOURCE_URI=postgresql:5432/postgres?sslmode=disable 17 | - DATA_SOURCE_USER=temporal 18 | - DATA_SOURCE_PASS=temporal 19 | - PG_EXPORTER_INCLUDE_DATABASES=temporal,temporal_visibility 20 | ports: 21 | - "9187:9187" 22 | pgadmin: 23 | container_name: temporal-pgadmin 24 | image: dpage/pgadmin4 25 | environment: 26 | PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:-pgadmin4@pgadmin.org} 27 | PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-admin} 28 
| PGADMIN_CONFIG_SERVER_MODE: 'False' 29 | ports: 30 | - "5050:80" 31 | restart: unless-stopped 32 | networks: 33 | default: 34 | external: true 35 | name: temporal-network -------------------------------------------------------------------------------- /compose-services-replication.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | services: 3 | postgresqlc1: 4 | container_name: temporal-postgresql-c1 5 | command: postgres -c 'max_connections=200' 6 | environment: 7 | POSTGRES_PASSWORD: temporal 8 | POSTGRES_USER: temporal 9 | image: postgres:13 10 | ports: 11 | - 5432:5432 12 | volumes: 13 | - /var/lib/postgresql/data2 14 | postgresqlc2: 15 | container_name: temporal-postgresql-c2 16 | command: postgres -c 'max_connections=200' 17 | environment: 18 | POSTGRES_PASSWORD: temporal 19 | POSTGRES_USER: temporal 20 | PGPORT: 7432 21 | image: postgres:13 22 | ports: 23 | - 7432:7432 24 | volumes: 25 | - /var/lib/postgresql/data3 26 | temporalc1: 27 | container_name: temporalc1 28 | depends_on: 29 | - postgresqlc1 30 | environment: 31 | - DB=postgres12 32 | - DB_PORT=5432 33 | - POSTGRES_USER=temporal 34 | - POSTGRES_PWD=temporal 35 | - POSTGRES_SEEDS=postgresqlc1 36 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-c1.yaml 37 | - PROMETHEUS_ENDPOINT=0.0.0.0:8003 38 | image: temporalio/auto-setup:${TEMPORAL_SERVER_IMG} 39 | ports: 40 | - 7233:7233 41 | - 8003:8003 42 | volumes: 43 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 44 | - ./template/replication_template_c1.yaml:/etc/temporal/config/config_template.yaml 45 | temporal-ui-c1: 46 | container_name: temporal-ui-c1 47 | depends_on: 48 | - temporalc1 49 | environment: 50 | - TEMPORAL_ADDRESS=temporalc1:7233 51 | - TEMPORAL_CORS_ORIGINS=http://localhost:3000 52 | - TEMPORAL_UI_PORT=8081 53 | - TEMPORAL_SHOW_TEMPORAL_SYSTEM_NAMESPACE=true 54 | image: temporalio/ui:${TEMPORAL_UI_IMG} 55 | ports: 56 | - 8081:8081 57 | temporalc2: 58 | container_name: temporalc2 59 | depends_on: 60 | - postgresqlc2 61 | environment: 62 | - DB=postgres12 63 | - DB_PORT=7432 64 | - POSTGRES_USER=temporal 65 | - POSTGRES_PWD=temporal 66 | - POSTGRES_SEEDS=postgresqlc2 67 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-c2.yaml 68 | - FRONTEND_GRPC_PORT=2233 69 | - FRONTEND_MEMBERSHIP_PORT=2933 70 | - FRONTEND_HTTP_PORT=2245 71 | - MATCHING_MEMBERSHIP_PORT=2935 72 | - HISTORY_MEMBERSHIP_PORT=2934 73 | - WORKER_GRPC_PORT=2239 74 | - WORKER_MEMBERSHIP_PORT=2939 75 | - PROMETHEUS_ENDPOINT=0.0.0.0:8004 76 | - TEMPORAL_ADDRESS=temporalc2:2233 77 | image: temporalio/auto-setup:${TEMPORAL_SERVER_IMG} 78 | ports: 79 | - 2233:2233 80 | - 2245:2245 81 | - 8004:8004 82 | volumes: 83 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 84 | - ./template/replication_template_c2.yaml:/etc/temporal/config/config_template.yaml 85 | temporal-ui-c2: 86 | container_name: temporal-ui-c2 87 | depends_on: 88 | - temporalc2 89 | environment: 90 | - TEMPORAL_ADDRESS=temporalc2:2233 91 | - TEMPORAL_CORS_ORIGINS=http://localhost:3000 92 | - TEMPORAL_UI_PORT=8082 93 | - TEMPORAL_SHOW_TEMPORAL_SYSTEM_NAMESPACE=true 94 | image: temporalio/ui:${TEMPORAL_UI_IMG} 95 | ports: 96 | - 8082:8082 97 | networks: 98 | default: 99 | external: true 100 | name: temporal-network-replication -------------------------------------------------------------------------------- /compose-services.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | x-logging: &logging 3 | 
logging: 4 | driver: loki 5 | options: 6 | loki-url: "http://host.docker.internal:3100/loki/api/v1/push" 7 | mode: non-blocking 8 | max-buffer-size: 4m 9 | loki-retries: "3" 10 | services: 11 | temporal-history: 12 | <<: *logging 13 | container_name: temporal-history 14 | # depends_on: 15 | # - temporal-admin-tools 16 | environment: 17 | - DB=postgres12 18 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 19 | - POSTGRES_USER=${POSTGRES_USER} 20 | - POSTGRES_PWD=${POSTGRES_PWD} 21 | - POSTGRES_SEEDS=postgresql 22 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 23 | - SERVICES=history 24 | - USE_INTERNAL_FRONTEND=true 25 | - LOG_LEVEL=warn 26 | # - BIND_ON_IP=0.0.0.0 27 | - PROMETHEUS_ENDPOINT=0.0.0.0:8000 28 | # - TEMPORAL_BROADCAST_ADDRESS=temporal-history 29 | - NUM_HISTORY_SHARDS=2048 30 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 31 | ports: 32 | - published: 7234 33 | target: 7234 34 | - published: 8000 35 | target: 8000 36 | restart: on-failure 37 | volumes: 38 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 39 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 40 | temporal-history2: 41 | <<: *logging 42 | container_name: temporal-history2 43 | # depends_on: 44 | # - temporal-admin-tools 45 | environment: 46 | - DB=postgres12 47 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 48 | - POSTGRES_USER=${POSTGRES_USER} 49 | - POSTGRES_PWD=${POSTGRES_PWD} 50 | - POSTGRES_SEEDS=postgresql 51 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 52 | - SERVICES=history 53 | - NUM_HISTORY_SHARDS=2048 54 | - USE_INTERNAL_FRONTEND=true 55 | # - BIND_ON_IP=0.0.0.0 56 | - PROMETHEUS_ENDPOINT=0.0.0.0:8005 57 | # - TEMPORAL_BROADCAST_ADDRESS=temporal-history 58 | - HISTORY_MEMBERSHIP_PORT=6934 59 | - HISTORY_GRPC_PORT=7238 60 | - LOG_LEVEL=warn 61 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 62 | ports: 63 | - published: 7238 64 | target: 7238 65 | - published: 8005 66 | target: 8005 67 | restart: on-failure 68 | volumes: 69 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 70 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 71 | temporal-matching: 72 | <<: *logging 73 | container_name: temporal-matching 74 | depends_on: 75 | - temporal-history 76 | environment: 77 | - DB=postgres12 78 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 79 | - POSTGRES_USER=${POSTGRES_USER} 80 | - POSTGRES_PWD=${POSTGRES_PWD} 81 | - POSTGRES_SEEDS=postgresql 82 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 83 | - SERVICES=matching 84 | - PROMETHEUS_ENDPOINT=0.0.0.0:8001 85 | - NUM_HISTORY_SHARDS=2048 86 | - USE_INTERNAL_FRONTEND=true 87 | - LOG_LEVEL=warn 88 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 89 | ports: 90 | - published: 7235 91 | target: 7235 92 | - published: 8001 93 | target: 8001 94 | restart: on-failure 95 | volumes: 96 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 97 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 98 | temporal-matching2: 99 | <<: *logging 100 | container_name: temporal-matching2 101 | depends_on: 102 | - temporal-history 103 | - temporal-history2 104 | environment: 105 | - DB=postgres12 106 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 107 | - POSTGRES_USER=${POSTGRES_USER} 108 | - POSTGRES_PWD=${POSTGRES_PWD} 109 | - POSTGRES_SEEDS=postgresql 110 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 111 | - SERVICES=matching 112 | - PROMETHEUS_ENDPOINT=0.0.0.0:8006 113 | - MATCHING_GRPC_PORT=7249 114 | - 
MATCHING_MEMBERSHIP_PORT=6939 115 | - NUM_HISTORY_SHARDS=2048 116 | - USE_INTERNAL_FRONTEND=true 117 | - LOG_LEVEL=warn 118 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 119 | ports: 120 | - published: 7249 121 | target: 7249 122 | - published: 8006 123 | target: 8006 124 | restart: on-failure 125 | volumes: 126 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 127 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 128 | temporal-frontend: 129 | <<: *logging 130 | container_name: temporal-frontend 131 | depends_on: 132 | - temporal-matching 133 | environment: 134 | - DB=postgres12 135 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 136 | - POSTGRES_USER=${POSTGRES_USER} 137 | - POSTGRES_PWD=${POSTGRES_PWD} 138 | - POSTGRES_SEEDS=postgresql 139 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 140 | - SERVICES=frontend 141 | - FRONTEND_GRPC_PORT=7237 142 | - PROMETHEUS_ENDPOINT=0.0.0.0:8002 143 | - NUM_HISTORY_SHARDS=2048 144 | - USE_INTERNAL_FRONTEND=true 145 | - LOG_LEVEL=warn 146 | - FRONTEND_HTTP_PORT=7244 147 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 148 | ports: 149 | - published: 7237 150 | target: 7237 151 | - published: 7244 152 | target: 7244 153 | - published: 8002 154 | target: 8002 155 | restart: on-failure 156 | volumes: 157 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 158 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 159 | temporal-frontend2: 160 | <<: *logging 161 | container_name: temporal-frontend2 162 | depends_on: 163 | - temporal-matching 164 | environment: 165 | - DB=postgres12 166 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 167 | - POSTGRES_USER=${POSTGRES_USER} 168 | - POSTGRES_PWD=${POSTGRES_PWD} 169 | - POSTGRES_SEEDS=postgresql 170 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 171 | - SERVICES=frontend 172 | # set different frontend grpc port 173 | - FRONTEND_GRPC_PORT=7236 174 | # set different membership port than temporal-frontend 175 | - FRONTEND_MEMBERSHIP_PORT=6936 176 | - PROMETHEUS_ENDPOINT=0.0.0.0:8004 177 | - NUM_HISTORY_SHARDS=2048 178 | - USE_INTERNAL_FRONTEND=true 179 | - LOG_LEVEL=warn 180 | - FRONTEND_HTTP_PORT=7245 181 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 182 | ports: 183 | - published: 7236 184 | target: 7236 185 | - published: 7245 186 | target: 7245 187 | - published: 8004 188 | target: 8004 189 | restart: on-failure 190 | volumes: 191 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 192 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 193 | temporal-internal-frontend: 194 | <<: *logging 195 | container_name: temporal-internal-frontend 196 | depends_on: 197 | - temporal-matching 198 | environment: 199 | - DB=postgres12 200 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 201 | - POSTGRES_USER=${POSTGRES_USER} 202 | - POSTGRES_PWD=${POSTGRES_PWD} 203 | - POSTGRES_SEEDS=postgresql 204 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 205 | - SERVICES=internal-frontend 206 | - PROMETHEUS_ENDPOINT=0.0.0.0:8007 207 | - NUM_HISTORY_SHARDS=2048 208 | - USE_INTERNAL_FRONTEND=true 209 | - LOG_LEVEL=warn 210 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 211 | ports: 212 | - published: 7231 213 | target: 7231 214 | - published: 8007 215 | target: 8007 216 | restart: on-failure 217 | volumes: 218 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 219 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 220 | temporal-worker: 221 | <<: *logging 222 | 
container_name: temporal-worker 223 | depends_on: 224 | - temporal-frontend 225 | - temporal-frontend2 226 | environment: 227 | - DB=postgres12 228 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 229 | - POSTGRES_USER=${POSTGRES_USER} 230 | - POSTGRES_PWD=${POSTGRES_PWD} 231 | - POSTGRES_SEEDS=postgresql 232 | - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml 233 | - SERVICES=worker 234 | - PROMETHEUS_ENDPOINT=0.0.0.0:8003 235 | # set to loadbalancing 236 | - USE_INTERNAL_FRONTEND=true 237 | - LOG_LEVEL=warn 238 | image: temporalio/server:${TEMPORAL_SERVER_IMG} 239 | ports: 240 | - published: 7232 241 | target: 7232 242 | - published: 8003 243 | target: 8003 244 | restart: on-failure 245 | volumes: 246 | - ./dynamicconfig:/etc/temporal/config/dynamicconfig 247 | - ./template/my_config_template.yaml:/etc/temporal/config/config_template.yaml 248 | temporal-admin-tools: 249 | <<: *logging 250 | container_name: temporal-admin-tools 251 | depends_on: 252 | - temporal-frontend 253 | - temporal-frontend2 254 | environment: 255 | - TEMPORAL_CLI_ADDRESS=temporal-loadbalancing:7233 256 | - TEMPORAL_ADDRESS=temporal-loadbalancing:7233 257 | - DB=postgres12 258 | - DB_PORT=${POSTGRES_DEFAULT_PORT} 259 | - POSTGRES_USER=${POSTGRES_USER} 260 | - POSTGRES_PWD=${POSTGRES_PWD} 261 | - POSTGRES_SEEDS=postgresql 262 | - TEMPORAL_HOME=/etc/temporal 263 | # set to loadbalancing 264 | - PUBLIC_FRONTEND_ADDRESS=temporal-loadbalancing:7233 265 | image: temporalio/admin-tools:${TEMPORAL_ADMINTOOLS_IMG} 266 | volumes: 267 | - "./script/setup.sh:/etc/temporal/setup.sh" 268 | entrypoint: 269 | - /etc/temporal/setup.sh 270 | restart: always 271 | stdin_open: true 272 | tty: true 273 | temporal-ui: 274 | container_name: temporal-ui 275 | depends_on: 276 | - temporal-admin-tools 277 | environment: 278 | - TEMPORAL_ADDRESS=temporal-loadbalancing:7233 279 | - TEMPORAL_CORS_ORIGINS=http://localhost:3000 280 | - TEMPORAL_UI_PORT=8081 281 | - TEMPORAL_SHOW_TEMPORAL_SYSTEM_NAMESPACE=true 282 | image: temporalio/ui:${TEMPORAL_UI_IMG} 283 | ports: 284 | - published: 8081 285 | target: 8081 286 | prometheus: 287 | <<: *logging 288 | container_name: prometheus 289 | image: prom/prometheus:${PROMETHEUS_IMG} 290 | command: --config.file=/etc/prometheus/prometheus.yml --log.level=error 291 | ports: 292 | - published: 9090 293 | target: 9090 294 | volumes: 295 | - type: bind 296 | source: ./deployment/prometheus/config.yml 297 | target: /etc/prometheus/prometheus.yml 298 | depends_on: 299 | - temporal-worker 300 | grafana: 301 | container_name: grafana 302 | image: grafana/grafana:${GRAFANA_IMG} 303 | environment: 304 | - GF_AUTH_DISABLE_LOGIN_FORM=true 305 | - GF_AUTH_ANONYMOUS_ENABLED=true 306 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 307 | ports: 308 | - published: 8085 309 | target: 3000 310 | volumes: 311 | - type: bind 312 | source: ./deployment/grafana/dashboards 313 | target: /var/lib/grafana/dashboards 314 | - type: bind 315 | source: ./deployment/grafana/provisioning/dashboards 316 | target: /etc/grafana/provisioning/dashboards 317 | - type: bind 318 | source: ./deployment/grafana/provisioning/datasources 319 | target: /etc/grafana/provisioning/datasources 320 | - type: bind 321 | source: ./deployment/grafana/provisioning/alerting 322 | target: /etc/grafana/provisioning/alerting 323 | - type: bind 324 | source: ./deployment/grafana/provisioning/notifiers 325 | target: /etc/grafana/provisioning/notifiers 326 | - type: bind 327 | source: ./deployment/grafana/grafana.ini 328 | target: /etc/grafana/grafana.ini 329 | depends_on: 
330 | - prometheus 331 | portainer: 332 | container_name: portainer 333 | image: portainer/portainer-ce:${PORTAINER_IMG} 334 | security_opt: 335 | - no-new-privileges:true 336 | volumes: 337 | - /etc/localtime:/etc/localtime:ro 338 | - /var/run/docker.sock:/var/run/docker.sock:ro 339 | - ./portainer-data:/data 340 | ports: 341 | - published: 9000 342 | target: 9000 343 | jaeger-all-in-one: 344 | image: jaegertracing/all-in-one:${JAEGER_IMG} 345 | ports: 346 | - published: 16686 347 | target: 16686 348 | - published: 14268 349 | target: 14268 350 | - published: 14250 351 | target: 14250 352 | otel-collector: 353 | image: otel/opentelemetry-collector:${OTEL_IMG} 354 | command: [ "--config=/etc/otel-collector-config.yaml" ] 355 | volumes: 356 | - type: bind 357 | source: ./deployment/otel/otel-config.yaml 358 | target: /etc/otel-collector-config.yaml 359 | ports: 360 | - published: 1888 361 | target: 1888 362 | - published: 13133 363 | target: 13133 364 | - published: 4317 365 | target: 4317 366 | - published: 4318 367 | target: 4318 368 | - published: 55670 369 | target: 55670 370 | - published: 8889 371 | target: 8889 372 | - published: 5575 373 | target: 5575 374 | depends_on: 375 | - jaeger-all-in-one 376 | loki: 377 | container_name: loki 378 | image: grafana/loki:${LOKI_IMG} 379 | ports: 380 | - published: 3100 381 | target: 3100 382 | command: -config.file=/etc/loki/local-config.yaml 383 | volumes: 384 | - type: bind 385 | source: ./deployment/loki/local-config.yaml 386 | target: /etc/loki/local-config.yaml 387 | depends_on: 388 | - grafana 389 | cadvisor: 390 | image: gcr.io/cadvisor/cadvisor-arm64:${CADVISOR_IMG} 391 | container_name: cadvisor 392 | ports: 393 | - 9092:9092 394 | command: 395 | - '-port=9092' 396 | volumes: 397 | - /:/rootfs:ro 398 | - /var/run/docker.sock:/var/run/docker.sock:rw 399 | - /sys:/sys:ro 400 | - /var/lib/docker/:/var/lib/docker:ro 401 | #envoy 402 | temporal-loadbalancing: 403 | <<: *logging 404 | container_name: temporal-envoy 405 | image: envoyproxy/envoy:${ENVOY_IMG} 406 | ports: 407 | - published: 7233 408 | target: 7233 409 | - published: 7243 410 | target: 7243 411 | - published: 9901 412 | target: 9901 413 | volumes: 414 | - type: bind 415 | source: ./deployment/envoy/envoy.yaml 416 | target: /etc/envoy/envoy.yaml 417 | restart: on-failure 418 | depends_on: 419 | - temporal-admin-tools 420 | - temporal-frontend 421 | - temporal-frontend2 422 | # nginx 423 | # temporal-loadbalancing: 424 | # <<: *logging 425 | # image: nginx:${NGINX_IMG} 426 | # container_name: temporal-nginx 427 | # restart: unless-stopped 428 | # depends_on: 429 | # - temporal-frontend 430 | # - temporal-frontend2 431 | # ports: 432 | # - 7233:7233 433 | # volumes: 434 | # - ./deployment/nginx/nginx.conf:/etc/nginx/nginx.conf 435 | 436 | # haproxy 437 | # temporal-loadbalancing: 438 | # <<: *logging 439 | # image: haproxy:${HAPROXY_IMG} 440 | # container_name: temporal-haproxy 441 | # restart: unless-stopped 442 | # depends_on: 443 | # - temporal-admin-tools 444 | # ports: 445 | # - 7233:7233 446 | # - 8404:8404 447 | # volumes: 448 | # - ./deployment/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro 449 | networks: 450 | default: 451 | external: true 452 | name: temporal-network 453 | -------------------------------------------------------------------------------- /deployment/envoy/envoy.yaml: -------------------------------------------------------------------------------- 1 | admin: 2 | access_log_path: /dev/null 3 | address: 4 | socket_address: { address: 0.0.0.0, 
port_value: 9901 } 5 | 6 | static_resources: 7 | listeners: 8 | - name: listener_0 9 | address: 10 | socket_address: { address: 0.0.0.0, port_value: 7233 } 11 | filter_chains: 12 | - filters: 13 | - name: envoy.filters.network.http_connection_manager 14 | typed_config: 15 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager 16 | codec_type: auto 17 | stat_prefix: ingress_http 18 | route_config: 19 | name: local_route 20 | virtual_hosts: 21 | - name: local_service 22 | domains: ["*"] 23 | routes: 24 | - match: { prefix: "/" } 25 | route: 26 | # cluster: temporal_service 27 | timeout: 0s 28 | max_stream_duration: 29 | grpc_timeout_header_max: 0s 30 | weighted_clusters: 31 | clusters: 32 | - name: temporal_service 33 | weight: 1 34 | - name: temporal_service2 35 | weight: 1 36 | cors: 37 | allow_origin_string_match: 38 | - prefix: "*" 39 | allow_methods: GET, PUT, DELETE, POST, OPTIONS 40 | allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout 41 | max_age: "1728000" 42 | expose_headers: grpc-status,grpc-message 43 | http_filters: 44 | - name: envoy.filters.http.grpc_web 45 | typed_config: 46 | "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_web.v3.GrpcWeb 47 | - name: envoy.filters.http.cors 48 | typed_config: 49 | "@type": type.googleapis.com/envoy.extensions.filters.http.cors.v3.Cors 50 | - name: envoy.filters.http.router 51 | typed_config: 52 | "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router 53 | access_log: 54 | - name: envoy.access_loggers.file 55 | typed_config: 56 | "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog 57 | path: "/dev/stdout" 58 | typed_json_format: 59 | "@timestamp": "%START_TIME%" 60 | client: 61 | address: "%DOWNSTREAM_REMOTE_ADDRESS%" 62 | certificate: 63 | local: 64 | subject: "%DOWNSTREAM_LOCAL_SUBJECT%" 65 | uri_san: "%DOWNSTREAM_LOCAL_URI_SAN%" 66 | peer: 67 | cert_v_end: "%DOWNSTREAM_PEER_CERT_V_END%" 68 | cert_v_start: "%DOWNSTREAM_PEER_CERT_V_START%" 69 | certificate: "%DOWNSTREAM_PEER_CERT%" 70 | issuer: "%DOWNSTREAM_PEER_ISSUER%" 71 | serial: "%DOWNSTREAM_PEER_SERIAL%" 72 | uri_san: "%DOWNSTREAM_PEER_URI_SAN%" 73 | direct: 74 | remote: 75 | address: "%DOWNSTREAM_DIRECT_REMOTE_ADDRESS%" 76 | local: 77 | address: "%DOWNSTREAM_LOCAL_ADDRESS%" 78 | remote: 79 | address_no_port: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" 80 | sni: "%REQUESTED_SERVER_NAME%" 81 | tls: 82 | cipher: "%DOWNSTREAM_TLS_CIPHER%" 83 | session_id: "%DOWNSTREAM_TLS_SESSION_ID%" 84 | version: "%DOWNSTREAM_TLS_VERSION%" 85 | envoy: 86 | route: 87 | name: "%ROUTE_NAME%" 88 | upstream: 89 | cluster: "%UPSTREAM_CLUSTER%" 90 | localaddress: "%UPSTREAM_LOCAL_ADDRESS%" 91 | transport_failure_reason: "%UPSTREAM_TRANSPORT_FAILURE_REASON%" 92 | host: 93 | hostname: "%HOSTNAME%" 94 | http: 95 | request: 96 | body: 97 | bytes: "%BYTES_RECEIVED%" 98 | duration: "%DURATION%" 99 | duration_req: "%REQUEST_DURATION%" 100 | headers: 101 | accept: "%REQ(ACCEPT)%" 102 | authority: "%REQ(:AUTHORITY)%" 103 | content-length: "%REQ(CONTENT-LENGTH)%" 104 | content-type: "%REQ(CONTENT-TYPE)%" 105 | host: "%REQ(HOST)%" 106 | id: "%REQ(X-REQUEST-ID)%" 107 | session-id: "%REQ(SESSION-ID)%" 108 | correlation-id: "%REQ(CORRELATION-ID)%" 109 | x_forwarded_for: "%REQ(X-FORWARDED-FOR)%" 110 | x_forwarded_proto: "%REQ(X-FORWARDED-PROTO)%" 111 
| x_envoy_internal: "%REQ(X-ENVOY-INTERNAL)%" 112 | x_envoy_decorator_operation: "%REQ(X-ENVOY-DECORATOR-OPERATION)%" 113 | x_envoy_expected_rq_timeout_ms: "%REQ(X-ENVOY-EXPECTED-RQ-TIMEOUT-MS)%" 114 | x_b3_traceid: "%REQ(X-B3-TRACEID)%" 115 | x_b3_parentspanid: "%REQ(X-B3-PARENTSPANID)%" 116 | x_b3_spanid: "%REQ(X-B3-SPANID)%" 117 | x_b3_sampled: "%REQ(X-B3-SAMPLED)%" 118 | method: "%REQ(:METHOD)%" 119 | path: "%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%" 120 | response: 121 | body: 122 | bytes: "%BYTES_SENT%" 123 | duration_resp: "%RESPONSE_DURATION%" 124 | duration_tx: "%RESPONSE_TX_DURATION%" 125 | flags: "%RESPONSE_FLAGS%" 126 | headers: 127 | access-control-allow-origin: "%RESP(ACCESS-CONTROL-ALLOW-ORIGIN)%" 128 | content-length: "%RESP(CONTENT-LENGTH)%" 129 | content-type: "%RESP(CONTENT-TYPE)%" 130 | date: "%RESP(DATE)%" 131 | server: "%RESP(SERVER)%" 132 | status_code: "%RESPONSE_CODE%" 133 | status_code_details: "%RESPONSE_CODE_DETAILS%" 134 | version: "%PROTOCOL%" 135 | log: 136 | level: "info" # default envoy log level 137 | network: 138 | direction: "inbound" 139 | server: 140 | address: "%UPSTREAM_HOST%" 141 | service: 142 | name: "envoy" 143 | version: "dev-706fe7871ab5fe631406db1e0fe5af1c4d0eb1b8" 144 | user_agent.name: "%REQ(USER-AGENT)%" 145 | - name: listener_1 146 | address: 147 | socket_address: { address: 0.0.0.0, port_value: 7243 } 148 | filter_chains: 149 | - filters: 150 | - name: envoy.filters.network.http_connection_manager 151 | typed_config: 152 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager 153 | codec_type: auto 154 | stat_prefix: ingress_http2 155 | route_config: 156 | name: local_route2 157 | virtual_hosts: 158 | - name: local_service2 159 | domains: [ "*" ] 160 | routes: 161 | - match: { prefix: "/" } 162 | route: 163 | weighted_clusters: 164 | clusters: 165 | - name: temporal_service3 166 | weight: 1 167 | http_filters: 168 | - name: envoy.filters.http.router2 169 | typed_config: 170 | "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router 171 | access_log: 172 | - name: envoy.access_loggers.file 173 | typed_config: 174 | "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog 175 | path: "/dev/stdout" 176 | typed_json_format: 177 | "@timestamp": "%START_TIME%" 178 | client: 179 | address: "%DOWNSTREAM_REMOTE_ADDRESS%" 180 | certificate: 181 | local: 182 | subject: "%DOWNSTREAM_LOCAL_SUBJECT%" 183 | uri_san: "%DOWNSTREAM_LOCAL_URI_SAN%" 184 | peer: 185 | cert_v_end: "%DOWNSTREAM_PEER_CERT_V_END%" 186 | cert_v_start: "%DOWNSTREAM_PEER_CERT_V_START%" 187 | certificate: "%DOWNSTREAM_PEER_CERT%" 188 | issuer: "%DOWNSTREAM_PEER_ISSUER%" 189 | serial: "%DOWNSTREAM_PEER_SERIAL%" 190 | uri_san: "%DOWNSTREAM_PEER_URI_SAN%" 191 | direct: 192 | remote: 193 | address: "%DOWNSTREAM_DIRECT_REMOTE_ADDRESS%" 194 | local: 195 | address: "%DOWNSTREAM_LOCAL_ADDRESS%" 196 | remote: 197 | address_no_port: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%" 198 | sni: "%REQUESTED_SERVER_NAME%" 199 | tls: 200 | cipher: "%DOWNSTREAM_TLS_CIPHER%" 201 | session_id: "%DOWNSTREAM_TLS_SESSION_ID%" 202 | version: "%DOWNSTREAM_TLS_VERSION%" 203 | envoy: 204 | route: 205 | name: "%ROUTE_NAME%" 206 | upstream: 207 | cluster: "%UPSTREAM_CLUSTER%" 208 | localaddress: "%UPSTREAM_LOCAL_ADDRESS%" 209 | transport_failure_reason: "%UPSTREAM_TRANSPORT_FAILURE_REASON%" 210 | host: 211 | hostname: "%HOSTNAME%" 212 | http: 213 | request: 214 | body: 215 | bytes: "%BYTES_RECEIVED%" 216 | duration: 
"%DURATION%" 217 | duration_req: "%REQUEST_DURATION%" 218 | headers: 219 | accept: "%REQ(ACCEPT)%" 220 | authority: "%REQ(:AUTHORITY)%" 221 | content-length: "%REQ(CONTENT-LENGTH)%" 222 | content-type: "%REQ(CONTENT-TYPE)%" 223 | host: "%REQ(HOST)%" 224 | id: "%REQ(X-REQUEST-ID)%" 225 | session-id: "%REQ(SESSION-ID)%" 226 | correlation-id: "%REQ(CORRELATION-ID)%" 227 | x_forwarded_for: "%REQ(X-FORWARDED-FOR)%" 228 | x_forwarded_proto: "%REQ(X-FORWARDED-PROTO)%" 229 | x_envoy_internal: "%REQ(X-ENVOY-INTERNAL)%" 230 | x_envoy_decorator_operation: "%REQ(X-ENVOY-DECORATOR-OPERATION)%" 231 | x_envoy_expected_rq_timeout_ms: "%REQ(X-ENVOY-EXPECTED-RQ-TIMEOUT-MS)%" 232 | x_b3_traceid: "%REQ(X-B3-TRACEID)%" 233 | x_b3_parentspanid: "%REQ(X-B3-PARENTSPANID)%" 234 | x_b3_spanid: "%REQ(X-B3-SPANID)%" 235 | x_b3_sampled: "%REQ(X-B3-SAMPLED)%" 236 | method: "%REQ(:METHOD)%" 237 | path: "%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%" 238 | response: 239 | body: 240 | bytes: "%BYTES_SENT%" 241 | duration_resp: "%RESPONSE_DURATION%" 242 | duration_tx: "%RESPONSE_TX_DURATION%" 243 | flags: "%RESPONSE_FLAGS%" 244 | headers: 245 | access-control-allow-origin: "%RESP(ACCESS-CONTROL-ALLOW-ORIGIN)%" 246 | content-length: "%RESP(CONTENT-LENGTH)%" 247 | content-type: "%RESP(CONTENT-TYPE)%" 248 | date: "%RESP(DATE)%" 249 | server: "%RESP(SERVER)%" 250 | status_code: "%RESPONSE_CODE%" 251 | status_code_details: "%RESPONSE_CODE_DETAILS%" 252 | version: "%PROTOCOL%" 253 | log: 254 | level: "info" # default envoy log level 255 | network: 256 | direction: "inbound" 257 | server: 258 | address: "%UPSTREAM_HOST%" 259 | service: 260 | name: "envoy" 261 | version: "dev-706fe7871ab5fe631406db1e0fe5af1c4d0eb1b8" 262 | user_agent.name: "%REQ(USER-AGENT)%" 263 | clusters: 264 | - name: temporal_service 265 | connect_timeout: 200.0s 266 | type: logical_dns 267 | http2_protocol_options: {} 268 | lb_policy: round_robin 269 | load_assignment: 270 | cluster_name: cluster_0 271 | endpoints: 272 | - lb_endpoints: 273 | - endpoint: 274 | address: 275 | socket_address: 276 | address: temporal-frontend 277 | port_value: 7237 278 | - name: temporal_service2 279 | connect_timeout: 200.0s 280 | type: logical_dns 281 | http2_protocol_options: { } 282 | lb_policy: round_robin 283 | load_assignment: 284 | cluster_name: cluster_1 285 | endpoints: 286 | - lb_endpoints: 287 | - endpoint: 288 | address: 289 | socket_address: 290 | address: temporal-frontend2 291 | port_value: 7236 292 | - name: temporal_service3 293 | connect_timeout: 200.0s 294 | type: logical_dns 295 | http_protocol_options: {} 296 | lb_policy: round_robin 297 | load_assignment: 298 | cluster_name: cluster_2 299 | endpoints: 300 | - lb_endpoints: 301 | - endpoint: 302 | address: 303 | socket_address: 304 | address: temporal-frontend 305 | port_value: 7244 306 | # getting ECONNREFUSED with envoy (error code 111) sometimes, so just routing http to one fe service for now 307 | # - name: temporal_service4 308 | # connect_timeout: 200.0s 309 | # type: logical_dns 310 | # http_protocol_options: {} 311 | # lb_policy: round_robin 312 | # load_assignment: 313 | # cluster_name: cluster_3 314 | # endpoints: 315 | # - lb_endpoints: 316 | # - endpoint: 317 | # address: 318 | # socket_address: 319 | # address: temporal-frontend 320 | # port_value: 7245 321 | -------------------------------------------------------------------------------- /deployment/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [paths] 2 | provisioning = 
/etc/grafana/provisioning 3 | alerting = /etc/grafana/provisioning/alerting 4 | notifiers = /etc/grafana/provisioning/notifiers 5 | 6 | [server] 7 | enable_gzip = true 8 | 9 | [users] 10 | default_theme = dark 11 | 12 | [log.console] 13 | level = error -------------------------------------------------------------------------------- /deployment/grafana/old_dashboards/dockermetrics.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "datasource", 8 | "uid": "grafana" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "target": { 15 | "limit": 100, 16 | "matchAny": false, 17 | "tags": [], 18 | "type": "dashboard" 19 | }, 20 | "type": "dashboard" 21 | } 22 | ] 23 | }, 24 | "description": "Draw some docker metrics", 25 | "editable": true, 26 | "fiscalYearStartMonth": 0, 27 | "graphTooltip": 0, 28 | "links": [], 29 | "liveNow": false, 30 | "panels": [ 31 | { 32 | "datasource": { 33 | "type": "grafana", 34 | "uid": "grafana" 35 | }, 36 | "description": "Number of CPUs", 37 | "fieldConfig": { 38 | "defaults": { 39 | "color": { 40 | "mode": "thresholds" 41 | }, 42 | "mappings": [ 43 | { 44 | "options": { 45 | "match": "null", 46 | "result": { 47 | "text": "N/A" 48 | } 49 | }, 50 | "type": "special" 51 | } 52 | ], 53 | "thresholds": { 54 | "mode": "absolute", 55 | "steps": [ 56 | { 57 | "color": "green", 58 | "value": null 59 | }, 60 | { 61 | "color": "red", 62 | "value": 80 63 | } 64 | ] 65 | }, 66 | "unit": "none" 67 | }, 68 | "overrides": [] 69 | }, 70 | "gridPos": { 71 | "h": 3, 72 | "w": 24, 73 | "x": 0, 74 | "y": 0 75 | }, 76 | "id": 7, 77 | "links": [], 78 | "maxDataPoints": 100, 79 | "options": { 80 | "colorMode": "value", 81 | "graphMode": "none", 82 | "justifyMode": "auto", 83 | "orientation": "horizontal", 84 | "reduceOptions": { 85 | "calcs": [ 86 | "lastNotNull" 87 | ], 88 | "fields": "", 89 | "values": false 90 | }, 91 | "textMode": "auto" 92 | }, 93 | "pluginVersion": "9.0.7", 94 | "repeat": "instance", 95 | "targets": [ 96 | { 97 | "datasource": { 98 | "type": "datasource", 99 | "uid": "grafana" 100 | }, 101 | "expr": "engine_daemon_engine_cpus_cpus{instance=~'$instance'}", 102 | "intervalFactor": 2, 103 | "legendFormat": "", 104 | "metric": "engine_daemon_engine_cpus_cpus", 105 | "refId": "A", 106 | "step": 60 107 | } 108 | ], 109 | "title": "CPU Cores", 110 | "type": "stat" 111 | }, 112 | { 113 | "aliasColors": {}, 114 | "bars": false, 115 | "dashLength": 10, 116 | "dashes": false, 117 | "datasource": { 118 | "type": "grafana", 119 | "uid": "grafana" 120 | }, 121 | "description": "Measuring some percentiles performance", 122 | "fill": 1, 123 | "fillGradient": 0, 124 | "gridPos": { 125 | "h": 7, 126 | "w": 24, 127 | "x": 0, 128 | "y": 3 129 | }, 130 | "hiddenSeries": false, 131 | "id": 14, 132 | "legend": { 133 | "alignAsTable": false, 134 | "avg": false, 135 | "current": false, 136 | "hideEmpty": true, 137 | "hideZero": true, 138 | "max": false, 139 | "min": false, 140 | "show": true, 141 | "total": false, 142 | "values": false 143 | }, 144 | "lines": true, 145 | "linewidth": 1, 146 | "links": [], 147 | "nullPointMode": "null as zero", 148 | "options": { 149 | "alertThreshold": true 150 | }, 151 | "percentage": false, 152 | "pluginVersion": "9.0.7", 153 | "pointradius": 5, 154 | "points": false, 155 | "renderer": "flot", 156 | "repeat": "instance", 157 | "seriesOverrides": 
[], 158 | "spaceLength": 10, 159 | "stack": false, 160 | "steppedLine": false, 161 | "targets": [ 162 | { 163 | "datasource": { 164 | "type": "datasource", 165 | "uid": "grafana" 166 | }, 167 | "expr": "histogram_quantile(0.99, rate(engine_daemon_container_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 168 | "intervalFactor": 2, 169 | "legendFormat": "{{action}} 99", 170 | "refId": "A", 171 | "step": 4 172 | }, 173 | { 174 | "datasource": { 175 | "type": "datasource", 176 | "uid": "grafana" 177 | }, 178 | "expr": "histogram_quantile(0.90, rate(engine_daemon_container_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 179 | "intervalFactor": 2, 180 | "legendFormat": "{{action}} 90", 181 | "refId": "B", 182 | "step": 4 183 | }, 184 | { 185 | "datasource": { 186 | "type": "datasource", 187 | "uid": "grafana" 188 | }, 189 | "expr": "histogram_quantile(0.50, rate(engine_daemon_container_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 190 | "intervalFactor": 2, 191 | "legendFormat": "{{action}} 50", 192 | "refId": "C", 193 | "step": 4 194 | }, 195 | { 196 | "datasource": { 197 | "type": "datasource", 198 | "uid": "grafana" 199 | }, 200 | "expr": "histogram_quantile(0.25, rate(engine_daemon_container_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 201 | "intervalFactor": 2, 202 | "legendFormat": "{{action}} 25", 203 | "refId": "D", 204 | "step": 4 205 | } 206 | ], 207 | "thresholds": [], 208 | "timeRegions": [], 209 | "title": "Time x Container Action percentile", 210 | "tooltip": { 211 | "shared": true, 212 | "sort": 0, 213 | "value_type": "individual" 214 | }, 215 | "type": "graph", 216 | "xaxis": { 217 | "mode": "time", 218 | "show": true, 219 | "values": [] 220 | }, 221 | "yaxes": [ 222 | { 223 | "format": "short", 224 | "logBase": 1, 225 | "show": true 226 | }, 227 | { 228 | "format": "short", 229 | "logBase": 1, 230 | "show": true 231 | } 232 | ], 233 | "yaxis": { 234 | "align": false 235 | } 236 | }, 237 | { 238 | "aliasColors": {}, 239 | "bars": false, 240 | "dashLength": 10, 241 | "dashes": false, 242 | "datasource": { 243 | "type": "grafana", 244 | "uid": "grafana" 245 | }, 246 | "fill": 1, 247 | "fillGradient": 0, 248 | "gridPos": { 249 | "h": 7, 250 | "w": 24, 251 | "x": 0, 252 | "y": 10 253 | }, 254 | "hiddenSeries": false, 255 | "id": 15, 256 | "legend": { 257 | "avg": false, 258 | "current": false, 259 | "hideEmpty": true, 260 | "hideZero": true, 261 | "max": false, 262 | "min": false, 263 | "show": true, 264 | "total": false, 265 | "values": false 266 | }, 267 | "lines": true, 268 | "linewidth": 1, 269 | "links": [], 270 | "nullPointMode": "null", 271 | "options": { 272 | "alertThreshold": true 273 | }, 274 | "percentage": false, 275 | "pluginVersion": "9.0.7", 276 | "pointradius": 5, 277 | "points": false, 278 | "renderer": "flot", 279 | "repeat": "instance", 280 | "seriesOverrides": [], 281 | "spaceLength": 10, 282 | "stack": false, 283 | "steppedLine": false, 284 | "targets": [ 285 | { 286 | "datasource": { 287 | "type": "datasource", 288 | "uid": "grafana" 289 | }, 290 | "expr": "engine_daemon_container_actions_seconds_count{instance=~'$instance'}", 291 | "intervalFactor": 2, 292 | "legendFormat": "{{action}}", 293 | "metric": "engine_daemon_container_actions_seconds_count", 294 | "refId": "A", 295 | "step": 4 296 | } 297 | ], 298 | "thresholds": [], 299 | "timeRegions": [], 300 | "title": "Total Container Actions", 301 | "tooltip": { 302 | "shared": true, 303 | "sort": 2, 304 | "value_type": "individual" 305 | }, 306 | "type": 
"graph", 307 | "xaxis": { 308 | "mode": "time", 309 | "show": true, 310 | "values": [] 311 | }, 312 | "yaxes": [ 313 | { 314 | "format": "short", 315 | "logBase": 1, 316 | "show": true 317 | }, 318 | { 319 | "format": "short", 320 | "logBase": 1, 321 | "show": true 322 | } 323 | ], 324 | "yaxis": { 325 | "align": false 326 | } 327 | }, 328 | { 329 | "aliasColors": {}, 330 | "bars": false, 331 | "dashLength": 10, 332 | "dashes": false, 333 | "datasource": { 334 | "type": "grafana", 335 | "uid": "grafana" 336 | }, 337 | "description": "Measuring some percentiles performance", 338 | "fill": 1, 339 | "fillGradient": 0, 340 | "gridPos": { 341 | "h": 7, 342 | "w": 24, 343 | "x": 0, 344 | "y": 17 345 | }, 346 | "hiddenSeries": false, 347 | "id": 22, 348 | "legend": { 349 | "alignAsTable": false, 350 | "avg": false, 351 | "current": false, 352 | "hideEmpty": true, 353 | "hideZero": true, 354 | "max": false, 355 | "min": false, 356 | "show": true, 357 | "total": false, 358 | "values": false 359 | }, 360 | "lines": true, 361 | "linewidth": 1, 362 | "links": [], 363 | "nullPointMode": "null as zero", 364 | "options": { 365 | "alertThreshold": true 366 | }, 367 | "percentage": false, 368 | "pluginVersion": "9.0.7", 369 | "pointradius": 5, 370 | "points": false, 371 | "renderer": "flot", 372 | "repeat": "instance", 373 | "seriesOverrides": [], 374 | "spaceLength": 10, 375 | "stack": false, 376 | "steppedLine": false, 377 | "targets": [ 378 | { 379 | "datasource": { 380 | "type": "datasource", 381 | "uid": "grafana" 382 | }, 383 | "expr": "histogram_quantile(0.99, rate(engine_daemon_network_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 384 | "intervalFactor": 2, 385 | "legendFormat": "{{action}} 99", 386 | "refId": "A", 387 | "step": 4 388 | }, 389 | { 390 | "datasource": { 391 | "type": "datasource", 392 | "uid": "grafana" 393 | }, 394 | "expr": "histogram_quantile(0.90, rate(engine_daemon_network_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 395 | "intervalFactor": 2, 396 | "legendFormat": "{{action}} 90", 397 | "refId": "B", 398 | "step": 4 399 | }, 400 | { 401 | "datasource": { 402 | "type": "datasource", 403 | "uid": "grafana" 404 | }, 405 | "expr": "histogram_quantile(0.50, rate(engine_daemon_network_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 406 | "intervalFactor": 2, 407 | "legendFormat": "{{action}} 50", 408 | "refId": "C", 409 | "step": 4 410 | }, 411 | { 412 | "datasource": { 413 | "type": "datasource", 414 | "uid": "grafana" 415 | }, 416 | "expr": "histogram_quantile(0.25, rate(engine_daemon_network_actions_seconds_bucket{instance=~'$instance'}[$interval]))", 417 | "intervalFactor": 2, 418 | "legendFormat": "{{action}} 25", 419 | "refId": "D", 420 | "step": 4 421 | } 422 | ], 423 | "thresholds": [], 424 | "timeRegions": [], 425 | "title": "Time x Network Action percentile", 426 | "tooltip": { 427 | "shared": true, 428 | "sort": 0, 429 | "value_type": "individual" 430 | }, 431 | "type": "graph", 432 | "xaxis": { 433 | "mode": "time", 434 | "show": true, 435 | "values": [] 436 | }, 437 | "yaxes": [ 438 | { 439 | "format": "short", 440 | "logBase": 1, 441 | "show": true 442 | }, 443 | { 444 | "format": "short", 445 | "logBase": 1, 446 | "show": true 447 | } 448 | ], 449 | "yaxis": { 450 | "align": false 451 | } 452 | }, 453 | { 454 | "aliasColors": {}, 455 | "bars": false, 456 | "dashLength": 10, 457 | "dashes": false, 458 | "datasource": { 459 | "type": "grafana", 460 | "uid": "grafana" 461 | }, 462 | "fill": 1, 463 | "fillGradient": 0, 464 | 
"gridPos": { 465 | "h": 7, 466 | "w": 24, 467 | "x": 0, 468 | "y": 24 469 | }, 470 | "hiddenSeries": false, 471 | "id": 19, 472 | "legend": { 473 | "avg": false, 474 | "current": false, 475 | "hideEmpty": true, 476 | "hideZero": true, 477 | "max": false, 478 | "min": false, 479 | "show": true, 480 | "total": false, 481 | "values": false 482 | }, 483 | "lines": true, 484 | "linewidth": 1, 485 | "links": [], 486 | "nullPointMode": "null as zero", 487 | "options": { 488 | "alertThreshold": true 489 | }, 490 | "percentage": false, 491 | "pluginVersion": "9.0.7", 492 | "pointradius": 5, 493 | "points": false, 494 | "renderer": "flot", 495 | "repeat": "instance", 496 | "seriesOverrides": [], 497 | "spaceLength": 10, 498 | "stack": false, 499 | "steppedLine": false, 500 | "targets": [ 501 | { 502 | "datasource": { 503 | "type": "datasource", 504 | "uid": "grafana" 505 | }, 506 | "expr": "engine_daemon_network_actions_seconds_count{instance=~'$instance'}", 507 | "intervalFactor": 2, 508 | "legendFormat": "{{action}}", 509 | "metric": "engine_daemon_container_actions_seconds_count", 510 | "refId": "A", 511 | "step": 4 512 | } 513 | ], 514 | "thresholds": [], 515 | "timeRegions": [], 516 | "title": "Total Network Actions", 517 | "tooltip": { 518 | "shared": true, 519 | "sort": 2, 520 | "value_type": "individual" 521 | }, 522 | "type": "graph", 523 | "xaxis": { 524 | "mode": "time", 525 | "show": true, 526 | "values": [] 527 | }, 528 | "yaxes": [ 529 | { 530 | "format": "none", 531 | "logBase": 1, 532 | "show": true 533 | }, 534 | { 535 | "format": "none", 536 | "logBase": 1, 537 | "show": true 538 | } 539 | ], 540 | "yaxis": { 541 | "align": false 542 | } 543 | }, 544 | { 545 | "aliasColors": {}, 546 | "bars": false, 547 | "dashLength": 10, 548 | "dashes": false, 549 | "datasource": { 550 | "type": "grafana", 551 | "uid": "grafana" 552 | }, 553 | "description": "Measuring histogram on some percentile", 554 | "fill": 1, 555 | "fillGradient": 0, 556 | "gridPos": { 557 | "h": 7, 558 | "w": 24, 559 | "x": 0, 560 | "y": 31 561 | }, 562 | "hiddenSeries": false, 563 | "id": 21, 564 | "legend": { 565 | "alignAsTable": false, 566 | "avg": false, 567 | "current": false, 568 | "hideEmpty": true, 569 | "hideZero": true, 570 | "max": false, 571 | "min": false, 572 | "show": true, 573 | "total": false, 574 | "values": false 575 | }, 576 | "lines": true, 577 | "linewidth": 1, 578 | "links": [], 579 | "nullPointMode": "null as zero", 580 | "options": { 581 | "alertThreshold": true 582 | }, 583 | "percentage": false, 584 | "pluginVersion": "9.0.7", 585 | "pointradius": 5, 586 | "points": false, 587 | "renderer": "flot", 588 | "repeat": "instance", 589 | "seriesOverrides": [], 590 | "spaceLength": 10, 591 | "stack": false, 592 | "steppedLine": false, 593 | "targets": [ 594 | { 595 | "datasource": { 596 | "type": "datasource", 597 | "uid": "grafana" 598 | }, 599 | "expr": "histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{instance=~'$instance'}[$interval]))", 600 | "intervalFactor": 2, 601 | "legendFormat": "99", 602 | "refId": "D", 603 | "step": 4 604 | }, 605 | { 606 | "datasource": { 607 | "type": "datasource", 608 | "uid": "grafana" 609 | }, 610 | "expr": "histogram_quantile(0.90, rate(etcd_disk_wal_fsync_duration_seconds_bucket{instance=~'$instance'}[$interval]))", 611 | "intervalFactor": 2, 612 | "legendFormat": "90", 613 | "refId": "A", 614 | "step": 4 615 | }, 616 | { 617 | "datasource": { 618 | "type": "datasource", 619 | "uid": "grafana" 620 | }, 621 | "expr": 
"histogram_quantile(0.50, rate(etcd_disk_wal_fsync_duration_seconds_bucket{instance=~'$instance'}[$interval]))", 622 | "intervalFactor": 2, 623 | "legendFormat": "50", 624 | "refId": "B", 625 | "step": 4 626 | }, 627 | { 628 | "datasource": { 629 | "type": "datasource", 630 | "uid": "grafana" 631 | }, 632 | "expr": "histogram_quantile(0.10, rate(etcd_disk_wal_fsync_duration_seconds_bucket{instance=~'$instance'}[$interval]))", 633 | "intervalFactor": 2, 634 | "legendFormat": "10", 635 | "refId": "C", 636 | "step": 4 637 | } 638 | ], 639 | "thresholds": [], 640 | "timeRegions": [], 641 | "title": "etcd fsync wall percentile", 642 | "tooltip": { 643 | "shared": true, 644 | "sort": 0, 645 | "value_type": "individual" 646 | }, 647 | "type": "graph", 648 | "xaxis": { 649 | "mode": "time", 650 | "show": true, 651 | "values": [] 652 | }, 653 | "yaxes": [ 654 | { 655 | "format": "short", 656 | "logBase": 1, 657 | "show": true 658 | }, 659 | { 660 | "format": "short", 661 | "logBase": 1, 662 | "show": true 663 | } 664 | ], 665 | "yaxis": { 666 | "align": false 667 | } 668 | }, 669 | { 670 | "aliasColors": {}, 671 | "bars": false, 672 | "dashLength": 10, 673 | "dashes": false, 674 | "datasource": { 675 | "type": "grafana", 676 | "uid": "grafana" 677 | }, 678 | "decimals": 0, 679 | "fill": 1, 680 | "fillGradient": 0, 681 | "gridPos": { 682 | "h": 7, 683 | "w": 24, 684 | "x": 0, 685 | "y": 38 686 | }, 687 | "hiddenSeries": false, 688 | "id": 20, 689 | "legend": { 690 | "alignAsTable": false, 691 | "avg": false, 692 | "current": true, 693 | "max": true, 694 | "min": true, 695 | "show": true, 696 | "total": false, 697 | "values": true 698 | }, 699 | "lines": true, 700 | "linewidth": 1, 701 | "links": [], 702 | "nullPointMode": "null", 703 | "options": { 704 | "alertThreshold": true 705 | }, 706 | "percentage": false, 707 | "pluginVersion": "9.0.7", 708 | "pointradius": 5, 709 | "points": false, 710 | "renderer": "flot", 711 | "repeat": "instance", 712 | "seriesOverrides": [], 713 | "spaceLength": 10, 714 | "stack": false, 715 | "steppedLine": false, 716 | "targets": [ 717 | { 718 | "datasource": { 719 | "type": "datasource", 720 | "uid": "grafana" 721 | }, 722 | "expr": "engine_daemon_events_subscribers_total{instance=~'$instance'}", 723 | "intervalFactor": 2, 724 | "legendFormat": " ", 725 | "refId": "A", 726 | "step": 4 727 | } 728 | ], 729 | "thresholds": [], 730 | "timeRegions": [], 731 | "title": "Event Subscribers", 732 | "tooltip": { 733 | "shared": true, 734 | "sort": 0, 735 | "value_type": "individual" 736 | }, 737 | "type": "graph", 738 | "xaxis": { 739 | "mode": "time", 740 | "show": true, 741 | "values": [] 742 | }, 743 | "yaxes": [ 744 | { 745 | "format": "short", 746 | "logBase": 1, 747 | "show": true 748 | }, 749 | { 750 | "format": "short", 751 | "logBase": 1, 752 | "show": true 753 | } 754 | ], 755 | "yaxis": { 756 | "align": false 757 | } 758 | }, 759 | { 760 | "aliasColors": {}, 761 | "bars": false, 762 | "dashLength": 10, 763 | "dashes": false, 764 | "datasource": { 765 | "type": "grafana", 766 | "uid": "grafana" 767 | }, 768 | "decimals": 0, 769 | "fill": 1, 770 | "fillGradient": 0, 771 | "gridPos": { 772 | "h": 7, 773 | "w": 24, 774 | "x": 0, 775 | "y": 45 776 | }, 777 | "hiddenSeries": false, 778 | "id": 23, 779 | "legend": { 780 | "avg": false, 781 | "current": false, 782 | "hideEmpty": true, 783 | "hideZero": true, 784 | "max": false, 785 | "min": false, 786 | "show": true, 787 | "total": false, 788 | "values": false 789 | }, 790 | "lines": true, 791 | "linewidth": 1, 792 | 
"links": [], 793 | "nullPointMode": "null", 794 | "options": { 795 | "alertThreshold": true 796 | }, 797 | "percentage": false, 798 | "pluginVersion": "9.0.7", 799 | "pointradius": 5, 800 | "points": false, 801 | "renderer": "flot", 802 | "repeat": "instance", 803 | "seriesOverrides": [], 804 | "spaceLength": 10, 805 | "stack": false, 806 | "steppedLine": false, 807 | "targets": [ 808 | { 809 | "datasource": { 810 | "type": "datasource", 811 | "uid": "grafana" 812 | }, 813 | "refId": "A" 814 | } 815 | ], 816 | "thresholds": [], 817 | "timeRegions": [], 818 | "title": "Image Builds", 819 | "tooltip": { 820 | "shared": true, 821 | "sort": 0, 822 | "value_type": "individual" 823 | }, 824 | "type": "graph", 825 | "xaxis": { 826 | "mode": "time", 827 | "show": true, 828 | "values": [] 829 | }, 830 | "yaxes": [ 831 | { 832 | "format": "short", 833 | "logBase": 1, 834 | "show": true 835 | }, 836 | { 837 | "format": "short", 838 | "logBase": 1, 839 | "show": true 840 | } 841 | ], 842 | "yaxis": { 843 | "align": false 844 | } 845 | } 846 | ], 847 | "schemaVersion": 36, 848 | "style": "dark", 849 | "tags": [ 850 | "docker", 851 | "docker metrics", 852 | "docker engine" 853 | ], 854 | "templating": { 855 | "list": [ 856 | { 857 | "auto": true, 858 | "auto_count": 30, 859 | "auto_min": "10s", 860 | "current": { 861 | "selected": true, 862 | "text": "auto", 863 | "value": "$__auto_interval_interval" 864 | }, 865 | "hide": 0, 866 | "label": "Interval", 867 | "name": "interval", 868 | "options": [ 869 | { 870 | "selected": true, 871 | "text": "auto", 872 | "value": "$__auto_interval_interval" 873 | }, 874 | { 875 | "selected": false, 876 | "text": "30s", 877 | "value": "30s" 878 | }, 879 | { 880 | "selected": false, 881 | "text": "1m", 882 | "value": "1m" 883 | }, 884 | { 885 | "selected": false, 886 | "text": "2m", 887 | "value": "2m" 888 | }, 889 | { 890 | "selected": false, 891 | "text": "3m", 892 | "value": "3m" 893 | }, 894 | { 895 | "selected": false, 896 | "text": "5m", 897 | "value": "5m" 898 | }, 899 | { 900 | "selected": false, 901 | "text": "7m", 902 | "value": "7m" 903 | }, 904 | { 905 | "selected": false, 906 | "text": "10m", 907 | "value": "10m" 908 | }, 909 | { 910 | "selected": false, 911 | "text": "30m", 912 | "value": "30m" 913 | }, 914 | { 915 | "selected": false, 916 | "text": "1h", 917 | "value": "1h" 918 | }, 919 | { 920 | "selected": false, 921 | "text": "6h", 922 | "value": "6h" 923 | }, 924 | { 925 | "selected": false, 926 | "text": "12h", 927 | "value": "12h" 928 | }, 929 | { 930 | "selected": false, 931 | "text": "1d", 932 | "value": "1d" 933 | }, 934 | { 935 | "selected": false, 936 | "text": "7d", 937 | "value": "7d" 938 | }, 939 | { 940 | "selected": false, 941 | "text": "14d", 942 | "value": "14d" 943 | }, 944 | { 945 | "selected": false, 946 | "text": "30d", 947 | "value": "30d" 948 | } 949 | ], 950 | "query": "30s,1m,2m,3m,5m,7m,10m,30m,1h,6h,12h,1d,7d,14d,30d", 951 | "queryValue": "", 952 | "refresh": 2, 953 | "skipUrlSync": false, 954 | "type": "interval" 955 | }, 956 | { 957 | "current": { 958 | "selected": true, 959 | "text": [], 960 | "value": [] 961 | }, 962 | "datasource": { 963 | "uid": "$datasource" 964 | }, 965 | "definition": "", 966 | "error": { 967 | "message": "Datasource $datasource was not found" 968 | }, 969 | "hide": 0, 970 | "includeAll": true, 971 | "label": "Instance", 972 | "multi": true, 973 | "name": "instance", 974 | "options": [], 975 | "query": "engine_daemon_engine_info", 976 | "refresh": 1, 977 | "regex": "/instance=\"([^\"]+)\"/", 978 | 
"skipUrlSync": false, 979 | "sort": 0, 980 | "type": "query", 981 | "useTags": false 982 | } 983 | ] 984 | }, 985 | "time": { 986 | "from": "now-1h", 987 | "to": "now" 988 | }, 989 | "timepicker": { 990 | "refresh_intervals": [ 991 | "5s", 992 | "10s", 993 | "30s", 994 | "1m", 995 | "5m", 996 | "15m", 997 | "30m", 998 | "1h", 999 | "2h", 1000 | "1d" 1001 | ], 1002 | "time_options": [ 1003 | "5m", 1004 | "15m", 1005 | "1h", 1006 | "6h", 1007 | "12h", 1008 | "24h", 1009 | "2d", 1010 | "7d", 1011 | "30d" 1012 | ] 1013 | }, 1014 | "timezone": "browser", 1015 | "title": "Docker Engine Metrics", 1016 | "uid": "ngW7QSi4k", 1017 | "version": 1, 1018 | "weekStart": "" 1019 | } -------------------------------------------------------------------------------- /deployment/grafana/provisioning/alerting/alerts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | groups: 3 | - orgId: 1 4 | name: pollers 5 | folder: alerts 6 | interval: 10s 7 | rules: 8 | - uid: f534a4db-b7fe-4959-b74b-9c3a4522ff7a 9 | title: No Activity Polls For Workflow 10 | condition: F 11 | data: 12 | - refId: A 13 | relativeTimeRange: 14 | from: 300 15 | to: 0 16 | datasourceUid: P7AC1CE4626B85CB6 17 | model: 18 | datasource: 19 | type: prometheus 20 | uid: P7AC1CE4626B85CB6 21 | editorMode: code 22 | expr: sum(rate(service_pending_requests{namespace="default", operation="PollActivityTaskQueue", service_name="frontend"}[1m])) or vector(0) 23 | hide: false 24 | intervalMs: 1000 25 | legendFormat: __auto 26 | maxDataPoints: 43200 27 | range: true 28 | refId: A 29 | - refId: D 30 | relativeTimeRange: 31 | from: 600 32 | to: 0 33 | datasourceUid: P7AC1CE4626B85CB6 34 | model: 35 | datasource: 36 | type: prometheus 37 | uid: P7AC1CE4626B85CB6 38 | editorMode: code 39 | expr: sum(rate(service_requests{operation="StartWorkflowExecution"}[1m])) by (namespace) 40 | hide: false 41 | intervalMs: 1000 42 | legendFormat: __auto 43 | maxDataPoints: 43200 44 | range: true 45 | refId: D 46 | - refId: B 47 | relativeTimeRange: 48 | from: 300 49 | to: 0 50 | datasourceUid: __expr__ 51 | model: 52 | conditions: 53 | - evaluator: 54 | params: [] 55 | type: gt 56 | operator: 57 | type: and 58 | query: 59 | params: 60 | - B 61 | reducer: 62 | params: [] 63 | type: last 64 | type: query 65 | datasource: 66 | type: __expr__ 67 | uid: __expr__ 68 | expression: A 69 | hide: false 70 | intervalMs: 1000 71 | maxDataPoints: 43200 72 | reducer: last 73 | refId: B 74 | settings: 75 | mode: dropNN 76 | type: reduce 77 | - refId: C 78 | relativeTimeRange: 79 | from: 600 80 | to: 0 81 | datasourceUid: __expr__ 82 | model: 83 | conditions: 84 | - evaluator: 85 | params: 86 | - 0 87 | - 0 88 | type: gt 89 | operator: 90 | type: and 91 | query: 92 | params: [] 93 | reducer: 94 | params: [] 95 | type: avg 96 | type: query 97 | datasource: 98 | name: Expression 99 | type: __expr__ 100 | uid: __expr__ 101 | expression: D 102 | intervalMs: 1000 103 | maxDataPoints: 43200 104 | reducer: last 105 | refId: C 106 | settings: 107 | mode: dropNN 108 | type: reduce 109 | - refId: E 110 | datasourceUid: __expr__ 111 | model: 112 | conditions: 113 | - evaluator: 114 | params: 115 | - 0 116 | - 0 117 | type: gt 118 | operator: 119 | type: and 120 | query: 121 | params: [] 122 | reducer: 123 | params: [] 124 | type: avg 125 | type: query 126 | datasource: 127 | name: Expression 128 | type: __expr__ 129 | uid: __expr__ 130 | expression: C 131 | intervalMs: 1000 132 | maxDataPoints: 43200 133 | refId: E 134 | type: threshold 135 | 
- refId: F 136 | datasourceUid: __expr__ 137 | model: 138 | conditions: 139 | - evaluator: 140 | params: 141 | - 1e-05 142 | - 0 143 | type: lt 144 | operator: 145 | type: and 146 | query: 147 | params: [] 148 | reducer: 149 | params: [] 150 | type: avg 151 | type: query 152 | datasource: 153 | name: Expression 154 | type: __expr__ 155 | uid: __expr__ 156 | expression: B 157 | intervalMs: 1000 158 | maxDataPoints: 43200 159 | refId: F 160 | type: threshold 161 | dashboardUid: bc1d1f40-3475-4a46-b0a1-e319f7073040 162 | panelId: 17 163 | noDataState: OK 164 | execErrState: OK 165 | for: 30s 166 | annotations: 167 | __dashboardUid__: bc1d1f40-3475-4a46-b0a1-e319f7073040 168 | __panelId__: "17" 169 | isPaused: false 170 | - orgId: 1 171 | name: resourceexhausted 172 | folder: alerts 173 | interval: 10s 174 | rules: 175 | - uid: b8122a57-da7e-4910-ac42-0d58ba43c97f 176 | title: Activity Respond Message Size 177 | condition: C 178 | data: 179 | - refId: A 180 | queryType: range 181 | relativeTimeRange: 182 | from: 300 183 | to: 0 184 | datasourceUid: P8E80F9AEF21F6940 185 | model: 186 | datasource: 187 | type: loki 188 | uid: P8E80F9AEF21F6940 189 | editorMode: builder 190 | expr: rate({compose_service="replaydemo-worker"} |= `received message larger than max` [1m]) 191 | hide: false 192 | intervalMs: 1000 193 | maxDataPoints: 43200 194 | queryType: range 195 | refId: A 196 | - refId: B 197 | relativeTimeRange: 198 | from: 300 199 | to: 0 200 | datasourceUid: __expr__ 201 | model: 202 | conditions: 203 | - evaluator: 204 | params: [] 205 | type: gt 206 | operator: 207 | type: and 208 | query: 209 | params: 210 | - B 211 | reducer: 212 | params: [] 213 | type: last 214 | type: query 215 | datasource: 216 | type: __expr__ 217 | uid: __expr__ 218 | expression: A 219 | hide: false 220 | intervalMs: 1000 221 | maxDataPoints: 43200 222 | reducer: last 223 | refId: B 224 | settings: 225 | mode: dropNN 226 | type: reduce 227 | - refId: C 228 | relativeTimeRange: 229 | from: 300 230 | to: 0 231 | datasourceUid: __expr__ 232 | model: 233 | conditions: 234 | - evaluator: 235 | params: 236 | - 0 237 | type: gt 238 | operator: 239 | type: and 240 | query: 241 | params: 242 | - C 243 | reducer: 244 | params: [] 245 | type: last 246 | type: query 247 | datasource: 248 | type: __expr__ 249 | uid: __expr__ 250 | expression: B 251 | hide: false 252 | intervalMs: 1000 253 | maxDataPoints: 43200 254 | refId: C 255 | type: threshold 256 | noDataState: NoData 257 | execErrState: Error 258 | for: 30s 259 | isPaused: false 260 | - orgId: 1 261 | name: wf failures 262 | folder: alerts 263 | interval: 10s 264 | rules: 265 | - uid: f594fced-fc48-43d4-bafe-d12cae8a6792 266 | title: Workflow Failures 267 | condition: C 268 | data: 269 | - refId: A 270 | relativeTimeRange: 271 | from: 300 272 | to: 0 273 | datasourceUid: P7AC1CE4626B85CB6 274 | model: 275 | datasource: 276 | type: prometheus 277 | uid: P7AC1CE4626B85CB6 278 | editorMode: code 279 | expr: sum(avg_over_time(temporal_workflow_failed_total[1m])) by(namespace) 280 | hide: false 281 | intervalMs: 1000 282 | legendFormat: __auto 283 | maxDataPoints: 43200 284 | range: true 285 | refId: A 286 | - refId: B 287 | relativeTimeRange: 288 | from: 300 289 | to: 0 290 | datasourceUid: __expr__ 291 | model: 292 | conditions: 293 | - evaluator: 294 | params: [] 295 | type: gt 296 | operator: 297 | type: and 298 | query: 299 | params: 300 | - B 301 | reducer: 302 | params: [] 303 | type: last 304 | type: query 305 | datasource: 306 | type: __expr__ 307 | uid: __expr__ 308 | 
expression: A 309 | hide: false 310 | intervalMs: 1000 311 | maxDataPoints: 43200 312 | reducer: max 313 | refId: B 314 | settings: 315 | mode: dropNN 316 | type: reduce 317 | - refId: C 318 | relativeTimeRange: 319 | from: 300 320 | to: 0 321 | datasourceUid: __expr__ 322 | model: 323 | conditions: 324 | - evaluator: 325 | params: 326 | - 0 327 | type: gt 328 | operator: 329 | type: and 330 | query: 331 | params: 332 | - C 333 | reducer: 334 | params: [] 335 | type: last 336 | type: query 337 | datasource: 338 | type: __expr__ 339 | uid: __expr__ 340 | expression: B 341 | hide: false 342 | intervalMs: 1000 343 | maxDataPoints: 43200 344 | refId: C 345 | type: threshold 346 | dashboardUid: bc1d1f40-3475-4a46-b0a1-e319f7073040 347 | panelId: 13 348 | noDataState: OK 349 | execErrState: OK 350 | for: 30s 351 | annotations: 352 | __dashboardUid__: bc1d1f40-3475-4a46-b0a1-e319f7073040 353 | __panelId__: "13" 354 | isPaused: false 355 | 356 | #deleteContactPoints: 357 | # - orgId: 1 358 | # name: "grafana-default-email" 359 | 360 | contactPoints: 361 | # organization ID, default = 1 362 | - orgId: 1 363 | # name of the contact point 364 | name: default-webhook 365 | receivers: 366 | # unique identifier for the receiver 367 | - uid: notifier1 368 | # type of the receiver 369 | type: webhook 370 | # Disable the additional [Incident Resolved] follow-up alert, default = false 371 | disableResolveMessage: false 372 | # settings for the specific receiver type 373 | settings: 374 | url: http://host.docker.internal:9995/webhook 375 | httpMethod: POST 376 | username: 377 | password: 378 | authorization_scheme: 379 | authorization_credentials: 380 | maxAlerts: '1000' 381 | 382 | policies: 383 | # organization ID, default = 1 384 | - orgId: 1 385 | # name of the contact point that should be used for this route 386 | receiver: default-webhook -------------------------------------------------------------------------------- /deployment/grafana/provisioning/dashboards/all.yml: -------------------------------------------------------------------------------- 1 | - name: 'default' 2 | org_id: 1 3 | folder: '' 4 | type: 'file' 5 | options: 6 | folder: '/var/lib/grafana/dashboards' -------------------------------------------------------------------------------- /deployment/grafana/provisioning/datasources/all.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: 'Temporal Prometheus' 5 | type: 'prometheus' 6 | org_id: 1 7 | url: 'http://prometheus:9090' 8 | is_default: true 9 | version: 1 10 | editable: true 11 | - name: 'Loki' 12 | type: loki 13 | access: proxy 14 | url: 'http://loki:3100' 15 | jsonData: 16 | maxLines: 1000 -------------------------------------------------------------------------------- /deployment/haproxy/haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log stdout format raw local0 3 | maxconn 50000 4 | 5 | defaults 6 | timeout connect 10000ms 7 | timeout client 75s 8 | timeout server 75s 9 | timeout http-request 75s 10 | mode http 11 | maxconn 3000 12 | 13 | frontend stats 14 | bind *:8404 15 | http-request use-service prometheus-exporter if { path /metrics } 16 | stats enable 17 | stats uri /stats 18 | stats refresh 10s 19 | 20 | frontend www 21 | mode http 22 | bind :7233 proto h2 23 | default_backend servers 24 | 25 | backend servers 26 | mode http 27 | balance roundrobin 28 | server f1 temporal-frontend:7237 proto h2 29 | server f2 
temporal-frontend2:7236 proto h2 30 | 31 | -------------------------------------------------------------------------------- /deployment/loki/local-config.yaml: -------------------------------------------------------------------------------- 1 | auth_enabled: false 2 | 3 | server: 4 | http_listen_port: 3100 5 | log_level: error 6 | 7 | ingester: 8 | wal: 9 | dir: "/tmp/wal" 10 | lifecycler: 11 | address: 127.0.0.1 12 | ring: 13 | kvstore: 14 | store: inmemory 15 | replication_factor: 1 16 | final_sleep: 0s 17 | chunk_idle_period: 5m 18 | chunk_retain_period: 30s 19 | 20 | schema_config: 21 | configs: 22 | - from: 2022-11-20 23 | store: boltdb 24 | object_store: filesystem 25 | schema: v9 26 | index: 27 | prefix: index_ 28 | period: 168h 29 | 30 | storage_config: 31 | boltdb: 32 | directory: /tmp/loki/index 33 | 34 | filesystem: 35 | directory: /tmp/loki/chunks 36 | 37 | limits_config: 38 | # enforce_metric_name: false 39 | reject_old_samples: true 40 | reject_old_samples_max_age: 168h 41 | allow_structured_metadata: false 42 | 43 | chunk_store_config: 44 | # max_look_back_period: 0 45 | 46 | table_manager: 47 | chunk_tables_provisioning: 48 | inactive_read_throughput: 0 49 | inactive_write_throughput: 0 50 | provisioned_read_throughput: 0 51 | provisioned_write_throughput: 0 52 | index_tables_provisioning: 53 | inactive_read_throughput: 0 54 | inactive_write_throughput: 0 55 | provisioned_read_throughput: 0 56 | provisioned_write_throughput: 0 57 | retention_deletes_enabled: false 58 | retention_period: 0 -------------------------------------------------------------------------------- /deployment/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | 3 | events { 4 | worker_connections 2048; 5 | } 6 | 7 | http { 8 | grpc_connect_timeout 75; 9 | proxy_read_timeout 1d; 10 | proxy_connect_timeout 1d; 11 | proxy_send_timeout 1d; 12 | 13 | upstream frontend_hosts { 14 | server temporal-frontend:7237 max_fails=0; 15 | server temporal-frontend2:7236 max_fails=0; 16 | 17 | keepalive 64; 18 | keepalive_time 1d; 19 | keepalive_timeout 75s; 20 | keepalive_requests 100000; 21 | } 22 | 23 | server { 24 | listen 7233 http2; 25 | location / { 26 | grpc_pass grpc://frontend_hosts; 27 | proxy_set_header Connection ""; 28 | proxy_http_version 1.1; 29 | } 30 | 31 | # Standard HTTP-to-gRPC status code mappings 32 | # Ref: https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md 33 | error_page 400 = @grpc_internal; 34 | error_page 401 = @grpc_unauthenticated; 35 | error_page 403 = @grpc_permission_denied; 36 | error_page 404 = @grpc_unimplemented; 37 | error_page 429 = @grpc_unavailable; 38 | error_page 502 = @grpc_unavailable; 39 | error_page 503 = @grpc_unavailable; 40 | error_page 504 = @grpc_unavailable; 41 | # NGINX-to-gRPC status code mappings 42 | # Ref: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md 43 | # 44 | error_page 405 = @grpc_internal; # Method not allowed 45 | error_page 408 = @grpc_deadline_exceeded; # Request timeout 46 | error_page 413 = @grpc_resource_exhausted; # Payload too large 47 | error_page 414 = @grpc_resource_exhausted; # Request URI too large 48 | error_page 415 = @grpc_internal; # Unsupported media type; 49 | error_page 426 = @grpc_internal; # HTTP request was sent to HTTPS port 50 | error_page 495 = @grpc_unauthenticated; # Client certificate authentication error 51 | error_page 496 = @grpc_unauthenticated; # Client certificate not presented 52 | error_page 497 = @grpc_internal; # HTTP 
request was sent to mutual TLS port 53 | error_page 500 = @grpc_internal; # Server error 54 | error_page 501 = @grpc_internal; # Not implemented 55 | # gRPC error responses 56 | # Ref: https://github.com/grpc/grpc-go/blob/master/codes/codes.go 57 | # 58 | location @grpc_deadline_exceeded { 59 | add_header grpc-status 4; 60 | add_header grpc-message 'deadline exceeded'; 61 | return 204; 62 | } 63 | location @grpc_permission_denied { 64 | add_header grpc-status 7; 65 | add_header grpc-message 'permission denied'; 66 | return 204; 67 | } 68 | location @grpc_resource_exhausted { 69 | add_header grpc-status 8; 70 | add_header grpc-message 'resource exhausted'; 71 | return 204; 72 | } 73 | location @grpc_unimplemented { 74 | add_header grpc-status 12; 75 | add_header grpc-message unimplemented; 76 | return 204; 77 | } 78 | location @grpc_internal { 79 | add_header grpc-status 13; 80 | add_header grpc-message 'internal error'; 81 | return 204; 82 | } 83 | location @grpc_unavailable { 84 | add_header grpc-status 14; 85 | add_header grpc-message unavailable; 86 | return 204; 87 | } 88 | location @grpc_unauthenticated { 89 | add_header grpc-status 16; 90 | add_header grpc-message unauthenticated; 91 | return 204; 92 | } 93 | default_type application/grpc; # Ensure gRPC for all error responses 94 | } 95 | } -------------------------------------------------------------------------------- /deployment/otel/otel-config.yaml: -------------------------------------------------------------------------------- 1 | extensions: 2 | memory_ballast: 3 | size_mib: 512 4 | zpages: 5 | endpoint: 0.0.0.0:55679 6 | 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: 11 | http: 12 | processors: 13 | batch: 14 | 15 | exporters: 16 | logging: 17 | logLevel: debug 18 | jaeger: 19 | endpoint: jaeger-all-in-one:14250 20 | tls: 21 | insecure: true 22 | prometheus: 23 | endpoint: "0.0.0.0:5575" 24 | service: 25 | pipelines: 26 | traces: 27 | receivers: [ otlp ] 28 | processors: [ batch ] 29 | exporters: [ jaeger ] 30 | metrics: 31 | receivers: [otlp] 32 | processors: [batch] 33 | exporters: [prometheus] 34 | extensions: [ memory_ballast, zpages ] -------------------------------------------------------------------------------- /deployment/prometheus/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | external_labels: 4 | cluster: "abc" 5 | scrape_configs: 6 | - job_name: 'temporalmetrics' 7 | metrics_path: /metrics 8 | scheme: http 9 | static_configs: 10 | # Server metrics target 11 | - targets: 12 | - 'host.docker.internal:8000' 13 | - 'host.docker.internal:8001' 14 | - 'host.docker.internal:8002' 15 | - 'host.docker.internal:8003' 16 | - 'host.docker.internal:8004' 17 | - 'host.docker.internal:8005' 18 | - 'host.docker.internal:8006' 19 | - 'host.docker.internal:8007' 20 | labels: 21 | group: 'server-metrics' 22 | 23 | # Local app targets (if configured) 24 | - targets: 25 | - 'host.docker.internal:8077' 26 | - 'host.docker.internal:8078' 27 | labels: 28 | group: 'sdk-metrics' 29 | # Docker metrics 30 | - targets: 31 | - 'host.docker.internal:9323' 32 | labels: 33 | group: 'docker-metrics' 34 | # Postgres Exporter 35 | - targets: 36 | - 'host.docker.internal:9187' 37 | labels: 38 | group: 'postgres-metrics' 39 | # HAProxy Metrics 40 | - targets: 41 | - 'host.docker.internal:8404' 42 | labels: 43 | group: 'haproxy-metrics' 44 | # Envoy Metrics 45 | - targets: 46 | - 'host.docker.internal:9901' 47 | labels: 48 | group: 'envoy-metrics' 49 | # this 
show how to drop certain metrics, for example here all metrics for temporal_system namespace 50 | metric_relabel_configs: 51 | - source_labels: [ namespace ] 52 | regex: "temporal_system" 53 | action: drop 54 | - job_name: 'springbootjavasdk' 55 | metrics_path: /actuator/prometheus 56 | scheme: http 57 | static_configs: 58 | - targets: 59 | - 'host.docker.internal:3030' 60 | - 'host.docker.internal:19994' 61 | - 'host.docker.internal:19995' 62 | - 'host.docker.internal:19996' 63 | - 'host.docker.internal:19997' 64 | - 'host.docker.internal:19998' 65 | - 'host.docker.internal:19999' 66 | labels: 67 | group: 'spring-boot-metrics' 68 | 69 | - job_name: "collector" 70 | static_configs: 71 | - targets: [ 'host.docker.internal:5575' ] 72 | 73 | ## sample when cloud prom endpoint support it 74 | # - job_name: 'temporalcloud' 75 | # metrics_path: /federate 76 | # params: 77 | # 'match[]': 78 | # - '{job!=""}' 79 | # tls_config: 80 | # ca_file: /etc/prometheus/TihoClient.crt 81 | # cert_file: /etc/prometheus/TihoClient.pem 82 | # key_file: /etc/prometheus/TihoClient.key 83 | # insecure_skip_verify: true 84 | # scheme: https 85 | # static_configs: 86 | # - targets: 87 | # - 'a2dd6.tmprl.cloud' 88 | # labels: 89 | # group: 'cloud-metrics' -------------------------------------------------------------------------------- /dynamicconfig/development-c1.yaml: -------------------------------------------------------------------------------- 1 | frontend.globalNamespaceCount: 2 | - value: 20000 3 | frontend.globalNamespaceRPS: 4 | - value: 1600 5 | frontend.globalRPS: 6 | - value: 6000 7 | frontend.persistenceMaxQPS: 8 | - value: 2400 9 | history.persistenceGlobalMaxQPS: 10 | - value: 36000 11 | history.persistenceMaxQPS: 12 | - value: 16384 13 | history.persistencePerShardNamespaceMaxQPS: 14 | - value: 500 15 | matching.numTaskqueueReadPartitions: 16 | - value: 4 17 | matching.numTaskqueueWritePartitions: 18 | - value: 4 19 | matching.persistenceMaxQPS: 20 | - value: 2400 -------------------------------------------------------------------------------- /dynamicconfig/development-c2.yaml: -------------------------------------------------------------------------------- 1 | frontend.globalNamespaceCount: 2 | - value: 20000 3 | frontend.globalNamespaceRPS: 4 | - value: 1600 5 | frontend.globalRPS: 6 | - value: 6000 7 | frontend.persistenceMaxQPS: 8 | - value: 2400 9 | history.persistenceGlobalMaxQPS: 10 | - value: 36000 11 | history.persistenceMaxQPS: 12 | - value: 16384 13 | history.persistencePerShardNamespaceMaxQPS: 14 | - value: 500 15 | matching.numTaskqueueReadPartitions: 16 | - value: 4 17 | matching.numTaskqueueWritePartitions: 18 | - value: 4 19 | matching.persistenceMaxQPS: 20 | - value: 2400 -------------------------------------------------------------------------------- /dynamicconfig/development.yaml: -------------------------------------------------------------------------------- 1 | frontend.globalNamespaceCount: 2 | - value: 20000 3 | frontend.globalNamespaceRPS: 4 | - value: 1600 5 | frontend.globalRPS: 6 | - value: 6000 7 | frontend.persistenceMaxQPS: 8 | - value: 2400 9 | history.persistenceGlobalMaxQPS: 10 | - value: 36000 11 | history.persistenceMaxQPS: 12 | - value: 16384 13 | history.persistencePerShardNamespaceMaxQPS: 14 | - value: 500 15 | matching.numTaskqueueReadPartitions: 16 | - value: 4 17 | matching.numTaskqueueWritePartitions: 18 | - value: 4 19 | matching.persistenceMaxQPS: 20 | - value: 2400 21 | 22 | ## just samples 23 | #frontend.globalNamespaceCount: 24 | # - value: 20000 25 | 
#frontend.globalNamespaceRPS: 26 | # - value: 1600 27 | #frontend.globalRPS: 28 | # - value: 6000 29 | #frontend.persistenceMaxQPS: 30 | # - value: 2400 31 | #history.persistenceGlobalMaxQPS: 32 | # - value: 36000 33 | #history.persistenceMaxQPS: 34 | # - value: 16384 35 | #history.persistencePerShardNamespaceMaxQPS: 36 | # - value: 500 37 | #matching.numTaskqueueReadPartitions: 38 | # - value: 4 39 | #matching.numTaskqueueWritePartitions: 40 | # - value: 4 41 | #matching.persistenceMaxQPS: 42 | # - value: 2400 43 | #frontend.enableExecuteMultiOperation: 44 | # - value: true 45 | #frontend.namespaceRPS.visibility: 46 | # - value: 20 47 | # constraints: {} 48 | #system.advancedVisibilityWritingMode: 49 | # - value: "on" 50 | # constraints: {} 51 | #system.enableReadVisibilityFromES: 52 | # - value: true 53 | # constraints: {} 54 | #history.defaultWorkflowTaskTimeout: 55 | # - value: 10s 56 | # - value: 12s 57 | # constraints: 58 | # namespace: default 59 | # - value: 13s 60 | # constraints: 61 | # namespace: my-namespace2 62 | #history.persistenceDynamicRateLimitingParams: 63 | # - enabled: true 64 | # - refreshInterval: 5s 65 | #frontend.workerVersioningDataAPIs: 66 | # - value: true 67 | #frontend.workerVersioningWorkflowAPIs: 68 | # - value: true 69 | #frontend.buildIdScavengerEnabled: 70 | # - value: true 71 | #frontend.enableUpdateWorkflowExecution: 72 | # - value: true 73 | #frontend.enableUpdateWorkflowExecutionAsyncAccepted: 74 | # - value: true 75 | # disable incoming calls to mytest ns (just to show that its possible) 76 | #frontend.namespacerps: 77 | # - value: 2400 78 | # - value: 0 79 | # constraints: 80 | # namespace: "mytest" 81 | #frontend.namespaceBurst: 82 | # - value: 4800 83 | # - value: 0 84 | # constraints: 85 | # namespace: "mytest" 86 | 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /fluentd/Dockerfile: -------------------------------------------------------------------------------- 1 | # Inherit from the official fluentd image. 2 | FROM fluent/fluentd:v1.15-1 3 | 4 | # Install elasticsearch library and fluent plugin. 5 | USER root 6 | RUN apk add --no-cache --update --virtual .build-deps \ 7 | sudo build-base ruby-dev \ 8 | # Pin the elasticsearch-ruby library < 7.14. 
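# (Likely reason for the pin: newer elasticsearch-ruby releases introduced stricter client/product checks that can break fluent-plugin-elasticsearch in some setups; the issue linked below has the details.)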
9 | # See https://github.com/uken/fluent-plugin-elasticsearch/issues/912 10 | && sudo gem install elasticsearch -v 7.13.3 \ 11 | && sudo gem install fluent-plugin-elasticsearch \ 12 | && sudo gem sources --clear-all \ 13 | && apk del .build-deps \ 14 | && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem 15 | USER fluent -------------------------------------------------------------------------------- /fluentd/conf/fluent.conf: -------------------------------------------------------------------------------- 1 | <source> 2 | @type forward 3 | port 24224 4 | bind 0.0.0.0 5 | </source> 6 | <match *.**> 7 | @type copy 8 | <store> 9 | @type elasticsearch 10 | host elasticsearch 11 | port 9200 12 | logstash_format true 13 | logstash_prefix fluentd 14 | logstash_dateformat %Y%m%d 15 | include_tag_key true 16 | type_name access_log 17 | tag_key @log_name 18 | flush_interval 1s 19 | </store> 20 | <store> 21 | @type stdout 22 | </store> 23 | </match> -------------------------------------------------------------------------------- /script/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu -o pipefail 4 | 5 | # === Auto setup defaults === 6 | 7 | : "${DB:=cassandra}" 8 | : "${SKIP_SCHEMA_SETUP:=false}" 9 | : "${SKIP_DB_CREATE:=false}" 10 | 11 | # Cassandra 12 | : "${KEYSPACE:=temporal}" 13 | 14 | : "${CASSANDRA_SEEDS:=}" 15 | : "${CASSANDRA_PORT:=9042}" 16 | : "${CASSANDRA_USER:=}" 17 | : "${CASSANDRA_PASSWORD:=}" 18 | : "${CASSANDRA_TLS_ENABLED:=}" 19 | : "${CASSANDRA_CERT:=}" 20 | : "${CASSANDRA_CERT_KEY:=}" 21 | : "${CASSANDRA_CA:=}" 22 | : "${CASSANDRA_REPLICATION_FACTOR:=1}" 23 | 24 | # MySQL/PostgreSQL 25 | : "${DBNAME:=temporal}" 26 | : "${VISIBILITY_DBNAME:=temporal_visibility}" 27 | : "${DB_PORT:=3306}" 28 | 29 | : "${MYSQL_SEEDS:=}" 30 | : "${MYSQL_USER:=}" 31 | : "${MYSQL_PWD:=}" 32 | : "${MYSQL_TX_ISOLATION_COMPAT:=false}" 33 | 34 | : "${POSTGRES_SEEDS:=}" 35 | : "${POSTGRES_USER:=}" 36 | : "${POSTGRES_PWD:=}" 37 | 38 | : "${POSTGRES_TLS_ENABLED:=false}" 39 | : "${POSTGRES_TLS_DISABLE_HOST_VERIFICATION:=false}" 40 | : "${POSTGRES_TLS_CERT_FILE:=}" 41 | : "${POSTGRES_TLS_KEY_FILE:=}" 42 | : "${POSTGRES_TLS_CA_FILE:=}" 43 | : "${POSTGRES_TLS_SERVER_NAME:=}" 44 | 45 | # Elasticsearch 46 | : "${ENABLE_ES:=false}" 47 | : "${ES_SCHEME:=http}" 48 | : "${ES_SEEDS:=}" 49 | : "${ES_PORT:=9200}" 50 | : "${ES_USER:=}" 51 | : "${ES_PWD:=}" 52 | : "${ES_VERSION:=v7}" 53 | : "${ES_VIS_INDEX:=temporal_visibility_v1_dev}" 54 | : "${ES_SEC_VIS_INDEX:=}" 55 | : "${ES_SCHEMA_SETUP_TIMEOUT_IN_SECONDS:=0}" 56 | 57 | # Server setup 58 | : "${TEMPORAL_ADDRESS:=}" 59 | # TEMPORAL_CLI_ADDRESS is deprecated and support for it will be removed in the future release. 60 | : "${TEMPORAL_CLI_ADDRESS:=}" 61 | 62 | : "${SKIP_DEFAULT_NAMESPACE_CREATION:=false}" 63 | : "${DEFAULT_NAMESPACE:=default}" 64 | : "${DEFAULT_NAMESPACE_RETENTION:=24h}" 65 | 66 | : "${SKIP_ADD_CUSTOM_SEARCH_ATTRIBUTES:=false}" 67 | 68 | # === Helper functions === 69 | 70 | die() { 71 | echo "$*" 1>&2 72 | exit 1 73 | } 74 | 75 | # === Main database functions === 76 | 77 | validate_db_env() { 78 | case ${DB} in 79 | mysql8) 80 | if [[ -z ${MYSQL_SEEDS} ]]; then 81 | die "MYSQL_SEEDS env must be set if DB is ${DB}." 82 | fi 83 | ;; 84 | postgres12 | postgres12_pgx) 85 | if [[ -z ${POSTGRES_SEEDS} ]]; then 86 | die "POSTGRES_SEEDS env must be set if DB is ${DB}." 87 | fi 88 | ;; 89 | cassandra) 90 | if [[ -z ${CASSANDRA_SEEDS} ]]; then 91 | die "CASSANDRA_SEEDS env must be set if DB is ${DB}."
92 | fi 93 | ;; 94 | *) 95 | die "Unsupported driver specified: 'DB=${DB}'. Valid drivers are: mysql8, postgres12, postgres12_pgx, cassandra." 96 | ;; 97 | esac 98 | } 99 | 100 | wait_for_cassandra() { 101 | # TODO (alex): Remove exports 102 | export CASSANDRA_USER=${CASSANDRA_USER} 103 | export CASSANDRA_PORT=${CASSANDRA_PORT} 104 | export CASSANDRA_ENABLE_TLS=${CASSANDRA_TLS_ENABLED} 105 | export CASSANDRA_TLS_CERT=${CASSANDRA_CERT} 106 | export CASSANDRA_TLS_KEY=${CASSANDRA_CERT_KEY} 107 | export CASSANDRA_TLS_CA=${CASSANDRA_CA} 108 | 109 | export CASSANDRA_PASSWORD=${CASSANDRA_PASSWORD} 110 | 111 | until temporal-cassandra-tool --ep "${CASSANDRA_SEEDS}" validate-health; do 112 | echo 'Waiting for Cassandra to start up.' 113 | sleep 1 114 | done 115 | echo 'Cassandra started.' 116 | } 117 | 118 | wait_for_mysql() { 119 | until nc -z "${MYSQL_SEEDS%%,*}" "${DB_PORT}"; do 120 | echo 'Waiting for MySQL to start up.' 121 | sleep 1 122 | done 123 | 124 | echo 'MySQL started.' 125 | } 126 | 127 | wait_for_postgres() { 128 | until nc -z "${POSTGRES_SEEDS%%,*}" "${DB_PORT}"; do 129 | echo 'Waiting for PostgreSQL to startup.' 130 | sleep 1 131 | done 132 | 133 | echo 'PostgreSQL started.' 134 | } 135 | 136 | wait_for_db() { 137 | case ${DB} in 138 | mysql8) 139 | wait_for_mysql 140 | ;; 141 | postgres12 | postgres12_pgx) 142 | wait_for_postgres 143 | ;; 144 | cassandra) 145 | wait_for_cassandra 146 | ;; 147 | *) 148 | die "Unsupported DB type: ${DB}." 149 | ;; 150 | esac 151 | } 152 | 153 | setup_cassandra_schema() { 154 | # TODO (alex): Remove exports 155 | export CASSANDRA_USER=${CASSANDRA_USER} 156 | export CASSANDRA_PORT=${CASSANDRA_PORT} 157 | export CASSANDRA_ENABLE_TLS=${CASSANDRA_TLS_ENABLED} 158 | export CASSANDRA_TLS_CERT=${CASSANDRA_CERT} 159 | export CASSANDRA_TLS_KEY=${CASSANDRA_CERT_KEY} 160 | export CASSANDRA_TLS_CA=${CASSANDRA_CA} 161 | 162 | export CASSANDRA_PASSWORD=${CASSANDRA_PASSWORD} 163 | 164 | SCHEMA_DIR=${TEMPORAL_HOME}/schema/cassandra/temporal/versioned 165 | if [[ ${SKIP_DB_CREATE} != true ]]; then 166 | temporal-cassandra-tool --ep "${CASSANDRA_SEEDS}" create -k "${KEYSPACE}" --rf "${CASSANDRA_REPLICATION_FACTOR}" 167 | fi 168 | temporal-cassandra-tool --ep "${CASSANDRA_SEEDS}" -k "${KEYSPACE}" setup-schema -v 0.0 169 | temporal-cassandra-tool --ep "${CASSANDRA_SEEDS}" -k "${KEYSPACE}" update-schema -d "${SCHEMA_DIR}" 170 | } 171 | 172 | setup_mysql_schema() { 173 | # TODO (alex): Remove exports 174 | export SQL_PASSWORD=${MYSQL_PWD} 175 | 176 | if [[ ${MYSQL_TX_ISOLATION_COMPAT} == true ]]; then 177 | MYSQL_CONNECT_ATTR=(--connect-attributes "tx_isolation=READ-COMMITTED") 178 | else 179 | MYSQL_CONNECT_ATTR=() 180 | fi 181 | 182 | MYSQL_VERSION_DIR=v8 183 | SCHEMA_DIR=${TEMPORAL_HOME}/schema/mysql/${MYSQL_VERSION_DIR}/temporal/versioned 184 | if [[ ${SKIP_DB_CREATE} != true ]]; then 185 | temporal-sql-tool --plugin "${DB}" --ep "${MYSQL_SEEDS}" -u "${MYSQL_USER}" -p "${DB_PORT}" "${MYSQL_CONNECT_ATTR[@]}" --db "${DBNAME}" create 186 | fi 187 | temporal-sql-tool --plugin "${DB}" --ep "${MYSQL_SEEDS}" -u "${MYSQL_USER}" -p "${DB_PORT}" "${MYSQL_CONNECT_ATTR[@]}" --db "${DBNAME}" setup-schema -v 0.0 188 | temporal-sql-tool --plugin "${DB}" --ep "${MYSQL_SEEDS}" -u "${MYSQL_USER}" -p "${DB_PORT}" "${MYSQL_CONNECT_ATTR[@]}" --db "${DBNAME}" update-schema -d "${SCHEMA_DIR}" 189 | 190 | # Only setup visibility schema if ES is not enabled 191 | if [[ ${ENABLE_ES} == false ]]; then 192 | 
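# (When ENABLE_ES=true, visibility records are written to the Elasticsearch index configured later in this script, so the SQL visibility schema below is skipped.)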
VISIBILITY_SCHEMA_DIR=${TEMPORAL_HOME}/schema/mysql/${MYSQL_VERSION_DIR}/visibility/versioned 193 | if [[ ${SKIP_DB_CREATE} != true ]]; then 194 | temporal-sql-tool --plugin "${DB}" --ep "${MYSQL_SEEDS}" -u "${MYSQL_USER}" -p "${DB_PORT}" "${MYSQL_CONNECT_ATTR[@]}" --db "${VISIBILITY_DBNAME}" create 195 | fi 196 | temporal-sql-tool --plugin "${DB}" --ep "${MYSQL_SEEDS}" -u "${MYSQL_USER}" -p "${DB_PORT}" "${MYSQL_CONNECT_ATTR[@]}" --db "${VISIBILITY_DBNAME}" setup-schema -v 0.0 197 | temporal-sql-tool --plugin "${DB}" --ep "${MYSQL_SEEDS}" -u "${MYSQL_USER}" -p "${DB_PORT}" "${MYSQL_CONNECT_ATTR[@]}" --db "${VISIBILITY_DBNAME}" update-schema -d "${VISIBILITY_SCHEMA_DIR}" 198 | fi 199 | } 200 | 201 | setup_postgres_schema() { 202 | # TODO (alex): Remove exports 203 | export SQL_PASSWORD=${POSTGRES_PWD} 204 | 205 | POSTGRES_VERSION_DIR=v12 206 | SCHEMA_DIR=${TEMPORAL_HOME}/schema/postgresql/${POSTGRES_VERSION_DIR}/temporal/versioned 207 | # Create database only if its name is different from the user name. Otherwise PostgreSQL container itself will create database. 208 | if [[ ${DBNAME} != "${POSTGRES_USER}" && ${SKIP_DB_CREATE} != true ]]; then 209 | temporal-sql-tool \ 210 | --plugin ${DB} \ 211 | --ep "${POSTGRES_SEEDS}" \ 212 | -u "${POSTGRES_USER}" \ 213 | -p "${DB_PORT}" \ 214 | --db "${DBNAME}" \ 215 | --tls="${POSTGRES_TLS_ENABLED}" \ 216 | --tls-disable-host-verification="${POSTGRES_TLS_DISABLE_HOST_VERIFICATION}" \ 217 | --tls-cert-file "${POSTGRES_TLS_CERT_FILE}" \ 218 | --tls-key-file "${POSTGRES_TLS_KEY_FILE}" \ 219 | --tls-ca-file "${POSTGRES_TLS_CA_FILE}" \ 220 | --tls-server-name "${POSTGRES_TLS_SERVER_NAME}" \ 221 | create 222 | fi 223 | temporal-sql-tool \ 224 | --plugin ${DB} \ 225 | --ep "${POSTGRES_SEEDS}" \ 226 | -u "${POSTGRES_USER}" \ 227 | -p "${DB_PORT}" \ 228 | --db "${DBNAME}" \ 229 | --tls="${POSTGRES_TLS_ENABLED}" \ 230 | --tls-disable-host-verification="${POSTGRES_TLS_DISABLE_HOST_VERIFICATION}" \ 231 | --tls-cert-file "${POSTGRES_TLS_CERT_FILE}" \ 232 | --tls-key-file "${POSTGRES_TLS_KEY_FILE}" \ 233 | --tls-ca-file "${POSTGRES_TLS_CA_FILE}" \ 234 | --tls-server-name "${POSTGRES_TLS_SERVER_NAME}" \ 235 | setup-schema -v 0.0 236 | temporal-sql-tool \ 237 | --plugin ${DB} \ 238 | --ep "${POSTGRES_SEEDS}" \ 239 | -u "${POSTGRES_USER}" \ 240 | -p "${DB_PORT}" \ 241 | --db "${DBNAME}" \ 242 | --tls="${POSTGRES_TLS_ENABLED}" \ 243 | --tls-disable-host-verification="${POSTGRES_TLS_DISABLE_HOST_VERIFICATION}" \ 244 | --tls-cert-file "${POSTGRES_TLS_CERT_FILE}" \ 245 | --tls-key-file "${POSTGRES_TLS_KEY_FILE}" \ 246 | --tls-ca-file "${POSTGRES_TLS_CA_FILE}" \ 247 | --tls-server-name "${POSTGRES_TLS_SERVER_NAME}" \ 248 | update-schema -d "${SCHEMA_DIR}" 249 | 250 | # Only setup visibility schema if ES is not enabled 251 | if [[ ${ENABLE_ES} == false ]]; then 252 | VISIBILITY_SCHEMA_DIR=${TEMPORAL_HOME}/schema/postgresql/${POSTGRES_VERSION_DIR}/visibility/versioned 253 | if [[ ${VISIBILITY_DBNAME} != "${POSTGRES_USER}" && ${SKIP_DB_CREATE} != true ]]; then 254 | temporal-sql-tool \ 255 | --plugin ${DB} \ 256 | --ep "${POSTGRES_SEEDS}" \ 257 | -u "${POSTGRES_USER}" \ 258 | -p "${DB_PORT}" \ 259 | --db "${VISIBILITY_DBNAME}" \ 260 | --tls="${POSTGRES_TLS_ENABLED}" \ 261 | --tls-disable-host-verification="${POSTGRES_TLS_DISABLE_HOST_VERIFICATION}" \ 262 | --tls-cert-file "${POSTGRES_TLS_CERT_FILE}" \ 263 | --tls-key-file "${POSTGRES_TLS_KEY_FILE}" \ 264 | --tls-ca-file "${POSTGRES_TLS_CA_FILE}" \ 265 | --tls-server-name "${POSTGRES_TLS_SERVER_NAME}" \ 266 | create 267 | fi 
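# With the visibility database created (or reused), apply its base schema and then upgrade it to the latest versioned schema, mirroring the steps used for the main temporal database above.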
268 | temporal-sql-tool \ 269 | --plugin ${DB} \ 270 | --ep "${POSTGRES_SEEDS}" \ 271 | -u "${POSTGRES_USER}" \ 272 | -p "${DB_PORT}" \ 273 | --db "${VISIBILITY_DBNAME}" \ 274 | --tls="${POSTGRES_TLS_ENABLED}" \ 275 | --tls-disable-host-verification="${POSTGRES_TLS_DISABLE_HOST_VERIFICATION}" \ 276 | --tls-cert-file "${POSTGRES_TLS_CERT_FILE}" \ 277 | --tls-key-file "${POSTGRES_TLS_KEY_FILE}" \ 278 | --tls-ca-file "${POSTGRES_TLS_CA_FILE}" \ 279 | --tls-server-name "${POSTGRES_TLS_SERVER_NAME}" \ 280 | setup-schema -v 0.0 281 | temporal-sql-tool \ 282 | --plugin ${DB} \ 283 | --ep "${POSTGRES_SEEDS}" \ 284 | -u "${POSTGRES_USER}" \ 285 | -p "${DB_PORT}" \ 286 | --db "${VISIBILITY_DBNAME}" \ 287 | --tls="${POSTGRES_TLS_ENABLED}" \ 288 | --tls-disable-host-verification="${POSTGRES_TLS_DISABLE_HOST_VERIFICATION}" \ 289 | --tls-cert-file "${POSTGRES_TLS_CERT_FILE}" \ 290 | --tls-key-file "${POSTGRES_TLS_KEY_FILE}" \ 291 | --tls-ca-file "${POSTGRES_TLS_CA_FILE}" \ 292 | --tls-server-name "${POSTGRES_TLS_SERVER_NAME}" \ 293 | update-schema -d "${VISIBILITY_SCHEMA_DIR}" 294 | fi 295 | } 296 | 297 | setup_schema() { 298 | case ${DB} in 299 | mysql8) 300 | echo 'Setup MySQL schema.' 301 | setup_mysql_schema 302 | ;; 303 | postgres12 | postgres12_pgx) 304 | echo 'Setup PostgreSQL schema.' 305 | setup_postgres_schema 306 | ;; 307 | cassandra) 308 | echo 'Setup Cassandra schema.' 309 | setup_cassandra_schema 310 | ;; 311 | *) 312 | die "Unsupported DB type: ${DB}." 313 | ;; 314 | esac 315 | } 316 | 317 | # === Elasticsearch functions === 318 | 319 | validate_es_env() { 320 | if [[ ${ENABLE_ES} == true ]]; then 321 | if [[ -z ${ES_SEEDS} ]]; then 322 | die "ES_SEEDS env must be set if ENABLE_ES is ${ENABLE_ES}" 323 | fi 324 | fi 325 | } 326 | 327 | wait_for_es() { 328 | SECONDS=0 329 | 330 | ES_SERVER="${ES_SCHEME}://${ES_SEEDS%%,*}:${ES_PORT}" 331 | 332 | until curl --silent --fail --user "${ES_USER}":"${ES_PWD}" "${ES_SERVER}" >& /dev/null; do 333 | DURATION=${SECONDS} 334 | 335 | if [[ ${ES_SCHEMA_SETUP_TIMEOUT_IN_SECONDS} -gt 0 && ${DURATION} -ge "${ES_SCHEMA_SETUP_TIMEOUT_IN_SECONDS}" ]]; then 336 | echo 'WARNING: timed out waiting for Elasticsearch to start up. Skipping index creation.' 337 | return; 338 | fi 339 | 340 | echo 'Waiting for Elasticsearch to start up.' 341 | sleep 1 342 | done 343 | 344 | echo 'Elasticsearch started.' 345 | } 346 | 347 | setup_es_index() { 348 | ES_SERVER="${ES_SCHEME}://${ES_SEEDS%%,*}:${ES_PORT}" 349 | # @@@SNIPSTART setup-es-template-commands 350 | # ES_SERVER is the URL of Elasticsearch server i.e. "http://localhost:9200". 
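# The curl calls below apply the cluster settings, install the temporal_visibility_v1_template index template, and create the visibility index (plus an optional secondary index when ES_SEC_VIS_INDEX is set).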
351 | SETTINGS_URL="${ES_SERVER}/_cluster/settings" 352 | SETTINGS_FILE=${TEMPORAL_HOME}/schema/elasticsearch/visibility/cluster_settings_${ES_VERSION}.json 353 | TEMPLATE_URL="${ES_SERVER}/_template/temporal_visibility_v1_template" 354 | SCHEMA_FILE=${TEMPORAL_HOME}/schema/elasticsearch/visibility/index_template_${ES_VERSION}.json 355 | INDEX_URL="${ES_SERVER}/${ES_VIS_INDEX}" 356 | curl --fail --user "${ES_USER}":"${ES_PWD}" -X PUT "${SETTINGS_URL}" -H "Content-Type: application/json" --data-binary "@${SETTINGS_FILE}" --write-out "\n" 357 | curl --fail --user "${ES_USER}":"${ES_PWD}" -X PUT "${TEMPLATE_URL}" -H 'Content-Type: application/json' --data-binary "@${SCHEMA_FILE}" --write-out "\n" 358 | curl --user "${ES_USER}":"${ES_PWD}" -X PUT "${INDEX_URL}" --write-out "\n" 359 | if [[ -n "${ES_SEC_VIS_INDEX}" ]]; then 360 | SEC_INDEX_URL="${ES_SERVER}/${ES_SEC_VIS_INDEX}" 361 | curl --user "${ES_USER}":"${ES_PWD}" -X PUT "${SEC_INDEX_URL}" --write-out "\n" 362 | fi 363 | # @@@SNIPEND 364 | } 365 | 366 | # === Server setup === 367 | 368 | register_default_namespace() { 369 | echo "Registering default namespace: ${DEFAULT_NAMESPACE}." 370 | if ! temporal operator namespace describe "${DEFAULT_NAMESPACE}"; then 371 | echo "Default namespace ${DEFAULT_NAMESPACE} not found. Creating..." 372 | temporal operator namespace create --retention "${DEFAULT_NAMESPACE_RETENTION}" --description "Default namespace for Temporal Server." "${DEFAULT_NAMESPACE}" 373 | echo "Default namespace ${DEFAULT_NAMESPACE} registration complete." 374 | else 375 | echo "Default namespace ${DEFAULT_NAMESPACE} already registered." 376 | fi 377 | } 378 | 379 | add_custom_search_attributes() { 380 | until temporal operator search-attribute list --namespace "${DEFAULT_NAMESPACE}"; do 381 | echo "Waiting for namespace cache to refresh..." 382 | sleep 1 383 | done 384 | echo "Namespace cache refreshed." 385 | 386 | echo "Adding Custom*Field search attributes." 387 | # TODO: Remove CustomStringField 388 | # @@@SNIPSTART add-custom-search-attributes-for-testing-command 389 | temporal operator search-attribute create --namespace "${DEFAULT_NAMESPACE}" \ 390 | --name CustomKeywordField --type Keyword \ 391 | --name CustomStringField --type Text \ 392 | --name CustomTextField --type Text \ 393 | --name CustomIntField --type Int \ 394 | --name CustomDatetimeField --type Datetime \ 395 | --name CustomDoubleField --type Double \ 396 | --name CustomBoolField --type Bool 397 | # @@@SNIPEND 398 | } 399 | 400 | setup_server(){ 401 | echo "Temporal CLI address: ${TEMPORAL_ADDRESS}." 402 | 403 | until temporal operator cluster health | grep -q SERVING; do 404 | echo "Waiting for Temporal server to start..." 405 | sleep 1 406 | done 407 | echo "Temporal server started." 408 | 409 | if [[ ${SKIP_DEFAULT_NAMESPACE_CREATION} != true ]]; then 410 | register_default_namespace 411 | fi 412 | 413 | if [[ ${SKIP_ADD_CUSTOM_SEARCH_ATTRIBUTES} != true ]]; then 414 | add_custom_search_attributes 415 | fi 416 | } 417 | 418 | # === Main === 419 | 420 | if [[ ${SKIP_SCHEMA_SETUP} != true ]]; then 421 | validate_db_env 422 | wait_for_db 423 | setup_schema 424 | fi 425 | 426 | if [[ ${ENABLE_ES} == true ]]; then 427 | validate_es_env 428 | wait_for_es 429 | setup_es_index 430 | fi 431 | 432 | # Run this func in parallel process. It will wait for server to start and then run required steps. 
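# Backgrounding it keeps the entrypoint from blocking on server startup; the tail -f /dev/null below then keeps the container alive after setup completes.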
433 | setup_server & 434 | 435 | tail -f /dev/null -------------------------------------------------------------------------------- /template/my_config_template.yaml: -------------------------------------------------------------------------------- 1 | log: 2 | stdout: true 3 | level: {{ default .Env.LOG_LEVEL "warn" }} 4 | 5 | persistence: 6 | numHistoryShards: {{ default .Env.NUM_HISTORY_SHARDS "4" }} 7 | defaultStore: default 8 | {{- $es := default .Env.ENABLE_ES "false" | lower -}} 9 | {{- if eq $es "true" }} 10 | visibilityStore: es-visibility 11 | {{- else }} 12 | visibilityStore: visibility 13 | {{- end }} 14 | datastores: 15 | {{- $db := default .Env.DB "cassandra" | lower -}} 16 | {{- if eq $db "cassandra" }} 17 | default: 18 | cassandra: 19 | hosts: "{{ default .Env.CASSANDRA_SEEDS "" }}" 20 | keyspace: "{{ default .Env.KEYSPACE "temporal" }}" 21 | user: "{{ default .Env.CASSANDRA_USER "" }}" 22 | password: "{{ default .Env.CASSANDRA_PASSWORD "" }}" 23 | {{- if .Env.CASSANDRA_ALLOWED_AUTHENTICATORS }} 24 | allowedAuthenticators: {{ range split .Env.CASSANDRA_ALLOWED_AUTHENTICATORS "," }} 25 | - {{trim .}} 26 | {{- end }} 27 | {{- end }} 28 | port: {{ default .Env.CASSANDRA_PORT "9042" }} 29 | maxConns: {{ default .Env.CASSANDRA_MAX_CONNS "20" }} 30 | tls: 31 | enabled: {{ default .Env.CASSANDRA_TLS_ENABLED "false" }} 32 | caFile: {{ default .Env.CASSANDRA_CA "" }} 33 | certFile: {{ default .Env.CASSANDRA_CERT "" }} 34 | keyFile: {{ default .Env.CASSANDRA_CERT_KEY "" }} 35 | caData: {{ default .Env.CASSANDRA_CA_DATA "" }} 36 | certData: {{ default .Env.CASSANDRA_CERT_DATA "" }} 37 | keyData: {{ default .Env.CASSANDRA_CERT_KEY_DATA "" }} 38 | enableHostVerification: {{ default .Env.CASSANDRA_HOST_VERIFICATION "false" }} 39 | serverName: {{ default .Env.CASSANDRA_HOST_NAME "" }} 40 | {{- if .Env.CASSANDRA_ADDRESS_TRANSLATOR }} 41 | addressTranslator: 42 | translator: {{ default .Env.CASSANDRA_ADDRESS_TRANSLATOR "" }} 43 | {{- if .Env.CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS }} 44 | options: 45 | advertised-hostname: {{ default .Env.CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS "" }} 46 | {{- end }} 47 | {{- end }} 48 | {{- else if eq $db "mysql8" }} 49 | default: 50 | sql: 51 | pluginName: "{{ $db }}" 52 | databaseName: "{{ default .Env.DBNAME "temporal" }}" 53 | connectAddr: "{{ default .Env.MYSQL_SEEDS "" }}:{{ default .Env.DB_PORT "3306" }}" 54 | connectProtocol: "tcp" 55 | user: "{{ default .Env.MYSQL_USER "" }}" 56 | password: "{{ default .Env.MYSQL_PWD "" }}" 57 | {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }} 58 | connectAttributes: 59 | tx_isolation: "'READ-COMMITTED'" 60 | {{- end }} 61 | maxConns: {{ default .Env.SQL_MAX_CONNS "20" }} 62 | maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }} 63 | maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }} 64 | tls: 65 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 66 | caFile: {{ default .Env.SQL_CA "" }} 67 | certFile: {{ default .Env.SQL_CERT "" }} 68 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 69 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 70 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 71 | visibility: 72 | sql: 73 | {{ $visibility_seeds_default := default .Env.MYSQL_SEEDS "" }} 74 | {{ $visibility_seeds := default .Env.VISIBILITY_MYSQL_SEEDS $visibility_seeds_default }} 75 | {{ $visibility_port_default := default .Env.DB_PORT "3306" }} 76 | {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }} 77 | {{ $visibility_user_default := default .Env.MYSQL_USER "" }} 78 
| {{ $visibility_user := default .Env.VISIBILITY_MYSQL_USER $visibility_user_default }} 79 | {{ $visibility_pwd_default := default .Env.MYSQL_PWD "" }} 80 | {{ $visibility_pwd := default .Env.VISIBILITY_MYSQL_PWD $visibility_pwd_default }} 81 | pluginName: "{{ $db }}" 82 | databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}" 83 | connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" 84 | connectProtocol: "tcp" 85 | user: "{{ $visibility_user }}" 86 | password: "{{ $visibility_pwd }}" 87 | {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }} 88 | connectAttributes: 89 | tx_isolation: "'READ-COMMITTED'" 90 | {{- end }} 91 | maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }} 92 | maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }} 93 | maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }} 94 | tls: 95 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 96 | caFile: {{ default .Env.SQL_CA "" }} 97 | certFile: {{ default .Env.SQL_CERT "" }} 98 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 99 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 100 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 101 | {{- else if eq $db "postgres12" "postgres12_pgx" }} 102 | default: 103 | sql: 104 | pluginName: "{{ $db }}" 105 | databaseName: "{{ default .Env.DBNAME "temporal" }}" 106 | connectAddr: "{{ default .Env.POSTGRES_SEEDS "" }}:{{ default .Env.DB_PORT "5432" }}" 107 | connectProtocol: "tcp" 108 | user: "{{ default .Env.POSTGRES_USER "" }}" 109 | password: "{{ default .Env.POSTGRES_PWD "" }}" 110 | maxConns: {{ default .Env.SQL_MAX_CONNS "20" }} 111 | maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }} 112 | maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }} 113 | tls: 114 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 115 | caFile: {{ default .Env.SQL_CA "" }} 116 | certFile: {{ default .Env.SQL_CERT "" }} 117 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 118 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 119 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 120 | visibility: 121 | sql: 122 | {{ $visibility_seeds_default := default .Env.POSTGRES_SEEDS "" }} 123 | {{ $visibility_seeds := default .Env.VISIBILITY_POSTGRES_SEEDS $visibility_seeds_default }} 124 | {{ $visibility_port_default := default .Env.DB_PORT "5432" }} 125 | {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }} 126 | {{ $visibility_user_default := default .Env.POSTGRES_USER "" }} 127 | {{ $visibility_user := default .Env.VISIBILITY_POSTGRES_USER $visibility_user_default }} 128 | {{ $visibility_pwd_default := default .Env.POSTGRES_PWD "" }} 129 | {{ $visibility_pwd := default .Env.VISIBILITY_POSTGRES_PWD $visibility_pwd_default }} 130 | pluginName: "{{ $db }}" 131 | databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}" 132 | connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" 133 | connectProtocol: "tcp" 134 | user: "{{ $visibility_user }}" 135 | password: "{{ $visibility_pwd }}" 136 | maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }} 137 | maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }} 138 | maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }} 139 | tls: 140 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 141 | caFile: {{ default .Env.SQL_CA "" }} 142 | certFile: {{ default .Env.SQL_CERT "" }} 143 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 144 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 145 | 
serverName: {{ default .Env.SQL_HOST_NAME "" }} 146 | {{- end }} 147 | {{- if eq $es "true" }} 148 | es-visibility: 149 | elasticsearch: 150 | version: {{ default .Env.ES_VERSION "" }} 151 | url: 152 | scheme: {{ default .Env.ES_SCHEME "http" }} 153 | host: "{{ default .Env.ES_SEEDS "" }}:{{ default .Env.ES_PORT "9200" }}" 154 | username: "{{ default .Env.ES_USER "" }}" 155 | password: "{{ default .Env.ES_PWD "" }}" 156 | indices: 157 | visibility: "{{ default .Env.ES_VIS_INDEX "temporal_visibility_v1_dev" }}" 158 | {{- $es_sec_vis_index := default .Env.ES_SEC_VIS_INDEX "" -}} 159 | {{- if ne $es_sec_vis_index "" }} 160 | secondary_visibility: "{{ $es_sec_vis_index }}" 161 | {{- end }} 162 | {{- end }} 163 | 164 | global: 165 | membership: 166 | maxJoinDuration: 30s 167 | broadcastAddress: "{{ default .Env.TEMPORAL_BROADCAST_ADDRESS "" }}" 168 | pprof: 169 | port: {{ default .Env.PPROF_PORT "0" }} 170 | tls: 171 | refreshInterval: {{ default .Env.TEMPORAL_TLS_REFRESH_INTERVAL "0s" }} 172 | expirationChecks: 173 | warningWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_WARNING_WINDOW "0s" }} 174 | errorWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_ERROR_WINDOW "0s" }} 175 | checkInterval: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_CHECK_INTERVAL "0s" }} 176 | internode: 177 | # This server section configures the TLS certificate that internal temporal 178 | # cluster nodes (history, matching, and internal-frontend) present to other 179 | # clients within the Temporal Cluster. 180 | server: 181 | requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }} 182 | 183 | certFile: {{ default .Env.TEMPORAL_TLS_SERVER_CERT "" }} 184 | keyFile: {{ default .Env.TEMPORAL_TLS_SERVER_KEY "" }} 185 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 186 | clientCaFiles: 187 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 188 | {{- end }} 189 | 190 | certData: {{ default .Env.TEMPORAL_TLS_SERVER_CERT_DATA "" }} 191 | keyData: {{ default .Env.TEMPORAL_TLS_SERVER_KEY_DATA "" }} 192 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 193 | clientCaData: 194 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 195 | {{- end }} 196 | 197 | # This client section is used to configure the TLS clients within 198 | # the Temporal Cluster that connect to an Internode (history, matching, or 199 | # internal-frontend) 200 | client: 201 | serverName: {{ default .Env.TEMPORAL_TLS_INTERNODE_SERVER_NAME "" }} 202 | disableHostVerification: {{ default .Env.TEMPORAL_TLS_INTERNODE_DISABLE_HOST_VERIFICATION "false"}} 203 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 204 | rootCaFiles: 205 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 206 | {{- end }} 207 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 208 | rootCaData: 209 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 210 | {{- end }} 211 | frontend: 212 | # This server section configures the TLS certificate that the Frontend 213 | # server presents to external clients. 
214 | server: 215 | requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }} 216 | certFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT "" }} 217 | keyFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY "" }} 218 | {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT }} 219 | clientCaFiles: 220 | - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT "" }} 221 | - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT "" }} 222 | {{- end }} 223 | 224 | certData: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT_DATA "" }} 225 | keyData: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY_DATA "" }} 226 | {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA }} 227 | clientCaData: 228 | - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA "" }} 229 | - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT_DATA "" }} 230 | {{- end }} 231 | 232 | # This client section is used to configure the TLS clients within 233 | # the Temporal Cluster (specifically the Worker role) that connect to the Frontend service 234 | client: 235 | serverName: {{ default .Env.TEMPORAL_TLS_FRONTEND_SERVER_NAME "" }} 236 | disableHostVerification: {{ default .Env.TEMPORAL_TLS_FRONTEND_DISABLE_HOST_VERIFICATION "false"}} 237 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 238 | rootCaFiles: 239 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 240 | {{- end }} 241 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 242 | rootCaData: 243 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 244 | {{- end }} 245 | {{- if .Env.STATSD_ENDPOINT }} 246 | metrics: 247 | statsd: 248 | hostPort: {{ .Env.STATSD_ENDPOINT }} 249 | prefix: "temporal" 250 | {{- else if .Env.PROMETHEUS_ENDPOINT }} 251 | metrics: 252 | prometheus: 253 | timerType: {{ default .Env.PROMETHEUS_TIMER_TYPE "histogram" }} 254 | listenAddress: "{{ .Env.PROMETHEUS_ENDPOINT }}" 255 | {{- end }} 256 | authorization: 257 | jwtKeyProvider: 258 | keySourceURIs: 259 | {{- if .Env.TEMPORAL_JWT_KEY_SOURCE1 }} 260 | - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE1 "" }} 261 | {{- end }} 262 | {{- if .Env.TEMPORAL_JWT_KEY_SOURCE2 }} 263 | - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE2 "" }} 264 | {{- end }} 265 | refreshInterval: {{ default .Env.TEMPORAL_JWT_KEY_REFRESH "1m" }} 266 | permissionsClaimName: {{ default .Env.TEMPORAL_JWT_PERMISSIONS_CLAIM "permissions" }} 267 | authorizer: {{ default .Env.TEMPORAL_AUTH_AUTHORIZER "" }} 268 | claimMapper: {{ default .Env.TEMPORAL_AUTH_CLAIM_MAPPER "" }} 269 | 270 | {{- $temporalGrpcPort := default .Env.FRONTEND_GRPC_PORT "7233" }} 271 | {{- $temporalHTTPPort := default .Env.FRONTEND_HTTP_PORT "7243" }} 272 | services: 273 | frontend: 274 | rpc: 275 | grpcPort: {{ $temporalGrpcPort }} 276 | membershipPort: {{ default .Env.FRONTEND_MEMBERSHIP_PORT "6933" }} 277 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 278 | httpPort: {{ $temporalHTTPPort }} 279 | 280 | {{- if .Env.USE_INTERNAL_FRONTEND }} 281 | internal-frontend: 282 | rpc: 283 | grpcPort: {{ default .Env.INTERNAL_FRONTEND_GRPC_PORT "7236" }} 284 | membershipPort: {{ default .Env.INTERNAL_FRONTEND_MEMBERSHIP_PORT "6936" }} 285 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 286 | {{- end }} 287 | 288 | matching: 289 | rpc: 290 | grpcPort: {{ default .Env.MATCHING_GRPC_PORT "7235" }} 291 | membershipPort: {{ default .Env.MATCHING_MEMBERSHIP_PORT "6935" }} 292 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 293 | 294 | history: 295 | rpc: 296 | grpcPort: {{ default .Env.HISTORY_GRPC_PORT "7234" }} 297 | membershipPort: {{ default .Env.HISTORY_MEMBERSHIP_PORT "6934" }} 298 | bindOnIP: "{{ default 
.Env.BIND_ON_IP "127.0.0.1" }}" 299 | 300 | worker: 301 | rpc: 302 | grpcPort: {{ default .Env.WORKER_GRPC_PORT "7239" }} 303 | membershipPort: {{ default .Env.WORKER_MEMBERSHIP_PORT "6939" }} 304 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 305 | 306 | clusterMetadata: 307 | enableGlobalNamespace: false 308 | failoverVersionIncrement: 10 309 | masterClusterName: "active" 310 | currentClusterName: "active" 311 | clusterInformation: 312 | active: 313 | enabled: true 314 | initialFailoverVersion: 1 315 | rpcName: "frontend" 316 | rpcAddress: {{ (print "127.0.0.1:" $temporalGrpcPort) }} 317 | httpAddress: {{ (print "127.0.0.1:" $temporalHTTPPort) }} 318 | 319 | dcRedirectionPolicy: 320 | policy: "noop" 321 | 322 | archival: 323 | history: 324 | state: "enabled" 325 | enableRead: true 326 | provider: 327 | filestore: 328 | fileMode: "0666" 329 | dirMode: "0766" 330 | visibility: 331 | state: "enabled" 332 | enableRead: true 333 | provider: 334 | filestore: 335 | fileMode: "0666" 336 | dirMode: "0766" 337 | 338 | namespaceDefaults: 339 | archival: 340 | history: 341 | state: "enabled" 342 | URI: "file:///tmp/temporal_archival/development" 343 | visibility: 344 | state: "enabled" 345 | URI: "file:///tmp/temporal_vis_archival/development" 346 | 347 | {{- if or (.Env.USE_INTERNAL_FRONTEND) (and (not .Env.TEMPORAL_AUTH_AUTHORIZER) (not .Env.TEMPORAL_AUTH_CLAIM_MAPPER)) }} 348 | {{/* publicClient is not needed with internal frontend, or if not using authorizer + claim mapper */}} 349 | {{- else }} 350 | {{ $publicIp := default .Env.BIND_ON_IP "127.0.0.1" -}} 351 | {{- $defaultPublicHostPost := (print $publicIp ":" $temporalGrpcPort) -}} 352 | publicClient: 353 | hostPort: "{{ default .Env.PUBLIC_FRONTEND_ADDRESS $defaultPublicHostPost }}" 354 | {{- end }} 355 | 356 | dynamicConfigClient: 357 | filepath: "{{ default .Env.DYNAMIC_CONFIG_FILE_PATH "/etc/temporal/config/dynamicconfig/docker.yaml" }}" 358 | pollInterval: "60s" -------------------------------------------------------------------------------- /template/replication_template_c1.yaml: -------------------------------------------------------------------------------- 1 | log: 2 | stdout: true 3 | level: {{ default .Env.LOG_LEVEL "warn" }} 4 | 5 | persistence: 6 | numHistoryShards: {{ default .Env.NUM_HISTORY_SHARDS "4" }} 7 | defaultStore: default 8 | {{- $es := default .Env.ENABLE_ES "false" | lower -}} 9 | {{- if eq $es "true" }} 10 | visibilityStore: es-visibility 11 | {{- else }} 12 | visibilityStore: visibility 13 | {{- end }} 14 | datastores: 15 | {{- $db := default .Env.DB "cassandra" | lower -}} 16 | {{- if eq $db "cassandra" }} 17 | default: 18 | cassandra: 19 | hosts: "{{ default .Env.CASSANDRA_SEEDS "" }}" 20 | keyspace: "{{ default .Env.KEYSPACE "temporal" }}" 21 | user: "{{ default .Env.CASSANDRA_USER "" }}" 22 | password: "{{ default .Env.CASSANDRA_PASSWORD "" }}" 23 | {{- if .Env.CASSANDRA_ALLOWED_AUTHENTICATORS }} 24 | allowedAuthenticators: {{ range split .Env.CASSANDRA_ALLOWED_AUTHENTICATORS "," }} 25 | - {{trim .}} 26 | {{- end }} 27 | {{- end }} 28 | port: {{ default .Env.CASSANDRA_PORT "9042" }} 29 | maxConns: {{ default .Env.CASSANDRA_MAX_CONNS "20" }} 30 | tls: 31 | enabled: {{ default .Env.CASSANDRA_TLS_ENABLED "false" }} 32 | caFile: {{ default .Env.CASSANDRA_CA "" }} 33 | certFile: {{ default .Env.CASSANDRA_CERT "" }} 34 | keyFile: {{ default .Env.CASSANDRA_CERT_KEY "" }} 35 | caData: {{ default .Env.CASSANDRA_CA_DATA "" }} 36 | certData: {{ default .Env.CASSANDRA_CERT_DATA "" }} 37 | keyData: {{ default 
.Env.CASSANDRA_CERT_KEY_DATA "" }} 38 | enableHostVerification: {{ default .Env.CASSANDRA_HOST_VERIFICATION "false" }} 39 | serverName: {{ default .Env.CASSANDRA_HOST_NAME "" }} 40 | {{- if .Env.CASSANDRA_ADDRESS_TRANSLATOR }} 41 | addressTranslator: 42 | translator: {{ default .Env.CASSANDRA_ADDRESS_TRANSLATOR "" }} 43 | {{- if .Env.CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS }} 44 | options: 45 | advertised-hostname: {{ default .Env.CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS "" }} 46 | {{- end }} 47 | {{- end }} 48 | {{- else if eq $db "mysql8" }} 49 | default: 50 | sql: 51 | pluginName: "{{ $db }}" 52 | databaseName: "{{ default .Env.DBNAME "temporal" }}" 53 | connectAddr: "{{ default .Env.MYSQL_SEEDS "" }}:{{ default .Env.DB_PORT "3306" }}" 54 | connectProtocol: "tcp" 55 | user: "{{ default .Env.MYSQL_USER "" }}" 56 | password: "{{ default .Env.MYSQL_PWD "" }}" 57 | {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }} 58 | connectAttributes: 59 | tx_isolation: "'READ-COMMITTED'" 60 | {{- end }} 61 | maxConns: {{ default .Env.SQL_MAX_CONNS "20" }} 62 | maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }} 63 | maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }} 64 | tls: 65 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 66 | caFile: {{ default .Env.SQL_CA "" }} 67 | certFile: {{ default .Env.SQL_CERT "" }} 68 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 69 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 70 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 71 | visibility: 72 | sql: 73 | {{ $visibility_seeds_default := default .Env.MYSQL_SEEDS "" }} 74 | {{ $visibility_seeds := default .Env.VISIBILITY_MYSQL_SEEDS $visibility_seeds_default }} 75 | {{ $visibility_port_default := default .Env.DB_PORT "3306" }} 76 | {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }} 77 | {{ $visibility_user_default := default .Env.MYSQL_USER "" }} 78 | {{ $visibility_user := default .Env.VISIBILITY_MYSQL_USER $visibility_user_default }} 79 | {{ $visibility_pwd_default := default .Env.MYSQL_PWD "" }} 80 | {{ $visibility_pwd := default .Env.VISIBILITY_MYSQL_PWD $visibility_pwd_default }} 81 | pluginName: "{{ $db }}" 82 | databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}" 83 | connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" 84 | connectProtocol: "tcp" 85 | user: "{{ $visibility_user }}" 86 | password: "{{ $visibility_pwd }}" 87 | {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }} 88 | connectAttributes: 89 | tx_isolation: "'READ-COMMITTED'" 90 | {{- end }} 91 | maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }} 92 | maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }} 93 | maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }} 94 | tls: 95 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 96 | caFile: {{ default .Env.SQL_CA "" }} 97 | certFile: {{ default .Env.SQL_CERT "" }} 98 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 99 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 100 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 101 | {{- else if eq $db "postgres12" "postgres12_pgx" }} 102 | default: 103 | sql: 104 | pluginName: "{{ $db }}" 105 | databaseName: "{{ default .Env.DBNAME "temporal" }}" 106 | connectAddr: "{{ default .Env.POSTGRES_SEEDS "" }}:{{ default .Env.DB_PORT "5432" }}" 107 | connectProtocol: "tcp" 108 | user: "{{ default .Env.POSTGRES_USER "" }}" 109 | password: "{{ default .Env.POSTGRES_PWD "" }}" 110 | maxConns: {{ default .Env.SQL_MAX_CONNS "20" }} 111 | 
maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }} 112 | maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }} 113 | tls: 114 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 115 | caFile: {{ default .Env.SQL_CA "" }} 116 | certFile: {{ default .Env.SQL_CERT "" }} 117 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 118 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 119 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 120 | visibility: 121 | sql: 122 | {{ $visibility_seeds_default := default .Env.POSTGRES_SEEDS "" }} 123 | {{ $visibility_seeds := default .Env.VISIBILITY_POSTGRES_SEEDS $visibility_seeds_default }} 124 | {{ $visibility_port_default := default .Env.DB_PORT "5432" }} 125 | {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }} 126 | {{ $visibility_user_default := default .Env.POSTGRES_USER "" }} 127 | {{ $visibility_user := default .Env.VISIBILITY_POSTGRES_USER $visibility_user_default }} 128 | {{ $visibility_pwd_default := default .Env.POSTGRES_PWD "" }} 129 | {{ $visibility_pwd := default .Env.VISIBILITY_POSTGRES_PWD $visibility_pwd_default }} 130 | pluginName: "{{ $db }}" 131 | databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}" 132 | connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" 133 | connectProtocol: "tcp" 134 | user: "{{ $visibility_user }}" 135 | password: "{{ $visibility_pwd }}" 136 | maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }} 137 | maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }} 138 | maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }} 139 | tls: 140 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 141 | caFile: {{ default .Env.SQL_CA "" }} 142 | certFile: {{ default .Env.SQL_CERT "" }} 143 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 144 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 145 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 146 | {{- end }} 147 | {{- if eq $es "true" }} 148 | es-visibility: 149 | elasticsearch: 150 | version: {{ default .Env.ES_VERSION "" }} 151 | url: 152 | scheme: {{ default .Env.ES_SCHEME "http" }} 153 | host: "{{ default .Env.ES_SEEDS "" }}:{{ default .Env.ES_PORT "9200" }}" 154 | username: "{{ default .Env.ES_USER "" }}" 155 | password: "{{ default .Env.ES_PWD "" }}" 156 | indices: 157 | visibility: "{{ default .Env.ES_VIS_INDEX "temporal_visibility_v1_dev" }}" 158 | {{- $es_sec_vis_index := default .Env.ES_SEC_VIS_INDEX "" -}} 159 | {{- if ne $es_sec_vis_index "" }} 160 | secondary_visibility: "{{ $es_sec_vis_index }}" 161 | {{- end }} 162 | {{- end }} 163 | 164 | global: 165 | membership: 166 | maxJoinDuration: 30s 167 | broadcastAddress: "{{ default .Env.TEMPORAL_BROADCAST_ADDRESS "" }}" 168 | pprof: 169 | port: {{ default .Env.PPROF_PORT "0" }} 170 | tls: 171 | refreshInterval: {{ default .Env.TEMPORAL_TLS_REFRESH_INTERVAL "0s" }} 172 | expirationChecks: 173 | warningWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_WARNING_WINDOW "0s" }} 174 | errorWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_ERROR_WINDOW "0s" }} 175 | checkInterval: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_CHECK_INTERVAL "0s" }} 176 | internode: 177 | # This server section configures the TLS certificate that internal temporal 178 | # cluster nodes (history, matching, and internal-frontend) present to other 179 | # clients within the Temporal Cluster. 
180 | server: 181 | requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }} 182 | 183 | certFile: {{ default .Env.TEMPORAL_TLS_SERVER_CERT "" }} 184 | keyFile: {{ default .Env.TEMPORAL_TLS_SERVER_KEY "" }} 185 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 186 | clientCaFiles: 187 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 188 | {{- end }} 189 | 190 | certData: {{ default .Env.TEMPORAL_TLS_SERVER_CERT_DATA "" }} 191 | keyData: {{ default .Env.TEMPORAL_TLS_SERVER_KEY_DATA "" }} 192 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 193 | clientCaData: 194 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 195 | {{- end }} 196 | 197 | # This client section is used to configure the TLS clients within 198 | # the Temporal Cluster that connect to an Internode (history, matching, or 199 | # internal-frontend) 200 | client: 201 | serverName: {{ default .Env.TEMPORAL_TLS_INTERNODE_SERVER_NAME "" }} 202 | disableHostVerification: {{ default .Env.TEMPORAL_TLS_INTERNODE_DISABLE_HOST_VERIFICATION "false"}} 203 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 204 | rootCaFiles: 205 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 206 | {{- end }} 207 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 208 | rootCaData: 209 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 210 | {{- end }} 211 | frontend: 212 | # This server section configures the TLS certificate that the Frontend 213 | # server presents to external clients. 214 | server: 215 | requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }} 216 | certFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT "" }} 217 | keyFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY "" }} 218 | {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT }} 219 | clientCaFiles: 220 | - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT "" }} 221 | - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT "" }} 222 | {{- end }} 223 | 224 | certData: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT_DATA "" }} 225 | keyData: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY_DATA "" }} 226 | {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA }} 227 | clientCaData: 228 | - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA "" }} 229 | - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT_DATA "" }} 230 | {{- end }} 231 | 232 | # This client section is used to configure the TLS clients within 233 | # the Temporal Cluster (specifically the Worker role) that connect to the Frontend service 234 | client: 235 | serverName: {{ default .Env.TEMPORAL_TLS_FRONTEND_SERVER_NAME "" }} 236 | disableHostVerification: {{ default .Env.TEMPORAL_TLS_FRONTEND_DISABLE_HOST_VERIFICATION "false"}} 237 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 238 | rootCaFiles: 239 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 240 | {{- end }} 241 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 242 | rootCaData: 243 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 244 | {{- end }} 245 | {{- if .Env.STATSD_ENDPOINT }} 246 | metrics: 247 | statsd: 248 | hostPort: {{ .Env.STATSD_ENDPOINT }} 249 | prefix: "temporal" 250 | {{- else if .Env.PROMETHEUS_ENDPOINT }} 251 | metrics: 252 | prometheus: 253 | timerType: {{ default .Env.PROMETHEUS_TIMER_TYPE "histogram" }} 254 | listenAddress: "{{ .Env.PROMETHEUS_ENDPOINT }}" 255 | {{- end }} 256 | authorization: 257 | jwtKeyProvider: 258 | keySourceURIs: 259 | {{- if .Env.TEMPORAL_JWT_KEY_SOURCE1 }} 260 | - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE1 "" }} 261 | {{- end }} 262 | {{- if .Env.TEMPORAL_JWT_KEY_SOURCE2 }} 263 | - {{ default 
.Env.TEMPORAL_JWT_KEY_SOURCE2 "" }} 264 | {{- end }} 265 | refreshInterval: {{ default .Env.TEMPORAL_JWT_KEY_REFRESH "1m" }} 266 | permissionsClaimName: {{ default .Env.TEMPORAL_JWT_PERMISSIONS_CLAIM "permissions" }} 267 | authorizer: {{ default .Env.TEMPORAL_AUTH_AUTHORIZER "" }} 268 | claimMapper: {{ default .Env.TEMPORAL_AUTH_CLAIM_MAPPER "" }} 269 | 270 | {{- $temporalGrpcPort := default .Env.FRONTEND_GRPC_PORT "7233" }} 271 | {{- $temporalHTTPPort := default .Env.FRONTEND_HTTP_PORT "7243" }} 272 | services: 273 | frontend: 274 | rpc: 275 | grpcPort: {{ $temporalGrpcPort }} 276 | membershipPort: {{ default .Env.FRONTEND_MEMBERSHIP_PORT "6933" }} 277 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 278 | httpPort: {{ $temporalHTTPPort }} 279 | 280 | {{- if .Env.USE_INTERNAL_FRONTEND }} 281 | internal-frontend: 282 | rpc: 283 | grpcPort: {{ default .Env.INTERNAL_FRONTEND_GRPC_PORT "7236" }} 284 | membershipPort: {{ default .Env.INTERNAL_FRONTEND_MEMBERSHIP_PORT "6936" }} 285 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 286 | {{- end }} 287 | 288 | matching: 289 | rpc: 290 | grpcPort: {{ default .Env.MATCHING_GRPC_PORT "7235" }} 291 | membershipPort: {{ default .Env.MATCHING_MEMBERSHIP_PORT "6935" }} 292 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 293 | 294 | history: 295 | rpc: 296 | grpcPort: {{ default .Env.HISTORY_GRPC_PORT "7234" }} 297 | membershipPort: {{ default .Env.HISTORY_MEMBERSHIP_PORT "6934" }} 298 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 299 | 300 | worker: 301 | rpc: 302 | grpcPort: {{ default .Env.WORKER_GRPC_PORT "7239" }} 303 | membershipPort: {{ default .Env.WORKER_MEMBERSHIP_PORT "6939" }} 304 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 305 | 306 | clusterMetadata: 307 | enableGlobalNamespace: true 308 | failoverVersionIncrement: 1000000 309 | masterClusterName: "c1" 310 | currentClusterName: "c1" 311 | clusterInformation: 312 | c1: 313 | enabled: true 314 | initialFailoverVersion: 100 315 | rpcAddress: {{ (print "127.0.0.1:" $temporalGrpcPort) }} 316 | httpAddress: {{ (print "127.0.0.1:" $temporalHTTPPort) }} 317 | 318 | dcRedirectionPolicy: 319 | policy: "noop" 320 | 321 | archival: 322 | history: 323 | state: "enabled" 324 | enableRead: true 325 | provider: 326 | filestore: 327 | fileMode: "0666" 328 | dirMode: "0766" 329 | visibility: 330 | state: "enabled" 331 | enableRead: true 332 | provider: 333 | filestore: 334 | fileMode: "0666" 335 | dirMode: "0766" 336 | 337 | namespaceDefaults: 338 | archival: 339 | history: 340 | state: "disabled" 341 | URI: "file:///tmp/temporal_archival/development" 342 | visibility: 343 | state: "disabled" 344 | URI: "file:///tmp/temporal_vis_archival/development" 345 | 346 | {{- if or (.Env.USE_INTERNAL_FRONTEND) (and (not .Env.TEMPORAL_AUTH_AUTHORIZER) (not .Env.TEMPORAL_AUTH_CLAIM_MAPPER)) }} 347 | {{/* publicClient is not needed with internal frontend, or if not using authorizer + claim mapper */}} 348 | {{- else }} 349 | {{ $publicIp := default .Env.BIND_ON_IP "127.0.0.1" -}} 350 | {{- $defaultPublicHostPost := (print $publicIp ":" $temporalGrpcPort) -}} 351 | publicClient: 352 | hostPort: "{{ default .Env.PUBLIC_FRONTEND_ADDRESS $defaultPublicHostPost }}" 353 | {{- end }} 354 | 355 | dynamicConfigClient: 356 | filepath: "{{ default .Env.DYNAMIC_CONFIG_FILE_PATH "/etc/temporal/config/dynamicconfig/docker.yaml" }}" 357 | pollInterval: "60s" -------------------------------------------------------------------------------- /template/replication_template_c2.yaml: 
-------------------------------------------------------------------------------- 1 | log: 2 | stdout: true 3 | level: {{ default .Env.LOG_LEVEL "warn" }} 4 | 5 | persistence: 6 | numHistoryShards: {{ default .Env.NUM_HISTORY_SHARDS "4" }} 7 | defaultStore: default 8 | {{- $es := default .Env.ENABLE_ES "false" | lower -}} 9 | {{- if eq $es "true" }} 10 | visibilityStore: es-visibility 11 | {{- else }} 12 | visibilityStore: visibility 13 | {{- end }} 14 | datastores: 15 | {{- $db := default .Env.DB "cassandra" | lower -}} 16 | {{- if eq $db "cassandra" }} 17 | default: 18 | cassandra: 19 | hosts: "{{ default .Env.CASSANDRA_SEEDS "" }}" 20 | keyspace: "{{ default .Env.KEYSPACE "temporal" }}" 21 | user: "{{ default .Env.CASSANDRA_USER "" }}" 22 | password: "{{ default .Env.CASSANDRA_PASSWORD "" }}" 23 | {{- if .Env.CASSANDRA_ALLOWED_AUTHENTICATORS }} 24 | allowedAuthenticators: {{ range split .Env.CASSANDRA_ALLOWED_AUTHENTICATORS "," }} 25 | - {{trim .}} 26 | {{- end }} 27 | {{- end }} 28 | port: {{ default .Env.CASSANDRA_PORT "9042" }} 29 | maxConns: {{ default .Env.CASSANDRA_MAX_CONNS "20" }} 30 | tls: 31 | enabled: {{ default .Env.CASSANDRA_TLS_ENABLED "false" }} 32 | caFile: {{ default .Env.CASSANDRA_CA "" }} 33 | certFile: {{ default .Env.CASSANDRA_CERT "" }} 34 | keyFile: {{ default .Env.CASSANDRA_CERT_KEY "" }} 35 | caData: {{ default .Env.CASSANDRA_CA_DATA "" }} 36 | certData: {{ default .Env.CASSANDRA_CERT_DATA "" }} 37 | keyData: {{ default .Env.CASSANDRA_CERT_KEY_DATA "" }} 38 | enableHostVerification: {{ default .Env.CASSANDRA_HOST_VERIFICATION "false" }} 39 | serverName: {{ default .Env.CASSANDRA_HOST_NAME "" }} 40 | {{- if .Env.CASSANDRA_ADDRESS_TRANSLATOR }} 41 | addressTranslator: 42 | translator: {{ default .Env.CASSANDRA_ADDRESS_TRANSLATOR "" }} 43 | {{- if .Env.CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS }} 44 | options: 45 | advertised-hostname: {{ default .Env.CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS "" }} 46 | {{- end }} 47 | {{- end }} 48 | {{- else if eq $db "mysql8" }} 49 | default: 50 | sql: 51 | pluginName: "{{ $db }}" 52 | databaseName: "{{ default .Env.DBNAME "temporal" }}" 53 | connectAddr: "{{ default .Env.MYSQL_SEEDS "" }}:{{ default .Env.DB_PORT "3306" }}" 54 | connectProtocol: "tcp" 55 | user: "{{ default .Env.MYSQL_USER "" }}" 56 | password: "{{ default .Env.MYSQL_PWD "" }}" 57 | {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }} 58 | connectAttributes: 59 | tx_isolation: "'READ-COMMITTED'" 60 | {{- end }} 61 | maxConns: {{ default .Env.SQL_MAX_CONNS "20" }} 62 | maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }} 63 | maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }} 64 | tls: 65 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 66 | caFile: {{ default .Env.SQL_CA "" }} 67 | certFile: {{ default .Env.SQL_CERT "" }} 68 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 69 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 70 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 71 | visibility: 72 | sql: 73 | {{ $visibility_seeds_default := default .Env.MYSQL_SEEDS "" }} 74 | {{ $visibility_seeds := default .Env.VISIBILITY_MYSQL_SEEDS $visibility_seeds_default }} 75 | {{ $visibility_port_default := default .Env.DB_PORT "3306" }} 76 | {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }} 77 | {{ $visibility_user_default := default .Env.MYSQL_USER "" }} 78 | {{ $visibility_user := default .Env.VISIBILITY_MYSQL_USER $visibility_user_default }} 79 | {{ $visibility_pwd_default := default .Env.MYSQL_PWD "" }} 80 | {{ 
$visibility_pwd := default .Env.VISIBILITY_MYSQL_PWD $visibility_pwd_default }} 81 | pluginName: "{{ $db }}" 82 | databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}" 83 | connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" 84 | connectProtocol: "tcp" 85 | user: "{{ $visibility_user }}" 86 | password: "{{ $visibility_pwd }}" 87 | {{- if .Env.MYSQL_TX_ISOLATION_COMPAT }} 88 | connectAttributes: 89 | tx_isolation: "'READ-COMMITTED'" 90 | {{- end }} 91 | maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }} 92 | maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }} 93 | maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }} 94 | tls: 95 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 96 | caFile: {{ default .Env.SQL_CA "" }} 97 | certFile: {{ default .Env.SQL_CERT "" }} 98 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 99 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 100 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 101 | {{- else if eq $db "postgres12" "postgres12_pgx" }} 102 | default: 103 | sql: 104 | pluginName: "{{ $db }}" 105 | databaseName: "{{ default .Env.DBNAME "temporal" }}" 106 | connectAddr: "{{ default .Env.POSTGRES_SEEDS "" }}:{{ default .Env.DB_PORT "5432" }}" 107 | connectProtocol: "tcp" 108 | user: "{{ default .Env.POSTGRES_USER "" }}" 109 | password: "{{ default .Env.POSTGRES_PWD "" }}" 110 | maxConns: {{ default .Env.SQL_MAX_CONNS "20" }} 111 | maxIdleConns: {{ default .Env.SQL_MAX_IDLE_CONNS "20" }} 112 | maxConnLifetime: {{ default .Env.SQL_MAX_CONN_TIME "1h" }} 113 | tls: 114 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 115 | caFile: {{ default .Env.SQL_CA "" }} 116 | certFile: {{ default .Env.SQL_CERT "" }} 117 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 118 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 119 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 120 | visibility: 121 | sql: 122 | {{ $visibility_seeds_default := default .Env.POSTGRES_SEEDS "" }} 123 | {{ $visibility_seeds := default .Env.VISIBILITY_POSTGRES_SEEDS $visibility_seeds_default }} 124 | {{ $visibility_port_default := default .Env.DB_PORT "5432" }} 125 | {{ $visibility_port := default .Env.VISIBILITY_DB_PORT $visibility_port_default }} 126 | {{ $visibility_user_default := default .Env.POSTGRES_USER "" }} 127 | {{ $visibility_user := default .Env.VISIBILITY_POSTGRES_USER $visibility_user_default }} 128 | {{ $visibility_pwd_default := default .Env.POSTGRES_PWD "" }} 129 | {{ $visibility_pwd := default .Env.VISIBILITY_POSTGRES_PWD $visibility_pwd_default }} 130 | pluginName: "{{ $db }}" 131 | databaseName: "{{ default .Env.VISIBILITY_DBNAME "temporal_visibility" }}" 132 | connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" 133 | connectProtocol: "tcp" 134 | user: "{{ $visibility_user }}" 135 | password: "{{ $visibility_pwd }}" 136 | maxConns: {{ default .Env.SQL_VIS_MAX_CONNS "10" }} 137 | maxIdleConns: {{ default .Env.SQL_VIS_MAX_IDLE_CONNS "10" }} 138 | maxConnLifetime: {{ default .Env.SQL_VIS_MAX_CONN_TIME "1h" }} 139 | tls: 140 | enabled: {{ default .Env.SQL_TLS_ENABLED "false" }} 141 | caFile: {{ default .Env.SQL_CA "" }} 142 | certFile: {{ default .Env.SQL_CERT "" }} 143 | keyFile: {{ default .Env.SQL_CERT_KEY "" }} 144 | enableHostVerification: {{ default .Env.SQL_HOST_VERIFICATION "false" }} 145 | serverName: {{ default .Env.SQL_HOST_NAME "" }} 146 | {{- end }} 147 | {{- if eq $es "true" }} 148 | es-visibility: 149 | elasticsearch: 150 | version: {{ default 
.Env.ES_VERSION "" }} 151 | url: 152 | scheme: {{ default .Env.ES_SCHEME "http" }} 153 | host: "{{ default .Env.ES_SEEDS "" }}:{{ default .Env.ES_PORT "9200" }}" 154 | username: "{{ default .Env.ES_USER "" }}" 155 | password: "{{ default .Env.ES_PWD "" }}" 156 | indices: 157 | visibility: "{{ default .Env.ES_VIS_INDEX "temporal_visibility_v1_dev" }}" 158 | {{- $es_sec_vis_index := default .Env.ES_SEC_VIS_INDEX "" -}} 159 | {{- if ne $es_sec_vis_index "" }} 160 | secondary_visibility: "{{ $es_sec_vis_index }}" 161 | {{- end }} 162 | {{- end }} 163 | 164 | global: 165 | membership: 166 | maxJoinDuration: 30s 167 | broadcastAddress: "{{ default .Env.TEMPORAL_BROADCAST_ADDRESS "" }}" 168 | pprof: 169 | port: {{ default .Env.PPROF_PORT "0" }} 170 | tls: 171 | refreshInterval: {{ default .Env.TEMPORAL_TLS_REFRESH_INTERVAL "0s" }} 172 | expirationChecks: 173 | warningWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_WARNING_WINDOW "0s" }} 174 | errorWindow: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_ERROR_WINDOW "0s" }} 175 | checkInterval: {{ default .Env.TEMPORAL_TLS_EXPIRATION_CHECKS_CHECK_INTERVAL "0s" }} 176 | internode: 177 | # This server section configures the TLS certificate that internal temporal 178 | # cluster nodes (history, matching, and internal-frontend) present to other 179 | # clients within the Temporal Cluster. 180 | server: 181 | requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }} 182 | 183 | certFile: {{ default .Env.TEMPORAL_TLS_SERVER_CERT "" }} 184 | keyFile: {{ default .Env.TEMPORAL_TLS_SERVER_KEY "" }} 185 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 186 | clientCaFiles: 187 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 188 | {{- end }} 189 | 190 | certData: {{ default .Env.TEMPORAL_TLS_SERVER_CERT_DATA "" }} 191 | keyData: {{ default .Env.TEMPORAL_TLS_SERVER_KEY_DATA "" }} 192 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 193 | clientCaData: 194 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 195 | {{- end }} 196 | 197 | # This client section is used to configure the TLS clients within 198 | # the Temporal Cluster that connect to an Internode (history, matching, or 199 | # internal-frontend) 200 | client: 201 | serverName: {{ default .Env.TEMPORAL_TLS_INTERNODE_SERVER_NAME "" }} 202 | disableHostVerification: {{ default .Env.TEMPORAL_TLS_INTERNODE_DISABLE_HOST_VERIFICATION "false"}} 203 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 204 | rootCaFiles: 205 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 206 | {{- end }} 207 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 208 | rootCaData: 209 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 210 | {{- end }} 211 | frontend: 212 | # This server section configures the TLS certificate that the Frontend 213 | # server presents to external clients. 
214 | server: 215 | requireClientAuth: {{ default .Env.TEMPORAL_TLS_REQUIRE_CLIENT_AUTH "false" }} 216 | certFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT "" }} 217 | keyFile: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY "" }} 218 | {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT }} 219 | clientCaFiles: 220 | - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT "" }} 221 | - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT "" }} 222 | {{- end }} 223 | 224 | certData: {{ default .Env.TEMPORAL_TLS_FRONTEND_CERT_DATA "" }} 225 | keyData: {{ default .Env.TEMPORAL_TLS_FRONTEND_KEY_DATA "" }} 226 | {{- if .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA }} 227 | clientCaData: 228 | - {{ default .Env.TEMPORAL_TLS_CLIENT1_CA_CERT_DATA "" }} 229 | - {{ default .Env.TEMPORAL_TLS_CLIENT2_CA_CERT_DATA "" }} 230 | {{- end }} 231 | 232 | # This client section is used to configure the TLS clients within 233 | # the Temporal Cluster (specifically the Worker role) that connect to the Frontend service 234 | client: 235 | serverName: {{ default .Env.TEMPORAL_TLS_FRONTEND_SERVER_NAME "" }} 236 | disableHostVerification: {{ default .Env.TEMPORAL_TLS_FRONTEND_DISABLE_HOST_VERIFICATION "false"}} 237 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT }} 238 | rootCaFiles: 239 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT "" }} 240 | {{- end }} 241 | {{- if .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA }} 242 | rootCaData: 243 | - {{ default .Env.TEMPORAL_TLS_SERVER_CA_CERT_DATA "" }} 244 | {{- end }} 245 | {{- if .Env.STATSD_ENDPOINT }} 246 | metrics: 247 | statsd: 248 | hostPort: {{ .Env.STATSD_ENDPOINT }} 249 | prefix: "temporal" 250 | {{- else if .Env.PROMETHEUS_ENDPOINT }} 251 | metrics: 252 | prometheus: 253 | timerType: {{ default .Env.PROMETHEUS_TIMER_TYPE "histogram" }} 254 | listenAddress: "{{ .Env.PROMETHEUS_ENDPOINT }}" 255 | {{- end }} 256 | authorization: 257 | jwtKeyProvider: 258 | keySourceURIs: 259 | {{- if .Env.TEMPORAL_JWT_KEY_SOURCE1 }} 260 | - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE1 "" }} 261 | {{- end }} 262 | {{- if .Env.TEMPORAL_JWT_KEY_SOURCE2 }} 263 | - {{ default .Env.TEMPORAL_JWT_KEY_SOURCE2 "" }} 264 | {{- end }} 265 | refreshInterval: {{ default .Env.TEMPORAL_JWT_KEY_REFRESH "1m" }} 266 | permissionsClaimName: {{ default .Env.TEMPORAL_JWT_PERMISSIONS_CLAIM "permissions" }} 267 | authorizer: {{ default .Env.TEMPORAL_AUTH_AUTHORIZER "" }} 268 | claimMapper: {{ default .Env.TEMPORAL_AUTH_CLAIM_MAPPER "" }} 269 | 270 | {{- $temporalGrpcPort := default .Env.FRONTEND_GRPC_PORT "7233" }} 271 | {{- $temporalHTTPPort := default .Env.FRONTEND_HTTP_PORT "7243" }} 272 | services: 273 | frontend: 274 | rpc: 275 | grpcPort: {{ $temporalGrpcPort }} 276 | membershipPort: {{ default .Env.FRONTEND_MEMBERSHIP_PORT "6933" }} 277 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 278 | httpPort: {{ $temporalHTTPPort }} 279 | 280 | {{- if .Env.USE_INTERNAL_FRONTEND }} 281 | internal-frontend: 282 | rpc: 283 | grpcPort: {{ default .Env.INTERNAL_FRONTEND_GRPC_PORT "7236" }} 284 | membershipPort: {{ default .Env.INTERNAL_FRONTEND_MEMBERSHIP_PORT "6936" }} 285 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 286 | {{- end }} 287 | 288 | matching: 289 | rpc: 290 | grpcPort: {{ default .Env.MATCHING_GRPC_PORT "7235" }} 291 | membershipPort: {{ default .Env.MATCHING_MEMBERSHIP_PORT "6935" }} 292 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 293 | 294 | history: 295 | rpc: 296 | grpcPort: {{ default .Env.HISTORY_GRPC_PORT "7234" }} 297 | membershipPort: {{ default .Env.HISTORY_MEMBERSHIP_PORT "6934" }} 298 | bindOnIP: "{{ default 
.Env.BIND_ON_IP "127.0.0.1" }}" 299 | 300 | worker: 301 | rpc: 302 | grpcPort: {{ default .Env.WORKER_GRPC_PORT "7239" }} 303 | membershipPort: {{ default .Env.WORKER_MEMBERSHIP_PORT "6939" }} 304 | bindOnIP: "{{ default .Env.BIND_ON_IP "127.0.0.1" }}" 305 | 306 | clusterMetadata: 307 | enableGlobalNamespace: true 308 | failoverVersionIncrement: 1000000 309 | masterClusterName: "c2" 310 | currentClusterName: "c2" 311 | clusterInformation: 312 | c2: 313 | enabled: true 314 | initialFailoverVersion: 200 315 | rpcAddress: {{ (print "127.0.0.1:" $temporalGrpcPort) }} 316 | httpAddress: {{ (print "127.0.0.1:" $temporalHTTPPort) }} 317 | 318 | dcRedirectionPolicy: 319 | policy: "noop" 320 | 321 | archival: 322 | history: 323 | state: "enabled" 324 | enableRead: true 325 | provider: 326 | filestore: 327 | fileMode: "0666" 328 | dirMode: "0766" 329 | visibility: 330 | state: "enabled" 331 | enableRead: true 332 | provider: 333 | filestore: 334 | fileMode: "0666" 335 | dirMode: "0766" 336 | 337 | namespaceDefaults: 338 | archival: 339 | history: 340 | state: "disabled" 341 | URI: "file:///tmp/temporal_archival/development" 342 | visibility: 343 | state: "disabled" 344 | URI: "file:///tmp/temporal_vis_archival/development" 345 | 346 | {{- if or (.Env.USE_INTERNAL_FRONTEND) (and (not .Env.TEMPORAL_AUTH_AUTHORIZER) (not .Env.TEMPORAL_AUTH_CLAIM_MAPPER)) }} 347 | {{/* publicClient is not needed with internal frontend, or if not using authorizer + claim mapper */}} 348 | {{- else }} 349 | {{ $publicIp := default .Env.BIND_ON_IP "127.0.0.1" -}} 350 | {{- $defaultPublicHostPost := (print $publicIp ":" $temporalGrpcPort) -}} 351 | publicClient: 352 | hostPort: "{{ default .Env.PUBLIC_FRONTEND_ADDRESS $defaultPublicHostPost }}" 353 | {{- end }} 354 | 355 | dynamicConfigClient: 356 | filepath: "{{ default .Env.DYNAMIC_CONFIG_FILE_PATH "/etc/temporal/config/dynamicconfig/docker.yaml" }}" 357 | pollInterval: "60s" --------------------------------------------------------------------------------
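
For reference, the three templates above share the same datastore, TLS, metrics, and service wiring and differ mainly in their clusterMetadata blocks. Below is a minimal sketch of what the two replication templates render to for that block when the relevant env vars are left unset, so the template defaults apply (the values shown are those defaults, not necessarily what the compose files set):

    # replication_template_c1.yaml -> clusterMetadata (template defaults assumed)
    clusterMetadata:
      enableGlobalNamespace: true
      failoverVersionIncrement: 1000000
      masterClusterName: "c1"
      currentClusterName: "c1"
      clusterInformation:
        c1:
          enabled: true
          initialFailoverVersion: 100
          rpcAddress: 127.0.0.1:7233
          httpAddress: 127.0.0.1:7243

    # replication_template_c2.yaml -> clusterMetadata (template defaults assumed)
    clusterMetadata:
      enableGlobalNamespace: true
      failoverVersionIncrement: 1000000
      masterClusterName: "c2"
      currentClusterName: "c2"
      clusterInformation:
        c2:
          enabled: true
          initialFailoverVersion: 200
          rpcAddress: 127.0.0.1:7233
          httpAddress: 127.0.0.1:7243

The important differences from the single-cluster my_config_template.yaml are that enableGlobalNamespace is true, both clusters share the same failoverVersionIncrement, and each cluster has a unique initialFailoverVersion (100 vs 200) smaller than that increment. The clusters only learn about each other at runtime; as a generic example, tctl's `admin cluster upsert-remote-cluster --frontend_address <other-cluster-frontend:port>` registers a remote cluster, though the exact commands this repo uses live in its setup scripts rather than in these templates.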