├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.MD ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── aws ├── Makefile ├── README.md ├── include │ ├── cluster.tpl.yaml │ ├── ebs-csi-driver-trust-policy-template.json │ ├── kubernetes-aws.mk │ └── ssd-storageclass-aws.yaml └── ingress │ ├── istio │ ├── Makefile │ ├── README.md │ └── camunda-values-aws.yaml │ └── nginx │ ├── Makefile │ └── tls │ ├── Makefile │ ├── README.md │ ├── aws-ingress.sh │ └── secure-camunda-setup │ ├── Makefile │ ├── README.md │ ├── component-ssl-narration.png │ └── self-signed-cert.yaml ├── azure ├── Makefile ├── README.md ├── include │ ├── aks.mk │ ├── fileshare.mk │ ├── public-zeebe-ingress.yaml │ └── ssd-storageclass.yaml ├── ingress │ ├── agic │ │ ├── Makefile │ │ ├── README.md │ │ ├── agic.mk │ │ ├── camunda-values.yaml │ │ └── ingress-azure.yaml │ └── nginx │ │ ├── Makefile │ │ └── tls │ │ ├── Makefile │ │ ├── README.md │ │ ├── camunda-values.yaml │ │ ├── entra │ │ ├── Makefile │ │ ├── camunda-values-entraid.tpl.yaml │ │ └── set-env-aks.sh │ │ ├── read-only-file-system │ │ ├── Makefile │ │ ├── README.md │ │ └── camunda-values.yaml │ │ └── secure-zeebe-ingress │ │ ├── Makefile │ │ ├── README.md │ │ ├── camunda-values.yaml │ │ ├── oauth2-ingress.yaml │ │ ├── oauth2-values.yaml │ │ └── public-zeebe-ingress.yaml └── keycloak │ ├── Makefile │ └── README.md ├── backup ├── README.md ├── backup.mk └── backup.sh ├── benchmark ├── Makefile ├── README.md ├── benchmark.mk ├── benchmark.yaml ├── camunda-values.yaml ├── chaosmesh.mk ├── grafana_zeebe_dashboard.png ├── latency.png ├── payload.json └── throughput.png ├── bpmn ├── BenchmarkProcess.bpmn ├── deploy-models.mk ├── hello_webhook.bpmn ├── simple_inbound_connector.bpmn ├── simple_openai.bpmn └── simple_sendgrid.bpmn ├── cfssl ├── Makefile ├── README.md ├── cfssl-certs.mk └── template │ ├── ca.json │ ├── csr.json │ ├── csr.tpl.yaml │ └── server-signing-config.json ├── connectors └── connectors.mk ├── default ├── Makefile ├── README.md └── 
camunda-values.yaml ├── development ├── Makefile ├── README.md ├── camunda-values-2.yaml ├── camunda-values-with-ingress.yaml └── camunda-values.yaml ├── docs └── images │ ├── actuator_disabled.png │ ├── actuator_enabled.png │ ├── keycloak_ssl_required.png │ ├── kibana_dev_console.png │ └── webmodeler-zeebe-connect.png ├── echo-server ├── deployment.yaml ├── echo.mk └── ingress.yaml ├── google ├── Makefile ├── README.md ├── benchmark │ ├── Makefile │ ├── README.md │ ├── chaos-mesh-template.yaml │ └── chaos-network-brokers.yaml ├── include │ ├── kubernetes-gke.mk │ └── ssd-storageclass-gke.yaml ├── ingress │ └── nginx │ │ ├── Makefile │ │ └── tls │ │ ├── Makefile │ │ ├── README.md │ │ └── set-env-gcp.sh ├── keycloak │ ├── Makefile │ ├── README.md │ └── camunda │ │ ├── Makefile │ │ └── README.md └── multi-region │ └── active-active │ ├── README.md │ ├── dns-lb.yaml │ ├── region0 │ ├── Makefile │ └── camunda-values.yaml │ ├── region1 │ ├── Makefile │ └── camunda-values.yaml │ ├── setup-zeebe.py │ └── teardown-zeebe.py ├── high-available-webapps ├── Makefile ├── README.md ├── camunda-values.yaml ├── identity.png ├── operate-ingress.tpl.yaml ├── operate-tasklist.png └── optimize.png ├── include ├── README.md ├── cacerts_staging ├── camunda.mk ├── cert-manager.mk ├── ingress-nginx.mk ├── letsencrypt-prod.yaml ├── letsencrypt-stage.yaml ├── rebalance-leader-job.tpl.yaml ├── values.yaml ├── zbctl-deploy-job-with-auth.yaml └── zbctl-deploy-job.yaml ├── ingress-nginx ├── Camunda 8 and Nginx.png ├── Makefile ├── README.md ├── camunda-values-nginx-tls-secure.yaml ├── camunda-values-with-metrics.yaml ├── camunda-values.old.yaml └── camunda-values.yaml ├── istio ├── README.md ├── gateway.tpl.yaml ├── identity.tpl.yaml ├── istio.mk ├── keycloak.tpl.yaml ├── operate.tpl.yaml ├── optimize.tpl.yaml └── tasklist.tpl.yaml ├── keycloak ├── README.md ├── camunda-values-ext-keycloak.tpl.yaml ├── keycloak-secrets.tpl.yaml ├── keycloak-values.tpl.yaml └── keycloak.mk ├── kibana ├── 
Makefile ├── README.md └── kibana.mk ├── kind ├── Makefile ├── README.md ├── include │ ├── config.yaml │ ├── deploy-ingress.yml │ ├── kubernetes-kind.mk │ ├── nginx_ingress_values.yaml │ └── ssd-storageclass-kind.yaml └── ingress │ └── nginx │ ├── Makefile │ └── tls │ ├── Makefile │ ├── README.md │ └── camunda-values.yaml ├── metrics ├── Makefile ├── README.md ├── grafana-load-balancer.yml ├── metrics.mk └── prometheus-operator-values.yml ├── multi-region └── dual-region │ ├── Makefile │ ├── README.md │ ├── camunda-values.d │ ├── cluster-size.yaml │ ├── connectors-disabled.yaml │ ├── connectors-outbound-only.yaml │ ├── dual-region.yaml │ ├── elasticsearch-2.5-region-stretch-cluster.yaml │ ├── elasticsearch-disabled.yaml │ ├── elasticsearch-only.yaml │ ├── elasticsearch-version.yaml │ ├── identity-disabled.yaml │ ├── ingress.yaml │ ├── prometheus-service-monitor.yaml │ └── zeebe-debug.yaml │ ├── export_environment_prerequisites.sh │ ├── generate_zeebe_helm_values.sh │ ├── region0 │ ├── Makefile │ ├── config.mk │ └── region0.yaml │ ├── region1 │ ├── Makefile │ ├── config.mk │ └── region1.yaml │ ├── region2 │ ├── Makefile │ ├── config.mk │ └── region2.yaml │ ├── service-per-broker-single-region.yaml │ ├── service-per-broker-template.yaml │ └── service-per-broker.yaml ├── oauth2-proxy ├── README.md ├── oauth2-ingress.yaml ├── oauth2-values.tpl.yaml ├── oauth2.mk └── zeebe-oauth2-ingress.yaml ├── openshift ├── Makefile ├── README.md ├── camunda-identity-edge-routes.yaml ├── certs │ ├── backend.p12 │ ├── backend.test.crt │ ├── backend.test.csr │ ├── backend.test.ext │ ├── dave.test.key │ ├── frontend.test.crt │ ├── frontend.test.csr │ ├── frontend.test.ext │ ├── keycloak.keystore.jks │ ├── keycloak.truststore.jks │ ├── myCA.key │ ├── myCA.pem │ └── myCA.srl ├── openshift.mk ├── patch.sh ├── set-env-openshift.sh └── values │ ├── values-dev.yaml │ ├── values-identity-edge.yaml │ └── values-identity-reencrypt.yaml ├── operate ├── Makefile ├── README.md └── include │ ├── 
camunda-ingress.tpl.yaml │ ├── configmap-importer-archiver.tpl.yaml │ ├── configmap-webapp.tpl.yaml │ ├── deployment-importer-archiver.tpl.yaml │ ├── deployment-webapp.tpl.yaml │ ├── operate-ingress.tpl.yaml │ ├── operate.mk │ ├── service-importer-archiver.tpl.yaml │ └── service-webapp.tpl.yaml ├── renovate.json ├── spring-actuator ├── Makefile ├── README.md ├── actuator.mk ├── application.yaml └── camunda-values.yaml ├── tasklist ├── Makefile ├── README.md └── include │ ├── camunda-ingress.tpl.yaml │ ├── configmap-importer-archiver.tpl.yaml │ ├── configmap-webapp.tpl.yaml │ ├── deployment-importer-archiver.tpl.yaml │ ├── deployment-webapp.tpl.yaml │ ├── service-importer-archiver.tpl.yaml │ ├── service-webapp.tpl.yaml │ ├── tasklist-ingress.tpl.yaml │ └── tasklist.mk └── tls ├── Makefile ├── README.md ├── keystore ├── README.md └── keystore.mk ├── selfsigned ├── README.md ├── cert.tpl.cnf ├── san.tpl.ext └── self-signed-cert.mk ├── tls.mk ├── zbctl-plaintext-job.tpl.yaml └── zbctl-tls-job.tpl.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | workspace.code-workspace 3 | ingress-azure/camunda-values-azure.yaml 4 | ingress-aws/camunda-values-aws.yaml 5 | ingress-nginx/camunda-values-nginx.yaml 6 | azure/camunda-values-azure.yaml 7 | azure/ingress/nginx/tls/camunda-values-nginx-tls.yaml 8 | azure/ingress/nginx/tls/dns/camunda-values-nginx-tls.yaml 9 | azure/ingress/nginx/tls/secure-zeebe-ingress/camunda-values-nginx-tls.yaml 10 | azure/ingress/nginx/tls/camunda-platform 11 | azure/ingress/nginx/tls/camunda-values-nginx-all.yaml 12 | google/ingress/nginx/camunda-values-ingress.yaml 13 | google/ingress/nginx/tls/camunda-values-nginx.yaml 14 | google/ingress/nginx/tls/camunda-values-nginx-all.yaml 15 | google/keycloak/camunda/camunda-values-ext-keycloak.yaml 16 | google/external-keycloak/keycloak-secrets.yaml 17 | aws/cluster.yaml 18 | aws/camunda-values-aws.yaml 19 | aws/ebs-csi-driver-trust-policy.json 20 
| aws/ingress/nginx/tls/cluster.yaml 21 | aws/ingress/nginx/camunda-values-ingress.yaml 22 | aws/ingress/nginx/tls/camunda-values-nginx-aws.yaml 23 | aws/ingress/nginx/tls/camunda-values-ingress-tls-aws.yaml 24 | kind/ingress/nginx/tls/camunda-values-nginx-localhost.yaml 25 | kind/camunda-values-kind.yaml 26 | kind/ingress/nginx/camunda-values-kind.yaml 27 | cfssl/csr.yaml 28 | cfssl/certs 29 | keycloak/keycloak-secrets.yaml 30 | keycloak/keycloak-values.yaml 31 | **/oauth2-values.yaml 32 | google/multi-region/active-active/generated 33 | **/operate-configmap-importer-archiver.yaml 34 | **/operate-configmap-webapp.yaml 35 | **/operate-deployment-importer-archiver.yaml 36 | **/operate-deployment-webapp.yaml 37 | **/operate-service-webapp.yaml 38 | **/operate-operate-ingress.yaml 39 | **/operate-camunda-ingress.yaml 40 | **/tasklist-configmap-importer-archiver.yaml 41 | **/tasklist-configmap-webapp.yaml 42 | **/tasklist-deployment-importer-archiver.yaml 43 | **/tasklist-deployment-webapp.yaml 44 | **/tasklist-service-webapp.yaml 45 | **/tasklist-tasklist-ingress.yaml 46 | **/tasklist-camunda-ingress.yaml 47 | **/operate-ingress.yaml 48 | **/camunda-values-openshift.yaml 49 | **/certs/ 50 | 51 | 52 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## In progress / Known Issues 2 | 3 | ### TODO: Fix development `standard` persistent volume type 4 | 5 | Currently, the `camunda-values.yaml` in the development profile specifies a `standard` storage class for Kind but that 6 | breaks when deploying to the cloud. As a temp hack, there's a `camunda-values-2.yaml` with `ssd` storage class 7 | 8 | ### TODO: Fix issue with applying AWS ebs-csi addon 9 | 10 | After the `create-ebs-csi-addon` target in [kubernetes-aws.mk](aws/include/kubernetes-aws.mk) file, you'll see an error 11 | because there's no delay to allow the sa account to complete.
12 | 13 | To fix this, run `make annotate-ebs-csi-sa restart-ebs-csi-controller` 14 | 15 | ```shell 16 | kubectl annotate serviceaccount ebs-csi-controller-sa \ 17 | -n kube-system \ 18 | eks.amazonaws.com/role-arn=arn:aws:iam::487945211782:role/AmazonEKS_EBS_CSI_DriverRole_Cluster_dave-camunda-01 \ 19 | --overwrite 20 | Error from server (NotFound): serviceaccounts "ebs-csi-controller-sa" not found 21 | make: *** [annotate-ebs-csi-sa] Error 1 22 | ``` 23 | 24 | ## May 16, 2023 25 | 26 | - Many improvements made in preparation for the Community Summit Workshop 27 | - Updated profiles to use Camunda version 8.2.3 Helm Chart by default 28 | - All non-tls profiles are using 8.2.3 of Camunda 8 Platform 29 | - All tls profiles are using 8.3.0-alpha0 of Camunda 8 Platform 30 | - Regression tested each cloud provider development profile 31 | - Regression tested each cloud provider `ingress/nginx` profile 32 | - Regression tested each cloud provider `ingress/nginx/tls` profile 33 | - Enabled [Connectors](https://docs.camunda.io/docs/components/connectors/use-connectors/) in profiles 34 | - Fixed Azure networking issue related to Azure Load Balancer Health Probes. We add /healthz to load balancer paths by default 35 | - The `deploy-models` target was moved out of the benchmark profile and refactored slightly to be more reusable. See examples inside [bpmn/deploy-models.mk](bpmn/deploy-models.mk) 36 | - renamed `kube-nginx` target in azure profile to be kube-azure 37 | 38 | ## Before May 2023 Camunda v8.1.x, Helm Profiles 0.0.1 39 | 40 | - This helm profile project existed since early 2022, but we did not start this CHANGELOG until May 2023. 
41 | - Before then, Helm profiles supported Camunda version 8.1.x 42 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.MD: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ### View the [Camunda Code of Conduct](https://camunda.com/events/code-conduct/) and find ways to report violations. 4 | -------------------------------------------------------------------------------- /aws/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | # Already have a Cluster? Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | region ?= us-east-1 7 | zones ?= ['us-east-1a', 'us-east-1b'] 8 | clusterName ?= YOUR_CLUSTER_NAME 9 | clusterVersion ?= 1.25 10 | 11 | machineType ?= c6i.4xlarge 12 | # TODO: Currently, auto scaling configuration using these scripts for AWS is not working 13 | # desiredSize is used as the starting size of the cluster 14 | desiredSize ?= 3 15 | minSize ?= 1 16 | maxSize ?= 6 17 | 18 | # ------------------------------------ 19 | # The following variables should not be changed except for advanced use cases 20 | ifeq ($(OS),Windows_NT) 21 | root ?= $(CURDIR)/.. 22 | else 23 | root ?= $(shell pwd)/.. 
24 | endif 25 | 26 | # Camunda components will be installed into the following Kubernetes namespace 27 | namespace ?= camunda 28 | # Helm release name 29 | release ?= camunda 30 | # Helm chart coordinates for Camunda 31 | chart ?= camunda/camunda-platform 32 | 33 | # This file will be generated by make targets 34 | chartValues ?= camunda-values-aws.yaml 35 | 36 | .PHONY: all 37 | all: camunda-values-aws.yaml camunda external-urls 38 | 39 | # 0 kube from kubernetes-aws.mk: Create Kubernetes cluster 40 | 41 | # 1 create camunda-values-aws 42 | camunda-values-aws.yaml: 43 | cp $(root)/development/camunda-values-2.yaml $(chartValues) 44 | 45 | # 2 helm install camunda from camunda.mk 46 | 47 | # 3 Show external URLs 48 | .PHONY: external-urls 49 | external-urls: external-urls-no-ingress 50 | 51 | .PHONY: clean 52 | clean: clean-camunda 53 | rm -f $(chartValues) 54 | 55 | .PHONY: clean-kube 56 | clean-kube: clean-kube-aws 57 | 58 | include $(root)/aws/include/kubernetes-aws.mk 59 | include $(root)/include/camunda.mk 60 | include $(root)/bpmn/deploy-models.mk 61 | include $(root)/connectors/connectors.mk 62 | -------------------------------------------------------------------------------- /aws/include/cluster.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: 6 | region: 7 | version: "" 8 | 9 | managedNodeGroups: 10 | - name: ng-1 11 | instanceType: 12 | desiredCapacity: 13 | minSize: 14 | maxSize: 15 | volumeSize: 16 | privateNetworking: true 17 | 18 | availabilityZones: 19 | -------------------------------------------------------------------------------- /aws/include/ebs-csi-driver-trust-policy-template.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { 7 | "Federated": 
"arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" 8 | }, 9 | "Action": "sts:AssumeRoleWithWebIdentity", 10 | "Condition": { 11 | "StringEquals": { 12 | "oidc.eks..amazonaws.com/id/:aud": "sts.amazonaws.com", 13 | "oidc.eks..amazonaws.com/id/:sub": "system:serviceaccount:kube-system:ebs-csi-controller-sa" 14 | } 15 | } 16 | } 17 | ] 18 | } -------------------------------------------------------------------------------- /aws/include/ssd-storageclass-aws.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: ssd 6 | #provisioner: kubernetes.io/aws-ebs 7 | provisioner: ebs.csi.aws.com 8 | # parameters: 9 | # type: gp3 10 | reclaimPolicy: Delete 11 | volumeBindingMode: WaitForFirstConsumer 12 | -------------------------------------------------------------------------------- /aws/ingress/istio/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profile for configuring Istio in AWS 2 | 3 | TODO: provide Documentation -------------------------------------------------------------------------------- /aws/ingress/istio/camunda-values-aws.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 
3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | global: 7 | ingress: 8 | enabled: false 9 | image: 10 | tag: 8.2.11 11 | # pullPolicy: Always 12 | identity: 13 | auth: 14 | enabled: true 15 | publicIssuerUrl: "https://keycloak.aws.c8sm.com/auth/realms/camunda-platform" 16 | operate: 17 | redirectUrl: "https://operate.aws.c8sm.com" 18 | tasklist: 19 | redirectUrl: "https://tasklist.aws.c8sm.com" 20 | optimize: 21 | redirectUrl: "https://optimize.aws.c8sm.com" 22 | 23 | connectors: 24 | enabled: false 25 | 26 | webModeler: 27 | enabled: false 28 | 29 | optimize: 30 | enabled: true 31 | 32 | #prometheusServiceMonitor: 33 | # enabled: true 34 | 35 | identity: 36 | enabled: true 37 | fullURL: "https://identity.aws.c8sm.com" 38 | 39 | keycloak: 40 | extraEnvVars: 41 | - name: KEYCLOAK_PROXY_ADDRESS_FORWARDING 42 | value: "true" 43 | - name: KEYCLOAK_FRONTEND_URL 44 | value: "https://keycloak.aws.c8sm.com/auth" 45 | 46 | zeebe-gateway: 47 | ingress: 48 | enabled: false 49 | replicas: 1 50 | resources: 51 | requests: 52 | memory: "512Mi" 53 | cpu: "250m" 54 | limits: 55 | memory: "2048Mi" 56 | cpu: "1000m" 57 | 58 | zeebe: 59 | clusterSize: 1 60 | partitionCount: 1 61 | replicationFactor: 1 62 | pvcSize: 1Gi 63 | 64 | resources: 65 | requests: 66 | cpu: "100m" 67 | memory: "512M" 68 | limits: 69 | cpu: "512m" 70 | memory: "2Gi" 71 | 72 | elasticsearch: 73 | enabled: true 74 | imageTag: 7.17.3 75 | replicas: 1 76 | minimumMasterNodes: 1 77 | # Allow no backup for single node setups 78 | clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" 79 | resources: 80 | requests: 81 | cpu: "100m" 82 | memory: "512M" 83 | limits: 84 | cpu: "1000m" 85 | memory: "2Gi" 86 | 87 | 88 | -------------------------------------------------------------------------------- /aws/ingress/nginx/Makefile: 
-------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | # Already have a Cluster? Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | #region ?= us-east-1 7 | region ?= YOUR_AWS_REGION 8 | zones ?= ['us-east-1a', 'us-east-1b'] 9 | clusterName ?= CLUSTER_NAME 10 | clusterVersion ?= 1.25 11 | 12 | machineType ?= c6i.4xlarge 13 | # TODO: Currently, auto scaling configuration using these scripts for AWS is not working 14 | # desiredSize is used as the starting size of the cluster 15 | desiredSize ?= 3 16 | minSize ?= 1 17 | maxSize ?= 6 18 | 19 | # Configure the fully qualified domain name 20 | # The dnsLabel is the first part of the domain address. It will be used no matter what baseDomain you configure below 21 | dnsLabel ?= YOUR_DNS_LABEL 22 | 23 | # By default, we'll use nip.io (See more at [https://nip.io](http://nip.io) ) 24 | # The fully qualified domain name will look something like ..nip.io 25 | baseDomainName ?= nip.io 26 | 27 | # Another option is to replace baseDomainName with your own domain name 28 | # In this case, the fully qualified domain name will look like . 29 | # baseDomainName ?= YOUR_CUSTOM_DOMAIN_NAME 30 | 31 | # ------------------------------------ 32 | # The following variables should not be changed except for advanced use cases 33 | ifeq ($(OS),Windows_NT) 34 | root ?= $(CURDIR)/../../.. 35 | else 36 | root ?= $(shell pwd)/../../.. 
37 | endif 38 | 39 | # Camunda components will be installed into the following Kubernetes namespace 40 | namespace ?= camunda 41 | # Helm release name 42 | release ?= camunda 43 | # Helm chart coordinates for Camunda 44 | chart ?= camunda/camunda-platform 45 | 46 | # This file will be generated by make targets 47 | chartValues ?= camunda-values-ingress-aws.yaml 48 | 49 | .PHONY: all 50 | all: ingress-nginx camunda-values-ingress-aws.yaml camunda external-urls 51 | 52 | # 0 kube from kubernetes-aws.mk: Create Kubernetes cluster 53 | 54 | # 1 install nginx ingress controller 55 | 56 | # 2 create camunda-values-ingress.yaml 57 | 58 | # 3 helm install camunda from camunda.mk 59 | 60 | # 4 Show external URLs 61 | .PHONY: external-urls 62 | external-urls: external-urls-with-fqdn 63 | 64 | .PHONY: clean 65 | clean: clean-camunda clean-ingress 66 | rm -f $(chartValues) 67 | 68 | .PHONY: clean-kube 69 | clean-kube: clean-kube-aws 70 | 71 | include $(root)/aws/include/kubernetes-aws.mk 72 | include $(root)/include/camunda.mk 73 | include $(root)/bpmn/deploy-models.mk 74 | include $(root)/connectors/connectors.mk 75 | include $(root)/include/ingress-nginx.mk -------------------------------------------------------------------------------- /aws/ingress/nginx/tls/aws-ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Attempt to find ip address ..."; 4 | 5 | getIp() 6 | { 7 | ELB_ID=$(kubectl get service -w ingress-nginx-controller -o 'go-template={{with .status.loadBalancer.ingress}}{{range .}}{{.hostname}}{{"\n"}}{{end}}{{.err}}{{end}}' -n ingress-nginx 2>/dev/null | head -n1 | cut -d'.' 
-f 1 | cut -d'-' -f 1) 8 | #echo "Attempt to find ip address for elb: ${ELB_ID}"; 9 | IP_ADDRESS=$(aws ec2 describe-network-interfaces --filters Name=description,Values="ELB ${ELB_ID}" --query 'NetworkInterfaces[0].PrivateIpAddresses[*].Association.PublicIp' --output text) 10 | #echo "IP Address: $IP_ADDRESS"; 11 | } 12 | 13 | getIp; 14 | 15 | while [ "$IP_ADDRESS" = "None" ] || [ -z "$IP_ADDRESS" ]; do getIp; done; 16 | echo "Found ip address: $IP_ADDRESS"; -------------------------------------------------------------------------------- /aws/ingress/nginx/tls/secure-camunda-setup/component-ssl-narration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/aws/ingress/nginx/tls/secure-camunda-setup/component-ssl-narration.png -------------------------------------------------------------------------------- /aws/ingress/nginx/tls/secure-camunda-setup/self-signed-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: poc-issuer 5 | spec: 6 | selfSigned: {} 7 | --- 8 | apiVersion: cert-manager.io/v1 9 | kind: Certificate 10 | metadata: 11 | name: poc-cert 12 | spec: 13 | commonName: camunda-poc 14 | isCA: true 15 | issuerRef: 16 | kind: Issuer 17 | name: poc-issuer 18 | secretName: poc-cert 19 | dnsNames: 20 | - camunda-zeebe-gateway.camunda.svc.cluster.local #this is required if the zeebeclients - tasklist, operate, java clients etc are connecting to zeebe form the same k8scluster 21 | - localhost #this is needed to test in case of portforwarding 22 | -------------------------------------------------------------------------------- /azure/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your 
specific environment 3 | # Already have a Cluster? Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | #region ?= eastus 7 | region ?= YOUR_REGION 8 | clusterName ?= YOUR_CLUSTER_NAME 9 | resourceGroup ?= YOUR_CLUSTER_NAME-rg 10 | 11 | machineType ?= Standard_A8_v2 12 | minSize ?= 1 13 | maxSize ?= 6 14 | 15 | # ------------------------------------ 16 | # The following variables should not be changed except for advanced use cases 17 | ifeq ($(OS),Windows_NT) 18 | root ?= $(CURDIR)/.. 19 | else 20 | root ?= $(shell pwd)/.. 21 | endif 22 | 23 | # Camunda components will be installed into the following Kubernetes namespace 24 | namespace ?= camunda 25 | # Helm release name 26 | release ?= camunda 27 | # Helm chart coordinates for Camunda 28 | chart ?= camunda/camunda-platform 29 | 30 | # This file will be generated by make targets 31 | chartValues ?= camunda-values-azure.yaml 32 | 33 | .PHONY: all 34 | all: camunda-values-azure.yaml camunda external-urls 35 | 36 | # 0 kube from aks.mk: Create Kubernetes cluster.
(No application gateway required) 37 | .PHONY: kube 38 | kube: kube-aks 39 | 40 | # 1 create camunda-values-azure 41 | camunda-values-azure.yaml: 42 | cp $(root)/development/camunda-values-2.yaml $(chartValues) 43 | 44 | # 2 helm install camunda from camunda.mk 45 | 46 | # 3 Show external URLs 47 | .PHONY: external-urls 48 | external-urls: external-urls-no-ingress 49 | 50 | # Remove Camunda from cluster 51 | .PHONY: clean 52 | clean: clean-camunda 53 | rm -f $(chartValues) 54 | 55 | include $(root)/azure/include/aks.mk 56 | include $(root)/include/camunda.mk 57 | include $(root)/bpmn/deploy-models.mk 58 | include $(root)/connectors/connectors.mk 59 | 60 | 61 | -------------------------------------------------------------------------------- /azure/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profiles for Camunda 8 on Microsoft Azure 2 | 3 | Create a Camunda 8 self-managed Kubernetes Cluster in 3 Steps: 4 | 5 | Step 1: Setup some [global prerequisites](../README.md#prerequisites) 6 | 7 | Step 2: Setup command line tools for Azure: 8 | 9 | 1. Verify that the `az` cli tool is installed (https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) 10 | 11 | $ az version 12 | { 13 | "azure-cli": "2.38.0", 14 | "azure-cli-core": "2.38.0", 15 | "azure-cli-telemetry": "1.0.6", 16 | "extensions": {} 17 | } 18 | 19 | 2. Make sure you are authenticated. If you don't already have one, you'll need to sign up for a new Azure Account. Then, run the following command and then follow the instructions to authenticate via your browser. 20 | 21 | $ az login 22 | 23 | > **Tip** If you or your company uses SSO to sign in to Microsoft, first, open a browser and sign in 24 | > to your Azure/Microsoft account. Then try doing the `az login` command again. 25 | 26 | 3. Go into one of the profiles inside this `azure` folder and use the `Makefile` to create an AKS cluster. 27 | 28 | e.g.
`cd` into the `ingress/nginx/tls` directory and see the [README.md](ingress/nginx/tls/README.md) for more. 29 | 30 | 31 | -------------------------------------------------------------------------------- /azure/include/fileshare.mk: -------------------------------------------------------------------------------- 1 | # https://learn.microsoft.com/en-us/azure/aks/azure-files-volume 2 | # https://9to5tutorial.com/mount-an-azure-file-share-from-a-pod-on-an-aks-virtual-node-aci 3 | # https://zimmergren.net/mount-an-azure-storage-file-share-to-deployments-in-azure-kubernetes-services-aks/ 4 | fsAccount ?= mystorage 5 | fsRg ?= fileshare-rg 6 | fsShare ?= aksshare 7 | haha ?= "" 8 | 9 | .PHONY: fileshare 10 | fileshare: 11 | $(eval fsAccount := camstorage$(shell cat /dev/urandom | tr -dc 'a-z0-9' | fold -w $${1:-12} | head -n 1)) 12 | @echo $(fsAccount) 13 | az group create -n $(fsRg) -l $(region) 14 | az storage account create -n $(fsAccount) -g $(fsRg) -l $(region) --sku Standard_LRS 15 | $(eval fsConStr := $(shell az storage account show-connection-string -n $(fsAccount) -g $(fsRg) -o tsv)) 16 | az storage share create -n $(fsShare) --connection-string $(fsConStr) 17 | $(eval fsKey := $(shell az storage account keys list --resource-group $(fsRg) --account-name $(fsAccount) --query "[0].value" -o tsv)) 18 | @echo Creating secret for storage account name: $(fsAccount) and key: $(fsKey) 19 | kubectl create secret generic fs-secret --from-literal=azurestorageaccountname=$(fsAccount) --from-literal=azurestorageaccountkey=$(fsKey) 20 | 21 | .PHONY: clean-fileshare 22 | clean-fileshare: 23 | az storage share delete -n $(fsShare) 24 | az group delete -n $(fsRg) --yes 25 | 26 | -------------------------------------------------------------------------------- /azure/include/public-zeebe-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | namespace: camunda
5 | name: zeebe-ingress 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/backend-protocol: "GRPC" 9 | cert-manager.io/cluster-issuer: letsencrypt 10 | spec: 11 | ingressClassName: nginx 12 | rules: 13 | - host: MY_HOSTNAME 14 | http: 15 | paths: 16 | - path: / 17 | pathType: Prefix 18 | backend: 19 | service: 20 | name: camunda-zeebe-gateway 21 | port: 22 | number: 26500 23 | tls: 24 | - secretName: tls-secret 25 | hosts: 26 | - MY_HOSTNAME -------------------------------------------------------------------------------- /azure/include/ssd-storageclass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: ssd 6 | provisioner: disk.csi.azure.com 7 | parameters: 8 | skuName: StandardSSD_LRS 9 | reclaimPolicy: Delete 10 | allowVolumeExpansion: true -------------------------------------------------------------------------------- /azure/ingress/agic/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Azure App Gateway Ingress with TLS 2 | 3 | > **Note** this profile uses an Azure App Gateway which [doesn't support GRPC traffic](https://azure.microsoft.com/en-gb/blog/application-gateway-ingress-controller-for-azure-kubernetes-service/). You probably want to use [the nginx ingress profile](../nginx/tls) instead. 4 | 5 | > **Note** This profile is still a work in progress. 
For latest progress, please see this [Github Issue](https://github.com/camunda-community-hub/camunda-8-helm-profiles/issues/37) 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /azure/ingress/agic/agic.mk: -------------------------------------------------------------------------------- 1 | .PHONY: azure-gateway-ip-address 2 | azure-gateway-ip-address: 3 | $(eval IP := $(shell az network public-ip show -g $(nodeResourceGroup) -n $(gatewayName)-appgwpip -o json --query ipAddress | xargs)) 4 | @echo Application Gateway IP: $(IP) 5 | 6 | camunda-values-azure.yaml: azure-gateway-ip-address 7 | sed "s/127.0.0.1/$(IP)/g;" ../ingress-nginx/camunda-values.yaml > ./camunda-values-azure.yaml 8 | 9 | .PHONY: ingress-azure 10 | ingress-azure: namespace azure-gateway-ip-address 11 | echo "Creating ingress controller at: http://$(IP).nip.io" ; 12 | cat ingress-azure.yaml | sed -E "s/([0-9]{1,3}\.){3}[0-9]{1,3}/$(IP)/g" | kubectl apply -f - 13 | 14 | .PHONY: clean-ingress-azure 15 | clean-ingress-azure: 16 | kubectl delete ingress ingress-azure -n camunda 17 | -------------------------------------------------------------------------------- /azure/ingress/agic/ingress-azure.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: ingress-azure 5 | namespace: camunda 6 | annotations: 7 | kubernetes.io/ingress.class: azure/application-gateway 8 | spec: 9 | rules: 10 | - host: identity.127.0.0.1.nip.io 11 | http: 12 | paths: 13 | - path: / 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: camunda-identity 18 | port: 19 | number: 80 20 | - host: keycloak.127.0.0.1.nip.io 21 | http: 22 | paths: 23 | - path: / 24 | pathType: Prefix 25 | backend: 26 | service: 27 | name: camunda-keycloak 28 | port: 29 | number: 80 30 | - host: operate.127.0.0.1.nip.io 31 | http: 32 | paths: 33 | - path: / 34 | pathType: Prefix 35 | backend: 
36 | service: 37 | name: camunda-operate 38 | port: 39 | number: 80 40 | - host: tasklist.127.0.0.1.nip.io 41 | http: 42 | paths: 43 | - path: / 44 | pathType: Prefix 45 | backend: 46 | service: 47 | name: camunda-tasklist 48 | port: 49 | number: 80 50 | - host: optimize.127.0.0.1.nip.io 51 | http: 52 | paths: 53 | - path: / 54 | pathType: Prefix 55 | backend: 56 | service: 57 | name: camunda-optimize 58 | port: 59 | number: 80 60 | -------------------------------------------------------------------------------- /azure/ingress/nginx/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | # Already have a Cluster? Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | #region ?= eastus 7 | region ?= YOUR_REGION 8 | clusterName ?= YOUR_CLUSTER_NAME 9 | resourceGroup ?= YOUR_CLUSTER_NAME-rg 10 | 11 | machineType ?= Standard_A8_v2 12 | minSize ?= 1 13 | maxSize ?= 6 14 | 15 | # Configure the fully qualified domain name 16 | # The dnsLabel is the first part of the domain address. It will be used no matter what baseDomain you configure below 17 | dnsLabel ?= YOUR_DNS_LABEL 18 | 19 | # In Azure, we can use the `cloudapp.azure.com` base domain. 20 | # In this case, the fully qualified domain name will look like ..cloudapp.azure.com 21 | baseDomainName ?= $(region).cloudapp.azure.com 22 | 23 | # Another option is to use nip.io (See more at [https://nip.io](http://nip.io) ) 24 | # The fully qualified domain name will look something like ..nip.io 25 | # baseDomainName ?= nip.io 26 | 27 | # Another option is to replace baseDomainName with your own domain name 28 | # In this case, the fully qualified domain name will look like . 
29 | # baseDomainName ?= YOUR_CUSTOM_DOMAIN_NAME 30 | 31 | # ------------------------------------ 32 | # The following variables should not be changed except for advanced use cases 33 | ifeq ($(OS),Windows_NT) 34 | root ?= $(CURDIR)/../../.. 35 | else 36 | root ?= $(shell pwd)/../../.. 37 | endif 38 | 39 | # Camunda components will be installed into the following Kubernetes namespace 40 | namespace ?= camunda 41 | # Helm release name 42 | release ?= camunda 43 | # Helm chart coordinates for Camunda 44 | chart ?= camunda/camunda-platform 45 | 46 | # This file will be generated by make targets 47 | chartValues ?= camunda-values-ingress.yaml 48 | 49 | .PHONY: all 50 | all: azure-ingress-nginx camunda-values-ingress.yaml camunda external-urls 51 | 52 | # 0 kube from aks.mk: Create Kubernetes cluster. (No aplication gateway required) 53 | .PHONY: kube 54 | kube: kube-aks 55 | 56 | # 1 install nginx ingress controller 57 | 58 | # 2 create camunda-values-ingress.yaml 59 | 60 | # 3 helm install camunda from camunda.mk 61 | 62 | # 4 Show external URLs 63 | .PHONY: external-urls 64 | external-urls: external-urls-with-fqdn 65 | 66 | # Remove Camunda from cluster 67 | .PHONY: clean 68 | clean: clean-camunda 69 | rm -f $(chartValues) 70 | 71 | .PHONY: clean-kube 72 | clean-kube: clean-kube-aks 73 | 74 | include $(root)/azure/include/aks.mk 75 | include $(root)/include/camunda.mk 76 | include $(root)/bpmn/deploy-models.mk 77 | include $(root)/connectors/connectors.mk 78 | include $(root)/include/ingress-nginx.mk 79 | 80 | 81 | -------------------------------------------------------------------------------- /azure/ingress/nginx/tls/entra/set-env-aks.sh: -------------------------------------------------------------------------------- 1 | export certEmail="xxx" 2 | export clusterName="xxx" 3 | export resourceGroup="xxx-rg" 4 | 5 | export camundaVersion=8.6.7 6 | export camundaHelmVersion=11.1.1 7 | 8 | # Use this to configure Google DNS 9 | export dnsLabel="your-dns-label" 10 | 
export baseDomainName="your.domain.name" 11 | export dnsManagedZone="your-managed-zone-name" 12 | 13 | export region="eastus" 14 | export namespace="camunda" 15 | 16 | export machineType=Standard_A8_v2 17 | export minSize=1 18 | export maxSize=6 19 | 20 | # Prior to 8.6.6, Web Modeler docker images required credentials. However, the image is now freely available 21 | #export camundaDockerRegistrySecretName="camunda-docker-registry" 22 | #export camundaDockerRegistryUrl="https://registry.camunda.cloud/" 23 | #export camundaDockerRegistryUsername="xxx" 24 | #export camundaDockerRegistryPassword="xxx" 25 | #export camundaDockerRegistryEmail="xxx" 26 | 27 | # This is only relevant for performing backups 28 | export awsKey="xxx" 29 | export awsSecret="xxx" 30 | export backupId="backup01" 31 | export elasticsearchUrl="http://elasticsearch-master:9200" 32 | export s3BucketName="my-bucket" 33 | export s3BucketRegion="us-east-1" 34 | 35 | export addressPrefix="10.1.0.0/16" 36 | export nodeSubnetPrefix="10.1.240.0/24" 37 | export podSubnetPrefix="10.1.241.0/24" 38 | export serviceCidr="10.0.1.0/24" 39 | export dnsServiceIp="10.0.1.10" 40 | export dnsLBIp="20.104.45.90" 41 | 42 | export regions=1 43 | export regionId=0 44 | 45 | export entraTentantId="entra-tenant-id" 46 | export entraAppId="entra-app-id" 47 | export entraClientSecret="entra-client-secret" 48 | export entraAdminUserOid="entra-admin-user-id" 49 | 50 | # echo demo | base64 51 | # export base64Secret="ZGVtbwo=" -------------------------------------------------------------------------------- /azure/ingress/nginx/tls/read-only-file-system/README.md: -------------------------------------------------------------------------------- 1 | # Restricted Security Context 2 | 3 | This profile is created to set up a Camunda 8 cluster with `automountServiceAccountToken: false` & the below security context applied to all pods. 
4 | This is required in some environments as a security best practice 5 | 6 | ```allowPrivilegeEscalation: false 7 | privileged: false 8 | readOnlyRootFilesystem: true 9 | runAsNonRoot: true 10 | runAsUser: 1000 11 | ``` 12 | > **NOTE** : In case of applying `runAsNonRoot: true` for Elastic Search, this would not work, unless the node on which ES is setup already has the `vm.max_map_count` set to 262144 13 | 14 | This particular kernel parameter `vm.max_map_count` is forbidden from being edited via the security context. Furthermore, Elasticsearch provides an initContainer, which configures this kernel parameter on the host prior to execution but this requires running as root. 15 | 16 | More details in the ES DOCS - https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html 17 | -------------------------------------------------------------------------------- /azure/ingress/nginx/tls/secure-zeebe-ingress/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Ingress NGINX for Azure with TLS Certificates and an additional secured ingress 2 | 3 | This profile adds an additional secured ingress to the setup described [here](../README.md) 4 | 5 | 6 | -------------------------------------------------------------------------------- /azure/ingress/nginx/tls/secure-zeebe-ingress/oauth2-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: oauth2-proxy 5 | namespace: ingress-nginx 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt 8 | spec: 9 | ingressClassName: nginx 10 | rules: 11 | - host: dnslabel.location.cloudapp.azure.com 12 | http: 13 | paths: 14 | - backend: 15 | service: 16 | name: oauth2-proxy 17 | port: 18 | number: 80 19 | path: /oauth2 20 | pathType: Prefix 21 | tls: 22 | - hosts: 23 | - dnslabel.location.cloudapp.azure.com 24 | secretName:
tls-secret -------------------------------------------------------------------------------- /azure/ingress/nginx/tls/secure-zeebe-ingress/oauth2-values.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/oauth2-proxy/oauth2-proxy/blob/7c3efe4f42bc37ccab613fe5002c172e147e3195/docs/2_auth.md#keycloak-auth-provider 2 | extraArgs: 3 | - --skip-jwt-bearer-tokens 4 | - --provider=keycloak-oidc 5 | - --client-id=oauth2 6 | - --client-secret=28WW4NRZ0c7ZjF4dg2XXQOeXc8NdVma3 7 | - --redirect-url=https://dnslabel.location.cloudapp.azure.com/oauth2/callback 8 | - --oidc-issuer-url=https://dnslabel.location.cloudapp.azure.com/auth/realms/camunda-platform 9 | - --silence-ping-logging 10 | - --auth-logging=true 11 | - --insecure-oidc-allow-unverified-email 12 | - --request-logging=true 13 | - --standard-logging=true 14 | # - --session-store-type=cookie 15 | # - --cookie-secret=gfVLP_MPTXFi4JAXFUOOiikk5EXXgCOQsBUy3wCeNG4= 16 | # - --login-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/auth 17 | # - --redeem-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/token 18 | # - --profile-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/userinfo 19 | # - --validate-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/userinfo 20 | # - --redirect-url=https://tuesday.southeastasia.cloudapp.azure.com/oauth2/callback 21 | # - --keycloak-group=/users 22 | # - --provider-display-name=LoginBlah 23 | # - --set-xauthrequest=true 24 | # - --pass-user-headers=true 25 | # - --pass-access-token=true 26 | # - --pass-authorization-header=true 27 | # # - --scope=openid 28 | # - --cookie-httponly=false 29 | 30 | 31 | replicaCount: 1 32 | affinity: 33 | podAntiAffinity: 34 | requiredDuringSchedulingIgnoredDuringExecution: 35 
| - labelSelector: 36 | matchLabels: 37 | app: oauth2-proxy 38 | topologyKey: "kubernetes.io/hostname" 39 | resources: 40 | limits: 41 | cpu: 200m 42 | memory: 100Mi 43 | requests: 44 | cpu: 100m 45 | memory: 25Mi -------------------------------------------------------------------------------- /azure/ingress/nginx/tls/secure-zeebe-ingress/public-zeebe-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | namespace: camunda 5 | name: zeebe-ingress 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/backend-protocol: "GRPC" 9 | cert-manager.io/cluster-issuer: letsencrypt 10 | nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2/start?rd=$escaped_request_uri 11 | # nginx.ingress.kubernetes.io/auth-response-headers: Authorization 12 | nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2/auth 13 | # nginx.ingress.kubernetes.io/configuration-snippet: | 14 | # auth_request_set $name_upstream_1 $upstream_cookie__oauth2_proxy_1; 15 | 16 | # access_by_lua_block { 17 | # if ngx.var.name_upstream_1 ~= "" then 18 | # ngx.header["Set-Cookie"] = "_oauth2_proxy_1=" .. ngx.var.name_upstream_1 .. ngx.var.auth_cookie:match("(; .*)") 19 | # end 20 | # } 21 | spec: 22 | ingressClassName: nginx 23 | rules: 24 | - host: azure02.upgradingdave.com 25 | http: 26 | paths: 27 | - path: / 28 | pathType: Prefix 29 | backend: 30 | service: 31 | name: camunda-zeebe-gateway 32 | port: 33 | number: 26500 34 | tls: 35 | - secretName: tls-secret 36 | hosts: 37 | - azure02.upgradingdave.com -------------------------------------------------------------------------------- /azure/keycloak/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | # Already have a Cluster? 
Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | region ?= eastus 7 | clusterName ?= dave-keycloak-01 8 | resourceGroup ?= dave-keycloak-01-rg 9 | 10 | # Configure the fully qualified domain name 11 | # A fully qualified domain name is built by appending `dnsLabel` together with `baseDomainName`. 12 | # By default, in Azure, we can use the `cloudapp.azure.com` base domain. 13 | #baseDomainName ?= $(region).cloudapp.azure.com 14 | # Another option is to replace baseDomainName with your own domain name 15 | baseDomainName ?= upgradingdave.com 16 | # Yet another option option is to use `nip.io` 17 | #baseDomainName ?= nip.io 18 | 19 | # Replace `MY_DNS_LABEL`. For example, if you set `dnsLabel` to `camunda`, then the fqdn will be `camunda.mydomain.com` 20 | dnsLabel ?= kc 21 | 22 | machineType ?= Standard_A8_v2 23 | minSize ?= 1 24 | maxSize ?= 6 25 | certEmail ?= david.paroulek@camunda.com 26 | 27 | keycloakChartVersion ?= 13.0.2 28 | keycloakVersion ?= 19.0.3 29 | 30 | keycloakAdminUser ?= admin 31 | keycloakBase64EncodedAdminPassword ?= WTg1a2hvYklyYg== 32 | keycloakBase64EncodedManagementPassword ?= NEtHclRnbTJjWg== 33 | keycloakContextPath ?= / 34 | 35 | # ------------------------------------ 36 | # The following variables should not be changed except for advanced use cases 37 | ifeq ($(OS),Windows_NT) 38 | root ?= $(CURDIR)/../.. 39 | else 40 | root ?= $(shell pwd)/../.. 41 | endif 42 | 43 | # Camunda components will be installed into the following Kubernetes namespace 44 | namespace ?= keycloak 45 | 46 | 47 | .PHONY: all 48 | all: cert-manager letsencrypt-staging ingress-nginx-tls create-namespace-keycloak create-secret-keycloak keycloak-values-ip install-keycloak 49 | 50 | # 0 kube from cluster.mk: Create Kubernetes cluster. 
51 | .PHONY: kube 52 | kube: kube-aks 53 | 54 | # 1 cert-manager from cert-manager.mk: create certificate manager for tls 55 | 56 | # 2 letsencrypt-staging/-prod from cert-manager.mk: create letsencrypt cert issuer 57 | 58 | # 3 create nginx ingress controller with dns and tls 59 | 60 | # 4 create keycloak namespace 61 | 62 | # 5 create keycloak secret (this will be reused when installing camunda) 63 | 64 | # 6 create keycloak values (by default uses `keycloak-values-ip`. `keycloak-values-hostname` is also available) 65 | 66 | # 7 helm install keycloak using `keycloak-values.yaml` 67 | 68 | .PHONY: clean 69 | clean: clean-keycloak 70 | 71 | .PHONY: clean-kube 72 | clean-kube: clean-keycloak clean-kube-aks 73 | 74 | include $(root)/keycloak/keycloak.mk 75 | include $(root)/include/cert-manager.mk 76 | include $(root)/include/ingress-nginx.mk 77 | include $(root)/azure/include/aks.mk 78 | -------------------------------------------------------------------------------- /azure/keycloak/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Keycloak 2 | 3 | 1. This [keycloak](./) directory contains scripts to create a kubernetes cluster with a standalone keycloak accessible over tls/https. 4 | 5 | ## Installation 6 | 7 | If this is your first time here, make sure you have [installed the prerequisites](../../../README.md). 8 | 9 | After you've installed the prerequisites, follow these steps: 10 | 11 | Open a terminal, cd to this directory, and edit the [Makefile](./Makefile) and change the parameters as needed. At the very least, replace the following: `CLUSTER_NAME` and `YOUR_EMAIL@yourdomain.com`. 12 | 13 | If you don't have a Kubernetes cluster, the values provided will be used to create a new cluster. Otherwise, the values are used to connect and manage an existing cluster. 14 | 15 | If you need to create a new Cluster, run `make kube`. 
16 | 17 | Once you have a Cluster, run `make` to do the following: 18 | 19 | 1. Install a Kubernetes [cert-manager](https://cert-manager.io/) 20 | 2. Setup a [Lets Encrypt](https://letsencrypt.org/) [staging](https://letsencrypt.org/docs/staging-environment/) [ClusterIssuer](https://cert-manager.io/docs/concepts/issuer/). 21 | 3. Install a nginx [Kubernetes Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) 22 | 4. Install a Keycloak instance configured with ingress rules and valid tls certificate from lets encrypt staging environment. 23 | 24 | If needed, run the following to port forward to the keycloak service: 25 | 26 | ```shell 27 | make port-keycloak 28 | ``` 29 | 30 | If needed, run the following to see the admin password: 31 | 32 | ```shell 33 | make keycloak-password 34 | ``` 35 | 36 | Then, access keycloak over https://YOUR_KEYCLOAK_HOSTNAME 37 | 38 | ## Uninstall 39 | ```sh 40 | make clean 41 | ```` 42 | 43 | WARNING!!! This will completely destroy your cluster and everything inside of it!!! To completely delete your cluster, run `make clean-kube`. 
44 | 45 | See the main README for [Troubleshooting, Tips, and Tricks](../../README.md#troubleshooting-tips-and-tricks) -------------------------------------------------------------------------------- /backup/backup.mk: -------------------------------------------------------------------------------- 1 | .PHONY: create-aws-credentials-secret 2 | create-aws-credentials-secret: 3 | -kubectl create secret generic "aws-credentials" \ 4 | --from-literal=key=$(awsKey) \ 5 | --from-literal=secret=$(awsSecret) 6 | 7 | 8 | -------------------------------------------------------------------------------- /benchmark/Makefile: -------------------------------------------------------------------------------- 1 | # Camunda components will be installed into the following Kubernetes namespace 2 | namespace ?= camunda 3 | # Helm release name 4 | release ?= camunda 5 | # Helm chart coordinates for Camunda 6 | chart ?= camunda/camunda-platform 7 | 8 | # ------------------------------------ 9 | # The following variables should not be changed except for advanced use cases 10 | 11 | ifeq ($(OS),Windows_NT) 12 | root ?= $(CURDIR)/.. 13 | else 14 | root ?= $(shell pwd)/.. 
15 | endif 16 | 17 | # This file is created during the make call which runs inside `$(helmProfilesDir)/ingress-aws` 18 | chartValues ?= "$(root)/benchmark/camunda-values.yaml" 19 | 20 | .PHONY: all 21 | all: camunda await-zeebe deploy-benchmark-process rebalance-leaders benchmark 22 | 23 | .PHONY: chaos-all 24 | chaos-all: all chaosmesh 25 | 26 | .PHONY: clean 27 | clean: clean-benchmark clean-camunda 28 | 29 | include $(root)/include/camunda.mk 30 | include $(root)/metrics/metrics.mk 31 | include $(root)/benchmark/benchmark.mk 32 | include $(root)/bpmn/deploy-models.mk 33 | -------------------------------------------------------------------------------- /benchmark/benchmark.mk: -------------------------------------------------------------------------------- 1 | 2 | .PHONY: benchmark 3 | benchmark: namespace 4 | kubectl create configmap payload --from-file=$(root)/benchmark/payload.json -n $(namespace) 5 | kubectl apply -f $(root)/benchmark/benchmark.yaml -n $(namespace) 6 | 7 | .PHONY: clean-benchmark 8 | clean-benchmark: 9 | -kubectl delete -f $(root)/benchmark/benchmark.yaml -n $(namespace) 10 | -kubectl delete configmap payload -n $(namespace) 11 | 12 | .PHONY: logs-benchmark 13 | logs-benchmark: 14 | kubectl logs -f -l app=benchmark -n $(namespace) 15 | -------------------------------------------------------------------------------- /benchmark/benchmark.yaml: -------------------------------------------------------------------------------- 1 | # config for https://github.com/camunda-community-hub/camunda-8-benchmark 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: benchmark 6 | labels: 7 | app: benchmark 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: benchmark 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | app: benchmark 17 | spec: 18 | containers: 19 | - name: benchmark 20 | image: camundacommunityhub/camunda-8-benchmark:main 21 | imagePullPolicy: Always 22 | env: 23 | - name: JAVA_OPTIONS 24 | value: >- 25 | 
-Dcamunda.client.mode=selfmanaged 26 | -Dcamunda.client.zeebe.grpc-address=http://camunda-zeebe-gateway:26500 27 | -Dcamunda.client.zeebe.prefer-rest-over-grpc=false 28 | -Dzeebe.client.request-timeout=600s 29 | -Dzeebe.client.job.poll-interval=1ms 30 | -Dzeebe.client.default-job-worker-stream-enabled=true 31 | -Dzeebe.client.worker.max-jobs-active=2000 32 | -Dbenchmark.startPiPerSecond=5 33 | -Dbenchmark.maxBackpressurePercentage=1.0 34 | -Dbenchmark.taskCompletionDelay=10 35 | -Dbenchmark.bpmnProcessId=BenchmarkProcess 36 | -Dbenchmark.jobType=benchmark-task 37 | -Dbenchmark.multipleJobTypes=8 38 | -Dbenchmark.payloadPath=file:/payload.json 39 | -Dbenchmark.autoDeployProcess=false 40 | -Dbenchmark.warmupPhaseDurationMillis=3000 41 | -Dbenchmark.startRateAdjustmentStrategy=backpressure 42 | -Dbenchmark.startPiIncreaseFactor=0.1 43 | resources: 44 | limits: 45 | cpu: 1 46 | memory: 4Gi 47 | requests: 48 | cpu: 1 49 | memory: 1Gi 50 | volumeMounts: 51 | - name: payload 52 | mountPath: payload.json 53 | subPath: payload.json 54 | readOnly: true 55 | volumes: 56 | - name: payload 57 | configMap: 58 | name: payload 59 | -------------------------------------------------------------------------------- /benchmark/camunda-values.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 
3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | global: 7 | image: 8 | tag: latest 9 | identity: 10 | auth: 11 | # Disable the Identity authentication 12 | # it will fall back to basic-auth: demo/demo as default user 13 | enabled: false 14 | elasticsearch: 15 | disableExporter: true 16 | 17 | identity: 18 | enabled: false 19 | keycloak: 20 | enabled: false 21 | 22 | operate: 23 | enabled: false 24 | 25 | tasklist: 26 | enabled: false 27 | 28 | optimize: 29 | enabled: false 30 | 31 | prometheusServiceMonitor: 32 | enabled: true 33 | 34 | zeebe: 35 | resources: 36 | limits: 37 | cpu: 8 38 | memory: 16Gi 39 | requests: 40 | cpu: 7 41 | memory: 16Gi 42 | clusterSize: "3" 43 | partitionCount: "9" 44 | replicationFactor: "3" 45 | cpuThreadCount: 5 46 | ioThreadCount: 2 47 | persistenceType: disk 48 | pvcSize: 128Gi 49 | pvcStorageClassName: ssd 50 | logLevel: ERROR 51 | env: 52 | - name: ZEEBE_BROKER_EXECUTION_METRICS_EXPORTER_ENABLED 53 | value: "true" 54 | - name: "ZEEBE_BROKER_CLUSTER_MESSAGECOMPRESSION" 55 | value: "SNAPPY" 56 | zeebe-gateway: 57 | replicas: 1 58 | resources: 59 | limits: 60 | cpu: 3 61 | memory: 16Gi 62 | requests: 63 | cpu: 3 64 | memory: 16Gi 65 | logLevel: ERROR 66 | env: 67 | - name: ZEEBE_GATEWAY_MONITORING_ENABLED 68 | value: "true" 69 | - name: ZEEBE_GATEWAY_THREADS_MANAGEMENTTHREADS 70 | value: "3" 71 | - name: ZEEBE_GATEWAY_CLUSTER_MESSAGECOMPRESSION 72 | value: "SNAPPY" 73 | # ELASTIC 74 | elasticsearch: 75 | enabled: false 76 | -------------------------------------------------------------------------------- /benchmark/chaosmesh.mk: -------------------------------------------------------------------------------- 1 | .PHONY: chaos-mesh 2 | chaos-mesh: 3 | helm repo add chaos-mesh https://charts.chaos-mesh.org 4 | -git clone https://github.com/chaos-mesh/chaos-mesh.git 5 | -cd chaos-mesh && kubectl 
apply -f manifests/ 6 | helm install chaos-mesh chaos-mesh/chaos-mesh -f chaos-mesh-values.yaml -n default --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock 7 | kubectl wait --for=condition=Ready pod -n default -l app.kubernetes.io/instance=chaos-mesh --timeout=900s 8 | kubectl delete validatingWebhookConfigurations.admissionregistration.k8s.io chaos-mesh-validation-auth 9 | 10 | # Remove the cloned chaos-mesh repo and uninstall the Helm release (target name must match the .PHONY declaration below) 11 | .PHONY: clean-chaos-mesh 12 | clean-chaos-mesh: 13 | -rm -rf chaos-mesh 14 | -helm --namespace default uninstall chaos-mesh -------------------------------------------------------------------------------- /benchmark/grafana_zeebe_dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/benchmark/grafana_zeebe_dashboard.png -------------------------------------------------------------------------------- /benchmark/latency.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/benchmark/latency.png -------------------------------------------------------------------------------- /benchmark/payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "var1": "value1", 3 | "var2": true, 4 | "var3": 15, 5 | "var4": { 6 | "var4-1": "value4-1", 7 | "var4-2": false, 8 | "var4-3": 111 9 | }, 10 | "var5": "736d9100-0155-4af5-be14-b09c42de8417", 11 | "var6": "b2959d57-d091-42d4-b18c-9e2145b45074", 12 | "var7": "572c74fa-fb3d-4711-bb76-21d66b87fa86 ", 13 | "var8": "d091-42d4-b18c-9e2145b45074-b2959d57", 14 | "var9": "b18c-9e2145b45074-b2959d57-d091-42d4", 15 | "var10": "b2959d5742d4-b18c-d091-9e2145b45074", 16 | "var11": "b18c-9e2145b45074-b2959d57-d091-42d4", 17 | "var12": 7458, 18 | "var13": false, 19 |
"list": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] 20 | } -------------------------------------------------------------------------------- /benchmark/throughput.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/benchmark/throughput.png -------------------------------------------------------------------------------- /bpmn/deploy-models.mk: -------------------------------------------------------------------------------- 1 | 2 | .PHONY: create-deploy-model 3 | create-deploy-model: 4 | kubectl create configmap models --from-file=CamundaProcess.bpmn=$(pathToCamundaProcessBpmnFile) -n $(namespace) 5 | kubectl apply -f $(root)/include/zbctl-deploy-job.yaml -n $(namespace) 6 | kubectl wait --for=condition=complete job/zbctl-deploy --timeout=10s -n $(namespace) 7 | 8 | .PHONY: create-deploy-model-with-auth 9 | create-deploy-model-with-auth: 10 | kubectl create configmap models --from-file=CamundaProcess.bpmn=$(pathToCamundaProcessBpmnFile) -n $(namespace) 11 | kubectl apply -f $(root)/include/zbctl-deploy-job-with-auth.yaml -n $(namespace) 12 | kubectl wait --for=condition=complete job/zbctl-deploy --timeout=10s -n $(namespace) 13 | 14 | .PHONY: clean-deploy-model 15 | clean-deploy-model: 16 | kubectl delete configmap models -n $(namespace) 17 | kubectl delete -f $(root)/include/zbctl-deploy-job.yaml -n $(namespace) 18 | 19 | .PHONY: deploy-model 20 | deploy-model: create-deploy-model-with-auth clean-deploy-model 21 | 22 | # Simple Inbound Connector Process 23 | 24 | .PHONY: set-simple-inbound 25 | set-simple-inbound: 26 | $(eval pathToCamundaProcessBpmnFile := $(root)/bpmn/simple_inbound_connector.bpmn) 27 | 28 | .PHONY: deploy-simple-inbound 29 | deploy-simple-inbound: set-simple-inbound deploy-model 30 | 31 | # Simple SendGrid Process 32 | 33 | .PHONY: set-simple-sendgrid 34 | set-simple-sendgrid: 35 | $(eval 
pathToCamundaProcessBpmnFile := $(root)/bpmn/simple_sendgrid.bpmn) 36 | 37 | .PHONY: deploy-simple-sendgrid 38 | deploy-simple-sendgrid: set-simple-sendgrid deploy-model 39 | 40 | # Benchmark Process 41 | 42 | .PHONY: set-benchmark-process 43 | set-benchmark-process: 44 | $(eval pathToCamundaProcessBpmnFile := $(root)/bpmn/BenchmarkProcess.bpmn) 45 | 46 | .PHONY: deploy-benchmark-process 47 | deploy-benchmark-process: set-benchmark-process deploy-model 48 | 49 | # OpenAI ChatGpt Process 50 | 51 | .PHONY: set-simple-openai 52 | set-simple-openai: 53 | $(eval pathToCamundaProcessBpmnFile := $(root)/bpmn/simple_openai.bpmn) 54 | 55 | .PHONY: deploy-simple-openai 56 | deploy-simple-openai: set-simple-openai deploy-model 57 | 58 | -------------------------------------------------------------------------------- /cfssl/Makefile: -------------------------------------------------------------------------------- 1 | # Create a cert to use with GRPC gateway 2 | # this script uses dependencies (cfssl, cfssljson, jq) 3 | root ?= $(shell pwd)/.. 4 | 5 | namespace ?= camunda 6 | # Service to apply cert 7 | service ?= zeebe-gateway 8 | # TLS secret name (variable is consumed as $(secretName) by cfssl-certs.mk kube-create-secret) 9 | secretName ?= tls-secret 10 | # Cert Signing Request (CSR) signer name 11 | signerName ?= 127.0.0.1.nip.io\/pdiddy 12 | 13 | .PHONY: all 14 | all: cfssl-create-csr kube-upload-csr kube-approve-csr \ 15 | cfssl-create-cert-authority cfssl-sign-certificate \ 16 | kube-upload-cert kube-create-secret clean-certs 17 | 18 | include $(root)/cfssl/cfssl-certs.mk 19 | -------------------------------------------------------------------------------- /cfssl/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Cloud Flare PKI Toolkit 2 | 3 | > **Note** This profile is still a work in progress.
For latest progress, please see this [Github Issue](https://github.com/camunda-community-hub/camunda-8-helm-profiles/issues/41) 4 | 5 | This folder contains a [Makefile](Makefile) that can be used to install and configure TLS Certificates using Cloud Flare's PKI Toolkit `cfssl`. 6 | 7 | If you don't have a Kubernetes Cluster yet, see the main [README](../README.md) for details on how to create a cluster on the popular cloud providers. 8 | 9 | ## Install 10 | 11 | Make sure you meet [these prerequisites](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/master/README.md#prerequisites). 12 | 13 | For this profile, you will also need the following: 14 | 15 | 1. `cfssl` command line tool 16 | 2. `jq` command line tool 17 | 18 | Open a terminal, and run: 19 | 20 | ```sh 21 | make 22 | ``` 23 | 24 | ## Uninstall 25 | ```sh 26 | make clean 27 | ``` -------------------------------------------------------------------------------- /cfssl/cfssl-certs.mk: -------------------------------------------------------------------------------- 1 | # Create a cert to use with GRPC gateway 2 | # this script uses cfssl, cfssljosn, jq 3 | 4 | .PHONY: cfssl-create-csr 5 | cfssl-create-csr: 6 | mkdir certs 7 | cat $(root)/cfssl/template/csr.json | cfssl genkey - | cfssljson -bare server -f 8 | mv server* ./certs/ 9 | 10 | .PHONY: kube-upload-csr 11 | kube-upload-csr: 12 | sed "s//$(shell cat certs/server.csr | base64 | tr -d '\n')/g; \ 13 | s//$(service).$(namespace)/g; \ 14 | s//$(signerName)/g;" \ 15 | $(root)/cfssl/template/csr.tpl.yaml > ./certs/csr.yaml 16 | kubectl apply -f ./certs/csr.yaml -n $(namespace) 17 | kubectl describe csr $(service).$(namespace) -n $(namespace) 18 | 19 | .PHONY: kube-approve-csr 20 | kube-approve-csr: 21 | kubectl certificate approve $(service).$(namespace) -n $(namespace) 22 | kubectl get csr -n $(namespace) 23 | 24 | .PHONY: cfssl-create-cert-authority 25 | cfssl-create-cert-authority: 26 | cat $(root)/cfssl/template/ca.json | cfssl 
gencert -initca - | cfssljson -bare ca 27 | mv ca-* ./certs/ 28 | mv ca.* ./certs/ 29 | 30 | .PHONY: cfssl-sign-certificate 31 | cfssl-sign-certificate: 32 | kubectl get csr $(service).$(namespace) -o jsonpath='{.spec.request}' | \ 33 | base64 --decode | \ 34 | cfssl sign -ca ./certs/ca.pem -ca-key ./certs/ca-key.pem \ 35 | -config $(root)/cfssl/template/server-signing-config.json - | \ 36 | cfssljson -bare ca-signed-server -n $(namespace) 37 | mv ca-* ./certs/ 38 | 39 | .PHONY: kube-upload-cert 40 | kube-upload-cert: 41 | kubectl get csr $(service).$(namespace) -o json | \ 42 | jq '.status.certificate = "'$(shell base64 -i ./certs/ca-signed-server.pem | tr -d '\n')'"' | \ 43 | kubectl replace --raw /apis/certificates.k8s.io/v1/certificatesigningrequests/$(service).$(namespace)/status -f - -n $(namespace) 44 | kubectl get csr -n $(namespace) 45 | 46 | .PHONY: kube-get-client-cert 47 | kube-get-client-cert: 48 | $(shell kubectl get csr $(service).$(namespace) -n $(namespace) -o jsonpath='{.status.certificate}' \ 49 | | base64 --decode > ./certs/signed-client.crt) 50 | 51 | .PHONY: kube-create-client-cert 52 | kube-create-client-cert: 53 | # kubectl get csr $(service).$(namespace) -n $(namespace) -o jsonpath='{.status.certificate}' \ 54 | # | base64 --decode | 55 | kubectl create secret generic zeebe-gateway-client-secret --from-file ./certs/signed-client.crt -n $(namespace) 56 | 57 | .PHONY: kube-create-secret 58 | kube-create-secret: 59 | -kubectl delete secret $(secretName) 60 | kubectl create secret tls $(secretName) --cert ./certs/ca-signed-server.pem --key ./certs/server-key.pem -n $(namespace) 61 | kubectl get secret $(secretName) -n $(namespace) -o json 62 | 63 | .PHONY: clean-certs 64 | clean-certs: 65 | -rm -Rf ./certs 66 | -kubectl delete csr $(service).$(namespace) 67 | -------------------------------------------------------------------------------- /cfssl/template/ca.json: -------------------------------------------------------------------------------- 1 
| { 2 | "CN": "127.0.0.1.nip.io\/pdiddy", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /cfssl/template/csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "hosts": [ 3 | "127.0.0.1.nip.io" 4 | ], 5 | "CN": "127.0.0.1.nip.io", 6 | "key": { 7 | "algo": "ecdsa", 8 | "size": 256 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /cfssl/template/csr.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: 5 | spec: 6 | request: 7 | signerName: 8 | usages: 9 | - digital signature 10 | - key encipherment 11 | - server auth 12 | -------------------------------------------------------------------------------- /cfssl/template/server-signing-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "usages": [ 5 | "digital signature", 6 | "key encipherment", 7 | "server auth" 8 | ], 9 | "expiry": "876000h", 10 | "ca_constraint": { 11 | 12 | "is_ca": false 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /connectors/connectors.mk: -------------------------------------------------------------------------------- 1 | 2 | # SendGrid 3 | 4 | .PHONY: set-sendgrid-secret 5 | set-sendgrid-secret: 6 | $(eval SECRET := $(shell bash -c 'read -p "SENDGRID_KEY: " secret; echo $$secret')) 7 | kubectl set env deployment camunda-connectors SENDGRID_KEY=$(SECRET) CAMUNDA_OPERATE_CLIENT_USERNAME=demo CAMUNDA_OPERATE_CLIENT_PASSWORD=demo 8 | 9 | .PHONY: set-openai-secret 10 | set-openai-secret: 11 | $(eval SECRET := $(shell bash -c 'read -p "OPENAI_KEY: " secret; echo $$secret')) 12 | kubectl set env deployment camunda-connectors
OPENAI_KEY=$(SECRET) 13 | 14 | -------------------------------------------------------------------------------- /default/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # The following variables should not be changed except for advanced use cases 3 | 4 | ifeq ($(OS),Windows_NT) 5 | root ?= $(CURDIR)/.. 6 | else 7 | root ?= $(shell pwd)/.. 8 | endif 9 | 10 | # Camunda components will be installed into the following Kubernetes namespace 11 | namespace ?= camunda 12 | # Helm release name 13 | release ?= camunda 14 | # Helm chart coordinates for Camunda 15 | chart ?= camunda/camunda-platform 16 | 17 | chartValues ?= $(root)/default/camunda-values.yaml 18 | 19 | .PHONY: all 20 | all: camunda 21 | 22 | .PHONY: clean 23 | clean: clean-camunda 24 | 25 | include $(root)/include/camunda.mk 26 | -------------------------------------------------------------------------------- /default/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Default 2 | 3 | A configuration for Camunda Platform 8 4 | that relies only on the defaults provided by the official Helm chart 5 | and also serves as a template for creating new profiles. 6 | 7 | This folder contains a [Helm](https://helm.sh/) [values file](camunda-values.yaml) 8 | for installing the [Camunda Platform Helm Chart](https://helm.camunda.io/) 9 | on an existing Kubernetes cluster (if you don't have one yet, see [Cloud-platform-specific Profiles](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/master/README.md#cloud-platform-specific-profiles)). 10 | A [Makefile](Makefile) is provided to automate the installation process. 11 | 12 | ## Install 13 | 14 | Make sure you meet [these prerequisites](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/master/README.md#prerequisites). 
15 | 16 | Configure the desired Kubernetes `namespace`, Helm `release` name, and Helm `chart` in [Makefile](Makefile) 17 | and run: 18 | 19 | ```sh 20 | make 21 | ``` 22 | 23 | If `make` is correctly configured, you should also get tab completion for all available make targets. 24 | 25 | ## Uninstall 26 | ```sh 27 | make clean 28 | ``` 29 | -------------------------------------------------------------------------------- /default/camunda-values.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | -------------------------------------------------------------------------------- /development/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # The following variables should not be changed except for advanced use cases 3 | 4 | ifeq ($(OS),Windows_NT) 5 | root ?= $(CURDIR)/.. 6 | else 7 | root ?= $(shell pwd)/.. 
8 | endif 9 | 10 | # Camunda components will be installed into the following Kubernetes namespace 11 | namespace ?= camunda 12 | # Helm release name 13 | release ?= camunda 14 | # Helm chart coordinates for Camunda 15 | chart ?= camunda/camunda-platform 16 | 17 | chartValues ?= $(root)/development/camunda-values.yaml 18 | 19 | .PHONY: all 20 | all: camunda 21 | 22 | .PHONY: clean 23 | clean: clean-camunda 24 | 25 | include $(root)/include/camunda.mk -------------------------------------------------------------------------------- /development/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profiles for Camunda 8 Developers 2 | 3 | A lightweight development configuration for Camunda Platform 8 4 | that can be used locally via KIND or any other minimal Kubernetes environment 5 | as well as on a "real" Kubernetes cluster in a public or private cloud. 6 | 7 | This folder contains a [Helm](https://helm.sh/) [values file](camunda-values.yaml) 8 | for installing the [Camunda Platform Helm Chart](https://helm.camunda.io/) 9 | on an existing Kubernetes cluster (if you don't have one yet, 10 | see the `kind` folder, or one of the cloud provider folders for more information). 11 | A [Makefile](Makefile) is provided to automate the installation process. 12 | 13 | ## Install 14 | 15 | Note: you should already have a Kubernetes cluster running before you run this profile. Your `kubectl` should be configured to connect to your existing cluster. If you need to create a cluster, see the main [README.md](../README.md) for guidance. 
16 | 17 | Run the following to install Camunda using the `camunda-values.yaml` file found in this directory: 18 | 19 | ```sh 20 | cd development 21 | make 22 | ``` 23 | 24 | ## Uninstall 25 | ```sh 26 | cd development 27 | make clean 28 | ``` 29 | -------------------------------------------------------------------------------- /development/camunda-values-2.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | # This is a very small cluster useful for running locally and for development 7 | 8 | global: 9 | image: 10 | tag: latest 11 | identity: 12 | auth: 13 | # Disable the Identity authentication 14 | # it will fall back to basic-auth: demo/demo as default user 15 | enabled: false 16 | 17 | identity: 18 | enabled: false 19 | 20 | optimize: 21 | enabled: false 22 | 23 | connectors: 24 | enabled: true 25 | inbound: 26 | mode: credentials 27 | resources: 28 | requests: 29 | cpu: "100m" 30 | memory: "512M" 31 | limits: 32 | cpu: "1000m" 33 | memory: "2Gi" 34 | env: 35 | - name: CAMUNDA_OPERATE_CLIENT_USERNAME 36 | value: demo 37 | - name: CAMUNDA_OPERATE_CLIENT_PASSWORD 38 | value: demo 39 | 40 | zeebe: 41 | clusterSize: 1 42 | partitionCount: 1 43 | replicationFactor: 1 44 | pvcSize: 1Gi 45 | affinity: 46 | podAntiAffinity: null 47 | resources: 48 | requests: 49 | cpu: "100m" 50 | memory: "512M" 51 | limits: 52 | cpu: "512m" 53 | memory: "2Gi" 54 | 55 | zeebe-gateway: 56 | replicas: 1 57 | 58 | resources: 59 | requests: 60 | cpu: "100m" 61 | memory: "512M" 62 | limits: 63 | cpu: "1000m" 64 | memory: "1Gi" 65 | 66 | logLevel: ERROR 67 | 68 | elasticsearch: 69 | enabled: true 70 | # imageTag: 7.17.3 71 | 
replicas: 1 72 | minimumMasterNodes: 1 73 | # Allow no backup for single node setups 74 | clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" 75 | 76 | resources: 77 | requests: 78 | cpu: "100m" 79 | memory: "512M" 80 | limits: 81 | cpu: "1000m" 82 | memory: "2Gi" 83 | -------------------------------------------------------------------------------- /development/camunda-values-with-ingress.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | # This is a very small cluster useful for running locally and for development 7 | 8 | global: 9 | image: 10 | tag: latest 11 | ingress: 12 | enabled: true 13 | className: nginx 14 | host: "localhost" 15 | identity: 16 | auth: 17 | # Disable the Identity authentication 18 | # it will fall back to basic-auth: demo/demo as default user 19 | enabled: false 20 | 21 | identity: 22 | enabled: false 23 | 24 | optimize: 25 | enabled: false 26 | 27 | operate: 28 | contextPath: "/operate" 29 | 30 | tasklist: 31 | contextPath: "/tasklist" 32 | 33 | connectors: 34 | enabled: true 35 | inbound: 36 | mode: credentials 37 | ingress: 38 | enabled: true 39 | # TODO: not sure why this doesn't seem to work? 
40 | # path: "/connectors" 41 | host: "localhost" 42 | resources: 43 | requests: 44 | cpu: "100m" 45 | memory: "512M" 46 | limits: 47 | cpu: "1000m" 48 | memory: "2Gi" 49 | env: 50 | - name: CAMUNDA_OPERATE_CLIENT_USERNAME 51 | value: demo 52 | - name: CAMUNDA_OPERATE_CLIENT_PASSWORD 53 | value: demo 54 | 55 | zeebe: 56 | clusterSize: 1 57 | partitionCount: 1 58 | replicationFactor: 1 59 | pvcSize: 1Gi 60 | affinity: 61 | podAntiAffinity: null 62 | resources: 63 | requests: 64 | cpu: "100m" 65 | memory: "512M" 66 | limits: 67 | cpu: "512m" 68 | memory: "2Gi" 69 | 70 | zeebe-gateway: 71 | ingress: 72 | enabled: false # NOTE: GRPC is not available via nginx ingress unless tls is enabled. Use port forwarding instead 73 | 74 | replicas: 1 75 | 76 | resources: 77 | requests: 78 | cpu: "100m" 79 | memory: "512M" 80 | limits: 81 | cpu: "1000m" 82 | memory: "1Gi" 83 | 84 | logLevel: ERROR 85 | 86 | elasticsearch: 87 | enabled: true 88 | # imageTag: 7.17.3 89 | replicas: 1 90 | minimumMasterNodes: 1 91 | # Allow no backup for single node setups 92 | clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" 93 | 94 | resources: 95 | requests: 96 | cpu: "100m" 97 | memory: "512M" 98 | limits: 99 | cpu: "1000m" 100 | memory: "2Gi" 101 | -------------------------------------------------------------------------------- /development/camunda-values.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 
3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | # This is a very small cluster useful for running locally and for development 7 | 8 | global: 9 | image: 10 | tag: latest 11 | identity: 12 | auth: 13 | # Disable the Identity authentication 14 | # it will fall back to basic-auth: demo/demo as default user 15 | enabled: false 16 | 17 | identity: 18 | enabled: false 19 | 20 | optimize: 21 | enabled: false 22 | 23 | connectors: 24 | enabled: true 25 | inbound: 26 | mode: credentials 27 | resources: 28 | requests: 29 | cpu: "100m" 30 | memory: "512M" 31 | limits: 32 | cpu: "1000m" 33 | memory: "2Gi" 34 | env: 35 | - name: CAMUNDA_OPERATE_CLIENT_USERNAME 36 | value: demo 37 | - name: CAMUNDA_OPERATE_CLIENT_PASSWORD 38 | value: demo 39 | 40 | zeebe: 41 | clusterSize: 1 42 | partitionCount: 1 43 | replicationFactor: 1 44 | pvcSize: 1Gi 45 | affinity: 46 | podAntiAffinity: null 47 | resources: 48 | requests: 49 | cpu: "100m" 50 | memory: "512M" 51 | limits: 52 | cpu: "512m" 53 | memory: "2Gi" 54 | 55 | zeebe-gateway: 56 | replicas: 1 57 | 58 | resources: 59 | requests: 60 | cpu: "100m" 61 | memory: "512M" 62 | limits: 63 | cpu: "1000m" 64 | memory: "1Gi" 65 | 66 | logLevel: ERROR 67 | 68 | elasticsearch: 69 | enabled: true 70 | # imageTag: 7.17.3 71 | replicas: 1 72 | minimumMasterNodes: 1 73 | # Allow no backup for single node setups 74 | clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" 75 | 76 | resources: 77 | requests: 78 | cpu: "100m" 79 | memory: "512M" 80 | limits: 81 | cpu: "1000m" 82 | memory: "2Gi" 83 | 84 | # Request smaller persistent volumes. 
85 | volumeClaimTemplate: 86 | accessModes: [ "ReadWriteOnce" ] 87 | storageClassName: "standard" 88 | resources: 89 | requests: 90 | storage: 15Gi 91 | -------------------------------------------------------------------------------- /docs/images/actuator_disabled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/docs/images/actuator_disabled.png -------------------------------------------------------------------------------- /docs/images/actuator_enabled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/docs/images/actuator_enabled.png -------------------------------------------------------------------------------- /docs/images/keycloak_ssl_required.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/docs/images/keycloak_ssl_required.png -------------------------------------------------------------------------------- /docs/images/kibana_dev_console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/docs/images/kibana_dev_console.png -------------------------------------------------------------------------------- /docs/images/webmodeler-zeebe-connect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/docs/images/webmodeler-zeebe-connect.png 
-------------------------------------------------------------------------------- /echo-server/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: echoserver 5 | labels: 6 | app: echoserver 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: echoserver 11 | replicas: 1 12 | template: 13 | metadata: 14 | labels: 15 | app: echoserver 16 | spec: 17 | containers: 18 | - name: echoserver 19 | image: registry.k8s.io/echoserver:1.4 20 | ports: 21 | - containerPort: 80 22 | resources: 23 | limits: 24 | memory: 256Mi 25 | cpu: "250m" 26 | requests: 27 | memory: 128Mi 28 | cpu: "80m" 29 | --- 30 | apiVersion: v1 31 | kind: Service 32 | metadata: 33 | name: echoserver-service 34 | spec: 35 | selector: 36 | app: echoserver 37 | ports: 38 | - name: http 39 | protocol: TCP 40 | port: 8080 41 | targetPort: 8080 42 | type: ClusterIP -------------------------------------------------------------------------------- /echo-server/echo.mk: -------------------------------------------------------------------------------- 1 | #k -n echo annotate ingress echo-server cert-manager.io/cluster-issuer=letsencrypt 2 | 3 | .PHONY: echo 4 | echo: ingress-ip-from-service 5 | kubectl apply -f $(root)/echo-server/deployment.yaml -n $(namespace) 6 | if [ -n "$(baseDomainName)" ]; then \ 7 | cat $(root)/echo-server/ingress.yaml | sed -E "s/YOUR_HOSTNAME/$(subDomainName).$(baseDomainName)/g" | kubectl apply -f - ; \ 8 | else \ 9 | cat $(root)/echo-server/ingress.yaml | sed -E "s/YOUR_HOSTNAME/$(IP).nip.io/g" | kubectl apply -f - ; \ 10 | fi 11 | 12 | .PHONY: clean-echo 13 | clean-echo: 14 | kubectl delete -f ./echo-server/deployment.yaml -n $(namespace) 15 | kubectl delete -f ./echo-server/ingress.yaml -n $(namespace) -------------------------------------------------------------------------------- /echo-server/ingress.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: echo-server 5 | annotations: 6 | # nginx.ingress.kubernetes.io/rewrite-target: /$2 7 | cert-manager.io/cluster-issuer: letsencrypt 8 | nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2/start?rd=$escaped_request_uri 9 | # nginx.ingress.kubernetes.io/auth-response-headers: Authorization 10 | nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2/auth 11 | # nginx.ingress.kubernetes.io/configuration-snippet: | 12 | # auth_request_set $name_upstream_1 $upstream_cookie__oauth2_proxy_1; 13 | 14 | # access_by_lua_block { 15 | # if ngx.var.name_upstream_1 ~= "" then 16 | # ngx.header["Set-Cookie"] = "_oauth2_proxy_1=" .. ngx.var.name_upstream_1 .. ngx.var.auth_cookie:match("(; .*)") 17 | # end 18 | # } 19 | 20 | labels: 21 | app.kubernetes.io/name: echo-server 22 | spec: 23 | ingressClassName: nginx 24 | tls: 25 | - hosts: 26 | - YOUR_HOSTNAME 27 | secretName: tls-secret 28 | rules: 29 | - host: YOUR_HOSTNAME 30 | http: 31 | paths: 32 | - path: /echo 33 | pathType: Prefix 34 | backend: 35 | service: 36 | name: echoserver-service 37 | port: 38 | number: 8080 -------------------------------------------------------------------------------- /google/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | # Already have a Cluster? 
Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | #project ?= camunda-researchanddevelopment 7 | project ?= YOUR_GCP_PROJECT 8 | #region ?= us-east1-b # see: https://cloud.withgoogle.com/region-picker/ 9 | region ?= YOUR_GCP_REGION 10 | clusterName ?= YOUR_CLUSTER_NAME 11 | 12 | machineType ?= n1-standard-16 13 | minSize ?= 1 14 | maxSize ?= 6 15 | 16 | # ------------------------------------ 17 | # The following variables should not be changed except for advanced use cases 18 | ifeq ($(OS),Windows_NT) 19 | root ?= $(CURDIR)/.. 20 | else 21 | root ?= $(shell pwd)/.. 22 | endif 23 | 24 | # Camunda components will be installed into the following Kubernetes namespace 25 | namespace ?= camunda 26 | # Helm release name 27 | release ?= camunda 28 | # Helm chart coordinates for Camunda 29 | chart ?= camunda/camunda-platform 30 | 31 | # This file will be generated by make targets 32 | chartValues ?= camunda-values-google.yaml 33 | 34 | .PHONY: all 35 | all: camunda-values-google.yaml camunda external-urls 36 | 37 | # 0 kube from aks.mk: Create Kubernetes cluster. 
(No application gateway required) 38 | .PHONY: kube 39 | kube: kube-gke 40 | 41 | # 1 create camunda-values-google 42 | camunda-values-google.yaml: 43 | cp $(root)/development/camunda-values-2.yaml $(chartValues) 44 | 45 | # 2 helm install camunda from camunda.mk 46 | 47 | # 3 Show external URLs 48 | .PHONY: external-urls 49 | external-urls: external-urls-no-ingress 50 | 51 | ### <--- End of setup ---> 52 | 53 | # Remove nginx ingress and Camunda from cluster 54 | .PHONY: clean 55 | clean: clean-camunda 56 | rm -f $(chartValues) 57 | 58 | .PHONY: clean-kube 59 | clean-kube: clean-kube-gke 60 | 61 | include $(root)/google/include/kubernetes-gke.mk 62 | include $(root)/include/camunda.mk 63 | include $(root)/bpmn/deploy-models.mk 64 | include $(root)/connectors/connectors.mk 65 | 66 | 67 | -------------------------------------------------------------------------------- /google/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profiles for Camunda 8 on Google Cloud Platform (GCP) 2 | 3 | Create a Camunda 8 self-managed Kubernetes Cluster in 3 Steps: 4 | 5 | Step 1: Setup some [global prerequisites](../README.md#prerequisites) 6 | 7 | Step 2: Setup command line tools for GCP: 8 | 9 | 1. Verify `gcloud` is installed (https://cloud.google.com/sdk/docs/install-sdk) 10 | 11 | gcloud --help 12 | 13 | 2. Make sure you are authenticated. If you don't already have one, you'll need to sign up for a new Google Cloud Account. Then, run the following command and then follow the instructions to authenticate via your browser. 14 | 15 | gcloud auth login 16 | 17 | 3. Setup the gke-cloud-auth-plugin 18 | 19 | gcloud components install gke-gcloud-auth-plugin 20 | 21 | 4. Go into one of the profiles in the `google` folder and use the `Makefile` to create a GKE cluster 22 | 23 | e.g. `cd` into the `ingress/nginx/tls` directory and see the [README.md](./ingress/nginx/tls/README.md) for more.
24 | -------------------------------------------------------------------------------- /google/benchmark/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Google benchmark with chaos-mesh 2 | 3 | If this is your first time here, make sure you have [installed the prerequisites](../../../README.md). 4 | 5 | After you've installed the prerequisites, follow these steps: 6 | 7 | Open a Terminal and `cd` into this directory 8 | 9 | Edit the [Makefile](Makefile) found in this directory and set the following bash variables so that they are appropriate for your specific environment. 10 | 11 | If you don't have a Kubernetes cluster, the values provided will be used to create a new cluster. Otherwise, the values are used to connect and manage an existing cluster. 12 | 13 | ``` 14 | project ?= camunda-researchanddevelopment 15 | region ?= us-east1-b # see: https://cloud.withgoogle.com/region-picker/ 16 | clusterName ?= CLUSTER_NAME 17 | # Azure provides temporary dns names such as: MY_DOMAIN_NAME.region.cloudapp.azure.com 18 | # However GCP does not. DNS names will be configured using https://nip.io 19 | # dnsLabel ?= MY_DOMAIN_NAME 20 | machineType ?= n1-standard-16 21 | minSize ?= 1 22 | maxSize ?= 6 23 | gcpClientId ?= GCP_CLIENT_ID # see: https://chaos-mesh.org/docs/next/gcp-authentication/ 24 | gcpClientSecret ?= GCP_CLIENT_SECRET # see: https://chaos-mesh.org/docs/next/gcp-authentication/ 25 | ``` 26 | 27 | If you need to create a new GKE Cluster, run `make kube`. 28 | 29 | Once you have a GKE Cluster, run `make` to do the following: 30 | 31 | 1. Set up a Camunda cluster 32 | 2. Deploy the model 33 | 3. Rebalance partitions 34 | 4. install the chaos-experiment as described in `chaos-network-brokers.yaml` 35 | 5. execute the benchmark 36 | 37 | You can re-install this profile easily. First run `make clean` to remove all kubernetes objects created by `make`. Then, re-run `make` to re-install. 
38 | 39 | WARNING!!! This will completely destroy your cluster and everything inside of it!!! To completely delete your cluster, run `make clean-kube`. 40 | 41 | See the main README for [Troubleshooting, Tips, and Tricks](../../../../README.md#troubleshooting-tips-and-tricks) -------------------------------------------------------------------------------- /google/benchmark/chaos-mesh-template.yaml: -------------------------------------------------------------------------------- 1 | 2 | dashboard: 3 | gcpSecurityMode: true 4 | gcpClientId: YOUR_GCP_CLIENT_ID 5 | gcpClientSecret: YOUR_GCP_CLIENT_SECRET 6 | rootUrl: http://localhost:8080 -------------------------------------------------------------------------------- /google/benchmark/chaos-network-brokers.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkChaos 2 | apiVersion: chaos-mesh.org/v1alpha1 3 | metadata: 4 | namespace: default 5 | name: chaos-network-brokers 6 | spec: 7 | selector: 8 | namespaces: 9 | - camunda 10 | pods: 11 | camunda: 12 | - camunda-zeebe-4 13 | - camunda-zeebe-5 14 | - camunda-zeebe-6 15 | - camunda-zeebe-7 16 | mode: all 17 | action: delay 18 | delay: 19 | latency: 35ms 20 | correlation: '100' 21 | jitter: '0' 22 | direction: both 23 | target: 24 | selector: 25 | namespaces: 26 | - camunda 27 | pods: 28 | camunda: 29 | - camunda-zeebe-0 30 | - camunda-zeebe-1 31 | - camunda-zeebe-2 32 | - camunda-zeebe-3 33 | mode: all 34 | -------------------------------------------------------------------------------- /google/include/ssd-storageclass-gke.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: ssd 6 | provisioner: kubernetes.io/gce-pd 7 | parameters: 8 | type: pd-ssd 9 | reclaimPolicy: Delete -------------------------------------------------------------------------------- /google/ingress/nginx/Makefile: 
-------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | # Already have a Cluster? Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | #project ?= camunda-researchanddevelopment 7 | project ?= YOUR_GCP_PROJECT 8 | #region ?= us-east1-b # see: https://cloud.withgoogle.com/region-picker/ 9 | region ?= YOUR_GCP_REGION 10 | clusterName ?= YOUR_CLUSTER_NAME 11 | 12 | machineType ?= n1-standard-16 13 | minSize ?= 1 14 | maxSize ?= 6 15 | 16 | # Configure the fully qualified domain name 17 | # The dnsLabel is the first part of the domain address. It will be used no matter what baseDomain you configure below 18 | dnsLabel ?= YOUR_DNS_LABEL 19 | 20 | # By default, we'll use nip.io (See more at [https://nip.io](http://nip.io) ) 21 | # The fully qualified domain name will look something like ..nip.io 22 | baseDomainName ?= nip.io 23 | 24 | # Another option is to replace baseDomainName with your own domain name 25 | # In this case, the fully qualified domain name will look like . 26 | # baseDomainName ?= YOUR_CUSTOM_DOMAIN_NAME 27 | 28 | 29 | # ------------------------------------ 30 | # The following variables should not be changed except for advanced use cases 31 | ifeq ($(OS),Windows_NT) 32 | root ?= $(CURDIR)/../../.. 33 | else 34 | root ?= $(shell pwd)/../../.. 35 | endif 36 | 37 | # Camunda components will be installed into the following Kubernetes namespace 38 | namespace ?= camunda 39 | # Helm release name 40 | release ?= camunda 41 | # Helm chart coordinates for Camunda 42 | chart ?= camunda/camunda-platform 43 | 44 | # This file will be generated by make targets 45 | chartValues ?= camunda-values-ingress.yaml 46 | 47 | .PHONY: all 48 | all: ingress-nginx camunda-values-ingress.yaml camunda external-urls 49 | 50 | # 0 kube from aks.mk: Create Kubernetes cluster. 
(No application gateway required) 51 | .PHONY: kube 52 | kube: kube-gke 53 | 54 | # 1 install nginx ingress controller 55 | 56 | # 2 create camunda-values-ingress.yaml 57 | 58 | # 3 helm install camunda from camunda.mk 59 | 60 | # 4 Show external URLs 61 | .PHONY: external-urls 62 | external-urls: external-urls-with-fqdn 63 | 64 | # Remove nginx ingress and Camunda from cluster 65 | .PHONY: clean 66 | clean: clean-camunda clean-ingress 67 | rm -f $(chartValues) 68 | 69 | .PHONY: clean-kube 70 | clean-kube: clean-kube-gke 71 | 72 | include $(root)/google/include/kubernetes-gke.mk 73 | include $(root)/include/camunda.mk 74 | include $(root)/bpmn/deploy-models.mk 75 | include $(root)/connectors/connectors.mk 76 | include $(root)/include/ingress-nginx.mk 77 | 78 | 79 | -------------------------------------------------------------------------------- /google/ingress/nginx/tls/set-env-gcp.sh: -------------------------------------------------------------------------------- 1 | export certEmail="xxx" 2 | export clusterName="dave01" 3 | export project="xxx" 4 | 5 | export camundaVersion=8.6.6 6 | export camundaHelmVersion=11.1.0 7 | 8 | export dnsLabel="dave01" 9 | export baseDomainName="gke.c8sm.com" 10 | export dnsManagedZone="c8sm" 11 | 12 | #export region="us-central1-a" 13 | export region="us-east4-a" 14 | export namespace="camunda" 15 | 16 | export machineType=n1-standard-16 17 | export minSize=1 18 | export maxSize=50 19 | 20 | export camundaDockerRegistrySecretName="camunda-docker-registry" 21 | export camundaDockerRegistryUrl="https://registry.camunda.cloud/" 22 | export camundaDockerRegistryUsername="xxx" 23 | export camundaDockerRegistryPassword="xxx" 24 | export camundaDockerRegistryEmail="xxx" -------------------------------------------------------------------------------- /google/keycloak/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific
environment 3 | # Already have a Cluster? Set these values to point to your existing environment 4 | # Otherwise, these values will be used to create a new Cluster 5 | 6 | project ?= camunda-researchanddevelopment 7 | region ?= us-east1-b # see: https://cloud.withgoogle.com/region-picker/ 8 | clusterName ?= CLUSTER_NAME 9 | 10 | keycloakChartVersion ?= 13.0.2 11 | keycloakVersion ?= 19.0.3 12 | 13 | keycloakAdminUser ?= admin 14 | keycloakBase64EncodedAdminPassword ?= WTg1a2hvYklyYg== 15 | keycloakBase64EncodedManagementPassword ?= NEtHclRnbTJjWg== 16 | keycloakContextPath ?= / 17 | # Azure provides temporary dns names such as: MY_DOMAIN_NAME.region.cloudapp.azure.com 18 | # However GCP does not. DNS names will be configured using https://nip.io 19 | # To use a custom hostname, uncomment the following, 20 | # and change to use `keycloak-values-hostname` instead of `keycloak-values-ip` 21 | # keycloakHostName ?= KEYCLOAK_HOSTNAME 22 | 23 | machineType ?= n1-standard-16 24 | minSize ?= 1 25 | maxSize ?= 6 26 | certEmail ?= YOUR_EMAIL@yourdomain.com 27 | 28 | 29 | # ------------------------------------ 30 | # The following variables should not be changed except for advanced use cases 31 | 32 | ifeq ($(OS),Windows_NT) 33 | root ?= $(CURDIR)/../.. 34 | else 35 | root ?= $(shell pwd)/../.. 36 | endif 37 | 38 | namespace ?= keycloak 39 | 40 | 41 | .PHONY: all 42 | all: cert-manager letsencrypt-staging ingress-nginx-tls create-namespace-keycloak create-secret-keycloak keycloak-values-ip install-keycloak 43 | 44 | # 0 kube from cluster.mk: Create Kubernetes cluster. 
45 | .PHONY: kube 46 | kube: kube-gke 47 | 48 | # 1 cert-manager from cert-manager.mk: create certificate manager for tls 49 | 50 | # 2 letsencrypt-staging/-prod from cert-manager.mk: create letsencrypt cert issuer 51 | 52 | # 3 create nginx ingress controller with dns and tls 53 | 54 | # 4 create keycloak namespace 55 | 56 | # 5 create keycloak secret (this will be reused when installing camunda) 57 | 58 | # 6 create keycloak values (by default uses `keycloak-values-ip`. `keycloak-values-hostname` is also available) 59 | 60 | # 7 helm install keycloak using `keycloak-values.yaml` 61 | 62 | .PHONY: clean 63 | clean: clean-keycloak 64 | 65 | .PHONY: clean-kube 66 | clean-kube: clean-kube-gke 67 | 68 | 69 | include $(root)/keycloak/keycloak.mk 70 | include $(root)/include/cert-manager.mk 71 | include $(root)/include/ingress-nginx.mk 72 | 73 | include $(root)/google/include/kubernetes-gke.mk 74 | -------------------------------------------------------------------------------- /google/multi-region/active-active/dns-lb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | # TODO: Check whether AWS/Azure can use internal load balancers. Google 6 | # can't, unfortunately. 
7 | # service.beta.kubernetes.io/aws-load-balancer-internal: "true" 8 | # service.beta.kubernetes.io/azure-load-balancer-internal: "true" 9 | # TODO Falko try this: 10 | # cloud.google.com/load-balancer-type: "Internal" 11 | labels: 12 | k8s-app: kube-dns 13 | name: kube-dns-lb 14 | namespace: kube-system 15 | spec: 16 | ports: 17 | - name: dns 18 | port: 53 19 | protocol: UDP 20 | targetPort: 53 21 | selector: 22 | k8s-app: kube-dns 23 | sessionAffinity: None 24 | type: LoadBalancer 25 | -------------------------------------------------------------------------------- /google/multi-region/active-active/teardown-zeebe.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from shutil import rmtree 4 | from subprocess import call 5 | 6 | # Before running the script, fill in appropriate values for all the parameters 7 | # above the dashed line. You should use the same values when tearing down a 8 | # cluster that you used when setting it up. 
9 | 10 | # To get the names of your kubectl "contexts" for each of your clusters, run: 11 | # kubectl config get-contexts 12 | contexts = { 13 | 'europe-west4-b': 'gke_camunda-researchanddevelopment_europe-west4-b_cdame-region-0', 14 | 'us-east1-b': 'gke_camunda-researchanddevelopment_us-east1-b_cdame-region-1', 15 | } 16 | 17 | certs_dir = './certs' 18 | ca_key_dir = './my-safe-directory' 19 | generated_files_dir = './generated' 20 | 21 | # ------------------------------------------------------------------------------ 22 | 23 | # Delete each cluster's special zone-scoped namespace, which transitively 24 | # deletes all resources that were created in the namespace, along with the few 25 | # other resources we created that weren't in that namespace 26 | for zone, context in contexts.items(): 27 | call(['kubectl', 'delete', 'namespace', zone, '--context', context]) 28 | # call(['kubectl', 'delete', 'secret', 'cockroachdb.client.root', '--context', context]) 29 | # call(['kubectl', 'delete', '-f', 'external-name-svc.yaml', '--context', context]) 30 | call(['kubectl', 'delete', '-f', 'dns-lb.yaml', '--context', context]) 31 | call(['kubectl', 'delete', 'configmap', 'kube-dns', '--namespace', 'kube-system', '--context', context]) 32 | # Restart the DNS pods to clear out our stub-domains configuration. 
33 | call(['kubectl', 'delete', 'pods', '-l', 'k8s-app=kube-dns', '--namespace', 'kube-system', '--context', context]) 34 | 35 | try: 36 | rmtree(certs_dir) 37 | except OSError: 38 | pass 39 | try: 40 | rmtree(ca_key_dir) 41 | except OSError: 42 | pass 43 | try: 44 | rmtree(generated_files_dir) 45 | except OSError: 46 | pass 47 | -------------------------------------------------------------------------------- /high-available-webapps/camunda-values.yaml: -------------------------------------------------------------------------------- 1 | zeebe: 2 | env: 3 | - name: ZEEBE_BROKER_EXPORTERS_ELASTICSEARCH_ARGS_INDEX_NUMBEROFREPLICAS 4 | value: "1" 5 | 6 | identity: 7 | keycloak: 8 | replicaCount: 3 9 | postgresql: 10 | architecture: replication 11 | replication: 12 | synchronousCommit: "on" 13 | numSynchronousReplicas: 2 14 | readReplicas: 15 | replicaCount: 2 16 | 17 | connectors: 18 | replicas: 3 19 | 20 | operate: 21 | env: 22 | - name: CAMUNDA_OPERATE_ELASTICSEARCH_NUMBEROFREPLICAS 23 | value: "1" 24 | 25 | tasklist: 26 | env: 27 | - name: CAMUNDA_TASKLIST_ELASTICSEARCH_NUMBEROFREPLICAS 28 | value: "1" 29 | 30 | postgresql: ## https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha ? 
31 | enabled: true 32 | architecture: replication 33 | replication: 34 | synchronousCommit: "on" 35 | numSynchronousReplicas: 2 36 | readReplicas: 37 | replicaCount: 2 38 | 39 | elasticsearch: 40 | master: 41 | replicaCount: 3 42 | global: 43 | kibanaEnabled: true 44 | kibana: 45 | image: 46 | tag: 8.7.1 47 | replicaCount: 2 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /high-available-webapps/identity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/high-available-webapps/identity.png -------------------------------------------------------------------------------- /high-available-webapps/operate-ingress.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/templates/ingress.yaml 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: operate-ingress 7 | labels: 8 | app.kubernetes.io/app: operate-ingress 9 | annotations: 10 | ingress.kubernetes.io/rewrite-target: / 11 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 12 | nginx.ingress.kubernetes.io/affinity: "cookie" 13 | nginx.ingress.kubernetes.io/session-cookie-name: "operate-route" 14 | nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" 15 | spec: 16 | ingressClassName: nginx 17 | rules: 18 | - host: YOUR_HOSTNAME 19 | http: 20 | paths: 21 | - backend: 22 | service: 23 | name: camunda-operate 24 | port: 25 | number: 80 26 | path: /operate 27 | pathType: Prefix 28 | tls: 29 | - hosts: 30 | - YOUR_HOSTNAME 31 | secretName: tls-secret -------------------------------------------------------------------------------- /high-available-webapps/operate-tasklist.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/high-available-webapps/operate-tasklist.png -------------------------------------------------------------------------------- /high-available-webapps/optimize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/high-available-webapps/optimize.png -------------------------------------------------------------------------------- /include/README.md: -------------------------------------------------------------------------------- 1 | This folder does not contain a Helm profile but common scripts and configuration files, e.g. for GNU Make, 2 | that are shared among multiple profiles. 3 | 4 | [camunda.mk](camunda.mk) is the installation script that is included in the `Makefile` of all Helm profiles. 5 | It contains targets to install, update, and remove Camunda via Helm as well as 6 | watch/await pods, get logs, forward ports, and open URLs with `kubectl` 7 | 8 | [ingress-nginx.mk](ingress-nginx.mk) is the installation script for an Nginx Ingress Controller and getting its IP. 
9 | 10 | [cert-manager.mk](cert-manager.mk) is the installation script for setting up Kubernetes objects for issuing letsencrypt tls certificates 11 | -------------------------------------------------------------------------------- /include/cacerts_staging: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/include/cacerts_staging -------------------------------------------------------------------------------- /include/cert-manager.mk: -------------------------------------------------------------------------------- 1 | # https://cert-manager.io/docs/tutorials/acme/nginx-ingress/ 2 | prodserver ?= https:\\/\\/acme-v02.api.letsencrypt.org/directory 3 | 4 | .PHONY: cert-manager 5 | cert-manager: 6 | helm repo add jetstack https://charts.jetstack.io 7 | helm repo update 8 | helm install cert-manager jetstack/cert-manager \ 9 | --namespace cert-manager \ 10 | --create-namespace \ 11 | --version v1.9.1 \ 12 | --set installCRDs=true 13 | 14 | .PHONY: letsencrypt-staging 15 | letsencrypt-staging: 16 | cat $(root)/include/letsencrypt-stage.yaml | sed -E "s/someone@somewhere.io/$(certEmail)/g" | kubectl apply -n cert-manager -f - 17 | 18 | .PHONY: letsencrypt-prod 19 | letsencrypt-prod: 20 | cat $(root)/include/letsencrypt-prod.yaml | sed -E "s/someone@somewhere.io/$(certEmail)/g" | kubectl apply -n cert-manager -f - 21 | 22 | #TODO: succeeds, but does not seem to have right effect 23 | .PHONY: letsencrypt-prod-patch 24 | letsencrypt-prod-patch: 25 | kubectl patch ClusterIssuer letsencrypt --type json -p '[{"op": "replace", "path": "/spec/acme/server", "value":"$(prodserver)"}]' 26 | kubectl describe ClusterIssuer letsencrypt | grep letsencrypt.org 27 | 28 | .PHONY: annotate-remove-ingress-tls 29 | annotate-remove-ingress-tls: 30 | kubectl -n $(namespace) annotate ingress camunda-camunda-platform
cert-manager.io/cluster-issuer- 31 | $(MAKE) get-ingress 32 | 33 | .PHONY: annotate-ingress-tls 34 | annotate-ingress-tls: annotate-remove-ingress-tls 35 | kubectl -n $(namespace) annotate ingress camunda-camunda-platform cert-manager.io/cluster-issuer=letsencrypt 36 | $(MAKE) get-ingress 37 | 38 | .PHONY: annotate-letsencrypt-stage 39 | annotate-letsencrypt-stage: annotate-remove-ingress-tls 40 | kubectl -n $(namespace) annotate ingress camunda-camunda-platform cert-manager.io/cluster-issuer=letsencrypt-stage 41 | $(MAKE) get-ingress 42 | 43 | # clean cert-manager and cluster issuer 44 | .PHONY: clean-cert-manager 45 | clean-cert-manager: 46 | helm --namespace cert-manager delete cert-manager 47 | kubectl delete namespace cert-manager 48 | 49 | # create a secret containing a cacerts truststore containing the lets encrypt staging CA certificates 50 | .PHONY: cacerts-staging 51 | cacerts-staging: 52 | -kubectl create secret generic "cacerts-staging" \ 53 | --namespace=$(namespace) \ 54 | --from-file=cacerts_staging=$(root)/include/cacerts_staging 55 | 56 | .PHONY: get-cert-requests 57 | get-cert-requests: 58 | -kubectl get certificaterequests --namespace $(namespace) 59 | 60 | .PHONY: get-cert-orders 61 | get-cert-orders: 62 | -kubectl get orders --namespace $(namespace) 63 | -------------------------------------------------------------------------------- /include/letsencrypt-prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt 5 | spec: 6 | acme: 7 | # Prod: https://acme-v02.api.letsencrypt.org/directory 8 | # Staging: https://acme-staging-v02.api.letsencrypt.org/directory 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: someone@somewhere.io 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: 
letsencrypt-account-key 15 | # Enable the HTTP-01 challenge provider 16 | solvers: 17 | - http01: 18 | ingress: 19 | class: nginx 20 | podTemplate: 21 | spec: 22 | nodeSelector: 23 | "kubernetes.io/os": linux -------------------------------------------------------------------------------- /include/letsencrypt-stage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-stage 5 | spec: 6 | acme: 7 | # Prod: https://acme-v02.api.letsencrypt.org/directory 8 | # Staging: https://acme-staging-v02.api.letsencrypt.org/directory 9 | server: https://acme-staging-v02.api.letsencrypt.org/directory 10 | preferredChain: "(STAGING) Pretend Pear X1" 11 | # Email address used for ACME registration 12 | email: someone@somewhere.io 13 | # Name of a secret used to store the ACME account private key 14 | privateKeySecretRef: 15 | name: letsencrypt-account-key 16 | # Enable the HTTP-01 challenge provider 17 | solvers: 18 | - http01: 19 | ingress: 20 | class: nginx 21 | podTemplate: 22 | spec: 23 | nodeSelector: 24 | "kubernetes.io/os": linux -------------------------------------------------------------------------------- /include/rebalance-leader-job.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: leader-balancer 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - image: "curlimages/curl:7.87.0" 10 | name: curl 11 | args: [ "-L", "-v", "-X", "POST", "http://RELEASE_NAME-zeebe-gateway:9600/actuator/rebalance" ] 12 | restartPolicy: Never -------------------------------------------------------------------------------- /include/values.yaml: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/include/values.yaml -------------------------------------------------------------------------------- /include/zbctl-deploy-job-with-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: zbctl-deploy 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: zbctl-deploy 10 | image: upgradingdave/zbctl 11 | command: ["zbctl", "--host", "camunda-zeebe-gateway", "--insecure", "--authzUrl", "http://camunda-keycloak/auth/realms/camunda-platform/protocol/openid-connect/token", "--clientId", "zeebe", "--clientSecret", "$(CLIENT_SECRET)" ,"deploy", "/models/CamundaProcess.bpmn"] 12 | volumeMounts: 13 | - name: models 14 | mountPath: /models/CamundaProcess.bpmn 15 | subPath: CamundaProcess.bpmn 16 | readOnly: true 17 | env: 18 | - name: CLIENT_SECRET 19 | valueFrom: 20 | secretKeyRef: 21 | name: camunda-zeebe-identity-secret 22 | key: zeebe-secret 23 | volumes: 24 | - name: models 25 | configMap: 26 | name: models 27 | restartPolicy: Never 28 | -------------------------------------------------------------------------------- /include/zbctl-deploy-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: zbctl-deploy 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: zbctl-deploy 10 | image: upgradingdave/zbctl 11 | command: ["zbctl", "--host", "camunda-zeebe-gateway", "--insecure", "deploy", "/models/CamundaProcess.bpmn"] 12 | volumeMounts: 13 | - name: models 14 | mountPath: /models/CamundaProcess.bpmn 15 | subPath: CamundaProcess.bpmn 16 | readOnly: true 17 | volumes: 18 | - name: models 19 | configMap: 20 | name: models 21 | restartPolicy: Never 22 | -------------------------------------------------------------------------------- 
/ingress-nginx/Camunda 8 and Nginx.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/ingress-nginx/Camunda 8 and Nginx.png -------------------------------------------------------------------------------- /ingress-nginx/Makefile: -------------------------------------------------------------------------------- 1 | namespace ?= camunda 2 | release ?= camunda 3 | chart ?= camunda/camunda-platform 4 | 5 | chartValues ?= "camunda-values.yaml" 6 | 7 | 8 | .PHONY: all 9 | all: ingress camunda 10 | 11 | .PHONY: clean 12 | clean: clean-camunda clean-ingress 13 | rm -f camunda-values-nginx.yaml 14 | 15 | include ../include/ingress-nginx.mk 16 | include ../include/camunda.mk 17 | -------------------------------------------------------------------------------- /ingress-nginx/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Ingress NGINX 2 | 3 | A configuration for Camunda Platform 8 4 | that uses [NGINX](https://www.nginx.com/products/nginx-ingress-controller/) 5 | as an [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/). 6 | 7 | This folder contains a [Helm](https://helm.sh/) [values file](camunda-values.yaml) 8 | for installing the [Camunda Platform Helm Chart](https://helm.camunda.io/) 9 | on an existing Kubernetes cluster (if you don't have one yet, 10 | see [Camunda 8 Kubernetes Installation](https://github.com/camunda-community-hub/camunda8-greenfield-installation)). 11 | A [Makefile](Makefile) is provided to automate the installation process. 
12 | 13 | ![Camunda 8 and NGINX](Camunda%208%20and%20Nginx.png) 14 | 15 | ## Installation 16 | 17 | Configure the desired Kubernetes `namespace`, Helm `release` name, and Helm `chart` in [Makefile](Makefile) 18 | and run: 19 | 20 | ```sh 21 | make 22 | ``` 23 | 24 | If `make` is correctly configured, you should also get tab completion for all available make targets. 25 | 26 | ## Uninstall 27 | ```sh 28 | make clean 29 | ``` 30 | 31 | ## Troubleshooting 32 | 33 | 34 | 35 | ### Keycloak requires SSL for requests from publicly routed IP addresses 36 | 37 | > Users can interact with Keycloak without SSL so long as they stick to private IP addresses like localhost, 127.0.0.1, 10.x.x.x, 192.168.x.x, and 172.16.x.x. If you try to access Keycloak without SSL from a non-private IP address you will get an error. 38 | 39 | If your k8s cluster does not use "private" IP addresses for internal communication, i.e. it does not resolve the internal service names to "private" IP addresses, then you can apply the following procedure: 40 | 41 | Use the Keycloak UI to set "Require SSL" to "none" for both the Master realm (Keycloak needs a restart after that) and the then created Camunda Platform realm. We did an Identity restart afterwards, e.g. by deleting the pod, but it should also work if the crash loop does one more round. 42 | -------------------------------------------------------------------------------- /ingress-nginx/camunda-values.old.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 
3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | global: 7 | identity: 8 | auth: 9 | publicIssuerUrl: "http://keycloak.127.0.0.1.nip.io/auth/realms/camunda-platform" 10 | operate: 11 | redirectUrl: "http://operate.127.0.0.1.nip.io" 12 | tasklist: 13 | redirectUrl: "http://tasklist.127.0.0.1.nip.io" 14 | optimize: 15 | redirectUrl: "http://optimize.127.0.0.1.nip.io" 16 | operate: 17 | ingress: 18 | enabled: true 19 | host: "operate.127.0.0.1.nip.io" 20 | tasklist: 21 | ingress: 22 | enabled: true 23 | host: "tasklist.127.0.0.1.nip.io" 24 | optimize: 25 | env: 26 | - name: CAMUNDA_OPTIMIZE_EMAIL_ACCESS_URL 27 | value: "http://optimize.127.0.0.1.nip.io" 28 | ingress: 29 | enabled: true 30 | host: "optimize.127.0.0.1.nip.io" 31 | identity: 32 | env: 33 | - name: IDENTITY_URL 34 | value: "http://identity.127.0.0.1.nip.io" 35 | keycloak: 36 | ingress: 37 | enabled: true 38 | ingressClassName: nginx 39 | hostname: "keycloak.127.0.0.1.nip.io" 40 | extraEnvVars: 41 | - name: KEYCLOAK_PROXY_ADDRESS_FORWARDING 42 | value: "true" 43 | - name: KEYCLOAK_FRONTEND_URL 44 | value: "http://keycloak.127.0.0.1.nip.io" 45 | ingress: 46 | enabled: true 47 | host: "identity.127.0.0.1.nip.io" 48 | 49 | # Uncomment for smaller setup e.g. 
for dev/testing 50 | # zeebe: 51 | # clusterSize: 1 52 | # partitionCount: 1 53 | # replicationFactor: 1 54 | # pvcSize: 1Gi 55 | 56 | # resources: 57 | # requests: 58 | # cpu: "100m" 59 | # memory: "512M" 60 | # limits: 61 | # cpu: "512m" 62 | # memory: "2Gi" 63 | 64 | # zeebe-gateway: 65 | # replicas: 1 66 | 67 | # resources: 68 | # requests: 69 | # cpu: "100m" 70 | # memory: "512M" 71 | # limits: 72 | # cpu: "1000m" 73 | # memory: "1Gi" 74 | 75 | # logLevel: ERROR 76 | 77 | # elasticsearch: 78 | # enabled: true 79 | # imageTag: 7.17.3 80 | # replicas: 1 81 | # minimumMasterNodes: 1 82 | # # Allow no backup for single node setups 83 | # clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s" 84 | 85 | # resources: 86 | # requests: 87 | # cpu: "100m" 88 | # memory: "512M" 89 | # limits: 90 | # cpu: "1000m" 91 | # memory: "2Gi" -------------------------------------------------------------------------------- /istio/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profile for configuring Istio 2 | 3 | Istio Gateway and Virtual Services can be used as an alternative to nginx ingress. This directory contains yaml files 4 | and scripts to expose Camunda 8 Self Managed components using Istio. 5 | 6 | # Prerequisites 7 | 8 | ## Install `istoctl` 9 | 10 | Download from here: https://istio.io/latest/docs/setup/getting-started/#download 11 | 12 | # Install Istio Components 13 | 14 | Run the following (I guess it connects using kubeconfig?) 15 | 16 | ```shell 17 | istioctl install --set profile=demo -y 18 | ``` 19 | # Configure the Istio bookinfo demo app 20 | 21 | Label the namespace: 22 | 23 | ```shell 24 | kubectl label namespace camunda istio-injection=enabled 25 | ``` 26 | 27 | Install the demo application. The `bookinfo` demo application is downloaded when you install istioctl. 
You can use 28 | `bookinfo` demo to verify that istio works in your kuberenetes environment: 29 | 30 | ```shell 31 | kubectl apply -f bookinfo/platform/kube/bookinfo.yaml 32 | ``` 33 | 34 | Verify things are working: 35 | ```shell 36 | kubectl exec "$(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}')" -c ratings -- curl -sS productpage:9080/productpage | grep -o ".*" 37 | ``` 38 | 39 | Setup gateway: 40 | ```shell 41 | kubectl apply -f bookinfo/networking/bookinfo-gateway.yaml 42 | ``` 43 | 44 | Find endpoint: 45 | ```shell 46 | kubectl get svc istio-ingressgateway -n istio-system 47 | ``` 48 | 49 | Test it out: 50 | [https:///productpage](https:///productpage) 51 | 52 | How to view logs of sidecar proxy containers: 53 | ```shell 54 | kubectl logs camunda-keycloak-0 -c istio-proxy -n camunda 55 | ``` 56 | 57 | # Configure the Camunda Environment to use Istio 58 | 59 | TODO: provide steps on how to configure camunda. Look in [istio.mk](istio.mk) file for steps. 60 | 61 | # Configuring Istio in a local kubernetes environment: 62 | 63 | https://github.com/jessesimpson36/tmp-camunda-istio 64 | -------------------------------------------------------------------------------- /istio/gateway.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: RELEASE-gateway 5 | spec: 6 | # The selector matches the ingress gateway pod labels. 
7 | # If you installed Istio using Helm following the standard documentation, this would be "istio=ingress" 8 | selector: 9 | istio: ingressgateway # use istio default controller 10 | servers: 11 | - port: 12 | number: 8080 13 | name: http-8080 14 | protocol: HTTP 15 | hosts: 16 | - "*" 17 | - port: 18 | number: 80 19 | name: http-80 20 | protocol: HTTP 21 | hosts: 22 | - "*" 23 | tls: 24 | httpsRedirect: true 25 | - port: 26 | number: 443 27 | name: https-443 28 | protocol: HTTP 29 | hosts: 30 | - "*" -------------------------------------------------------------------------------- /istio/identity.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: VirtualService 3 | metadata: 4 | name: RELEASE-keycloak-vs 5 | spec: 6 | hosts: 7 | - "keycloak.aws.c8sm.com" 8 | gateways: 9 | - RELEASE-gateway 10 | http: 11 | - match: 12 | - port: 443 13 | route: 14 | - destination: 15 | host: RELEASE-keycloak 16 | port: 17 | number: 80 18 | headers: 19 | request: 20 | set: 21 | x-forwarded-proto: https -------------------------------------------------------------------------------- /istio/istio.mk: -------------------------------------------------------------------------------- 1 | .PHONY: istio-analyze 2 | istio-analyze: 3 | istioctl analyze 4 | 5 | .PHONY: istio-install 6 | istio-install: 7 | istioctl install --set profile=demo -y # I don't know why profile=demo works, but the default profile doesn't work? it might have to do with egress? 
8 | 9 | .PHONY: istio-external-url 10 | istio-external-url: 11 | kubectl get svc istio-ingressgateway -n istio-system 12 | 13 | .PHONY: istio-label-ns 14 | istio-label-ns: namespace 15 | kubectl label namespace $(namespace) istio-injection=enabled 16 | 17 | .PHONY: istio-gateway 18 | istio-gateway: 19 | cat $(root)/istio/gateway.tpl.yaml | sed -e "s/RELEASE/$(release)/g" | kubectl apply -n $(namespace) -f - 20 | 21 | .PHONY: istio-gateway-logs 22 | istio-gateway-logs: 23 | kubectl logs -f service/istio-ingressgateway -n istio-system 24 | 25 | .PHONY: istio-tasklist 26 | istio-tasklist: 27 | cat $(root)/istio/tasklist.tpl.yaml | sed -e "s/RELEASE/$(release)/g" | kubectl apply -n $(namespace) -f - 28 | 29 | .PHONY: istio-operate 30 | istio-operate: 31 | cat $(root)/istio/operate.tpl.yaml | sed -e "s/RELEASE/$(release)/g" | kubectl apply -n $(namespace) -f - 32 | 33 | .PHONY: istio-keycloak 34 | istio-keycloak: 35 | cat $(root)/istio/keycloak.tpl.yaml | sed -e "s/RELEASE/$(release)/g" | kubectl apply -n $(namespace) -f - 36 | 37 | .PHONY: istio-identity 38 | istio-identity: 39 | cat $(root)/istio/identity.tpl.yaml | sed -e "s/RELEASE/$(release)/g" | kubectl apply -n $(namespace) -f - 40 | 41 | .PHONY: istio-optimize 42 | istio-optimize: 43 | cat $(root)/istio/optimize.tpl.yaml | sed -e "s/RELEASE/$(release)/g" | kubectl apply -n $(namespace) -f - 44 | 45 | .PHONY: istio-virtual-services 46 | istio-virtual-services: istio-keycloak istio-operate istio-tasklist istio-identity istio-optimize istio-analyze 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /istio/keycloak.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: VirtualService 3 | metadata: 4 | name: RELEASE-keycloak-vs 5 | spec: 6 | hosts: 7 | - "keycloak.aws.c8sm.com" 8 | gateways: 9 | - RELEASE-gateway 10 | http: 11 | - match: 12 | - port: 443 13 | route: 14 
| - destination: 15 | host: RELEASE-keycloak 16 | port: 17 | number: 80 18 | headers: 19 | request: 20 | set: 21 | x-forwarded-proto: https 22 | 23 | -------------------------------------------------------------------------------- /istio/operate.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: VirtualService 3 | metadata: 4 | name: RELEASE-operate-vs 5 | spec: 6 | hosts: 7 | - "operate.aws.c8sm.com" 8 | gateways: 9 | - RELEASE-gateway 10 | http: 11 | - match: 12 | - port: 443 13 | route: 14 | - destination: 15 | host: RELEASE-operate 16 | port: 17 | number: 80 18 | headers: 19 | request: 20 | set: 21 | x-forwarded-proto: https -------------------------------------------------------------------------------- /istio/optimize.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: VirtualService 3 | metadata: 4 | name: RELEASE-optimize-vs 5 | spec: 6 | hosts: 7 | - "optimize.aws.c8sm.com" 8 | gateways: 9 | - RELEASE-gateway 10 | http: 11 | - match: 12 | - port: 443 13 | route: 14 | - destination: 15 | host: RELEASE-optimize 16 | port: 17 | number: 80 18 | headers: 19 | request: 20 | set: 21 | x-forwarded-proto: https -------------------------------------------------------------------------------- /istio/tasklist.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: VirtualService 3 | metadata: 4 | name: RELEASE-tasklist-vs 5 | spec: 6 | hosts: 7 | - "tasklist.aws.c8sm.com" 8 | gateways: 9 | - RELEASE-gateway 10 | http: 11 | - match: 12 | - port: 443 13 | route: 14 | - destination: 15 | host: RELEASE-tasklist 16 | port: 17 | number: 80 18 | headers: 19 | request: 20 | set: 21 | x-forwarded-proto: https 22 | -------------------------------------------------------------------------------- 
/keycloak/README.md: -------------------------------------------------------------------------------- 1 | This folder does not contain a Helm profile but common scripts and configuration files, e.g. for GNU Make, 2 | that are shared among multiple profiles. 3 | 4 | See the [Google Keycloak Profile README](../google/keycloak/README.md) for more details about setting up a Camunda environment with an external Keycloak 5 | 6 | -------------------------------------------------------------------------------- /keycloak/keycloak-secrets.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/charts/identity/charts/keycloak/templates/keycloak-secrets.tpl.yaml 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: camunda-keycloak 7 | namespace: "KEYCLOAK_NAMESPACE" 8 | labels: 9 | app.kubernetes.io/name: keycloak 10 | app.kubernetes.io/instance: camunda 11 | app.kubernetes.io/managed-by: Helm 12 | app.kubernetes.io/component: keycloak 13 | type: Opaque 14 | data: 15 | admin-password: "ADMIN_PASSWORD" 16 | management-password: "MANAGEMENT_PASSWORD" 17 | -------------------------------------------------------------------------------- /keycloak/keycloak-values.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | image: 3 | tag: KEYCLOAK_VERSION 4 | 5 | auth: 6 | adminUser: "KEYCLOAK_ADMIN_USER" 7 | existingSecret: "camunda-keycloak" 8 | passwordSecretKey: "admin-password" 9 | 10 | service: 11 | type: ClusterIP 12 | 13 | extraEnvVars: 14 | # KEYCLOAK_PROXY_ADDRESS_FORWARDING can be used with Ingress that has SSL Termination. It will be "true" if the TLS 15 | # in global Ingress is enabled, but it could be overwritten with separate Ingress setup. 
16 | - name: KEYCLOAK_PROXY_ADDRESS_FORWARDING 17 | value: "true" 18 | # KEYCLOAK_HTTP_RELATIVE_PATH is valid for v19.x.x only and it's added for compatibility between Keycloak versions 19 | # where in Keycloak v16.x.x it's hard-coded as '/auth', but in v19.x.x it's '/'. 20 | - name: KEYCLOAK_HTTP_RELATIVE_PATH 21 | value: "/" 22 | 23 | # Keycloak.ingress can be used to configure an Ingress for Keycloak. No need to enable it if the global Ingress 24 | # under "global.ingress" is enabled. However, it's possible to setup Keycloak on a separate Ingress if needed. 25 | # For more details: https://github.com/bitnami/charts/tree/main/bitnami/keycloak#configure-ingress 26 | ingress: 27 | enabled: true 28 | hostname: KEYCLOAK_HOSTNAME 29 | path: KEYCLOAK_CONTEXT_PATH 30 | ingressClassName: nginx 31 | tls: true 32 | annotations: 33 | ingress.kubernetes.io/rewrite-target: / 34 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 35 | # kubernetes.io/ingress.class: "nginx" 36 | cert-manager.io/cluster-issuer: "letsencrypt" 37 | -------------------------------------------------------------------------------- /keycloak/keycloak.mk: -------------------------------------------------------------------------------- 1 | .PHONY: create-namespace-keycloak 2 | create-namespace-keycloak: 3 | -kubectl create namespace $(namespace) 4 | 5 | .PHONY: clean-namespace-keycloak 6 | clean-namespace-keycloak: 7 | -kubectl delete namespace $(namespace) 8 | 9 | keycloak-secrets.yaml: 10 | sed -e "s/KEYCLOAK_NAMESPACE/$(namespace)/g;" -e "s/ADMIN_PASSWORD/$(keycloakBase64EncodedAdminPassword)/g;" -e "s/MANAGEMENT_PASSWORD/$(keycloakBase64EncodedManagementPassword)/g;" $(root)/keycloak/keycloak-secrets.tpl.yaml > $(root)/keycloak/keycloak-secrets.yaml 11 | 12 | .PHONY: clean-keycloak-secrets-yaml 13 | clean-keycloak-secrets-yaml: 14 | -rm $(root)/keycloak/keycloak-secrets.yaml 15 | 16 | .PHONY: create-secret-keycloak 17 | create-secret-keycloak: keycloak-secrets.yaml 18 | -kubectl apply -f 
$(root)/keycloak/keycloak-secrets.yaml --namespace $(namespace) 19 | 20 | .PHONY: keycloak-values-ip 21 | keycloak-values-ip: fqdn 22 | sed -e "s|KEYCLOAK_VERSION|$(keycloakVersion)|g;" \ 23 | -e "s|KEYCLOAK_NAMESPACE|$(namespace)|g;" \ 24 | -e "s|KEYCLOAK_ADMIN_USER|$(keycloakAdminUser)|g;" \ 25 | -e "s|KEYCLOAK_HOSTNAME|$(fqdn)|g;" \ 26 | -e "s|KEYCLOAK_CONTEXT_PATH|$(keycloakContextPath)|g;" \ 27 | -e "s|//realms|/realms|g;" \ 28 | $(root)/keycloak/keycloak-values.tpl.yaml > $(root)/keycloak/keycloak-values.yaml 29 | # Render keycloak-values.yaml from the template using $(keycloakHostName) (cf. keycloak-values-ip above, which uses $(fqdn)) 30 | .PHONY: keycloak-values 31 | keycloak-values: 32 | sed -e "s|KEYCLOAK_VERSION|$(keycloakVersion)|g;" \ 33 | -e "s|KEYCLOAK_NAMESPACE|$(namespace)|g;" \ 34 | -e "s|KEYCLOAK_ADMIN_USER|$(keycloakAdminUser)|g;" \ 35 | -e "s|KEYCLOAK_HOSTNAME|$(keycloakHostName)|g;" \ 36 | -e "s|KEYCLOAK_CONTEXT_PATH|$(keycloakContextPath)|g;" \ 37 | -e "s|//realms|/realms|g;" \ 38 | $(root)/keycloak/keycloak-values.tpl.yaml > $(root)/keycloak/keycloak-values.yaml 39 | 40 | .PHONY: clean-keycloak-values-yaml 41 | clean-keycloak-values-yaml: 42 | -rm $(root)/keycloak/keycloak-values.yaml 43 | 44 | .PHONY: install-keycloak 45 | install-keycloak: 46 | -helm repo add bitnami https://charts.bitnami.com/bitnami 47 | -helm repo update bitnami 48 | -helm upgrade --namespace $(namespace) -f $(root)/keycloak/keycloak-values.yaml keycloak bitnami/keycloak --version $(keycloakChartVersion) --atomic --install 49 | 50 | .PHONY: port-keycloak 51 | port-keycloak: 52 | kubectl port-forward svc/keycloak 8080:8080 -n $(namespace) 53 | 54 | .PHONY: keycloak-password 55 | keycloak-password: 56 | $(eval kcPassword := $(shell kubectl get secret --namespace $(namespace) "camunda-keycloak" -o jsonpath="{.data.admin-password}" | base64 --decode)) 57 | @echo KeyCloak Admin password: $(kcPassword) 58 | 59 | .PHONY: clean-keycloak 60 | clean-keycloak: clean-namespace-keycloak clean-keycloak-secrets-yaml clean-keycloak-values-yaml 61 | 
-------------------------------------------------------------------------------- /kibana/Makefile: -------------------------------------------------------------------------------- 1 | root ?= $(shell pwd)/.. 2 | 3 | namespace ?= camunda 4 | # Make sure to match the same version of elasticsearch 5 | kibanaVersion ?= 7.17.3 6 | 7 | .PHONY: all 8 | all: install-kibana 9 | 10 | .PHONY: clean 11 | clean: clean-kibana 12 | 13 | include $(root)/kibana/kibana.mk -------------------------------------------------------------------------------- /kibana/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Kibana 2 | 3 | This folder contains a [Makefile](Makefile) that can be used to install and configure Kibana into an existing Kubernetes Cluster. 4 | 5 | If you don't have a Kubernetes Cluster yet, see the main [README](../README.md) for details on how to create a cluster on the popular cloud providers. 6 | 7 | ## Install 8 | 9 | Make sure you meet [these prerequisites](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/master/README.md#prerequisites). 10 | 11 | Open a terminal, cd to this directory, and run: 12 | 13 | ```sh 14 | make 15 | ``` 16 | 17 | This will install Kibana into the `camunda` namespace. 18 | 19 | Setup port forward: 20 | 21 | ```shell 22 | make port-kibana 23 | ``` 24 | 25 | Then, access kibana over [http://localhost:5601](http://localhost:5601) 26 | 27 | Once in Kibana, browse to the [Dev Tools Console](http://localhost:5601/app/dev_tools#/console) to send commands to elasticsearch and explore. 
28 | 29 | ![Kibana Dev Console](../docs/images/kibana_dev_console.png) 30 | 31 | ## Uninstall 32 | ```sh 33 | make clean 34 | ``` 35 | -------------------------------------------------------------------------------- /kibana/kibana.mk: -------------------------------------------------------------------------------- 1 | .PHONY: install-kibana 2 | install-kibana: 3 | helm repo add elastic https://helm.elastic.co 4 | helm repo add stable https://charts.helm.sh/stable 5 | helm repo update elastic stable 6 | helm upgrade kibana elastic/kibana --version $(kibanaVersion) --atomic --install --namespace $(namespace) --set elasticsearchHosts=http://camunda-elasticsearch:9200 7 | 8 | .PHONY: clean-kibana 9 | clean-kibana: 10 | -helm uninstall kibana --namespace $(namespace) 11 | 12 | .PHONY: port-kibana 13 | port-kibana: 14 | kubectl port-forward svc/kibana-kibana 5601:5601 -n $(namespace) 15 | 16 | .PHONY: template-kibana 17 | template-kibana: 18 | helm template kibana elastic/kibana --version $(kibanaVersion) --skip-crds --output-dir . 19 | @echo "To apply the templates use: kubectl apply -f kibana/templates/ -n $(namespace)" -------------------------------------------------------------------------------- /kind/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # The following variables should not be changed except for advanced use cases 3 | 4 | root ?= $(shell pwd)/.. 5 | 6 | # Camunda components will be installed into the following Kubernetes namespace 7 | namespace ?= camunda 8 | # Helm release name 9 | release ?= camunda 10 | # Helm chart coordinates for Camunda 11 | chart ?= camunda/camunda-platform 12 | 13 | chartValues ?= camunda-values-kind.yaml 14 | 15 | .PHONY: all 16 | all: camunda-values-kind.yaml camunda external-urls 17 | 18 | # 0 kube from cluster.mk: Create Kubernetes cluster.
(No application gateway required) 19 | .PHONY: kube 20 | kube: kube-kind 21 | 22 | # 1 create camunda-values-kind 23 | camunda-values-kind.yaml: 24 | cp $(root)/development/camunda-values.yaml $(chartValues) 25 | 26 | # 2 helm install camunda from camunda.mk 27 | 28 | # 3 Show external URLs 29 | .PHONY: external-urls 30 | external-urls: external-urls-no-ingress 31 | 32 | .PHONY: clean 33 | clean: clean-camunda 34 | rm -f $(chartValues) 35 | 36 | .PHONY: clean-kube 37 | clean-kube: clean-kube-kind 38 | 39 | include $(root)/include/camunda.mk 40 | include $(root)/connectors/connectors.mk 41 | include $(root)/bpmn/deploy-models.mk 42 | include $(root)/kind/include/kubernetes-kind.mk 43 | 44 | # override update target because fewer credentials are needed for core setup 45 | .PHONY: update 46 | update: 47 | helm repo update camunda 48 | helm search repo $(chart) 49 | CONNECTORS_SECRET=$$(kubectl get secret --namespace $(namespace) "$(release)-connectors-auth-credentials" -o jsonpath="{.data.connectors-secret}" | base64 -d) \ 50 | helm upgrade --namespace $(namespace) $(release) $(chart) -f $(chartValues) \ 51 | --set connectors.inbound.auth.existingSecret=$$CONNECTORS_SECRET 52 | -------------------------------------------------------------------------------- /kind/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profiles for Camunda 8 on Kind 2 | 3 | It's possible to use `kind` to experiment with kubernetes on your local developer laptop, but please keep in mind that 4 | Kubernetes is not really intended to be run on a single machine. That being said, this can be handy for learning and 5 | experimenting with Kubernetes. 6 | 7 | Create a Camunda 8 self-managed Kubernetes Cluster in 3 Steps: 8 | 9 | Step 1: Setup some [global prerequisites](../README.md#prerequisites) 10 | 11 | Step 2: Setup command line tools and software for Kind: 12 | 13 | 1.
Make sure to install Docker Desktop (https://www.docker.com/products/docker-desktop/) 14 | 15 | 2. Make sure that `kind` is installed (https://kind.sigs.k8s.io/) 16 | 17 | Again, keep in mind that `kind` is an emulated kubernetes cluster meant only for development! 18 | 19 | 3. Use `Makefile` inside the `kind` directory to create a k8s cluster and install Camunda. 20 | 21 | ```shell 22 | cd kind 23 | make kube 24 | ``` 25 | 26 | This will create a new `kind` cluster in Docker Desktop 27 | 28 | Run the following command to install Camunda 29 | 30 | ```shell 31 | cd kind 32 | make 33 | ``` 34 | 35 | The Kind environment is a stripped down version without ingress and without identity enabled. So, once pods start up, try using port forwarding to access them. 36 | 37 | For example, try `make port-operate`, and then access operate at this url: 38 | 39 | http://localhost:8081 40 | 41 | Or, try `make port-tasklist`, and then access task list here: 42 | 43 | http://localhost:8082 44 | 45 | To access Zeebe via grpc, run `make port-zeebe`, then try: 46 | 47 | ```shell 48 | zbctl status --address localhost:26500 --insecure 49 | ``` 50 | -------------------------------------------------------------------------------- /kind/include/config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | name: camunda-kind-cluster 4 | nodes: 5 | - role: control-plane 6 | kubeadmConfigPatches: 7 | - | 8 | kind: InitConfiguration 9 | nodeRegistration: 10 | kubeletExtraArgs: 11 | node-labels: "ingress-ready=true" 12 | extraPortMappings: 13 | - containerPort: 80 14 | hostPort: 80 15 | protocol: TCP 16 | - containerPort: 443 17 | hostPort: 443 18 | protocol: TCP 19 | -------------------------------------------------------------------------------- /kind/include/kubernetes-kind.mk: -------------------------------------------------------------------------------- 1 | .PHONY: kube-kind 2 | kube-kind: 3 | kind 
create cluster --config=$(root)/kind/include/config.yaml 4 | kubectl apply -f $(root)/kind/include/ssd-storageclass-kind.yaml 5 | 6 | .PHONY: clean-kube-kind 7 | clean-kube-kind: use-kube 8 | kind delete cluster --name camunda-kind-cluster 9 | 10 | .PHONY: use-kube 11 | use-kube: 12 | kubectl config use-context kind-camunda-kind-cluster 13 | 14 | .PHONY: urls 15 | urls: 16 | @echo "A cluster management url is not available on Kind" 17 | 18 | .PHONY: ingress-nginx-kind 19 | ingress-nginx-kind: 20 | # helm install -f $(root)/kind/include/nginx_ingress_values.yaml ingress-nginx oci://ghcr.io/nginxinc/charts/nginx-ingress --version 0.18.0 21 | kubectl apply -f $(root)/kind/include/deploy-ingress.yml 22 | -kubectl wait --namespace ingress-nginx \ 23 | --for=condition=ready pod \ 24 | --selector=app.kubernetes.io/component=controller \ 25 | --timeout=90s 26 | # kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission 27 | 28 | .PHONY: clean-ingress-kind 29 | clean-ingress-kind: 30 | -kubectl delete -n ingress-nginx pvc -l app.kubernetes.io/instance=ingress-nginx 31 | -kubectl delete namespace ingress-nginx 32 | -------------------------------------------------------------------------------- /kind/include/nginx_ingress_values.yaml: -------------------------------------------------------------------------------- 1 | # nginx_ingress_values.yml 2 | controller: 3 | replicaCount: 1 4 | hostNetwork: true 5 | service: 6 | type: NodePort 7 | -------------------------------------------------------------------------------- /kind/include/ssd-storageclass-kind.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: ssd 6 | provisioner: rancher.io/local-path 7 | reclaimPolicy: Delete 8 | -------------------------------------------------------------------------------- /kind/ingress/nginx/Makefile: 
-------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # The following variables should not be changed except for advanced use cases 3 | 4 | ifeq ($(OS),Windows_NT) 5 | root ?= $(CURDIR)/../../.. 6 | else 7 | root ?= $(shell pwd)/../../.. 8 | endif 9 | 10 | # Camunda components will be installed into the following Kubernetes namespace 11 | namespace ?= camunda 12 | # Helm release name 13 | release ?= camunda 14 | # Helm chart coordinates for Camunda 15 | chart ?= camunda/camunda-platform 16 | 17 | chartValues ?= camunda-values-kind.yaml 18 | 19 | .PHONY: all 20 | all: ingress-nginx-kind camunda-values-kind.yaml camunda external-urls 21 | 22 | # 0 kube from cluster.mk: Create Kubernetes cluster. (No aplication gateway required) 23 | .PHONY: kube 24 | kube: kube-kind 25 | 26 | #1 install nginx ingress controller 27 | 28 | #2 create camunda-values-kind 29 | camunda-values-kind.yaml: 30 | cp $(root)/development/camunda-values-with-ingress.yaml $(chartValues) 31 | 32 | # 2 helm install camunda from camunda.mk 33 | 34 | # 3 Show external URLs 35 | .PHONY: external-urls 36 | external-urls: 37 | @echo To access operate, browse to: http://localhost/operate 38 | @echo To access tasklist, browse to: http://localhost/tasklist 39 | @echo To deploy to the cluster: make port-zeebe, then: zbctl status --address localhost:26500 --insecure 40 | 41 | .PHONY: clean 42 | clean: clean-camunda 43 | rm -f $(chartValues) 44 | 45 | .PHONY: clean-kube 46 | clean-kube: clean-kube-kind 47 | 48 | include $(root)/include/camunda.mk 49 | include $(root)/bpmn/deploy-models.mk 50 | include $(root)/connectors/connectors.mk 51 | include $(root)/kind/include/kubernetes-kind.mk 52 | -------------------------------------------------------------------------------- /kind/ingress/nginx/tls/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Ingress NGINX for Kind 
with TLS Certificates 2 | 3 | > **Note** This profile is still a work in progress. For latest progress, please see this [Github Issue](https://github.com/camunda-community-hub/camunda-8-helm-profiles/issues/41) 4 | 5 | Follow the instructions from the [main Kind readme](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/webmodeler-kind-update/kind/README.md) but run from this directory to enable ingress and TLS. 6 | 7 | Prerequisites 8 | 9 | Install JQ 10 | 11 | Install cfssl & cfssljosn (this is only needed if you want a working cert for zeebe clients such as zbctl) 12 | https://blog.cloudflare.com/introducing-cfssl-1-2/ 13 | https://formulae.brew.sh/formula/cfssl 14 | https://github.com/cloudflare/cfssl#installation 15 | 16 | 17 | Create the cluster 18 | ``` 19 | make kube 20 | ``` 21 | 22 | Install Camunda 23 | Run the Make command with your docker registry credentials 24 | ``` 25 | make certEmail= camundaDockerRegistryEmail= camundaDockerRegistryUsername= camundaDockerRegistryPassword= 26 | ``` 27 | 28 | NOTE: To ensure you can access all the apps with a self signed certificate on your localhost run chrome with the following command. 29 | 30 | ``` 31 | open /Applications/Google\ Chrome.app --args -unsafely-treat-insecure-origin-as-secure=https://127.0.0.1.nip.io/ -user-data-dir=/tmp/foo 32 | ``` 33 | 34 | Use Web Modeler 35 | 36 | You need to get the secret from KeyCloak by going to the zeebe client in keycloak then the `Credentials` tab. 
37 | 38 | First get the password for keycloak login 39 | ``` 40 | make keycloak-password 41 | ``` 42 | 43 | Setup the connection to zeebe so webmodeler can deploy and run BPMN 44 | 45 | ![Keycloak Zeebe Client Secret](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/7bc8352e6b2ff7ccad64821541fd61f1593230d4/docs/images/webmodeler-zeebe-connect.png?raw=true) 46 | 47 | 48 | Connect to the zeebe with zbctl 49 | ``` 50 | zbctl status --address 127.0.0.1.nip.io:443 --certPath ./certs/signed-client.crt 51 | ``` 52 | -------------------------------------------------------------------------------- /metrics/Makefile: -------------------------------------------------------------------------------- 1 | root ?= $(shell pwd)/../ 2 | 3 | .PHONY: all 4 | all: metrics 5 | 6 | .PHONY: clean 7 | clean: clean-metrics 8 | 9 | include $(root)/metrics/metrics.mk -------------------------------------------------------------------------------- /metrics/README.md: -------------------------------------------------------------------------------- 1 | # Camunda 8 Helm Profile: Metrics 2 | 3 | This folder contains a [Makefile](Makefile) that can be used to install and configure Prometheus and Grafana into an existing Kubernetes Cluster. 4 | 5 | If you don't have a Kubernetes Cluster yet, see the main [README](../README.md) for details on how to create a cluster on the popular cloud providers. 6 | 7 | ## Install 8 | 9 | Make sure you meet [these prerequisites](https://github.com/camunda-community-hub/camunda-8-helm-profiles/blob/master/README.md#prerequisites). 10 | 11 | Manually create a secret to store grafana admin credentials. 
Save the following to a file named `grafana-secret.yml` and then run `kubectl apply -f grafana-secret.yml` 12 | 13 | ```yaml 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: grafana-admin-password 18 | type: Opaque 19 | stringData: 20 | admin-user: camunda 21 | admin-password: 22 | ``` 23 | 24 | Open a terminal and run: 25 | 26 | ```sh 27 | make 28 | ``` 29 | 30 | If `make` is correctly configured, you should also get tab completion for all available make targets. 31 | 32 | ## Uninstall 33 | ```sh 34 | make clean 35 | ``` 36 | # Grafana URL 37 | 38 | After a succesful install, the following command can be used to see the Grafana Service: 39 | 40 | ```shell 41 | kubectl get service metrics-grafana-loadbalancer --namespace default 42 | ``` 43 | 44 | Copy the `EXTERNAL-IP` to access the Grafana Dashboard Web UI. The username and password can be found inside [grafana-secret.yml](grafana-secret.yml) 45 | 46 | -------------------------------------------------------------------------------- /metrics/grafana-load-balancer.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: metrics-grafana-loadbalancer 7 | namespace: default 8 | spec: 9 | externalTrafficPolicy: Cluster 10 | ports: 11 | - name: http 12 | nodePort: 32264 13 | port: 80 14 | protocol: TCP 15 | targetPort: grafana 16 | selector: 17 | app.kubernetes.io/instance: metrics 18 | app.kubernetes.io/name: grafana 19 | sessionAffinity: None 20 | type: LoadBalancer 21 | -------------------------------------------------------------------------------- /metrics/metrics.mk: -------------------------------------------------------------------------------- 1 | .PHONY: metrics 2 | metrics: 3 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 4 | helm repo add stable https://charts.helm.sh/stable 5 | helm repo update prometheus-community stable 6 | kubectl 
apply -f $(root)/metrics/grafana-secret.yml -n default 7 | @echo " ************ Grafana password : [$$(grep 'admin-password' $(root)/metrics/grafana-secret.yml | grep -v 'name:' | cut -d':' -f2- | sed 's/\r//' | xargs )] **********" 8 | helm install metrics prometheus-community/kube-prometheus-stack --wait --atomic -f $(root)/metrics/prometheus-operator-values.yml --set prometheusOperator.tlsProxy.enabled=false --namespace default 9 | kubectl apply -f $(root)/metrics/grafana-load-balancer.yml -n default 10 | 11 | # echo "Grafana password : [$(grep "admin-password" grafana-secret.yaml | cut -d':' -f2- | xargs)]" 12 | 13 | .PHONY: grafana-password 14 | grafana-password: 15 | @echo "Grafana password : [$$(grep 'admin-password' $(root)/metrics/grafana-secret.yml | grep -v 'name:' | cut -d':' -f2- | sed 's/\r//' | xargs )]" 16 | 17 | 18 | .PHONY: update-metrics 19 | update-metrics: 20 | helm upgrade metrics prometheus-community/kube-prometheus-stack --wait --atomic -f $(root)/metrics/prometheus-operator-values.yml --set prometheusOperator.tlsProxy.enabled=false --namespace default 21 | 22 | .PHONY: clean-metrics 23 | clean-metrics: 24 | -kubectl delete -f $(root)/metrics/grafana-load-balancer.yml -n default 25 | -helm uninstall metrics --namespace default 26 | -kubectl delete -f $(root)/metrics/grafana-secret.yml -n default 27 | # -kubectl delete -f $(include-dir)/ssd-storageclass.yaml -n default 28 | -kubectl delete pvc -l app.kubernetes.io/name=prometheus -n default 29 | -kubectl delete pvc -l app.kubernetes.io/name=grafana -n default 30 | 31 | .PHONY: port-grafana 32 | port-grafana: 33 | kubectl port-forward svc/metrics-grafana-loadbalancer 8080:80 -n default 34 | 35 | .PHONY: port-prometheus 36 | port-prometheus: 37 | kubectl port-forward svc/metrics-kube-prometheus-st-prometheus 9090:9090 -n default 38 | 39 | .PHONY: url-grafana 40 | url-grafana: 41 | @echo http://$(shell kubectl get services metrics-grafana-loadbalancer -n default -o 
jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}')/d/zeebe-dashboard/zeebe?var-namespace=$(namespace) 42 | 43 | .PHONY: open-grafana 44 | open-grafana: 45 | xdg-open http://$(shell kubectl get services metrics-grafana-loadbalancer -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}')/d/zeebe-dashboard/zeebe?var-namespace=$(namespace) & -------------------------------------------------------------------------------- /metrics/prometheus-operator-values.yml: -------------------------------------------------------------------------------- 1 | alertmanager: 2 | enabled: false 3 | 4 | grafana: 5 | image: 6 | tag: 11.6.0 7 | admin: 8 | existingSecret: grafana-admin-password 9 | userKey: admin-user 10 | passwordKey: admin-password 11 | # grafana.ini: 12 | # auth.github: 13 | # enabled: true 14 | # allow_sign_up: true 15 | # scopes: user:email,read:org 16 | # auth_url: https://github.com/login/oauth/authorize 17 | # token_url: https://github.com/login/oauth/access_token 18 | # api_url: https://api.github.com/user 19 | # allowed_organizations: zeebe-io camunda camunda-cloud 20 | # client_id: "$__file{/etc/secrets/auth-github-oauth/client_id}" 21 | # client_secret: "$__file{/etc/secrets/auth-github-oauth/client_secret}" 22 | # role_attribute_path: "editor" 23 | # extraSecretMounts: 24 | # - name: auth-github-oauth 25 | # secretName: auth-github-oauth 26 | # defaultMode: 0440 27 | # mountPath: /etc/secrets/auth-github-oauth 28 | # readOnly: true 29 | dashboardProviders: 30 | dashboardproviders.yaml: 31 | apiVersion: 1 32 | providers: 33 | - name: default 34 | orgId: 1 35 | folder: 36 | type: file 37 | disableDeletion: true 38 | editable: false 39 | options: 40 | path: /var/lib/grafana/dashboards/default 41 | dashboards: 42 | default: 43 | zeebe: 44 | urlold: https://raw.githubusercontent.com/zeebe-io/zeebe/develop/monitor/grafana/zeebe.json 45 | url: 
https://raw.githubusercontent.com/camunda/camunda/main/monitor/grafana/zeebe.json 46 | 47 | persistence: 48 | enabled: true 49 | storageClassName: ssd 50 | sidecar: 51 | dashboards: 52 | searchNamespace: ALL 53 | # 54 | # prometheusOperator: 55 | # admissionWebhooks: 56 | # enabled: false 57 | 58 | prometheus: 59 | prometheusSpec: 60 | retention: 90d 61 | shards: null 62 | storageSpec: 63 | volumeClaimTemplate: 64 | metadata: 65 | name: prometheus 66 | # selector: 67 | # matchLabels: 68 | # app: prometheus 69 | spec: 70 | accessModes: 71 | - ReadWriteOnce 72 | resources: 73 | requests: 74 | storage: 50Gi 75 | storageClassName: ssd 76 | -------------------------------------------------------------------------------- /multi-region/dual-region/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all # install Camunda in both regions 2 | all: 3 | $(MAKE) -C region0 4 | $(MAKE) -C region1 5 | 6 | .PHONY: clean # uninstall Camunda from both regions 7 | clean: 8 | $(MAKE) -C region0 clean 9 | $(MAKE) -C region1 clean 10 | 11 | .PHONY: values # generate YAML snippets from namespace and region 12 | values: 13 | . ./export_environment_prerequisites.sh; \ 14 | ./generate_zeebe_helm_values.sh 15 | 16 | .PHONY: help # print this help 17 | help: 18 | @grep -oP '^\.PHONY: \K.*' Makefile | sed 's/#/\t/' 19 | 20 | ######################################################################## 21 | # The following targets are for maintainers 22 | 23 | .PHONY: meld-regions # maintain region directories in sync 24 | meld-regions: 25 | meld region0/ region1/ 26 | 27 | .PHONY: meld-scripts # maintain shell scripts in sync with Camunda docs 28 | meld-scripts: 29 | meld . 
../../../c8-multi-region/aws/dual-region/scripts/ 30 | 31 | .PHONY: meld-values # maintain camunda-values.yaml in sync with Camunda docs 32 | meld-values: 33 | meld camunda-values.d/dual-region.yaml ../../../c8-multi-region/aws/dual-region/kubernetes/camunda-values.yml 34 | -------------------------------------------------------------------------------- /multi-region/dual-region/README.md: -------------------------------------------------------------------------------- 1 | # Experimental Camunda 2.5-Datacenter Setup with nearby 2 Primary Datacenters 2 | 3 | Warning: This profile is not (yet) officially supported by Camunda and it assumes a low network latency between the two primary datacenters called `region0` and `region1`, i.e. less than 15 miliseconds. 4 | 5 | Zeebe is installed as a [dual-region active-active stretch cluster](https://docs.camunda.io/docs/self-managed/concepts/multi-region/dual-region/): 6 | ![Zeebe dual-region active-active stretch cluster](https://github.com/camunda/camunda-docs/blob/main/versioned_docs/version-8.7/self-managed/concepts/multi-region/img/dual-region.svg) 7 | 8 | In opposite to the above picture/documentation, Elasticsearch is installed as a single cluster stretching across 2.5 datacenters: 9 | [![Elasticsearch 2.5-region stretch cluster](https://media.licdn.com/dms/image/v2/D5612AQEXNDJ8c1DCVw/article-inline_image-shrink_1500_2232/article-inline_image-shrink_1500_2232/0/1667946099753?e=1750896000&v=beta&t=ItGnEzQnubzaaBcgMzqRgkc76NGGIognTjZbPN0ii78)](https://www.linkedin.com/pulse/building-on-prem-multi-datacenter-stretch-cluster-senguttuvan/) 10 | 11 | Adjust the config.mk files in the region subfolders before running `make` either in the region folders or even in the root. You can run `make help` to see available targets. Running `make --dry-run` shows a preview of the commands to be executed. 
12 | -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/cluster-size.yaml: -------------------------------------------------------------------------------- 1 | zeebe: 2 | # both should be multiples of the replication factor for a ballanced setup 3 | clusterSize: 4 4 | partitionCount: 1 5 | pvcSize: 10Gi 6 | pvcStorageClassName: "standard" 7 | 8 | zeebeGateway: 9 | replicas: 1 -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/connectors-disabled.yaml: -------------------------------------------------------------------------------- 1 | connectors: 2 | enabled: false -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/connectors-outbound-only.yaml: -------------------------------------------------------------------------------- 1 | connectors: 2 | enabled: true 3 | inbound: 4 | mode: disabled -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/elasticsearch-2.5-region-stretch-cluster.yaml: -------------------------------------------------------------------------------- 1 | elasticsearch: 2 | service: 3 | enabled: true 4 | type: LoadBalancer 5 | extraConfig: 6 | cluster.routing.allocation.awareness.attributes: region 7 | security: 8 | enabled: false 9 | master: 10 | replicaCount: 1 11 | networkPolicy: 12 | enabled: false 13 | extraEnvVars: 14 | - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS 15 | # yamllint disable-line rule:line-length 16 | value: "camunda-r0-elasticsearch-master-0 camunda-r1-elasticsearch-master-0 camunda-r2-elasticsearch-master-0" 17 | - name: ELASTICSEARCH_MINIMUM_MASTER_NODES 18 | value: "2" 19 | - name: ELASTICSEARCH_TOTAL_NODES 20 | value: "3" 21 | - name: ELASTICSEARCH_CLUSTER_HOSTS 22 | value: 23 | camunda-r0-elasticsearch-master-hl.camunda-r0.svc.cluster.local, 
24 | camunda-r1-elasticsearch-master-hl.camunda-r1.svc.cluster.local, 25 | camunda-r2-elasticsearch-master-hl.camunda-r2.svc.cluster.local 26 | - name: ELASTICSEARCH_ENABLE_REST_TLS 27 | value: "false" 28 | -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/elasticsearch-disabled.yaml: -------------------------------------------------------------------------------- 1 | # disable Elasticsearch and all components that depend on it 2 | global: 3 | elasticsearch: 4 | enabled: false 5 | optimize: 6 | enabled: false 7 | tasklist: 8 | enabled: false 9 | operate: 10 | enabled: false 11 | elasticsearch: 12 | enabled: false -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/elasticsearch-only.yaml: -------------------------------------------------------------------------------- 1 | # disable all Camunda Platform components and install only Elasticsearch 2 | global: 3 | identity: 4 | auth: 5 | enabled: false 6 | zeebe: 7 | enabled: false 8 | zeebeGateway: 9 | enabled: false 10 | connectors: 11 | enabled: false 12 | operate: 13 | enabled: false 14 | tasklist: 15 | enabled: false 16 | optimize: 17 | enabled: false 18 | identity: 19 | enabled: false 20 | identityKeycloak: 21 | enabled: false 22 | -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/elasticsearch-version.yaml: -------------------------------------------------------------------------------- 1 | elasticsearch: 2 | image: 3 | tag: 8.17.3-debian-12-r0 -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/identity-disabled.yaml: -------------------------------------------------------------------------------- 1 | # disable Identity and all components that depend on it 2 | global: 3 | identity: 4 | auth: 5 | # Disable the Identity 
authentication 6 | # it will fall back to basic-auth: demo/demo as default user 7 | enabled: false 8 | identity: 9 | enabled: false 10 | identityKeycloak: 11 | enabled: false 12 | optimize: 13 | enabled: false -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/ingress.yaml: -------------------------------------------------------------------------------- 1 | zeebeGateway: 2 | ingress: 3 | enabled: false 4 | className: nginx 5 | annotations: 6 | ingress.kubernetes.io/rewrite-target: '/' 7 | nginx.ingress.kubernetes.io/ssl-redirect: 'false' 8 | nginx.ingress.kubernetes.io/proxy-buffering: 'on' 9 | nginx.ingress.kubernetes.io/proxy-buffer-size: '128k' 10 | path: / 11 | tls: 12 | enabled: false 13 | secretName: camunda-zeebe-gateway 14 | operate: 15 | ingress: 16 | enabled: true 17 | className: nginx 18 | annotations: 19 | ingress.kubernetes.io/rewrite-target: '/' 20 | nginx.ingress.kubernetes.io/ssl-redirect: 'false' 21 | nginx.ingress.kubernetes.io/proxy-buffering: 'on' 22 | nginx.ingress.kubernetes.io/proxy-buffer-size: '128k' 23 | path: / 24 | tls: 25 | enabled: false 26 | secretName: camunda-operate 27 | optimize: 28 | ingress: 29 | enabled: true 30 | className: nginx 31 | annotations: 32 | ingress.kubernetes.io/rewrite-target: '/' 33 | nginx.ingress.kubernetes.io/ssl-redirect: 'false' 34 | nginx.ingress.kubernetes.io/proxy-buffering: 'on' 35 | nginx.ingress.kubernetes.io/proxy-buffer-size: '128k' 36 | path: / 37 | tls: 38 | enabled: false 39 | secretName: camunda-optimize -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/prometheus-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | prometheusServiceMonitor: 2 | # if true then a service monitor will be deployed, which allows an installed prometheus controller to scrape metrics from the deployed pods 3 | enabled: 
true -------------------------------------------------------------------------------- /multi-region/dual-region/camunda-values.d/zeebe-debug.yaml: -------------------------------------------------------------------------------- 1 | zeebe: 2 | debug: true 3 | logLevel: debug -------------------------------------------------------------------------------- /multi-region/dual-region/export_environment_prerequisites.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ############################################################################### 4 | # Important: Adjust the following environment variables to your setup # 5 | ############################################################################### 6 | 7 | # The script must be executed with 8 | # . ./export_environment_prerequisites.sh 9 | # to export the environment variables to the current shell 10 | 11 | # The Kubernetes namespaces for each region where Camunda 8 should be running 12 | # Namespace names must be unique to route the traffic 13 | export CAMUNDA_NAMESPACE_0=camunda-r0 14 | export CAMUNDA_NAMESPACE_1=camunda-r1 15 | 16 | # The Helm release name used for installing Camunda 8 in both Kubernetes clusters 17 | export HELM_RELEASE_NAME=camunda 18 | # renovate: datasource=helm depName=camunda-platform registryUrl=https://helm.camunda.io versioning=regex:^11(\.(?\d+))?(\.(?\d+))?$ 19 | export HELM_CHART_VERSION=11.3.0 20 | -------------------------------------------------------------------------------- /multi-region/dual-region/region0/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all # install Camunda and broker services 2 | all: install-camunda service-per-broker 3 | 4 | .PHONY: clean # uninstall Camunda and broker services 5 | clean: clean-service-per-broker uninstall-camunda 6 | 7 | .PHONY: service-per-broker # generate service for each broker 8 | service-per-broker: 9 | kubectl apply -f 
../service-per-broker.yaml -n $(namespace) 10 | 11 | .PHONY: clean-service-per-broker # delete service for each broker 12 | clean-service-per-broker: 13 | -kubectl delete -f ../service-per-broker.yaml -n $(namespace) 14 | 15 | ifeq ($(OS),Windows_NT) 16 | root ?= $(CURDIR)/../../.. 17 | else 18 | root ?= $(shell pwd)/../../.. 19 | endif 20 | 21 | include config.mk 22 | include $(root)/include/camunda.mk 23 | -------------------------------------------------------------------------------- /multi-region/dual-region/region0/config.mk: -------------------------------------------------------------------------------- 1 | # Kubernetes namespace 2 | namespace ?= camunda-r0 3 | # Helm release name 4 | release ?= camunda 5 | # Helm chart version for Camunda 6 | # renovate: datasource=helm depName=camunda-platform registryUrl=https://helm.camunda.io versioning=regex:^11(\.(?<minor>\d+))?(\.(?<patch>\d+))?$ 7 | chartVersion ?= 11.3.0 8 | # Helm chart coordinates for Camunda 9 | chart ?= camunda/camunda-platform --version $(chartVersion) 10 | # Helm chart values 11 | chartValues ?= ../camunda-values.d/cluster-size.yaml \ 12 | -f ../camunda-values.d/dual-region.yaml \ 13 | -f ../camunda-values.d/elasticsearch-2.5-region-stretch-cluster.yaml \ 14 | -f region0.yaml 15 | -------------------------------------------------------------------------------- /multi-region/dual-region/region0/region0.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | multiregion: 3 | # unique id of the region. MUST start at 0 for the computation to work correctly. With 2 regions, you would have region 0 and 1. 
4 | regionId: 0 5 | elasticsearch: 6 | service: 7 | loadBalancerIP: 8 | extraConfig: 9 | network.publish_host: 10 | node.attr.region: 0 -------------------------------------------------------------------------------- /multi-region/dual-region/region1/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all # install Camunda and broker services 2 | all: install-camunda service-per-broker 3 | 4 | .PHONY: clean # uninstall Camunda and broker services 5 | clean: clean-service-per-broker uninstall-camunda 6 | 7 | .PHONY: service-per-broker # generate service for each broker 8 | service-per-broker: 9 | kubectl apply -f ../service-per-broker.yaml -n $(namespace) 10 | 11 | .PHONY: clean-service-per-broker # delete service for each broker 12 | clean-service-per-broker: 13 | -kubectl delete -f ../service-per-broker.yaml -n $(namespace) 14 | 15 | ifeq ($(OS),Windows_NT) 16 | root ?= $(CURDIR)/../../.. 17 | else 18 | root ?= $(shell pwd)/../../.. 19 | endif 20 | 21 | include config.mk 22 | include $(root)/include/camunda.mk 23 | -------------------------------------------------------------------------------- /multi-region/dual-region/region1/config.mk: -------------------------------------------------------------------------------- 1 | # Kubernetes namespace 2 | namespace ?= camunda-r1 3 | # Helm release name 4 | release ?= camunda 5 | # Helm chart version for Camunda 6 | # renovate: datasource=helm depName=camunda-platform registryUrl=https://helm.camunda.io versioning=regex:^11(\.(?<minor>\d+))?(\.(?<patch>\d+))?$ 7 | chartVersion ?= 11.3.0 8 | # Helm chart coordinates for Camunda 9 | chart ?= camunda/camunda-platform --version $(chartVersion) 10 | # Helm chart values 11 | chartValues ?= ../camunda-values.d/cluster-size.yaml \ 12 | -f ../camunda-values.d/dual-region.yaml \ 13 | -f ../camunda-values.d/elasticsearch-2.5-region-stretch-cluster.yaml \ 14 | -f region1.yaml 15 | 
-------------------------------------------------------------------------------- /multi-region/dual-region/region1/region1.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | multiregion: 3 | # unique id of the region. MUST start at 0 for the computation to work correctly. With 2 regions, you would have region 0 and 1. 4 | regionId: 1 5 | elasticsearch: 6 | service: 7 | loadBalancerIP: 8 | extraConfig: 9 | network.publish_host: 10 | node.attr.region: 1 -------------------------------------------------------------------------------- /multi-region/dual-region/region2/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all # install Camunda and broker services 2 | all: install-camunda 3 | 4 | .PHONY: clean # uninstall Camunda and broker services 5 | clean: uninstall-camunda 6 | 7 | ifeq ($(OS),Windows_NT) 8 | root ?= $(CURDIR)/../../.. 9 | else 10 | root ?= $(shell pwd)/../../.. 11 | endif 12 | 13 | include config.mk 14 | include $(root)/include/camunda.mk 15 | -------------------------------------------------------------------------------- /multi-region/dual-region/region2/config.mk: -------------------------------------------------------------------------------- 1 | # Kubernetes namespace — NOTE(review): region0 uses camunda-r0 and region1 uses camunda-r1; confirm camunda-r1 here is intentional and not a copy-paste error (expected camunda-r2?) 2 | namespace ?= camunda-r1 3 | # Helm release name 4 | release ?= camunda 5 | # Helm chart version for Camunda 6 | # renovate: datasource=helm depName=camunda-platform registryUrl=https://helm.camunda.io versioning=regex:^11(\.(?<minor>\d+))?(\.(?<patch>\d+))?$ 7 | chartVersion ?= 11.3.0 8 | # Helm chart coordinates for Camunda 9 | chart ?= camunda/camunda-platform --version $(chartVersion) 10 | # Helm chart values 11 | chartValues ?= ../camunda-values.d/elasticsearch-only.yaml \ 12 | -f ../camunda-values.d/elasticsearch-2.5-region-stretch-cluster.yaml \ 13 | -f region2.yaml 14 | --------------------------------------------------------------------------------
/multi-region/dual-region/region2/region2.yaml: -------------------------------------------------------------------------------- 1 | # this region contains only a voting-only master node of Elasticsearch 2 | elasticsearch: 3 | service: 4 | loadBalancerIP: 5 | extraConfig: 6 | network.publish_host: 7 | node.attr.region: 2 8 | node.roles: [ master, voting_only ] 9 | master: 10 | masterOnly: true -------------------------------------------------------------------------------- /multi-region/dual-region/service-per-broker-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: "camunda-zeebe-0" 5 | labels: 6 | app: camunda-platform 7 | app.kubernetes.io/name: camunda-platform 8 | app.kubernetes.io/instance: camunda 9 | spec: 10 | clusterIP: None 11 | publishNotReadyAddresses: true 12 | type: ClusterIP 13 | ports: 14 | - port: 26502 15 | protocol: TCP 16 | name: internal 17 | - port: 26501 18 | protocol: TCP 19 | name: command 20 | selector: 21 | app: camunda-platform 22 | app.kubernetes.io/name: camunda-platform 23 | app.kubernetes.io/instance: camunda 24 | app.kubernetes.io/managed-by: Helm 25 | app.kubernetes.io/part-of: camunda-platform 26 | app.kubernetes.io/component: zeebe-broker 27 | statefulset.kubernetes.io/pod-name: camunda-zeebe-0 -------------------------------------------------------------------------------- /multi-region/dual-region/service-per-broker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/templates/zeebe/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: "camunda-zeebe-0" 7 | labels: 8 | app: camunda-platform 9 | app.kubernetes.io/name: camunda-platform 10 | app.kubernetes.io/instance: camunda 11 | spec: 12 | clusterIP: None 13 | publishNotReadyAddresses: true 14 | type: ClusterIP 15 | ports: 16 | - port: 26502 17 | protocol: TCP 18 | name: 
internal 19 | - port: 26501 20 | protocol: TCP 21 | name: command 22 | selector: 23 | app: camunda-platform 24 | app.kubernetes.io/name: camunda-platform 25 | app.kubernetes.io/instance: camunda 26 | app.kubernetes.io/managed-by: Helm 27 | app.kubernetes.io/part-of: camunda-platform 28 | app.kubernetes.io/component: zeebe-broker 29 | statefulset.kubernetes.io/pod-name: camunda-zeebe-0 30 | --- 31 | # Source: camunda-platform/templates/zeebe/service.yaml 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: "camunda-zeebe-1" 36 | labels: 37 | app: camunda-platform 38 | app.kubernetes.io/name: camunda-platform 39 | app.kubernetes.io/instance: camunda 40 | spec: 41 | clusterIP: None 42 | publishNotReadyAddresses: true 43 | type: ClusterIP 44 | ports: 45 | - port: 26502 46 | protocol: TCP 47 | name: internal 48 | - port: 26501 49 | protocol: TCP 50 | name: command 51 | selector: 52 | app: camunda-platform 53 | app.kubernetes.io/name: camunda-platform 54 | app.kubernetes.io/instance: camunda 55 | app.kubernetes.io/managed-by: Helm 56 | app.kubernetes.io/part-of: camunda-platform 57 | app.kubernetes.io/component: zeebe-broker 58 | statefulset.kubernetes.io/pod-name: camunda-zeebe-1 -------------------------------------------------------------------------------- /oauth2-proxy/README.md: -------------------------------------------------------------------------------- 1 | 1. Create a new client for oauth2-proxy 2 | 3 | Use the following command to determine keycloak password 4 | 5 | ```shell 6 | make keykloak-password 7 | ``` 8 | 9 | Sign in and Click `clients`, then `Create`. Set Client ID to `oauth2` and accept default of `openid-connect`. 10 | 11 | Add the following to `Valid Redirect URIs` 12 | 13 | ```shell 14 | https://gke.upgradingdave.com/oauth2/callback 15 | ``` 16 | 17 | Copy and paste the client secret generated for this client and add it to the your Makefile 18 | 19 | Configure a new Audience Mapper. 
20 | 21 | - Use `oauth2 Audience Mapper` for the name 22 | - Select `Audience` for Mapper Type 23 | - Choose your `oauth2` client for the included Client Audience 24 | 25 | 2. Create a zeebe client 26 | 27 | - Create a zeebe client in the normal way 28 | - Create a client scope in keycloak. Set name to `oauth2`. Set protocol `openid-connect`. 29 | - Click on mappers tab and create new mapper. Name can be anything. Set `Included Client Audience` to `oauth2` (the client you created in step 1 above) 30 | - Save 31 | 32 | # Connecting from Desktop Modeler 33 | 34 | zbctl status --address gke.upgradingdave.com:443 --clientId zeebe --clientSecret xxx --authzUrl https://gke.upgradingdave.com/auth/realms/camunda-platform/protocol/openid-connect/token 35 | 36 | https://gke.upgradingdave.com/auth/realms/camunda-platform/protocol/openid-connect/token 37 | -------------------------------------------------------------------------------- /oauth2-proxy/oauth2-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: oauth2-proxy 5 | namespace: ingress-nginx 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt 8 | spec: 9 | ingressClassName: nginx 10 | rules: 11 | - host: YOUR_HOSTNAME 12 | http: 13 | paths: 14 | - backend: 15 | service: 16 | name: oauth2-proxy 17 | port: 18 | number: 80 19 | path: /oauth2 20 | pathType: Prefix 21 | tls: 22 | - hosts: 23 | - YOUR_HOSTNAME 24 | secretName: tls-secret -------------------------------------------------------------------------------- /oauth2-proxy/oauth2-values.tpl.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/oauth2-proxy/oauth2-proxy/blob/7c3efe4f42bc37ccab613fe5002c172e147e3195/docs/2_auth.md#keycloak-auth-provider 2 | extraArgs: 3 | - --skip-jwt-bearer-tokens 4 | - --provider=keycloak-oidc 5 | - --client-id=oauth2 6 | - 
--client-secret=YOUR_CLIENT_SECRET 7 | - --redirect-url=https://YOUR_HOSTNAME/oauth2/callback 8 | - --oidc-issuer-url=https://YOUR_HOSTNAME/auth/realms/camunda-platform 9 | - --silence-ping-logging 10 | - --auth-logging=true 11 | - --insecure-oidc-allow-unverified-email 12 | - --request-logging=true 13 | - --standard-logging=true 14 | # - --session-store-type=cookie 15 | # - --cookie-secret=gfVLP_MPTXFi4JAXFUOOiikk5EXXgCOQsBUy3wCeNG4= 16 | # - --login-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/auth 17 | # - --redeem-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/token 18 | # - --profile-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/userinfo 19 | # - --validate-url=https://tuesday.southeastasia.cloudapp.azure.com/auth/realms/camunda-platform/protocol/openid-connect/userinfo 20 | # - --redirect-url=https://tuesday.southeastasia.cloudapp.azure.com/oauth2/callback 21 | # - --keycloak-group=/users 22 | # - --provider-display-name=LoginBlah 23 | # - --set-xauthrequest=true 24 | # - --pass-user-headers=true 25 | # - --pass-access-token=true 26 | # - --pass-authorization-header=true 27 | # # - --scope=openid 28 | # - --cookie-httponly=false 29 | 30 | 31 | replicaCount: 1 32 | affinity: 33 | podAntiAffinity: 34 | requiredDuringSchedulingIgnoredDuringExecution: 35 | - labelSelector: 36 | matchLabels: 37 | app: oauth2-proxy 38 | topologyKey: "kubernetes.io/hostname" 39 | resources: 40 | limits: 41 | cpu: 200m 42 | memory: 100Mi 43 | requests: 44 | cpu: 100m 45 | memory: 25Mi -------------------------------------------------------------------------------- /oauth2-proxy/oauth2.mk: -------------------------------------------------------------------------------- 1 | oauth2-values.yaml: 2 | if [ -n "$(baseDomainName)" ]; then \ 3 | sed "s/YOUR_HOSTNAME/$(subDomainName).$(baseDomainName)/g; 
s/YOUR_CLIENT_SECRET/$(clientSecret)/g" $(root)/oauth2-proxy/oauth2-values.tpl.yaml > ./oauth2-values.yaml; \ 4 | else \ 5 | sed "s/YOUR_HOSTNAME/$(IP).nip.io/g; s/YOUR_CLIENT_SECRET/$(clientSecret)/g" $(root)/oauth2-proxy/oauth2-values.tpl.yaml > ./oauth2-values.yaml; \ 6 | fi 7 | 8 | .PHONY: oauth2-proxy 9 | oauth2-proxy: oauth2-values.yaml 10 | helm repo add azure-marketplace https://marketplace.azurecr.io/helm/v1/repo 11 | helm repo update azure-marketplace 12 | helm install oauth2-proxy azure-marketplace/oauth2-proxy -n ingress-nginx --create-namespace -f oauth2-values.yaml 13 | 14 | .PHONY: update-oauth2-proxy 15 | update-oauth2-proxy: 16 | helm upgrade oauth2-proxy azure-marketplace/oauth2-proxy -n ingress-nginx -f oauth2-values.yaml 17 | 18 | .PHONY: oauth2-ingress 19 | oauth2-ingress: ingress-ip-from-service 20 | if [ -n "$(baseDomainName)" ]; then \ 21 | cat $(root)/oauth2-proxy/oauth2-ingress.yaml | sed -E "s/YOUR_HOSTNAME/$(subDomainName).$(baseDomainName)/g" | kubectl apply -f - ; \ 22 | else \ 23 | cat $(root)/oauth2-proxy/oauth2-ingress.yaml | sed -E "s/YOUR_HOSTNAME/$(IP).nip.io/g" | kubectl apply -f - ; \ 24 | fi 25 | 26 | .PHONY: zeebe-oauth2-ingress 27 | zeebe-oauth2-ingress: ingress-ip-from-service 28 | if [ -n "$(baseDomainName)" ]; then \ 29 | cat $(root)/oauth2-proxy/zeebe-oauth2-ingress.yaml | sed -E "s/YOUR_HOSTNAME/$(subDomainName).$(baseDomainName)/g" | kubectl apply -f - ; \ 30 | else \ 31 | cat $(root)/oauth2-proxy/zeebe-oauth2-ingress.yaml | sed -E "s/YOUR_HOSTNAME/$(IP).nip.io/g" | kubectl apply -f - ; \ 32 | fi 33 | 34 | .PHONY: clean-oauth2 35 | clean-oauth2: 36 | rm -rf oauth2-values.yaml 37 | -------------------------------------------------------------------------------- /oauth2-proxy/zeebe-oauth2-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | namespace: camunda 5 | name: zeebe-oauth2-ingress 6 | 
annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/backend-protocol: "GRPC" 9 | cert-manager.io/cluster-issuer: letsencrypt 10 | nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2/start?rd=$escaped_request_uri 11 | # nginx.ingress.kubernetes.io/auth-response-headers: Authorization 12 | nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2/auth 13 | spec: 14 | ingressClassName: nginx 15 | rules: 16 | - host: YOUR_HOSTNAME 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: camunda-zeebe-gateway 24 | port: 25 | number: 26500 26 | tls: 27 | - secretName: tls-secret 28 | hosts: 29 | - YOUR_HOSTNAME -------------------------------------------------------------------------------- /openshift/Makefile: -------------------------------------------------------------------------------- 1 | ifeq ($(OS),Windows_NT) 2 | root ?= $(CURDIR)/.. 3 | else 4 | root ?= $(shell pwd)/.. 5 | endif 6 | 7 | .PHONY: all 8 | all: camunda-values-openshift.yaml camunda 9 | 10 | include $(root)/openshift/openshift.mk 11 | -------------------------------------------------------------------------------- /openshift/camunda-identity-edge-routes.yaml: -------------------------------------------------------------------------------- 1 | kind: Route 2 | apiVersion: route.openshift.io/v1 3 | metadata: 4 | name: camunda-keycloak 5 | namespace: camunda 6 | labels: 7 | app: camunda-platform 8 | app.kubernetes.io/component: keycloak 9 | app.kubernetes.io/instance: camunda 10 | app.kubernetes.io/name: camunda-platform 11 | app.kubernetes.io/part-of: camunda-platform 12 | spec: 13 | host: keycloak-camunda.apps-crc.testing 14 | to: 15 | kind: Service 16 | name: camunda-keycloak 17 | weight: 100 18 | port: 19 | targetPort: http 20 | tls: 21 | termination: edge 22 | insecureEdgeTerminationPolicy: Redirect 23 | wildcardPolicy: None 24 | alternateBackends: [] 25 | --- 26 | kind: Route 27 | apiVersion: 
route.openshift.io/v1 28 | metadata: 29 | name: camunda-identity 30 | namespace: camunda 31 | labels: 32 | app: camunda-platform 33 | app.kubernetes.io/component: identity 34 | app.kubernetes.io/instance: camunda 35 | app.kubernetes.io/name: camunda-platform 36 | app.kubernetes.io/part-of: camunda-platform 37 | spec: 38 | host: identity-camunda.apps-crc.testing 39 | to: 40 | kind: Service 41 | name: camunda-identity 42 | weight: 100 43 | port: 44 | targetPort: http 45 | tls: 46 | termination: edge 47 | insecureEdgeTerminationPolicy: Redirect 48 | wildcardPolicy: None 49 | alternateBackends: [] 50 | --- 51 | kind: Route 52 | apiVersion: route.openshift.io/v1 53 | metadata: 54 | name: camunda-operate 55 | namespace: camunda 56 | labels: 57 | app: camunda-platform 58 | app.kubernetes.io/component: operate 59 | app.kubernetes.io/instance: camunda 60 | app.kubernetes.io/name: camunda-platform 61 | app.kubernetes.io/part-of: camunda-platform 62 | spec: 63 | host: operate-camunda.apps-crc.testing 64 | to: 65 | kind: Service 66 | name: camunda-operate 67 | weight: 100 68 | port: 69 | targetPort: http 70 | tls: 71 | termination: edge 72 | insecureEdgeTerminationPolicy: Redirect 73 | wildcardPolicy: None 74 | alternateBackends: [] 75 | --- 76 | kind: Route 77 | apiVersion: route.openshift.io/v1 78 | metadata: 79 | name: camunda-tasklist 80 | namespace: camunda 81 | labels: 82 | app: camunda-platform 83 | app.kubernetes.io/component: tasklist 84 | app.kubernetes.io/instance: camunda 85 | app.kubernetes.io/name: camunda-platform 86 | app.kubernetes.io/part-of: camunda-platform 87 | spec: 88 | host: tasklist-camunda.apps-crc.testing 89 | to: 90 | kind: Service 91 | name: camunda-tasklist 92 | weight: 100 93 | port: 94 | targetPort: http 95 | tls: 96 | termination: edge 97 | insecureEdgeTerminationPolicy: Redirect 98 | wildcardPolicy: None 99 | alternateBackends: [] 100 | -------------------------------------------------------------------------------- 
/openshift/certs/backend.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/openshift/certs/backend.p12 -------------------------------------------------------------------------------- /openshift/certs/backend.test.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFojCCBIqgAwIBAgIUIaeL7S8OOiT4aTRSse0Y2y9RojAwDQYJKoZIhvcNAQEL 3 | BQAwgasxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhWaXJnaW5pYTEXMBUGA1UEBwwO 4 | RnJlZGVyaWNrc2J1cmcxFjAUBgNVBAoMDXVwZ3JhZGluZ2RhdmUxEDAOBgNVBAsM 5 | B2NhbXVuZGExGzAZBgNVBAMMEnVwZ3JhZGluZ2RhdmUtcm9vdDEpMCcGCSqGSIb3 6 | DQEJARYaZGF2aWQucGFyb3VsZWtAY2FtdW5kYS5jb20wHhcNMjQwNzI2MTgxMzI0 7 | WhcNMjYxMDI5MTgxMzI0WjCBpTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCFZpcmdp 8 | bmlhMRcwFQYDVQQHDA5GcmVkZXJpY2tzYnVyZzEWMBQGA1UECgwNdXBncmFkaW5n 9 | ZGF2ZTEQMA4GA1UECwwHY2FtdW5kYTEVMBMGA1UEAwwMYmFja2VuZC1jZXJ0MSkw 10 | JwYJKoZIhvcNAQkBFhpkYXZpZC5wYXJvdWxla0BjYW11bmRhLmNvbTCCASIwDQYJ 11 | KoZIhvcNAQEBBQADggEPADCCAQoCggEBALYMIVewlnAfHdRmxuqsxLaV3LXVaDD0 12 | mumEfT50GQYN3KecQXrR0LHr9RJg4NsYsQ4E2mS2ICuATCpQ1OFiFi1vdUcmOp2L 13 | TclK7ktrizrbK2yfgO8bHpAilu+H3Hyo5FUL/QM8ufyh83DLoAiQanL0nzoIeLwL 14 | +vp2obfQ3g0CkkEgP10FObQ54QU8in0/+ZjgQUajh/4H36NE4+3IB+uu0edaP5di 15 | /IK5lhRc+0nXY5dQSYtnXeNiZB86hvZghfu4Dkqx4WaVSrL83uunRDnH+SM6EGou 16 | 4dag3sB2CPEd4mp//hryaj0Q0TkzicLZaJ5BYNd4mskDeSz/zl5tEzkCAwEAAaOC 17 | AcAwggG8MB8GA1UdIwQYMBaAFPu2r93fRZRvf9vG3DTzh37JC6hHMAkGA1UdEwQC 18 | MAAwCwYDVR0PBAQDAgTwMIIBYAYDVR0RBIIBVzCCAVOCKmNhbXVuZGEta2V5Y2xv 19 | YWsuY2FtdW5kYS5zdmMuY2x1c3Rlci5sb2NhbIIqY2FtdW5kYS1pZGVudGl0eS5j 20 | YW11bmRhLnN2Yy5jbHVzdGVyLmxvY2FsgidjYW11bmRhLXplZWJlLmNhbXVuZGEu 21 | c3ZjLmNsdXN0ZXIubG9jYWyCKmNhbXVuZGEtdGFza2xpc3QuY2FtdW5kYS5zdmMu 22 | Y2x1c3Rlci5sb2NhbIIpY2FtdW5kYS1vcGVyYXRlLmNhbXVuZGEuc3ZjLmNsdXN0 23 | 
ZXIubG9jYWyCL2NhbXVuZGEtemVlYmUtZ2F0ZXdheS5jYW11bmRhLnN2Yy5jbHVz 24 | dGVyLmxvY2FsghBjYW11bmRhLWtleWNsb2FrghBjYW11bmRhLWlkZW50aXR5ghVj 25 | YW11bmRhLXplZWJlLWdhdGV3YXmCDWNhbXVuZGEtemVlYmUwHQYDVR0OBBYEFCbB 26 | nds6C9V/VuAzVDFxCxqyX3pBMA0GCSqGSIb3DQEBCwUAA4IBAQCZq78n4ylMUsM/ 27 | ysRiAyOfWK41te40Tk8AA2RAtk2nbFVj501ZF5Z3wkSeLRDkAULNARj9kG7ZrTb0 28 | b4NKOy0ZJ1Ok44FqMssG8CB4bCgjzTb2waJlHKOBp2s/4joqHf3LBWtjXlM2yJIa 29 | kXmEvSsNxBzloyLBiTzt1IPTed6YUeUSR07loTyQ56UkV+Fax02f0pm1BIPEXNtU 30 | gjH0TPQdHhocAEWZgIMcAO3tPU72fhrgXj93J4SdukS5xqts++m7bofvPwXTRAoQ 31 | wJtdQ2phFIJTUJe5M4rlAMe9H4DAZg/Vf8HNq0kKGpWd1HEKHnnYlgPFiB849f+N 32 | C5Ix1mhV 33 | -----END CERTIFICATE----- 34 | -------------------------------------------------------------------------------- /openshift/certs/backend.test.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIC6zCCAdMCAQAwgaUxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhWaXJnaW5pYTEX 3 | MBUGA1UEBwwORnJlZGVyaWNrc2J1cmcxFjAUBgNVBAoMDXVwZ3JhZGluZ2RhdmUx 4 | EDAOBgNVBAsMB2NhbXVuZGExFTATBgNVBAMMDGJhY2tlbmQtY2VydDEpMCcGCSqG 5 | SIb3DQEJARYaZGF2aWQucGFyb3VsZWtAY2FtdW5kYS5jb20wggEiMA0GCSqGSIb3 6 | DQEBAQUAA4IBDwAwggEKAoIBAQC2DCFXsJZwHx3UZsbqrMS2ldy11Wgw9JrphH0+ 7 | dBkGDdynnEF60dCx6/USYODbGLEOBNpktiArgEwqUNThYhYtb3VHJjqdi03JSu5L 8 | a4s62ytsn4DvGx6QIpbvh9x8qORVC/0DPLn8ofNwy6AIkGpy9J86CHi8C/r6dqG3 9 | 0N4NApJBID9dBTm0OeEFPIp9P/mY4EFGo4f+B9+jROPtyAfrrtHnWj+XYvyCuZYU 10 | XPtJ12OXUEmLZ13jYmQfOob2YIX7uA5KseFmlUqy/N7rp0Q5x/kjOhBqLuHWoN7A 11 | dgjxHeJqf/4a8mo9ENE5M4nC2WieQWDXeJrJA3ks/85ebRM5AgMBAAGgADANBgkq 12 | hkiG9w0BAQsFAAOCAQEAWu1bn18N5WHG26D6prMXhJWOEcSog5JoPVeClIYV66JA 13 | APuxv7DpwWRG3O+v3WBIHNZ599ozWUYWD20WICMg67R67QT64Mb6aBCGJbUuf1Sq 14 | /ZwPmgbF2kaO9dvIdzVb2iipzB2gfAJUwhXgSsaJoxYkkILIoFd+7U+zHURRkSXA 15 | pBWVgMjrWwD8ynBqsvcDQg1tjjiEE9j+95dEx6nuFmWucqFAx90FJLBj+x34lgj8 16 | ZFahPfBYQCThEwpUOxEhxxro2az47Ha9P/mNDAXMeolbWZC9ri6kcjdZvZvLWknK 17 | 
GvR9n6US2Grh4opAvZe2Co2q7n529EMxIJBvqmOLTA== 18 | -----END CERTIFICATE REQUEST----- 19 | -------------------------------------------------------------------------------- /openshift/certs/backend.test.ext: -------------------------------------------------------------------------------- 1 | authorityKeyIdentifier=keyid,issuer 2 | basicConstraints=CA:FALSE 3 | keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment 4 | subjectAltName = @alt_names 5 | 6 | [alt_names] 7 | DNS.1 = camunda-keycloak.camunda.svc.cluster.local 8 | DNS.2 = camunda-identity.camunda.svc.cluster.local 9 | DNS.3 = camunda-zeebe.camunda.svc.cluster.local 10 | DNS.4 = camunda-tasklist.camunda.svc.cluster.local 11 | DNS.5 = camunda-operate.camunda.svc.cluster.local 12 | DNS.6 = camunda-zeebe-gateway.camunda.svc.cluster.local 13 | DNS.7 = camunda-keycloak 14 | DNS.8 = camunda-identity 15 | DNS.9 = camunda-zeebe-gateway 16 | DNS.10 = camunda-zeebe 17 | -------------------------------------------------------------------------------- /openshift/certs/dave.test.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC2DCFXsJZwHx3U 3 | ZsbqrMS2ldy11Wgw9JrphH0+dBkGDdynnEF60dCx6/USYODbGLEOBNpktiArgEwq 4 | UNThYhYtb3VHJjqdi03JSu5La4s62ytsn4DvGx6QIpbvh9x8qORVC/0DPLn8ofNw 5 | y6AIkGpy9J86CHi8C/r6dqG30N4NApJBID9dBTm0OeEFPIp9P/mY4EFGo4f+B9+j 6 | ROPtyAfrrtHnWj+XYvyCuZYUXPtJ12OXUEmLZ13jYmQfOob2YIX7uA5KseFmlUqy 7 | /N7rp0Q5x/kjOhBqLuHWoN7AdgjxHeJqf/4a8mo9ENE5M4nC2WieQWDXeJrJA3ks 8 | /85ebRM5AgMBAAECggEADT1xaPQQfOcTP4QzqcdKJ0PCLmQiLi1PQ8Lwn1iVcuzf 9 | KqS6pNKA3+/OF+zj1At5SEO7sa3TrFZDmj0QWBpojBvNgJDPNbx3IJ5KgA6HE1vz 10 | tI3rmqtzDzIoBOCLFwxRgiRbdzjOoTxGFq+e9ZoYCkEWs4NnBIYtvFMuDJuSpFXd 11 | iLOaYpSSA897gn8/FcyB5MjWcK6a3EpfntMlrh1Af3UDZvkTM6ksvyFnMoezjyKX 12 | NRqCzXL7WdH2ljI/jTuxVTIc1jOMNbdtbuPg5W6JKu409Yy5dFmq3mNlFsCt/ZVU 13 | 
f7doxgJ/6GsF4LfT5TrQ/MuC4eH+AYsssbCZwvBJYQKBgQDx5D317eJISg6e02Sg 14 | ZSSWDfXROPBfkfBhdZYNP7NG/xtyblIqm4TfWldKMJ4KW9dvhXh8jrK03SWBwXDb 15 | JNP8pMmXlcT6lJn45Gl8SdklSocvqsXFCpq+PxQv48+4jM03cFngjZLIs0JkKHnY 16 | igj7uMasUWPA7gL2vqQ2YFeJBwKBgQDAqlYMFpf/UNLeO2fxNq3AoGy2skCAm5cn 17 | Z+JUVcEgqRQfJrkpspaCHWXS2pwneZJNt1jhRRiPPmlgbJwTI99IaTjyLaSsOw58 18 | WWBD+AdBvtHD2thgSKNkNkdRl9mIxcI/dZlp6kPmYyZtz0fArSc6uzctfPflnErE 19 | FPV9VaCxvwKBgAzmViOSXxnp0SPJNDmVi0DxV6WjurXuC3q3EZx9RCtKGYWduoYJ 20 | KynaIIOVWihFQ5Tv8j/6QC+DB6gGz+Tv7tovU4zg8NNIL3nlfHajXZAujE0Pd2KN 21 | sTjqFkKDjKVT5PHK7RAbnLNVU/oRaoYQvm/mJ7oy0PYN+1x7SDiaJLDrAoGBALwY 22 | 0J9Go6iZUPW0S9tchbANL3YCeyTUQnMvYfDyM/Xd/I4ZMSc7euOXLeT4lwGXGDYV 23 | rYlo+vxGc3y3LH0nRYOdbwJJe37GvXK7k1doXDVcxdCP5TXDVPmuYFyRgsicQfPQ 24 | rTGRjC7uWANXHfdHVYhqZQlc+2zI9NmQmMd/hL5jAoGBAKVCtXNiGeyP6XUEN6Uj 25 | s4WCMNbAPUbxlLhTlGplhk24lKDw0FRQA+SJRcnsPo4RSGI4h03aK1lC7W74HYXI 26 | 6fx2WDLx3X6/9mjs12YAj9DgXyGYW6/LtYDT6FNpb55k5sOJY5pNGDhJey6Cafa+ 27 | OsSm1cxURN0xcMDSZaoRNwvF 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /openshift/certs/frontend.test.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE+DCCA+CgAwIBAgIUIaeL7S8OOiT4aTRSse0Y2y9RoikwDQYJKoZIhvcNAQEL 3 | BQAwgasxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhWaXJnaW5pYTEXMBUGA1UEBwwO 4 | RnJlZGVyaWNrc2J1cmcxFjAUBgNVBAoMDXVwZ3JhZGluZ2RhdmUxEDAOBgNVBAsM 5 | B2NhbXVuZGExGzAZBgNVBAMMEnVwZ3JhZGluZ2RhdmUtcm9vdDEpMCcGCSqGSIb3 6 | DQEJARYaZGF2aWQucGFyb3VsZWtAY2FtdW5kYS5jb20wHhcNMjQwNzA0MTgwODU1 7 | WhcNMjYxMDA3MTgwODU1WjCBpjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCFZpcmdp 8 | bmlhMRcwFQYDVQQHDA5GcmVkZXJpY2tzYnVyZzEWMBQGA1UECgwNdXBncmFkaW5n 9 | ZGF2ZTEQMA4GA1UECwwHY2FtdW5kYTEWMBQGA1UEAwwNZnJvbnRlbmQtY2VydDEp 10 | MCcGCSqGSIb3DQEJARYaZGF2aWQucGFyb3VsZWtAY2FtdW5kYS5jb20wggEiMA0G 11 | CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2DCFXsJZwHx3UZsbqrMS2ldy11Wgw 12 | 
9JrphH0+dBkGDdynnEF60dCx6/USYODbGLEOBNpktiArgEwqUNThYhYtb3VHJjqd 13 | i03JSu5La4s62ytsn4DvGx6QIpbvh9x8qORVC/0DPLn8ofNwy6AIkGpy9J86CHi8 14 | C/r6dqG30N4NApJBID9dBTm0OeEFPIp9P/mY4EFGo4f+B9+jROPtyAfrrtHnWj+X 15 | YvyCuZYUXPtJ12OXUEmLZ13jYmQfOob2YIX7uA5KseFmlUqy/N7rp0Q5x/kjOhBq 16 | LuHWoN7AdgjxHeJqf/4a8mo9ENE5M4nC2WieQWDXeJrJA3ks/85ebRM5AgMBAAGj 17 | ggEVMIIBETAfBgNVHSMEGDAWgBT7tq/d30WUb3/bxtw084d+yQuoRzAJBgNVHRME 18 | AjAAMAsGA1UdDwQEAwIE8DCBtgYDVR0RBIGuMIGrgiFrZXljbG9hay1jYW11bmRh 19 | LmFwcHMtY3JjLnRlc3RpbmeCIWlkZW50aXR5LWNhbXVuZGEuYXBwcy1jcmMudGVz 20 | dGluZ4IeemVlYmUtY2FtdW5kYS5hcHBzLWNyYy50ZXN0aW5ngiF0YXNrbGlzdC1j 21 | YW11bmRhLmFwcHMtY3JjLnRlc3RpbmeCIG9wZXJhdGUtY2FtdW5kYS5hcHBzLWNy 22 | Yy50ZXN0aW5nMB0GA1UdDgQWBBQmwZ3bOgvVf1bgM1QxcQsasl96QTANBgkqhkiG 23 | 9w0BAQsFAAOCAQEAczmxF95Rm6M5giYwYV6P+U415WLRTfyS4UCLwgp0gkoawgVn 24 | WGxth3sxGEn7sHIJZ+pg5yA8jBqenr2R17Ky/UH/+B2/WoKZ2QNGY8QVvJ3HYYaZ 25 | RilxHIrLJplPOyWDh6sGRMWWnjenm8ag66W7dYJr8LKqhmY8jA2rBm3uwCwzCn4E 26 | U8uuDoDaRyhZ2LHN5FEdY2JreyEKizoLdNKi3w8KubtovLzpPS7kDn7mZcGt7VJb 27 | 3Vz6A+iaTeHXyXiSsPpaLd8GMN2vkaXYiXuY0o+K0IbElSC5iUsVn5LcDtb4eNUd 28 | qc3hLVZBiSyifYSOk4tlFXkrakIdWvib/YlGjA== 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /openshift/certs/frontend.test.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIC7DCCAdQCAQAwgaYxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhWaXJnaW5pYTEX 3 | MBUGA1UEBwwORnJlZGVyaWNrc2J1cmcxFjAUBgNVBAoMDXVwZ3JhZGluZ2RhdmUx 4 | EDAOBgNVBAsMB2NhbXVuZGExFjAUBgNVBAMMDWZyb250ZW5kLWNlcnQxKTAnBgkq 5 | hkiG9w0BCQEWGmRhdmlkLnBhcm91bGVrQGNhbXVuZGEuY29tMIIBIjANBgkqhkiG 6 | 9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtgwhV7CWcB8d1GbG6qzEtpXctdVoMPSa6YR9 7 | PnQZBg3cp5xBetHQsev1EmDg2xixDgTaZLYgK4BMKlDU4WIWLW91RyY6nYtNyUru 8 | S2uLOtsrbJ+A7xsekCKW74fcfKjkVQv9Azy5/KHzcMugCJBqcvSfOgh4vAv6+nah 9 | t9DeDQKSQSA/XQU5tDnhBTyKfT/5mOBBRqOH/gffo0Tj7cgH667R51o/l2L8grmW 10 | 
FFz7Sddjl1BJi2dd42JkHzqG9mCF+7gOSrHhZpVKsvze66dEOcf5IzoQai7h1qDe 11 | wHYI8R3ian/+GvJqPRDROTOJwtlonkFg13iayQN5LP/OXm0TOQIDAQABoAAwDQYJ 12 | KoZIhvcNAQELBQADggEBAC1h7w48sq4j1FP3C84tklo5Yeu7OaL0rVwubHakEpp4 13 | anpEfsXmmhDSipVqKHlYREk7+4GKPscb5yOUATctLdplhoWZhmCBEUFzaZ75lZxN 14 | cPRIkm+zoQ+heU9hxh60drWqMXJ+IwRDCkJZMh8X2iYVJ+2df5RojMr2PHNV8QmP 15 | UFxGwC/w1e3QdGcGPW7Kh9ZOsKFOus+CIZd2SzKHVEYtIUmXGc0xRKrBEkLQ+gKn 16 | NAn8KYz4IjvLQIUZkE+1wdjdAMRsWoE1rIUZ6i7WIyl73NHQZZ6d9RpgY9P8Ojsi 17 | /zdmWb+Y38CX+YgNMKYHU4pj91xdMAEXjJO5+QrkOjY= 18 | -----END CERTIFICATE REQUEST----- 19 | -------------------------------------------------------------------------------- /openshift/certs/frontend.test.ext: -------------------------------------------------------------------------------- 1 | authorityKeyIdentifier=keyid,issuer 2 | basicConstraints=CA:FALSE 3 | keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment 4 | subjectAltName = @alt_names 5 | 6 | [alt_names] 7 | DNS.1 = keycloak-camunda.apps-crc.testing 8 | DNS.2 = identity-camunda.apps-crc.testing 9 | DNS.3 = zeebe-camunda.apps-crc.testing 10 | DNS.4 = tasklist-camunda.apps-crc.testing 11 | DNS.5 = operate-camunda.apps-crc.testing -------------------------------------------------------------------------------- /openshift/certs/keycloak.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/openshift/certs/keycloak.keystore.jks -------------------------------------------------------------------------------- /openshift/certs/keycloak.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camunda-community-hub/camunda-8-helm-profiles/1e1812293e7b83e047e608bd2f7781d4a576e924/openshift/certs/keycloak.truststore.jks 
-------------------------------------------------------------------------------- /openshift/certs/myCA.key: -------------------------------------------------------------------------------- 1 | -----BEGIN ENCRYPTED PRIVATE KEY----- 2 | MIIFJDBWBgkqhkiG9w0BBQ0wSTAxBgkqhkiG9w0BBQwwJAQQvHdswTSTWmkjjIqm 3 | JEe2lgICCAAwDAYIKoZIhvcNAgkFADAUBggqhkiG9w0DBwQIrsgkS8tNT78EggTI 4 | F35CWkNSiiOEoLBWYlGVmH72X3xiQkXBJPtOjOqbj9AQOmhyM6GjOgWRCwDm7fOw 5 | GsAPev8AJlYhIhUDDe1MQk7kKUnG33SBww1iaHMe008imB6RfpgqlZ7JSH2t+E+9 6 | ia0t/yVIZpYH8acal/U19Gs7o0VzaPkn511Gv1TRwBCBLkLMuBZKBHRj/D442Fx5 7 | gfreUcTW7yPCyh0xOvpA46Yhn91LXwOWpPHQNplkiEdpYohtnxVes0sxk+wgRMIy 8 | mBzrWNvXIDBjRzjAhLSMuiN/LdPWJkffRGKl9HYM0FohRoViAFXID9qN8ypF1S0u 9 | rZfPcN45N1Q68bXyzyjnEPINmRKfzw5CCN4qjOk81fAsFHZvDp3Bi5vGWtAg0zcX 10 | cXEXS0AfcOAU2tKbSLzkIB0xJhqDfZrANAwdJt+xlWoTJDcZtoC9T3RjAyHnGPJv 11 | /YxXvrPmtOOao7j8/GGJj9s17zTF5fcbwW8OmgSG7ww7pPBz4ICj4oDOoKY6l9bx 12 | yfyWzlqZIOiYStEJ/NxVX5squSHrPU/RIrb4jHep4YT4OJ0LxajWrSg3Wn4ab9MI 13 | F3P6xxvFyN2nfUOt9zYzzxermAYm/WwBdqvNuO+EXYpR/KR4wbcuNGAncRXzT1fd 14 | +og8vBhvtzoDus/mpqlcw8gJBtEDXeETDXeUJOy/zUcQ6jKfNYt28loXmRSZl4U8 15 | y29KW7F2nx+acCljfG0TjW/x9mWGA68tL5VjuvlLTxXCOtjWQ6L0VYoH3ryjgai0 16 | svNBcsJNyekVj7XiPsLnWtL2HY3Z7z1QPQ0CBwwdjQ+lZS1AeA3uKyGyNp7DMpB+ 17 | JrCYGU9cn7MKrALpnYPT/hT7q5untq1MN4aOeoX3dotVROwlaEuBfxC9PAiMVlFO 18 | V51/xXjCHe4nuSYs77ouTroteVh1uDkxZJrISXEIWeFkAJLuIQtlZq7Im56kwo4B 19 | NxaSZjWS6tHVbW8ZEzz3acpT4vBXMaAa4cGrQD8IYkrHNNQ3DOev02HKfvaZjN9t 20 | rEYnfEeiP7VyLRjmbDuf4cCOaEw56/DlxfZs/x1Ra6LiYTJxCFkShhy7iufH4Ejx 21 | gpdwOWAt3lB6cIRtgsSaUV4eycd5QT8WtKal5dxPUGE/C/OXFUuhz0Tv5dBue7dZ 22 | GjqcrPSUxFPrX/qVn75+ByoqinCW5yp5ORkzVKq2fUt3bdFRTntGyB8wO9lUzkHa 23 | rwLJ29QMb9RFkvP14wO91UCl8UAlLp7E/jlSCSKEIIU5q+JWPcg5rtsge2tj8H/a 24 | t+mUElWabHsDf944ZvShhRYn/vBOaaA3J1Vmmz2cZK/o6Ot/pFttpSpSkABKaJ6x 25 | RNmH2Xd7g/Jdp9VfeAd1ONzMtDIbnGoXmRPYoH0s8JNK9CHT/vPks0PC0cT7XO5t 26 | Z53JV/uMzDY6tlKfVrfzV9zbdC+So/PjNbKAL+UzD/nDaRQnyWzLtusge+vZGs4S 27 | 
MT0wkdcENOL7IaHotjdt31bUc4NquxUeaZtgDCHz3a0i3Q+BwX9iClcepxGpMfLr 28 | 2I2XQSgw8l2YM59tL6KrPUIWeJsSe2ZZQRH1th5WBLQnfxOFtD9fxsevRnf5rML3 29 | d45hGFxh5fUtMu2XPX/6GpQIFaVTWkiZ 30 | -----END ENCRYPTED PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /openshift/certs/myCA.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEOTCCAyGgAwIBAgIUfWYBJcES1rRugHyNfhvINb5wzHgwDQYJKoZIhvcNAQEL 3 | BQAwgasxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhWaXJnaW5pYTEXMBUGA1UEBwwO 4 | RnJlZGVyaWNrc2J1cmcxFjAUBgNVBAoMDXVwZ3JhZGluZ2RhdmUxEDAOBgNVBAsM 5 | B2NhbXVuZGExGzAZBgNVBAMMEnVwZ3JhZGluZ2RhdmUtcm9vdDEpMCcGCSqGSIb3 6 | DQEJARYaZGF2aWQucGFyb3VsZWtAY2FtdW5kYS5jb20wHhcNMjQwNzA0MTcyNjUx 7 | WhcNMjkwNzAzMTcyNjUxWjCBqzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCFZpcmdp 8 | bmlhMRcwFQYDVQQHDA5GcmVkZXJpY2tzYnVyZzEWMBQGA1UECgwNdXBncmFkaW5n 9 | ZGF2ZTEQMA4GA1UECwwHY2FtdW5kYTEbMBkGA1UEAwwSdXBncmFkaW5nZGF2ZS1y 10 | b290MSkwJwYJKoZIhvcNAQkBFhpkYXZpZC5wYXJvdWxla0BjYW11bmRhLmNvbTCC 11 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALhm8TNta15jEcO8qXYzbFYH 12 | qj+4pU80UFE9gXTtqH8aCs33AKUe2v7LTbizT0oDN8KbjIEzHM24ELRhOpypW+mr 13 | 7UsxczHlLU6oGYLuIwh+HLB116P1wqL57DCtlxJNMoLN2g66IDSEsfMVStDJaO0I 14 | TEqWsBlG9Fu1rvo48RzIrXP8Lri5BLXDMMvvxY4gFCiLmYxxns8hbMgF4RY5P9NO 15 | /ueUSUonYFXtyCaBcibi59PDEaBnbCWQEIKZUQEzdFLAwWIJVKqQNyk5TWLTkLwp 16 | iOC8Jtao/JN+8L76aHwfvaj7onSbs/b3+t9EompQpviSPRySbK/WlR+xDBkLojEC 17 | AwEAAaNTMFEwHQYDVR0OBBYEFPu2r93fRZRvf9vG3DTzh37JC6hHMB8GA1UdIwQY 18 | MBaAFPu2r93fRZRvf9vG3DTzh37JC6hHMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 19 | hvcNAQELBQADggEBAKzX7eDSoU2eL6eIpg1GAZoeEN6nl/I0EsCaMd7u2BkORC9O 20 | clbK5gF+rPhsGoYWjfQg5AvYw0kGmTULQ/PyVaU/IDU8CbapzvUl9oll+UlrGcwp 21 | rtkft3XBb/sGBqQmgRn77R0A1W8967qV2Jw/XVD774Yd3t/aXhcIvk6wxWHEU5qT 22 | ubzGXkQcIIQfj6TgSCxTMh26f2RcmLRG073q9RFYu9MlyXFQIgCSgfEjpdY+UVJj 23 | TwihFa/ssLIDSc9SR2mKjbAcmcRPjaOisQCYHcvcG20d1DklUnkSZmtjHiaUd8/7 24 | 
56xrBQE5GzuivMBgAR+6zeSM12oHlvpYOX58ouY= 25 | -----END CERTIFICATE----- 26 | -------------------------------------------------------------------------------- /openshift/certs/myCA.srl: -------------------------------------------------------------------------------- 1 | 21A78BED2F0E3A24F8693452B1ED18DB2F51A230 2 | -------------------------------------------------------------------------------- /openshift/patch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | # Expected usage is as an Helm post renderer. 3 | # Example usage: 4 | # $ helm install my-release camunda/camunda-platform --post-renderer ./patch.sh 5 | # 6 | # This script is a Helm chart post-renderer for users on Helm 3.2.0 and greater. It allows removing default 7 | # values set in sub-charts/dependencies, something which should be possible but is currently not working. 8 | # See this issue for more: https://github.com/helm/helm/issues/9136 9 | # 10 | # The result of patching the rendered Helm templates is printed out to STDOUT. Any other logging from the 11 | # script is thus sent to STDERR. 12 | # 13 | # Note to contributors: this post-renderer is used in the integration tests, so make sure that it can be used 14 | # from any working directory. 
15 | 16 | set -o pipefail 17 | 18 | if [[ "$(uname)" == "Darwin" ]]; then 19 | sed_command="gsed" 20 | else 21 | sed_command="sed" 22 | fi 23 | 24 | "$sed_command" -e '/\srunAsUser:\s/d' -e '/\sfsGroup:\s/d' -------------------------------------------------------------------------------- /openshift/set-env-openshift.sh: -------------------------------------------------------------------------------- 1 | 2 | export userName=kubeadmin 3 | export userSecret="xxx" 4 | 5 | export camundaVersion=8.5.4 6 | export camundaHelmVersion=10.2.1 7 | 8 | export release=camunda 9 | export chartValues=camunda-values-openshift.yaml 10 | export chart=camunda/camunda-platform 11 | 12 | export namespace=camunda 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /openshift/values/values-dev.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 
3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | # This is a very small cluster useful for running locally and for development 7 | 8 | global: 9 | identity: 10 | auth: 11 | enabled: false 12 | 13 | identity: 14 | enabled: true 15 | 16 | identityKeycloak: 17 | enabled: false 18 | 19 | optimize: 20 | enabled: false 21 | 22 | connectors: 23 | enabled: false 24 | 25 | zeebe: 26 | clusterSize: 1 27 | partitionCount: 1 28 | replicationFactor: 1 29 | affinity: 30 | podAntiAffinity: null 31 | pvcSize: 10Gi 32 | resources: 33 | requests: 34 | cpu: "100m" 35 | memory: "512M" 36 | limits: 37 | cpu: "512m" 38 | memory: "2Gi" 39 | 40 | zeebe-gateway: 41 | replicas: 1 42 | resources: 43 | requests: 44 | cpu: "100m" 45 | memory: "512M" 46 | limits: 47 | cpu: "1000m" 48 | memory: "1Gi" 49 | 50 | elasticsearch: 51 | enabled: true 52 | securityContext: 53 | runAsUser: "@@null@@" 54 | sysctlImage: 55 | enabled: false 56 | podSecurityContext: 57 | fsGroup: "@@null@@" 58 | runAsUser: "@@null@@" 59 | master: 60 | masterOnly: false 61 | replicaCount: 1 62 | resources: 63 | requests: 64 | cpu: "100m" 65 | memory: "512M" 66 | limits: 67 | cpu: "1000m" 68 | memory: "2Gi" 69 | persistence: 70 | size: "8Gi" 71 | containerSecurityContext: 72 | enabled: false 73 | data: 74 | replicaCount: 0 75 | coordinating: 76 | replicaCount: 0 77 | ingest: 78 | replicaCount: 0 79 | 80 | -------------------------------------------------------------------------------- /operate/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | 4 | # Configure the fully qualified domain name 5 | # The dnsLabel is the first part of the domain address. 
It will be used no matter what baseDomain you configure below 6 | dnsLabel ?= YOUR_DNS_LABEL 7 | 8 | # By default, we'll use nip.io (See more at [https://nip.io](http://nip.io) ) 9 | # The fully qualified domain name will look something like ..nip.io 10 | # baseDomainName ?= nip.io 11 | 12 | # Another option is to replace baseDomainName with your own domain name 13 | # In this case, the fully qualified domain name will look like . 14 | baseDomainName ?= YOUR_DOMAIN_NAME 15 | 16 | # Only used for oauth2-proxy 17 | # clientSecret ?= CLIENT_SECRET 18 | 19 | # ------------------------------------ 20 | # The following variables should not be changed except for advanced use cases 21 | 22 | ifeq ($(OS),Windows_NT) 23 | root ?= $(CURDIR)/.. 24 | else 25 | root ?= $(shell pwd)/.. 26 | endif 27 | 28 | include $(root)/include/ingress-nginx.mk 29 | include $(root)/operate/include/operate.mk 30 | -------------------------------------------------------------------------------- /operate/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profile for Horizontally Scaling Operate 2 | 3 | > [!IMPORTANT] 4 | > This profile is deprecated, please use [high-available-webapps](../high-available-webapps) profile instead 5 | 6 | The Camunda 8 Operate webapp [consists of 3 modules](https://docs.camunda.io/docs/self-managed/operate-deployment/importer-and-archiver/). 7 | - Webapp 8 | - Importer 9 | - Archiver 10 | 11 | This directory provides scripts that demonstrate how to horizontally scale Operate so that 1 instance of the importer/archiver, and 2 instances of the webapp are running. 12 | 13 | These scripts are not intended to be used in Production, they are for reference only. Feel free to copy and customize for your environment and your specific requirements. 14 | 15 | ## Overview 16 | 17 | 1. Use the [Camunda 8 Helm Charts](https://github.com/camunda/camunda-platform-helm) to install a full environment (including Operate) 18 | 2. 
Run the [make target](./include/operate.mk) `make operate-delete` to remove the existing configmap and deployment installed by the Camunda 8 Helm Charts 19 | 3. Edit the [yaml files](./include) and update them to be relevant for your existing kubernetes cluster and Camunda environment 20 | 4. Run the [make target](./include/operate.mk) `make operate-install` to use kubectl to apply the `deployment-*.yaml` and `configmap-*.yaml` files found in this directory 21 | 22 | ## Configuration Defaults 23 | 24 | By default, the scripts and yaml files will setup a single Operate importer/archiver instance and 2 Operate webapps (without the importer/archiver). 25 | 26 | The files provided here are for reference only. These files will need to be modified with specifics for your environment. 27 | 28 | Here's a table that shows some of the configurations that will need to be changed to match your specific environment and requirements: 29 | 30 | | Option | Default | Source File | 31 | |----------------------------|-------------------------|-----------------------| 32 | | Version | latest | `deployment-*.yaml` | 33 | | Context Path | /operate | `deployment-*.yaml` | 34 | | Elasticsearch clusterName | elasticsearch | `configmap-*.yaml` | 35 | | Elasticsearch host | elasticsearch-master | `configmap-*.yaml` | 36 | | Elasticsearch port | 9200 | `configmap-*.yaml` | 37 | | Elasticsearch index prefix | zeebe-record | `configmap-*.yaml` | 38 | | Many urls related to Auth | See `deployment-*.yaml` | `deployment-*.yaml` | 39 | -------------------------------------------------------------------------------- /operate/include/camunda-ingress.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/templates/ingress.yaml 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: camunda-camunda-platform 7 | labels: 8 | app: camunda-platform 9 | app.kubernetes.io/name: camunda-platform 10 |
app.kubernetes.io/instance: camunda 11 | app.kubernetes.io/managed-by: Helm 12 | app.kubernetes.io/part-of: camunda-platform 13 | annotations: 14 | ingress.kubernetes.io/rewrite-target: / 15 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 16 | spec: 17 | ingressClassName: nginx 18 | rules: 19 | - host: YOUR_HOSTNAME 20 | http: 21 | paths: 22 | - backend: 23 | service: 24 | name: camunda-keycloak 25 | port: 26 | number: 80 27 | path: /auth 28 | pathType: Prefix 29 | - backend: 30 | service: 31 | name: camunda-identity 32 | port: 33 | number: 80 34 | path: /identity 35 | pathType: Prefix 36 | # - backend: 37 | # service: 38 | # name: camunda-operate 39 | # port: 40 | # number: 80 41 | # path: /operate 42 | # pathType: Prefix 43 | - backend: 44 | service: 45 | name: camunda-optimize 46 | port: 47 | number: 80 48 | path: /optimize 49 | pathType: Prefix 50 | - backend: 51 | service: 52 | name: camunda-tasklist 53 | port: 54 | number: 80 55 | path: /tasklist 56 | pathType: Prefix 57 | tls: 58 | - hosts: 59 | - YOUR_HOSTNAME 60 | secretName: tls-secret -------------------------------------------------------------------------------- /operate/include/configmap-importer-archiver.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: operate-configmap-importer-archiver 6 | labels: 7 | app.kubernetes.io/app: operate 8 | app.kubernetes.io/component: importer-archiver 9 | app.kubernetes.io/tier: importer-archiver 10 | data: 11 | application.yml: | 12 | # Operate configuration file 13 | camunda.operate: 14 | webappEnabled: false 15 | # ELS instance to store Operate data 16 | elasticsearch: 17 | # Cluster name 18 | clusterName: elasticsearch 19 | # Host 20 | host: elasticsearch-master 21 | # Transport port 22 | port: 9200 23 | # Zeebe instance 24 | zeebe: 25 | # Broker contact point 26 | brokerContactPoint: "camunda-zeebe-gateway:26500" 27 | # ELS instance to export Zeebe 
data to 28 | zeebeElasticsearch: 29 | # Cluster name 30 | clusterName: elasticsearch 31 | # Host 32 | host: elasticsearch-master 33 | # Transport port 34 | port: 9200 35 | # Index prefix, configured in Zeebe Elasticsearch exporter 36 | prefix: zeebe-record 37 | logging: 38 | level: 39 | ROOT: INFO 40 | io.camunda.operate: DEBUG 41 | management: 42 | health.elasticsearch.enabled: false 43 | -------------------------------------------------------------------------------- /operate/include/configmap-webapp.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: operate-configmap-webapp 6 | labels: 7 | app.kubernetes.io/app: operate 8 | app.kubernetes.io/component: importer-archiver 9 | app.kubernetes.io/tier: webapp 10 | data: 11 | application.yml: | 12 | # Operate configuration file 13 | camunda.operate: 14 | archiverEnabled: false 15 | importerEnabled: false 16 | migration.migrationEnabled: false 17 | # ELS instance to store Operate data 18 | elasticsearch: 19 | # Cluster name 20 | clusterName: elasticsearch 21 | # Host 22 | host: elasticsearch-master 23 | # Transport port 24 | port: 9200 25 | # Zeebe instance 26 | zeebe: 27 | # Broker contact point 28 | brokerContactPoint: "camunda-zeebe-gateway:26500" 29 | # ELS instance to export Zeebe data to 30 | zeebeElasticsearch: 31 | # Cluster name 32 | clusterName: elasticsearch 33 | # Host 34 | host: elasticsearch-master 35 | # Transport port 36 | port: 9200 37 | # Index prefix, configured in Zeebe Elasticsearch exporter 38 | prefix: zeebe-record 39 | logging: 40 | level: 41 | ROOT: INFO 42 | io.camunda.operate: DEBUG 43 | #Spring Boot Actuator endpoints to be exposed 44 | management: 45 | health.elasticsearch.enabled: false 46 | -------------------------------------------------------------------------------- /operate/include/operate-ingress.tpl.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/templates/ingress.yaml 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: operate-ingress 7 | labels: 8 | app.kubernetes.io/app: operate-ingress 9 | annotations: 10 | ingress.kubernetes.io/rewrite-target: / 11 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 12 | nginx.ingress.kubernetes.io/affinity: "cookie" 13 | nginx.ingress.kubernetes.io/session-cookie-name: "route" 14 | nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" 15 | spec: 16 | ingressClassName: nginx 17 | rules: 18 | - host: YOUR_HOSTNAME 19 | http: 20 | paths: 21 | - backend: 22 | service: 23 | name: camunda-operate 24 | port: 25 | number: 80 26 | path: /operate 27 | pathType: Prefix 28 | tls: 29 | - hosts: 30 | - YOUR_HOSTNAME 31 | secretName: tls-secret -------------------------------------------------------------------------------- /operate/include/service-importer-archiver.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: operate-importer-archiver 5 | labels: 6 | app.kubernetes.io/component: operate-importer-archiver 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app.kubernetes.io/component: operate-importer-archiver 11 | ports: 12 | - name: http 13 | port: 8080 14 | - name: metrics 15 | port: 8081 -------------------------------------------------------------------------------- /operate/include/service-webapp.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: camunda-operate 5 | labels: 6 | app.kubernetes.io/component: operate 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app.kubernetes.io/component: operate 11 | ports: 12 | - port: 80 13 | name: http 14 | targetPort: 8080 15 | protocol: TCP 
-------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /spring-actuator/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # The following variables should not be changed except for advanced use cases 3 | 4 | ifeq ($(OS),Windows_NT) 5 | root ?= $(CURDIR)/.. 6 | else 7 | root ?= $(shell pwd)/.. 8 | endif 9 | 10 | # Camunda components will be installed into the following Kubernetes namespace 11 | namespace ?= camunda 12 | # Helm release name 13 | release ?= camunda 14 | # Helm chart coordinates for Camunda 15 | chart ?= camunda/camunda-platform 16 | 17 | chartValues ?= $(root)/spring-actuator/camunda-values.yaml 18 | 19 | .PHONY: all 20 | all: actuator-application-yaml camunda 21 | 22 | .PHONY: clean 23 | clean: clean-camunda 24 | 25 | include $(root)/include/camunda.mk 26 | include $(root)/spring-actuator/actuator.mk -------------------------------------------------------------------------------- /spring-actuator/actuator.mk: -------------------------------------------------------------------------------- 1 | 2 | .PHONY: actuator-application-yaml 3 | actuator-application-yaml: namespace 4 | kubectl create configmap additional-application-yaml --from-file=$(root)/spring-actuator/application.yaml -n $(namespace) 5 | 6 | -------------------------------------------------------------------------------- /spring-actuator/application.yaml: -------------------------------------------------------------------------------- 1 | management: 2 | endpoints: 3 | enabled-by-default: false 4 | jmx: 5 | exposure: 6 | exclude: "*" 7 | web: 8 | exposure: 9 | exclude: "*" 
10 | -------------------------------------------------------------------------------- /spring-actuator/camunda-values.yaml: -------------------------------------------------------------------------------- 1 | # Chart values for the Camunda Platform 8 Helm chart. 2 | # This file deliberately contains only the values that differ from the defaults. 3 | # For changes and documentation, use your favorite diff tool to compare it with: 4 | # https://github.com/camunda/camunda-platform-helm/blob/main/charts/camunda-platform/values.yaml 5 | 6 | # This is a very small cluster useful for experimenting 7 | 8 | global: 9 | image: 10 | # allways work with the latest versions in development 11 | tag: 8.2.0-alpha5 12 | identity: 13 | auth: 14 | # Disable the Identity authentication 15 | # it will fall back to basic-auth: demo/demo as default user 16 | enabled: false 17 | 18 | tasklist: 19 | enabled: false 20 | 21 | operate: 22 | enabled: false 23 | 24 | identity: 25 | enabled: false 26 | 27 | optimize: 28 | enabled: false 29 | 30 | zeebe: 31 | clusterSize: 1 32 | partitionCount: 1 33 | replicationFactor: 1 34 | pvcSize: 1Gi 35 | 36 | resources: 37 | requests: 38 | cpu: "100m" 39 | memory: "512M" 40 | limits: 41 | cpu: "512m" 42 | memory: "2Gi" 43 | env: 44 | - name: JAVA_TOOL_OPTIONS 45 | value: "-Dspring.config.additional-location=file:/additional-application-yaml/application.yaml" 46 | extraVolumeMounts: 47 | - name: additional-application-yaml 48 | mountPath: additional-application-yaml 49 | extraVolumes: 50 | - name: additional-application-yaml 51 | configMap: 52 | name: additional-application-yaml 53 | 54 | zeebe-gateway: 55 | replicas: 1 56 | 57 | resources: 58 | requests: 59 | cpu: "100m" 60 | memory: "512M" 61 | limits: 62 | cpu: "1000m" 63 | memory: "1Gi" 64 | logLevel: ERROR 65 | env: 66 | - name: JAVA_TOOL_OPTIONS 67 | value: "-Dspring.config.additional-location=file:/additional-application-yaml/application.yaml" 68 | extraVolumeMounts: 69 | - name: 
additional-application-yaml 70 | mountPath: additional-application-yaml 71 | extraVolumes: 72 | - name: additional-application-yaml 73 | configMap: 74 | name: additional-application-yaml 75 | 76 | elasticsearch: 77 | enabled: false -------------------------------------------------------------------------------- /tasklist/Makefile: -------------------------------------------------------------------------------- 1 | # ------------------------------------ 2 | # Set the following for your specific environment 3 | 4 | # Configure the fully qualified domain name 5 | # The dnsLabel is the first part of the domain address. It will be used no matter what baseDomain you configure below 6 | dnsLabel ?= YOUR_DNS_LABEL 7 | 8 | # By default, we'll use nip.io (See more at [https://nip.io](http://nip.io) ) 9 | # The fully qualified domain name will look something like ..nip.io 10 | # baseDomainName ?= nip.io 11 | 12 | # Another option is to replace baseDomainName with your own domain name 13 | # In this case, the fully qualified domain name will look like . 14 | baseDomainName ?= YOUR_DOMAIN_NAME 15 | 16 | # Only used for oauth2-proxy 17 | # clientSecret ?= CLIENT_SECRET 18 | 19 | # ------------------------------------ 20 | # The following variables should not be changed except for advanced use cases 21 | 22 | ifeq ($(OS),Windows_NT) 23 | root ?= $(CURDIR)/.. 24 | else 25 | root ?= $(shell pwd)/.. 
26 | endif 27 | 28 | include $(root)/include/ingress-nginx.mk 29 | include $(root)/tasklist/include/tasklist.mk 30 | -------------------------------------------------------------------------------- /tasklist/README.md: -------------------------------------------------------------------------------- 1 | # Helm Profile for Horizontally Scaling Tasklist 2 | 3 | > [!IMPORTANT] 4 | > This profile is deprecated and will be removed soon, please use [high-available-webapps](../high-available-webapps) profile instead 5 | 6 | The Camunda 8 Tasklist webapp consists of 3 modules: 7 | - Webapp 8 | - Importer 9 | - Archiver 10 | 11 | This directory provides scripts that demonstrate how to horizontally scale Tasklist so that 1 instance of the importer/archiver, and 2 instances of the webapp are running. 12 | 13 | These scripts are not intended to be used in Production, they are for reference only. Feel free to copy and customize for your environment and your specific requirements. 14 | 15 | ## Overview 16 | 17 | 1. Use the [Camunda 8 Helm Charts](https://github.com/camunda/camunda-platform-helm) to install a full environment (including tasklist) 18 | 2. Run the [make target](./include/tasklist.mk) `make tasklist-delete` to remove the existing configmap and deployment installed by the Camunda 8 Helm Charts 19 | 3. Edit the [yaml files](./include) and update them to be relevant for your existing kubernetes cluster and Camunda environment 20 | 4. Run the [make target](./include/tasklist.mk) `make tasklist-install` to use kubectl to apply the `*.yaml` files found in the [include](./include) directory 21 | 22 | ## Configuration Defaults 23 | 24 | By default, the scripts and yaml files will setup a single tasklist importer/archiver instance and 2 tasklist webapps (without the importer/archiver). 25 | 26 | The files provided here are for reference only. These files will need to be modified with specifics for your environment.
27 | 28 | Here's a table that shows some of the configurations that will need to be changed to match your specific environment and requirements: 29 | 30 | | Option | Default | Source File | 31 | |----------------------------|-------------------------|-----------------------| 32 | | Version | latest | `deployment-*.yaml` | 33 | | Context Path | /tasklist | `deployment-*.yaml` | 34 | | Elasticsearch clusterName | elasticsearch | `configmap-*.yaml` | 35 | | Elasticsearch host | elasticsearch-master | `configmap-*.yaml` | 36 | | Elasticsearch port | 9200 | `configmap-*.yaml` | 37 | | Elasticsearch index prefix | zeebe-record | `configmap-*.yaml` | 38 | | Many urls related to Auth | See `deployment-*.yaml` | `deployment-*.yaml` | 39 | -------------------------------------------------------------------------------- /tasklist/include/camunda-ingress.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/templates/ingress.yaml 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: camunda-camunda-platform 7 | labels: 8 | app: camunda-platform 9 | app.kubernetes.io/name: camunda-platform 10 | app.kubernetes.io/instance: camunda 11 | app.kubernetes.io/managed-by: Helm 12 | app.kubernetes.io/part-of: camunda-platform 13 | annotations: 14 | ingress.kubernetes.io/rewrite-target: / 15 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 16 | spec: 17 | ingressClassName: nginx 18 | rules: 19 | - host: gke.upgradingdave.com 20 | http: 21 | paths: 22 | - backend: 23 | service: 24 | name: camunda-keycloak 25 | port: 26 | number: 80 27 | path: /auth 28 | pathType: Prefix 29 | - backend: 30 | service: 31 | name: camunda-identity 32 | port: 33 | number: 80 34 | path: /identity 35 | pathType: Prefix 36 | - backend: 37 | service: 38 | name: camunda-operate 39 | port: 40 | number: 80 41 | path: /operate 42 | pathType: Prefix 43 | - backend: 44 | service: 45 | name: camunda-optimize 46 | port:
47 | number: 80 48 | path: /optimize 49 | pathType: Prefix 50 | # - backend: 51 | # service: 52 | # name: camunda-tasklist 53 | # port: 54 | # number: 80 55 | # path: /tasklist 56 | # pathType: Prefix 57 | tls: 58 | - hosts: 59 | - gke.upgradingdave.com 60 | secretName: tls-secret -------------------------------------------------------------------------------- /tasklist/include/configmap-importer-archiver.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: tasklist-configmap-importer-archiver 5 | labels: 6 | app.kubernetes.io/app: tasklist 7 | app.kubernetes.io/component: tasklist-importer-archiver 8 | app.kubernetes.io/tier: importer-archiver 9 | data: 10 | application.yml: | 11 | # Tasklist configuration file 12 | camunda.tasklist: 13 | webappEnabled: false 14 | # Set Tasklist username and password. 15 | # If user with does not exists it will be created. 16 | # Default: demo/demo 17 | #username: 18 | #password: 19 | # ELS instance to store Tasklist data 20 | elasticsearch: 21 | # Cluster name 22 | clusterName: elasticsearch 23 | # Host 24 | host: elasticsearch-master 25 | # Transport port 26 | port: 9200 27 | # Zeebe instance 28 | zeebe: 29 | # Broker contact point 30 | brokerContactPoint: "camunda-zeebe-gateway:26500" 31 | # ELS instance to export Zeebe data to 32 | zeebeElasticsearch: 33 | # Cluster name 34 | clusterName: elasticsearch 35 | # Host 36 | host: elasticsearch-master 37 | # Transport port 38 | port: 9200 39 | # Index prefix, configured in Zeebe Elasticsearch exporter 40 | prefix: zeebe-record 41 | #Spring Boot Actuator endpoints to be exposed 42 | management.endpoints.web.exposure.include: health,info,conditions,configprops,prometheus,loggers,usage-metrics,backups 43 | # Enable or disable metrics 44 | #management.metrics.export.prometheus.enabled: false -------------------------------------------------------------------------------- 
/tasklist/include/configmap-webapp.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: tasklist-configmap-webapp 5 | labels: 6 | app.kubernetes.io/app: tasklist 7 | app.kubernetes.io/component: tasklist 8 | app.kubernetes.io/tier: webapp 9 | data: 10 | application.yml: | 11 | # Tasklist configuration file 12 | camunda.tasklist: 13 | archiverEnabled: false 14 | importerEnabled: false 15 | # Set Tasklist username and password. 16 | # If user with does not exists it will be created. 17 | # Default: demo/demo 18 | #username: 19 | #password: 20 | # ELS instance to store Tasklist data 21 | elasticsearch: 22 | # Cluster name 23 | clusterName: elasticsearch 24 | # Host 25 | host: elasticsearch-master 26 | # Transport port 27 | port: 9200 28 | # Zeebe instance 29 | zeebe: 30 | # Broker contact point 31 | brokerContactPoint: "camunda-zeebe-gateway:26500" 32 | # ELS instance to export Zeebe data to 33 | zeebeElasticsearch: 34 | # Cluster name 35 | clusterName: elasticsearch 36 | # Host 37 | host: elasticsearch-master 38 | # Transport port 39 | port: 9200 40 | # Index prefix, configured in Zeebe Elasticsearch exporter 41 | prefix: zeebe-record 42 | #Spring Boot Actuator endpoints to be exposed 43 | management.endpoints.web.exposure.include: health,info,conditions,configprops,prometheus,loggers,usage-metrics,backups 44 | # Enable or disable metrics 45 | #management.metrics.export.prometheus.enabled: false -------------------------------------------------------------------------------- /tasklist/include/service-importer-archiver.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: tasklist-importer-archiver 6 | labels: 7 | app.kubernetes.io/app: tasklist-importer-archiver 8 | app.kubernetes.io/component: tasklist-importer-archiver 9 | spec: 10 | type: ClusterIP 11 | ports: 
12 | - port: 8081 13 | name: metrics 14 | selector: 15 | app.kubernetes.io/component: tasklist-importer-archiver 16 | -------------------------------------------------------------------------------- /tasklist/include/service-webapp.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: camunda-tasklist 6 | labels: 7 | app.kubernetes.io/app: tasklist 8 | app.kubernetes.io/component: tasklist 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - port: 80 13 | name: http 14 | targetPort: 8080 15 | protocol: TCP 16 | selector: 17 | app.kubernetes.io/component: tasklist 18 | -------------------------------------------------------------------------------- /tasklist/include/tasklist-ingress.tpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: camunda-platform/templates/ingress.yaml 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: tasklist-ingress 7 | labels: 8 | app.kubernetes.io/app: tasklist-ingress 9 | annotations: 10 | ingress.kubernetes.io/rewrite-target: / 11 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 12 | nginx.ingress.kubernetes.io/affinity: "cookie" 13 | nginx.ingress.kubernetes.io/session-cookie-name: "route" 14 | nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" 15 | spec: 16 | ingressClassName: nginx 17 | rules: 18 | - host: gke.upgradingdave.com 19 | http: 20 | paths: 21 | - backend: 22 | service: 23 | name: camunda-tasklist 24 | port: 25 | number: 80 26 | path: /tasklist 27 | pathType: Prefix 28 | tls: 29 | - hosts: 30 | - gke.upgradingdave.com 31 | secretName: tls-secret -------------------------------------------------------------------------------- /tls/Makefile: -------------------------------------------------------------------------------- 1 | ifeq ($(OS),Windows_NT) 2 | root ?= $(CURDIR)/.. 3 | else 4 | root ?= $(shell pwd)/.. 
5 | endif 6 | 7 | # Camunda components will be installed into the following Kubernetes namespace 8 | namespace ?= camunda 9 | # Helm release name 10 | release ?= camunda 11 | # Helm chart coordinates for Camunda 12 | chart ?= camunda/camunda-platform 13 | 14 | certName ?= camunda 15 | 16 | .PHONY: all 17 | all: create-custom-certs list-sans create-keystore 18 | 19 | .PHONY: clean-certs 20 | clean-certs: delete-custom-certs 21 | 22 | include $(root)/tls/selfsigned/self-signed-cert.mk 23 | include $(root)/tls/keystore/keystore.mk 24 | include $(root)/tls/tls.mk 25 | -------------------------------------------------------------------------------- /tls/README.md: -------------------------------------------------------------------------------- 1 | [![Community Extension](https://img.shields.io/badge/Community%20Extension-An%20open%20source%20community%20maintained%20project-FF4700)](https://github.com/camunda-community-hub/community) 2 | ![Compatible with: Camunda Platform 8](https://img.shields.io/badge/Compatible%20with-Camunda%20Platform%208-0072Ce) 3 | [![](https://img.shields.io/badge/Lifecycle-Incubating-blue)](https://github.com/Camunda-Community-Hub/community/blob/main/extension-lifecycle.md#incubating-) 4 | 5 | This folder contains scripts to help configure TLS for Camunda Platform 8. 
6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /tls/keystore/README.md: -------------------------------------------------------------------------------- 1 | # keystores and truststores 2 | 3 | This folder contains scripts to create keystores and truststores for Java apps 4 | 5 | 6 | -------------------------------------------------------------------------------- /tls/keystore/keystore.mk: -------------------------------------------------------------------------------- 1 | .PHONY: create-keystore 2 | create-keystore: delete-keystore 3 | openssl pkcs12 -export -in ./certs/$(certName)Server.crt -inkey ./certs/$(certName)Server.key \ 4 | -out ./certs/$(certName)Server.p12 -name $(certName)-p12 \ 5 | -CAfile ./certs/$(certName)CA.pem -caname $(certName)-ca -passout pass:$(truststorePass) 6 | keytool -importkeystore -deststorepass $(truststorePass) -destkeypass $(truststorePass) -destkeystore ./certs/keystore.jks -srckeystore ./certs/$(certName)Server.p12 -srcstoretype PKCS12 -srcstorepass $(truststorePass) 7 | truststorePass ?= camunda 8 | .PHONY: delete-keystore 9 | delete-keystore: 10 | rm -rf ./certs/keystore.jks 11 | rm -rf ./certs/$(certName)Server.p12 12 | 13 | .PHONY: list-keystore 14 | list-keystore: 15 | keytool -list -v -keystore ./certs/keystore.jks -storepass $(truststorePass) 16 | 17 | .PHONY: create-truststore 18 | create-truststore: delete-truststore 19 | keytool -import -keystore ./certs/truststore.jks -storepass $(truststorePass) -noprompt -file ./certs/$(certName)CA.pem -alias $(certName)-ca-cert 20 | 21 | .PHONY: delete-truststore 22 | delete-truststore: 23 | rm -rf ./certs/truststore.jks 24 | 25 | .PHONY: list-truststore 26 | list-truststore: 27 | keytool -list -v -keystore ./certs/truststore.jks -storepass $(truststorePass) 28 | 29 | .PHONY: create-keystore-secret 30 | create-keystore-secret: 31 | kubectl create secret generic camunda-keystore-secret --from-file=./certs/keystore.jks -n $(namespace) 32 | 33 | .PHONY: 
create-truststore-secret 34 | create-truststore-secret: 35 | kubectl create secret generic camunda-truststore-secret --from-file=./certs/truststore.jks -n $(namespace) 36 | 37 | .PHONY: create-keycloak-secret 38 | create-keycloak-secret: 39 | -kubectl -n $(namespace) delete secret "keycloak-secret" 40 | kubectl -n $(namespace) create secret generic keycloak-secret --from-file=./certs/keystore.jks --from-file=./certs/truststore.jks 41 | -------------------------------------------------------------------------------- /tls/selfsigned/README.md: -------------------------------------------------------------------------------- 1 | # self-signed certificates 2 | 3 | This folder contains scripts to generate a self-signed Certificate Authority and self-signed Certificates 4 | 5 | 6 | -------------------------------------------------------------------------------- /tls/selfsigned/cert.tpl.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | req_extensions = req_ext 4 | prompt = no 5 | 6 | [req_distinguished_name] 7 | C = US 8 | ST = Virginia 9 | L = Fredericksburg 10 | O = Camunda 11 | OU = Presales 12 | CN = RELEASE-zeebe-gateway.NAMESPACE.svc.cluster.local 13 | 14 | [req_ext] 15 | subjectAltName = @alt_names 16 | 17 | [alt_names] 18 | DNS.1 = RELEASE-keycloak.NAMESPACE.svc.cluster.local 19 | DNS.2 = RELEASE-identity.NAMESPACE.svc.cluster.local 20 | DNS.3 = RELEASE-tasklist.NAMESPACE.svc.cluster.local 21 | DNS.4 = RELEASE-operate.NAMESPACE.svc.cluster.local 22 | DNS.5 = RELEASE-optimize.NAMESPACE.svc.cluster.local # was a duplicate of DNS.4 (operate); optimize was the missing web app 23 | DNS.6 = RELEASE-zeebe-gateway.NAMESPACE.svc.cluster.local 24 | DNS.7 = RELEASE-zeebe-0.RELEASE-zeebe.NAMESPACE.svc.cluster.local 25 | DNS.8 = RELEASE-zeebe-1.RELEASE-zeebe.NAMESPACE.svc.cluster.local 26 | DNS.9 = RELEASE-zeebe-2.RELEASE-zeebe.NAMESPACE.svc.cluster.local 27 | -------------------------------------------------------------------------------- 
/tls/selfsigned/san.tpl.ext: -------------------------------------------------------------------------------- 1 | authorityKeyIdentifier=keyid,issuer 2 | basicConstraints=CA:FALSE 3 | keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment 4 | subjectAltName = @alt_names 5 | 6 | [alt_names] 7 | DNS.1 = RELEASE-keycloak.NAMESPACE.svc.cluster.local 8 | DNS.2 = RELEASE-identity.NAMESPACE.svc.cluster.local 9 | DNS.3 = RELEASE-zeebe-gateway.NAMESPACE.svc.cluster.local 10 | DNS.4 = RELEASE-tasklist.NAMESPACE.svc.cluster.local 11 | DNS.5 = RELEASE-operate.NAMESPACE.svc.cluster.local 12 | DNS.6 = RELEASE-zeebe-0.RELEASE-zeebe.NAMESPACE.svc.cluster.local 13 | DNS.7 = RELEASE-zeebe-1.RELEASE-zeebe.NAMESPACE.svc.cluster.local 14 | DNS.8 = RELEASE-zeebe-2.RELEASE-zeebe.NAMESPACE.svc.cluster.local 15 | 16 | -------------------------------------------------------------------------------- /tls/tls.mk: -------------------------------------------------------------------------------- 1 | .PHONY: create-tls-secret 2 | create-tls-secret: 3 | kubectl create secret tls $(certName)-tls-secret --cert=./certs/$(certName)Server.crt --key=./certs/$(certName)Server.key -n $(namespace) 4 | 5 | .PHONY: delete-tls-secret 6 | delete-tls-secret: 7 | kubectl delete secret $(certName)-tls-secret -n $(namespace) 8 | 9 | .PHONY: create-ca-secret 10 | create-ca-secret: 11 | kubectl create secret generic $(certName)-ca-secret --from-file=./certs/$(certName)CA.pem -n $(namespace) 12 | 13 | .PHONY: delete-ca-secret 14 | delete-ca-secret: 15 | kubectl delete secret $(certName)-ca-secret -n $(namespace) 16 | 17 | .PHONY: netshoot 18 | netshoot: 19 | kubectl run tmp-shell --rm -i --tty --image nicolaka/netshoot -n $(namespace) 20 | 21 | #docker run --rm -it --entrypoint /bin/sh upgradingdave/zbctl-java:main 22 | 23 | zbctl-plaintext-job.yaml: 24 | sed "s/RELEASE/$(release)/g; s/CLIENT_SECRET/$(clientSecret)/g;" $(root)/tls/zbctl-plaintext-job.tpl.yaml > ./zbctl-plaintext-job.yaml 25 | 26 | .PHONY: 
create-zbctl-plaintext-job 27 | create-zbctl-plaintext-job: namespace zbctl-plaintext-job.yaml 28 | kubectl apply -f ./zbctl-plaintext-job.yaml -n $(namespace) 29 | 30 | zbctl-tls-job.yaml: 31 | sed "s/CERT_NAME/$(certName)/g; s/RELEASE/$(release)/g; s/CLIENT_SECRET/$(clientSecret)/g;" $(root)/tls/zbctl-tls-job.tpl.yaml > ./zbctl-tls-job.yaml 32 | 33 | .PHONY: create-zbctl-tls-job 34 | create-zbctl-tls-job: namespace zbctl-tls-job.yaml 35 | kubectl apply -f ./zbctl-tls-job.yaml -n $(namespace) 36 | 37 | #kubectl logs jobs/zbctl-plaintext -f -n $(namespace) 38 | 39 | .PHONY: delete-zbctl-jobs 40 | delete-zbctl-jobs: 41 | -kubectl delete -f ./zbctl-plaintext-job.yaml -n $(namespace) 42 | -kubectl delete -f ./zbctl-tls-job.yaml -n $(namespace) 43 | -rm -rf ./zbctl-plaintext-job.yaml 44 | -rm -rf ./zbctl-tls-job.yaml 45 | 46 | # 47 | #$ echo | \ 48 | # openssl s_client -servername www.example.com -connect www.example.com:443 2>/dev/null | \ 49 | # openssl x509 -text 50 | 51 | -------------------------------------------------------------------------------- /tls/zbctl-plaintext-job.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: zbctl-plaintext 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: zbctl 10 | image: upgradingdave/zbctl-java 11 | command: ["java", "-jar", "/usr/src/app/zbctl.jar", 12 | "--address", "RELEASE-zeebe-gateway:26500", 13 | "--authzUrl", "http://RELEASE-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/token", 14 | "--clientId", "zeebe", 15 | "--clientSecret", "CLIENT_SECRET", 16 | "--plainText"] 17 | restartPolicy: Never -------------------------------------------------------------------------------- /tls/zbctl-tls-job.tpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: zbctl-tls 5 | spec: 6 | template: 7 | 
spec: 8 | containers: 9 | - name: zbctl 10 | image: upgradingdave/zbctl-java 11 | command: ["java", "-jar", "/usr/src/app/zbctl.jar", 12 | "--address", "RELEASE-zeebe-gateway.camunda.svc.cluster.local:26500", # NOTE(review): namespace "camunda" is hard-coded; the sed in tls.mk substitutes only CERT_NAME/RELEASE/CLIENT_SECRET -- confirm before deploying to another namespace 13 | "--authzUrl", "http://RELEASE-keycloak:80/auth/realms/camunda-platform/protocol/openid-connect/token", 14 | "--clientId", "zeebe", 15 | "--clientSecret", "CLIENT_SECRET", 16 | "--certPath", "/usr/local/config/ca.crt"] 17 | volumeMounts: 18 | - mountPath: /usr/local/config/ca.crt 19 | name: certificate 20 | subPath: ca.crt 21 | volumes: 22 | - name: certificate 23 | secret: 24 | secretName: CERT_NAME-ca-secret # CERT_NAME is replaced by sed in tls.mk; secret is created by the create-ca-secret target 25 | items: 26 | - key: CERT_NAMECA.pem 27 | path: ca.crt 28 | defaultMode: 420 # 420 decimal == 0644 octal 29 | restartPolicy: Never 30 | 31 | --------------------------------------------------------------------------------