├── aks-cluster-config.tfvars ├── eks-cluster-config.tfvars ├── calatrava-cluster-config.tfvars ├── terraform ├── gke │ ├── outputs.tf │ ├── variables.tf │ └── main.tf ├── aks │ ├── versions.tf │ ├── outputs.tf │ ├── variables.tf │ └── aks-cluster.tf ├── eks │ ├── versions.tf │ ├── outputs.tf │ ├── variables.tf │ └── main.tf └── calatrava │ └── main.tf ├── RabbitMQ-values.yml ├── test-infrastructure ├── influx-values.yml ├── config.json ├── prom-values.yml ├── rabbitmq.yml └── benchmarker.yml ├── .gitignore ├── gke-cluster-config.tfvars ├── policy.json ├── topology.json ├── README.md ├── LICENSE └── benchmark /aks-cluster-config.tfvars: -------------------------------------------------------------------------------- 1 | vm_size = "Standard_D32d_v4" 2 | node_count = 3 3 | disk_size_gb = 100 -------------------------------------------------------------------------------- /eks-cluster-config.tfvars: -------------------------------------------------------------------------------- 1 | region = "us-west-2" 2 | node_count = 3 3 | instance_type = "m5d.16xlarge" 4 | -------------------------------------------------------------------------------- /calatrava-cluster-config.tfvars: -------------------------------------------------------------------------------- 1 | nimbus_nsname = "rabbitmq" 2 | node_count = 3 3 | workers_class = "guaranteed-8xlarge" -------------------------------------------------------------------------------- /terraform/gke/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_name" { 2 | description = "Cluster name" 3 | value = module.gke.name 4 | } -------------------------------------------------------------------------------- /RabbitMQ-values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | image: rabbitmq:latest 4 | imagePullSecrets: [] 5 | replicas: 3 6 | maxCPU: 32 7 | maxMemory: 100Gi 8 | storageClassName: premium-rwo 9 | 
storageSize: 100Gi -------------------------------------------------------------------------------- /terraform/aks/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | version = "2.42.0" 6 | } 7 | } 8 | 9 | required_version = "~> 0.14" 10 | } 11 | -------------------------------------------------------------------------------- /terraform/eks/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13.1" 3 | 4 | required_providers { 5 | aws = ">= 3.22.0" 6 | local = ">= 1.4" 7 | random = ">= 2.1" 8 | kubernetes = ">= 1.18" 9 | } 10 | } -------------------------------------------------------------------------------- /test-infrastructure/influx-values.yml: -------------------------------------------------------------------------------- 1 | --- 2 | config: 3 | http: 4 | auth-enabled: true 5 | setDefaultUser: 6 | enabled: true 7 | initScripts: 8 | enabled: true 9 | scripts: 10 | init.iql: |+ 11 | CREATE DATABASE "rabbitmq" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.tfstate 3 | *.tfstate.backup 4 | *.tfstate.lock.info 5 | *.terraform.lock.hcl 6 | 7 | # Kubeconfigs 8 | terraform/*/kubeconfig*benchmark 9 | 10 | # logs 11 | *.log 12 | 13 | # Directories 14 | .terraform/ 15 | -------------------------------------------------------------------------------- /gke-cluster-config.tfvars: -------------------------------------------------------------------------------- 1 | project_id = "PROJECT_ID" 2 | cluster_name = "rabbitmq-benchmark" 3 | region = "europe-west1" 4 | zones = [ 5 | "europe-west1-b", 6 | "europe-west1-c", 7 | "europe-west1-d", 8 | ] 9 | machine_type = "n2-standard-32" 10 | disk_size_gb = 100 11 | 
disk_type = "pd-ssd" -------------------------------------------------------------------------------- /terraform/aks/outputs.tf: -------------------------------------------------------------------------------- 1 | # Outputs for the AKS deployment. The resources are declared in aks-cluster.tf 2 | # under the name "benchmark"; no resource named "default" exists in this module. 3 | output "resource_group_name" { 4 | value = azurerm_resource_group.benchmark.name 5 | } 6 | 7 | output "kubernetes_cluster_name" { 8 | value = azurerm_kubernetes_cluster.benchmark.name 9 | } 10 | 11 | # Raw kubeconfig for the provisioned cluster (sensitive). 12 | output "kube_config" { 13 | value = azurerm_kubernetes_cluster.benchmark.kube_config_raw 14 | } 15 | -------------------------------------------------------------------------------- /terraform/aks/variables.tf: -------------------------------------------------------------------------------- 1 | variable "appId" { 2 | description = "Azure Kubernetes Service Cluster service principal" 3 | } 4 | 5 | variable "password" { 6 | description = "Azure Kubernetes Service Cluster password" 7 | } 8 | 9 | variable "vm_size" { 10 | default = "Standard_D32_v4" 11 | } 12 | 13 | variable "node_count" { 14 | default = 3 15 | } 16 | 17 | variable "disk_size_gb" { 18 | default = 100 19 | } 20 | -------------------------------------------------------------------------------- /test-infrastructure/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mode": "benchmark", 3 | "technology": "rabbitmq", 4 | "version": "3.8.16", 5 | "metrics-influx-uri": "http://benchmark-data-influxdb.default.svc.cluster.local:8086", 6 | "metrics-influx-database": "rabbitmq", 7 | "metrics-influx-interval": "10", 8 | "broker-hosts": "benchmark.default.svc.cluster.local:5672", 9 | "broker-mgmt-port": "15672", 10 | "broker-port": "5672", 11 | "broker-vhost": "benchmark" 12 | } 13 | -------------------------------------------------------------------------------- /policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "policies": [ 3 | { 4 | "name": "quorum-queues", 5 | "applyTo": "queues", 6 | "pattern": "", 7 | 
"priority": 0, 8 | "properties" : [ 9 | { "key": "x-queue-type", "value": "quorum", "type": "string" }, 10 | { "key": "x-quorum-initial-group-size", "value": 3, "type": "int" }, 11 | { "key": "x-max-in-memory-length", "value": 0, "type": "int" } 12 | ] 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /test-infrastructure/prom-values.yml: -------------------------------------------------------------------------------- 1 | --- 2 | defaultRules: 3 | create: false 4 | nodeExporter: 5 | enabled: false 6 | prometheus: 7 | prometheusSpec: 8 | ruleSelectorNilUsesHelmValues: false 9 | podMonitorSelectorNilUsesHelmValues: false 10 | probeSelectorNilUsesHelmValues: false 11 | serviceMonitorSelectorNilUsesHelmValues: false 12 | alertmanager: 13 | alertmanagerSpec: 14 | useExistingSecret: true 15 | grafana: 16 | adminPassword: admin 17 | env: 18 | GF_INSTALL_PLUGINS: flant-statusmap-panel 19 | -------------------------------------------------------------------------------- /terraform/eks/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_endpoint" { 2 | description = "Endpoint for EKS control plane." 3 | value = module.eks.cluster_endpoint 4 | } 5 | 6 | output "cluster_security_group_id" { 7 | description = "Security group ids attached to the cluster control plane." 8 | value = module.eks.cluster_security_group_id 9 | } 10 | 11 | output "kubectl_config" { 12 | description = "kubectl config as generated by the module." 13 | value = module.eks.kubeconfig 14 | } 15 | 16 | output "config_map_aws_auth" { 17 | description = "A kubernetes configuration to authenticate to this EKS cluster." 18 | value = module.eks.config_map_aws_auth 19 | } 20 | 21 | output "region" { 22 | description = "AWS region." 
23 | value = var.region 24 | } -------------------------------------------------------------------------------- /terraform/aks/aks-cluster.tf: -------------------------------------------------------------------------------- 1 | # AKS cluster for the benchmark. Terraform does not allow one resource block to 2 | # be nested inside another, so the rabbit node pool is declared as its own 3 | # top-level resource below the cluster. 4 | resource "azurerm_kubernetes_cluster" "benchmark" { 5 | name = "${random_pet.prefix.id}-aks" 6 | location = azurerm_resource_group.benchmark.location 7 | resource_group_name = azurerm_resource_group.benchmark.name 8 | dns_prefix = "${random_pet.prefix.id}-k8s" 9 | 10 | default_node_pool { 11 | # AKS node pool names must be 1-12 lowercase alphanumeric characters (no hyphens). 12 | name = "datapool" 13 | node_count = 3 14 | vm_size = "Standard_D2_v2" 15 | os_disk_size_gb = 100 16 | } 17 | 18 | service_principal { 19 | client_id = var.appId 20 | client_secret = var.password 21 | } 22 | 23 | role_based_access_control { 24 | enabled = true 25 | } 26 | 27 | tags = { 28 | environment = "Benchmark" 29 | } 30 | } 31 | 32 | # Dedicated pool for the RabbitMQ nodes; labeled and tainted so only pods that 33 | # tolerate rabbit-pool=true (see rabbitmq.yml) are scheduled here. 34 | resource "azurerm_kubernetes_cluster_node_pool" "rabbit-pool" { 35 | # AKS node pool names must be 1-12 lowercase alphanumeric characters (no hyphens). 36 | name = "rabbitpool" 37 | kubernetes_cluster_id = azurerm_kubernetes_cluster.benchmark.id 38 | vm_size = var.vm_size 39 | node_count = var.node_count 40 | os_disk_size_gb = var.disk_size_gb 41 | node_labels = { "rabbit-pool" = true } 42 | node_taints = [ "rabbit-pool=true:NoSchedule" ] 43 | 44 | tags = { 45 | Environment = "rabbit-pool" 46 | } 47 | } -------------------------------------------------------------------------------- /terraform/eks/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | default = "us-west-2" 3 | } 4 | 5 | variable "instance_type" { 6 | default = "m5d.16xlarge" 7 | } 8 | 9 | variable "node_count" { 10 | default = 3 11 | } 12 | 13 | variable "map_accounts" { 14 | description = "Additional AWS account numbers to add to the aws-auth configmap." 15 | type = list(string) 16 | 17 | default = [ 18 | "777777777777", 19 | "888888888888", 20 | ] 21 | } 22 | 23 | variable "map_roles" { 24 | description = "Additional IAM roles to add to the aws-auth configmap." 
25 | type = list(object({ 26 | rolearn = string 27 | username = string 28 | groups = list(string) 29 | })) 30 | 31 | default = [ 32 | { 33 | rolearn = "arn:aws:iam::66666666666:role/role1" 34 | username = "role1" 35 | groups = ["system:masters"] 36 | }, 37 | ] 38 | } 39 | 40 | variable "map_users" { 41 | description = "Additional IAM users to add to the aws-auth configmap." 42 | type = list(object({ 43 | userarn = string 44 | username = string 45 | groups = list(string) 46 | })) 47 | 48 | default = [ 49 | { 50 | userarn = "arn:aws:iam::66666666666:user/user1" 51 | username = "user1" 52 | groups = ["system:masters"] 53 | }, 54 | { 55 | userarn = "arn:aws:iam::66666666666:user/user2" 56 | username = "user2" 57 | groups = ["system:masters"] 58 | }, 59 | ] 60 | } -------------------------------------------------------------------------------- /terraform/gke/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | description = "The project ID to host the cluster in" 3 | } 4 | 5 | variable "cluster_name" { 6 | description = "The name for the GKE cluster" 7 | default = "rabbitmq-benchmark" 8 | } 9 | 10 | variable "region" { 11 | description = "The region to host the cluster in" 12 | default = "europe-west1" 13 | } 14 | 15 | variable "zones" { 16 | description = "The Availability Zones to deploy the cluster across. 
Must be within the region" 17 | default = ["europe-west1-b", "europe-west1-c", "europe-west1-d"] 18 | } 19 | 20 | variable "network" { 21 | description = "The VPC network created to host the cluster in" 22 | default = "gke-network" 23 | } 24 | 25 | variable "subnetwork" { 26 | description = "The subnetwork created to host the cluster in" 27 | default = "gke-subnet" 28 | } 29 | 30 | variable "ip_range_pods_name" { 31 | description = "The secondary ip range to use for pods" 32 | default = "ip-range-pods" 33 | } 34 | 35 | variable "ip_range_services_name" { 36 | description = "The secondary ip range to use for services" 37 | default = "ip-range-services" 38 | } 39 | 40 | variable "nodes" { 41 | description = "Number of Kubernetes nodes" 42 | default = 5 43 | } 44 | 45 | variable "machine_type" { 46 | description = "Machine type for Kubernetes nodes" 47 | default = "n2-standard-32" 48 | } 49 | 50 | variable "disk_size_gb" { 51 | description = "Disk size in GB" 52 | default = 100 53 | } 54 | 55 | variable "disk_type" { 56 | description = "Disk type" 57 | default = "pd-ssd" 58 | } 59 | -------------------------------------------------------------------------------- /topology.json: -------------------------------------------------------------------------------- 1 | { 2 | "topologyType": "fixed", 3 | "benchmarkType": "throughput", 4 | "topologyGroups": [ 5 | { 6 | "name": "benchmark", 7 | "scale": 1, 8 | "queues": [{ 9 | "prefix": "qq", 10 | "scale": 100, 11 | "properties": [{ "key": "x-queue-type", "value": "quorum", "type": "string" }] 12 | }], 13 | "publishers": [{ 14 | "prefix": "p", 15 | "scale": 100, 16 | "publishMode":{ 17 | "useConfirms": true, 18 | "inFlightLimit": 100 19 | }, 20 | "sendToQueuePrefix": { 21 | "queuePrefix": "qq", 22 | "mode": "Counterpart" 23 | }, 24 | "deliveryMode": "Persistent", 25 | "messageSize": 65535, 26 | "msgsPerSecondPerPublisher": 100 27 | }], 28 | "consumers": [ 29 | { 30 | "prefix": "c", 31 | "scale": 100, 32 | "queuePrefix": "qq", 33 
| "ackMode": { 34 | "manualAcks": true, 35 | "consumerPrefetch": 10, 36 | "ackInterval": 5 37 | }, 38 | "processingMs": 10 39 | } 40 | ] 41 | } 42 | ], 43 | "dimensions" : { 44 | "fixedDimensions": { 45 | "durationSeconds": 300, 46 | "rampUpSeconds": 10 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /test-infrastructure/rabbitmq.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: policy/v1beta1 4 | kind: PodDisruptionBudget 5 | metadata: 6 | name: benchmark-rabbitmq 7 | spec: 8 | maxUnavailable: 1 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: benchmark 12 | --- 13 | apiVersion: rabbitmq.com/v1beta1 14 | kind: RabbitmqCluster 15 | metadata: 16 | name: benchmark 17 | spec: 18 | image: #@ data.values.image 19 | imagePullSecrets: #@ data.values.imagePullSecrets 20 | replicas: #@ data.values.replicas 21 | tolerations: 22 | - key: "rabbit-pool" 23 | operator: "Equal" 24 | value: "true" 25 | effect: "NoSchedule" 26 | affinity: 27 | nodeAffinity: 28 | requiredDuringSchedulingIgnoredDuringExecution: 29 | nodeSelectorTerms: 30 | - matchExpressions: 31 | - key: rabbit-pool 32 | operator: In 33 | values: 34 | - "true" 35 | resources: 36 | limits: 37 | cpu: #@ data.values.maxCPU 38 | memory: #@ data.values.maxMemory 39 | rabbitmq: 40 | additionalConfig: | 41 | cluster_partition_handling = pause_minority 42 | vm_memory_high_watermark_paging_ratio = 0.99 43 | disk_free_limit.relative = 1.0 44 | collect_statistics_interval = 10000 45 | persistence: 46 | storageClassName: #@ data.values.storageClassName 47 | storage: #@ data.values.storageSize 48 | override: 49 | statefulSet: 50 | spec: 51 | template: 52 | spec: 53 | containers: [] 54 | topologySpreadConstraints: 55 | - maxSkew: 1 56 | topologyKey: "topology.kubernetes.io/zone" 57 | whenUnsatisfiable: DoNotSchedule 58 | labelSelector: 59 | matchLabels: 60 | 
app.kubernetes.io/name: benchmark 61 | -------------------------------------------------------------------------------- /terraform/calatrava/main.tf: -------------------------------------------------------------------------------- 1 | variable "nimbus_user" {} 2 | 3 | variable "nimbus_nsname" { 4 | default = "rmqBenchmark" 5 | } 6 | 7 | variable "node_count" { 8 | default = 3 9 | } 10 | 11 | variable "workers_class" { 12 | default = "guaranteed-8xlarge" 13 | } 14 | 15 | terraform { 16 | required_providers { 17 | pacific = { 18 | source = "eng.vmware.com/calatrava/pacific" 19 | } 20 | } 21 | } 22 | 23 | # Keep the nimbus server/config/ip values, they are fine for you to use 24 | resource "pacific_nimbus_namespace" "ns" { 25 | user = var.nimbus_user 26 | name = var.nimbus_nsname 27 | 28 | # Pick one of sc2-01-vc16, sc2-01-vc17, wdc-08-vc04, wdc-08-vc05, wdc-08-vc07, wdc-08-vc08, sof2-01-vc06 29 | # Check slack channel #calatrava-notice for known issues 30 | nimbus = "wdc-08-vc04" 31 | nimbus_config_file = "/mts/git/nimbus-configs/config/staging/wcp.json" 32 | } 33 | 34 | // save kubeconfig 35 | resource "local_file" "sv_kubeconfig" { 36 | sensitive_content = pacific_nimbus_namespace.ns.kubeconfig 37 | filename = "${path.module}/sv.kubeconfig" 38 | file_permission = "0644" 39 | } 40 | 41 | resource "pacific_guestcluster" "gc" { 42 | cluster_name = "gc" 43 | namespace = pacific_nimbus_namespace.ns.namespace 44 | input_kubeconfig = pacific_nimbus_namespace.ns.kubeconfig 45 | version = "v1.18" 46 | network_servicedomain = "cluster.local" 47 | topology_controlplane_count = 3 48 | topology_controlplane_class = "best-effort-medium" 49 | topology_workers_class = var.workers_class 50 | topology_workers_count = var.node_count 51 | topology_controlplane_storageclass = pacific_nimbus_namespace.ns.default_storageclass 52 | topology_workers_storageclass = pacific_nimbus_namespace.ns.default_storageclass 53 | storage_defaultclass = pacific_nimbus_namespace.ns.default_storageclass 
54 | } 55 | 56 | // save kubeconfig 57 | resource "local_file" "kubeconfig" { 58 | sensitive_content = pacific_guestcluster.gc.kubeconfig 59 | filename = "${path.module}/kubeconfig-rabbitmq-benchmark" 60 | file_permission = "0644" 61 | } -------------------------------------------------------------------------------- /test-infrastructure/benchmarker.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: ConfigMap 4 | apiVersion: v1 5 | metadata: 6 | name: topology-config 7 | data: 8 | topology.json: #@ data.read("topology.json") 9 | --- 10 | kind: ConfigMap 11 | apiVersion: v1 12 | metadata: 13 | name: policy-config 14 | data: 15 | policy.json: #@ data.read("policy.json") 16 | --- 17 | kind: ConfigMap 18 | apiVersion: v1 19 | metadata: 20 | name: benchmark-config 21 | data: 22 | config.json: #@ data.read("config.json") 23 | --- 24 | apiVersion: batch/v1 25 | kind: Job 26 | metadata: 27 | name: rabbit-benchmarker 28 | spec: 29 | completions: 1 30 | ttlSecondsAfterFinished: 600 31 | template: 32 | spec: 33 | restartPolicy: Never 34 | containers: 35 | - name: rabbit-test-tool 36 | image: pivotalrabbitmq/rabbittesttool:latest 37 | env: 38 | - name: RMQ_USER 39 | valueFrom: 40 | secretKeyRef: 41 | name: benchmark-default-user 42 | key: username 43 | - name: RMQ_PASSWORD 44 | valueFrom: 45 | secretKeyRef: 46 | name: benchmark-default-user 47 | key: password 48 | - name: INFLUX_USER 49 | valueFrom: 50 | secretKeyRef: 51 | name: benchmark-data-influxdb-auth 52 | key: influxdb-user 53 | - name: INFLUX_PASSWORD 54 | valueFrom: 55 | secretKeyRef: 56 | name: benchmark-data-influxdb-auth 57 | key: influxdb-password 58 | volumeMounts: 59 | - name: topology-file 60 | mountPath: /etc/config/topology.json 61 | subPath: topology.json 62 | readOnly: true 63 | - name: policy-file 64 | mountPath: /etc/config/policy.json 65 | subPath: policy.json 66 | readOnly: true 67 | - name: config-file 68 | mountPath: 
/etc/config/config.json 69 | subPath: config.json 70 | readOnly: true 71 | args: 72 | - "--broker-user $(RMQ_USER)" 73 | - "--broker-password $(RMQ_PASSWORD)" 74 | - "--metrics-influx-user $(INFLUX_USER)" 75 | - "--metrics-influx-password $(INFLUX_PASSWORD)" 76 | - "--topology /etc/config/topology.json" 77 | - "--policies /etc/config/policy.json" 78 | - "--config-file /etc/config/config.json" 79 | volumes: 80 | - name: topology-file 81 | configMap: 82 | name: topology-config 83 | - name: policy-file 84 | configMap: 85 | name: policy-config 86 | - name: config-file 87 | configMap: 88 | name: benchmark-config 89 | -------------------------------------------------------------------------------- /terraform/gke/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" {} 2 | 3 | module "gke_auth" { 4 | source = "terraform-google-modules/kubernetes-engine/google//modules/auth" 5 | depends_on = [module.gke] 6 | project_id = var.project_id 7 | location = module.gke.location 8 | cluster_name = module.gke.name 9 | } 10 | 11 | resource "local_file" "kubeconfig" { 12 | content = module.gke_auth.kubeconfig_raw 13 | filename = "kubeconfig-rabbitmq-benchmark" 14 | } 15 | 16 | module "gcp-network" { 17 | source = "terraform-google-modules/network/google" 18 | version = "~> 2.5" 19 | project_id = var.project_id 20 | network_name = "${var.network}-${var.cluster_name}" 21 | subnets = [ 22 | { 23 | subnet_name = "${var.subnetwork}-${var.cluster_name}" 24 | subnet_ip = "10.10.0.0/16" 25 | subnet_region = var.region 26 | }, 27 | ] 28 | secondary_ranges = { 29 | "${var.subnetwork}-${var.cluster_name}" = [ 30 | { 31 | range_name = var.ip_range_pods_name 32 | ip_cidr_range = "10.20.0.0/16" 33 | }, 34 | { 35 | range_name = var.ip_range_services_name 36 | ip_cidr_range = "10.30.0.0/16" 37 | }, 38 | ] 39 | } 40 | } 41 | 42 | module "gke" { 43 | source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster" 44 | 
project_id = var.project_id 45 | name = "${var.cluster_name}" 46 | regional = true 47 | region = var.region 48 | zones = var.zones 49 | network = module.gcp-network.network_name 50 | subnetwork = module.gcp-network.subnets_names[0] 51 | ip_range_pods = var.ip_range_pods_name 52 | ip_range_services = var.ip_range_services_name 53 | node_pools = [ 54 | { 55 | name = "rabbit-pool" 56 | machine_type = var.machine_type 57 | node_locations = join(",", var.zones) 58 | initial_node_count = var.nodes 59 | disk_size_gb = var.disk_size_gb 60 | disk_type = var.disk_type 61 | autoscaling = false 62 | preemptible = false 63 | }, 64 | { 65 | name = "data-pool", 66 | machine_type = "n2-standard-4", 67 | node_locations = join(",", var.zones) 68 | initial_node_count = 3 69 | disk_size_gb = 50 70 | disk_type = "pd-standard" 71 | autoscaling = false 72 | preemptible = false 73 | } 74 | ] 75 | node_pools_labels = { 76 | rabbit-pool = { rabbit-pool = true } 77 | data-pool = { data-pool = true } 78 | } 79 | node_pools_taints = { 80 | rabbit-pool = [ 81 | { 82 | key = "rabbit-pool" 83 | value = true 84 | effect = "NO_SCHEDULE" 85 | } 86 | ] 87 | } 88 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # benchmarker 2 | A tool for benchmarking RabbitMQ on Kubernetes with various hardware and messaging configurations. 3 | 4 | ## Supported Kubernetes platforms 5 | The tooling currently supports GKE, AKS, EKS, and Calatrava (Project Pacific) via terraform modules. 6 | 7 | ## Getting started 8 | 9 | ### Configuration 10 | To configure the Kubernetes cluster and hardware to deploy RabbitMQ, edit the terraform configuration in 11 | `PROVIDER-cluster-config.tfvars`, where `PROVIDER` is one of `gke`, `aks`, `eks`, or `calatrava`. 12 | 13 | To configure RabbitMQ properties, edit the values YAML in `RabbitMQ-values.yml`. 
14 | 15 | To configure the benchmark topology, edit `topology.json`. 16 | To configure the benchmark RabbitMQ policies, edit `policy.json`. 17 | The topology and policy files are used to configure [RabbitTestTool](https://github.com/rabbitmq/rabbittesttool). For more detailed descriptions and configuration options, see the [RTT documentation](https://github.com/rabbitmq/RabbitTestTool/tree/main/benchmark). 18 | 19 | ### Running 20 | 21 | To provision an environment and run the benchmark, run the script 22 | ```shell 23 | benchmark --provider (gke|aks|eks|calatrava) 24 | ``` 25 | This script will deploy a Kubernetes cluster on the selected provider, deploy the cluster operator, a RabbitMQ cluster, Prometheus, Grafana, and InfluxDB on that Kubernetes cluster, then run the benchmark, exporting the results to the databases. 26 | 27 | To use an existing Kubernetes cluster to run the benchmark, run the script 28 | ```shell 29 | benchmark --skip-terraform 30 | ``` 31 | This will deploy the cluster operator, a RabbitMQ cluster, Prometheus, Grafana, and InfluxDB on the targeted Kubernetes cluster, then run the configured benchmark, exporting the results to the databases. 32 | 33 | To access the Grafana dashboards, run the command 34 | ```bash 35 | kubectl -n prom port-forward svc/prom-grafana 3000:80 36 | ``` 37 | then open a browser window to `http://localhost:3000` and login with the credentials `admin:admin`. 38 | 39 | ### Cleanup 40 | 41 | To tear down the infrastructure provisioned by the operator, run 42 | ```shell 43 | benchmark destroy --provider (gke|aks|eks|calatrava) 44 | ``` 45 | **Note**: this operation is destructive and will result in the loss of the benchmark data. 46 | 47 | ## Results 48 | 49 | There is a significant amount of performance information captured in a series of blog posts by Jack Vanlightly from 2020. 
50 | - [Cluster Sizing](https://blog.rabbitmq.com/posts/2020/06/cluster-sizing-and-other-considerations/) 51 | - [Quorum Queues](https://blog.rabbitmq.com/posts/2020/06/cluster-sizing-case-study-quorum-queues-part-1/) 52 | - [Mirrored Queues](https://blog.rabbitmq.com/posts/2020/06/cluster-sizing-case-study-mirrored-queues-part-1/) 53 | 54 | | *Messages/second* | *Message Size (KB)* | *Queue Type* | *Replication Factor* | *Cluster Size* | *Cores (per Node)* | *Memory (GB per Node)* | *Disk Type* | 55 | | ----------------: | ------------------: | -----------: | -------------------: | -------------: | -----------------: | ---------------------: | ----------: | 56 | | 36,000 | 1 | quorum | 3 | 3 | 16 | 32 | SSD | 57 | | 37,000 | 1 | quorum | 3 | 3 | 36 | 72 | SSD | 58 | | 42,000 | 1 | quorum | 3 | 5 | 8 | 16 | SSD | 59 | | 54,000 | 1 | quorum | 3 | 5 | 16 | 32 | SSD | 60 | | 54,000 | 1 | quorum | 3 | 7 | 8 | 16 | SSD | 61 | | 67,000 | 1 | quorum | 3 | 7 | 16 | 16 | SSD | 62 | | 66,000 | 1 | quorum | 3 | 9 | 8 | 16 | SSD | 63 | 64 | ### Adding new results 65 | To add new benchmarking results, please open a PR with your addition. 
66 | -------------------------------------------------------------------------------- /terraform/eks/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | data "aws_eks_cluster" "benchmark" { 6 | name = module.eks.cluster_id 7 | } 8 | 9 | data "aws_eks_cluster_auth" "benchmark" { 10 | name = module.eks.cluster_id 11 | } 12 | 13 | provider "kubernetes" { # the data sources above are named "benchmark", not "cluster" 14 | host = data.aws_eks_cluster.benchmark.endpoint 15 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.benchmark.certificate_authority.0.data) 16 | token = data.aws_eks_cluster_auth.benchmark.token 17 | load_config_file = false 18 | } 19 | 20 | data "aws_availability_zones" "available" { 21 | } 22 | 23 | locals { 24 | cluster_name = "benchmark-${random_string.suffix.result}" 25 | } 26 | 27 | resource "random_string" "suffix" { 28 | length = 8 29 | special = false 30 | } 31 | 32 | resource "aws_security_group" "worker_group_mgmt_one" { 33 | name_prefix = "worker_group_mgmt_one" 34 | vpc_id = module.vpc.vpc_id 35 | 36 | ingress { 37 | from_port = 22 38 | to_port = 22 39 | protocol = "tcp" 40 | 41 | cidr_blocks = [ 42 | "10.0.0.0/8", 43 | ] 44 | } 45 | } 46 | 47 | resource "aws_security_group" "worker_group_mgmt_two" { 48 | name_prefix = "worker_group_mgmt_two" 49 | vpc_id = module.vpc.vpc_id 50 | 51 | ingress { 52 | from_port = 22 53 | to_port = 22 54 | protocol = "tcp" 55 | 56 | cidr_blocks = [ 57 | "192.168.0.0/16", 58 | ] 59 | } 60 | } 61 | 62 | resource "aws_security_group" "all_worker_mgmt" { 63 | name_prefix = "all_worker_management" 64 | vpc_id = module.vpc.vpc_id 65 | 66 | ingress { 67 | from_port = 22 68 | to_port = 22 69 | protocol = "tcp" 70 | 71 | cidr_blocks = [ 72 | "10.0.0.0/8", 73 | "172.16.0.0/12", 74 | "192.168.0.0/16", 75 | ] 76 | } 77 | } 78 | 79 | module "vpc" { 80 | source = "terraform-aws-modules/vpc/aws" 81 | version = "~> 2.47" 82 | 83 | name = "benchmark-vpc" 84 | cidr = "10.0.0.0/16" 85 | 
azs = data.aws_availability_zones.available.names 86 | private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] 87 | public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] 88 | enable_nat_gateway = true 89 | single_nat_gateway = true 90 | enable_dns_hostnames = true 91 | 92 | public_subnet_tags = { 93 | "kubernetes.io/cluster/${local.cluster_name}" = "shared" 94 | "kubernetes.io/role/elb" = "1" 95 | } 96 | 97 | private_subnet_tags = { 98 | "kubernetes.io/cluster/${local.cluster_name}" = "shared" 99 | "kubernetes.io/role/internal-elb" = "1" 100 | } 101 | } 102 | 103 | module "eks" { 104 | # "../.." resolves to the repository root, which is not a Terraform module 105 | # (leftover from the upstream terraform-aws-eks examples layout); use the 106 | # registry module this configuration's worker_groups syntax was written against. 107 | source = "terraform-aws-modules/eks/aws" 108 | version = "~> 17.1" 109 | cluster_name = local.cluster_name 110 | cluster_version = "1.20" 111 | subnets = module.vpc.private_subnets 112 | 113 | tags = { 114 | Environment = "benchmark" 115 | GithubRepo = "terraform-aws-eks" 116 | GithubOrg = "terraform-aws-modules" 117 | } 118 | 119 | vpc_id = module.vpc.vpc_id 120 | 121 | worker_groups = [ 122 | { 123 | name = "rabbit-pool" 124 | instance_type = var.instance_type 125 | additional_userdata = "rabbit pool" 126 | asg_desired_capacity = var.node_count 127 | additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id] 128 | # Label and taint need explicit "=true" values: the RabbitMQ pod's nodeAffinity 129 | # and toleration (rabbitmq.yml) match rabbit-pool=true, not an empty value. 130 | kubelet_extra_args = "--node-labels=rabbit-pool=true --register-with-taints=rabbit-pool=true:NoSchedule" 131 | }, 132 | { 133 | name = "data-pool" 134 | instance_type = "t3.medium" 135 | additional_userdata = "data pool" 136 | additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id] 137 | asg_desired_capacity = 3 138 | }, 139 | ] 140 | 141 | worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id] 142 | map_roles = var.map_roles 143 | map_users = var.map_users 144 | map_accounts = var.map_accounts 145 | 146 | write_kubeconfig = true 147 | config_output_path = "./" 148 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | RabbitMQ 
Benchmarker 2 | Copyright 2021 VMware, Inc. 3 | 4 | The Apache 2.0 license (the "License") set forth below applies to all parts of the RabbitMQ Benchmarker project. You may not use this file except in compliance with the License. 5 | 6 | Apache License 7 | 8 | Version 2.0, January 2004 9 | http://www.apache.org/licenses/ 10 | 11 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 12 | 13 | 1. Definitions. 14 | 15 | "License" shall mean the terms and conditions for use, reproduction, 16 | and distribution as defined by Sections 1 through 9 of this document. 17 | 18 | "Licensor" shall mean the copyright owner or entity authorized by the 19 | copyright owner that is granting the License. 20 | 21 | "Legal Entity" shall mean the union of the acting entity and all other 22 | entities that control, are controlled by, or are under common control 23 | with that entity. For the purposes of this definition, "control" means 24 | (i) the power, direct or indirect, to cause the direction or management 25 | of such entity, whether by contract or otherwise, or (ii) ownership 26 | of fifty percent (50%) or more of the outstanding shares, or (iii) 27 | beneficial ownership of such entity. 28 | 29 | "You" (or "Your") shall mean an individual or Legal Entity exercising 30 | permissions granted by this License. 31 | 32 | "Source" form shall mean the preferred form for making modifications, 33 | including but not limited to software source code, documentation source, 34 | and configuration files. 35 | 36 | "Object" form shall mean any form resulting from mechanical transformation 37 | or translation of a Source form, including but not limited to compiled 38 | object code, generated documentation, and conversions to other media 39 | types. 
40 | 41 | "Work" shall mean the work of authorship, whether in Source or 42 | Object form, made available under the License, as indicated by a copyright 43 | notice that is included in or attached to the work (an example is provided 44 | in the Appendix below). 45 | 46 | "Derivative Works" shall mean any work, whether in Source or Object form, 47 | that is based on (or derived from) the Work and for which the editorial 48 | revisions, annotations, elaborations, or other modifications represent, 49 | as a whole, an original work of authorship. For the purposes of this 50 | License, Derivative Works shall not include works that remain separable 51 | from, or merely link (or bind by name) to the interfaces of, the Work 52 | and Derivative Works thereof. 53 | 54 | "Contribution" shall mean any work of authorship, including the 55 | original version of the Work and any modifications or additions to 56 | that Work or Derivative Works thereof, that is intentionally submitted 57 | to Licensor for inclusion in the Work by the copyright owner or by an 58 | individual or Legal Entity authorized to submit on behalf of the copyright 59 | owner. For the purposes of this definition, "submitted" means any form of 60 | electronic, verbal, or written communication sent to the Licensor or its 61 | representatives, including but not limited to communication on electronic 62 | mailing lists, source code control systems, and issue tracking systems 63 | that are managed by, or on behalf of, the Licensor for the purpose of 64 | discussing and improving the Work, but excluding communication that is 65 | conspicuously marked or otherwise designated in writing by the copyright 66 | owner as "Not a Contribution." 67 | 68 | "Contributor" shall mean Licensor and any individual or Legal Entity 69 | on behalf of whom a Contribution has been received by Licensor and 70 | subsequently incorporated within the Work. 71 | 72 | 2. Grant of Copyright License. 
73 | Subject to the terms and conditions of this License, each Contributor 74 | hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, 75 | royalty-free, irrevocable copyright license to reproduce, prepare 76 | Derivative Works of, publicly display, publicly perform, sublicense, and 77 | distribute the Work and such Derivative Works in Source or Object form. 78 | 79 | 3. Grant of Patent License. 80 | Subject to the terms and conditions of this License, each Contributor 81 | hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, 82 | royalty- free, irrevocable (except as stated in this section) patent 83 | license to make, have made, use, offer to sell, sell, import, and 84 | otherwise transfer the Work, where such license applies only to those 85 | patent claims licensable by such Contributor that are necessarily 86 | infringed by their Contribution(s) alone or by combination of 87 | their Contribution(s) with the Work to which such Contribution(s) 88 | was submitted. If You institute patent litigation against any entity 89 | (including a cross-claim or counterclaim in a lawsuit) alleging that the 90 | Work or a Contribution incorporated within the Work constitutes direct 91 | or contributory patent infringement, then any patent licenses granted 92 | to You under this License for that Work shall terminate as of the date 93 | such litigation is filed. 94 | 95 | 4. Redistribution. 96 | You may reproduce and distribute copies of the Work or Derivative Works 97 | thereof in any medium, with or without modifications, and in Source or 98 | Object form, provided that You meet the following conditions: 99 | 100 | a. You must give any other recipients of the Work or Derivative Works 101 | a copy of this License; and 102 | 103 | b. You must cause any modified files to carry prominent notices stating 104 | that You changed the files; and 105 | 106 | c. 
You must retain, in the Source form of any Derivative Works that 107 | You distribute, all copyright, patent, trademark, and attribution 108 | notices from the Source form of the Work, excluding those notices 109 | that do not pertain to any part of the Derivative Works; and 110 | 111 | d. If the Work includes a "NOTICE" text file as part of its 112 | distribution, then any Derivative Works that You distribute must 113 | include a readable copy of the attribution notices contained 114 | within such NOTICE file, excluding those notices that do not 115 | pertain to any part of the Derivative Works, in at least one of 116 | the following places: within a NOTICE text file distributed as part 117 | of the Derivative Works; within the Source form or documentation, 118 | if provided along with the Derivative Works; or, within a display 119 | generated by the Derivative Works, if and wherever such third-party 120 | notices normally appear. The contents of the NOTICE file are for 121 | informational purposes only and do not modify the License. You 122 | may add Your own attribution notices within Derivative Works that 123 | You distribute, alongside or as an addendum to the NOTICE text 124 | from the Work, provided that such additional attribution notices 125 | cannot be construed as modifying the License. You may add Your own 126 | copyright statement to Your modifications and may provide additional 127 | or different license terms and conditions for use, reproduction, or 128 | distribution of Your modifications, or for any such Derivative Works 129 | as a whole, provided Your use, reproduction, and distribution of the 130 | Work otherwise complies with the conditions stated in this License. 131 | 132 | 5. Submission of Contributions. 
133 | Unless You explicitly state otherwise, any Contribution intentionally 134 | submitted for inclusion in the Work by You to the Licensor shall be 135 | under the terms and conditions of this License, without any additional 136 | terms or conditions. Notwithstanding the above, nothing herein shall 137 | supersede or modify the terms of any separate license agreement you may 138 | have executed with Licensor regarding such Contributions. 139 | 140 | 6. Trademarks. 141 | This License does not grant permission to use the trade names, trademarks, 142 | service marks, or product names of the Licensor, except as required for 143 | reasonable and customary use in describing the origin of the Work and 144 | reproducing the content of the NOTICE file. 145 | 146 | 7. Disclaimer of Warranty. 147 | Unless required by applicable law or agreed to in writing, Licensor 148 | provides the Work (and each Contributor provides its Contributions) on 149 | an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 150 | express or implied, including, without limitation, any warranties or 151 | conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR 152 | A PARTICULAR PURPOSE. You are solely responsible for determining the 153 | appropriateness of using or redistributing the Work and assume any risks 154 | associated with Your exercise of permissions under this License. 155 | 156 | 8. Limitation of Liability. 
157 | In no event and under no legal theory, whether in tort (including 158 | negligence), contract, or otherwise, unless required by applicable law 159 | (such as deliberate and grossly negligent acts) or agreed to in writing, 160 | shall any Contributor be liable to You for damages, including any direct, 161 | indirect, special, incidental, or consequential damages of any character 162 | arising as a result of this License or out of the use or inability to 163 | use the Work (including but not limited to damages for loss of goodwill, 164 | work stoppage, computer failure or malfunction, or any and all other 165 | commercial damages or losses), even if such Contributor has been advised 166 | of the possibility of such damages. 167 | 168 | 9. Accepting Warranty or Additional Liability. 169 | While redistributing the Work or Derivative Works thereof, You may 170 | choose to offer, and charge a fee for, acceptance of support, warranty, 171 | indemnity, or other liability obligations and/or rights consistent with 172 | this License. However, in accepting such obligations, You may act only 173 | on Your own behalf and on Your sole responsibility, not on behalf of 174 | any other Contributor, and only if You agree to indemnify, defend, and 175 | hold each Contributor harmless for any liability incurred by, or claims 176 | asserted against, such Contributor by reason of your accepting any such 177 | warranty or additional liability. 178 | 179 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /benchmark: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # RabbitMQ Benchmarker 4 | # Copyright 2021 VMware, Inc. 5 | # 6 | # SPDX-License-Identifier: Apache-2.0 7 | # 8 | # This product is licensed to you under the Apache 2.0 license 9 | # (the "License"). You may not use this product except in compliance 10 | # with the Apache 2.0 License. 
#
# This product may include a number of subcomponents with separate
# copyright notices and license terms. Your use of these subcomponents
# is subject to the terms and conditions of the subcomponent's license,
# as noted in the LICENSE file.

set -euo pipefail

# ANSI escape sequences for colored status output. They must be rendered
# through printf's %b directive so the escapes are interpreted.
GREEN='\033[0;32m'
ORANGE='\033[0;33m'
RED='\033[0;31m'
NO_COLOR='\033[0m'

# Print command-line usage to stdout.
usage() {
  cat <<END
USAGE:
Provision infrastructure and run benchmark on Kubernetes
% benchmark --provider (gke|aks|eks|calatrava)
Install and run Benchmarking tools on existing targeted Kubernetes
% benchmark --skip-terraform
Destroy infrastructure
% benchmark destroy --provider (gke|aks|eks|calatrava)
END
}

# Provision a Kubernetes cluster for the provider named in the global
# $provider (set by the argument parser at the bottom of this script).
# Exits 1 on an unknown provider.
terraform_env() {
  case "$provider" in
    "gke")
      terraform_gke
      ;;

    "aks")
      terraform_aks
      ;;

    "eks")
      terraform_eks
      ;;

    "calatrava")
      terraform_calatrava
      ;;

    *)
      # Fixed: use %b (not %s) so the ANSI color codes are interpreted
      # instead of being printed literally.
      printf "%bOption '%s' not recognized%b\n" "$RED" "$provider" "$NO_COLOR"
      exit 1
      ;;
  esac
}

# Create a GKE cluster from gke-cluster-config.tfvars and point KUBECONFIG
# at the kubeconfig file the Terraform module writes.
terraform_gke() {
  pushd terraform/gke
  printf "%bTerraforming a GKE cluster.%b\n" "$GREEN" "$NO_COLOR"
  terraform init
  terraform apply -var-file="../../gke-cluster-config.tfvars"
  export KUBECONFIG="$PWD/kubeconfig-rabbitmq-benchmark"
  printf "%bCluster created, credentials are located in 'terraform/gke/kubeconfig-rabbitmq-benchmark'.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

# Create an AKS cluster from aks-cluster-config.tfvars. The kubeconfig is
# exposed as a Terraform output, so capture it into a file before exporting
# KUBECONFIG.
terraform_aks() {
  pushd terraform/aks
  printf "%bTerraforming an AKS cluster.%b\n" "$GREEN" "$NO_COLOR"
  terraform init
  terraform apply -var-file="../../aks-cluster-config.tfvars"
  terraform output kube_config > kubeconfig-rabbitmq-benchmark
  export KUBECONFIG="$PWD/kubeconfig-rabbitmq-benchmark"
  printf "%bCluster created, credentials are located in 'terraform/aks/kubeconfig-rabbitmq-benchmark'.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

# Create an EKS cluster from eks-cluster-config.tfvars and point KUBECONFIG
# at the kubeconfig file the Terraform module writes.
terraform_eks() {
  pushd terraform/eks
  printf "%bTerraforming an EKS cluster.%b\n" "$GREEN" "$NO_COLOR"
  terraform init
  terraform apply -var-file="../../eks-cluster-config.tfvars"
  export KUBECONFIG="$PWD/kubeconfig_benchmark"
  printf "%bCluster created, credentials are located in 'terraform/eks/kubeconfig_benchmark'.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

# Build and install the Calatrava Terraform provider from its source repo
# into a temp dir that is cleaned up on exit.
# NOTE(review): the EXIT trap installed here is later replaced by the one in
# deploy_kube_prometheus_stack, so this temp dir can leak — consider a single
# shared cleanup handler.
compile_calatrava_provider() {
  calatrava_source="$(mktemp -d)"
  trap 'rm -rf "$calatrava_source"' EXIT
  git clone git@gitlab.eng.vmware.com:calatrava/calatrava.git "$calatrava_source"
  pushd "$calatrava_source/terraform"
  make install
  popd
}

# Create a Calatrava cluster from calatrava-cluster-config.tfvars, export
# KUBECONFIG, and grant the PSP role binding the cluster requires.
terraform_calatrava() {
  printf "%bTerraforming a Calatrava cluster.%b\n" "$GREEN" "$NO_COLOR"
  compile_calatrava_provider
  pushd terraform/calatrava
  terraform init
  terraform apply -var-file="../../calatrava-cluster-config.tfvars"
  export KUBECONFIG="$PWD/kubeconfig-rabbitmq-benchmark"
  # Fixed: this message previously pointed at 'terraform/eks/...', but the
  # kubeconfig is written under terraform/calatrava/.
  printf "%bCluster created, credentials are located in 'terraform/calatrava/kubeconfig-rabbitmq-benchmark'.%b\n" "$GREEN" "$NO_COLOR"
  popd
  configureClusterRoleBinding
}

# Allow all authenticated users to run unprivileged pods (needed on clusters
# that enforce the vmware-system-unprivileged pod security policy).
configureClusterRoleBinding() {
  kubectl create clusterrolebinding default-binding \
    --clusterrole=psp:vmware-system-unprivileged \
    --group=system:authenticated
}

# Install everything the benchmark needs on the cluster KUBECONFIG points at.
deploy() {
  deploy_cluster_operator
  deploy_influx_db
  deploy_kube_prometheus_stack
  deploy_production_cluster
}

# Install the latest released RabbitMQ cluster operator.
deploy_cluster_operator() {
  printf "%bDeploying latest RabbitMQ cluster operator.%b\n" "$GREEN" "$NO_COLOR"
  kubectl apply -f "https://github.com/rabbitmq/cluster-operator/releases/latest/download/cluster-operator.yml"
  printf "%bCluster operator deployed.%b\n" "$GREEN" "$NO_COLOR"
}
# Install the kube-prometheus-stack Helm chart plus the RabbitMQ operator's
# ServiceMonitor/PodMonitor, Prometheus rules, and Grafana dashboards.
deploy_kube_prometheus_stack() {
  printf "%bDeploying Kube Prometheus Stack.%b\n" "$GREEN" "$NO_COLOR"
  promNS="prom"
  helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
  helm upgrade --install prom \
    --namespace "$promNS" --create-namespace \
    -f test-infrastructure/prom-values.yml \
    prometheus-community/kube-prometheus-stack

  # The observability manifests only ship in the operator source tree, so
  # fetch the latest source tarball into a temp dir.
  operator="$(mktemp -d)"
  # NOTE(review): this replaces any EXIT trap installed earlier (e.g. the one
  # from compile_calatrava_provider), leaking that temp dir.
  trap 'rm -rf "$operator"' EXIT
  pushd "$operator"
  wget -c "$(curl -s https://api.github.com/repos/rabbitmq/cluster-operator/releases/latest | jq -r '.tarball_url')" -O cluster-operator.tar.gz
  tar xfz cluster-operator.tar.gz
  source_dir="$(find "$operator" -type d -name 'rabbitmq-cluster-operator-*')"
  popd

  printf "%bDeploying ServiceMonitor and PodMonitor.%b\n" "$GREEN" "$NO_COLOR"
  kubectl -n "$promNS" apply -f "$source_dir/observability/prometheus/monitors/"

  printf "%bDeploying Prometheus Rules.%b\n" "$GREEN" "$NO_COLOR"
  kubectl -n "$promNS" apply --recursive -f "$source_dir/observability/prometheus/rules/"

  printf "%bDeploying Grafana Dashboards.%b\n" "$GREEN" "$NO_COLOR"

  # Fixed: 'localholst' -> 'localhost' in the port-forward hint.
  printf "To open Grafana run\n%% kubectl -n %s port-forward svc/prom-grafana 3000:80\nand open your browser at http://localhost:3000\nusername: admin, password: admin\n\n%bKube Prometheus Stack deployed.%b\n" "$promNS" "$GREEN" "$NO_COLOR"
}

# Install InfluxDB via Helm; the benchmark results are exported into it.
deploy_influx_db() {
  printf "%bDeploying InfluxDB for storing metrics.%b\n" "$GREEN" "$NO_COLOR"
  helm repo add influxdata https://helm.influxdata.com/
  helm upgrade --install benchmark-data \
    -f test-infrastructure/influx-values.yml \
    influxdata/influxdb
  printf "%bInfluxDB deployed.%b\n" "$GREEN" "$NO_COLOR"
}

# Render the RabbitmqCluster manifest with ytt, apply it, and block until the
# cluster reports the AllReplicasReady condition.
deploy_production_cluster() {
  printf "%bDeploying RabbitMQ cluster.%b\n" "$GREEN" "$NO_COLOR"
  ytt -f test-infrastructure/rabbitmq.yml -f RabbitMQ-values.yml | kubectl apply -f-
  printf "Waiting for RabbitMQ cluster to be ready."
  while [ "$(kubectl get rmq benchmark -o jsonpath='{.status.conditions[?(@.type=="AllReplicasReady")].status}')" != "True" ]; do
    printf "."
    sleep 5
  done
  printf "\n"
  printf "%bRabbitMQ cluster deployed.%b\n" "$GREEN" "$NO_COLOR"
}

# Now to actually run the benchmark
# The idea is to use RabbitTestTool, which requires some policy and topology files
# Additionally, we should collect the test results in an InfluxDB
benchmark() {
  printf "%bRunning Rabbit Test Tool. Test results exported to InfluxDB.%b\n" "$GREEN" "$NO_COLOR"
  ytt -f test-infrastructure/benchmarker.yml -f test-infrastructure/config.json -f policy.json -f topology.json | kubectl apply -f-
}

# Tear down the cluster for the provider named in the global $provider.
# Exits 1 on an unknown provider.
destroy() {
  case "$provider" in
    "gke")
      destroy_gke
      ;;

    "aks")
      destroy_aks
      ;;

    "eks")
      destroy_eks
      ;;

    "calatrava")
      destroy_calatrava
      ;;

    *)
      # Fixed: pass $provider as a printf argument instead of expanding it
      # inside the format string (a stray '%' in the value would break printf);
      # this also matches the style used in terraform_env.
      printf "%bOption '%s' not recognized%b\n" "$RED" "$provider" "$NO_COLOR"
      exit 1
      ;;
  esac
}

# NOTE(review): the destroy_* functions run 'terraform destroy' without the
# matching -var-file, so Terraform may prompt for required variables —
# consider passing the same tfvars file used during apply. Confirm before
# changing, as it alters the interactive flow.
destroy_gke() {
  pushd terraform/gke
  printf "%bDestroying GKE cluster.%b\n" "$ORANGE" "$NO_COLOR"
  terraform destroy
  printf "%bGKE cluster destroyed.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

destroy_aks() {
  pushd terraform/aks
  printf "%bDestroying AKS cluster.%b\n" "$ORANGE" "$NO_COLOR"
  terraform destroy
  printf "%bAKS cluster destroyed.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

destroy_eks() {
  pushd terraform/eks
  printf "%bDestroying EKS cluster.%b\n" "$ORANGE" "$NO_COLOR"
  terraform destroy
  printf "%bEKS cluster destroyed.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

destroy_calatrava() {
  pushd terraform/calatrava
  printf "%bDestroying Calatrava cluster.%b\n" "$ORANGE" "$NO_COLOR"
  terraform destroy
  printf "%bCalatrava cluster destroyed.%b\n" "$GREEN" "$NO_COLOR"
  popd
}

# --- Entry point: dispatch on argument count ---
#   1 arg : --help | --skip-terraform
#   2 args: --provider <name>            (provision + deploy + benchmark)
#   3 args: destroy --provider <name>    (tear down)
case "$#" in
  1)
    if [[ "$1" == "--help" ]]; then
      usage
      exit 0
    elif [[ "$1" == "--skip-terraform" ]]; then
      deploy
      benchmark
      exit 0
    else
      usage
      exit 1
    fi
    ;;

  2)
    if [[ "$1" != "--provider" ]]; then
      usage
      exit 1
    fi
    provider="$2"
    terraform_env
    deploy
    benchmark
    exit 0
    ;;

  3)
    if [[ "$1" != "destroy" ]] || [[ "$2" != "--provider" ]]; then
      usage
      exit 1
    fi
    provider="$3"
    destroy
    exit 0
    ;;

  *)
    usage
    exit 1
    ;;
esac