├── .gitignore ├── README.md ├── docker-compose.yml ├── docker-configs ├── acl │ ├── cross-namespace-discovery.hcl │ ├── dc1-mgw.hcl │ ├── dc1-read.hcl │ ├── dc1-unicorn-frontend.hcl │ ├── dc2-mgw.hcl │ ├── dns-discovery.hcl │ ├── god-mode.hcl │ └── team-proj1-rw.hcl ├── agent-configs │ ├── client-dc1-alpha.hcl │ ├── client-dc1-charlie-ap1.hcl │ ├── client-dc1-echo-proj1.hcl │ ├── client-dc1-unicorn.hcl │ ├── client-dc2-bravo.hcl │ ├── client-dc2-foxtrot.hcl │ ├── client-dc2-unicorn.hcl │ ├── server1-dc1.hcl │ └── server1-dc2.hcl ├── auth │ └── oidc-auth.json ├── certs │ ├── consul-agent-ca-key.pem │ ├── consul-agent-ca.pem │ ├── dc1-server-consul-0-key.pem │ ├── dc1-server-consul-0.pem │ ├── dc2-server-consul-0-key.pem │ └── dc2-server-consul-0.pem ├── configs │ ├── exported-services │ │ ├── exported-services-dc1-default.hcl │ │ ├── exported-services-dc1-unicorn.hcl │ │ ├── exported-services-dc2-chunky.hcl │ │ ├── exported-services-dc2-default.hcl │ │ ├── exported-services-dc2-unicorn_backend.hcl │ │ └── exported-services-donkey.hcl │ ├── intentions │ │ ├── dc1-unicorn_backend_failover-allow.hcl │ │ ├── dc1-unicorn_frontend-allow.hcl │ │ ├── dc2-unicorn_frontend-allow.hcl │ │ ├── web_chunky-allow.hcl │ │ └── web_upstream-allow.hcl │ ├── mgw │ │ ├── dc1-mgw.hcl │ │ └── dc2-mgw.hcl │ ├── prepared_queries │ │ ├── pq-template.json │ │ ├── pq-unicorn-sg.json │ │ ├── pq-unicorn-targets.json │ │ ├── pq-web-chunky-peer.json │ │ └── pq-web-chunky-sg.json │ ├── proxy-defaults │ │ ├── dc1-default-proxydefaults.hcl │ │ ├── dc1-unicorn-proxydefaults.hcl │ │ ├── dc2-chunky-proxydefaults.hcl │ │ ├── dc2-default-proxydefaults.hcl │ │ └── dc2-unicorn-proxydefaults.hcl │ ├── sameness-groups │ │ ├── dc1-default-ssg-web.hcl │ │ └── dc1-unicorn-ssg-unicorn.hcl │ ├── service-defaults │ │ ├── unicorn-backend-defaults.hcl │ │ ├── unicorn-frontend-defaults.hcl │ │ ├── web-chunky-defaults.hcl │ │ ├── web-defaults.hcl │ │ └── web-upstream-defaults.hcl │ ├── service-resolver │ │ └── 
dc1-unicorn-backend-failover.hcl │ └── services │ │ ├── FakeService-descriptions.txt │ │ ├── dc1-josh-long.hcl │ │ ├── dc1-josh.hcl │ │ ├── dc1-proj1-baphomet-dereg.json │ │ ├── dc1-proj1-baphomet.hcl │ │ ├── dc1-proj1-baphomet0.json │ │ ├── dc1-proj1-baphomet1.json │ │ ├── dc1-proj1-baphomet2.json │ │ ├── dc1-unicorn-backend.hcl │ │ ├── dc1-unicorn-frontend.hcl │ │ ├── dc1-web-upstream_v1.hcl │ │ ├── dc1-web_v1.hcl │ │ ├── dc2-josh.hcl │ │ ├── dc2-unicorn-backend.hcl │ │ ├── dc2-web-chunky_v1.hcl │ │ └── donkey-ap1.hcl ├── docker_vars │ ├── acl-custom.env │ ├── acl-root.env │ ├── acl-secure.env │ ├── mac_arm64-acl-custom.env │ ├── mac_arm64-acl-root.env │ └── mac_arm64-acl-secure.env ├── prometheus │ ├── alert.yml │ └── prometheus.yml └── scripts │ ├── app-baphomet.sh │ ├── app-unicorn.sh │ ├── app-web.sh │ ├── jwt.sh │ ├── oidc-auth0.sh │ └── vm-outputs.sh ├── docs ├── DoctorConsul-TheManual-Draft.pdf ├── acl-everything.md ├── app-banana_split-notes.md ├── architecture.md ├── consul-clients.md ├── consul-structure.md ├── images │ └── architecture2.png ├── network.md └── ui-viz.md ├── kill.sh ├── kube-config.sh ├── kube ├── calico.yaml ├── configs │ ├── dc3 │ │ ├── acl │ │ │ └── dc3_default-terminating-gateway.hcl │ │ ├── api-gw │ │ │ ├── apigw-http-listener-cert.yaml │ │ │ ├── gateway-consul_apigw.yaml │ │ │ ├── httproute-unicorn_ingress.yaml │ │ │ ├── intention-dc3_default-externalz_http.yaml │ │ │ ├── intention-dc3_default-externalz_tcp.yaml │ │ │ ├── intention-dc3_default-unicorn_frontend.yaml │ │ │ ├── intention-dc3_default-unicorn_ssg_frontend.yaml │ │ │ └── tcproute-externalz_tcp_ingress.yaml │ │ ├── defaults │ │ │ ├── mesh-dc3_cernunnos.yaml │ │ │ ├── mesh-dc3_default.yaml │ │ │ └── proxy-defaults.yaml │ │ ├── exported-services │ │ │ ├── exported-services-dc3-cernunnos.yaml │ │ │ └── exported-services-dc3-default.yaml │ │ ├── external-services │ │ │ ├── service-defaults-example.com_http.yaml │ │ │ ├── service-defaults-example.com_tcp.yaml │ │ │ └── 
service-defaults-whatismyip.yaml │ │ ├── intentions │ │ │ ├── dc3-cernunnos-banana_split-ice_cream.yaml │ │ │ ├── dc3-cernunnos-paris-paris.yaml │ │ │ ├── dc3-cernunnos-unicorn_backend-allow.yaml │ │ │ ├── dc3-cernunnos-unicorn_tp_backend-allow.yaml │ │ │ ├── dc3-default-external-example_http-allow.yaml │ │ │ ├── dc3-default-external-example_tcp-allow.yaml │ │ │ ├── dc3-default-external-whatismyip-allow.yaml │ │ │ ├── dc3-default-unicorn_backend-allow.yaml │ │ │ └── dc3-default-unicorn_tp_backend-allow.yaml │ │ ├── sameness-groups │ │ │ ├── dc3-cernunnos-ssg-unicorn.yaml │ │ │ └── dc3-default-ssg-unicorn.yaml │ │ ├── service-resolver │ │ │ ├── service-resolver-ice_cream.yaml │ │ │ └── service-resolver-unicorn_sameness.yaml │ │ ├── service-splitter │ │ │ └── service-splitter-ice_cream.yaml │ │ ├── services │ │ │ ├── banana_split-icecream_chocolate.yaml │ │ │ ├── banana_split-icecream_strawberry.yaml │ │ │ ├── banana_split-icecream_vanilla.yaml │ │ │ ├── banana_split-neapolitan.yaml │ │ │ ├── externalz-http.yaml │ │ │ ├── externalz-tcp.yaml │ │ │ ├── paris-leroy_jenkins.yaml │ │ │ ├── paris-paris-cernunnos.yaml │ │ │ ├── paris-pretty_please.yaml │ │ │ ├── unicorn-backend.yaml │ │ │ ├── unicorn-cernunnos-backend.yaml │ │ │ ├── unicorn-cernunnos-frontend.yaml │ │ │ ├── unicorn-cernunnos-tp_backend.yaml │ │ │ ├── unicorn-frontend-fs.html │ │ │ ├── unicorn-frontend.yaml │ │ │ ├── unicorn-ssg_frontend.yaml │ │ │ └── unicorn-tp_backend.yaml │ │ └── tgw │ │ │ └── dc3_default-tgw.yaml │ ├── dc4 │ │ ├── acl │ │ │ └── dc4_sheol-terminating-gateway.hcl │ │ ├── defaults │ │ │ ├── mesh-dc4_default.yaml │ │ │ ├── mesh-dc4_taranis.yaml │ │ │ └── proxy-defaults.yaml │ │ ├── exported-services │ │ │ ├── exported-services-dc4-default.yaml │ │ │ └── exported-services-dc4-taranis.yaml │ │ ├── external-services │ │ │ ├── service_defaults-sheol_ext.yaml │ │ │ ├── service_defaults-sheol_ext1.yaml │ │ │ └── service_defaults-sheol_ext2.yaml │ │ ├── intentions │ │ │ ├── 
dc4-default-unicorn_backend-allow.yaml │ │ │ ├── dc4-default-unicorn_tp_backend-allow.yaml │ │ │ ├── dc4-taranis-unicorn_backend-allow.yaml │ │ │ ├── dc4-taranis-unicorn_tp_backend-allow.yaml │ │ │ ├── dc4_default-sheol_ext.yaml │ │ │ ├── dc4_default-sheol_ext1.yaml │ │ │ └── dc4_default-sheol_ext2.yaml │ │ ├── sameness-groups │ │ │ ├── dc4-default-ssg-unicorn.yaml │ │ │ └── dc4-taranis-ssg-unicorn.yaml │ │ ├── services │ │ │ ├── sheol_app.yaml │ │ │ ├── sheol_app1.yaml │ │ │ ├── sheol_app2.yaml │ │ │ ├── unicorn-backend.yaml │ │ │ ├── unicorn-taranis-backend.yaml │ │ │ ├── unicorn-taranis-tp_backend.yaml │ │ │ └── unicorn-tp_backend.yaml │ │ └── tgw │ │ │ └── dc4_sheol-tgw.yaml │ └── peering │ │ ├── mgw-peering.yaml │ │ ├── peering-acceptor_dc3-peeringtest_dc4-peeringtest.yaml │ │ ├── peering-dialer_dc3-peeringtest_dc4-peeringtest.yaml │ │ ├── peering_dc3-default_dc1-default.yaml │ │ ├── peering_dc3-default_dc1-unicorn.yaml │ │ └── peering_dc3-default_dc2-unicorn.yaml ├── helm │ ├── dc3-helm-values.yaml │ ├── dc3-p1-helm-values.yaml │ ├── dc4-helm-values.yaml │ ├── dc4-p1-helm-values.yaml │ └── latest-complete-helm-values.yaml ├── prometheus │ └── dc3-prometheus-service.yaml └── vault │ ├── dc3-vault-helm-values.yaml │ ├── dc4-vault-helm-values.yaml │ └── vault-latest-complete-helm-values.yaml ├── post-config.sh ├── scripts ├── apigw-config.sh ├── app-banana_split.sh ├── app-externalz.sh ├── app-paris.sh ├── app-sheol.sh ├── app-unicorn.sh ├── functions.sh ├── gke-config.sh ├── helm-install.sh ├── k3d-config.sh ├── outputs.sh ├── terminating-gateway.sh ├── vars.sh └── vault-config.sh ├── start.sh ├── xtra ├── command-notes.sh ├── k9s │ ├── macos │ │ └── plugin.yml │ ├── plugin.yml │ └── vsc └── policy-smash.sh └── zork.sh /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.pyc 2 | **/.terraform/* 3 | *.audit 4 | *.bak* 5 | *.crt 6 | *.csr 7 | *.csv 8 | *.debug 9 | *.err 10 | *.key 11 | *.log 12 | *.out 13 | *.p7b 
14 | *.swp 15 | *.tfstate 16 | *.tfstate.* 17 | *.xlsx 18 | *.zip 19 | *_override.tf 20 | *_override.tf.json 21 | *hclic 22 | *pass 23 | *passwd 24 | *password 25 | *snap 26 | *tfplan* 27 | *tmp* 28 | *.token 29 | .init.json 30 | .init.out 31 | .init.txt 32 | .terraform.lock.hcl 33 | .terraform.lock.hcl 34 | .terraformrc 35 | __pycache__ 36 | logs/* 37 | audit 38 | init 39 | init.json 40 | init.out 41 | init.txt 42 | license* 43 | override.tf 44 | override.tf.json 45 | secret.tfvars 46 | terraform.rc 47 | tf.plan.out 48 | tokens/* 49 | nohup.out -------------------------------------------------------------------------------- /docker-configs/acl/cross-namespace-discovery.hcl: -------------------------------------------------------------------------------- 1 | namespace_prefix "" { 2 | service_prefix "" { 3 | policy = "read" 4 | } 5 | node_prefix "" { 6 | policy = "read" 7 | } 8 | } -------------------------------------------------------------------------------- /docker-configs/acl/dc1-mgw.hcl: -------------------------------------------------------------------------------- 1 | partition_prefix "" { 2 | peering = "read" 3 | } 4 | 5 | # What else do we need here? 
-------------------------------------------------------------------------------- /docker-configs/acl/dc1-read.hcl: -------------------------------------------------------------------------------- 1 | operator = "read" 2 | 3 | namespace_prefix "" { 4 | service_prefix "" { 5 | policy = "read" 6 | intentions = "read" 7 | } 8 | node_prefix "" { 9 | policy = "read" 10 | } 11 | } 12 | 13 | 14 | -------------------------------------------------------------------------------- /docker-configs/acl/dc1-unicorn-frontend.hcl: -------------------------------------------------------------------------------- 1 | // service "whateverIwant" { 2 | // policy = "write" 3 | // } 4 | 5 | namespace_prefix "" { 6 | service_prefix "" { 7 | policy = "read" 8 | } 9 | node_prefix "" { 10 | policy = "read" 11 | } 12 | } 13 | 14 | namespace "frontend" { 15 | service_prefix "unicorn-frontend"{ 16 | policy = "write" 17 | } 18 | } -------------------------------------------------------------------------------- /docker-configs/acl/dc2-mgw.hcl: -------------------------------------------------------------------------------- 1 | partition_prefix "" { 2 | peering = "read" 3 | } 4 | 5 | # What else do we need here? 
-------------------------------------------------------------------------------- /docker-configs/acl/dns-discovery.hcl: -------------------------------------------------------------------------------- 1 | partition_prefix "" { 2 | namespace_prefix "" { 3 | service_prefix "" { 4 | policy = "read" 5 | } 6 | node_prefix "" { 7 | policy = "read" 8 | } 9 | } 10 | } 11 | 12 | 13 | -------------------------------------------------------------------------------- /docker-configs/acl/god-mode.hcl: -------------------------------------------------------------------------------- 1 | acl = "write" 2 | agent_prefix "" { 3 | policy = "write" 4 | } 5 | event_prefix "" { 6 | policy = "write" 7 | } 8 | key_prefix "" { 9 | policy = "write" 10 | } 11 | keyring = "write" 12 | node_prefix "" { 13 | policy = "write" 14 | } 15 | operator = "write" 16 | mesh = "write" 17 | peering = "write" 18 | query_prefix "" { 19 | policy = "write" 20 | } 21 | service_prefix "" { 22 | policy = "write" 23 | intentions = "write" 24 | } 25 | session_prefix "" { 26 | policy = "write" 27 | } 28 | partition_prefix "" { 29 | mesh = "write" 30 | peering = "write" 31 | namespace "default" { 32 | node_prefix "" { 33 | policy = "write" 34 | } 35 | agent_prefix "" { 36 | policy = "write" 37 | } 38 | } 39 | namespace_prefix "" { 40 | acl = "write" 41 | key_prefix "" { 42 | policy = "write" 43 | } 44 | node_prefix "" { 45 | # node policy is restricted to read within a namespace 46 | policy = "read" 47 | } 48 | session_prefix "" { 49 | policy = "write" 50 | } 51 | service_prefix "" { 52 | policy = "write" 53 | intentions = "write" 54 | } 55 | } 56 | } -------------------------------------------------------------------------------- /docker-configs/acl/team-proj1-rw.hcl: -------------------------------------------------------------------------------- 1 | partition "default" { 2 | operator = "read" 3 | } 4 | 5 | partition "proj1" { 6 | namespace_prefix "" { 7 | service_prefix "" { 8 | policy = "write" 9 | intentions = 
"write" 10 | } 11 | node_prefix "" { 12 | policy = "read" 13 | } 14 | } 15 | } 16 | 17 | 18 | -------------------------------------------------------------------------------- /docker-configs/agent-configs/client-dc1-alpha.hcl: -------------------------------------------------------------------------------- 1 | node_name = "client-dc1-alpha" 2 | datacenter = "dc1" 3 | partition = "default" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc1"] 8 | 9 | encrypt = "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=" 10 | 11 | acl { 12 | enabled = true 13 | tokens { 14 | agent = "00000000-0000-0000-0000-000000001111" 15 | default = "00000000-0000-0000-0000-000000001111" 16 | } 17 | } 18 | 19 | auto_encrypt = { 20 | tls = true 21 | } 22 | 23 | tls { 24 | defaults { 25 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 26 | 27 | verify_incoming = true 28 | verify_outgoing = true 29 | } 30 | internal_rpc { 31 | verify_server_hostname = true 32 | } 33 | } 34 | 35 | auto_reload_config = true -------------------------------------------------------------------------------- /docker-configs/agent-configs/client-dc1-charlie-ap1.hcl: -------------------------------------------------------------------------------- 1 | node_name = "client-dc1-charlie-ap1" 2 | datacenter = "dc1" 3 | partition = "donkey" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc1"] 8 | 9 | 10 | encrypt = "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=" 11 | 12 | acl { 13 | enabled = true 14 | tokens { 15 | agent = "root" 16 | default = "root" 17 | } 18 | } 19 | 20 | auto_encrypt = { 21 | tls = true 22 | } 23 | 24 | tls { 25 | defaults { 26 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 27 | 28 | verify_incoming = true 29 | verify_outgoing = true 30 | } 31 | internal_rpc { 32 | verify_server_hostname = true 33 | } 34 | } 35 | 36 | auto_reload_config = true 
-------------------------------------------------------------------------------- /docker-configs/agent-configs/client-dc1-echo-proj1.hcl: -------------------------------------------------------------------------------- 1 | node_name = "client-dc1-echo-proj1" 2 | datacenter = "dc1" 3 | partition = "proj1" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc1"] 8 | 9 | encrypt = "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=" 10 | 11 | acl { 12 | enabled = true 13 | tokens { 14 | agent = "root" 15 | default = "root" 16 | } 17 | } 18 | 19 | auto_encrypt = { 20 | tls = true 21 | } 22 | 23 | tls { 24 | defaults { 25 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 26 | 27 | verify_incoming = true 28 | verify_outgoing = true 29 | } 30 | internal_rpc { 31 | verify_server_hostname = true 32 | } 33 | } 34 | 35 | auto_reload_config = true -------------------------------------------------------------------------------- /docker-configs/agent-configs/client-dc1-unicorn.hcl: -------------------------------------------------------------------------------- 1 | node_name = "client-dc1-unicorn" 2 | datacenter = "dc1" 3 | partition = "unicorn" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc1"] 8 | 9 | addresses = { 10 | grpc = "0.0.0.0" 11 | http = "0.0.0.0" 12 | } 13 | 14 | ports = { 15 | grpc = 8502 16 | } 17 | 18 | encrypt = "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=" 19 | 20 | acl { 21 | enabled = true 22 | tokens { 23 | agent = "root" 24 | default = "root" 25 | } 26 | } 27 | 28 | auto_encrypt = { 29 | tls = true 30 | } 31 | 32 | tls { 33 | defaults { 34 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 35 | 36 | verify_incoming = true 37 | verify_outgoing = true 38 | } 39 | internal_rpc { 40 | verify_server_hostname = true 41 | } 42 | } 43 | 44 | auto_reload_config = true -------------------------------------------------------------------------------- 
/docker-configs/agent-configs/client-dc2-bravo.hcl: -------------------------------------------------------------------------------- 1 | node_name = "client-dc2-bravo" 2 | datacenter = "dc2" 3 | partition = "default" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc2"] 8 | 9 | encrypt = "dznVKWl1ri975FUJiddzAPM+3eNP9iXDad2c8hghsKA=" 10 | 11 | acl { 12 | enabled = true 13 | tokens { 14 | agent = "root" 15 | default = "root" 16 | } 17 | } 18 | 19 | auto_encrypt = { 20 | tls = true 21 | } 22 | 23 | tls { 24 | defaults { 25 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 26 | 27 | verify_incoming = true 28 | verify_outgoing = true 29 | } 30 | internal_rpc { 31 | verify_server_hostname = true 32 | } 33 | } 34 | 35 | auto_reload_config = true -------------------------------------------------------------------------------- /docker-configs/agent-configs/client-dc2-foxtrot.hcl: -------------------------------------------------------------------------------- 1 | node_name = "client-dc2-foxtrot" 2 | datacenter = "dc2" 3 | partition = "chunky" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc2"] 8 | 9 | addresses = { 10 | grpc = "0.0.0.0" 11 | http = "0.0.0.0" 12 | } 13 | 14 | ports = { 15 | grpc = 8502 16 | } 17 | 18 | encrypt = "dznVKWl1ri975FUJiddzAPM+3eNP9iXDad2c8hghsKA=" 19 | 20 | acl { 21 | enabled = true 22 | tokens { 23 | agent = "root" 24 | default = "root" 25 | } 26 | } 27 | 28 | auto_encrypt = { 29 | tls = true 30 | } 31 | 32 | tls { 33 | defaults { 34 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 35 | 36 | verify_incoming = true 37 | verify_outgoing = true 38 | } 39 | internal_rpc { 40 | verify_server_hostname = true 41 | } 42 | } 43 | 44 | auto_reload_config = true -------------------------------------------------------------------------------- /docker-configs/agent-configs/client-dc2-unicorn.hcl: 
-------------------------------------------------------------------------------- 1 | node_name = "client-dc2-unicorn" 2 | datacenter = "dc2" 3 | partition = "unicorn" 4 | 5 | data_dir = "/consul/data" 6 | log_level = "INFO" 7 | retry_join = ["consul-server1-dc2"] 8 | 9 | addresses = { 10 | grpc = "0.0.0.0" 11 | http = "0.0.0.0" 12 | } 13 | 14 | ports = { 15 | grpc = 8502 16 | } 17 | 18 | encrypt = "dznVKWl1ri975FUJiddzAPM+3eNP9iXDad2c8hghsKA=" 19 | 20 | acl { 21 | enabled = true 22 | tokens { 23 | agent = "root" 24 | default = "root" 25 | } 26 | } 27 | 28 | auto_encrypt = { 29 | tls = true 30 | } 31 | 32 | tls { 33 | defaults { 34 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 35 | 36 | verify_incoming = true 37 | verify_outgoing = true 38 | } 39 | internal_rpc { 40 | verify_server_hostname = true 41 | } 42 | } 43 | 44 | auto_reload_config = true -------------------------------------------------------------------------------- /docker-configs/agent-configs/server1-dc1.hcl: -------------------------------------------------------------------------------- 1 | node_name = "consul-server1-dc1" 2 | datacenter = "dc1" 3 | server = true 4 | license_path = "/consul/config/license" 5 | 6 | log_level = "INFO" 7 | 8 | peering { enabled = true } 9 | 10 | ui_config = { 11 | enabled = true 12 | 13 | metrics_provider = "prometheus" 14 | metrics_proxy = { 15 | base_url = "http://10.5.0.200:9090" 16 | } 17 | } 18 | 19 | data_dir = "/consul/data" 20 | 21 | addresses = { 22 | http = "0.0.0.0" 23 | grpc = "0.0.0.0" 24 | grpc_tls = "0.0.0.0" 25 | } 26 | 27 | advertise_addr = "10.5.0.2" 28 | advertise_addr_wan = "192.169.7.2" 29 | 30 | ports = { 31 | grpc = 8502 32 | grpc_tls = 8503 33 | dns = 53 34 | } 35 | 36 | acl { 37 | enabled = true 38 | default_policy = "deny" 39 | down_policy = "extend-cache" 40 | enable_token_persistence = true 41 | 42 | tokens { 43 | initial_management = "root" 44 | agent = "root" 45 | default = "" 46 | } 47 | } 48 | 49 | auto_encrypt = { 50 | allow_tls 
= true 51 | } 52 | 53 | encrypt = "aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=" 54 | 55 | tls { 56 | defaults { 57 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 58 | cert_file = "/consul/config/certs/dc1-server-consul-0.pem" 59 | key_file = "/consul/config/certs/dc1-server-consul-0-key.pem" 60 | 61 | verify_incoming = true 62 | verify_outgoing = true 63 | } 64 | internal_rpc { 65 | verify_server_hostname = true 66 | } 67 | } 68 | 69 | auto_reload_config = true -------------------------------------------------------------------------------- /docker-configs/agent-configs/server1-dc2.hcl: -------------------------------------------------------------------------------- 1 | node_name = "consul-server1-dc2" 2 | datacenter = "dc2" 3 | server = true 4 | license_path = "/consul/config/license" 5 | 6 | log_level = "INFO" 7 | 8 | peering { enabled = true } 9 | 10 | ui_config = { 11 | enabled = true 12 | 13 | metrics_provider = "prometheus" 14 | metrics_proxy = { 15 | base_url = "http://10.6.0.200:9090" 16 | } 17 | } 18 | 19 | data_dir = "/consul/data" 20 | 21 | addresses = { 22 | http = "0.0.0.0" 23 | grpc = "0.0.0.0" 24 | grpc_tls = "0.0.0.0" 25 | } 26 | 27 | ports = { 28 | grpc = 8502 29 | grpc_tls = 8503 30 | } 31 | 32 | acl { 33 | enabled = true 34 | default_policy = "deny" 35 | down_policy = "extend-cache" 36 | enable_token_persistence = true 37 | 38 | tokens { 39 | initial_management = "root" 40 | agent = "root" 41 | default = "" 42 | } 43 | } 44 | 45 | auto_encrypt = { 46 | allow_tls = true 47 | } 48 | 49 | encrypt = "dznVKWl1ri975FUJiddzAPM+3eNP9iXDad2c8hghsKA=" 50 | 51 | tls { 52 | defaults { 53 | ca_file = "/consul/config/certs/consul-agent-ca.pem" 54 | cert_file = "/consul/config/certs/dc2-server-consul-0.pem" 55 | key_file = "/consul/config/certs/dc2-server-consul-0-key.pem" 56 | 57 | verify_incoming = true 58 | verify_outgoing = true 59 | } 60 | internal_rpc { 61 | verify_server_hostname = true 62 | } 63 | } 64 | 65 | auto_reload_config = true 
-------------------------------------------------------------------------------- /docker-configs/auth/oidc-auth.json: -------------------------------------------------------------------------------- 1 | { 2 | "OIDCDiscoveryURL": "https://dev-kua61hs5.us.auth0.com/", 3 | "OIDCClientID": "0qG6RUtcamK5Xsa6sBk81PdsEqblWh0T", 4 | "OIDCClientSecret": "OxjoeYLWTi7BtJI88P56iZN0q3Gflt4TnfFYhKry0t20jskuKDPlz1U5Zd_qYPRb", 5 | "BoundAudiences": ["0qG6RUtcamK5Xsa6sBk81PdsEqblWh0T"], 6 | "AllowedRedirectURIs": [ 7 | "http://127.0.0.1:8550/oidc/callback", 8 | "http://127.0.0.1:8500/ui/oidc/callback", 9 | "http://localhost:8550/oidc/callback", 10 | "http://localhost:8500/ui/oidc/callback" 11 | ], 12 | "ListClaimMappings": { 13 | "http://consul.internal/groups": "groups" 14 | } 15 | } -------------------------------------------------------------------------------- /docker-configs/certs/consul-agent-ca-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIL8vRpKNBEzesZjvgR5fsJCYlwEtGBOz70Ticw82Ru3KoAoGCCqGSM49 3 | AwEHoUQDQgAEKb5YX4YrXW/hmBLNSWJAK7Apo0Q4bIQdabpV8uASDiDstDSvOGsn 4 | lyGErffSj1KF3+JCRVXQLgrAnr8tyjR9RQ== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /docker-configs/certs/consul-agent-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC7jCCApSgAwIBAgIRAPlMfsWwIQM20dZg/DzSdwAwCgYIKoZIzj0EAwIwgbkx 3 | CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj 4 | bzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw 5 | FQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB 6 | IDMzMTM3NDk1Njc1Nzc2MDA0NjEzMjMxMjIyMTg5NTYzNjQ0OTAyNDAeFw0yMjA5 7 | MTIyMzE3MDFaFw0zMjA5MDkyMzE3MDFaMIG5MQswCQYDVQQGEwJVUzELMAkGA1UE 8 | CBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xGjAYBgNVBAkTETEwMSBTZWNv 9 | 
bmQgU3RyZWV0MQ4wDAYDVQQREwU5NDEwNTEXMBUGA1UEChMOSGFzaGlDb3JwIElu 10 | Yy4xQDA+BgNVBAMTN0NvbnN1bCBBZ2VudCBDQSAzMzEzNzQ5NTY3NTc3NjAwNDYx 11 | MzIzMTIyMjE4OTU2MzY0NDkwMjQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQp 12 | vlhfhitdb+GYEs1JYkArsCmjRDhshB1pulXy4BIOIOy0NK84ayeXIYSt99KPUoXf 13 | 4kJFVdAuCsCevy3KNH1Fo3sweTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUw 14 | AwEB/zApBgNVHQ4EIgQgaJlYercDWcegi+9uaRsnC3Qgpk+OsHCE7jHmt+3Lu+Qw 15 | KwYDVR0jBCQwIoAgaJlYercDWcegi+9uaRsnC3Qgpk+OsHCE7jHmt+3Lu+QwCgYI 16 | KoZIzj0EAwIDSAAwRQIhALeH4wt6wsonwZkyPD0retz6UxPWj86n5wpnzDo6RGO3 17 | AiBHUgLCm2FT/nxzEXilY/4iInU5TepxKDqfhxXvnDvqIw== 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /docker-configs/certs/dc1-server-consul-0-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIF9fjcTMGrgStKxNIDyf8eaGf1XaPm04ly73mEbgOUlmoAoGCCqGSM49 3 | AwEHoUQDQgAE8SRt71Gog6Gkk0pO8EGsBQYl/+yhiIU1R6ihjalB76ShA3C4AW2a 4 | xf1wnHBsR9bF0Tn9tR5s1foNYsVKXLneXw== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /docker-configs/certs/dc1-server-consul-0.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICsjCCAlegAwIBAgIRAI7LaBzwSEf3myBfGKPchuYwCgYIKoZIzj0EAwIwgbkx 3 | CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj 4 | bzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw 5 | FQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB 6 | IDMzMTM3NDk1Njc1Nzc2MDA0NjEzMjMxMjIyMTg5NTYzNjQ0OTAyNDAeFw0yMjA5 7 | MTIyMzIxMjVaFw0yNzA5MTEyMzIxMjVaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzEu 8 | Y29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8SRt71Gog6Gkk0pO8EGs 9 | BQYl/+yhiIU1R6ihjalB76ShA3C4AW2axf1wnHBsR9bF0Tn9tR5s1foNYsVKXLne 10 | X6OB2zCB2DAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG 11 | 
AQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEII1u2V/iyc0NpMLvZK/fxy4n 12 | asL6QOhlzuBijcO9vU4oMCsGA1UdIwQkMCKAIGiZWHq3A1nHoIvvbmkbJwt0IKZP 13 | jrBwhO4x5rfty7vkMEEGA1UdEQQ6MDiCEmNvbnN1bC1zZXJ2ZXIxLWRjMYIRc2Vy 14 | dmVyLmRjMS5jb25zdWyCCWxvY2FsaG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBG 15 | AiEAmnGjiqGjQ4Pika57aUpfq+fMHocMfKYwx0bRKQzie6sCIQDQIoliEdThT8hi 16 | c6tx9u1rISlohyABRbZCFG7zul8NvQ== 17 | -----END CERTIFICATE----- 18 | -------------------------------------------------------------------------------- /docker-configs/certs/dc2-server-consul-0-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIF+RsjcrSXYJZigLdusCWtXUnCG426AErtoGgCQjoTA+oAoGCCqGSM49 3 | AwEHoUQDQgAEGuOKa6ENM9EE+ny9ksX7icbHaLyZDhRUvQTd7MAMhNKIYlugGcD9 4 | YY5T5AvfLb3jipQmCHFXN7pZC42gXHkx0Q== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /docker-configs/certs/dc2-server-consul-0.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICsTCCAlegAwIBAgIRAN6mncHQu0JSUS9XeAiVpFIwCgYIKoZIzj0EAwIwgbkx 3 | CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj 4 | bzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw 5 | FQYDVQQKEw5IYXNoaUNvcnAgSW5jLjFAMD4GA1UEAxM3Q29uc3VsIEFnZW50IENB 6 | IDMzMTM3NDk1Njc1Nzc2MDA0NjEzMjMxMjIyMTg5NTYzNjQ0OTAyNDAeFw0yMjA5 7 | MTIyMzQyMDhaFw0yNzA5MTEyMzQyMDhaMBwxGjAYBgNVBAMTEXNlcnZlci5kYzIu 8 | Y29uc3VsMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGuOKa6ENM9EE+ny9ksX7 9 | icbHaLyZDhRUvQTd7MAMhNKIYlugGcD9YY5T5AvfLb3jipQmCHFXN7pZC42gXHkx 10 | 0aOB2zCB2DAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG 11 | AQUFBwMCMAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEIHOzbQsqeX2iRfi4HaHf8syU 12 | EDtAIUoHmZFa0Tw148/iMCsGA1UdIwQkMCKAIGiZWHq3A1nHoIvvbmkbJwt0IKZP 13 | jrBwhO4x5rfty7vkMEEGA1UdEQQ6MDiCEmNvbnN1bC1zZXJ2ZXIxLWRjMoIRc2Vy 14 | 
dmVyLmRjMi5jb25zdWyCCWxvY2FsaG9zdIcEfwAAATAKBggqhkjOPQQDAgNIADBF 15 | AiBSKU1/JngYjDzrCq2REwEMuKvPlMVm6tWbL5eSJfF0rAIhAMlypfSs2rZOaADN 16 | xqXWsom5vKCRQDmnrZYhgTNKs4Zj 17 | -----END CERTIFICATE----- 18 | -------------------------------------------------------------------------------- /docker-configs/configs/exported-services/exported-services-dc1-default.hcl: -------------------------------------------------------------------------------- 1 | Kind = "exported-services" 2 | Partition = "default" 3 | Name = "default" 4 | Services = [ 5 | { 6 | Name = "joshs-obnoxiously-long-service-name-gonna-take-awhile" 7 | Namespace = "default" 8 | Consumers = [ 9 | { 10 | Peer = "dc2-default" 11 | }, 12 | { 13 | Peer = "dc2-heimdall" 14 | } 15 | ] 16 | }, 17 | { 18 | Name = "web-upstream" 19 | Namespace = "default" 20 | Consumers = [ 21 | { 22 | Partition = "unicorn" 23 | } 24 | ] 25 | } 26 | ] 27 | 28 | -------------------------------------------------------------------------------- /docker-configs/configs/exported-services/exported-services-dc1-unicorn.hcl: -------------------------------------------------------------------------------- 1 | Kind = "exported-services" 2 | Partition = "unicorn" 3 | Name = "unicorn" 4 | Services = [ 5 | { 6 | Name = "unicorn-backend" 7 | Namespace = "backend" 8 | Consumers = [ 9 | { 10 | Partition = "default" 11 | } 12 | ] 13 | } 14 | ] 15 | 16 | -------------------------------------------------------------------------------- /docker-configs/configs/exported-services/exported-services-dc2-chunky.hcl: -------------------------------------------------------------------------------- 1 | Kind = "exported-services" 2 | Partition = "chunky" 3 | Name = "chunky" 4 | Services = [ 5 | { 6 | Name = "web-chunky" 7 | Namespace = "default" 8 | Consumers = [ 9 | { 10 | Peer = "dc1-default" 11 | } 12 | ] 13 | } 14 | ] 15 | 16 | -------------------------------------------------------------------------------- 
/docker-configs/configs/exported-services/exported-services-dc2-default.hcl: -------------------------------------------------------------------------------- 1 | Kind = "exported-services" 2 | Partition = "default" 3 | Name = "default" 4 | Services = [ 5 | { 6 | Name = "josh" 7 | Namespace = "default" 8 | Consumers = [ 9 | { 10 | Peer = "dc1-default" 11 | } 12 | ] 13 | } 14 | ] -------------------------------------------------------------------------------- /docker-configs/configs/exported-services/exported-services-dc2-unicorn_backend.hcl: -------------------------------------------------------------------------------- 1 | Kind = "exported-services" 2 | Partition = "unicorn" 3 | Name = "unicorn" 4 | Services = [ 5 | { 6 | Name = "unicorn-backend" 7 | Namespace = "backend" 8 | Consumers = [ 9 | { 10 | Peer = "dc1-unicorn" 11 | } 12 | ] 13 | } 14 | ] 15 | 16 | -------------------------------------------------------------------------------- /docker-configs/configs/exported-services/exported-services-donkey.hcl: -------------------------------------------------------------------------------- 1 | Kind = "exported-services" 2 | Partition = "donkey" 3 | Name = "donkey" 4 | Services = [ 5 | { 6 | Name = "donkey" 7 | Namespace = "default" 8 | Consumers = [ 9 | { 10 | Partition = "default" 11 | } 12 | ] 13 | } 14 | ] -------------------------------------------------------------------------------- /docker-configs/configs/intentions/dc1-unicorn_backend_failover-allow.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-intentions" 2 | Name = "unicorn-backend-failover" 3 | partition = "unicorn" 4 | namespace = "frontend" 5 | Sources = [ 6 | { 7 | Name = "unicorn-frontend" 8 | partition = "unicorn" 9 | namespace = "frontend" 10 | Action = "allow" 11 | } 12 | ] 13 | -------------------------------------------------------------------------------- /docker-configs/configs/intentions/dc1-unicorn_frontend-allow.hcl: 
-------------------------------------------------------------------------------- 1 | Kind = "service-intentions" 2 | Name = "unicorn-backend" 3 | partition = "unicorn" 4 | namespace = "backend" 5 | Sources = [ 6 | { 7 | Name = "unicorn-frontend" 8 | partition = "unicorn" 9 | namespace = "frontend" 10 | Action = "allow" 11 | } 12 | ] 13 | -------------------------------------------------------------------------------- /docker-configs/configs/intentions/dc2-unicorn_frontend-allow.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-intentions" 2 | Name = "unicorn-backend" 3 | partition = "unicorn" 4 | namespace = "backend" 5 | Sources = [ 6 | { 7 | Name = "unicorn-frontend" 8 | namespace = "frontend" 9 | peer = "dc1-unicorn" 10 | Action = "allow" 11 | }, 12 | ] 13 | 14 | -------------------------------------------------------------------------------- /docker-configs/configs/intentions/web_chunky-allow.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-intentions" 2 | Name = "web-chunky" 3 | partition = "chunky" 4 | Sources = [ 5 | { 6 | Name = "web" 7 | peer = "dc1-default" 8 | Action = "allow" 9 | } 10 | ] 11 | 12 | -------------------------------------------------------------------------------- /docker-configs/configs/intentions/web_upstream-allow.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-intentions" 2 | Name = "web-upstream" 3 | Sources = [ 4 | { 5 | Name = "web" 6 | Action = "allow" 7 | }, 8 | { 9 | Name = "unicorn-frontend" 10 | namespace = "frontend" 11 | partition = "unicorn" 12 | Action = "allow" 13 | } 14 | ] -------------------------------------------------------------------------------- /docker-configs/configs/mgw/dc1-mgw.hcl: -------------------------------------------------------------------------------- 1 | Kind = "mesh" 2 | Peering { 3 | PeerThroughMeshGateways = true 4 | } 5 | 
-------------------------------------------------------------------------------- /docker-configs/configs/mgw/dc2-mgw.hcl: -------------------------------------------------------------------------------- 1 | Kind = "mesh" 2 | Peering { 3 | PeerThroughMeshGateways = true 4 | } -------------------------------------------------------------------------------- /docker-configs/configs/prepared_queries/pq-template.json: -------------------------------------------------------------------------------- 1 | { 2 | "Template": { 3 | "Type": "name_prefix_match", 4 | "Regexp": "^geo-db-(.*?)-([^\\-]+?)$", 5 | "RemoveEmptyTags": false 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /docker-configs/configs/prepared_queries/pq-unicorn-sg.json: -------------------------------------------------------------------------------- 1 | { 2 | "Name": "unicorn-sameness", 3 | "Service": { 4 | "Service": "unicorn-backend", 5 | "namespace": "backend", 6 | "partition": "unicorn", 7 | "SamenessGroup": "unicorn" 8 | } 9 | } -------------------------------------------------------------------------------- /docker-configs/configs/prepared_queries/pq-unicorn-targets.json: -------------------------------------------------------------------------------- 1 | { 2 | "Name": "unicorn-targets", 3 | "Service": { 4 | "Service": "unicorn-backend", 5 | "namespace": "backend", 6 | "partition": "unicorn", 7 | "Failover": { 8 | "Targets": [ 9 | { 10 | "Peer": "dc2-unicorn", 11 | "Namespace": "backend", 12 | "Partition": "unicorn" 13 | }, 14 | { 15 | "Peer": "dc3-default", 16 | "Namespace": "unicorn", 17 | "Partition": "unicorn" 18 | }, 19 | { 20 | "Peer": "dc3-cernunnos", 21 | "Namespace": "unicorn", 22 | "Partition": "unicorn" 23 | }, 24 | { 25 | "Peer": "dc4-default", 26 | "Namespace": "unicorn", 27 | "Partition": "unicorn" 28 | }, 29 | { 30 | "Peer": "dc4-taranis", 31 | "Namespace": "unicorn", 32 | "Partition": "unicorn" 33 | } 34 | ] 35 | } 36 | } 37 | } 38 | 39 | 
40 | -------------------------------------------------------------------------------- /docker-configs/configs/prepared_queries/pq-web-chunky-peer.json: -------------------------------------------------------------------------------- 1 | { 2 | "Name": "web-chunky-peer", 3 | "Service": { 4 | "Service": "web-chunky", 5 | "Namespace": "default", 6 | "Peer": "dc2-chunky" 7 | } 8 | } -------------------------------------------------------------------------------- /docker-configs/configs/prepared_queries/pq-web-chunky-sg.json: -------------------------------------------------------------------------------- 1 | { 2 | "Name": "web-chunky-sg", 3 | "Service": { 4 | "Service": "web-chunky", 5 | "Namespace": "default", 6 | "SamenessGroup": "web" 7 | } 8 | } -------------------------------------------------------------------------------- /docker-configs/configs/proxy-defaults/dc1-default-proxydefaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "proxy-defaults" 2 | Name = "global" 3 | Partition = "default" 4 | 5 | Config { 6 | envoy_prometheus_bind_addr = "0.0.0.0:9102" 7 | protocol = "http" 8 | } -------------------------------------------------------------------------------- /docker-configs/configs/proxy-defaults/dc1-unicorn-proxydefaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "proxy-defaults" 2 | Name = "global" 3 | Partition = "unicorn" 4 | 5 | Config { 6 | envoy_prometheus_bind_addr = "0.0.0.0:9102" 7 | protocol = "http" 8 | } -------------------------------------------------------------------------------- /docker-configs/configs/proxy-defaults/dc2-chunky-proxydefaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "proxy-defaults" 2 | Name = "global" 3 | Partition = "chunky" 4 | 5 | Config { 6 | envoy_prometheus_bind_addr = "0.0.0.0:9102" 7 | protocol = "http" 8 | } 
-------------------------------------------------------------------------------- /docker-configs/configs/proxy-defaults/dc2-default-proxydefaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "proxy-defaults" 2 | Name = "global" 3 | Partition = "default" 4 | 5 | Config { 6 | envoy_prometheus_bind_addr = "0.0.0.0:9102" 7 | protocol = "http" 8 | } -------------------------------------------------------------------------------- /docker-configs/configs/proxy-defaults/dc2-unicorn-proxydefaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "proxy-defaults" 2 | Name = "global" 3 | Partition = "unicorn" 4 | 5 | Config { 6 | envoy_prometheus_bind_addr = "0.0.0.0:9102" 7 | protocol = "http" 8 | } -------------------------------------------------------------------------------- /docker-configs/configs/sameness-groups/dc1-default-ssg-web.hcl: -------------------------------------------------------------------------------- 1 | Kind = "sameness-group" 2 | Name = "web" 3 | Partition = "default" 4 | DefaultForFailover = false 5 | Members = [ 6 | { Partition = "default" }, 7 | { Peer = "dc2-chunky" } 8 | ] -------------------------------------------------------------------------------- /docker-configs/configs/sameness-groups/dc1-unicorn-ssg-unicorn.hcl: -------------------------------------------------------------------------------- 1 | Kind = "sameness-group" 2 | Name = "unicorn" 3 | Partition = "unicorn" 4 | DefaultForFailover = false 5 | Members = [ 6 | { Partition = "unicorn" }, 7 | { Peer = "dc2-unicorn" }, 8 | { Peer = "dc3-default" }, 9 | { Peer = "dc3-cernunnos" }, 10 | { Peer = "dc4-default" }, 11 | { Peer = "dc4-taranis" } 12 | ] -------------------------------------------------------------------------------- /docker-configs/configs/service-defaults/unicorn-backend-defaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = 
"service-defaults" 2 | Name = "unicorn-backend" 3 | Partition = "unicorn" 4 | Namespace = "backend" 5 | 6 | Protocol = "tcp" 7 | 8 | MeshGateway = { 9 | Mode = "local" 10 | } 11 | -------------------------------------------------------------------------------- /docker-configs/configs/service-defaults/unicorn-frontend-defaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-defaults" 2 | Name = "unicorn-frontend" 3 | Partition = "unicorn" 4 | Namespace = "frontend" 5 | 6 | Protocol = "tcp" 7 | 8 | MeshGateway = { 9 | Mode = "local" 10 | } 11 | -------------------------------------------------------------------------------- /docker-configs/configs/service-defaults/web-chunky-defaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-defaults" 2 | Name = "web-chunky" 3 | Partition = "chunky" 4 | 5 | Protocol = "http" 6 | 7 | MeshGateway = { 8 | Mode = "local" 9 | } 10 | -------------------------------------------------------------------------------- /docker-configs/configs/service-defaults/web-defaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-defaults" 2 | Name = "web" 3 | 4 | Protocol = "http" 5 | 6 | MeshGateway = { 7 | Mode = "local" 8 | } 9 | -------------------------------------------------------------------------------- /docker-configs/configs/service-defaults/web-upstream-defaults.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-defaults" 2 | Name = "web-upstream" 3 | 4 | Protocol = "http" 5 | 6 | MeshGateway = { 7 | mode = "local" 8 | } 9 | -------------------------------------------------------------------------------- /docker-configs/configs/service-resolver/dc1-unicorn-backend-failover.hcl: -------------------------------------------------------------------------------- 1 | Kind = "service-resolver" 2 | Name = 
"unicorn-backend" 3 | Partition = "unicorn" 4 | Namespace = "backend" 5 | 6 | ConnectTimeout = "0s" 7 | 8 | Failover = { 9 | "*" = { 10 | Targets = [ 11 | { 12 | Service = "unicorn-backend", 13 | Peer = "dc2-unicorn", 14 | Namespace = "backend" 15 | }, 16 | { 17 | Service = "unicorn-backend", 18 | Peer = "dc3-default", 19 | Namespace = "unicorn" 20 | } 21 | ] 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /docker-configs/configs/services/FakeService-descriptions.txt: -------------------------------------------------------------------------------- 1 | https://www.html-code-generator.com/html/html-online-editor 2 | 3 | Replace " w/ " before putting into docker-compose variable. The editor above should automagically do it. 4 | 5 | Frontend-unicorn (DC1) 6 | ------------------------------------------ 7 |

The Unicorn Application

8 | 9 |

This Unicorn-frontend application has 3 configured upstream services. Left to right:

10 | 11 |

unicorn-backend (DC1)
12 | This is actually a service-resolver that is named the same as the real service DC1/unicorn/backend/unicorn-backend.
13 | It utilizes failover targets in the following order:

14 | 15 | 20 | 21 |

To quickly nuke containers and observe the failover in real-time, use the zork.sh script option "3) Unicorn Demo".

22 | 23 |

unicorn-backend (DC2) - Static
24 | This is the static version of the "(peer) dc2-unicorn/backend/unicorn-backend" that is used by the service-resolver in the first upstream above.

25 | 26 |

It is handy to have a duplicated explicit upstream defined, so we can better troubleshoot between issues with peering connections versus service-resolvers.

27 | 28 |

unicorn-backend (DC3) - Static
29 | This is the static version of the "(peer) dc3-unicorn/unicorn/unicorn-backend" that is used in the service-resolver above.

30 | 31 |

 

32 | ------------------------------------------ -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-josh-long.hcl: -------------------------------------------------------------------------------- 1 | services { 2 | id = "josh-long-1" 3 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 4 | address = "10.0.0.1" 5 | port = 6000 6 | } 7 | services { 8 | id = "josh-long-2" 9 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 10 | address = "10.0.0.2" 11 | port = 6000 12 | } 13 | services { 14 | id = "josh-long-3" 15 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 16 | address = "10.0.0.3" 17 | port = 6000 18 | } 19 | services { 20 | id = "josh-long-4" 21 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 22 | address = "10.0.0.4" 23 | port = 6000 24 | } 25 | services { 26 | id = "josh-long-5" 27 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 28 | address = "10.0.0.5" 29 | port = 6000 30 | } 31 | services { 32 | id = "josh-long-6" 33 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 34 | address = "10.0.0.6" 35 | port = 6000 36 | } 37 | services { 38 | id = "josh-long-7" 39 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 40 | address = "10.0.0.7" 41 | port = 6000 42 | } 43 | services { 44 | id = "josh-long-8" 45 | name = "joshs-obnoxiously-long-service-name-gonna-take-awhile-and-i-wonder-how-far-we-can-go-before-something-breaks-hrm" 46 | address = "10.0.0.8" 47 | port = 6000 48 | } 
-------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-josh.hcl: -------------------------------------------------------------------------------- 1 | services { 2 | id = "josh-local-0" 3 | name = "josh" 4 | address = "69.0.0.0" 5 | port = 6000 6 | } 7 | services { 8 | id = "josh-local-1" 9 | name = "josh" 10 | address = "69.0.0.1" 11 | port = 6000 12 | } 13 | services { 14 | id = "josh-local-2" 15 | name = "josh" 16 | address = "69.0.0.2" 17 | port = 6000 18 | } 19 | services { 20 | id = "josh-local-3" 21 | name = "josh" 22 | address = "69.0.0.3" 23 | port = 6000 24 | } 25 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-proj1-baphomet-dereg.json: -------------------------------------------------------------------------------- 1 | { 2 | "Address": "notreal.local", 3 | "Node": "virtual", 4 | "partition": "proj1" 5 | } 6 | 7 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-proj1-baphomet.hcl: -------------------------------------------------------------------------------- 1 | services { 2 | id = "baphomet-0" 3 | name = "baphomet" 4 | address = "11.0.0.0" 5 | port = 6000 6 | partition = "proj1" 7 | } 8 | services { 9 | id = "baphomet-1" 10 | name = "baphomet" 11 | address = "11.0.0.1" 12 | port = 6000 13 | partition = "proj1" 14 | } 15 | services { 16 | id = "baphomet-2" 17 | name = "baphomet" 18 | address = "11.0.0.2" 19 | port = 6000 20 | partition = "proj1" 21 | } 22 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-proj1-baphomet0.json: -------------------------------------------------------------------------------- 1 | { 2 | "Address": "notreal.local", 3 | "Node": "virtual", 4 | "partition": "proj1", 5 | "Service": { 6 | "ID": "baphomet-0", 7 | "Service": "virtual-baphomet", 8 | "address": 
"12.0.0.1", 9 | "partition": "proj1", 10 | "Port": 80 11 | } 12 | } 13 | 14 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-proj1-baphomet1.json: -------------------------------------------------------------------------------- 1 | { 2 | "Address": "notreal.local", 3 | "Node": "virtual", 4 | "partition": "proj1", 5 | "Service": { 6 | "ID": "baphomet-1", 7 | "Service": "virtual-baphomet", 8 | "address": "12.0.0.2", 9 | "partition": "proj1", 10 | "Port": 80 11 | } 12 | } 13 | 14 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-proj1-baphomet2.json: -------------------------------------------------------------------------------- 1 | { 2 | "Address": "notreal.local", 3 | "Node": "virtual", 4 | "partition": "proj1", 5 | "Service": { 6 | "ID": "baphomet-3", 7 | "Service": "virtual-baphomet", 8 | "address": "12.0.0.3", 9 | "partition": "proj1", 10 | "Port": 80 11 | } 12 | } 13 | 14 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-unicorn-backend.hcl: -------------------------------------------------------------------------------- 1 | service { 2 | name = "unicorn-backend" 3 | id = "unicorn-backend-1" 4 | partition = "unicorn" 5 | namespace = "backend" 6 | address = "10.5.0.111" 7 | port = 10001 8 | 9 | connect { 10 | sidecar_service { 11 | port = 20000 12 | 13 | check { 14 | name = "Connect Envoy Sidecar" 15 | tcp = "10.5.0.111:20000" 16 | interval ="10s" 17 | } 18 | } 19 | } 20 | } -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-unicorn-frontend.hcl: -------------------------------------------------------------------------------- 1 | service { 2 | name = "unicorn-frontend" 3 | id = "unicorn-frontend-1" 4 | partition = "unicorn" 5 | namespace = "frontend" 6 | address = "10.5.0.110" 7 | port = 
10000 8 | 9 | connect { 10 | sidecar_service { 11 | port = 20000 12 | 13 | check { 14 | name = "Connect Envoy Sidecar" 15 | tcp = "10.5.0.110:20000" 16 | interval ="10s" 17 | } 18 | 19 | proxy { 20 | upstreams { 21 | destination_name = "unicorn-backend" // This points to the service-resolver of the same name (SR: unicorn-backend) 22 | destination_namespace = "backend" 23 | local_bind_address = "127.0.0.1" 24 | local_bind_port = 11000 25 | } 26 | upstreams { 27 | destination_name = "unicorn-backend" 28 | destination_peer = "dc2-unicorn" 29 | destination_namespace = "backend" 30 | local_bind_address = "127.0.0.1" 31 | local_bind_port = 11001 32 | } 33 | upstreams { 34 | destination_name = "unicorn-backend" 35 | destination_peer = "dc3-default" 36 | destination_namespace = "unicorn" 37 | local_bind_address = "127.0.0.1" 38 | local_bind_port = 11002 39 | } 40 | upstreams { 41 | destination_name = "web-upstream" 42 | destination_partition = "default" 43 | destination_namespace = "default" 44 | local_bind_address = "127.0.0.1" 45 | local_bind_port = 11003 46 | } 47 | } 48 | } 49 | } 50 | } -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-web-upstream_v1.hcl: -------------------------------------------------------------------------------- 1 | service { 2 | name = "web-upstream" 3 | id = "web-upstream-v1" 4 | address = "10.5.0.101" 5 | port = 8000 6 | 7 | connect { 8 | sidecar_service { 9 | port = 20000 10 | 11 | check { 12 | name = "Connect Envoy Sidecar" 13 | tcp = "10.5.0.101:20000" 14 | interval ="10s" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc1-web_v1.hcl: -------------------------------------------------------------------------------- 1 | service { 2 | name = "web" 3 | id = "web-v1" 4 | address = "10.5.0.100" 5 | port = 9090 6 | 7 | connect { 8 | sidecar_service { 9 | port = 20000 10 | 11 | 
check { 12 | name = "Connect Envoy Sidecar" 13 | tcp = "10.5.0.100:20000" 14 | interval ="10s" 15 | } 16 | 17 | proxy { 18 | upstreams { 19 | destination_name = "web-upstream" 20 | local_bind_address = "127.0.0.1" 21 | local_bind_port = 9091 22 | } 23 | upstreams { 24 | destination_name = "web-chunky" 25 | destination_peer = "dc2-chunky" 26 | local_bind_address = "127.0.0.1" 27 | local_bind_port = 9092 28 | } 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /docker-configs/configs/services/dc2-josh.hcl: -------------------------------------------------------------------------------- 1 | services { 2 | id = "josh3-0" 3 | name = "josh" 4 | address = "11.0.0.0" 5 | port = 6000 6 | } 7 | services { 8 | id = "josh3-1" 9 | name = "josh" 10 | address = "11.0.0.1" 11 | port = 6000 12 | } 13 | services { 14 | id = "josh3-2" 15 | name = "josh" 16 | address = "11.0.0.2" 17 | port = 6000 18 | } 19 | services { 20 | id = "josh3-3" 21 | name = "josh" 22 | address = "11.0.0.3" 23 | port = 6000 24 | } 25 | services { 26 | id = "josh3-4" 27 | name = "josh" 28 | address = "11.0.0.4" 29 | port = 6000 30 | } 31 | services { 32 | id = "josh3-5" 33 | name = "josh" 34 | address = "11.0.0.5" 35 | port = 6000 36 | } 37 | services { 38 | id = "josh3-6" 39 | name = "josh" 40 | address = "11.0.0.6" 41 | port = 6000 42 | } -------------------------------------------------------------------------------- /docker-configs/configs/services/dc2-unicorn-backend.hcl: -------------------------------------------------------------------------------- 1 | service { 2 | name = "unicorn-backend" 3 | id = "unicorn-backend-1" 4 | partition = "unicorn" 5 | namespace = "backend" 6 | address = "10.6.0.111" 7 | port = 10001 8 | 9 | connect { 10 | sidecar_service { 11 | port = 20000 12 | 13 | check { 14 | name = "Connect Envoy Sidecar" 15 | tcp = "10.6.0.111:20000" 16 | interval ="10s" 17 | } 18 | } 19 | } 20 | } 
-------------------------------------------------------------------------------- /docker-configs/configs/services/dc2-web-chunky_v1.hcl: -------------------------------------------------------------------------------- 1 | service { 2 | name = "web-chunky" 3 | id = "web-chunky-v1" 4 | partition = "chunky" 5 | address = "10.6.0.100" 6 | port = 8000 7 | 8 | connect { 9 | sidecar_service { 10 | port = 20000 11 | 12 | check { 13 | name = "Connect Envoy Sidecar" 14 | tcp = "10.6.0.100:20000" 15 | interval ="10s" 16 | } 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /docker-configs/configs/services/donkey-ap1.hcl: -------------------------------------------------------------------------------- 1 | services { 2 | id = "Donkey-0" 3 | name = "donkey" 4 | address = "11.0.0.0" 5 | port = 6000 6 | partition = "donkey" 7 | } 8 | services { 9 | id = "Donkey-1" 10 | name = "donkey" 11 | address = "11.0.0.1" 12 | port = 6000 13 | partition = "donkey" 14 | } 15 | services { 16 | id = "Donkey-2" 17 | name = "donkey" 18 | address = "11.0.0.2" 19 | port = 6000 20 | partition = "donkey" 21 | } 22 | services { 23 | id = "Donkey-3" 24 | name = "donkey" 25 | address = "11.0.0.3" 26 | port = 6000 27 | partition = "donkey" 28 | } 29 | services { 30 | id = "Donkey-4" 31 | name = "donkey" 32 | address = "11.0.0.4" 33 | port = 6000 34 | partition = "donkey" 35 | } 36 | -------------------------------------------------------------------------------- /docker-configs/docker_vars/acl-custom.env: -------------------------------------------------------------------------------- 1 | CONSUL_IMAGE="hashicorp/consul-enterprise:1.17.0-ent" 2 | CONVOY_IMAGE="joshwolfer/consul-envoy:v1.17.0-ent_v1.27.2" 3 | FAKESERVICE_IMAGE="nicholasjackson/fake-service:v0.26.0" 4 | 5 | TOKEN_GATEWAY_DC1="root" 6 | TOKEN_GATEWAY_DC1_UNICORN="root" 7 | TOKEN_GATEWAY_DC2="root" 8 | TOKEN_GATEWAY_DC2_CHUNKY="root" 9 | TOKEN_GATEWAY_DC2_UNICORN="root" 10 | 11 | 
TOKEN_WEB="00000000-0000-0000-0000-000000007777" 12 | TOKEN_WEB_UPSTREAM="00000000-0000-0000-0000-000000008888" 13 | TOKEN_WEB_CHUNKY="00000000-0000-0000-0000-000000009999" 14 | 15 | TOKEN_UNICORN_FRONTEND_DC1="root" 16 | TOKEN_UNICORN_BACKEND_DC1="root" 17 | TOKEN_UNICORN_BACKEND_DC2="root" -------------------------------------------------------------------------------- /docker-configs/docker_vars/acl-root.env: -------------------------------------------------------------------------------- 1 | CONSUL_IMAGE="hashicorp/consul-enterprise:1.17.0-ent" 2 | CONVOY_IMAGE="joshwolfer/consul-envoy:v1.17.0-ent_v1.27.2" 3 | FAKESERVICE_IMAGE="nicholasjackson/fake-service:v0.26.0" 4 | 5 | TOKEN_GATEWAY_DC1="root" 6 | TOKEN_GATEWAY_DC1_UNICORN="root" 7 | TOKEN_GATEWAY_DC2="root" 8 | TOKEN_GATEWAY_DC2_CHUNKY="root" 9 | TOKEN_GATEWAY_DC2_UNICORN="root" 10 | 11 | TOKEN_WEB="root" 12 | TOKEN_WEB_UPSTREAM="root" 13 | TOKEN_WEB_CHUNKY="root" 14 | 15 | TOKEN_UNICORN_FRONTEND_DC1="root" 16 | TOKEN_UNICORN_BACKEND_DC1="root" 17 | TOKEN_UNICORN_BACKEND_DC2="root" -------------------------------------------------------------------------------- /docker-configs/docker_vars/acl-secure.env: -------------------------------------------------------------------------------- 1 | CONSUL_IMAGE="hashicorp/consul-enterprise:1.17.0-ent" 2 | CONVOY_IMAGE="joshwolfer/consul-envoy:v1.17.0-ent_v1.27.2" 3 | FAKESERVICE_IMAGE="nicholasjackson/fake-service:v0.26.0" 4 | 5 | TOKEN_GATEWAY_DC1="root" 6 | TOKEN_GATEWAY_DC1_UNICORN="root" 7 | TOKEN_GATEWAY_DC2="root" 8 | TOKEN_GATEWAY_DC2_CHUNKY="root" 9 | TOKEN_GATEWAY_DC2_UNICORN="root" 10 | 11 | TOKEN_WEB="00000000-0000-0000-0000-000000007777" 12 | TOKEN_WEB_UPSTREAM="00000000-0000-0000-0000-000000008888" 13 | TOKEN_WEB_CHUNKY="00000000-0000-0000-0000-000000009999" 14 | 15 | TOKEN_UNICORN_FRONTEND_DC1="00000000-0000-0000-0000-000000004444" 16 | TOKEN_UNICORN_BACKEND_DC1="00000000-0000-0000-0000-000000005555" 17 | 
TOKEN_UNICORN_BACKEND_DC2="00000000-0000-0000-0000-000000006666" -------------------------------------------------------------------------------- /docker-configs/docker_vars/mac_arm64-acl-custom.env: -------------------------------------------------------------------------------- 1 | CONSUL_IMAGE="hashicorp/consul-enterprise:1.17.0-rc1-ent" 2 | CONVOY_IMAGE="jessingrass1/doc-consul:v1.17-dev_v1.27.0" 3 | FAKESERVICE_IMAGE="nicholasjackson/fake-service:v0.26.0" 4 | 5 | TOKEN_GATEWAY_DC1="root" 6 | TOKEN_GATEWAY_DC1_UNICORN="root" 7 | TOKEN_GATEWAY_DC2="root" 8 | TOKEN_GATEWAY_DC2_CHUNKY="root" 9 | TOKEN_GATEWAY_DC2_UNICORN="root" 10 | 11 | TOKEN_WEB="00000000-0000-0000-0000-000000007777" 12 | TOKEN_WEB_UPSTREAM="00000000-0000-0000-0000-000000008888" 13 | TOKEN_WEB_CHUNKY="00000000-0000-0000-0000-000000009999" 14 | 15 | TOKEN_UNICORN_FRONTEND_DC1="root" 16 | TOKEN_UNICORN_BACKEND_DC1="root" 17 | TOKEN_UNICORN_BACKEND_DC2="root" -------------------------------------------------------------------------------- /docker-configs/docker_vars/mac_arm64-acl-root.env: -------------------------------------------------------------------------------- 1 | CONSUL_IMAGE="hashicorp/consul-enterprise:1.17.0-rc1-ent" 2 | CONVOY_IMAGE="jessingrass1/doc-consul:v1.17-dev_v1.27.0" 3 | FAKESERVICE_IMAGE="nicholasjackson/fake-service:v0.26.0" 4 | 5 | TOKEN_GATEWAY_DC1="root" 6 | TOKEN_GATEWAY_DC1_UNICORN="root" 7 | TOKEN_GATEWAY_DC2="root" 8 | TOKEN_GATEWAY_DC2_CHUNKY="root" 9 | TOKEN_GATEWAY_DC2_UNICORN="root" 10 | 11 | TOKEN_WEB="root" 12 | TOKEN_WEB_UPSTREAM="root" 13 | TOKEN_WEB_CHUNKY="root" 14 | 15 | TOKEN_UNICORN_FRONTEND_DC1="root" 16 | TOKEN_UNICORN_BACKEND_DC1="root" 17 | TOKEN_UNICORN_BACKEND_DC2="root" -------------------------------------------------------------------------------- /docker-configs/docker_vars/mac_arm64-acl-secure.env: -------------------------------------------------------------------------------- 1 | CONSUL_IMAGE="hashicorp/consul-enterprise:1.17.0-rc1-ent" 2 | 
CONVOY_IMAGE="jessingrass1/doc-consul:v1.17-dev_v1.27.0" 3 | FAKESERVICE_IMAGE="nicholasjackson/fake-service:v0.26.0" 4 | 5 | TOKEN_GATEWAY_DC1="root" 6 | TOKEN_GATEWAY_DC1_UNICORN="root" 7 | TOKEN_GATEWAY_DC2="root" 8 | TOKEN_GATEWAY_DC2_CHUNKY="root" 9 | TOKEN_GATEWAY_DC2_UNICORN="root" 10 | 11 | TOKEN_WEB="00000000-0000-0000-0000-000000007777" 12 | TOKEN_WEB_UPSTREAM="00000000-0000-0000-0000-000000008888" 13 | TOKEN_WEB_CHUNKY="00000000-0000-0000-0000-000000009999" 14 | 15 | TOKEN_UNICORN_FRONTEND_DC1="00000000-0000-0000-0000-000000004444" 16 | TOKEN_UNICORN_BACKEND_DC1="00000000-0000-0000-0000-000000005555" 17 | TOKEN_UNICORN_BACKEND_DC2="00000000-0000-0000-0000-000000006666" -------------------------------------------------------------------------------- /docker-configs/prometheus/alert.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: DemoAlerts 3 | rules: 4 | - alert: InstanceDown 5 | expr: up{job="services"} < 1 6 | for: 5m -------------------------------------------------------------------------------- /docker-configs/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 30s 3 | scrape_timeout: 10s 4 | 5 | # rule_files: 6 | # - alert.yml # This is for setting up alerts which we probably don't need in Doctor Consul... 
7 | 8 | scrape_configs: 9 | - job_name: services 10 | metrics_path: /metrics 11 | static_configs: 12 | - targets: 13 | # - 'prometheus:9090' 14 | - 'web:9102' 15 | - 'web-upstream:9102' 16 | - 'unicorn-frontend-dc1:9102' 17 | - 'unicorn-backend-dc1:9102' 18 | - 'web-chunky:9102' 19 | - 'unicorn-backend-dc2:9102' -------------------------------------------------------------------------------- /docker-configs/scripts/app-baphomet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ========================================== 4 | # Register External Services 5 | # ========================================== 6 | 7 | echo -e "${GRN}" 8 | echo -e "==========================================" 9 | echo -e " Register External Baphomet Services" 10 | echo -e "==========================================${NC}" 11 | 12 | echo -e "${GRN}DC1/proj1/virtual-baphomet ${NC}" 13 | 14 | echo "" 15 | echo -e "${GRN}DC1/Proj1/default/baphomet0:${NC} $(curl -s --request PUT --data @./docker-configs/configs/services/dc1-proj1-baphomet0.json --header "X-Consul-Token: root" "${DC1}/v1/catalog/register")" 16 | echo -e "${GRN}DC1/Proj1/default/baphomet1:${NC} $(curl -s --request PUT --data @./docker-configs/configs/services/dc1-proj1-baphomet1.json --header "X-Consul-Token: root" "${DC1}/v1/catalog/register")" 17 | echo -e "${GRN}DC1/Proj1/default/baphomet2:${NC} $(curl -s --request PUT --data @./docker-configs/configs/services/dc1-proj1-baphomet2.json --header "X-Consul-Token: root" "${DC1}/v1/catalog/register")" 18 | 19 | # ------------------------------------------ 20 | # Partition proj1 RBAC 21 | # ------------------------------------------ 22 | 23 | echo -e "${GRN}" 24 | echo -e "------------------------------------------" 25 | echo -e " Partition proj1 RBAC" 26 | echo -e "------------------------------------------${NC}" 27 | echo -e "" 28 | 29 | echo -e "${GRN}ACL Policy+Role: DC1/proj1/team-proj1-rw${NC}" 30 | consul acl policy 
create -name team-proj1-rw -rules @./docker-configs/acl/team-proj1-rw.hcl -http-addr="$DC1" 31 | consul acl role create -name team-proj1-rw -policy-name team-proj1-rw -http-addr="$DC1" 32 | echo -e "" 33 | echo -e "${GRN}ACL Token: 000000002222${NC}" 34 | consul acl token create \ 35 | -partition=default \ 36 | -role-name=team-proj1-rw \ 37 | -secret="00000000-0000-0000-0000-000000002222" \ 38 | -accessor="00000000-0000-0000-0000-000000002222" \ 39 | -http-addr="$DC1" 40 | 41 | -------------------------------------------------------------------------------- /docker-configs/scripts/jwt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ========================================== 4 | # JWT Auth configuration 5 | # ========================================== 6 | 7 | # OIDC is setup with Auth0 and grants read to the Baphomet services in the Proj1 Admin Partition. 8 | 9 | # Enable JWT auth in Consul - (Coming soon) 10 | 11 | # consul acl auth-method create -type jwt \ 12 | # -name jwt \ 13 | # -max-token-ttl=30m \ 14 | # -config=@./docker-configs/auth/oidc-auth.json 15 | 16 | # consul acl binding-rule create \ 17 | # -method=auth0 \ 18 | # -bind-type=role \ 19 | # -bind-name=team-proj1-rw \ 20 | # -selector='proj1 in list.groups' -------------------------------------------------------------------------------- /docker-configs/scripts/oidc-auth0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ========================================== 4 | # OIDC Connectivity with Auth0 5 | # ========================================== 6 | 7 | # OIDC is setup with Auth0 and grants read to the Baphomet services in the Proj1 Admin Partition. 
8 | 9 | # ========================================== 10 | # OIDC Auth 11 | # ========================================== 12 | 13 | echo -e "${GRN}" 14 | echo -e "==========================================" 15 | echo -e " OIDC Auth" 16 | echo -e "==========================================${NC}" 17 | 18 | # Enable OIDC in Consul 19 | echo -e "" 20 | echo -e "${GRN}Enable OIDC in Consul w/ Auth0 ${NC}" 21 | 22 | consul acl auth-method create -type oidc \ 23 | -name auth0 \ 24 | -max-token-ttl=30m \ 25 | -config=@./docker-configs/auth/oidc-auth.json \ 26 | -http-addr="$DC1" 27 | 28 | # ------------------------------------------ 29 | # Binding rule to map Auth0 groups to Consul roles 30 | # ------------------------------------------ 31 | 32 | echo -e "${GRN}" 33 | echo -e "------------------------------------------" 34 | echo -e "Binding rules to map Auth0 groups to Consul roles" 35 | echo -e "------------------------------------------${NC}" 36 | 37 | # DC1/Proj1 Admins 38 | 39 | echo -e "" 40 | echo -e "${GRN}DC1 team-proj1-rw${NC}" 41 | 42 | consul acl binding-rule create \ 43 | -method=auth0 \ 44 | -bind-type=role \ 45 | -bind-name=team-proj1-rw \ 46 | -selector='proj1 in list.groups' \ 47 | -http-addr="$DC1" 48 | 49 | # DC1 Admins 50 | 51 | echo -e "" 52 | echo -e "${GRN}DC1 consul-admins${NC}" 53 | 54 | consul acl binding-rule create \ 55 | -method=auth0 \ 56 | -bind-type=role \ 57 | -bind-name=consul-admins \ 58 | -selector='admins in list.groups' \ 59 | -http-addr="$DC1" -------------------------------------------------------------------------------- /docker-configs/scripts/vm-outputs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # -------------------------------------------------------------------------------------------- 6 | # Default Values (overwritten when provisioning to alternate environments (EKS, GKE, ...)) 7 | # 
-------------------------------------------------------------------------------------------- 8 | 9 | DC1_ADDR=http://127.0.0.1:8500 10 | DC2_ADDR=http://127.0.0.1:8501 11 | 12 | DC1_WEB_UI_ADDR=http://localhost:9000/ui/ 13 | DC1_UNICORN_FRONTEND_UI_ADDR=http://localhost:10000/ui/ 14 | DC1_PROMETHEUS=http://localhost:9090/ 15 | 16 | # ============================================================================================================================== 17 | # Outputs 18 | # ============================================================================================================================== 19 | 20 | # ---------------------------------------------- 21 | # Consul Addresses 22 | # ---------------------------------------------- 23 | 24 | echo "" 25 | echo -e "${GRN}------------------------------------------" 26 | echo -e " VM-Style Outputs" 27 | echo -e "------------------------------------------${NC}" 28 | echo "" 29 | 30 | echo -e "${GRN}Consul UI Addresses: ${NC}" 31 | echo -e " ${YELL}DC1${NC}: $DC1_ADDR/ui/" 32 | echo -e " ${YELL}DC2${NC}: $DC2_ADDR/ui/" 33 | echo -e "" 34 | echo -e "${RED}Don't forget to login to the UI using token${NC}: 'root'" 35 | echo -e "" 36 | 37 | echo -e "${GRN}Export ENV Variables ${NC}" 38 | echo -e " export DC1=$DC1_ADDR" 39 | echo -e " export DC2=$DC2_ADDR" 40 | echo -e " export CONSUL_HTTP_TOKEN=root" 41 | echo "" 42 | 43 | 44 | # ---------------------------------------------- 45 | # Fake Service Addresses 46 | # ---------------------------------------------- 47 | 48 | echo -e "${GRN}Fake Service UI addresses: ${NC}" 49 | echo -e " ${YELL}DC1 Web:${NC} $DC1_WEB_UI_ADDR" 50 | echo -e " ${YELL}DC1 Unicorn-Frontend:${NC} $DC1_UNICORN_FRONTEND_UI_ADDR" 51 | echo -e " ${YELL}DC1 Prometheus WebUI:${NC} $DC1_PROMETHEUS" 52 | echo "" 53 | 54 | # ---------------------------------------------- 55 | # Footer 56 | # ---------------------------------------------- 57 | echo -e "${RED}Happy Consul'ing! 
${NC}" 58 | echo -e "" 59 | -------------------------------------------------------------------------------- /docs/DoctorConsul-TheManual-Draft.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/joshwolfer/doctorconsul/b970f0fc8e3a3089d69a283d151720b84d5660ce/docs/DoctorConsul-TheManual-Draft.pdf -------------------------------------------------------------------------------- /docs/acl-everything.md: -------------------------------------------------------------------------------- 1 | # ACL Auth / Policies / Roles / Tokens 2 | 3 | * `global-management` token defined as `root` 4 | * When in doubt use the `root` token. 5 | * Most tokens and roles are scoped to the `default` partitions. 6 | * This is intentional, so all tokens are at the root hierarchy and can be scoped to managed any resource in any partition. (best-practices) 7 | 8 | ## ACL Token Specifics 9 | 10 | Envoy side-car ACLs are controlled via the `start.sh` script and has three ACL modes. By default (secure mode) the environment will assign Consul ACL tokens to most of the agents and proxies using the principle of least privilege. 11 | 12 | * `./start.sh` (secure mode - Default) 13 | * `./start.sh -root` 14 | * `./start.sh -custom` 15 | 16 | It may be handy to quickly launch the entire environment using nothing but root tokens, especially when troubleshooting ACL issues (docker_vars/acl-secure.env). 17 | 18 | Each modes token definitions are kept in the `docker_vars` directory. 19 | The custom ACL Token profile is intended to set a hybrid set of tokens as needed (docker_vars/acl-custom.env). 20 | 21 | The ACL tokens listed below will only be accurate when running in the default "secure" mode. 
22 | 23 | #### Token: `root` 24 | 25 | * Policy: `global-management` 26 | 27 | 28 | | Token | Privs | Purpose | 29 | | ---------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------- | 30 | | `00000000-0000-0000-0000-000000001111` | node-identity:`client-dc1-alpha:dc1`,service-identity:`joshs-obnoxiously-long-service-name-gonna-take-awhile:dc1`,service-identity:`josh:dc1` | Agent token for Consul Client`consul-client-dc1-alpha` (DC1) | 31 | | `00000000-0000-0000-0000-000000002222` | Role:`team-proj1-rw` | Grant write permissions within`DC1` / `proj1` partition. | 32 | | `00000000-0000-0000-0000-000000003333` | Role:`DC1-Read` | Read-only privileges within the entire`DC1` cluster. | 33 | | `00000000-0000-0000-0000-000000004444` | service-identy:`unicorn.frontend.unicorn-frontend:dc1` | | 34 | | `00000000-0000-0000-0000-000000005555` | service-identity:`unicorn.backend.unicorn-backend:dc1` | | 35 | | `00000000-0000-0000-0000-000000006666` | service-identity:`unicorn.backend.unicorn-backend:dc2` | | 36 | | `00000000-0000-0000-0000-000000007777` | service-identity:`default.default.web:dc1` | | 37 | | `00000000-0000-0000-0000-000000008888` | service-identity:`default.default.web-upstream:dc1` | | 38 | | `00000000-0000-0000-0000-000000009999` | service-identity:`chunky.default.web-chunky:dc2` | | 39 | 40 | ## Roles 41 | 42 | #### Role: `consul-admins` 43 | 44 | * Policy: `global-management` 45 | * Purpose: 46 | * Assign root level permissions. 47 | * Used within the Auth0 OIDC method (group: `admins`) to define who should have "god mode" in the Consul Cluster 48 | 49 | #### Role: `team-proj1-rw` 50 | 51 | * Purpose: Grant write permissions within `DC1` / `proj1` partition. 
52 | * Used within the Auth0 OIDC method (group: `proj1`) to define who should have management permission of the `proj` partition 53 | 54 | #### Role: `dc1-read` 55 | 56 | * Purpose: Read-only privileges within the entire `DC1` cluster. 57 | 58 | ## OIDC Authentiction 59 | 60 | ### Auth0 61 | 62 | #### Binding Rules 63 | 64 | * auth0 groups = `proj1` 65 | * auth0 groups = `admins` -------------------------------------------------------------------------------- /docs/app-banana_split-notes.md: -------------------------------------------------------------------------------- 1 | *I poked a hole in neopolitan so i could hit it directly with TP enabled - i didn't take the time to check or setup API/Ingress gateway* 2 | 3 | # Create 4 | ```sh 5 | kubectl create ns banana-split 6 | 7 | pushd services/ 8 | kubectl apply -f banana_split-neopolitan.yaml 9 | kubectl apply -f banana_split-icecream_chocolate.yaml 10 | kubectl apply -f banana_split-icecream_vanilla.yaml 11 | kubectl apply -f banana_split-icecream_strawberry.yaml 12 | popd 13 | 14 | 15 | kubectl apply -f service-splitter/service-splitter-ice_cream.yaml 16 | kubectl apply -f intentions/dc3-cernunnos-banana_split-ice_cream.yaml 17 | ``` 18 | 19 | 20 | # Destroy 21 | ```sh 22 | 23 | kubectl delete -f banana_split-neopolitan.yaml 24 | kubectl delete -f banana_split-icecream_chocolate.yaml 25 | kubectl delete -f banana_split-icecream_vanilla.yaml 26 | kubectl delete -f banana_split-icecream_strawberry.yaml 27 | 28 | kubectl apply -f intentions/dc3-cernunnos-banana_split-ice_cream.yaml 29 | kubectl apply -f service-splitter/service-splitter-ice_cream.yaml 30 | ``` 31 | 32 | 33 | 34 | 35 | # service-resolver thoughts 36 | Envoy attempts to map a service tag eg: `vanilla.ice-cream.virtual...` and since I've got distinct services backing it doesn't work. Without much critical thinking, first impression is this pattern does not apply for kube. 
Out of the box, a given kube deployment will have a specific set of tags that will be 'Recreated' or 'RollingUpdate' (default) when applied resulting in a short period of time where both versions exist. 37 | 38 | for anythnig canary-like two deployments are required - 39 | https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#canary-deployment 40 | https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments 41 | 42 | ```sh 43 | kubectl apply -f service-resolver/service-resolver-ice_cream.yaml 44 | # "ice-cream" synthetic service will be assigned a virtual ip but we've got no intentions 45 | # From neopolitan - 46 | # Error communicating with upstream service: Get \"http://ice-cream.virtual.banana-split.ns.cernunnos.ap.dc3.dc.consul/\": dial tcp 240.0.0.23:80: connect: connection refused 47 | 48 | 49 | kubectl delete -f service-resolver/service-resolver-ice_cream.yaml # jank 50 | ``` 51 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | # Architecture Overview 2 | 3 | Architecture: 4 | ![](images/architecture2.png) 5 | 6 | ## Consul Servers 7 | 8 | * 3x single-node Consul Clusters (2 in VM, 1 in Kubernetes) 9 | 10 | ### DC1 (VM) 11 | 12 | * Servers (1) 13 | * `consul-server1-dc1` 14 | * UI exposed on local port 8500: `http://127.0.0.1:8500/ui/_default/dc1/services` 15 | * Gossip Encryption: `aPuGh+5UDskRAbkLaXRzFoSOcSM+5vAK+NEYOWHJH7w=` 16 | 17 | ### DC2 (VM) 18 | 19 | * Consul Servers (1) 20 | * `consul-server1-dc2` 21 | * UI exposed on local port 8501: `http://127.0.0.1:8501/ui/_default/dc2/services` 22 | * Gossip Encryption: `dznVKWl1ri975FUJiddzAPM+3eNP9iXDad2c8hghsKA=` 23 | 24 | ### DC3 (K3d Kubernetes) 25 | 26 | * Servers (1) 27 | * `consul-server-0` 28 | * UI exposed on local port 8502: `http://127.0.0.1:8502/ui/_default/dc3/services` 29 | * Gossip Encryption: Randomly 
generated into a Kube secret. 30 | 31 | ## Consul Mesh Gateways 32 | 33 | ### DC1 34 | 35 | * gateway-dc1 36 | * Internal listener: 10.5.0.5:443 37 | * Public listener: 192.169.7.3:443 38 | * dc1-unicorn-mgw 39 | * Internal listener: 10.5.0.6:443 40 | * Public listener: 192.169.7.7:443 41 | 42 | ### DC2 43 | 44 | * gateway-dc2 45 | * Internal listener: 10.6.0.5:443 46 | * Public listener: 192.169.7.5:443 47 | * dc2-chunky-mgw 48 | * Internal listener: 10.6.0.6:443 49 | * Public listener: 192.169.7.6:443 50 | * dc2-unicorn-mgw 51 | * Internal listener: 10.6.0.7:443 52 | * Public listener: 192.169.7.8:443 53 | 54 | ### DC3 (K3d) 55 | 56 | * mesh-gateway 57 | * Kube loadbalancer: 192.168.7.9:8443 (NOTE! This is dynamically assigned, it could change...) 58 | 59 | # Kubernetes (K3d) 60 | 61 | * Local Kube API listener: 127.0.0.1:6443 -------------------------------------------------------------------------------- /docs/consul-clients.md: -------------------------------------------------------------------------------- 1 | ## Consul Clients 2 | 3 | Tokens for Clients are written directly to agent config files (cannot be changed). 4 | 5 | ### consul-client-dc1-alpha (DC1) 6 | 7 | * **DC**: `DC1` 8 | * **Partition**: `default` 9 | * **Services**: 10 | * `josh` (4 instances) 11 | * This `josh` service is exported to the `DC2` peer (`default`). 12 | * The `service.id` is `josh-local-x` to differentiate between this local service and the imported service (see Notes below) 13 | * `joshs-obnoxiously-long-service-name-gonna-take-awhile` (8 instances) 14 | * This `joshs-obnoxiously-long-service-name-gonna-take-awhile` service is exported to the `DC2` peer (`default`). 15 | * **ACL Token**: `00000000-0000-0000-0000-000000001111` 16 | * `node-identity=client-dc1-alpha:dc1` 17 | * `service-identity=joshs-obnoxiously-long-service-name-gonna-take-awhile:dc1` 18 | * `service-identity=josh:dc1` 19 | * **Notes**: 20 | * Within `DC1` and `DC2`, each cluster contains a service named `josh`. 
This is intentional, to test the behavior when an exported service from a peer matches the same name as a local service.
77 | 78 | ### consul-client-dc2-foxtrot (DC2) 79 | 80 | * **DC**: `DC2` 81 | * **Partition**: `chunky` 82 | * **Services**: 83 | * `web-chunky` (in-mesh) 84 | * **ACL Token**: `root` 85 | * **Notes**: -------------------------------------------------------------------------------- /docs/consul-structure.md: -------------------------------------------------------------------------------- 1 | ## Admin Partitions & Namespaces 2 | 3 | ### DC1 4 | 5 | * `default` 6 | * `donkey` 7 | * `unicorn` 8 | * `frontend` (NS) 9 | * `backend` (NS) 10 | * `proj1` 11 | * `proj2` 12 | 13 | ### DC2 14 | 15 | * `default` 16 | * `heimdall` 17 | * `unicorn` 18 | * `frontend` (NS) 19 | * `backend` (NS) 20 | 21 | ### DC3 (k3d) 22 | 23 | * `default` 24 | * `unicorn` (NS) 25 | 26 | ## Cluster Peering Relationships & Exported Services 27 | 28 | ### Configuration 29 | 30 | * Cluster Peering over Mesh Gateways enabled 31 | 32 | ### Peering Relationships 33 | 34 | * `DC1`/`default` <- `DC2`/`default` 35 | * `DC1`/`default` <- `DC2`/`heimdall` 36 | * `DC1`/`default` -> `DC2`/`chunky` 37 | * `DC1`/`unicorn` <- `DC2`/`unicorn` 38 | * `DC3`/`default` -> `DC1`/`default` 39 | * `DC3`/`default` -> `DC1`/`unicorn` 40 | * `DC3`/`default` -> `DC2`/`unicorn` 41 | 42 | ### Exported Services 43 | 44 | #### DC1 45 | 46 | * `DC1`/`donkey(AP)/donkey` > `DC1`/`default(AP)` (local partition) 47 | * `DC1`/`default(AP)/joshs-obnoxiously-long-service-name-gonna-take-awhile`>`DC2`/`default(AP)` (Peer) 48 | * `DC1`/`default(AP)/joshs-obnoxiously-long-service-name-gonna-take-awhile`>`DC2`/`heimdall(AP)` (Peer) 49 | 50 | #### DC2 51 | 52 | * `DC2`/`default(AP)/josh`>`DC1`/`default` (Peer) 53 | * `DC2`/`unicorn(AP)/unicorn-backend` > `DC1`/`unicorn` (Peer) 54 | 55 | #### DC3 56 | 57 | * `DC3`/`default(AP)/unicorn(NS)/unicorn-backend` > `DC1`/`unicorn` (peer) -------------------------------------------------------------------------------- /docs/images/architecture2.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/joshwolfer/doctorconsul/b970f0fc8e3a3089d69a283d151720b84d5660ce/docs/images/architecture2.png -------------------------------------------------------------------------------- /docs/network.md: -------------------------------------------------------------------------------- 1 | # Network Quick Chart 2 | 3 | ### Local Listeners 4 | 5 | * Consul Server1 DC1 UI: http://127.0.0.1:8500/ui/ 6 | * Consul Server1 DC2 UI: http://127.0.0.1:8501/ui/ 7 | * Consul Server DC3 UI: http://127.0.0.1:8502/ui/ 8 | * Web Service UI: http://127.0.0.1:9000/ui 9 | * Unicorn-frontend (unicorn) DC1 UI: http://127.0.0.1:10000/ui 10 | * Unicorn-frontend (default) DC3 UI: http://127.0.0.1:11000/ui 11 | * Prometheus (non-kube) UI: http://localhost:9090/ 12 | * Prometheus (kube DC3) UI: http://localhost:9091/ 13 | 14 | #### Local Listeners for Envoy troubleshooting 15 | 16 | * 19001: (dc1) gateway-dc1-unicorn 17 | * 19002: (dc1) web 18 | * 19003: (dc1) web-upstream 19 | * 19004: (dc1) unicorn-frontend 20 | * 19005: (dc1) unicorn-backend-dc1 21 | * 19006: (dc1) gateway-dc1 22 | * 19007: (dc2) gateway-dc2 23 | * 19008: (dc2) gateway-dc2-chunky 24 | * 19009: (dc2) gateway-dc2-unicorn 25 | * 19010: (dc2) web-chunky 26 | * 19011: (dc2) unicorn-backend-dc2 27 | 28 | ## Shared Services 29 | 30 | * Prometheus: 31 | * dc1 network: 10.5.0.200 32 | dc2 network: 10.6.0.200 33 | 34 | ## DC1 35 | 36 | #### (DC1) Consul Core 37 | 38 | * DC1 server: 10.5.0.2 / 192.169.7.2 39 | * DC1 MGW: 10.5.0.5 / 192.169.7.3 40 | * DC1 MGW (unicorn): 10.5.0.6 / 192.169.7.7 41 | 42 | #### (DC1) Consul Clients 43 | 44 | * consul-client-dc1-alpha (default): 10.5.0.10 45 | * consul-client-dc1-charlie-ap1 (donkey): 10.5.0.11 46 | * consul-client-dc1-delta-ap2 (unicorn): 10.5.0.12 47 | * consul-client-dc1-echo-proj1 (proj1): 10.5.0.13 48 | 49 | #### (DC1) Applications 50 | 51 | * web-v1: 10.5.0.100 52 | * 
web-upstream: 10.5.0.101 53 | * unicorn-frontend: 10.5.0.110 54 | * unicorn-backend: 10.5.0.111 55 | 56 | ## DC2 57 | 58 | #### (DC2) Consul Core 59 | 60 | * DC2 server: 10.6.0.2 / 192.169.7.4 61 | * DC2 MGW: 10.6.0.5 / 192.169.7.5 62 | * DC2 MGW (chunky): 10.6.0.6 / 192.169.7.6 63 | * DC2 MGW (unicorn): 10.6.0.7 / 192.169.7.8 64 | 65 | #### (DC2) Consul Clients 66 | 67 | * consul-client-dc2-bravo (default): 10.6.0.10 68 | * consul-client-dc2-foxtrot (chunky): 10.6.0.11 69 | * consul-client-dc2-unicorn (unicorn): 10.6.0.12 70 | 71 | #### (DC2) Applications 72 | 73 | * web-chunky: 10.6.0.100 74 | * unicorn-backend: 10.6.0.111 75 | 76 | ## DC3 77 | 78 | #### (DC3) k3d 79 | 80 | * consul (server) 81 | * mesh-gateway 82 | * unicorn-frontend (default) 83 | * unicorn-backend (default) 84 | * prometheus-server 85 | -------------------------------------------------------------------------------- /docs/ui-viz.md: -------------------------------------------------------------------------------- 1 | # UI Visualization metrics using Prometheus 2 | 3 | Consul UI metrics are enabled in all of the Consul clusters. This provides extra details about each service mesh service, directly within the Consul UI. 4 | 5 | Consul Docs: [HERE](https://developer.hashicorp.com/consul/docs/connect/observability/ui-visualizationhttps:/) 6 | 7 | Docter Consul has two different prometheus servers: 8 | 9 | * `prometheus` in docker-compose, used for DC1 and DC2. 10 | * `prometheus-server` in K3d, used for k3d DC3. 11 | 12 | ## Overview 13 | 14 | There are three key components to making the Consul UI visualizations work: 15 | 16 | 1. Each application Envoy side-car proxy exposes metrics via a Prometheus listener. 17 | 2. Prometheus servers are configured to connect to each Envoy listener and "scrape" these metrics every 30s. 18 | 3. 
The Consul UI connects to the configured Prometheus server to fetch and display these metrics.
except images of course :D" 66 | echo -e "------------------------------------------" 67 | echo -e "${NC}" 68 | docker ps -a | grep -v CONTAINER | awk '{print $1}' | xargs docker stop; docker ps -a | grep -v CONTAINER | awk '{print $1}' | xargs docker rm; docker volume ls | grep -v DRIVER | awk '{print $2}' | xargs docker volume rm; docker network prune -f 69 | else 70 | echo -e "${GRN}No containers to nuke.${NC}" 71 | echo "" 72 | fi 73 | exit 0 74 | fi 75 | 76 | if $ARG_EKS; then 77 | echo -e "${GRN}------------------------------------------" 78 | echo -e " Executing EKS only Nuke" 79 | echo -e "------------------------------------------${NC}" 80 | echo -e "" 81 | echo -e "Executing:${YELL} ./kube-config.sh -nuke-eks${NC}" 82 | ./kube-config.sh -nuke-eks 83 | exit 0 84 | fi 85 | 86 | if $ARG_GKE; then 87 | echo -e "${GRN}------------------------------------------" 88 | echo -e " Executing GKE only Nuke" 89 | echo -e "------------------------------------------${NC}" 90 | echo -e "" 91 | echo -e "Executing:${YELL} ./kube-config.sh -nuke-gke${NC}" 92 | ./kube-config.sh -nuke-gke 93 | exit 0 94 | fi 95 | 96 | # Default behavior 97 | echo -e "${GRN}Nuking k3d clusters ONLY ${NC}" 98 | echo "" 99 | k3d cluster delete dc3 100 | k3d cluster delete dc3-p1 101 | k3d cluster delete dc4 102 | k3d cluster delete dc4-p1 103 | echo "" 104 | exit 0 105 | 106 | -------------------------------------------------------------------------------- /kube/configs/dc3/acl/dc3_default-terminating-gateway.hcl: -------------------------------------------------------------------------------- 1 | service "example-tcp" { 2 | policy = "write" 3 | } 4 | 5 | service "example-http" { 6 | policy = "write" 7 | } -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/apigw-http-listener-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: 
apigw-http-listener-cert 5 | namespace: consul 6 | type: kubernetes.io/tls 7 | data: 8 | tls.crt: | 9 | 10 | tls.key: | 11 | -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/gateway-consul_apigw.yaml: -------------------------------------------------------------------------------- 1 | # This defines the actual Gateway that handles traffic, as permitted by the GatewayClass. 2 | # After the Gateway and its listeners are defined, Routes can be assigned to define which Consul services get what traffic. 3 | # Kube Docs: https://gateway-api.sigs.k8s.io/api-types/gateway/ 4 | 5 | apiVersion: gateway.networking.k8s.io/v1beta1 6 | kind: Gateway 7 | metadata: 8 | name: consul-api-gateway 9 | namespace: consul 10 | spec: 11 | gatewayClassName: consul 12 | listeners: 13 | - name: consul-apig-http-listener 14 | protocol: HTTP 15 | port: 1666 16 | allowedRoutes: 17 | kinds: 18 | - kind: HTTPRoute 19 | namespaces: # Choose which namespaces are allowed to use this Gateway. Not required. 
20 | from: Selector 21 | selector: 22 | matchLabels: # This label is added automatically as of K8s 1.22 to all namespaces 23 | kubernetes.io/metadata.name: unicorn 24 | # tls: 25 | # certificateRefs: 26 | # - name: apigw-http-listener-cert 27 | - name: consul-apig-tcp-listener 28 | protocol: TCP 29 | port: 1667 30 | allowedRoutes: 31 | kinds: 32 | - kind: TCPRoute 33 | namespaces: 34 | from: Selector 35 | selector: 36 | matchLabels: 37 | kubernetes.io/metadata.name: externalz 38 | - name: consul-apig-tcp-listener2 39 | protocol: TCP 40 | port: 1668 41 | allowedRoutes: 42 | kinds: 43 | - kind: TCPRoute 44 | namespaces: 45 | from: Selector 46 | selector: 47 | matchLabels: 48 | kubernetes.io/metadata.name: externalz -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/httproute-unicorn_ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1beta1 2 | kind: HTTPRoute 3 | metadata: 4 | name: unicorn-ingress 5 | namespace: unicorn 6 | spec: 7 | parentRefs: 8 | - name: consul-api-gateway # Name of the Gateway as defined in the Gateway spec (kind: Gateway) 9 | namespace: consul # Namespace the Consul APIGW lives in 10 | rules: 11 | - matches: # Match Path + rewrite: /unicorn-frontend/ 12 | - path: 13 | type: PathPrefix 14 | value: /unicorn-frontend/ 15 | filters: 16 | - type: URLRewrite 17 | urlRewrite: 18 | path: 19 | replacePrefixMatch: / 20 | type: ReplacePrefixMatch 21 | backendRefs: # Defines the backend service (upstream Consul service). 22 | - kind: Service # Kubernetes Service. 23 | name: unicorn-frontend # Kubernetes Service name that points to the Consul Mesh service. 
24 | - matches: # Match Path + rewrite: /unicorn-ssg-frontend/ 25 | - path: 26 | type: PathPrefix 27 | value: /unicorn-ssg-frontend/ 28 | filters: 29 | - type: URLRewrite 30 | urlRewrite: 31 | path: 32 | replacePrefixMatch: / 33 | type: ReplacePrefixMatch 34 | backendRefs: 35 | - kind: Service 36 | name: unicorn-ssg-frontend 37 | - matches: # Match Path + rewrite: /externalz-http/ 38 | - path: 39 | type: PathPrefix 40 | value: /externalz-http/ 41 | filters: 42 | - type: URLRewrite 43 | urlRewrite: 44 | path: 45 | replacePrefixMatch: / 46 | type: ReplacePrefixMatch 47 | backendRefs: 48 | - kind: Service 49 | name: externalz-http 50 | namespace: externalz 51 | - matches: # Match Host: unicorn-frontend 52 | - headers: 53 | - name: "Host" 54 | value: "unicorn-frontend" 55 | backendRefs: 56 | - kind: Service 57 | name: unicorn-frontend 58 | - matches: # Match Host: unicorn-ssg-frontend 59 | - headers: 60 | - name: "Host" 61 | value: "unicorn-ssg-frontend" 62 | backendRefs: 63 | - kind: Service 64 | name: unicorn-ssg-frontend 65 | - matches: # Match Host: externalz-http 66 | - headers: 67 | - name: "Host" 68 | value: "externalz-http" 69 | backendRefs: 70 | - kind: Service 71 | name: externalz-http 72 | namespace: externalz 73 | 74 | # reference grant allows a route from a different namespace to send to a destination service in the same namespace as the referenceGrant. 
75 | # IE: referenceGrant and backend service in "externalz" NS and HTTPRoute in "unicorn" NS 76 | # Without a referenceGrant, the API gateway returns a 404 77 | # Docs: https://developer.hashicorp.com/consul/docs/api-gateway/configuration/routes#rules-backendrefs 78 | 79 | --- 80 | 81 | apiVersion: gateway.networking.k8s.io/v1alpha2 82 | kind: ReferenceGrant 83 | metadata: 84 | name: grants 85 | namespace: externalz # ReferenceGrant and destination upstream service need to exists in the same NS 86 | spec: 87 | from: 88 | - group: gateway.networking.k8s.io 89 | kind: HTTPRoute 90 | namespace: unicorn # NS where the HTTPRoute lives 91 | to: 92 | - group: "" # I assume this means any services within the specified namespace... 93 | kind: Service 94 | -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/intention-dc3_default-externalz_http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: externalz-http 5 | namespace: externalz 6 | spec: 7 | destination: 8 | name: externalz-http 9 | sources: 10 | - name: consul-api-gateway 11 | namespace: consul 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/intention-dc3_default-externalz_tcp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: externalz-tcp 5 | namespace: externalz 6 | spec: 7 | destination: 8 | name: externalz-tcp 9 | sources: 10 | - name: consul-api-gateway 11 | namespace: consul 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/intention-dc3_default-unicorn_frontend.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-frontend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-frontend 9 | sources: 10 | - name: consul-api-gateway 11 | namespace: consul 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/intention-dc3_default-unicorn_ssg_frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-ssg-frontend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-ssg-frontend 9 | sources: 10 | - name: consul-api-gateway 11 | namespace: consul 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/api-gw/tcproute-externalz_tcp_ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1alpha2 2 | kind: TCPRoute 3 | metadata: 4 | name: externalz-tcp-ingress 5 | namespace: externalz 6 | spec: 7 | parentRefs: # Defines the gateway listener. 8 | - name: consul-api-gateway 9 | namespace: consul 10 | sectionName: consul-apig-tcp-listener 11 | rules: 12 | - backendRefs: # Defines the backend service. 13 | - kind: Service # Kubernetes Service. 14 | name: externalz-tcp # Kubernetes Service name that points to the Consul Mesh service. 
15 | -------------------------------------------------------------------------------- /kube/configs/dc3/defaults/mesh-dc3_cernunnos.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: Mesh 3 | metadata: 4 | name: mesh 5 | namespace: default # Must be default - config applies to all namespaces within a partition. 6 | spec: 7 | # peering: 8 | # peerThroughMeshGateways: true # This is only permitted in the Default partition (responsible for the peering) 9 | allowEnablingPermissiveMutualTLS: true # Enables the ability to use Permissive Mode. Required before a service-defaults can set mutualTLSMode: permissive. 10 | transparentProxy: 11 | meshDestinationsOnly: true # Prevents Pods from *mostly* connecting to upstreams that are not in Consul. 12 | # MutualTLSMode: permissive # Not sure if this will work as a global enable of permissive mode. 13 | 14 | 15 | -------------------------------------------------------------------------------- /kube/configs/dc3/defaults/mesh-dc3_default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: Mesh 3 | metadata: 4 | name: mesh 5 | namespace: default # Must be default - config applies to all namespaces within a partition. 6 | spec: 7 | peering: 8 | peerThroughMeshGateways: true 9 | allowEnablingPermissiveMutualTLS: true # Enables the ability to use Permissive Mode. Required before a service-defaults can set mutualTLSMode: permissive. 10 | transparentProxy: 11 | meshDestinationsOnly: true # Prevents Pods from *mostly* connecting to upstreams that are not in Consul. 12 | # MutualTLSMode: permissive # Not sure if this will work as a global enable of permissive mode.
13 | 14 | 15 | -------------------------------------------------------------------------------- /kube/configs/dc3/defaults/proxy-defaults.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ProxyDefaults 3 | metadata: 4 | name: global 5 | namespace: consul 6 | spec: 7 | accessLogs: 8 | enabled: true 9 | meshGateway: 10 | mode: local 11 | # mode: remote 12 | config: 13 | protocol: http -------------------------------------------------------------------------------- /kube/configs/dc3/exported-services/exported-services-dc3-cernunnos.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ExportedServices 3 | metadata: 4 | name: cernunnos ## The name of the partition containing the service 5 | # namespace: unicorn # ExportedServices are scoped to partition only. Don't specify a NS, although it's going to be imperative that we re-scope these to NS+Partition for enterprise UX. 6 | spec: 7 | services: 8 | - name: "unicorn-backend" ## The name of the service you want to export 9 | namespace: "unicorn" 10 | consumers: 11 | - partition: default 12 | - name: "unicorn-tp-backend" ## The name of the service you want to export 13 | namespace: "unicorn" 14 | consumers: 15 | - partition: default 16 | - samenessGroup: ssg-unicorn # Adding the SSG so that it also gets exported for unicorn-ssg-frontend (part of the SSG). 17 | - name: "mesh-gateway" # 1.16: Still have to export the mesh gateway across partitions or the MGW can't discover cross partition services. Yeah. For real... 
(NET-4767) 18 | namespace: "default" 19 | consumers: 20 | - partition: default 21 | -------------------------------------------------------------------------------- /kube/configs/dc3/exported-services/exported-services-dc3-default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ExportedServices 3 | metadata: 4 | name: default ## The name of the partition containing the service 5 | namespace: unicorn # I believe ExportedServices are scoped to partition only, yet this still works... fun. 6 | spec: 7 | services: 8 | - name: "unicorn-backend" ## The name of the service you want to export 9 | namespace: "unicorn" 10 | consumers: 11 | - peer: dc1-unicorn ## The name of the peer that receives the service -------------------------------------------------------------------------------- /kube/configs/dc3/external-services/service-defaults-example.com_http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceDefaults 3 | metadata: 4 | name: example-http 5 | namespace: default 6 | spec: 7 | protocol: http # Only matches on the Host or :authority headers. Nothing to do with DNS resolution. 8 | destination: 9 | addresses: 10 | - "example.com" 11 | - "www.wolfmansound.com" 12 | port: 80 -------------------------------------------------------------------------------- /kube/configs/dc3/external-services/service-defaults-example.com_tcp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceDefaults 3 | metadata: 4 | name: example-tcp 5 | namespace: default 6 | spec: 7 | protocol: tcp # Only matches if the traffic is TLS (SNI match) or an IP. Host headers do NOT match.
8 | destination: 9 | addresses: 10 | - "example.com" 11 | - "www.wolfmansound.com" 12 | port: 443 -------------------------------------------------------------------------------- /kube/configs/dc3/external-services/service-defaults-whatismyip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceDefaults 3 | metadata: 4 | name: whatismyip 5 | namespace: externalz 6 | spec: 7 | protocol: tcp 8 | destination: 9 | addresses: 10 | - "104.16.154.36" 11 | port: 443 -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-cernunnos-banana_split-ice_cream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: ice-cream # Virtual service which then splits between ice-cream-vanilla, ice-cream-strawberry, and ice-cream-chocolate 5 | namespace: banana-split 6 | spec: 7 | destination: 8 | name: ice-cream 9 | namespace: banana-split 10 | sources: 11 | - name: neapolitan 12 | namespace: banana-split 13 | action: allow 14 | 15 | --- 16 | apiVersion: consul.hashicorp.com/v1alpha1 17 | kind: ServiceIntentions 18 | metadata: 19 | name: ice-cream-vanilla 20 | namespace: banana-split 21 | spec: 22 | destination: 23 | name: ice-cream-vanilla 24 | namespace: banana-split 25 | sources: 26 | - name: neapolitan 27 | namespace: banana-split 28 | action: allow 29 | 30 | --- 31 | apiVersion: consul.hashicorp.com/v1alpha1 32 | kind: ServiceIntentions 33 | metadata: 34 | name: ice-cream-strawberry 35 | namespace: banana-split 36 | spec: 37 | destination: 38 | name: ice-cream-strawberry 39 | namespace: banana-split 40 | sources: 41 | - name: neapolitan 42 | namespace: banana-split 43 | action: allow 44 | 45 | --- 46 | apiVersion: consul.hashicorp.com/v1alpha1 47 | kind: ServiceIntentions 48 | metadata: 49 | name: 
ice-cream-chocolate 50 | namespace: banana-split 51 | spec: 52 | destination: 53 | name: ice-cream-chocolate 54 | namespace: banana-split 55 | sources: 56 | - name: neapolitan 57 | namespace: banana-split 58 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-cernunnos-paris-paris.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: paris 5 | namespace: paris 6 | spec: 7 | destination: 8 | name: paris 9 | namespace: paris 10 | sources: 11 | - name: leroy-jenkins 12 | namespace: paris 13 | action: allow 14 | # - name: unicorn-ssg-frontend 15 | # # partition: default 16 | # namespace: unicorn 17 | # samenessGroup: ssg-unicorn 18 | # action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-cernunnos-unicorn_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-backend 9 | namespace: unicorn 10 | sources: 11 | - name: unicorn-frontend 12 | partition: default 13 | namespace: unicorn 14 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-cernunnos-unicorn_tp_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-tp-backend 9 | namespace: unicorn 10 | sources: 11 | - name: unicorn-frontend 12 | partition: default 13 | namespace: unicorn 14 | action: allow 15 | - name: 
unicorn-ssg-frontend 16 | # partition: default 17 | namespace: unicorn 18 | samenessGroup: ssg-unicorn 19 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-default-external-example_http-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: example-http 5 | namespace: default 6 | spec: 7 | destination: 8 | name: example-http 9 | sources: 10 | - name: externalz-http 11 | namespace: externalz 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-default-external-example_tcp-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: example-tcp 5 | namespace: default 6 | spec: 7 | destination: 8 | name: example-tcp 9 | sources: 10 | - name: externalz-tcp 11 | namespace: externalz 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-default-external-whatismyip-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: whatismyip 5 | namespace: externalz 6 | spec: 7 | destination: 8 | name: whatismyip 9 | sources: 10 | - name: externalz-tcp 11 | namespace: externalz 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-default-unicorn_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-backend 5 | namespace: 
unicorn 6 | spec: 7 | destination: 8 | name: unicorn-backend 9 | sources: 10 | - name: unicorn-frontend 11 | namespace: unicorn 12 | action: allow 13 | - name: unicorn-frontend 14 | namespace: frontend 15 | peer: dc1-unicorn 16 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/intentions/dc3-default-unicorn_tp_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-tp-backend 9 | sources: 10 | - name: unicorn-frontend 11 | namespace: unicorn 12 | action: allow 13 | - name: unicorn-frontend 14 | namespace: frontend 15 | peer: dc1-unicorn 16 | action: allow 17 | - name: unicorn-ssg-frontend 18 | namespace: unicorn 19 | samenessGroup: ssg-unicorn 20 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc3/sameness-groups/dc3-cernunnos-ssg-unicorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: SamenessGroup 3 | metadata: 4 | name: ssg-unicorn 5 | # namespace: unicorn # Can't have a namespace assigned. It'll break. 6 | spec: 7 | defaultForFailover: false # Since this is false, a service-resolver needs to be referenced. 8 | members: 9 | - partition: cernunnos # You have to include the partition that the SamenessGroup is being configured for, or Consul has a fit. 
10 | - partition: default 11 | - peer: dc4-default 12 | - peer: dc4-taranis 13 | 14 | -------------------------------------------------------------------------------- /kube/configs/dc3/sameness-groups/dc3-default-ssg-unicorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: SamenessGroup 3 | metadata: 4 | name: ssg-unicorn 5 | # namespace: unicorn # Can't have a namespace assigned. It'll break. 6 | spec: 7 | defaultForFailover: false # Since this is false, a service-resolver needs to be referenced. 8 | members: 9 | - partition: default # You have to include the partition that the SamenessGroup is being configured for, or Consul has a fit. 10 | - partition: cernunnos 11 | - peer: dc4-default 12 | - peer: dc4-taranis -------------------------------------------------------------------------------- /kube/configs/dc3/service-resolver/service-resolver-ice_cream.yaml: -------------------------------------------------------------------------------- 1 | # apiVersion: consul.hashicorp.com/v1alpha1 2 | # kind: ServiceResolver 3 | # metadata: 4 | # name: ice-cream 5 | # namespace: banana-split 6 | # spec: 7 | # defaultSubset: vanilla 8 | # subsets: 9 | # vanilla: 10 | # filter: 'Service.Service == vanilla' 11 | # strawberry: 12 | # filter: 'Service.Service == strawberry' 13 | # chocolate: 14 | # filter: 'Service.Service == chocolate' 15 | 16 | # apiVersion: consul.hashicorp.com/v1alpha1 17 | # kind: ServiceResolver 18 | # metadata: 19 | # name: ice-cream 20 | # namespace: banana-split 21 | # spec: 22 | # defaultSubset: vanilla 23 | # subsets: 24 | # vanilla: 25 | # filter: 'Service.Meta.version == vanilla' 26 | # strawberry: 27 | # filter: 'Service.Meta.version == strawberry' 28 | # chocolate: 29 | # filter: 'Service.Meta.version == chocolate' 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- 
/kube/configs/dc3/service-resolver/service-resolver-unicorn_sameness.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceResolver 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | spec: 7 | failover: # requires at least one of the following: service, serviceSubset, namespace, targets, datacenters 8 | '*': 9 | samenessGroup: "ssg-unicorn" 10 | # targets: 11 | # - partition: "cernunnos" 12 | # - service: 13 | # - serviceSubset: 14 | # - namespace: 15 | # - partition: 16 | # - datacenter: 17 | # - peer: 18 | -------------------------------------------------------------------------------- /kube/configs/dc3/service-splitter/service-splitter-ice_cream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceSplitter 3 | metadata: 4 | name: ice-cream 5 | namespace: banana-split 6 | spec: 7 | splits: 8 | - weight: 34 9 | service: ice-cream-vanilla # The splitter splits directly to a different service. This is a better option in Kube than trying to split to a subset based on tags. See the doctor consul manual. 
10 | - weight: 33 11 | service: ice-cream-strawberry 12 | - weight: 33 13 | service: ice-cream-chocolate -------------------------------------------------------------------------------- /kube/configs/dc3/services/banana_split-icecream_chocolate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: ice-cream-chocolate 5 | namespace: banana-split 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: ice-cream-chocolate 13 | namespace: banana-split 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: ice-cream-chocolate 23 | namespace: banana-split 24 | annotations: 25 | consul.hashicorp.com/service-sync: 'false' 26 | spec: 27 | type: NodePort 28 | selector: 29 | app: ice-cream-chocolate 30 | version: chocolate 31 | ports: 32 | - name: ice-cream-port 33 | protocol: TCP 34 | port: 8993 35 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 36 | 37 | --- 38 | 39 | apiVersion: apps/v1 40 | kind: Deployment 41 | metadata: 42 | name: ice-cream-chocolate 43 | namespace: banana-split 44 | labels: 45 | app: ice-cream-chocolate 46 | version: chocolate # It doesn't appear that this Kube label is relevant at all. 
47 | spec: 48 | replicas: 1 49 | selector: 50 | matchLabels: 51 | app: ice-cream-chocolate 52 | version: chocolate 53 | template: 54 | metadata: 55 | labels: 56 | app: ice-cream-chocolate 57 | version: chocolate 58 | annotations: 59 | consul.hashicorp.com/connect-inject: 'true' 60 | consul.hashicorp.com/transparent-proxy: 'true' 61 | consul.hashicorp.com/service-tags: 'dc3-cernunnos' 62 | consul.hashicorp.com/service-meta-version: chocolate # This isn't currently used in any way for resolving / routing 63 | spec: 64 | serviceAccountName: ice-cream-chocolate 65 | containers: 66 | - name: ice-cream-chocolate 67 | image: nicholasjackson/fake-service:v0.26.0 68 | ports: 69 | - containerPort: 10000 70 | env: 71 | - name: 'LISTEN_ADDR' 72 | value: '0.0.0.0:10000' 73 | - name: 'NAME' 74 | value: 'Chocolate Ice Cream (DC3 Cernunnos)' 75 | - name: 'MESSAGE' 76 | value: '

The ice-cream Application

' 77 | - name: 'SERVER_TYPE' 78 | value: 'http' 79 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 80 | value: '3s' 81 | 82 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/banana_split-icecream_strawberry.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: ice-cream-strawberry 5 | namespace: banana-split 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: ice-cream-strawberry 13 | namespace: banana-split 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: ice-cream-strawberry 23 | namespace: banana-split 24 | annotations: 25 | consul.hashicorp.com/service-sync: 'false' 26 | spec: 27 | type: NodePort 28 | selector: 29 | app: ice-cream-strawberry 30 | version: strawberry 31 | ports: 32 | - name: ice-cream-port 33 | protocol: TCP 34 | port: 8992 35 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 36 | 37 | --- 38 | 39 | apiVersion: apps/v1 40 | kind: Deployment 41 | metadata: 42 | name: ice-cream-strawberry 43 | namespace: banana-split 44 | labels: 45 | app: ice-cream-strawberry 46 | version: strawberry 47 | spec: 48 | replicas: 1 49 | selector: 50 | matchLabels: 51 | app: ice-cream-strawberry 52 | version: strawberry 53 | template: 54 | metadata: 55 | labels: 56 | app: ice-cream-strawberry 57 | version: strawberry 58 | annotations: 59 | consul.hashicorp.com/connect-inject: 'true' 60 | consul.hashicorp.com/transparent-proxy: 'true' 61 | consul.hashicorp.com/service-tags: 'dc3-cernunnos' 62 | consul.hashicorp.com/service-meta-version: strawberry # This isn't currently used in any way for resolving / routing 63 | spec: 64 | serviceAccountName: ice-cream-strawberry 65 | containers: 66 | - name: ice-cream-strawberry 67 | image: 
nicholasjackson/fake-service:v0.26.0 68 | ports: 69 | - containerPort: 10000 70 | env: 71 | - name: 'LISTEN_ADDR' 72 | value: '0.0.0.0:10000' 73 | - name: 'NAME' 74 | value: 'Strawberry Ice Cream (DC3 Cernunnos)' 75 | - name: 'MESSAGE' 76 | value: '

The ice-cream Application

' 77 | - name: 'SERVER_TYPE' 78 | value: 'http' 79 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 80 | value: '3s' -------------------------------------------------------------------------------- /kube/configs/dc3/services/banana_split-icecream_vanilla.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: ice-cream-vanilla 5 | namespace: banana-split 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: ice-cream-vanilla 13 | namespace: banana-split 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: ice-cream-vanilla 23 | namespace: banana-split 24 | annotations: 25 | consul.hashicorp.com/service-sync: 'false' 26 | spec: 27 | type: NodePort 28 | selector: 29 | app: ice-cream-vanilla 30 | version: vanilla 31 | ports: 32 | - name: ice-cream-port 33 | protocol: TCP 34 | port: 8991 35 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 36 | 37 | --- 38 | 39 | apiVersion: apps/v1 40 | kind: Deployment 41 | metadata: 42 | name: ice-cream-vanilla 43 | namespace: banana-split 44 | labels: 45 | app: ice-cream-vanilla 46 | version: vanilla 47 | spec: 48 | replicas: 1 49 | selector: 50 | matchLabels: 51 | app: ice-cream-vanilla 52 | version: vanilla 53 | template: 54 | metadata: 55 | labels: 56 | app: ice-cream-vanilla 57 | version: vanilla 58 | annotations: 59 | consul.hashicorp.com/connect-inject: 'true' 60 | consul.hashicorp.com/transparent-proxy: 'true' 61 | consul.hashicorp.com/service-tags: 'dc3-cernunnos' 62 | consul.hashicorp.com/service-meta-version: vanilla # This isn't currently used in any way for resolving / routing 63 | spec: 64 | serviceAccountName: ice-cream-vanilla 65 | containers: 66 | - name: ice-cream-vanilla 67 | image: nicholasjackson/fake-service:v0.26.0 68 | ports: 69 | - containerPort: 10000 70 | 
env: 71 | - name: 'LISTEN_ADDR' 72 | value: '0.0.0.0:10000' 73 | - name: 'NAME' 74 | value: 'Vanilla Ice Cream (DC3 Cernunnos)' 75 | - name: 'MESSAGE' 76 | value: '

The ice-cream Application

' 77 | - name: 'SERVER_TYPE' 78 | value: 'http' 79 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 80 | value: '3s' -------------------------------------------------------------------------------- /kube/configs/dc3/services/banana_split-neapolitan.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: neapolitan 5 | namespace: banana-split 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: neapolitan 13 | namespace: banana-split 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: neapolitan 23 | namespace: banana-split 24 | annotations: 25 | consul.hashicorp.com/service-sync: 'false' # Don't sync with catalog sync 26 | spec: 27 | type: LoadBalancer 28 | selector: 29 | app: neapolitan 30 | ports: 31 | - name: neapolitan-port 32 | protocol: TCP 33 | port: 8990 # Should be the port the Kube LB will listen on to forward to TCP/10000 34 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 35 | 36 | --- 37 | 38 | apiVersion: apps/v1 39 | kind: Deployment 40 | metadata: 41 | name: neapolitan 42 | namespace: banana-split 43 | labels: 44 | app: neapolitan 45 | version: v1 46 | spec: 47 | replicas: 1 48 | selector: 49 | matchLabels: 50 | app: neapolitan 51 | template: 52 | metadata: 53 | labels: 54 | app: neapolitan 55 | annotations: 56 | consul.hashicorp.com/connect-inject: 'true' 57 | consul.hashicorp.com/transparent-proxy: 'true' 58 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: '10000' 59 | consul.hashicorp.com/service-tags: 'dc3-cernunnos' 60 | consul.hashicorp.com/service-meta-version: v1 61 | spec: 62 | serviceAccountName: neapolitan 63 | containers: 64 | - name: neapolitan 65 | image: nicholasjackson/fake-service:v0.26.0 66 | # imagePullPolicy: Always # Probably don't need this 67 | ports: 68 | - 
containerPort: 10000 69 | env: 70 | - name: 'LISTEN_ADDR' 71 | value: '0.0.0.0:10000' 72 | - name: 'UPSTREAM_URIS' 73 | value: 'http://ice-cream.virtual.banana-split.ns.cernunnos.ap.dc3.dc.consul' 74 | - name: 'NAME' 75 | value: 'Neapolitan (DC3 Cernunnos)' 76 | - name: 'MESSAGE' 77 | value: '

The neapolitan Application

' 78 | - name: 'SERVER_TYPE' 79 | value: 'http' 80 | - name: 'TIMING_50_PERCENTILE' 81 | value: '30ms' 82 | - name: 'TIMING_90_PERCENTILE' 83 | value: '60ms' 84 | - name: 'TIMING_99_PERCENTILE' 85 | value: '90ms' 86 | - name: 'TIMING_VARIANCE' 87 | value: '10' 88 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 89 | value: '3s' 90 | # - name: 'HTTP_CLIENT_APPEND_REQUEST' 91 | # value: 'true' 92 | # - name: 'TRACING_ZIPKIN' 93 | # value: 'http://simplest-collector.default:9411' 94 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/externalz-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: externalz-http 5 | namespace: externalz 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: externalz-http 13 | namespace: externalz 14 | spec: 15 | protocol: http # The external services are accessed through the mesh as HTTP, as defined in the service-defaults for the external services; the FakeService UI itself is handled as TCP.
16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: externalz-http 23 | namespace: externalz 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: externalz-http 28 | ports: 29 | - name: http 30 | protocol: TCP 31 | port: 8003 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: externalz-http 40 | namespace: externalz 41 | labels: 42 | app: externalz-http 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: externalz-http 49 | template: 50 | metadata: 51 | labels: 52 | app: externalz-http 53 | annotations: 54 | consul.hashicorp.com/connect-inject: 'true' 55 | consul.hashicorp.com/transparent-proxy: 'true' 56 | consul.hashicorp.com/service-tags: 'dc3' 57 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 58 | spec: 59 | serviceAccountName: externalz-http 60 | containers: 61 | - name: externalz-http 62 | image: nicholasjackson/fake-service:v0.26.0 63 | ports: 64 | - containerPort: 10000 65 | env: 66 | - name: 'LISTEN_ADDR' 67 | value: '0.0.0.0:10000' 68 | - name: 'UPSTREAM_URIS' 69 | value: 'http://example.com, https://example.com, http://www.wolfmansound.com, https://www.wolfmansound.com' 70 | - name: 'NAME' 71 | value: 'externalz-http (DC3)' 72 | - name: 'MESSAGE' 73 | value: '

The externalz-http Application

' 74 | - name: 'SERVER_TYPE' 75 | value: 'http' 76 | - name: 'TIMING_50_PERCENTILE' 77 | value: '30ms' 78 | - name: 'TIMING_90_PERCENTILE' 79 | value: '60ms' 80 | - name: 'TIMING_99_PERCENTILE' 81 | value: '90ms' 82 | - name: 'TIMING_VARIANCE' 83 | value: '10' 84 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 85 | value: '2s' 86 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/externalz-tcp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: externalz-tcp 5 | namespace: externalz 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: externalz-tcp 13 | namespace: externalz 14 | spec: 15 | protocol: tcp 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: externalz-tcp 23 | namespace: externalz 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: externalz-tcp 28 | ports: 29 | - name: tcp 30 | protocol: TCP 31 | port: 8002 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: externalz-tcp 40 | namespace: externalz 41 | labels: 42 | app: externalz-tcp 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: externalz-tcp 49 | template: 50 | metadata: 51 | labels: 52 | app: externalz-tcp 53 | annotations: 54 | consul.hashicorp.com/connect-inject: 'true' 55 | consul.hashicorp.com/transparent-proxy: 'true' 56 | consul.hashicorp.com/service-tags: 'dc3' 57 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 
58 | spec: 59 | serviceAccountName: externalz-tcp 60 | containers: 61 | - name: externalz-tcp 62 | image: nicholasjackson/fake-service:v0.26.0 63 | ports: 64 | - containerPort: 10000 65 | env: 66 | - name: 'LISTEN_ADDR' 67 | value: '0.0.0.0:10000' 68 | - name: 'UPSTREAM_URIS' 69 | value: 'http://example.com, https://example.com, http://www.wolfmansound.com, https://www.wolfmansound.com' 70 | - name: 'NAME' 71 | value: 'externalz-tcp (DC3)' 72 | - name: 'MESSAGE' 73 | value: '

The externalz-tcp Application

' 74 | - name: 'SERVER_TYPE' 75 | value: 'http' 76 | - name: 'TIMING_50_PERCENTILE' 77 | value: '30ms' 78 | - name: 'TIMING_90_PERCENTILE' 79 | value: '60ms' 80 | - name: 'TIMING_99_PERCENTILE' 81 | value: '90ms' 82 | - name: 'TIMING_VARIANCE' 83 | value: '10' 84 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 85 | value: '3s' 86 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/paris-leroy_jenkins.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: leroy-jenkins 5 | namespace: paris 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: leroy-jenkins 13 | namespace: paris 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: leroy-jenkins 23 | namespace: paris 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: leroy-jenkins 28 | ports: 29 | - name: http 30 | protocol: TCP 31 | port: 8100 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: leroy-jenkins 40 | namespace: paris 41 | labels: 42 | app: leroy-jenkins 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: leroy-jenkins 49 | template: 50 | metadata: 51 | labels: 52 | app: leroy-jenkins 53 | annotations: 54 | consul.hashicorp.com/connect-inject: 'true' 55 | consul.hashicorp.com/transparent-proxy: 'true' 56 | consul.hashicorp.com/service-tags: 'dc3 cernunnos' 57 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 
58 | spec: 59 | serviceAccountName: leroy-jenkins 60 | containers: 61 | - name: leroy-jenkins 62 | image: nicholasjackson/fake-service:v0.26.0 63 | # imagePullPolicy: Always # Probably don't need this 64 | ports: 65 | - containerPort: 10000 66 | # readinessProbe: 67 | # httpGet: 68 | # scheme: HTTP 69 | # path: / 70 | # port: 10000 71 | # initialDelaySeconds: 10 72 | # periodSeconds: 5 73 | env: 74 | - name: 'LISTEN_ADDR' 75 | value: '0.0.0.0:10000' 76 | - name: 'UPSTREAM_URIS' 77 | value: 'http://paris.virtual.paris.ns.cernunnos.ap.dc3.dc.consul' 78 | - name: 'NAME' 79 | value: 'leroy-jenkins (DC3 Cernunnos)' 80 | - name: 'MESSAGE' 81 | value: '

The leroy-jenkins Application

' 82 | - name: 'SERVER_TYPE' 83 | value: 'http' 84 | - name: 'TIMING_50_PERCENTILE' 85 | value: '30ms' 86 | - name: 'TIMING_90_PERCENTILE' 87 | value: '60ms' 88 | - name: 'TIMING_99_PERCENTILE' 89 | value: '90ms' 90 | - name: 'TIMING_VARIANCE' 91 | value: '10' 92 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 93 | value: '3s' 94 | 95 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/paris-paris-cernunnos.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: paris 5 | namespace: paris 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: paris 13 | namespace: paris 14 | spec: 15 | protocol: http 16 | mutualTLSMode: "permissive" # Enables permissive mode for the Paris/Paris Upstream 17 | 18 | --- 19 | 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: paris 24 | namespace: paris 25 | spec: 26 | type: NodePort 27 | selector: 28 | app: paris 29 | ports: 30 | - name: http # How does Permissive mode 31 | port: 8102 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: paris 40 | namespace: paris 41 | labels: 42 | app: paris 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: paris 49 | template: 50 | metadata: 51 | labels: 52 | app: paris 53 | annotations: 54 | consul.hashicorp.com/connect-inject: 'true' 55 | consul.hashicorp.com/transparent-proxy: 'true' 56 | consul.hashicorp.com/service-tags: 'dc3-cernunnos' 57 | spec: 58 | serviceAccountName: paris 59 | containers: 60 | - name: paris 61 | image: nicholasjackson/fake-service:v0.26.0 62 | # imagePullPolicy: Always # Probably don't need this 63 | ports: 64 | - 
containerPort: 10000 65 | env: 66 | - name: 'LISTEN_ADDR' 67 | value: '0.0.0.0:10000' 68 | # - name: 'UPSTREAM_URIS' 69 | # value: 'http://.virtual.paris.ns.dc3.dc.consul,' 70 | # value: 'http://127.0.0.1:11000,http://127.0.0.1:12000,http://paris-tp-backend.virtual.paris.ns.dc3.dc.consul,http://paris-tp-backend.virtual.paris.ns.cernunnos.ap.dc3.dc.consul' 71 | - name: 'NAME' 72 | value: 'paris (DC3 Cernunnos)' 73 | - name: 'MESSAGE' 74 | value: '

The paris Application

' 75 | - name: 'SERVER_TYPE' 76 | value: 'http' 77 | - name: 'TIMING_50_PERCENTILE' 78 | value: '30ms' 79 | - name: 'TIMING_90_PERCENTILE' 80 | value: '60ms' 81 | - name: 'TIMING_99_PERCENTILE' 82 | value: '90ms' 83 | - name: 'TIMING_VARIANCE' 84 | value: '10' 85 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 86 | value: '3s' 87 | # - name: 'HTTP_CLIENT_APPEND_REQUEST' 88 | # value: 'true' 89 | # - name: 'TRACING_ZIPKIN' 90 | # value: 'http://simplest-collector.default:9411' 91 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/paris-pretty_please.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: pretty-please 5 | namespace: paris 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: pretty-please 13 | namespace: paris 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: pretty-please 23 | namespace: paris 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: pretty-please 28 | ports: 29 | - name: http 30 | protocol: TCP 31 | port: 8101 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: pretty-please 40 | namespace: paris 41 | labels: 42 | app: pretty-please 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: pretty-please 49 | template: 50 | metadata: 51 | labels: 52 | app: pretty-please 53 | # annotations: 54 | spec: 55 | serviceAccountName: pretty-please 56 | containers: 57 | - name: pretty-please 58 | image: nicholasjackson/fake-service:v0.26.0 59 | # imagePullPolicy: Always 
# Probably don't need this 60 | ports: 61 | - containerPort: 10000 62 | # readinessProbe: 63 | # httpGet: 64 | # scheme: HTTP 65 | # path: / 66 | # port: 10000 67 | # initialDelaySeconds: 10 68 | # periodSeconds: 5 69 | env: 70 | - name: 'LISTEN_ADDR' 71 | value: '0.0.0.0:10000' 72 | - name: 'UPSTREAM_URIS' 73 | value: 'http://paris.paris.svc.cluster.local:8102/' 74 | - name: 'NAME' 75 | value: 'pretty-please (DC3 Cernunnos)' 76 | - name: 'MESSAGE' 77 | value: '

The pretty-please Application

' 78 | - name: 'SERVER_TYPE' 79 | value: 'http' 80 | - name: 'TIMING_50_PERCENTILE' 81 | value: '30ms' 82 | - name: 'TIMING_90_PERCENTILE' 83 | value: '60ms' 84 | - name: 'TIMING_99_PERCENTILE' 85 | value: '90ms' 86 | - name: 'TIMING_VARIANCE' 87 | value: '10' 88 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 89 | value: '3s' 90 | 91 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-backend 28 | ports: 29 | - port: 10001 30 | targetPort: 10001 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'dc3' 55 | # consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-backend 58 | containers: 59 | - name: unicorn-backend 60 | image: nicholasjackson/fake-service:v0.26.0 61 | # imagePullPolicy: Always # Probably don't need this 62 | ports: 63 | - containerPort: 10001 64 | readinessProbe: 65 | httpGet: 66 | scheme: HTTP 67 | path: / 68 | port: 10001 69 | 
initialDelaySeconds: 10 70 | periodSeconds: 5 71 | env: 72 | - name: 'LISTEN_ADDR' 73 | value: '0.0.0.0:10001' 74 | # - name: 'UPSTREAM_URIS' 75 | # value: 'grpc://127.0.0.1:11000' 76 | - name: 'NAME' 77 | value: 'unicorn-backend (DC3)' 78 | - name: 'MESSAGE' 79 | value: 'peekaboo' 80 | - name: 'SERVER_TYPE' 81 | value: 'grpc' 82 | - name: 'TIMING_50_PERCENTILE' 83 | value: '30ms' 84 | - name: 'TIMING_90_PERCENTILE' 85 | value: '60ms' 86 | - name: 'TIMING_99_PERCENTILE' 87 | value: '90ms' 88 | - name: 'TIMING_VARIANCE' 89 | value: '10' 90 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-cernunnos-backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-backend 28 | ports: 29 | - port: 10002 30 | targetPort: 10002 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'cernunnos' 55 | # consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-backend 58 | containers: 59 | - name: unicorn-backend 60 | image: 
nicholasjackson/fake-service:v0.26.0 61 | # imagePullPolicy: Always # Probably don't need this 62 | ports: 63 | - containerPort: 10002 64 | readinessProbe: 65 | httpGet: 66 | scheme: HTTP 67 | path: / 68 | port: 10002 69 | initialDelaySeconds: 10 70 | periodSeconds: 5 71 | env: 72 | - name: 'LISTEN_ADDR' 73 | value: '0.0.0.0:10002' 74 | # - name: 'UPSTREAM_URIS' 75 | # value: 'grpc://127.0.0.1:11000' 76 | - name: 'NAME' 77 | value: 'unicorn-backend (DC3 Cernunnos)' 78 | - name: 'MESSAGE' 79 | value: 'peekaboo' 80 | - name: 'SERVER_TYPE' 81 | value: 'grpc' 82 | - name: 'TIMING_50_PERCENTILE' 83 | value: '30ms' 84 | - name: 'TIMING_90_PERCENTILE' 85 | value: '60ms' 86 | - name: 'TIMING_99_PERCENTILE' 87 | value: '90ms' 88 | - name: 'TIMING_VARIANCE' 89 | value: '10' 90 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-cernunnos-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-frontend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-frontend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-frontend 23 | namespace: unicorn 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: unicorn-frontend 28 | ports: 29 | - name: http 30 | protocol: TCP 31 | port: 8000 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: unicorn-frontend 40 | namespace: unicorn 41 | labels: 42 | app: unicorn-frontend 43 | version: v1 44 | # 
service: fake-service # do I need this? 45 | spec: 46 | replicas: 1 47 | selector: 48 | matchLabels: 49 | app: unicorn-frontend 50 | template: 51 | metadata: 52 | labels: 53 | app: unicorn-frontend 54 | # service: fake-service # do I need this? 55 | annotations: 56 | consul.hashicorp.com/connect-inject: 'true' 57 | consul.hashicorp.com/transparent-proxy: 'false' 58 | consul.hashicorp.com/service-tags: 'dc3' 59 | # consul.hashicorp.com/service-meta-version: 'v1' 60 | # consul.hashicorp.com/connect-service-upstreams: "unicorn-backend.unicorn:11000:dc3" 61 | # consul.hashicorp.com/connect-service-upstreams: "unicorn-backend.unicorn.cernunnos:12000" 62 | # consul.hashicorp.com/connect-service-upstreams: "unicorn-backend.unicorn.cernunnos:12000:dc3" 63 | # consul.hashicorp.com/connect-service-upstreams: "unicorn-backend.unicorn.default:11000:dc3" 64 | consul.hashicorp.com/connect-service-upstreams: "unicorn-backend.unicorn:11000" 65 | spec: 66 | serviceAccountName: unicorn-frontend 67 | containers: 68 | - name: unicorn-frontend 69 | image: nicholasjackson/fake-service:v0.26.0 70 | # imagePullPolicy: Always # Probably don't need this 71 | ports: 72 | - containerPort: 10000 73 | readinessProbe: 74 | httpGet: 75 | scheme: HTTP 76 | path: / 77 | port: 10000 78 | initialDelaySeconds: 10 79 | periodSeconds: 5 80 | env: 81 | - name: 'LISTEN_ADDR' 82 | value: '0.0.0.0:10000' 83 | - name: 'UPSTREAM_URIS' 84 | value: 'http://127.0.0.1:11000' 85 | # value: 'grpc://127.0.0.1:11000,http://payments.payments-ns:9090' 86 | - name: 'NAME' 87 | value: 'unicorn-frontend (DC3)' 88 | - name: 'MESSAGE' 89 | value: 'Hello Operators of Doom!' 
90 | - name: 'SERVER_TYPE' 91 | value: 'http' 92 | - name: 'TIMING_50_PERCENTILE' 93 | value: '30ms' 94 | - name: 'TIMING_90_PERCENTILE' 95 | value: '60ms' 96 | - name: 'TIMING_99_PERCENTILE' 97 | value: '90ms' 98 | - name: 'TIMING_VARIANCE' 99 | value: '10' 100 | # - name: 'HTTP_CLIENT_APPEND_REQUEST' 101 | # value: 'true' 102 | # - name: 'TRACING_ZIPKIN' 103 | # value: 'http://simplest-collector.default:9411' 104 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-cernunnos-tp_backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-tp-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-tp-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-tp-backend 28 | ports: 29 | - port: 10002 30 | targetPort: 10002 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-tp-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-tp-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-tp-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-tp-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'DC3-cernunnos,transparent-proxy' 55 | consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-tp-backend 58 | containers: 59 | - name: unicorn-tp-backend 60 | image: nicholasjackson/fake-service:v0.26.0 61 | # imagePullPolicy: Always # Probably 
don't need this 62 | ports: 63 | - containerPort: 10002 64 | readinessProbe: 65 | httpGet: 66 | scheme: HTTP 67 | path: / 68 | port: 10002 69 | initialDelaySeconds: 10 70 | periodSeconds: 5 71 | env: 72 | - name: 'LISTEN_ADDR' 73 | value: '0.0.0.0:10002' 74 | # - name: 'UPSTREAM_URIS' 75 | # value: 'grpc://127.0.0.1:11000' 76 | - name: 'NAME' 77 | value: 'unicorn-backend {Transparent} (DC3 Cernunnos)' 78 | - name: 'MESSAGE' 79 | value: 'peekaboo' 80 | - name: 'SERVER_TYPE' 81 | value: 'grpc' 82 | - name: 'TIMING_50_PERCENTILE' 83 | value: '30ms' 84 | - name: 'TIMING_90_PERCENTILE' 85 | value: '60ms' 86 | - name: 'TIMING_99_PERCENTILE' 87 | value: '90ms' 88 | - name: 'TIMING_VARIANCE' 89 | value: '10' 90 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-frontend-fs.html: -------------------------------------------------------------------------------- 1 |

The Unicorn-frontend Application

2 | 3 |

The DC3 Unicorn-frontend application has the following features:

4 | 5 |
    6 |
  • 8 total HTTP upstream services 7 |
      8 |
    • 4 explicitly configured 9 |
        10 |
      • TCP/11000: dc3 / default (ap) / unicorn / unicorn-backend
      • 11 |
      • TCP/12000: dc3 / cernunnos (ap) / unicorn / unicorn-backend
      • 12 |
      • TCP/13000: dc4-default (peer) / unicorn / unicorn-backend
      • 13 |
      • TCP/14000: dc4-taranis (peer) / unicorn / unicorn-backend
      • 14 |
      15 |
    • 16 |
    • 4 transparently accessed 17 |
        18 |
      • http://unicorn-tp-backend.virtual.unicorn.ns.dc3.dc.consul
      • 19 |
      • http://unicorn-tp-backend.virtual.unicorn.ns.cernunnos.ap.dc3.dc.consul
      • 20 |
      • http://unicorn-tp-backend.virtual.unicorn.dc4-default.consul
      • 21 |
      • http://unicorn-tp-backend.virtual.unicorn.dc4-taranis.consul
      • 22 |
      23 |
    • 24 |
    25 |
  • 26 |
  • The dc3 / unicorn / unicorn-tp-backend service belongs to a Sameness Group (service-resolver), which uses the following order: 27 |
      28 |
    • dc3 default
    • 29 |
    • dc3 cernunnos (AP)
    • 30 |
    • dc4 default (Peer)
    • 31 |
    • dc4 taranis (Peer)
    • 32 |
    33 |
  • 34 |
  • 2 external upstreams accessible via Terminating Gateway: 35 |
      36 |
    • "example-https" 37 |
        38 |
      • example.com:443
      • 39 |
      • wolfmansound.com:443
      • 40 |
      41 |
    • 42 |
    • "whatismyip" 43 |
        44 |
      • 104.16.154.36:443
      • 45 |
      46 |
    • 47 |
    48 |
  • 49 |
50 | 51 |

Noteworthy Details

52 | 53 |

This application has a little bit of everything in it. Many different upstreams of both explicit and transparent types, service failover, and external services via a terminating gateway. 

54 | 55 |

Each transparently accessed upstream service is named with a {transparent} tag. This makes it easy to differentiate from the explicitly accessed services.

56 | 57 |

Demo: Failover on dc3 / unicorn / unicorn-tp-backend

58 | 59 |

Every time you refresh the Fake Service UI, a new request is made to each upstream service. Pay close attention to the service named "unicorn-backend {transparent} (DC3)". As we destroy upstream services one by one, you will watch in real-time as Consul switches to healthy unicorn-backend services in other locations. 

60 | 61 |

Doctor Consul provides a k9s plugin to assist with scaling pods. See the zork script.

62 | 63 |
    64 |
  • In Kube cluster k3d-dc3, scale unicorn-tp-backend to 0 pods.
  • 65 |
  • Refresh Fake Service
  • 66 |
67 | 68 |

Notice that there are now 2 instances of unicorn-backend {transparent} DC3 Cernunnos. This is because the instance in DC3 is no longer healthy and has switched to the Cernunnos partition.

69 | 70 |
    71 |
  • In Kube cluster k3d-dc3-p1, scale unicorn-tp-backend to 0 pods.
  • 72 |
  • Refresh Fake Service
  • 73 |
74 | 75 |

Notice that the upstream service has now switched to unicorn-backend {transparent} DC4 and there are two occurrences of it. Also notice that the original cernunnos upstream is now red. This is because it is down and there is no service-resolver or sameness group that instructs Consul to failover to a healthy backup. Only the unicorn-tp-backend in DC3/default has failover enabled. 

76 | 77 |
    78 |
  • In Kube cluster k3d-dc4, scale unicorn-tp-backend to 0 pods.
  • 79 |
  • Refresh Fake Service
  • 80 |
81 | 82 |

Notice that once again the upstream destination has changed, now to DC4 Taranis.

83 | 84 |
    85 |
  • In Kube cluster k3d-dc4-p1, scale unicorn-tp-backend to 0 pods.
  • 86 |
  • Refresh Fake Service
  • 87 |
88 | 89 |

Finally, since all instances of unicorn-tp-backend have been killed, we have run out of healthy upstreams and Fake Service has all red failures for the transparent upstreams. 

90 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-frontend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-frontend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-frontend 23 | namespace: unicorn 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: unicorn-frontend 28 | ports: 29 | - name: http 30 | protocol: TCP 31 | port: 8000 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: unicorn-frontend 40 | namespace: unicorn 41 | labels: 42 | app: unicorn-frontend 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: unicorn-frontend 49 | template: 50 | metadata: 51 | labels: 52 | app: unicorn-frontend 53 | annotations: 54 | consul.hashicorp.com/connect-inject: 'true' 55 | consul.hashicorp.com/transparent-proxy: 'true' 56 | consul.hashicorp.com/service-tags: 'dc3' 57 | consul.hashicorp.com/connect-service-upstreams: "unicorn-backend.unicorn:11000:dc3,unicorn-backend.unicorn.cernunnos:12000,unicorn-backend.svc.unicorn.ns.dc4-default.peer:13000,unicorn-backend.svc.unicorn.ns.dc4-taranis.peer:14000" 58 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 
59 | spec: 60 | serviceAccountName: unicorn-frontend 61 | containers: 62 | - name: unicorn-frontend 63 | image: nicholasjackson/fake-service:v0.26.0 64 | # imagePullPolicy: Always # Probably don't need this 65 | ports: 66 | - containerPort: 10000 67 | # readinessProbe: 68 | # httpGet: 69 | # scheme: HTTP 70 | # path: / 71 | # port: 10000 72 | # initialDelaySeconds: 10 73 | # periodSeconds: 5 74 | env: 75 | - name: 'LISTEN_ADDR' 76 | value: '0.0.0.0:10000' 77 | - name: 'UPSTREAM_URIS' 78 | value: 'http://127.0.0.1:11000,http://127.0.0.1:12000,http://127.0.0.1:13000,http://127.0.0.1:14000,http://unicorn-tp-backend.virtual.unicorn.ns.dc3.dc.consul,http://unicorn-tp-backend.virtual.unicorn.ns.cernunnos.ap.dc3.dc.consul,http://unicorn-tp-backend.virtual.unicorn.dc4-default.consul,http://unicorn-tp-backend.virtual.unicorn.dc4-taranis.consul' 79 | # value: 'http://127.0.0.1:11000,http://127.0.0.1:12000,http://unicorn-tp-backend.virtual.unicorn.ns.dc3.dc.consul,http://unicorn-tp-backend.virtual.unicorn.ns.cernunnos.ap.dc3.dc.consul' 80 | - name: 'NAME' 81 | value: 'unicorn-frontend (DC3)' 82 | - name: 'MESSAGE' 83 | value: '

The Unicorn-frontend Application

' 84 | - name: 'SERVER_TYPE' 85 | value: 'http' 86 | - name: 'TIMING_50_PERCENTILE' 87 | value: '30ms' 88 | - name: 'TIMING_90_PERCENTILE' 89 | value: '60ms' 90 | - name: 'TIMING_99_PERCENTILE' 91 | value: '90ms' 92 | - name: 'TIMING_VARIANCE' 93 | value: '10' 94 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 95 | value: '3s' 96 | # - name: 'HTTP_CLIENT_APPEND_REQUEST' 97 | # value: 'true' 98 | # - name: 'TRACING_ZIPKIN' 99 | # value: 'http://simplest-collector.default:9411' 100 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-ssg_frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-ssg-frontend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-ssg-frontend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-ssg-frontend 23 | namespace: unicorn 24 | spec: 25 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 26 | selector: 27 | app: unicorn-ssg-frontend 28 | ports: 29 | - name: http 30 | protocol: TCP 31 | port: 8001 # Should be the port the Kube LB will listen on to forward to TCP/10000 32 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 33 | 34 | --- 35 | 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: unicorn-ssg-frontend 40 | namespace: unicorn 41 | labels: 42 | app: unicorn-ssg-frontend 43 | version: v1 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: unicorn-ssg-frontend 49 | template: 50 | metadata: 51 | labels: 52 | app: unicorn-ssg-frontend 53 | annotations: 54 | consul.hashicorp.com/connect-inject: 'true' 55 | consul.hashicorp.com/transparent-proxy: 'true' 56 | 
consul.hashicorp.com/service-tags: 'dc3' 57 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 58 | spec: 59 | serviceAccountName: unicorn-ssg-frontend 60 | containers: 61 | - name: unicorn-ssg-frontend 62 | image: nicholasjackson/fake-service:v0.26.0 63 | ports: 64 | - containerPort: 10000 65 | # readinessProbe: 66 | # httpGet: 67 | # scheme: HTTP 68 | # path: / 69 | # port: 10000 70 | # initialDelaySeconds: 10 71 | # periodSeconds: 5 72 | env: 73 | - name: 'LISTEN_ADDR' 74 | value: '0.0.0.0:10000' 75 | - name: 'UPSTREAM_URIS' 76 | value: 'http://unicorn-tp-backend.virtual.unicorn.ns.dc3.dc.consul' 77 | - name: 'NAME' 78 | value: 'unicorn-SSG-frontend (DC3)' 79 | - name: 'MESSAGE' 80 | value: 'This application uses a service sameness group that pulls unicorn-backend from: default/dc3, ap: cernunnos, peer: dc4-default, peer: dc4-taranis' 81 | - name: 'SERVER_TYPE' 82 | value: 'http' 83 | - name: 'TIMING_50_PERCENTILE' 84 | value: '30ms' 85 | - name: 'TIMING_90_PERCENTILE' 86 | value: '60ms' 87 | - name: 'TIMING_99_PERCENTILE' 88 | value: '90ms' 89 | - name: 'TIMING_VARIANCE' 90 | value: '10' 91 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 92 | value: '3s' 93 | # - name: 'HTTP_CLIENT_APPEND_REQUEST' 94 | # value: 'true' 95 | # - name: 'TRACING_ZIPKIN' 96 | # value: 'http://simplest-collector.default:9411' 97 | -------------------------------------------------------------------------------- /kube/configs/dc3/services/unicorn-tp_backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-tp-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | 
name: unicorn-tp-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-tp-backend 28 | ports: 29 | - port: 10001 30 | targetPort: 10001 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-tp-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-tp-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-tp-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-tp-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'dc3,transparent-proxy' 55 | consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-tp-backend 58 | containers: 59 | - name: unicorn-tp-backend 60 | image: nicholasjackson/fake-service:v0.26.0 61 | # imagePullPolicy: Always # Probably don't need this 62 | ports: 63 | - containerPort: 10001 64 | readinessProbe: 65 | httpGet: 66 | scheme: HTTP 67 | path: / 68 | port: 10001 69 | initialDelaySeconds: 10 70 | periodSeconds: 5 71 | env: 72 | - name: 'LISTEN_ADDR' 73 | value: '0.0.0.0:10001' 74 | # - name: 'UPSTREAM_URIS' 75 | # value: 'grpc://127.0.0.1:11000' 76 | - name: 'NAME' 77 | value: 'unicorn-backend {Transparent} (DC3)' 78 | - name: 'MESSAGE' 79 | value: 'peekaboo' 80 | - name: 'SERVER_TYPE' 81 | value: 'grpc' 82 | - name: 'TIMING_50_PERCENTILE' 83 | value: '30ms' 84 | - name: 'TIMING_90_PERCENTILE' 85 | value: '60ms' 86 | - name: 'TIMING_99_PERCENTILE' 87 | value: '90ms' 88 | - name: 'TIMING_VARIANCE' 89 | value: '10' 90 | -------------------------------------------------------------------------------- /kube/configs/dc3/tgw/dc3_default-tgw.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: TerminatingGateway 3 | metadata: 4 | name: terminating-gateway 5 | 
namespace: default 6 | spec: 7 | services: 8 | - name: '*' # Testing wildcards and it works fine. I need to test wildcards on the non-default NS next. 9 | namespace: default 10 | # - name: example-tcp 11 | # - name: example-http -------------------------------------------------------------------------------- /kube/configs/dc4/acl/dc4_sheol-terminating-gateway.hcl: -------------------------------------------------------------------------------- 1 | namespace "sheol" { 2 | service "sheol-ext" { 3 | policy = "write" 4 | } 5 | } 6 | 7 | namespace "sheol-app1" { 8 | service "sheol-ext1" { 9 | policy = "write" 10 | } 11 | } 12 | 13 | namespace "sheol-app2" { 14 | service "sheol-ext2" { 15 | policy = "write" 16 | } 17 | } -------------------------------------------------------------------------------- /kube/configs/dc4/defaults/mesh-dc4_default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: Mesh 3 | metadata: 4 | name: mesh 5 | namespace: default # Must be default - config applies to all namespaces within a partition. 6 | spec: 7 | peering: 8 | peerThroughMeshGateways: true 9 | transparentProxy: 10 | meshDestinationsOnly: true # Prevents Pods from *mostly* connecting to upstreams that are not in Consul. 11 | 12 | -------------------------------------------------------------------------------- /kube/configs/dc4/defaults/mesh-dc4_taranis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: Mesh 3 | metadata: 4 | name: mesh 5 | namespace: default # Must be default - config applies to all namespaces within a partition. 
6 | spec: 7 | # peering: 8 | # peerThroughMeshGateways: true # This is only permitted in the Default partition (responsible for the peering) 9 | transparentProxy: 10 | meshDestinationsOnly: true 11 | 12 | -------------------------------------------------------------------------------- /kube/configs/dc4/defaults/proxy-defaults.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ProxyDefaults 3 | metadata: 4 | name: global 5 | namespace: consul 6 | spec: 7 | accessLogs: 8 | enabled: true 9 | meshGateway: 10 | mode: local 11 | # mode: remote 12 | config: 13 | protocol: http -------------------------------------------------------------------------------- /kube/configs/dc4/exported-services/exported-services-dc4-default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ExportedServices 3 | metadata: 4 | name: default ## The name of the partition containing the service 5 | namespace: unicorn # I believe ExportedServices are scoped to partition only, yet this still works... fun. 6 | spec: 7 | services: 8 | - name: "unicorn-backend" ## The name of the service you want to export 9 | namespace: "unicorn" 10 | consumers: 11 | - peer: dc3-default ## The name of the peer that receives the service 12 | - name: "unicorn-tp-backend" 13 | namespace: "unicorn" 14 | consumers: 15 | - peer: dc3-default 16 | - samenessGroup: ssg-unicorn -------------------------------------------------------------------------------- /kube/configs/dc4/exported-services/exported-services-dc4-taranis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ExportedServices 3 | metadata: 4 | name: taranis ## The name of the partition containing the service 5 | # namespace: unicorn # I believe ExportedServices are scoped to partition only. 
6 | spec: 7 | services: 8 | - name: "unicorn-backend" ## The name of the service you want to export 9 | namespace: "unicorn" 10 | consumers: 11 | - peer: dc3-default ## The name of the peer that receives the service 12 | - name: "unicorn-tp-backend" 13 | namespace: "unicorn" 14 | consumers: 15 | - peer: dc3-default 16 | - samenessGroup: ssg-unicorn -------------------------------------------------------------------------------- /kube/configs/dc4/external-services/service_defaults-sheol_ext.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceDefaults 3 | metadata: 4 | name: sheol-ext 5 | namespace: sheol 6 | spec: 7 | protocol: tcp 8 | destination: 9 | addresses: 10 | - github.com 11 | - api.github.com 12 | port: 443 -------------------------------------------------------------------------------- /kube/configs/dc4/external-services/service_defaults-sheol_ext1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceDefaults 3 | metadata: 4 | name: sheol-ext1 5 | namespace: sheol-app1 6 | spec: 7 | protocol: tcp 8 | destination: 9 | addresses: 10 | - example.com 11 | port: 443 -------------------------------------------------------------------------------- /kube/configs/dc4/external-services/service_defaults-sheol_ext2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceDefaults 3 | metadata: 4 | name: sheol-ext2 5 | namespace: sheol-app2 6 | spec: 7 | protocol: tcp 8 | destination: 9 | addresses: 10 | - www.google.com 11 | port: 443 -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4-default-unicorn_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-backend 9 | namespace: unicorn 10 | sources: 11 | - name: unicorn-frontend 12 | namespace: unicorn 13 | peer: dc3-default 14 | action: allow 15 | -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4-default-unicorn_tp_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-tp-backend 9 | namespace: unicorn 10 | sources: 11 | - name: unicorn-frontend 12 | namespace: unicorn 13 | peer: dc3-default 14 | action: allow 15 | - name: unicorn-ssg-frontend 16 | namespace: unicorn 17 | # peer: dc3-default 18 | samenessGroup: ssg-unicorn 19 | action: allow 20 | -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4-taranis-unicorn_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-backend 9 | namespace: unicorn 10 | sources: 11 | - name: unicorn-frontend 12 | namespace: unicorn 13 | peer: dc3-default 14 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4-taranis-unicorn_tp_backend-allow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | spec: 7 | destination: 8 | name: unicorn-tp-backend 9 
| namespace: unicorn 10 | sources: 11 | - name: unicorn-frontend 12 | namespace: unicorn 13 | peer: dc3-default 14 | action: allow 15 | - name: unicorn-ssg-frontend 16 | namespace: unicorn 17 | # peer: dc3-default 18 | samenessGroup: ssg-unicorn 19 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4_default-sheol_ext.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: sheol-ext 5 | namespace: sheol 6 | spec: 7 | destination: 8 | name: sheol-ext 9 | sources: 10 | - name: sheol-app 11 | namespace: sheol 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4_default-sheol_ext1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: sheol-ext1 5 | namespace: sheol-app1 6 | spec: 7 | destination: 8 | name: sheol-ext1 9 | sources: 10 | - name: sheol-app1 11 | namespace: sheol-app1 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc4/intentions/dc4_default-sheol_ext2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: ServiceIntentions 3 | metadata: 4 | name: sheol-ext2 5 | namespace: sheol-app2 6 | spec: 7 | destination: 8 | name: sheol-ext2 9 | sources: 10 | - name: sheol-app2 11 | namespace: sheol-app2 12 | action: allow -------------------------------------------------------------------------------- /kube/configs/dc4/sameness-groups/dc4-default-ssg-unicorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: 
SamenessGroup 3 | metadata: 4 | name: ssg-unicorn 5 | # namespace: unicorn # Can't have a namespace assigned. It'll break. 6 | spec: 7 | defaultForFailover: false # Since this is false, a service-resolver needs to be referenced. 8 | members: 9 | - partition: default # You have to include the partition that the SamenessGroup is being configured for, or Consul has a fit. 10 | - partition: taranis 11 | - peer: dc3-default 12 | - peer: dc3-cernunnos -------------------------------------------------------------------------------- /kube/configs/dc4/sameness-groups/dc4-taranis-ssg-unicorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: SamenessGroup 3 | metadata: 4 | name: ssg-unicorn 5 | # namespace: unicorn # Can't have a namespace assigned. It'll break. 6 | spec: 7 | defaultForFailover: false # Since this is false, a service-resolver needs to be referenced. 8 | members: 9 | - partition: taranis # You have to include the partition that the SamenessGroup is being configured for, or Consul has a fit. 10 | - partition: default 11 | - peer: dc3-default 12 | - peer: dc3-cernunnos 13 | 14 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/sheol_app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: sheol-app 5 | namespace: sheol 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: sheol-app 13 | namespace: sheol 14 | spec: 15 | protocol: tcp 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: sheol-app 23 | namespace: sheol 24 | # annotations: # This would group each of the services together IF they were in the same namespace. Meh. 
25 | # service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 26 | # service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" 27 | # service.beta.kubernetes.io/aws-load-balancer-shared: "true" 28 | spec: 29 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 30 | selector: 31 | app: sheol-app 32 | ports: 33 | - name: http 34 | protocol: TCP 35 | port: 8004 # Should be the port the Kube LB will listen on to forward to TCP/10000 36 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 37 | 38 | --- 39 | 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | name: sheol-app 44 | namespace: sheol 45 | labels: 46 | app: sheol-app 47 | version: v1 48 | spec: 49 | replicas: 1 50 | selector: 51 | matchLabels: 52 | app: sheol-app 53 | template: 54 | metadata: 55 | labels: 56 | app: sheol-app 57 | annotations: 58 | consul.hashicorp.com/connect-inject: 'true' 59 | consul.hashicorp.com/transparent-proxy: 'true' 60 | consul.hashicorp.com/service-tags: 'dc4' 61 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 62 | spec: 63 | serviceAccountName: sheol-app 64 | containers: 65 | - name: sheol-app 66 | image: nicholasjackson/fake-service:v0.26.0 67 | ports: 68 | - containerPort: 10000 69 | env: 70 | - name: 'LISTEN_ADDR' 71 | value: '0.0.0.0:10000' 72 | - name: 'UPSTREAM_URIS' 73 | value: 'https://github.com:443/, https://api.github.com' 74 | - name: 'NAME' 75 | value: 'sheol-app (DC4)' 76 | - name: 'MESSAGE' 77 | value: '

The sheol-app Application

' 78 | - name: 'SERVER_TYPE' 79 | value: 'http' 80 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 81 | value: '2s' 82 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/sheol_app1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: sheol-app1 5 | namespace: sheol-app1 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: sheol-app1 13 | namespace: sheol-app1 14 | spec: 15 | protocol: tcp 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: sheol-app1 23 | namespace: sheol-app1 24 | # annotations: # This would group each of the services together IF they were in the same namespace. Meh. 25 | # service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 26 | # service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" 27 | # service.beta.kubernetes.io/aws-load-balancer-shared: "true" 28 | spec: 29 | type: LoadBalancer 30 | selector: 31 | app: sheol-app1 32 | ports: 33 | - name: http 34 | protocol: TCP 35 | port: 8005 # Should be the port the Kube LB will listen on to forward to TCP/10000 36 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 37 | 38 | --- 39 | 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | name: sheol-app1 44 | namespace: sheol-app1 45 | labels: 46 | app: sheol-app1 47 | version: v1 48 | spec: 49 | replicas: 1 50 | selector: 51 | matchLabels: 52 | app: sheol-app1 53 | template: 54 | metadata: 55 | labels: 56 | app: sheol-app1 57 | annotations: 58 | consul.hashicorp.com/connect-inject: 'true' 59 | consul.hashicorp.com/transparent-proxy: 'true' 60 | consul.hashicorp.com/service-tags: 'dc4' 61 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this exclusion the FakeService UI is shitcanned. 
62 | spec: 63 | serviceAccountName: sheol-app1 64 | containers: 65 | - name: sheol-app1 66 | image: nicholasjackson/fake-service:v0.26.0 67 | ports: 68 | - containerPort: 10000 69 | env: 70 | - name: 'LISTEN_ADDR' 71 | value: '0.0.0.0:10000' 72 | - name: 'UPSTREAM_URIS' 73 | value: 'https://example.com' 74 | - name: 'NAME' 75 | value: 'sheol-app1 (DC4)' 76 | - name: 'MESSAGE' 77 | value: '

The sheol-app1 Application

' 78 | - name: 'SERVER_TYPE' 79 | value: 'http' 80 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 81 | value: '2s' 82 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/sheol_app2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: sheol-app2 5 | namespace: sheol-app2 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: sheol-app2 13 | namespace: sheol-app2 14 | spec: 15 | protocol: tcp 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: sheol-app2 23 | namespace: sheol-app2 24 | # annotations: # This would group each of the services together IF they were in the same namespace. Meh. 25 | # service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 26 | # service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: "ip" 27 | # service.beta.kubernetes.io/aws-load-balancer-shared: "true" 28 | spec: 29 | type: LoadBalancer # This may not work with k3s, since this was taken from an AKS config 30 | selector: 31 | app: sheol-app2 32 | ports: 33 | - name: http 34 | protocol: TCP 35 | port: 8006 # Should be the port the Kube LB will listen on to forward to TCP/10000 36 | targetPort: 10000 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 37 | 38 | --- 39 | 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | name: sheol-app2 44 | namespace: sheol-app2 45 | labels: 46 | app: sheol-app2 47 | version: v1 48 | spec: 49 | replicas: 1 50 | selector: 51 | matchLabels: 52 | app: sheol-app2 53 | template: 54 | metadata: 55 | labels: 56 | app: sheol-app2 57 | annotations: 58 | consul.hashicorp.com/connect-inject: 'true' 59 | consul.hashicorp.com/transparent-proxy: 'true' 60 | consul.hashicorp.com/service-tags: 'dc4' 61 | consul.hashicorp.com/transparent-proxy-exclude-inbound-ports: "10000" # Without this 
exclusion the FakeService UI is shitcanned. 62 | spec: 63 | serviceAccountName: sheol-app2 64 | containers: 65 | - name: sheol-app2 66 | image: nicholasjackson/fake-service:v0.26.0 67 | ports: 68 | - containerPort: 10000 69 | env: 70 | - name: 'LISTEN_ADDR' 71 | value: '0.0.0.0:10000' 72 | - name: 'UPSTREAM_URIS' 73 | value: 'https://www.google.com/' 74 | - name: 'NAME' 75 | value: 'sheol-app2 (DC4)' 76 | - name: 'MESSAGE' 77 | value: '

The sheol-app2 Application

' 78 | - name: 'SERVER_TYPE' 79 | value: 'http' 80 | - name: 'HTTP_CLIENT_REQUEST_TIMEOUT' 81 | value: '2s' 82 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/unicorn-backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-backend 28 | ports: 29 | - port: 10001 30 | targetPort: 10001 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'dc4' 55 | consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-backend 58 | containers: 59 | - name: unicorn-backend 60 | image: nicholasjackson/fake-service:v0.26.0 61 | ports: 62 | - containerPort: 10001 63 | readinessProbe: 64 | httpGet: 65 | scheme: HTTP 66 | path: / 67 | port: 10001 68 | initialDelaySeconds: 10 69 | periodSeconds: 5 70 | env: 71 | - name: 'LISTEN_ADDR' 72 | value: '0.0.0.0:10001' 73 | - name: 'NAME' 74 | value: 'unicorn-backend (DC4)' 75 | - name: 'MESSAGE' 76 | value: 'peekaboo' 77 | - name: 'SERVER_TYPE' 78 | value: 'grpc' 79 | - name: 'TIMING_50_PERCENTILE' 
80 | value: '30ms' 81 | - name: 'TIMING_90_PERCENTILE' 82 | value: '60ms' 83 | - name: 'TIMING_99_PERCENTILE' 84 | value: '90ms' 85 | - name: 'TIMING_VARIANCE' 86 | value: '10' 87 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/unicorn-taranis-backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-backend 28 | ports: 29 | - port: 10002 30 | targetPort: 10002 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'dc4-taranis' 55 | consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-backend 58 | containers: 59 | - name: unicorn-backend 60 | image: nicholasjackson/fake-service:v0.26.0 61 | ports: 62 | - containerPort: 10002 63 | readinessProbe: 64 | httpGet: 65 | scheme: HTTP 66 | path: / 67 | port: 10002 68 | initialDelaySeconds: 10 69 | periodSeconds: 5 70 | env: 71 | - name: 'LISTEN_ADDR' 72 | value: '0.0.0.0:10002' 73 | # - name: 'UPSTREAM_URIS' 74 | # value: 'grpc://127.0.0.1:11000' 75 | - name: 'NAME' 76 | 
value: 'unicorn-backend (DC4 Taranis)' 77 | - name: 'MESSAGE' 78 | value: 'peekaboo' 79 | - name: 'SERVER_TYPE' 80 | value: 'grpc' 81 | - name: 'TIMING_50_PERCENTILE' 82 | value: '30ms' 83 | - name: 'TIMING_90_PERCENTILE' 84 | value: '60ms' 85 | - name: 'TIMING_99_PERCENTILE' 86 | value: '90ms' 87 | - name: 'TIMING_VARIANCE' 88 | value: '10' 89 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/unicorn-taranis-tp_backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-tp-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-tp-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-tp-backend 28 | ports: 29 | - port: 10002 30 | targetPort: 10002 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-tp-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-tp-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-tp-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-tp-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'dc4-taranis,transparent-proxy' 55 | consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-tp-backend 58 | containers: 59 | - name: unicorn-tp-backend 60 | image: nicholasjackson/fake-service:v0.26.0 61 | ports: 62 | - containerPort: 10002 63 | readinessProbe: 64 | httpGet: 65 | scheme: HTTP 66 | path: / 67 | port: 
10002 68 | initialDelaySeconds: 10 69 | periodSeconds: 5 70 | env: 71 | - name: 'LISTEN_ADDR' 72 | value: '0.0.0.0:10002' 73 | - name: 'NAME' 74 | value: 'unicorn-backend {Transparent} (DC4 Taranis)' 75 | - name: 'MESSAGE' 76 | value: 'peekaboo' 77 | - name: 'SERVER_TYPE' 78 | value: 'grpc' 79 | - name: 'TIMING_50_PERCENTILE' 80 | value: '30ms' 81 | - name: 'TIMING_90_PERCENTILE' 82 | value: '60ms' 83 | - name: 'TIMING_99_PERCENTILE' 84 | value: '90ms' 85 | - name: 'TIMING_VARIANCE' 86 | value: '10' 87 | -------------------------------------------------------------------------------- /kube/configs/dc4/services/unicorn-tp_backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: unicorn-tp-backend 5 | namespace: unicorn 6 | 7 | --- 8 | 9 | apiVersion: consul.hashicorp.com/v1alpha1 10 | kind: ServiceDefaults 11 | metadata: 12 | name: unicorn-tp-backend 13 | namespace: unicorn 14 | spec: 15 | protocol: http 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: unicorn-tp-backend 23 | namespace: unicorn 24 | spec: 25 | type: NodePort 26 | selector: 27 | app: unicorn-tp-backend 28 | ports: 29 | - port: 10001 30 | targetPort: 10001 31 | 32 | --- 33 | 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: unicorn-tp-backend 38 | namespace: unicorn 39 | labels: 40 | app: unicorn-tp-backend 41 | version: v1 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: unicorn-tp-backend 47 | template: 48 | metadata: 49 | labels: 50 | app: unicorn-tp-backend 51 | annotations: 52 | consul.hashicorp.com/connect-inject: 'true' 53 | consul.hashicorp.com/transparent-proxy: 'false' 54 | consul.hashicorp.com/service-tags: 'dc4,transparent-proxy' 55 | consul.hashicorp.com/service-meta-version: 'v1' 56 | spec: 57 | serviceAccountName: unicorn-tp-backend 58 | containers: 59 | - name: unicorn-tp-backend 60 | image: 
nicholasjackson/fake-service:v0.26.0 61 | # imagePullPolicy: Always # Probably don't need this 62 | ports: 63 | - containerPort: 10001 64 | readinessProbe: 65 | httpGet: 66 | scheme: HTTP 67 | path: / 68 | port: 10001 69 | initialDelaySeconds: 10 70 | periodSeconds: 5 71 | env: 72 | - name: 'LISTEN_ADDR' 73 | value: '0.0.0.0:10001' 74 | - name: 'NAME' 75 | value: 'unicorn-backend {Transparent} (DC4)' 76 | - name: 'MESSAGE' 77 | value: 'peekaboo' 78 | - name: 'SERVER_TYPE' 79 | value: 'grpc' 80 | - name: 'TIMING_50_PERCENTILE' 81 | value: '30ms' 82 | - name: 'TIMING_90_PERCENTILE' 83 | value: '60ms' 84 | - name: 'TIMING_99_PERCENTILE' 85 | value: '90ms' 86 | - name: 'TIMING_VARIANCE' 87 | value: '10' 88 | -------------------------------------------------------------------------------- /kube/configs/dc4/tgw/dc4_sheol-tgw.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: TerminatingGateway 3 | metadata: 4 | name: sheol-tgw 5 | namespace: sheol 6 | spec: 7 | services: 8 | # - name: '*' 9 | # namespace: sheol 10 | # - name: '*' 11 | # namespace: sheol-app1 12 | # - name: '*' 13 | # namespace: sheol-app2 14 | - name: sheol-ext 15 | namespace: sheol 16 | - name: sheol-ext1 17 | namespace: sheol-app1 18 | - name: sheol-ext2 19 | namespace: sheol-app2 -------------------------------------------------------------------------------- /kube/configs/peering/mgw-peering.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: Mesh 3 | metadata: 4 | name: mesh 5 | namespace: default # Must be default - config applies to all namespaces within a partition. 
6 | spec: 7 | peering: 8 | peerThroughMeshGateways: true -------------------------------------------------------------------------------- /kube/configs/peering/peering-acceptor_dc3-peeringtest_dc4-peeringtest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: PeeringAcceptor 3 | metadata: 4 | name: dc3-peering-test 5 | namespace: consul 6 | spec: 7 | peer: 8 | secret: 9 | name: "peering-token-dc4-peeringtest-dc3-peeringtest" 10 | key: "data" 11 | backend: "kubernetes" -------------------------------------------------------------------------------- /kube/configs/peering/peering-dialer_dc3-peeringtest_dc4-peeringtest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: PeeringDialer 3 | metadata: 4 | name: dc4-peering-test 5 | namespace: consul 6 | spec: 7 | peer: 8 | secret: 9 | name: "peering-token-dc4-peeringtest-dc3-peeringtest" 10 | key: "data" 11 | backend: "kubernetes" -------------------------------------------------------------------------------- /kube/configs/peering/peering_dc3-default_dc1-default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: PeeringDialer 3 | metadata: 4 | name: dc1-default 5 | spec: 6 | peer: 7 | secret: 8 | name: "peering-token-dc1-default-dc3-default" 9 | key: "data" 10 | backend: "kubernetes" -------------------------------------------------------------------------------- /kube/configs/peering/peering_dc3-default_dc1-unicorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: PeeringDialer 3 | metadata: 4 | name: dc1-unicorn 5 | spec: 6 | peer: 7 | secret: 8 | name: "peering-token-dc3-default-dc1-unicorn" 9 | key: "data" 10 | backend: "kubernetes" 
-------------------------------------------------------------------------------- /kube/configs/peering/peering_dc3-default_dc2-unicorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: consul.hashicorp.com/v1alpha1 2 | kind: PeeringDialer 3 | metadata: 4 | name: dc2-unicorn 5 | spec: 6 | peer: 7 | secret: 8 | name: "peering-token-dc3-default-dc2-unicorn" 9 | key: "data" 10 | backend: "kubernetes" -------------------------------------------------------------------------------- /kube/helm/dc3-p1-helm-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | enabled: false 3 | name: consul-cernunnos # Must be unique (not collide) with any other clusters in the same Consul DC. Because K8s auth. Oof 4 | domain: consul 5 | datacenter: dc3 6 | image: hashicorp/consul-enterprise:1.16.1-ent # Select an image from: https://hub.docker.com/r/hashicorp/consul-enterprise/tags 7 | # imageEnvoy: "envoyproxy/envoy:v1.25.1" # Pulls the latest supported when unspecified. 8 | # imageK8S: hashicorp/consul-k8s-control-plane:1.2.0-rc1 # If an RC version of Consul is used, this needs to also be the matching RC version. And the Helm chart version will need to be the dev version. 9th circle of version matching Hell. 9 | # imageConsulDataplane: "hashicorp/consul-dataplane:1.1.0" # Pulls the latest when unspecified. 10 | 11 | # gossipEncryption: 12 | # secretName: consul-gossip-encryption-key 13 | # secretKey: key 14 | 15 | # ^^^ I shouldn't need a gossip key when it's a dataplane cluster. 
16 | 17 | acls: 18 | manageSystemACLs: true 19 | bootstrapToken: 20 | secretName: consul-partitions-acl-token # Partition token exported from DC3 and imported via k3d-config.sh 21 | secretKey: token 22 | 23 | tls: 24 | enabled: true # Required for Peering 25 | httpsOnly: false # Turns on the HTTP UI 26 | enableAutoEncrypt: true 27 | caCert: # Root CA Cert and Key exported from DC3 and imported via k3d-config.sh 28 | secretName: consul-ca-cert 29 | secretKey: tls.crt 30 | caKey: 31 | secretName: consul-ca-key 32 | secretKey: tls.key 33 | 34 | enableConsulNamespaces: true # CRDs won't setup Consul namespaces correctly if this is false (default) 35 | 36 | peering: 37 | enabled: true 38 | 39 | adminPartitions: 40 | enabled: true 41 | name: "cernunnos" 42 | 43 | enterpriseLicense: 44 | secretName: consul-license 45 | secretKey: key 46 | enableLicenseAutoload: true 47 | 48 | externalServers: 49 | enabled: true 50 | # hosts: ["{{ .Values.DC3_LB_IP }}"] 51 | httpsPort: 443 # DC3 HTTPS API port (consul-ui loadbalancer) 52 | grpcPort: 8502 # DC3 exposed gRPC port (consul-expose-servers loadbalancer) 53 | # k8sAuthMethodHost: "{{ .Values.DC3_LB_IP }}" 54 | tlsServerName: "server.dc3.consul" 55 | skipServerWatch: true 56 | 57 | meshGateway: 58 | enabled: True 59 | replicas: 1 60 | wanAddress: 61 | source: "Service" 62 | port: 8443 63 | service: 64 | type: LoadBalancer 65 | port: 8443 66 | 67 | ui: 68 | enabled: false # The UI can only be enabled on Servers. This is a Consul Dataplane + Partition cluster. 69 | 70 | 71 | # prometheus: 72 | # enabled: true 73 | 74 | # How are we going to get UI prometheus stats from services in this kube cluster when the prometheus server is in DC3 cluster? 75 | # Who knows. 
76 | 77 | apiGateway: 78 | enabled: false 79 | 80 | connectInject: 81 | enabled: true 82 | default: false # Default 83 | transparentProxy: 84 | defaultEnabled: true # Default 85 | cni: 86 | enabled: true 87 | consulNamespaces: 88 | consulDestinationNamespace: "default" # Ignored when mirroringK8S is true 89 | mirroringK8S: true 90 | mirroringK8SPrefix: "" 91 | metrics: 92 | defaultEnabled: true 93 | defaultEnableMerging: false 94 | 95 | syncCatalog: 96 | enabled: true 97 | k8sPrefix: null 98 | k8sDenyNamespaces: ["kube-system", "kube-public", "unicorn"] 99 | consulNamespaces: 100 | mirroringK8S: true 101 | mirroringK8SPrefix: "" 102 | # addK8SNamespaceSuffix: false # Leave this disabled. It's a TRAP!!! if the service name matches the pod name, it'll get stomped. 103 | 104 | # apiGateway: 105 | # enabled: true 106 | # imageEnvoy: envoyproxy/envoy:v1.23.1 # changed from 'latest' 107 | # image: hashicorppreview/consul-api-gateway:0.5-dev-b2f0fd134ce4a95f9097942cbfb73dc11c260f82 108 | # managedGatewayClass: 109 | # enabled: true 110 | # serviceType: LoadBalancer 111 | # copyAnnotations: 112 | # service: 113 | # annotations: | 114 | # - service.beta.kubernetes.io/aws-load-balancer-name 115 | # - service.beta.kubernetes.io/aws-load-balancer-type 116 | # - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type 117 | # - service.beta.kubernetes.io/aws-load-balancer-scheme -------------------------------------------------------------------------------- /kube/helm/dc4-helm-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | name: consul 3 | domain: consul 4 | datacenter: dc4 5 | image: hashicorp/consul-enterprise:1.16.1-ent # Select an image from: https://hub.docker.com/r/hashicorp/consul-enterprise/tags 6 | # imageEnvoy: "envoyproxy/envoy:v1.25.1" # Pulls the latest supported when unspecified. 
7 | # imageK8S: hashicorp/consul-k8s-control-plane:1.2.0-rc1 # If an RC version of Consul is used, this needs to also be the matching RC version. And the Helm chart version will need to be the dev version. 9th circle of version matching Hell. 8 | # imageConsulDataplane: "hashicorp/consul-dataplane:1.1.0" # Pulls the latest when unspecified. 9 | 10 | gossipEncryption: 11 | secretName: consul-gossip-encryption-key 12 | secretKey: key 13 | 14 | acls: 15 | manageSystemACLs: true 16 | bootstrapToken: 17 | secretName: consul-bootstrap-acl-token # This is set to "root" in the doctorconsul deployment. 18 | secretKey: key 19 | 20 | tls: 21 | enabled: true # Required for Peering 22 | httpsOnly: false # Turns on the HTTP UI 23 | # caCert: # In case we want to provide a root CA 24 | # secretName: consul-ca-cert 25 | # secretKey: tls.crt 26 | 27 | enableConsulNamespaces: true # CRDs won't setup Consul namespaces correctly if this is false (default) 28 | 29 | peering: 30 | enabled: true 31 | 32 | adminPartitions: 33 | enabled: true 34 | name: "default" 35 | 36 | federation: 37 | enabled: false 38 | 39 | enterpriseLicense: 40 | secretName: consul-license 41 | secretKey: key 42 | enableLicenseAutoload: true 43 | 44 | server: 45 | replicas: 1 46 | bootstrapExpect: 1 47 | connect: true 48 | exposeService: 49 | enabled: true 50 | type: LoadBalancer 51 | # exposeGossipAndRPCPorts: true # 52 | resources: 53 | requests: 54 | memory: "100Mi" 55 | cpu: "100m" 56 | limits: 57 | memory: "200Mi" # After adding a second kube cluster for a Consul Dataplane cluster, the UI crashed OOM on server pod on UI refresh. This fixed it. 
58 | cpu: "100m" 59 | 60 | dns: 61 | enabled: true 62 | enableRedirection: true 63 | 64 | client: 65 | enabled: false 66 | grpc: true 67 | 68 | meshGateway: 69 | enabled: True 70 | replicas: 1 71 | wanAddress: 72 | source: "Service" 73 | port: 8443 74 | service: 75 | type: LoadBalancer 76 | port: 8443 77 | 78 | ui: 79 | enabled: true 80 | # service: 81 | # type: LoadBalancer 82 | # port: 83 | # http: 80 84 | # # https: 443 85 | metrics: 86 | enabled: true 87 | provider: "prometheus" 88 | baseURL: http://prometheus-server 89 | # baseURL: http://192.169.7.200:9090 90 | 91 | terminatingGateways: 92 | enabled: true 93 | defaults: 94 | replicas: 1 95 | gateways: 96 | - name: sheol-tgw 97 | consulNamespace: "sheol" 98 | 99 | prometheus: 100 | enabled: true 101 | 102 | apiGateway: 103 | enabled: false 104 | 105 | connectInject: 106 | enabled: true 107 | default: false # Default 108 | transparentProxy: 109 | defaultEnabled: true # Default 110 | cni: 111 | enabled: true 112 | # enabled: false 113 | consulNamespaces: 114 | consulDestinationNamespace: "default" # Ignored when mirroringK8S is true 115 | mirroringK8S: true 116 | mirroringK8SPrefix: "" 117 | metrics: 118 | defaultEnabled: true 119 | defaultEnableMerging: false 120 | 121 | syncCatalog: 122 | enabled: true 123 | k8sPrefix: null 124 | k8sDenyNamespaces: ["kube-system", "kube-public", "unicorn"] 125 | consulNamespaces: 126 | mirroringK8S: true 127 | mirroringK8SPrefix: "" 128 | # addK8SNamespaceSuffix: false # Leave this disabled. It's a TRAP!!! if the service name matches the pod name, it'll get stomped. 129 | 130 | controller: # Enabled CRDs 131 | enabled: true -------------------------------------------------------------------------------- /kube/helm/dc4-p1-helm-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | enabled: false 3 | name: consul-taranis # Must be unique (not collide) with any other clusters in the same Consul DC. Because K8s auth. 
Oof 4 | domain: consul 5 | datacenter: dc4 6 | image: hashicorp/consul-enterprise:1.16.1-ent # Select an image from: https://hub.docker.com/r/hashicorp/consul-enterprise/tags 7 | # imageEnvoy: "envoyproxy/envoy:v1.25.1" # Pulls the latest supported when unspecified. 8 | # imageK8S: hashicorp/consul-k8s-control-plane:1.2.0-rc1 # If an RC version of Consul is used, this needs to also be the matching RC version. And the Helm chart version will need to be the dev version. 9th circle of version matching Hell. 9 | # imageConsulDataplane: "hashicorp/consul-dataplane:1.1.0" # Pulls the latest when unspecified. 10 | 11 | # gossipEncryption: 12 | # secretName: consul-gossip-encryption-key 13 | # secretKey: key 14 | 15 | # ^^^ I shouldn't need a gossip key when it's a dataplane cluster 16 | 17 | acls: 18 | manageSystemACLs: true 19 | bootstrapToken: 20 | secretName: consul-partitions-acl-token # Partition token exported from DC4 and imported via k3d-config.sh 21 | secretKey: token 22 | 23 | tls: 24 | enabled: true # Required for Peering 25 | httpsOnly: false # Turns on the HTTP UI 26 | enableAutoEncrypt: true 27 | caCert: # Root CA Cert and Key exported from DC4 and imported via k3d-config.sh 28 | secretName: consul-ca-cert 29 | secretKey: tls.crt 30 | caKey: 31 | secretName: consul-ca-key 32 | secretKey: tls.key 33 | 34 | enableConsulNamespaces: true # CRDs won't setup Consul namespaces correctly if this is false (default) 35 | 36 | peering: 37 | enabled: true 38 | 39 | adminPartitions: 40 | enabled: true 41 | name: "taranis" 42 | 43 | enterpriseLicense: 44 | secretName: consul-license 45 | secretKey: key 46 | enableLicenseAutoload: true 47 | 48 | externalServers: 49 | enabled: true 50 | # hosts: ["{{ .Values.DC4_LB_IP }}"] 51 | httpsPort: 443 # DC4 HTTPS API port (consul-ui loadbalancer) 52 | grpcPort: 8502 # DC4 exposed gRPC port (consul-expose-servers loadbalancer) 53 | # k8sAuthMethodHost: "{{ .Values.DC4_LB_IP }}" 54 | tlsServerName: "server.dc4.consul" 55 | 
skipServerWatch: true 56 | 57 | meshGateway: 58 | enabled: True 59 | replicas: 1 60 | wanAddress: 61 | source: "Service" 62 | port: 8443 63 | service: 64 | type: LoadBalancer 65 | port: 8443 66 | 67 | ui: 68 | enabled: false # The UI can only be enabled on Servers. This is a Consul Dataplane + Partition cluster. 69 | 70 | 71 | # prometheus: 72 | # enabled: true 73 | 74 | # How are we going to get UI prometheus stats from services in this kube cluster when the prometheus server is in DC4 cluster? 75 | # Who knows. 76 | 77 | apiGateway: 78 | enabled: false 79 | 80 | connectInject: 81 | enabled: true 82 | default: false # Default 83 | transparentProxy: 84 | defaultEnabled: true # Default 85 | cni: 86 | enabled: true 87 | consulNamespaces: 88 | consulDestinationNamespace: "default" # Ignored when mirroringK8S is true 89 | mirroringK8S: true 90 | mirroringK8SPrefix: "" 91 | metrics: 92 | defaultEnabled: true 93 | defaultEnableMerging: false 94 | 95 | syncCatalog: 96 | enabled: true 97 | k8sPrefix: null 98 | k8sDenyNamespaces: ["kube-system", "kube-public", "unicorn"] 99 | consulNamespaces: 100 | mirroringK8S: true 101 | mirroringK8SPrefix: "" 102 | # addK8SNamespaceSuffix: false # Leave this disabled. It's a TRAP!!! if the service name matches the pod name, it'll get stomped. 
103 | 104 | # apiGateway: 105 | # enabled: true 106 | # imageEnvoy: envoyproxy/envoy:v1.23.1 # changed from 'latest' 107 | # image: hashicorppreview/consul-api-gateway:0.5-dev-b2f0fd134ce4a95f9097942cbfb73dc11c260f82 108 | # managedGatewayClass: 109 | # enabled: true 110 | # serviceType: LoadBalancer 111 | # copyAnnotations: 112 | # service: 113 | # annotations: | 114 | # - service.beta.kubernetes.io/aws-load-balancer-name 115 | # - service.beta.kubernetes.io/aws-load-balancer-type 116 | # - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type 117 | # - service.beta.kubernetes.io/aws-load-balancer-scheme -------------------------------------------------------------------------------- /kube/prometheus/dc3-prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-ingress 5 | namespace: consul 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: prometheus 10 | ports: 11 | - protocol: TCP 12 | port: 9090 # Should be the port the Kube LB will listen on to forward to TCP/10000 13 | targetPort: 9090 # This should line up with the FakeService LISTEN_ADDR: 0.0.0.0:10000 -------------------------------------------------------------------------------- /kube/vault/dc3-vault-helm-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | enabled: true 3 | 4 | server: 5 | standalone: 6 | enabled: true 7 | config: | 8 | ui = true 9 | 10 | listener "tcp" { 11 | tls_disable = 1 12 | address = "[::]:8200" 13 | cluster_address = "[::]:8201" 14 | } 15 | storage "file" { 16 | path = "/vault/data" 17 | } 18 | 19 | service: 20 | enabled: true 21 | 22 | injector: 23 | enabled: true 24 | authPath: auth/kubernetes-dc3 25 | 26 | dataStorage: 27 | enabled: true 28 | size: 10Gi 29 | storageClass: null 30 | accessMode: ReadWriteOnce 31 | 32 | ui: 33 | enabled: true 34 | serviceType: LoadBalancer 
-------------------------------------------------------------------------------- /kube/vault/dc4-vault-helm-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | enabled: true 3 | 4 | server: 5 | standalone: 6 | enabled: true 7 | config: | 8 | ui = true 9 | 10 | listener "tcp" { 11 | tls_disable = 1 12 | address = "[::]:8200" 13 | cluster_address = "[::]:8201" 14 | } 15 | storage "file" { 16 | path = "/vault/data" 17 | } 18 | 19 | service: 20 | enabled: true 21 | 22 | injector: 23 | enabled: true 24 | authPath: auth/kubernetes-dc4 25 | 26 | dataStorage: 27 | enabled: true 28 | size: 10Gi 29 | storageClass: null 30 | accessMode: ReadWriteOnce 31 | 32 | ui: 33 | enabled: true 34 | serviceType: LoadBalancer -------------------------------------------------------------------------------- /scripts/apigw-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # ==================================================================================== 6 | # Install Consul API Gateway (DC3) 7 | # ==================================================================================== 8 | 9 | # Well this is awkward. You don't actually install the Consul API GW anymore. It's all handled through Kube YAML and the Gateway API. 
10 | 11 | # ==================================================================================== 12 | # Configuring the Gateway API Resource 13 | # ==================================================================================== 14 | 15 | echo -e "${GRN}" 16 | echo -e "------------------------------------------" 17 | echo -e " Configuring the Gateway API Resources" 18 | echo -e "------------------------------------------${NC}" 19 | 20 | echo -e "" 21 | echo -e "${GRN}DC3 Define the Gateway config ${NC}" 22 | kubectl apply --context $KDC3 --namespace consul -f ./kube/configs/dc3/api-gw/gateway-consul_apigw.yaml 23 | 24 | # ------------------------------------------ 25 | # Output the Consul APIGW LB address and ports 26 | # ------------------------------------------ 27 | 28 | # echo "" 29 | # echo -e "${GRN}DC3 Consul API Gateway LoadBalancer addresses:${NC}" 30 | 31 | # if $ARG_EKSONLY; 32 | # then 33 | # DC3_CONSUL_APIG_ADDR=$(kubectl get svc consul-api-gateway -nconsul --context $KDC3 -o json | jq -r '.status.loadBalancer.ingress[0].hostname') 34 | # # This should be correct for EKS. Need to confirm. 35 | # else 36 | # DC3_CONSUL_APIG_ADDR=$(kubectl get svc consul-api-gateway -nconsul --context $KDC3 -o json | jq -r '.status.loadBalancer.ingress[0].ip') 37 | # fi 38 | 39 | # # Create watches for these ^^^. Even in k3d it's race conditioning. 
40 | 41 | # echo -e " ${YELL}Consul APIG HTTP Listener:${NC} $DC3_CONSUL_APIG_ADDR:1666" 42 | # echo -e " ${YELL}Consul APIG TCP Listener:${NC} $DC3_CONSUL_APIG_ADDR:1667" 43 | 44 | # ==================================================================================== 45 | # Configuring the Routes 46 | # ==================================================================================== 47 | 48 | echo -e "${GRN}" 49 | echo -e "------------------------------------------" 50 | echo -e " Configuring the APIGW Routes" 51 | echo -e "------------------------------------------${NC}" 52 | 53 | echo -e "" 54 | echo -e "${GRN}DC3: Add HTTPRoutes for unicorn-ingress ${NC}" 55 | kubectl apply --context $KDC3 -f ./kube/configs/dc3/api-gw/httproute-unicorn_ingress.yaml 56 | 57 | echo -e "" 58 | echo -e "${GRN}DC3: Add TCPRoute for externalz-tcp-ingress ${NC}" 59 | kubectl apply --context $KDC3 -f ./kube/configs/dc3/api-gw/tcproute-externalz_tcp_ingress.yaml 60 | 61 | 62 | # ==================================================================================== 63 | # Configure Intentions 64 | # ==================================================================================== 65 | 66 | echo -e "${GRN}" 67 | echo -e "------------------------------------------" 68 | echo -e " API GW Intentions " 69 | echo -e "------------------------------------------${NC}" 70 | 71 | echo -e "" 72 | echo -e "${GRN}DC3: Add intention for Consul APIGW -> Unicorn-frontend ${NC}" 73 | kubectl apply --context $KDC3 --namespace unicorn -f ./kube/configs/dc3/api-gw/intention-dc3_default-unicorn_frontend.yaml 74 | 75 | echo -e "" 76 | echo -e "${GRN}DC3: Add intention for Consul APIGW -> Unicorn-ssg-frontend ${NC}" 77 | kubectl apply --context $KDC3 --namespace unicorn -f ./kube/configs/dc3/api-gw/intention-dc3_default-unicorn_ssg_frontend.yaml 78 | 79 | echo -e "" 80 | echo -e "${GRN}DC3: Add intention for Consul APIGW -> externalz-http ${NC}" 81 | kubectl apply --context $KDC3 --namespace externalz -f 
./kube/configs/dc3/api-gw/intention-dc3_default-externalz_http.yaml 82 | 83 | echo -e "" 84 | echo -e "${GRN}DC3: Add intention for Consul APIGW -> externalz-tcp ${NC}" 85 | kubectl apply --context $KDC3 --namespace externalz -f ./kube/configs/dc3/api-gw/intention-dc3_default-externalz_tcp.yaml 86 | 87 | 88 | echo "" -------------------------------------------------------------------------------- /scripts/app-banana_split.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The Banana split application is for showing route splitting across multiple upstreams (local and peered) 4 | 5 | # Banana Architecture 6 | # Neapolitan/banana-split/cernunnos/dc3 (downstream) 7 | # ice-cream (virtual service w/ splitter) 8 | # ice-cream-vanilla/banana-split/cernunnos/dc3 (34%) 9 | # ice-cream-strawberry/banana-split/cernunnos/dc3 (33%) 10 | # ice-cream-chocolate/banana-split/cernunnos/dc3 (33%) 11 | 12 | echo -e "${GRN}" 13 | echo -e "==========================================" 14 | echo -e " Banana Split Application" 15 | echo -e "==========================================${NC}" 16 | 17 | echo -e "" 18 | echo -e "${GRN}DC3 (cernunnos): Create Banana namespace${NC}" 19 | 20 | kubectl create namespace banana-split --context $KDC3_P1 21 | 22 | set -e 23 | 24 | echo "" 25 | 26 | # ------------------------------------------ 27 | # Exported-services 28 | # ------------------------------------------ 29 | 30 | # We'll need exports for upstreams that live in the other clusters.
31 | # These need to be added the previously configured exported services files (because 1 per partition) 32 | 33 | # ========================================== 34 | # Services 35 | # ========================================== 36 | 37 | echo -e "${GRN}" 38 | echo -e "------------------------------------------" 39 | echo -e " Launch Consul Service Configs" 40 | echo -e "------------------------------------------${NC}" 41 | 42 | # ---------------- 43 | # neapolitan (downstream) 44 | # ---------------- 45 | 46 | echo -e "" 47 | echo -e "${GRN}DC3 (Cernunnos): Apply Neapolitan (downstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 48 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-neapolitan.yaml 49 | 50 | # ---------------- 51 | # Upstreams 52 | # ---------------- 53 | 54 | echo -e "" 55 | echo -e "${GRN}DC3 (Cernunnos): Apply Ice Cream (Vanilla) (upstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 56 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-icecream_vanilla.yaml 57 | 58 | echo -e "" 59 | echo -e "${GRN}DC3 (Cernunnos): Apply Ice Cream (Strawberry) (upstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 60 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-icecream_strawberry.yaml 61 | 62 | echo -e "" 63 | echo -e "${GRN}DC3 (Cernunnos): Apply Ice Cream (Chocolate) (upstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 64 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-icecream_chocolate.yaml 65 | 66 | 67 | # ------------------------------------------ 68 | # Intentions 69 | # ------------------------------------------ 70 | 71 | # echo -e "${GRN}" 72 | # echo -e "------------------------------------------" 73 | # echo -e " Intentions" 74 | # echo -e "------------------------------------------${NC}" 75 | 76 | echo -e "" 77 | echo -e "${GRN}DC3 (Cernunnos): Intention for 
DC3/cernunnos/banana-split/ice-cream ${NC}" 78 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/intentions/dc3-cernunnos-banana_split-ice_cream.yaml 79 | 80 | echo -e "" 81 | 82 | # ------------------------------------------ 83 | # Service Resolver + Splitter 84 | # ------------------------------------------ 85 | 86 | echo -e "${GRN}DC3 (default): Apply service-splitter: ice-cream ${NC}" 87 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/service-splitter/service-splitter-ice_cream.yaml 88 | 89 | # echo -e "${GRN}DC3 (default): Apply service-resolver: ice-cream ${NC}" 90 | # kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/service-resolver/service-resolver-ice_cream.yaml 91 | 92 | # Service-resolver is not used in this case. See the manual for more details. 93 | 94 | 95 | # Delete command: 96 | 97 | # kubectl delete --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-neapolitan.yaml 98 | # kubectl delete --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-icecream_vanilla.yaml 99 | # kubectl delete --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-icecream_strawberry.yaml 100 | # kubectl delete --context $KDC3_P1 -f ./kube/configs/dc3/services/banana_split-icecream_chocolate.yaml 101 | # kubectl delete --context $KDC3_P1 -f ./kube/configs/dc3/intentions/dc3-cernunnos-banana_split-ice_cream.yaml 102 | # kubectl delete --context $KDC3_P1 -f ./kube/configs/dc3/service-splitter/service-splitter-ice_cream.yaml 103 | -------------------------------------------------------------------------------- /scripts/app-paris.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo -e "${GRN}" 4 | echo -e "==========================================" 5 | echo -e " Install Paris Application" 6 | echo -e "==========================================${NC}" 7 | 8 | echo -e "" 9 | echo -e "${GRN}DC3 (cernunnos): Create paris namespace${NC}" 10 | 11 | kubectl create namespace
paris --context $KDC3_P1 12 | 13 | set -e 14 | 15 | echo "" 16 | echo "Permissive mode requires a Mesh config to exist that permits Permissive mode at a global level." 17 | echo "Config previously configured prior to this script: kube/configs/dc3/defaults/mesh-dc3_cernunnos.yaml" 18 | echo "" 19 | 20 | # ------------------------------------------ 21 | # Exported-services 22 | # ------------------------------------------ 23 | 24 | # echo -e "${GRN}" 25 | # echo -e "------------------------------------------" 26 | # echo -e " Exported Services" 27 | # echo -e "------------------------------------------${NC}" 28 | 29 | # Shouldn't need any exports on this service. 30 | 31 | # ========================================== 32 | # Services 33 | # ========================================== 34 | 35 | echo -e "${GRN}" 36 | echo -e "------------------------------------------" 37 | echo -e " Launch Consul Service Configs" 38 | echo -e "------------------------------------------${NC}" 39 | 40 | # ---------------- 41 | # Paris (permissive upstream) 42 | # ---------------- 43 | 44 | echo -e "" 45 | echo -e "${GRN}DC3 (Cernunnos): Apply Paris (upstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 46 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/paris-paris-cernunnos.yaml 47 | 48 | # ---------------- 49 | # Downstreams 50 | # ---------------- 51 | 52 | # Need two frontends: 53 | # • One that goes direct - no Mesh (Permissive mode) 54 | # • One that goes via the mesh 55 | 56 | echo -e "" 57 | echo -e "${GRN}DC3 (Cernunnos): Apply Pretty-Please (downstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 58 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/paris-pretty_please.yaml 59 | 60 | echo -e "" 61 | echo -e "${GRN}DC3 (Cernunnos): Apply Leroy-Jenkins (downstream) serviceAccount, serviceDefaults, service, deployment ${NC}" 62 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/services/paris-leroy_jenkins.yaml 
63 | 64 | 65 | # ------------------------------------------ 66 | # Intentions 67 | # ------------------------------------------ 68 | 69 | # echo -e "${GRN}" 70 | # echo -e "------------------------------------------" 71 | # echo -e " Intentions" 72 | # echo -e "------------------------------------------${NC}" 73 | 74 | echo -e "" 75 | echo -e "${GRN}DC3 (Cernunnos): Intention for DC3/cernunnos/paris/paris ${NC}" 76 | kubectl apply --context $KDC3_P1 -f ./kube/configs/dc3/intentions/dc3-cernunnos-paris-paris.yaml 77 | 78 | echo -e "" 79 | -------------------------------------------------------------------------------- /scripts/gke-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | source ./scripts/functions.sh 6 | 7 | # GKE stuff goes here. 8 | 9 | # gcloud auth login 10 | # Make sure it uses the business account not personal, since chrome can be logged in as either. ugh. 11 | 12 | # sudo apt-get install google-cloud-sdk-gke-gcloud-auth-plugin 13 | # Need this to authenticate to GKE 14 | 15 | gcloud config set project $GCP_PROJECT_ID 16 | 17 | gcloud container clusters list 18 | 19 | # ------------------------------------------ 20 | # Create 4 beautiful GKE clusters 21 | # ------------------------------------------ 22 | 23 | echo -e "${GRN}" 24 | echo -e "==========================================" 25 | echo -e " Create 4 beautiful GKE clusters" 26 | echo -e "==========================================${NC}" 27 | 28 | # These get created as "GKE autopilot" clusters, which evidently are not supported on Consul yet. Supposed to be in Aug 2023. 29 | # Gonna pause this project until they're supported. 
30 | 31 | create_gke_cluster "$KDC3" 32 | create_gke_cluster "$KDC3_P1" 33 | create_gke_cluster "$KDC4" 34 | create_gke_cluster "$KDC4_P1" 35 | 36 | wait # wait for all background tasks to complete 37 | 38 | # If for some reason the GKE contexts aren't created or are lost, these will recreate the contexts in the kubeconfig 39 | # 40 | # gcloud container clusters get-credentials $KDC3 --region $GCP_REGION --project $GCP_PROJECT_ID 41 | # gcloud container clusters get-credentials $KDC3_P1 --region $GCP_REGION --project $GCP_PROJECT_ID 42 | # gcloud container clusters get-credentials $KDC4 --region $GCP_REGION --project $GCP_PROJECT_ID 43 | # gcloud container clusters get-credentials $KDC4_P1 --region $GCP_REGION --project $GCP_PROJECT_ID 44 | 45 | # ------------------------------------------ 46 | # Rename Kube Contexts to match Doctor Consul DC3 / DC4 47 | # ------------------------------------------ 48 | 49 | echo -e "${GRN}" 50 | echo -e "==========================================" 51 | echo -e " Rename GKE Kube Contexts for DC3 / DC4" 52 | echo -e "==========================================${NC}" 53 | 54 | # Call the function with different cluster names 55 | update_gke_context "$KDC3" 56 | update_gke_context "$KDC3_P1" 57 | update_gke_context "$KDC4" 58 | update_gke_context "$KDC4_P1" 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /scripts/terminating-gateway.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # ------------------------------------------ 6 | # Terminating Gateway Info 7 | # ------------------------------------------ 8 | 9 | # Terminating gateway are rather tricky with ACLs enabled. To add external services to a TGW a few things need to happen: 10 | # 1. The TGW ACL role needs to be updated to include service:write for EVERY service it will be fronting. 11 | # 2. 
If the TGW exists in a non-default namespace, the token will have to be scoped into the default namespace (yeah... fun times) 12 | # (NMD): How exactly do we do this easily? Or at all... 13 | # Example: The DC4 sheol TGW below. 14 | # 3. Service-defaults or standard service registration needs to be used to register the external services 15 | # 4. The TGW config needs to reference each service it is fronting 16 | # 5. A service-intention is needed to allow the downstream service to the upstream external service. 17 | 18 | # ------------------------------------------ 19 | # DC3 Terminating Gateway 20 | # ------------------------------------------ 21 | 22 | echo -e "${GRN}" 23 | echo -e "------------------------------------------" 24 | echo -e " DC3 Terminating Gateway" 25 | echo -e "------------------------------------------${NC}" 26 | 27 | # Add the terminating-gateway ACL policy to the TGW Role, so it can actually service:write the services it fronts. DUMB. 28 | consul acl policy create -name "Terminating-Gateway-Service-Write" -rules @./kube/configs/dc3/acl/dc3_default-terminating-gateway.hcl -http-addr="$DC3" 29 | export DC3_TGW_ROLEID=$(consul acl role list -http-addr="$DC3" -format=json | jq -r '.[] | select(.Name == "consul-terminating-gateway-acl-role") | .ID') 30 | consul acl role update -id $DC3_TGW_ROLEID -policy-name "Terminating-Gateway-Service-Write" -http-addr="$DC3" 31 | 32 | echo -e "${GRN}DC3 (default): Terminating-Gateway config ${NC}" 33 | kubectl apply --context $KDC3 -f ./kube/configs/dc3/tgw/dc3_default-tgw.yaml 34 | # kubectl delete --context $KDC3 -f ./kube/configs/dc3/tgw/dc3_default-tgw.yaml 35 | 36 | 37 | # ------------------------------------------ 38 | # DC4 Terminating Gateway 39 | # ------------------------------------------ 40 | 41 | echo -e "${GRN}" 42 | echo -e "------------------------------------------" 43 | echo -e " DC4 Terminating Gateway" 44 | echo -e "------------------------------------------${NC}" 45 | 46 | # Add the 
terminating-gateway ACL policy to the TGW Role, so it can actually service:write the services it fronts. DUMB. 47 | consul acl policy create -name "Terminating-Gateway-Service-Write" -rules @./kube/configs/dc4/acl/dc4_sheol-terminating-gateway.hcl -http-addr="$DC4" 48 | export DC4_TGW_ROLEID=$(consul acl role list -http-addr="$DC4" -format=json | jq -r '.[] | select(.Name == "consul-sheol-tgw-acl-role") | .ID') 49 | consul acl role update -id $DC4_TGW_ROLEID -policy-name "Terminating-Gateway-Service-Write" -http-addr="$DC4" 50 | 51 | echo -e "${GRN}DC4 (default): Terminating-Gateway config ${NC}" 52 | kubectl apply --context $KDC4 -f ./kube/configs/dc4/tgw/dc4_sheol-tgw.yaml 53 | # kubectl delete --context $KDC4 -f ./kube/configs/dc4/tgw/dc4_sheol-tgw.yaml 54 | 55 | 56 | # curl 'http://af321be31a9474e3a919914b9567f910-1542581749.us-east-1.elb.amazonaws.com:8500/v1/catalog/gateway-services/sheol-tgw?token=root&ns=sheol' 57 | # API to see what's attached to a TGW -------------------------------------------------------------------------------- /scripts/vars.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # ------------------------------------------------------------------------------------ 6 | # Set Environment Variables 7 | # ------------------------------------------------------------------------------------ 8 | 9 | 10 | echo -e "${GRN} 11 | ------------------------------------------ 12 | Environment Variables 13 | ------------------------------------------${NC} 14 | 15 | ${RED}Copy and paste these into your shell:${NC} 16 | " 17 | 18 | echo -e "$(cat << 'EOF' 19 | export CONSUL_HTTP_TOKEN=root 20 | export CONSUL_HTTP_SSL_VERIFY=false 21 | 22 | export RED='\\033[1;31m' 23 | export BLUE='\\033[1;34m' 24 | export DGRN='\\033[0;32m' 25 | export GRN='\\033[1;32m' 26 | export YELL='\\033[0;33m' 27 | export NC='\\033[0m' 28 | 29 | export DC1="http://127.0.0.1:8500" 30 | export
DC2="http://127.0.0.1:8501" 31 | export DC3="https://127.0.0.1:8502" 32 | export DC4="https://127.0.0.1:8503" 33 | 34 | export KDC3="k3d-dc3" 35 | export KDC3_P1="k3d-dc3-p1" 36 | export KDC4="k3d-dc4" 37 | export KDC4_P1="k3d-dc4-p1" 38 | 39 | export FAKESERVICE_VER="v0.25.0" 40 | 41 | export HELM_CHART_VER="" 42 | 43 | EOF 44 | )" 45 | -------------------------------------------------------------------------------- /xtra/k9s/vsc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # vsc is a script to randomize a file name and launch Visual Studio Code on WSL2. 4 | # "code -" might be usable without a script on Mac, but it's broken on Windows and needs this script. 5 | 6 | # Generate a random string with a length of 6 characters 7 | random_str=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 6) # Filenames are randomized, so multiples don't stomp each other in VSC 8 | 9 | if [ "$#" -eq "1" ]; then # If there is an argument added, use it as the file extension (for syntax coloring in VSC) 10 | tmpfile="$HOME/code-${random_str}.$1" 11 | else 12 | tmpfile="$HOME/code-${random_str}.txt" 13 | fi 14 | 15 | # Use [[ ]] with && rather than the deprecated, ambiguous '[ expr -a expr ]' form (ShellCheck SC2166) 16 | if [[ "$#" -ge 1 && -f "$1" ]]; then input="$1"; else input="-"; fi 17 | 18 | cat "$input" >> "$tmpfile" # Take stdin (or the named file) and dump it to a file; quoted in case $HOME contains spaces 19 | 20 | code -r "$tmpfile" # Open the file in VSC 21 | 22 | rm "$tmpfile" # Remove the file so there aren't 100s of files everywhere. -------------------------------------------------------------------------------- /xtra/policy-smash.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | i=0 4 | while true; do 5 | if [[ "$i" -gt 1000 ]]; then 6 | exit 1 7 | fi 8 | consul acl policy create -name policy-$i -http-addr="http://127.0.0.1:8501" 9 | ((i++)) 10 | done 11 | 12 | --------------------------------------------------------------------------------