├── downstream-do ├── out │ └── .keep ├── provider.tf ├── variables.tf ├── node-templates.tf └── demo-cluster.tf ├── downstream-aws ├── out │ └── .keep ├── data.tf ├── provider.tf ├── variables.tf └── node-templates.tf ├── downstream-vsphere ├── out │ └── .keep ├── provider.tf ├── variables.tf ├── node-templates.tf └── demo-cluster.tf ├── rkevms ├── .gitignore ├── output.tf ├── destroy.sh ├── install_rke.sh ├── provider.tf ├── cluster.yml.tmpl ├── data.tf ├── variables.tf ├── .terraform.lock.hcl └── awsvms.tf ├── downstream-harvester-do ├── out │ └── .keep ├── provider.tf ├── node-templates.tf ├── variables.tf ├── README.md ├── sshkey.tf ├── demo-cluster.tf └── harvester.tf ├── downstream-fleet ├── .gitignore ├── output.tf ├── install_k3s.sh ├── provider.tf ├── variables.tf ├── k3s.tf └── data.tf ├── downstream-k3s ├── .gitignore ├── data.tf ├── k3s.tf ├── provider.tf ├── demo-cluster.tf ├── install_k3s.sh ├── variables.tf └── awsvms.tf ├── prod-app-deployment ├── charts │ └── .gitkeep ├── .gitignore ├── 05-create_chart.sh ├── 00-create_infra.sh ├── 99-destroy_infra.sh ├── external-dns │ └── values.yaml ├── mysql-operator │ ├── values.yaml │ └── navlink.yaml ├── fleet │ └── ghost │ │ └── fleet.yaml ├── k3s │ └── traefik-config.yaml ├── infra │ ├── output.tf │ ├── data.tf │ ├── provider.tf │ ├── variables.tf │ └── lb.tf ├── cert-manager │ └── cluster-issuer.yaml ├── 06-install_mysqloperator.sh ├── charts-prepared │ └── ghost │ │ ├── templates │ │ ├── pvc.yaml │ │ ├── service.yaml │ │ ├── tests │ │ │ └── test-connection.yaml │ │ ├── hpa.yaml │ │ ├── mysqlcluster.yaml │ │ ├── _helpers.tpl │ │ └── NOTES.txt │ │ ├── .helmignore │ │ └── Chart.yaml ├── 02-install_certmanager.sh ├── 04-install_rancher.sh ├── fleet-prepared │ └── ghost │ │ └── fleet.yaml └── 01-install_k3s_sles.sh ├── proxysetup-aws-k3s ├── .gitignore ├── locals.tf ├── providers.tf ├── keys.tf ├── vpn.tf ├── userdata │ └── cluster_vms.sh ├── data.tf ├── output.tf ├── variables.tf ├── network.tf ├── README.md └── .terraform.lock.hcl ├── proxysetup-aws-rke ├── .gitignore ├── locals.tf ├── providers.tf ├── keys.tf ├── vpn.tf ├── output.tf ├── data.tf ├── userdata │ ├── rke-cluster.yaml │ └── cluster_vms.sh ├── variables.tf ├── network.tf ├── rancher.tf └── .terraform.lock.hcl ├── k3svms ├── .gitignore ├── output.tf ├── provider.tf ├── versions.tf ├── cluster.yml ├── install_workload_clusters.sh ├── variables.tf ├── .terraform.lock.hcl ├── install_centos.sh ├── install_ubuntu.sh ├── install_suse.sh └── data.tf ├── modules └── demo-workloads │ ├── .gitignore │ ├── neuvector │ ├── .gitignore │ ├── regcred.yaml.template │ ├── license.yaml.template │ ├── values-federated.yaml │ ├── configure.sh │ └── values.yaml │ ├── cert-manager │ ├── clusterissuer-selfsigned.yaml │ ├── credentials-secret.yaml │ ├── clusterissuer-http.yaml │ └── clusterissuer.yaml │ ├── variables.tf │ ├── elk │ ├── basic-auth.yaml │ ├── kibana │ │ └── values.yaml │ └── elasticsearch │ │ └── values.yaml │ ├── mysql-operator │ ├── basic-auth.yaml │ └── values.yaml │ ├── opni │ ├── navlink.yaml │ ├── opni-np-svc.yaml │ ├── ingress-opni.yaml │ ├── ingress-grafana.yaml │ ├── ingress-logs.yaml │ └── values.yaml │ ├── loki │ ├── values.yaml │ ├── datasource.yaml │ └── logging-flow.yaml │ ├── rancher-demo │ ├── service.yaml │ ├── deployment.yaml │ └── ingress.yaml │ ├── nginx-with-pvc │ ├── pvc.yaml │ ├── service.yaml │ ├── deployment.yaml │ └── deployment-direct.yaml │ ├── harbor │ ├── harbor-values.yaml │ └── certificate.yaml │ ├── harbor-standalone │ ├── certificate.yaml │ 
└── harbor-values.yaml │ ├── keycloak │ ├── issuer.yaml │ └── values.yaml │ ├── demo-shop │ ├── cert.yaml │ ├── ingress.yaml │ └── ingressgateway.yaml │ ├── external-dns │ └── values.yaml │ ├── gitlab │ └── values.yaml │ ├── trow │ ├── values.yaml │ └── issuer.yaml │ ├── bookinfo │ ├── cert.yaml │ └── ingressgateway.yaml │ ├── longhorn_monitoring │ └── servicemonitor.yaml │ ├── network-debugging │ └── debug.yaml │ ├── workload.tf │ ├── argocd │ └── values.yaml │ ├── expose-prometheus │ └── ingress.yaml │ ├── mysql-cluster │ └── cluster.yaml │ ├── nexus │ └── values.yaml │ └── external-monitoring │ └── service-monitor.yaml ├── istio-multicluster ├── .gitignore ├── helloworld_service.yaml ├── expose-services-gateway.yaml ├── kiali-values.yaml ├── cluster1.yaml ├── cluster2.yaml ├── helloworld_v2.yaml ├── helloworld_v1.yaml └── sleep.yaml ├── proxysetup-k3s-to-rancher-through-proxy ├── .gitignore ├── locals.tf ├── providers.tf ├── keys.tf ├── output.tf ├── userdata │ ├── cluster_vms.sh │ ├── rancher_server.sh │ └── proxy.sh ├── data.tf ├── variables.tf ├── network.tf └── rancher.tf ├── rancher-azure ├── .gitignore ├── output.tf ├── cluster.yml.tmpl ├── userdata │ └── rancher_server.template ├── install_rke.sh ├── provider.tf ├── cloud-config.yaml.template ├── install_csi.sh ├── common_infra.tf ├── install_rancher.sh ├── variables.tf └── .terraform.lock.hcl ├── vms_lb_dns ├── .gitignore ├── rke2_config_initial.yaml ├── rke2_config_additional.yaml ├── install.sh ├── traefik-config.yaml ├── rke2-ingress-nginx-config.yaml ├── uninstall_k3s.sh ├── uninstall_rke2.sh ├── provider.tf ├── clusterissuer.yaml ├── output.tf ├── data.tf ├── install_rancher.sh ├── install_k3s_sles.sh └── variables.tf ├── private_registry ├── .gitignore ├── harbor_rancher_project.json ├── echo_install_ubuntu.sh ├── output.tf ├── clean.sh ├── provider.tf ├── data.tf ├── prepare_workstation.sh ├── install_harbor.sh ├── sync_rancher_images.sh └── variables.tf ├── vspherevms ├── output.tf ├── provider.tf ├── variables.tf └── .terraform.lock.hcl ├── gatekeeper-demo └── README.md ├── aws-cloud-provider-instance-profile ├── provider-values.yaml ├── variables.tf ├── provider.tf └── .terraform.lock.hcl ├── Makefile ├── cert-manager-demo ├── 02-issuer-selfsigned.yaml ├── 03-issuer-ca.yaml ├── 05-credentials-secret.yaml ├── 03-certificate-ca.yaml ├── 02-certificate-selfsigned.yaml ├── 04-certificate-http.yaml ├── 05-certificate-dns.yaml ├── 03-ingress-ca-cert.yaml ├── 05-ingress-dns-cert.yaml ├── 04-ingress-http-cert.yaml ├── 02-ingress-selfsigned-cert.yaml ├── 04-issuer-http.yaml ├── 05-clusterissuer-dns.yaml ├── 06-ingress.yaml ├── 07-deployment-mounting-tls-secret.yaml └── 01-deployment.yaml ├── ioloadtest ├── portworx │ └── storageclass.yaml ├── storageclass-1-replica.yaml ├── storageclass-external.yaml ├── storageos │ ├── secret.yaml │ ├── storageclass.yaml │ └── cluster.yaml └── config.yaml ├── app-in-existing-cluster ├── variables.tf ├── provider.tf ├── .terraform.lock.hcl └── app.tf ├── downstream-azure-windows ├── output.tf ├── cluster.tf ├── provider.tf ├── common_infra.tf ├── userdata │ └── server.sh └── variables.tf ├── gke-test ├── variables.tf ├── .terraform.lock.hcl └── terraform.tf ├── .gitignore ├── downstream-aks ├── provider.tf ├── aks.tf ├── variables.tf └── .terraform.lock.hcl ├── downstream-eks ├── provider.tf ├── variables.tf ├── eks.tf └── .terraform.lock.hcl ├── userdata └── server.sh ├── .githooks └── pre-commit ├── README.md ├── network-policies-demo ├── network-policy-allow-pod-label.yaml ├── 
network-policy-allow-other-namespace.yaml └── deployment.yaml └── monitoring-v1-custom-alertmanager └── alertmanager.yaml /downstream-do/out/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /downstream-aws/out/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /downstream-vsphere/out/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rkevms/.gitignore: -------------------------------------------------------------------------------- 1 | cluster.yml -------------------------------------------------------------------------------- /downstream-harvester-do/out/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /downstream-fleet/.gitignore: -------------------------------------------------------------------------------- 1 | kubeconfig* -------------------------------------------------------------------------------- /downstream-k3s/.gitignore: -------------------------------------------------------------------------------- 1 | kubeconfig -------------------------------------------------------------------------------- /prod-app-deployment/charts/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/.gitignore: -------------------------------------------------------------------------------- 1 | *.ovpn -------------------------------------------------------------------------------- /proxysetup-aws-rke/.gitignore: -------------------------------------------------------------------------------- 1 | *.ovpn -------------------------------------------------------------------------------- /k3svms/.gitignore: -------------------------------------------------------------------------------- 1 | kubeconfig 2 | kubeconfig* -------------------------------------------------------------------------------- /prod-app-deployment/.gitignore: -------------------------------------------------------------------------------- 1 | kubeconfig -------------------------------------------------------------------------------- /modules/demo-workloads/.gitignore: -------------------------------------------------------------------------------- 1 | exported-charts -------------------------------------------------------------------------------- /istio-multicluster/.gitignore: -------------------------------------------------------------------------------- 1 | do-secret.yaml 2 | certs/ -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/.gitignore: -------------------------------------------------------------------------------- 1 | *.ovpn -------------------------------------------------------------------------------- /rancher-azure/.gitignore: -------------------------------------------------------------------------------- 1 | cluster.yml 2 | cloud-config.yaml -------------------------------------------------------------------------------- /vms_lb_dns/.gitignore: -------------------------------------------------------------------------------- 
1 | kubeconfig 2 | rendered_install.sh -------------------------------------------------------------------------------- /modules/demo-workloads/neuvector/.gitignore: -------------------------------------------------------------------------------- 1 | regcred.yaml 2 | license.yaml -------------------------------------------------------------------------------- /k3svms/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = aws_instance.k3s.*.public_ip 3 | } -------------------------------------------------------------------------------- /rkevms/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = aws_instance.rancher-cluster.*.public_ip 3 | } 4 | -------------------------------------------------------------------------------- /vms_lb_dns/rke2_config_initial.yaml: -------------------------------------------------------------------------------- 1 | token: my-shared-secret 2 | tls-san: 3 | - kubernetes.plgrnd.be -------------------------------------------------------------------------------- /private_registry/.gitignore: -------------------------------------------------------------------------------- 1 | kubeconfig* 2 | rendered_install.sh 3 | rancher_images 4 | rancher-images.tar.gz -------------------------------------------------------------------------------- /private_registry/harbor_rancher_project.json: -------------------------------------------------------------------------------- 1 | { 2 | "project_name": "rancher", 3 | "public": false 4 | } -------------------------------------------------------------------------------- /prod-app-deployment/05-create_chart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd charts && helm create ghost -------------------------------------------------------------------------------- /vspherevms/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = [vsphere_virtual_machine.k3s-nodes.*.default_ip_address] 3 | } -------------------------------------------------------------------------------- /gatekeeper-demo/README.md: -------------------------------------------------------------------------------- 1 | Install gatekeeper 2 | 3 | https://github.com/open-policy-agent/gatekeeper-library/tree/master/library -------------------------------------------------------------------------------- /prod-app-deployment/00-create_infra.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd infra && terraform apply -auto-approve -------------------------------------------------------------------------------- /prod-app-deployment/99-destroy_infra.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd infra && terraform destroy -auto-approve -------------------------------------------------------------------------------- /proxysetup-aws-k3s/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | rancher_hostname = join(".", ["rancher", aws_instance.proxy.private_ip, "nip.io"]) 3 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/locals.tf: 
-------------------------------------------------------------------------------- 1 | locals { 2 | rancher_hostname = join(".", ["rancher", aws_instance.proxy.private_ip, "nip.io"]) 3 | } -------------------------------------------------------------------------------- /rancher-azure/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = [ 3 | azurerm_linux_virtual_machine.linux-server.public_ip_address 4 | ] 5 | } -------------------------------------------------------------------------------- /vms_lb_dns/rke2_config_additional.yaml: -------------------------------------------------------------------------------- 1 | server: https://kubernetes.plgrnd.be:9345 2 | token: my-shared-secret 3 | tls-san: 4 | - kubernetes.plgrnd.be -------------------------------------------------------------------------------- /k3svms/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = var.aws_access_key 3 | secret_key = var.aws_secret_key 4 | region = var.aws_region 5 | } 6 | -------------------------------------------------------------------------------- /rkevms/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | rm cluster.yml 4 | rm cluster.rkestate 5 | rm kube_config_cluster.yml 6 | 7 | terraform destroy -auto-approve -------------------------------------------------------------------------------- /vms_lb_dns/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | terraform apply -auto-approve 6 | bash install_k3s_sles.sh 7 | bash install_rancher.sh 8 | -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | rancher_hostname = join(".", ["rancher", aws_instance.rancher_server.public_ip, "nip.io"]) 3 | } -------------------------------------------------------------------------------- /rancher-azure/cluster.yml.tmpl: -------------------------------------------------------------------------------- 1 | nodes: 2 | - address: ${IP0} 3 | user: ubuntu 4 | role: 5 | - controlplane 6 | - etcd 7 | - worker 8 | -------------------------------------------------------------------------------- /prod-app-deployment/external-dns/values.yaml: -------------------------------------------------------------------------------- 1 | provider: digitalocean 2 | 3 | domainFilters: 4 | - k8sdemo.plgrnd.be 5 | 6 | txtOwnerId: k8sdemo 7 | policy: sync 8 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = var.aws_access_key 3 | secret_key = var.aws_secret_key 4 | region = var.aws_region 5 | } 6 | -------------------------------------------------------------------------------- /proxysetup-aws-rke/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = var.aws_access_key 3 | secret_key = var.aws_secret_key 4 | region = var.aws_region 5 | } 6 | -------------------------------------------------------------------------------- /aws-cloud-provider-instance-profile/provider-values.yaml: 
-------------------------------------------------------------------------------- 1 | nodeSelector: 2 | node-role.kubernetes.io/master: "true" 3 | 4 | image: 5 | tag: v20210510-v1.21.0-alpha.0 -------------------------------------------------------------------------------- /k3svms/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/keys.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "ssh_key_pair" { 2 | key_name_prefix = "bhofmann-ssh-" 3 | public_key = file("${var.ssh_key_file_name}.pub") 4 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/keys.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "ssh_key_pair" { 2 | key_name_prefix = "bhofmann-ssh-" 3 | public_key = file("${var.ssh_key_file_name}.pub") 4 | } -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | init: 2 | find .githooks -type f -exec ln -sf ../../{} .git/hooks/ \; 3 | 4 | fix: 5 | terraform fmt -recursive 6 | 7 | check: 8 | terraform fmt -recursive -check -------------------------------------------------------------------------------- /cert-manager-demo/02-issuer-selfsigned.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: selfsigned-issuer 5 | namespace: default 6 | spec: 7 | selfSigned: {} -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = var.aws_access_key 3 | secret_key = var.aws_secret_key 4 | region = var.aws_region 5 | } 6 | -------------------------------------------------------------------------------- /modules/demo-workloads/cert-manager/clusterissuer-selfsigned.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: selfsigned 5 | spec: 6 | selfSigned: {} 7 | 8 | -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/keys.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "ssh_key_pair" { 2 | key_name_prefix = "bhofmann-ssh-" 3 | public_key = file("${var.ssh_key_file_name}.pub") 4 | } -------------------------------------------------------------------------------- /cert-manager-demo/03-issuer-ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: ca-issuer 5 | namespace: cattle-system 6 | spec: 7 | ca: 8 | secretName: ca-key-pair -------------------------------------------------------------------------------- /modules/demo-workloads/variables.tf: -------------------------------------------------------------------------------- 1 | variable "kubeconfig_demo" { 2 | type = string 3 | 
} 4 | variable "digitalocean_token" { 5 | type = string 6 | } 7 | variable "email" { 8 | type = string 9 | } -------------------------------------------------------------------------------- /prod-app-deployment/mysql-operator/values.yaml: -------------------------------------------------------------------------------- 1 | orchestrator: 2 | config: 3 | # `reset slave all` and `set read_only=0` on promoted master 4 | ApplyMySQLPromotionAfterMasterFailover: true 5 | -------------------------------------------------------------------------------- /ioloadtest/portworx/storageclass.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: portworx-sc 5 | provisioner: kubernetes.io/portworx-volume 6 | parameters: 7 | repl: "3" -------------------------------------------------------------------------------- /app-in-existing-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rancher_url" { 2 | type = string 3 | } 4 | variable "rancher_access_key" { 5 | type = string 6 | } 7 | variable "rancher_secret_key" { 8 | type = string 9 | } -------------------------------------------------------------------------------- /downstream-fleet/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = [ 3 | aws_instance.cluster_one.public_ip, 4 | aws_instance.cluster_two.public_ip, 5 | aws_instance.cluster_three.public_ip, 6 | ] 7 | } -------------------------------------------------------------------------------- /prod-app-deployment/fleet/ghost/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: ghost 2 | 3 | helm: 4 | chart: github.com/bashofmann/rancher-demo-setups/prod-app-deployment/charts/ghost 5 | releaseName: ghost 6 | values: 7 | 8 | -------------------------------------------------------------------------------- /downstream-fleet/install_k3s.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | k3sup install \ 6 | --ip $IP \ 7 | --user ec2-user \ 8 | --k3s-extra-args "--node-external-ip ${IP}" \ 9 | --k3s-channel v1.21 10 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/vpn.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "get_vpn_config" { 2 | provisioner "local-exec" { 3 | command = "scp -oStrictHostKeyChecking=no ubuntu@${aws_instance.proxy.public_ip}:awsproxysetup.ovpn ." 4 | } 5 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/vpn.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "get_vpn_config" { 2 | provisioner "local-exec" { 3 | command = "scp -oStrictHostKeyChecking=no ubuntu@${aws_instance.proxy.public_ip}:awsproxysetup.ovpn ." 
4 | } 5 | } -------------------------------------------------------------------------------- /vms_lb_dns/traefik-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.cattle.io/v1 2 | kind: HelmChartConfig 3 | metadata: 4 | name: traefik 5 | namespace: kube-system 6 | spec: 7 | valuesContent: |- 8 | deployment: 9 | replicas: 2 -------------------------------------------------------------------------------- /modules/demo-workloads/neuvector/regcred.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | type: kubernetes.io/dockerconfigjson 3 | kind: Secret 4 | metadata: 5 | name: regsecret 6 | namespace: neuvector 7 | data: 8 | .dockerconfigjson: xxxx -------------------------------------------------------------------------------- /prod-app-deployment/k3s/traefik-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.cattle.io/v1 2 | kind: HelmChartConfig 3 | metadata: 4 | name: traefik 5 | namespace: kube-system 6 | spec: 7 | valuesContent: |- 8 | deployment: 9 | replicas: 2 -------------------------------------------------------------------------------- /rancher-azure/userdata/rancher_server.template: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export DEBIAN_FRONTEND=noninteractive 4 | curl -sL https://releases.rancher.com/install-docker/${docker_version}.sh | sh 5 | sudo usermod -aG docker ${username} 6 | -------------------------------------------------------------------------------- /downstream-azure-windows/output.tf: -------------------------------------------------------------------------------- 1 | output "windows_ip" { 2 | value = azurerm_windows_virtual_machine.windows-server.public_ip_address 3 | } 4 | output "linux_ip" { 5 | value = azurerm_linux_virtual_machine.linux-server.public_ip_address 6 | } -------------------------------------------------------------------------------- /vms_lb_dns/rke2-ingress-nginx-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.cattle.io/v1 2 | kind: HelmChartConfig 3 | metadata: 4 | name: rke2-ingress-nginx 5 | namespace: kube-system 6 | spec: 7 | valuesContent: |- 8 | controller: 9 | kind: DaemonSet -------------------------------------------------------------------------------- /modules/demo-workloads/elk/basic-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | # foo:bar 4 | auth: Zm9vOiRhcHIxJExXNlpJSHV5JEpZNGlHcGQvQWQvZ1ovQzN3WlIxcTEK 5 | kind: Secret 6 | metadata: 7 | name: basic-auth 8 | namespace: logging 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /rkevms/install_rke.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | cat cluster.yml.tmpl | envsubst > cluster.yml 8 | 9 | rke up 10 | -------------------------------------------------------------------------------- /cert-manager-demo/05-credentials-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | # this must be base64 encoded 3 | data: 4 | access-token: #TOKEN 5 | kind: Secret 6 | metadata: 7 
| name: digitalocean-credentials-secret 8 | namespace: cert-manager 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /modules/demo-workloads/mysql-operator/basic-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | # foo:bar 4 | auth: Zm9vOiRhcHIxJExXNlpJSHV5JEpZNGlHcGQvQWQvZ1ovQzN3WlIxcTEK 5 | kind: Secret 6 | metadata: 7 | name: basic-auth 8 | namespace: mysql-operator 9 | type: Opaque -------------------------------------------------------------------------------- /gke-test/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rancher_url" { 2 | type = string 3 | } 4 | variable "rancher_access_key" { 5 | type = string 6 | } 7 | variable "rancher_secret_key" { 8 | type = string 9 | } 10 | variable "gcp_credentials_file" { 11 | type = string 12 | } -------------------------------------------------------------------------------- /rancher-azure/install_rke.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | cat cluster.yml.tmpl | envsubst > cluster.yml 8 | 9 | rke up 10 | -------------------------------------------------------------------------------- /k3svms/cluster.yml: -------------------------------------------------------------------------------- 1 | # curl https://releases.rancher.com/install-docker/19.03.sh | sh 2 | # sudo usermod -aG docker centos 3 | 4 | nodes: 5 | - address: 3.72.104.6 6 | user: ec2-user 7 | role: 8 | - controlplane 9 | - etcd 10 | - worker 11 | -------------------------------------------------------------------------------- /modules/demo-workloads/neuvector/license.yaml.template: -------------------------------------------------------------------------------- 1 | controller: 2 | secret: 3 | data: 4 | eulainitcfg.yaml: 5 | license_key: xxxx 6 | userinitcfg.yaml: 7 | users: 8 | - Fullname: admin 9 | Password: xxxx 10 | Role: admin -------------------------------------------------------------------------------- /prod-app-deployment/infra/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = aws_instance.k8sdemo-vm.*.public_ip 3 | } 4 | 5 | output "rancher_lb" { 6 | value = aws_elb.k8sdemo-lb.dns_name 7 | } 8 | 9 | output "rancher_dns" { 10 | value = digitalocean_record.rancher.fqdn 11 | } 12 | -------------------------------------------------------------------------------- /cert-manager-demo/03-certificate-ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: cert-ca 5 | namespace: cattle-system 6 | spec: 7 | secretName: tls-rancher-ingress 8 | issuerRef: 9 | name: ca-issuer 10 | commonName: rancher.k8s-demo.plgrnd.be -------------------------------------------------------------------------------- /modules/demo-workloads/cert-manager/credentials-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | # this must be base64 encoded 3 | data: 4 | access-token: ${ENCODED_DIGITALOCEAN_TOKEN} 5 | kind: Secret 6 | metadata: 7 | name: digitalocean-credentials-secret 8 | namespace: cert-manager 9 | type: Opaque 10 | 
-------------------------------------------------------------------------------- /modules/demo-workloads/opni/navlink.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ui.cattle.io/v1 2 | kind: NavLink 3 | metadata: 4 | name: opni 5 | spec: 6 | sideLabel: Opni 7 | target: _blank 8 | toService: 9 | name: opni-admin-dashboard 10 | namespace: opni 11 | port: '12080' 12 | scheme: http 13 | -------------------------------------------------------------------------------- /aws-cloud-provider-instance-profile/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | variable "aws_secret_key" { 6 | type = string 7 | description = "AWS secret key used to create AWS infrastructure" 8 | } -------------------------------------------------------------------------------- /modules/demo-workloads/loki/values.yaml: -------------------------------------------------------------------------------- 1 | persistence: 2 | enabled: true 3 | accessModes: 4 | - ReadWriteOnce 5 | size: 10Gi 6 | serviceMonitor: 7 | enabled: true 8 | resources: 9 | limits: 10 | cpu: 200m 11 | memory: 256Mi 12 | requests: 13 | cpu: 100m 14 | memory: 128Mi -------------------------------------------------------------------------------- /modules/demo-workloads/rancher-demo/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rancher-demo 5 | namespace: default 6 | spec: 7 | type: ClusterIP 8 | selector: 9 | app: rancher-demo 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web -------------------------------------------------------------------------------- /proxysetup-aws-k3s/userdata/cluster_vms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export HTTP_PROXY=http://${proxy_private_ip}:8888 4 | export HTTPS_PROXY=http://${proxy_private_ip}:8888 5 | export NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local 6 | 7 | curl -sfL https://get.k3s.io | sh - 8 | -------------------------------------------------------------------------------- /istio-multicluster/helloworld_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: helloworld 5 | labels: 6 | app: helloworld 7 | service: helloworld 8 | spec: 9 | ports: 10 | - port: 5000 11 | name: http 12 | selector: 13 | app: helloworld 14 | version: v1 15 | -------------------------------------------------------------------------------- /modules/demo-workloads/nginx-with-pvc/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-www 5 | labels: 6 | app: nginx 7 | spec: 8 | storageClassName: nfs-client 9 | accessModes: 10 | - ReadWriteMany 11 | resources: 12 | requests: 13 | storage: 10Mi -------------------------------------------------------------------------------- /prod-app-deployment/mysql-operator/navlink.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ui.cattle.io/v1 2 | kind: NavLink 3 | metadata: 4 | name: mysql-operator 5 | spec: 6 | label: MYSQL Operator 7 | toService: 8 | name: mysql-operator 9 | namespace: 
mysql-operator 10 | path: / 11 | port: "80" 12 | scheme: http -------------------------------------------------------------------------------- /cert-manager-demo/02-certificate-selfsigned.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: cert-selfsigned 5 | namespace: default 6 | spec: 7 | secretName: cert-selfsigned-tls 8 | issuerRef: 9 | name: selfsigned-issuer 10 | commonName: demo-selfsigned.k8s-demo.plgrnd.be -------------------------------------------------------------------------------- /rkevms/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | 10 | provider "aws" { 11 | access_key = var.aws_access_key 12 | secret_key = var.aws_secret_key 13 | region = var.aws_region 14 | } 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | 11 | # terraform.tvars 12 | terraform.tfvars 13 | 14 | # vagrant state directory 15 | .vagrant 16 | 17 | *.rkestate 18 | rke-rendered.yml 19 | kube_config* -------------------------------------------------------------------------------- /private_registry/echo_install_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | $(terraform output -state=terraform.tfstate -json "lb" | jq -r '"export LB=\(.)"') 7 | 8 | cat install_ubuntu.sh | envsubst > rendered_install.sh -------------------------------------------------------------------------------- /aws-cloud-provider-instance-profile/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | 10 | provider "aws" { 11 | access_key = var.aws_access_key 12 | secret_key = var.aws_secret_key 13 | region = "eu-central-1" 14 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/output.tf: -------------------------------------------------------------------------------- 1 | output "proxy_ip" { 2 | value = [aws_instance.proxy.public_ip] 3 | } 4 | output "proxy_private_ip" { 5 | value = [aws_instance.proxy.private_ip] 6 | } 7 | output "cluster_vm_ips" { 8 | value = [aws_instance.cluster_vms.*.private_ip] 9 | } 10 | output "dns_name" { 11 | value = local.rancher_hostname 12 | } -------------------------------------------------------------------------------- /modules/demo-workloads/nginx-with-pvc/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 80 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | nodePort: 30888 15 | selector: 16 | app: nginx -------------------------------------------------------------------------------- 
/proxysetup-k3s-to-rancher-through-proxy/output.tf: -------------------------------------------------------------------------------- 1 | output "proxy_ip" { 2 | value = [aws_instance.proxy.public_ip] 3 | } 4 | output "proxy_private_ip" { 5 | value = [aws_instance.proxy.private_ip] 6 | } 7 | output "cluster_vm_ips" { 8 | value = [aws_instance.cluster_vms.*.private_ip] 9 | } 10 | output "dns_name" { 11 | value = local.rancher_hostname 12 | } -------------------------------------------------------------------------------- /modules/demo-workloads/harbor/harbor-values.yaml: -------------------------------------------------------------------------------- 1 | externalURL: https://harbor.k8s-demo.plgrnd.be 2 | expose: 3 | ingress: 4 | hosts: 5 | core: harbor.k8s-demo.plgrnd.be 6 | tls: 7 | certSource: secret 8 | secret: 9 | secretName: harbor-harbor-ingress 10 | 11 | notary: 12 | enabled: false 13 | 14 | updateStrategy: 15 | type: Recreate -------------------------------------------------------------------------------- /modules/demo-workloads/harbor-standalone/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: harbor-tls 5 | namespace: harbor 6 | spec: 7 | dnsNames: 8 | - registry.plgrnd.be 9 | issuerRef: 10 | group: cert-manager.io 11 | kind: ClusterIssuer 12 | name: letsencrypt-prod 13 | secretName: harbor-harbor-ingress -------------------------------------------------------------------------------- /modules/demo-workloads/harbor/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: harbor-tls 5 | namespace: harbor 6 | spec: 7 | dnsNames: 8 | - harbor.k8s-demo.plgrnd.be 9 | issuerRef: 10 | group: cert-manager.io 11 | kind: ClusterIssuer 12 | name: letsencrypt-prod 13 | secretName: harbor-harbor-ingress -------------------------------------------------------------------------------- /modules/demo-workloads/keycloak/issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: letsencrypt 5 | namespace: keycloak 6 | spec: 7 | acme: 8 | privateKeySecretRef: 9 | name: letsencrypt-production 10 | server: https://acme-v02.api.letsencrypt.org/directory 11 | solvers: 12 | - http01: 13 | ingress: {} -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/userdata/cluster_vms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export HTTP_PROXY=http://${proxy_private_ip}:8888 4 | export HTTPS_PROXY=http://${proxy_private_ip}:8888 5 | export NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local 6 | 7 | export K3S_KUBECONFIG_MODE=644 8 | 9 | curl -sfL https://get.k3s.io | sh - 10 | -------------------------------------------------------------------------------- /cert-manager-demo/04-certificate-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: letsencrypt-http 5 | namespace: default 6 | spec: 7 | secretName: cert-letsencrypt-http-tls 8 | issuerRef: 9 | name: letsencrypt-http-prod 10 | commonName: demo-http.k8s-demo.plgrnd.be 11 | dnsNames: 12 | - www.demo-http.k8s-demo.plgrnd.be 
-------------------------------------------------------------------------------- /cert-manager-demo/05-certificate-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: letsencrypt-dns 5 | namespace: default 6 | spec: 7 | secretName: cert-letsencrypt-dns-tls 8 | issuerRef: 9 | name: letsencrypt-dns-prod 10 | kind: ClusterIssuer 11 | commonName: demo-dns.k8s-demo.plgrnd.be 12 | dnsNames: 13 | - www.demo-dns.k8s-demo.plgrnd.be -------------------------------------------------------------------------------- /modules/demo-workloads/demo-shop/cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: shop-tls-certificate 5 | namespace: istio-system 6 | spec: 7 | dnsNames: 8 | - shop-istio.plgrnd.be 9 | issuerRef: 10 | group: cert-manager.io 11 | kind: ClusterIssuer 12 | name: letsencrypt-prod 13 | secretName: shop-tls-certificate-certs -------------------------------------------------------------------------------- /modules/demo-workloads/external-dns/values.yaml: -------------------------------------------------------------------------------- 1 | sources: 2 | - service 3 | - ingress 4 | - istio-gateway 5 | 6 | provider: digitalocean 7 | 8 | domainFilters: 9 | - plgrnd.be 10 | interval: "1m" 11 | registry: "txt" 12 | txtOwnerId: rancher-demo 13 | policy: sync 14 | resources: 15 | limits: 16 | memory: 50Mi 17 | requests: 18 | memory: 50Mi 19 | cpu: 10m 20 | -------------------------------------------------------------------------------- /modules/demo-workloads/gitlab/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | hosts: 3 | domain: gitlab.plgrnd.be 4 | externalIP: 167.172.188.121 5 | 6 | certmanager-issuer: 7 | email: mail@bastianhofmann.de 8 | 9 | nginx-ingress: 10 | controller: 11 | kind: DaemonSet 12 | hostPort: 13 | enabled: true 14 | service: 15 | type: ClusterIP 16 | externalTrafficPolicy: "" -------------------------------------------------------------------------------- /modules/demo-workloads/trow/values.yaml: -------------------------------------------------------------------------------- 1 | trow: 2 | domain: trow.k8s-demo.plgrnd.be 3 | ingress: 4 | enabled: true 5 | annotations: 6 | cert-manager.io/issuer: letsencrypt-prod 7 | hosts: 8 | - host: trow.k8s-demo.plgrnd.be 9 | paths: 10 | - / 11 | tls: 12 | - hosts: 13 | - trow.k8s-demo.plgrnd.be 14 | secretName: trow-cert -------------------------------------------------------------------------------- /downstream-aks/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | rancher2 = { 4 | source = "rancher/rancher2" 5 | version = "1.25.0" 6 | } 7 | } 8 | required_version = ">= 1.0.0" 9 | } 10 | provider "rancher2" { 11 | api_url = var.rancher_url 12 | insecure = true 13 | access_key = var.rancher_access_key 14 | secret_key = var.rancher_secret_key 15 | } -------------------------------------------------------------------------------- /downstream-eks/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | rancher2 = { 4 | source = "rancher/rancher2" 5 | version = "1.17.1" 6 | } 7 | } 8 | required_version = ">= 1.0.0" 9 | } 10 | provider "rancher2" { 11 | api_url = var.rancher_url 12 | insecure = true 13
| access_key = var.rancher_access_key 14 | secret_key = var.rancher_secret_key 15 | } -------------------------------------------------------------------------------- /ioloadtest/storageclass-1-replica.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: longhorn-single-replica 6 | parameters: 7 | baseImage: "" 8 | fromBackup: "" 9 | numberOfReplicas: "1" 10 | staleReplicaTimeout: "30" 11 | provisioner: driver.longhorn.io 12 | reclaimPolicy: Delete 13 | volumeBindingMode: Immediate 14 | -------------------------------------------------------------------------------- /prod-app-deployment/cert-manager/cluster-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-http-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | privateKeySecretRef: 9 | name: letsencrypt-http-prod 10 | solvers: 11 | - selector: {} 12 | http01: 13 | ingress: {} -------------------------------------------------------------------------------- /downstream-aws/data.tf: -------------------------------------------------------------------------------- 1 | # Use latest Ubuntu 18.04 AMI 2 | data "aws_ami" "ubuntu" { 3 | most_recent = true 4 | owners = ["099720109477"] # Canonical 5 | 6 | filter { 7 | name = "name" 8 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /downstream-k3s/data.tf: -------------------------------------------------------------------------------- 1 | # Use latest Ubuntu 18.04 AMI 2 | data "aws_ami" "ubuntu" { 3 | most_recent = true 4 | owners = ["099720109477"] # Canonical 5 | 6 | filter { 7 | name = "name" 8 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/demo-workloads/bookinfo/cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: bookinfo-tls-certificate 5 | namespace: istio-system 6 | spec: 7 | dnsNames: 8 | - bookinfo.k8s-demo.plgrnd.be 9 | issuerRef: 10 | group: cert-manager.io 11 | kind: ClusterIssuer 12 | name: letsencrypt-prod 13 | secretName: bookinfo-tls-certificate-certs -------------------------------------------------------------------------------- /prod-app-deployment/06-install_mysqloperator.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export KUBECONFIG=$(pwd)/kubeconfig 6 | 7 | helm repo add presslabs https://presslabs.github.io/charts 8 | 9 | helm upgrade --install mysql-operator presslabs/mysql-operator --namespace mysql-operator --create-namespace -f mysql-operator/values.yaml 10 | 11 | kubectl apply -f mysql-operator/navlink.yaml -------------------------------------------------------------------------------- /proxysetup-aws-k3s/data.tf: -------------------------------------------------------------------------------- 1 | # Use latest Ubuntu 18.04 AMI 2 | data 
"aws_ami" "ubuntu" { 3 | most_recent = true 4 | owners = ["099720109477"] # Canonical 5 | 6 | filter { 7 | name = "name" 8 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /proxysetup-aws-rke/data.tf: -------------------------------------------------------------------------------- 1 | # Use latest Ubuntu 18.04 AMI 2 | data "aws_ami" "ubuntu" { 3 | most_recent = true 4 | owners = ["099720109477"] # Canonical 5 | 6 | filter { 7 | name = "name" 8 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /app-in-existing-cluster/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | rancher2 = { 4 | source = "rancher/rancher2" 5 | version = "1.17.1" 6 | } 7 | } 8 | required_version = ">= 1.0.0" 9 | } 10 | provider "rancher2" { 11 | api_url = var.rancher_url 12 | insecure = true 13 | access_key = var.rancher_access_key 14 | secret_key = var.rancher_secret_key 15 | } -------------------------------------------------------------------------------- /prod-app-deployment/infra/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "sles" { 2 | owners = ["013907871322"] 3 | most_recent = true 4 | 5 | filter { 6 | name = "name" 7 | values = ["suse-sles-15-sp2*"] 8 | } 9 | 10 | filter { 11 | name = "architecture" 12 | values = ["x86_64"] 13 | } 14 | 15 | filter { 16 | name = "root-device-type" 17 | values = ["ebs"] 18 | } 19 | } -------------------------------------------------------------------------------- /modules/demo-workloads/harbor-standalone/harbor-values.yaml: -------------------------------------------------------------------------------- 1 | externalURL: https://registry.plgrnd.be 2 | expose: 3 | ingress: 4 | hosts: 5 | core: registry.plgrnd.be 6 | tls: 7 | certSource: secret 8 | secret: 9 | secretName: harbor-harbor-ingress 10 | 11 | notary: 12 | enabled: false 13 | 14 | persistence: 15 | persistentVolumeClaim: 16 | registry: 17 | size: 25Gi -------------------------------------------------------------------------------- /vspherevms/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vsphere = { 4 | source = "hashicorp/vsphere" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | 10 | provider "vsphere" { 11 | user = var.vcenter_user 12 | password = var.vcenter_password 13 | vsphere_server = var.vcenter_server 14 | allow_unverified_ssl = false 15 | } 16 | -------------------------------------------------------------------------------- /ioloadtest/storageclass-external.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: longhorn-external 6 | parameters: 7 | diskSelector: "external" 8 | baseImage: "" 9 | fromBackup: "" 10 | numberOfReplicas: "2" 11 | staleReplicaTimeout: "30" 12 | provisioner: driver.longhorn.io 13 | reclaimPolicy: Delete 14 | volumeBindingMode: Immediate 15 | 
-------------------------------------------------------------------------------- /private_registry/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = aws_instance.vmlb.*.public_ip 3 | } 4 | 5 | output "lb" { 6 | value = aws_elb.rancher-server-lb.dns_name 7 | } 8 | 9 | output "dns" { 10 | value = digitalocean_record.wildcard.fqdn 11 | } 12 | 13 | output "reglb" { 14 | value = aws_elb.registry-server-lb.dns_name 15 | } 16 | 17 | output "regdns" { 18 | value = digitalocean_record.registry.fqdn 19 | } -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/data.tf: -------------------------------------------------------------------------------- 1 | # Use latest Ubuntu 18.04 AMI 2 | data "aws_ami" "ubuntu" { 3 | most_recent = true 4 | owners = ["099720109477"] # Canonical 5 | 6 | filter { 7 | name = "name" 8 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/demo-workloads/loki/datasource.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: loki-datasource 5 | namespace: cattle-monitoring-system 6 | labels: 7 | grafana_datasource: "1" 8 | data: 9 | loki-stack-datasource.yaml: |- 10 | apiVersion: 1 11 | datasources: 12 | - name: Loki 13 | type: loki 14 | access: proxy 15 | url: http://loki.loki:3100 16 | version: 1 -------------------------------------------------------------------------------- /modules/demo-workloads/trow/issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: letsencrypt-prod 5 | namespace: trow 6 | spec: 7 | acme: 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | email: mail@bastianhofmann.de 10 | privateKeySecretRef: 11 | name: letsencrypt-http-prod 12 | solvers: 13 | - selector: {} 14 | http01: 15 | ingress: {} -------------------------------------------------------------------------------- /userdata/server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | curl -sL https://releases.rancher.com/install-docker/${docker_version}.sh | sh 4 | sudo usermod -aG docker ${username} 5 | 6 | cat <<'EOF' | sudo tee /etc/docker/daemon.json > /dev/null 7 | { 8 | "log-driver": "json-file", 9 | "log-opts": { 10 | "max-size": "100m", 11 | "max-file": "3" 12 | } 13 | } 14 | EOF 15 | 16 | sudo systemctl restart docker 17 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/output.tf: -------------------------------------------------------------------------------- 1 | output "proxy_ip" { 2 | value = [aws_instance.proxy.public_ip] 3 | } 4 | output "proxy_private_ip" { 5 | value = [aws_instance.proxy.private_ip] 6 | } 7 | output "cluster_vm_ips" { 8 | value = [aws_instance.cluster_vms.*.private_ip] 9 | } 10 | output "dns_name" { 11 | value = local.rancher_hostname 12 | } 13 | output "add_vm_ips" { 14 | value = [aws_instance.additional_vms.*.private_ip] 15 | } -------------------------------------------------------------------------------- /vms_lb_dns/uninstall_k3s.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | ssh -o StrictHostKeyChecking=no ec2-user@$IP0 sudo /usr/local/bin/k3s-uninstall.sh 8 | ssh -o StrictHostKeyChecking=no ec2-user@$IP1 sudo /usr/local/bin/k3s-uninstall.sh 9 | ssh -o StrictHostKeyChecking=no ec2-user@$IP2 sudo /usr/local/bin/k3s-uninstall.sh -------------------------------------------------------------------------------- /istio-multicluster/expose-services-gateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: cross-network-gateway 5 | namespace: istio-system 6 | spec: 7 | selector: 8 | istio: eastwestgateway 9 | servers: 10 | - port: 11 | number: 15443 12 | name: tls 13 | protocol: TLS 14 | tls: 15 | mode: AUTO_PASSTHROUGH 16 | hosts: 17 | - "*.local" -------------------------------------------------------------------------------- /modules/demo-workloads/cert-manager/clusterissuer-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-http-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | email: mail@bastianhofmann.de 9 | privateKeySecretRef: 10 | name: letsencrypt-http-prod 11 | solvers: 12 | - selector: {} 13 | http01: 14 | ingress: {} 15 | -------------------------------------------------------------------------------- /rancher-azure/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | 10 | provider "azurerm" { 11 | features {} 12 | 13 | subscription_id = var.azure_subscription_id 14 | client_id = var.azure_client_id 15 | client_secret = var.azure_client_secret 16 | tenant_id = var.azure_tenant_id 17 | } 18 | -------------------------------------------------------------------------------- /vms_lb_dns/uninstall_rke2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | ssh -o StrictHostKeyChecking=no ec2-user@$IP0 sudo /usr/local/bin/rke2-uninstall.sh 8 | ssh -o StrictHostKeyChecking=no ec2-user@$IP1 sudo /usr/local/bin/rke2-uninstall.sh 9 | ssh -o StrictHostKeyChecking=no ec2-user@$IP2 sudo /usr/local/bin/rke2-uninstall.sh -------------------------------------------------------------------------------- /private_registry/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | ssh -o StrictHostKeyChecking=no ubuntu@$IP0 "sudo /usr/local/bin/k3s-uninstall.sh" 8 | ssh -o StrictHostKeyChecking=no ubuntu@$IP1 "sudo /usr/local/bin/k3s-uninstall.sh" 9 | ssh -o StrictHostKeyChecking=no ubuntu@$IP2 "sudo /usr/local/bin/k3s-uninstall.sh" 10 | -------------------------------------------------------------------------------- /modules/demo-workloads/elk/kibana/values.yaml: 
-------------------------------------------------------------------------------- 1 | ingress: 2 | enabled: true 3 | hosts: 4 | - host: kibana.k8s-demo.plgrnd.be 5 | paths: 6 | - path: / 7 | annotations: 8 | cert-manager.io/issuer: my-ca-issuer 9 | nginx.ingress.kubernetes.io/auth-type: basic 10 | nginx.ingress.kubernetes.io/auth-secret: basic-auth 11 | tls: 12 | - hosts: 13 | - kibana.k8s-demo.plgrnd.be 14 | secretName: kibana-tls 15 | 16 | -------------------------------------------------------------------------------- /downstream-azure-windows/cluster.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cluster" "cluster" { 2 | name = "bhofmann-windows" 3 | 4 | rke_config { 5 | network { 6 | plugin = "flannel" 7 | options = { 8 | flannel_backend_port = "4789" 9 | flannel_backend_type = "vxlan" 10 | flannel_backend_vni = "4096" 11 | } 12 | } 13 | kubernetes_version = "v1.18.12-rancher1-1" 14 | } 15 | windows_prefered_cluster = true 16 | } 17 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Format all staged terraform files 4 | for file in `git diff --cached --name-only --diff-filter=ACMR | grep "\.tf$"` 5 | do 6 | echo "Formatting ${file}" 7 | terraform fmt ${file} 8 | git add ${file} 9 | done 10 | 11 | # If no files left in index after formatting - fail 12 | ret=0 13 | if [[ ! "`git diff --cached --name-only`" ]]; then 14 | 1>&2 echo "No files left after formatting" 15 | exit 1 16 | fi -------------------------------------------------------------------------------- /cert-manager-demo/03-ingress-ca-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: rancher-demo-ca 5 | namespace: default 6 | spec: 7 | rules: 8 | - host: demo-ca.k8s-demo.plgrnd.be 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: rancher-demo 13 | servicePort: 80 14 | path: / 15 | tls: 16 | - hosts: 17 | - demo-ca.k8s-demo.plgrnd.be 18 | secretName: cert-ca-tls -------------------------------------------------------------------------------- /downstream-do/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | rancher2 = { 4 | source = "rancher/rancher2" 5 | version = "1.24.2" 6 | } 7 | null = { 8 | source = "hashicorp/null" 9 | version = "3.2.1" 10 | } 11 | } 12 | required_version = ">= 1.0" 13 | } 14 | provider "rancher2" { 15 | api_url = var.rancher_url 16 | access_key = var.rancher_access_key 17 | secret_key = var.rancher_secret_key 18 | } 19 | -------------------------------------------------------------------------------- /proxysetup-aws-rke/userdata/rke-cluster.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - address: 10.0.1.200 3 | user: ubuntu 4 | role: [controlplane,worker,etcd] 5 | - address: 10.0.1.201 6 | user: ubuntu 7 | role: [controlplane,worker,etcd] 8 | - address: 10.0.1.202 9 | user: ubuntu 10 | role: [controlplane,worker,etcd] 11 | 12 | ssh_agent_auth: true 13 | 14 | services: 15 | etcd: 16 | backup_config: 17 | interval_hours: 12 18 | retention: 6 19 | -------------------------------------------------------------------------------- /istio-multicluster/kiali-values.yaml: 
-------------------------------------------------------------------------------- 1 | auth: 2 | strategy: anonymous 3 | deployment: 4 | ingress_enabled: false 5 | repository: rancher/kiali-kiali 6 | tag: v1.24.0 7 | external_services: 8 | prometheus: 9 | custom_metrics_url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" 10 | url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" 11 | tracing: 12 | in_cluster_url: "http://tracing.istio-system.svc:16686" -------------------------------------------------------------------------------- /modules/demo-workloads/longhorn_monitoring/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: longhorn-prometheus-servicemonitor 5 | namespace: longhorn-system 6 | labels: 7 | name: longhorn-prometheus-servicemonitor 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: longhorn-manager 12 | namespaceSelector: 13 | matchNames: 14 | - longhorn-system 15 | endpoints: 16 | - port: manager -------------------------------------------------------------------------------- /cert-manager-demo/05-ingress-dns-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: rancher-demo-dns 5 | namespace: default 6 | spec: 7 | rules: 8 | - host: demo-dns.k8s-demo.plgrnd.be 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: rancher-demo 13 | servicePort: 80 14 | path: / 15 | tls: 16 | - hosts: 17 | - demo-dns.k8s-demo.plgrnd.be 18 | secretName: cert-letsencrypt-dns-tls -------------------------------------------------------------------------------- /downstream-eks/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rancher_url" { 2 | type = string 3 | } 4 | variable "rancher_access_key" { 5 | type = string 6 | } 7 | variable "rancher_secret_key" { 8 | type = string 9 | } 10 | variable "aws_access_key" { 11 | type = string 12 | description = "AWS access key used to create infrastructure" 13 | } 14 | variable "aws_secret_key" { 15 | type = string 16 | description = "AWS secret key used to create AWS infrastructure" 17 | } 18 | -------------------------------------------------------------------------------- /vms_lb_dns/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | digitalocean = { 7 | source = "digitalocean/digitalocean" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | 13 | provider "aws" { 14 | access_key = var.aws_access_key 15 | secret_key = var.aws_secret_key 16 | region = var.aws_region 17 | } 18 | provider "digitalocean" { 19 | token = var.digitalocean_token 20 | } 21 | -------------------------------------------------------------------------------- /cert-manager-demo/04-ingress-http-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: rancher-demo-http 5 | namespace: default 6 | spec: 7 | rules: 8 | - host: demo-http.k8s-demo.plgrnd.be 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: rancher-demo 13 | servicePort: 80 14 | path: / 15 | tls: 16 | - hosts: 17 | - demo-http.k8s-demo.plgrnd.be 18 | secretName: cert-letsencrypt-http-tls 
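The HTTP-01 demo above is exercised by applying the issuer, certificate and ingress manifests and then waiting for cert-manager to populate the referenced TLS secret. A minimal sketch, assuming cert-manager is already installed in the target cluster and kubectl points at it (the file names follow the cert-manager-demo directory layout; the commands are illustrative only):

  kubectl apply -f cert-manager-demo/04-issuer-http.yaml
  kubectl apply -f cert-manager-demo/04-certificate-http.yaml
  kubectl apply -f cert-manager-demo/04-ingress-http-cert.yaml
  # wait until the certificate reports Ready, then inspect the issued secret
  kubectl get certificate -n default -w
  kubectl describe secret cert-letsencrypt-http-tls -n default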
-------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.storage.enabled }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: {{ include "ghost.fullname" . }} 6 | labels: 7 | {{- include "ghost.labels" . | nindent 4 }} 8 | spec: 9 | storageClassName: {{ .Values.storage.storageClassName }} 10 | accessModes: 11 | - ReadWriteMany 12 | resources: 13 | requests: 14 | storage: {{ .Values.storage.size }} 15 | {{- end }} -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "ghost.fullname" . }} 5 | labels: 6 | {{- include "ghost.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | {{- include "ghost.selectorLabels" . | nindent 4 }} 16 | -------------------------------------------------------------------------------- /rancher-azure/cloud-config.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | stringData: 3 | cloud-config: | 4 | { 5 | "cloud":"AzurePublicCloud", 6 | "tenantId": "", 7 | "subscriptionId": "", 8 | "aadClientId": "", 9 | "aadClientSecret": "", 10 | "resourceGroup": "bhofmann-demo-rancher", 11 | "location": "eastus" 12 | } 13 | kind: Secret 14 | metadata: 15 | name: azure-cloud-provider 16 | namespace: kube-system 17 | type: Opaque -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rancher-demo-setups 2 | 3 | This repository contains Terraform modules to set up the following on different cloud providers: 4 | 5 | * A highly available Rancher Management Server 6 | * A Rancher downstream cluster with test workloads 7 | 8 | **The modules are meant for demo purposes only and are not production ready.** 9 | 10 | ## Required local tools 11 | 12 | * Terraform 13 | * RKE CLI 14 | * Make 15 | * kubectl 16 | * Helm 17 | 18 | All of these tools should be installed in their latest versions.
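As a quick pre-flight check, the required tools listed in the README can be verified from a shell before running any of the modules; this is only an illustrative sketch, and the exact output format differs per tool:

  terraform version
  rke --version
  make --version
  kubectl version --client
  helm version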
-------------------------------------------------------------------------------- /private_registry/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | digitalocean = { 7 | source = "digitalocean/digitalocean" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | 13 | provider "aws" { 14 | access_key = var.aws_access_key 15 | secret_key = var.aws_secret_key 16 | region = var.aws_region 17 | } 18 | provider "digitalocean" { 19 | token = var.digitalocean_token 20 | } 21 | -------------------------------------------------------------------------------- /istio-multicluster/cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: install.istio.io/v1alpha1 2 | kind: IstioOperator 3 | spec: 4 | # components: 5 | # base: 6 | # enabled: true 7 | # pilot: 8 | # enabled: true 9 | # meshConfig: 10 | # enablePrometheusMerge: true 11 | values: 12 | global: 13 | meshID: mesh1 14 | multiCluster: 15 | clusterName: cluster1 16 | network: network1 17 | # telemetry: 18 | # enabled: true 19 | # v2: 20 | # enabled: true -------------------------------------------------------------------------------- /istio-multicluster/cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: install.istio.io/v1alpha1 2 | kind: IstioOperator 3 | spec: 4 | # components: 5 | # base: 6 | # enabled: true 7 | # pilot: 8 | # enabled: true 9 | # meshConfig: 10 | # enablePrometheusMerge: true 11 | values: 12 | global: 13 | meshID: mesh1 14 | multiCluster: 15 | clusterName: cluster2 16 | network: network2 17 | # telemetry: 18 | # enabled: true 19 | # v2: 20 | # enabled: true -------------------------------------------------------------------------------- /modules/demo-workloads/opni/opni-np-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: opni-np 5 | namespace: opni 6 | spec: 7 | ports: 8 | - name: grpc 9 | port: 9090 10 | nodePort: 32090 11 | protocol: TCP 12 | targetPort: grpc 13 | - name: noauth 14 | port: 4000 15 | nodePort: 32000 16 | protocol: TCP 17 | targetPort: noauth 18 | selector: 19 | app.kubernetes.io/name: opni-gateway 20 | type: NodePort 21 | -------------------------------------------------------------------------------- /prod-app-deployment/infra/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | digitalocean = { 7 | source = "digitalocean/digitalocean" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | 13 | provider "aws" { 14 | access_key = var.aws_access_key 15 | secret_key = var.aws_secret_key 16 | region = var.aws_region 17 | } 18 | provider "digitalocean" { 19 | token = var.digitalocean_token 20 | } 21 | -------------------------------------------------------------------------------- /cert-manager-demo/02-ingress-selfsigned-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: rancher-demo-selfsigned 5 | namespace: default 6 | spec: 7 | rules: 8 | - host: demo-selfsigned.k8s-demo.plgrnd.be 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: rancher-demo 13 | servicePort: 80 14 | path: / 15 | tls: 16 | - hosts: 17 | 
- demo-selfsigned.k8s-demo.plgrnd.be 18 | secretName: cert-selfsigned-tls -------------------------------------------------------------------------------- /cert-manager-demo/04-issuer-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: letsencrypt-http-prod 5 | namespace: default 6 | spec: 7 | acme: 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | email: mail@bastianhofmann.de 10 | privateKeySecretRef: 11 | name: letsencrypt-http-prod 12 | solvers: 13 | # An empty 'selector' means that this solver matches all domains 14 | - selector: {} 15 | http01: 16 | ingress: {} -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /vms_lb_dns/clusterissuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-http-prod 5 | namespace: default 6 | spec: 7 | acme: 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | email: mail@bastianhofmann.de 10 | privateKeySecretRef: 11 | name: letsencrypt-http-prod 12 | solvers: 13 | # An empty 'selector' means that this solver matches all domains 14 | - selector: {} 15 | http01: 16 | ingress: {} -------------------------------------------------------------------------------- /downstream-k3s/k3s.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "k3s" { 2 | provisioner "local-exec" { 3 | command = "bash install_k3s.sh" 4 | environment = { 5 | IP0 = aws_instance.k3s[0].public_ip 6 | IP1 = aws_instance.k3s[1].public_ip 7 | IP2 = aws_instance.k3s[2].public_ip 8 | IP3 = aws_instance.k3s[3].public_ip 9 | IP4 = aws_instance.k3s[4].public_ip 10 | } 11 | } 12 | provisioner "local-exec" { 13 | when = destroy 14 | command = "rm kubeconfig" 15 | } 16 | } -------------------------------------------------------------------------------- /network-policies-demo/network-policy-allow-pod-label.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: test-network-policy 5 | namespace: web-application 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: httpbin 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | ingress: 14 | - from: 15 | - podSelector: 16 | matchLabels: 17 | app: curl 18 | ports: 19 | - protocol: TCP 20 | port: 80 21 | -------------------------------------------------------------------------------- /private_registry/data.tf: -------------------------------------------------------------------------------- 1 | # Data for AWS module 2 | 3 | # AWS data 4 | # ---------------------------------------------------------- 5 | 6 | # Use latest Ubuntu 
20.04 AMI 7 | data "aws_ami" "ubuntu" { 8 | most_recent = true 9 | owners = ["099720109477"] # Canonical 10 | 11 | filter { 12 | name = "name" 13 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 14 | } 15 | 16 | filter { 17 | name = "virtualization-type" 18 | values = ["hvm"] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "ghost.fullname" . }}-test-connection" 5 | labels: 6 | {{- include "ghost.labels" . | nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "ghost.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /modules/demo-workloads/cert-manager/clusterissuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | email: ${EMAIL} 9 | privateKeySecretRef: 10 | name: letsencrypt-prod 11 | solvers: 12 | - dns01: 13 | digitalocean: 14 | tokenSecretRef: 15 | key: access-token 16 | name: digitalocean-credentials-secret 17 | -------------------------------------------------------------------------------- /network-policies-demo/network-policy-allow-other-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: test-network-policy 5 | namespace: web-application 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: httpbin 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | ingress: 14 | - from: 15 | - namespaceSelector: 16 | matchLabels: 17 | access: allowed 18 | ports: 19 | - protocol: TCP 20 | port: 80 21 | -------------------------------------------------------------------------------- /vms_lb_dns/output.tf: -------------------------------------------------------------------------------- 1 | output "node_ips" { 2 | value = aws_instance.vmlb.*.public_ip 3 | } 4 | 5 | output "rancher_lb" { 6 | value = aws_elb.rancher-server-lb.dns_name 7 | } 8 | 9 | output "rancher_dns" { 10 | value = digitalocean_record.rancher.fqdn 11 | } 12 | 13 | output "keycloak_dns" { 14 | value = digitalocean_record.keycloak.fqdn 15 | } 16 | 17 | output "kubernetes_lb" { 18 | value = aws_elb.kubernetes-lb.dns_name 19 | } 20 | 21 | output "kubernetes_dns" { 22 | value = digitalocean_record.kubernetes.fqdn 23 | } -------------------------------------------------------------------------------- /modules/demo-workloads/demo-shop/ingress.yaml: -------------------------------------------------------------------------------- 1 | kind: Ingress 2 | apiVersion: networking.k8s.io/v1beta1 3 | metadata: 4 | name: demo-shop 5 | annotations: 6 | cert-manager.io/cluster-issuer: letsencrypt-prod 7 | spec: 8 | tls: 9 | - hosts: 10 | - demo-shop.plgrnd.be 11 | secretName: demo-shop-tls 12 | rules: 13 | - host: demo-shop.plgrnd.be 14 | http: 15 | paths: 16 | - path: / 17 | backend: 18 | serviceName: frontend 19 | servicePort: 80 20 | 
-------------------------------------------------------------------------------- /modules/demo-workloads/keycloak/values.yaml: -------------------------------------------------------------------------------- 1 | ingress: 2 | enabled: true 3 | annotations: 4 | cert-manager.io/issuer: letsencrypt 5 | rules: 6 | - host: keycloak.plgrnd.be 7 | paths: 8 | - / 9 | tls: 10 | - hosts: 11 | - keycloak.plgrnd.be 12 | secretName: keycloak-tls 13 | 14 | extraEnv: | 15 | - name: KEYCLOAK_USER 16 | value: admin 17 | - name: KEYCLOAK_PASSWORD 18 | value: admin 19 | - name: JAVA_OPTS 20 | value: >- 21 | -Dkeycloak.frontendUrl=https://keycloak.plgrnd.be/auth -------------------------------------------------------------------------------- /cert-manager-demo/05-clusterissuer-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-dns-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | email: mail@bastianhofmann.de 9 | privateKeySecretRef: 10 | name: letsencrypt-dns-prod 11 | solvers: 12 | - selector: { } 13 | dns01: 14 | digitalocean: 15 | tokenSecretRef: 16 | key: access-token 17 | name: digitalocean-credentials-secret 18 | -------------------------------------------------------------------------------- /cert-manager-demo/06-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | cert-manager.io/cluster-issuer: letsencrypt-dns-prod 6 | name: rancher-demo-ingress 7 | namespace: default 8 | spec: 9 | rules: 10 | - host: demo-ingress.k8s-demo.plgrnd.be 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: rancher-demo 15 | servicePort: 80 16 | path: / 17 | tls: 18 | - hosts: 19 | - demo-ingress.k8s-demo.plgrnd.be 20 | secretName: cert-letsencrypt-ingress-tls -------------------------------------------------------------------------------- /downstream-harvester-do/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | rancher2 = { 4 | source = "rancher/rancher2" 5 | version = "1.10.6" 6 | } 7 | kubernetes = { 8 | source = "hashicorp/kubernetes" 9 | version = "2.0.2" 10 | } 11 | } 12 | required_version = ">= 0.13" 13 | } 14 | provider "rancher2" { 15 | api_url = var.rancher_url 16 | access_key = var.rancher_access_key 17 | secret_key = var.rancher_secret_key 18 | } 19 | provider "kubernetes" { 20 | config_path = local_file.kube_config.filename 21 | } -------------------------------------------------------------------------------- /modules/demo-workloads/rancher-demo/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rancher-demo 5 | namespace: default 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: rancher-demo 11 | template: 12 | metadata: 13 | labels: 14 | app: rancher-demo 15 | spec: 16 | containers: 17 | - image: monachus/rancher-demo:latest 18 | name: rancher-demo 19 | ports: 20 | - containerPort: 8080 21 | name: web 22 | protocol: TCP 23 | 24 | -------------------------------------------------------------------------------- /modules/demo-workloads/rancher-demo/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | 
kind: Ingress 3 | metadata: 4 | name: rancher-demo 5 | namespace: default 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt-prod 8 | spec: 9 | tls: 10 | - hosts: 11 | - rancher-demo.k8s-demo.plgrnd.be 12 | secretName: rancher-demo-tls 13 | rules: 14 | - host: rancher-demo.k8s-demo.plgrnd.be 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: rancher-demo 19 | servicePort: 80 20 | path: / -------------------------------------------------------------------------------- /modules/demo-workloads/elk/elasticsearch/values.yaml: -------------------------------------------------------------------------------- 1 | volumeClaimTemplate: 2 | accessModes: [ "ReadWriteOnce" ] 3 | resources: 4 | requests: 5 | storage: 5Gi 6 | 7 | ingress: 8 | enabled: true 9 | hosts: 10 | - host: es.k8s-demo.plgrnd.be 11 | paths: 12 | - path: / 13 | annotations: 14 | cert-manager.io/issuer: my-ca-issuer 15 | nginx.ingress.kubernetes.io/auth-type: basic 16 | nginx.ingress.kubernetes.io/auth-secret: basic-auth 17 | tls: 18 | - hosts: 19 | - es.k8s-demo.plgrnd.be 20 | secretName: es-tls 21 | 22 | -------------------------------------------------------------------------------- /prod-app-deployment/02-install_certmanager.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export KUBECONFIG=$(pwd)/kubeconfig 6 | 7 | helm repo add jetstack https://charts.jetstack.io 8 | 9 | helm upgrade --install \ 10 | cert-manager jetstack/cert-manager \ 11 | --namespace cert-manager \ 12 | --set installCRDs=true \ 13 | --version v1.7.1 --create-namespace 14 | 15 | kubectl rollout status deployment -n cert-manager cert-manager 16 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 17 | 18 | kubectl apply -f cert-manager/cluster-issuer.yaml -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/userdata/rancher_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export K3S_KUBECONFIG_MODE=644 4 | 5 | curl -sfL https://get.k3s.io | sh - 6 | 7 | snap install helm --classic 8 | 9 | export KUBECONFIG=/etc/rancher/k3s/k3s.yaml 10 | 11 | helm repo add jetstack https://charts.jetstack.io 12 | helm upgrade --install \ 13 | cert-manager jetstack/cert-manager \ 14 | --namespace cert-manager \ 15 | --set installCRDs=true \ 16 | --version 1.7.1 --create-namespace 17 | 18 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 19 | -------------------------------------------------------------------------------- /modules/demo-workloads/loki/logging-flow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.banzaicloud.io/v1beta1 2 | kind: ClusterOutput 3 | metadata: 4 | name: loki 5 | namespace: cattle-logging-system 6 | spec: 7 | loki: 8 | url: http://loki.loki:3100 9 | configure_kubernetes_labels: true 10 | buffer: 11 | timekey: 5s 12 | timekey_wait: 2s 13 | timekey_use_utc: true 14 | --- 15 | apiVersion: logging.banzaicloud.io/v1beta1 16 | kind: ClusterFlow 17 | metadata: 18 | name: all-logs-to-loki 19 | namespace: cattle-logging-system 20 | spec: 21 | globalOutputRefs: 22 | - loki -------------------------------------------------------------------------------- /modules/demo-workloads/opni/ingress-opni.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | 
kind: Ingress 3 | metadata: 4 | name: opni 5 | namespace: opni 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 8 | spec: 9 | tls: 10 | - hosts: 11 | - opni.plgrnd.be 12 | secretName: opni-tls-secret 13 | rules: 14 | - host: opni.plgrnd.be 15 | http: 16 | paths: 17 | - backend: 18 | service: 19 | name: opni 20 | port: 21 | number: 9090 22 | path: / 23 | pathType: Prefix 24 | -------------------------------------------------------------------------------- /downstream-aws/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | rancher2 = { 7 | source = "rancher/rancher2" 8 | version = "1.10.6" 9 | } 10 | } 11 | required_version = ">= 0.13" 12 | } 13 | 14 | provider "aws" { 15 | access_key = var.aws_access_key 16 | secret_key = var.aws_secret_key 17 | region = var.aws_region 18 | } 19 | provider "rancher2" { 20 | api_url = var.rancher_url 21 | insecure = true 22 | access_key = var.rancher_access_key 23 | secret_key = var.rancher_secret_key 24 | } -------------------------------------------------------------------------------- /modules/demo-workloads/network-debugging/debug.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: overlaytest 5 | namespace: default 6 | spec: 7 | selector: 8 | matchLabels: 9 | name: overlaytest 10 | template: 11 | metadata: 12 | labels: 13 | name: overlaytest 14 | spec: 15 | tolerations: 16 | - operator: Exists 17 | containers: 18 | - image: busybox:1.28 19 | imagePullPolicy: Always 20 | name: busybox 21 | command: ["sh", "-c", "tail -f /dev/null"] 22 | terminationMessagePath: /dev/termination-log -------------------------------------------------------------------------------- /ioloadtest/storageos/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "storageos-api" 5 | namespace: "storageos-operator" 6 | labels: 7 | app: "storageos" 8 | type: "kubernetes.io/storageos" 9 | data: 10 | # echo -n '' | base64 11 | apiUsername: c3RvcmFnZW9z 12 | apiPassword: c3RvcmFnZW9z 13 | # CSI Credentials 14 | csiProvisionUsername: c3RvcmFnZW9z 15 | csiProvisionPassword: c3RvcmFnZW9z 16 | csiControllerPublishUsername: c3RvcmFnZW9z 17 | csiControllerPublishPassword: c3RvcmFnZW9z 18 | csiNodePublishUsername: c3RvcmFnZW9z 19 | csiNodePublishPassword: c3RvcmFnZW9z -------------------------------------------------------------------------------- /modules/demo-workloads/opni/ingress-grafana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: grafana 5 | namespace: opni 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 8 | spec: 9 | tls: 10 | - hosts: 11 | - grafana.plgrnd.be 12 | secretName: grafana-tls-secret 13 | rules: 14 | - host: grafana.plgrnd.be 15 | http: 16 | paths: 17 | - backend: 18 | service: 19 | name: grafana-service 20 | port: 21 | number: 3000 22 | path: / 23 | pathType: Prefix 24 | -------------------------------------------------------------------------------- /prod-app-deployment/04-install_rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export KUBECONFIG=$(pwd)/kubeconfig 6 | 7 | helm repo 
add rancher-latest https://releases.rancher.com/server-charts/latest 8 | 9 | helm upgrade --install rancher rancher-latest/rancher \ 10 | --namespace cattle-system \ 11 | --version v2.6.2 \ 12 | --set hostname=rancher.k8sdemo.plgrnd.be --create-namespace \ 13 | --set ingress.tls.source=letsEncrypt 14 | 15 | watch "kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}' && kubectl get pods,ingress,certificates -A" -------------------------------------------------------------------------------- /downstream-k3s/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | // rancher2 = { 7 | // source = "rancher/rancher2" 8 | // version = "1.10.6" 9 | // } 10 | } 11 | required_version = ">= 0.13" 12 | } 13 | 14 | provider "aws" { 15 | access_key = var.aws_access_key 16 | secret_key = var.aws_secret_key 17 | region = var.aws_region 18 | } 19 | //provider "rancher2" { 20 | // api_url = var.rancher_url 21 | // insecure = true 22 | // access_key = var.rancher_access_key 23 | // secret_key = var.rancher_secret_key 24 | //} -------------------------------------------------------------------------------- /modules/demo-workloads/mysql-operator/values.yaml: -------------------------------------------------------------------------------- 1 | orchestrator: 2 | config: 3 | # `reset slave all` and `set read_only=0` on promoted master 4 | ApplyMySQLPromotionAfterMasterFailover: true 5 | ingress: 6 | enabled: true 7 | annotations: 8 | cert-manager.io/cluster-issuer: letsencrypt-prod 9 | nginx.ingress.kubernetes.io/auth-type: basic 10 | nginx.ingress.kubernetes.io/auth-secret: basic-auth 11 | hosts: 12 | - host: mysql-operator.k8s-demo.plgrnd.be 13 | paths: 14 | - / 15 | tls: 16 | - secretName: mysql-operator-tls 17 | hosts: 18 | - mysql-operator.k8s-demo.plgrnd.be -------------------------------------------------------------------------------- /rkevms/cluster.yml.tmpl: -------------------------------------------------------------------------------- 1 | # curl https://releases.rancher.com/install-docker/20.10.sh | sh 2 | # sudo usermod -aG docker fcbit 3 | 4 | nodes: 5 | - address: ${IP0} 6 | user: ec2-user 7 | role: 8 | - controlplane 9 | - etcd 10 | - worker 11 | - address: ${IP1} 12 | user: ec2-user 13 | role: 14 | - controlplane 15 | - etcd 16 | - worker 17 | - address: ${IP2} 18 | user: ec2-user 19 | role: 20 | - controlplane 21 | - etcd 22 | - worker 23 | 24 | kubernetes_version: v1.19.10-rancher1-1 25 | 26 | network: 27 | plugin: calico 28 | 29 | # kubernetes_version: v1.19.4-rancher1-1 -------------------------------------------------------------------------------- /downstream-aks/aks.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cloud_credential" "bhofmann-azure" { 2 | name = "bhofmann-azure" 3 | azure_credential_config { 4 | client_id = var.azure_client_id 5 | client_secret = var.azure_client_secret 6 | subscription_id = var.azure_subscription_id 7 | } 8 | } 9 | 10 | resource "rancher2_cluster" "bhofmann-aks" { 11 | name = "bhofmann-aks" 12 | aks_config_v2 { 13 | cloud_credential_id = rancher2_cloud_credential.bhofmann-azure.id 14 | resource_group = "bhofmann-aks" 15 | resource_location = var.azure_location 16 | imported = true 17 | } 18 | fleet_workspace_name = "fleet-other" 19 | } 20 | 
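A minimal sketch of applying the downstream-aks module from a shell; every value below is a placeholder rather than a real credential, and the variable names come from downstream-aks/variables.tf:

  cd downstream-aks
  terraform init
  terraform apply \
    -var="rancher_url=https://rancher.example.com" \
    -var="rancher_access_key=token-xxxxx" \
    -var="rancher_secret_key=xxxxx" \
    -var="azure_subscription_id=00000000-0000-0000-0000-000000000000" \
    -var="azure_client_id=00000000-0000-0000-0000-000000000000" \
    -var="azure_client_secret=xxxxx"

azure_location is left at its default here; in practice the values would more commonly live in a terraform.tfvars file than be passed on the command line.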
-------------------------------------------------------------------------------- /rkevms/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "ubuntu" { 2 | most_recent = true 3 | owners = ["099720109477"] # Canonical 4 | 5 | filter { 6 | name = "name" 7 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 8 | } 9 | 10 | filter { 11 | name = "virtualization-type" 12 | values = ["hvm"] 13 | } 14 | } 15 | 16 | data "aws_ami" "rhel" { 17 | most_recent = true 18 | owners = ["309956199498"] # RedHat 19 | 20 | filter { 21 | name = "name" 22 | values = ["RHEL-8.2.0_HVM-*-x86_64-0-Hourly2-GP2"] 23 | } 24 | 25 | filter { 26 | name = "virtualization-type" 27 | values = ["hvm"] 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /modules/demo-workloads/opni/ingress-logs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: logs 5 | namespace: opni 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 8 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 9 | spec: 10 | tls: 11 | - hosts: 12 | - logs.plgrnd.be 13 | secretName: logs-tls-secret 14 | rules: 15 | - host: logs.plgrnd.be 16 | http: 17 | paths: 18 | - backend: 19 | service: 20 | name: opni-opensearch-svc-dashboards 21 | port: 22 | number: 5601 23 | path: / 24 | pathType: Prefix 25 | -------------------------------------------------------------------------------- /downstream-vsphere/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vsphere = { 4 | source = "hashicorp/vsphere" 5 | } 6 | rancher2 = { 7 | source = "rancher/rancher2" 8 | version = "1.10.6" 9 | } 10 | } 11 | required_version = ">= 0.13" 12 | } 13 | 14 | provider "vsphere" { 15 | user = var.vcenter_user 16 | password = var.vcenter_password 17 | vsphere_server = var.vcenter_server 18 | allow_unverified_ssl = false 19 | } 20 | provider "rancher2" { 21 | api_url = var.rancher_url 22 | insecure = true 23 | access_key = var.rancher_access_key 24 | secret_key = var.rancher_secret_key 25 | } -------------------------------------------------------------------------------- /modules/demo-workloads/workload.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "workload" { 2 | triggers = { 3 | filehash = sha256(join(",", flatten([ 4 | for filename in fileset(path.module, "**") : filesha256(abspath("${path.module}/${filename}")) 5 | ] 6 | ))) 7 | } 8 | provisioner "local-exec" { 9 | command = "make -C ${path.module} install" 10 | environment = { 11 | KUBECONFIG = var.kubeconfig_demo 12 | EMAIL = var.email 13 | DIGITALOCEAN_TOKEN = var.digitalocean_token 14 | ENCODED_DIGITALOCEAN_TOKEN = base64encode(var.digitalocean_token) 15 | HELM_EXPERIMENTAL_OCI = 1 16 | } 17 | } 18 | } -------------------------------------------------------------------------------- /istio-multicluster/helloworld_v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: helloworld-v2 5 | labels: 6 | app: helloworld 7 | version: v2 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: helloworld 13 | version: v2 14 | template: 15 | metadata: 16 | labels: 17 | app: helloworld 18 | version: v2 19 | spec: 20 | 
containers: 21 | - name: helloworld 22 | image: docker.io/istio/examples-helloworld-v2 23 | resources: 24 | requests: 25 | cpu: "100m" 26 | imagePullPolicy: IfNotPresent #Always 27 | ports: 28 | - containerPort: 5000 -------------------------------------------------------------------------------- /istio-multicluster/helloworld_v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: helloworld-v1 5 | labels: 6 | app: helloworld 7 | version: v1 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: helloworld 13 | version: v1 14 | template: 15 | metadata: 16 | labels: 17 | app: helloworld 18 | version: v1 19 | spec: 20 | containers: 21 | - name: helloworld 22 | image: docker.io/istio/examples-helloworld-v1 23 | resources: 24 | requests: 25 | cpu: "100m" 26 | imagePullPolicy: IfNotPresent #Always 27 | ports: 28 | - containerPort: 5000 29 | -------------------------------------------------------------------------------- /private_registry/prepare_workstation.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | $(terraform output -state=terraform.tfstate -json "lb" | jq -r '"export LB=\(.)"') 7 | 8 | ssh ubuntu@$IP4 wget -O install_docker.sh https://releases.rancher.com/install-docker/20.10.sh 9 | ssh ubuntu@$IP4 bash install_docker.sh 10 | ssh ubuntu@$IP4 sudo usermod -aG docker ubuntu 11 | ssh ubuntu@$IP4 sudo snap install kubectl --classic 12 | scp kubeconfig_harbor ubuntu@$IP4:~/kubeconfig_harbor 13 | scp sync_rancher_images.sh ubuntu@$IP4:~/sync_rancher_images.sh 14 | scp harbor_rancher_project.json ubuntu@$IP4:~/harbor_rancher_project.json -------------------------------------------------------------------------------- /cert-manager-demo/07-deployment-mounting-tls-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | namespace: default 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: nginx 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - image: nginx 18 | name: nginx 19 | volumeMounts: 20 | - mountPath: /etc/nginx/ssl 21 | name: tls-certs 22 | ports: 23 | - containerPort: 80 24 | name: http 25 | protocol: TCP 26 | volumes: 27 | - name: tls-certs 28 | secret: 29 | secretName: cert-letsencrypt-http-tls -------------------------------------------------------------------------------- /downstream-azure-windows/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | } 6 | rancher2 = { 7 | source = "rancher/rancher2" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | 13 | provider "azurerm" { 14 | features {} 15 | 16 | subscription_id = var.azure_subscription_id 17 | client_id = var.azure_client_id 18 | client_secret = var.azure_client_secret 19 | tenant_id = var.azure_tenant_id 20 | } 21 | 22 | provider "rancher2" { 23 | api_url = var.rancher_url 24 | insecure = true 25 | token_key = var.rancher_admin_token 26 | access_key = var.rancher_access_key 27 | secret_key = var.rancher_secret_key 28 | } -------------------------------------------------------------------------------- 
/rancher-azure/install_csi.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | export KUBECONFIG=kube_config_cluster.yml 8 | 9 | kubectl apply -f cloud-config.yaml 10 | 11 | helm repo add azurefile-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts 12 | helm upgrade --install azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver \ 13 | --namespace kube-system \ 14 | --set controller.replicas=1 \ 15 | --set windows.enabled=false 16 | 17 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/deploy/example/storageclass-azurefile-csi.yaml 18 | -------------------------------------------------------------------------------- /downstream-do/variables.tf: -------------------------------------------------------------------------------- 1 | variable "digitalocean_token" { 2 | type = string 3 | } 4 | variable "rancher_url" { 5 | type = string 6 | } 7 | variable "rancher_access_key" { 8 | type = string 9 | } 10 | variable "rancher_secret_key" { 11 | type = string 12 | } 13 | variable "prefix" { 14 | type = string 15 | description = "Prefix added to names of all resources" 16 | default = "bhofmann" 17 | } 18 | variable "docker_version" { 19 | type = string 20 | description = "Docker version to install on nodes" 21 | default = "20.10" 22 | } 23 | variable "ssh_key_file_name" { 24 | type = string 25 | description = "File path and name of SSH private key used for infrastructure and RKE" 26 | default = "~/.ssh/id_rsa" 27 | } -------------------------------------------------------------------------------- /downstream-harvester-do/node-templates.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cloud_credential" "do-harvester" { 2 | name = "${var.prefix}-do-harvester" 3 | 4 | digitalocean_credential_config { 5 | access_token = var.digitalocean_token 6 | } 7 | } 8 | 9 | resource "rancher2_node_template" "harvester" { 10 | name = "${var.prefix}-do-harvester" 11 | description = "Template for harvester nodes" 12 | 13 | cloud_credential_id = rancher2_cloud_credential.do-harvester.id 14 | engine_install_url = "https://releases.rancher.com/install-docker/${var.docker_version}.sh" 15 | 16 | digitalocean_config { 17 | image = "ubuntu-20-04-x64" 18 | region = "fra1" 19 | size = "s-8vcpu-32gb" 20 | userdata = "" 21 | } 22 | } 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /downstream-harvester-do/variables.tf: -------------------------------------------------------------------------------- 1 | variable "digitalocean_token" { 2 | type = string 3 | } 4 | variable "rancher_url" { 5 | type = string 6 | } 7 | variable "rancher_access_key" { 8 | type = string 9 | } 10 | variable "rancher_secret_key" { 11 | type = string 12 | } 13 | variable "prefix" { 14 | type = string 15 | description = "Prefix added to names of all resources" 16 | default = "bhofmann" 17 | } 18 | variable "docker_version" { 19 | type = string 20 | description = "Docker version to install on nodes" 21 | default = "19.03" 22 | } 23 | variable "ssh_key_file_name" { 24 | type = string 25 | description = "File path and name of SSH private key used for infrastructure and RKE" 26 | default = "~/.ssh/id_rsa" 27 | } 
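The engine_install_url in node-templates.tf above interpolates var.docker_version, so with the default of "19.03" it resolves to https://releases.rancher.com/install-docker/19.03.sh. For troubleshooting, the same script can be run by hand on a node; a minimal sketch, assuming an Ubuntu node and that the login user (the name "ubuntu" is a placeholder) should be added to the docker group, mirroring userdata/server.sh elsewhere in this repository:

  curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh
  sudo usermod -aG docker ubuntu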
-------------------------------------------------------------------------------- /vms_lb_dns/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "sles" { 2 | owners = ["013907871322"] 3 | most_recent = true 4 | 5 | filter { 6 | name = "name" 7 | values = ["suse-sles-15-sp4*"] 8 | } 9 | 10 | filter { 11 | name = "architecture" 12 | values = ["x86_64"] 13 | } 14 | 15 | filter { 16 | name = "root-device-type" 17 | values = ["ebs"] 18 | } 19 | } 20 | 21 | data "aws_ami" "opensuse" { 22 | owners = ["679593333241"] 23 | most_recent = true 24 | 25 | filter { 26 | name = "name" 27 | values = ["openSUSE-Leap-15.4*"] 28 | } 29 | 30 | filter { 31 | name = "architecture" 32 | values = ["x86_64"] 33 | } 34 | 35 | filter { 36 | name = "root-device-type" 37 | values = ["ebs"] 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /rancher-azure/common_infra.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_resource_group" "demo-rancher" { 2 | name = "${var.prefix}-demo-rancher" 3 | location = var.azure_location 4 | } 5 | 6 | resource "azurerm_virtual_network" "demo-rancher" { 7 | name = "${var.prefix}-network" 8 | address_space = ["10.0.0.0/16"] 9 | location = azurerm_resource_group.demo-rancher.location 10 | resource_group_name = azurerm_resource_group.demo-rancher.name 11 | } 12 | 13 | resource "azurerm_subnet" "demo-rancher-internal" { 14 | name = "${var.prefix}-network-internal" 15 | resource_group_name = azurerm_resource_group.demo-rancher.name 16 | virtual_network_name = azurerm_virtual_network.demo-rancher.name 17 | address_prefixes = ["10.0.0.0/16"] 18 | } -------------------------------------------------------------------------------- /downstream-azure-windows/common_infra.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_resource_group" "demo-windows" { 2 | name = "${var.prefix}-demo-windows" 3 | location = var.azure_location 4 | } 5 | 6 | resource "azurerm_virtual_network" "demo-windows" { 7 | name = "${var.prefix}-network" 8 | address_space = ["10.0.0.0/16"] 9 | location = azurerm_resource_group.demo-windows.location 10 | resource_group_name = azurerm_resource_group.demo-windows.name 11 | } 12 | 13 | resource "azurerm_subnet" "demo-windows-internal" { 14 | name = "${var.prefix}-network-internal" 15 | resource_group_name = azurerm_resource_group.demo-windows.name 16 | virtual_network_name = azurerm_virtual_network.demo-windows.name 17 | address_prefixes = ["10.0.0.0/16"] 18 | } -------------------------------------------------------------------------------- /cert-manager-demo/01-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rancher-demo 5 | namespace: default 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: rancher-demo 11 | template: 12 | metadata: 13 | labels: 14 | app: rancher-demo 15 | spec: 16 | containers: 17 | - image: monachus/rancher-demo:latest 18 | name: rancher-demo 19 | ports: 20 | - containerPort: 8080 21 | name: web 22 | protocol: TCP 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: rancher-demo 28 | namespace: default 29 | spec: 30 | type: ClusterIP 31 | selector: 32 | app: rancher-demo 33 | ports: 34 | - name: web 35 | port: 80 36 | targetPort: web 37 | 
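Before any of the certificate or ingress manifests are applied, the demo deployment and its ClusterIP service above can be smoke-tested on their own. A minimal sketch, assuming kubectl already points at the demo cluster; the local port is arbitrary:

  kubectl apply -f cert-manager-demo/01-deployment.yaml
  kubectl rollout status deployment/rancher-demo -n default
  kubectl port-forward -n default svc/rancher-demo 8080:80
  # the demo application is then reachable on http://localhost:8080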
-------------------------------------------------------------------------------- /downstream-aks/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rancher_url" { 2 | type = string 3 | } 4 | variable "rancher_access_key" { 5 | type = string 6 | } 7 | variable "rancher_secret_key" { 8 | type = string 9 | } 10 | 11 | variable "azure_subscription_id" { 12 | type = string 13 | description = "Azure subscription id under which resources will be provisioned" 14 | } 15 | 16 | variable "azure_client_id" { 17 | type = string 18 | description = "Azure client id used to create resources" 19 | } 20 | 21 | variable "azure_client_secret" { 22 | type = string 23 | description = "Client secret used to authenticate with Azure apis" 24 | } 25 | 26 | variable "azure_location" { 27 | type = string 28 | description = "Azure location used for all resources" 29 | default = "East US" 30 | } -------------------------------------------------------------------------------- /downstream-eks/eks.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cloud_credential" "aws" { 2 | name = "aws" 3 | amazonec2_credential_config { 4 | access_key = var.aws_access_key 5 | secret_key = var.aws_secret_key 6 | } 7 | } 8 | 9 | resource "rancher2_cluster" "bhofmann-eks" { 10 | name = "bhofmann-eks" 11 | description = "Terraform EKS cluster" 12 | eks_config_v2 { 13 | cloud_credential_id = rancher2_cloud_credential.aws.id 14 | region = "eu-central-1" 15 | kubernetes_version = "1.21" 16 | logging_types = ["audit", "api"] 17 | node_groups { 18 | name = "rancher_node_group" 19 | instance_type = "m5.xlarge" 20 | desired_size = 2 21 | min_size = 1 22 | max_size = 3 23 | } 24 | public_access = true 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /downstream-fleet/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | rancher2 = { 7 | source = "rancher/rancher2" 8 | version = "1.23.0" 9 | } 10 | } 11 | required_version = ">= 1.0" 12 | } 13 | 14 | provider "aws" { 15 | alias = "aws_eu_west" 16 | access_key = var.aws_access_key 17 | secret_key = var.aws_secret_key 18 | region = "eu-west-1" 19 | } 20 | 21 | provider "aws" { 22 | alias = "aws_eu_central" 23 | access_key = var.aws_access_key 24 | secret_key = var.aws_secret_key 25 | region = "eu-central-1" 26 | } 27 | 28 | provider "rancher2" { 29 | api_url = var.rancher_url 30 | insecure = true 31 | access_key = var.rancher_access_key 32 | secret_key = var.rancher_secret_key 33 | } -------------------------------------------------------------------------------- /downstream-fleet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | variable "aws_secret_key" { 6 | type = string 7 | description = "AWS secret key used to create AWS infrastructure" 8 | } 9 | variable "rancher_url" { 10 | type = string 11 | } 12 | variable "rancher_access_key" { 13 | type = string 14 | } 15 | variable "rancher_secret_key" { 16 | type = string 17 | } 18 | variable "ssh_key_file_name" { 19 | type = string 20 | description = "File path and name of SSH private key used for infrastructure and RKE" 21 | default = "~/.ssh/id_rsa" 22 | } 23 | variable "prefix" { 24 | type = 
string 25 | description = "Prefix added to names of all resources" 26 | default = "bhofmann" 27 | } -------------------------------------------------------------------------------- /modules/demo-workloads/argocd/values.yaml: -------------------------------------------------------------------------------- 1 | redis-ha: 2 | enabled: true 3 | 4 | controller: 5 | replicas: 1 6 | 7 | server: 8 | ingress: 9 | enabled: true 10 | annotations: 11 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 12 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 13 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 14 | hosts: 15 | - argocd.plgrnd.be 16 | tls: 17 | - secretName: argocd-tls 18 | hosts: 19 | - argocd.plgrnd.be 20 | autoscaling: 21 | enabled: true 22 | minReplicas: 2 23 | 24 | repoServer: 25 | autoscaling: 26 | enabled: true 27 | minReplicas: 2 28 | 29 | applicationSet: 30 | replicaCount: 2 31 | 32 | configs: 33 | cm: 34 | url: argocd.plgrnd.be 35 | exec.enabled: true 36 | 37 | -------------------------------------------------------------------------------- /rancher-azure/install_rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | export KUBECONFIG=kube_config_cluster.yml 8 | 9 | helm upgrade --install \ 10 | cert-manager jetstack/cert-manager \ 11 | --namespace cert-manager \ 12 | --set installCRDs=true \ 13 | --version 1.7.1 --create-namespace 14 | 15 | kubectl rollout status deployment -n cert-manager cert-manager 16 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 17 | 18 | helm upgrade --install rancher rancher-latest/rancher \ 19 | --namespace cattle-system \ 20 | --version 2.6.4 \ 21 | --set replicas=1 \ 22 | --set hostname=rancher.${IP0}.sslip.io --create-namespace 23 | 24 | watch kubectl get pods,ingress -A 25 | -------------------------------------------------------------------------------- /downstream-k3s/demo-cluster.tf: -------------------------------------------------------------------------------- 1 | //resource "rancher2_cluster" "demo" { 2 | // name = "${var.prefix}-demo" 3 | // description = "Cluster for demos" 4 | //} 5 | // 6 | //resource "null_resource" "registration" { 7 | // depends_on = [ 8 | // null_resource.k3s, 9 | // rancher2_cluster.demo 10 | // ] 11 | // provisioner "local-exec" { 12 | // command = rancher2_cluster.demo.cluster_registration_token[0].command 13 | // environment = { 14 | // KUBECONFIG = "${path.module}/kubeconfig" 15 | // } 16 | // } 17 | //} 18 | 19 | module "demo-workloads" { 20 | depends_on = [ 21 | null_resource.k3s 22 | ] 23 | source = "../modules/demo-workloads" 24 | digitalocean_token = var.digitalocean_token 25 | kubeconfig_demo = abspath("${path.module}/kubeconfig") 26 | email = var.email 27 | } -------------------------------------------------------------------------------- /ioloadtest/storageos/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: storageos-replicated 5 | parameters: 6 | fsType: ext4 7 | pool: default 8 | storageos.com/replicas: "3" 9 | csi.storage.k8s.io/node-publish-secret-namespace: kube-system # Namespace that runs StorageOS Daemonset 10 | csi.storage.k8s.io/provisioner-secret-namespace: kube-system # Namespace that runs StorageOS Daemonset 11 | 
csi.storage.k8s.io/controller-publish-secret-namespace: kube-system # Namespace that runs StorageOS Daemonset 12 | csi.storage.k8s.io/node-publish-secret-name: csi-node-publish-secret 13 | csi.storage.k8s.io/provisioner-secret-name: csi-provisioner-secret 14 | csi.storage.k8s.io/controller-publish-secret-name: csi-controller-publish-secret 15 | provisioner: csi.storageos.com # Provisioner when using CSI -------------------------------------------------------------------------------- /modules/demo-workloads/neuvector/values-federated.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | replicas: 1 3 | apisvc: 4 | type: ClusterIP 5 | ranchersso: 6 | enabled: true 7 | secret: 8 | enabled: true 9 | data: 10 | sysinitcfg.yaml: 11 | Cluster_Name: cluster-two 12 | federation: 13 | managedsvc: 14 | type: ClusterIP 15 | ingress: 16 | enabled: true 17 | host: neuvector-managed.cluster-two.plgrnd.be 18 | path: "/" 19 | annotations: 20 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 21 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 22 | tls: true 23 | secretName: neuvector-managed-tls-secret 24 | 25 | cve: 26 | scanner: 27 | replicas: 1 28 | 29 | k3s: 30 | enabled: true 31 | 32 | global: 33 | cattle: 34 | url: https://rancher.plgrnd.be/ 35 | -------------------------------------------------------------------------------- /modules/demo-workloads/expose-prometheus/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: prometheus 5 | namespace: cattle-prometheus 6 | annotations: 7 | cert-manager.io/cluster-issuer: letsencrypt-prod 8 | nginx.ingress.kubernetes.io/auth-type: basic 9 | nginx.ingress.kubernetes.io/auth-secret: basic-auth 10 | spec: 11 | tls: 12 | - hosts: 13 | - prometheus.k8s-demo.plgrnd.be 14 | secretName: prometheus-demo-tls 15 | rules: 16 | - host: prometheus.k8s-demo.plgrnd.be 17 | http: 18 | paths: 19 | - backend: 20 | serviceName: access-prometheus 21 | servicePort: 80 22 | path: / 23 | --- 24 | apiVersion: v1 25 | data: 26 | # foo:bar 27 | auth: Zm9vOiRhcHIxJExXNlpJSHV5JEpZNGlHcGQvQWQvZ1ovQzN3WlIxcTEK 28 | kind: Secret 29 | metadata: 30 | name: basic-auth 31 | namespace: cattle-prometheus 32 | type: Opaque -------------------------------------------------------------------------------- /modules/demo-workloads/nginx-with-pvc/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | imagePullSecrets: 18 | - name: abcd 19 | containers: 20 | - name: nginx 21 | image: nginx:latest 22 | ports: 23 | - containerPort: 80 24 | name: http 25 | protocol: TCP 26 | readinessProbe: 27 | httpGet: 28 | port: http 29 | path: / 30 | volumeMounts: 31 | - mountPath: /usr/share/nginx/html 32 | name: nginx-www 33 | volumes: 34 | - name: nginx-www 35 | persistentVolumeClaim: 36 | claimName: nginx-www 37 | -------------------------------------------------------------------------------- /vspherevms/variables.tf: -------------------------------------------------------------------------------- 1 | variable "prefix" { 2 | type = string 3 | description = "Prefix added to names of all resources" 4 | default = "bhofmann" 5 | } 6 | 
variable "vcenter_user" { 7 | type = string 8 | } 9 | variable "vcenter_password" { 10 | type = string 11 | } 12 | variable "vcenter_server" { 13 | type = string 14 | } 15 | variable "ssh_key_file_name" { 16 | type = string 17 | description = "File path and name of SSH private key used for infrastructure and RKE" 18 | default = "~/.ssh/id_rsa" 19 | } 20 | variable "vm_template_name" { 21 | type = string 22 | } 23 | variable "vsphere_datacenter" { 24 | type = string 25 | } 26 | variable "vsphere_cluster" { 27 | type = string 28 | default = "" 29 | } 30 | variable "vsphere_resource_pool" { 31 | type = string 32 | default = "" 33 | } 34 | variable "vsphere_datastore" { 35 | type = string 36 | } 37 | variable "vsphere_network" { 38 | type = string 39 | } -------------------------------------------------------------------------------- /k3svms/install_workload_clusters.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | 8 | k3sup install \ 9 | --ip $IP3 \ 10 | --user ubuntu \ 11 | --cluster \ 12 | --k3s-channel latest 13 | 14 | k3sup join \ 15 | --ip $IP4 \ 16 | --user ubuntu \ 17 | --server-user ubuntu \ 18 | --server-ip $IP3 \ 19 | --server \ 20 | --k3s-channel latest 21 | 22 | k3sup join \ 23 | --ip $IP5 \ 24 | --user ubuntu \ 25 | --server-user ubuntu \ 26 | --server-ip $IP3 \ 27 | --server \ 28 | --k3s-channel latest 29 | 30 | mv kubeconfig kubeconfig_cluster_one 31 | 32 | k3sup install \ 33 | --ip $IP6 \ 34 | --user ubuntu \ 35 | --k3s-channel latest 36 | 37 | mv kubeconfig kubeconfig_cluster_two 38 | 39 | k3sup install \ 40 | --ip $IP7 \ 41 | --user ubuntu \ 42 | --k3s-channel latest 43 | 44 | mv kubeconfig kubeconfig_cluster_three -------------------------------------------------------------------------------- /monitoring-v1-custom-alertmanager/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | name: custom-alertmanager 5 | namespace: cattle-prometheus 6 | spec: 7 | baseImage: rancher/prom-alertmanager 8 | version: v0.21.0 9 | replicas: 1 10 | listenLocal: false 11 | serviceAccountName: custom-alertmanager 12 | externalUrl: http://alertmanager-operated.cattle-prometheus:9093 13 | paused: false 14 | logFormat: "logfmt" 15 | logLevel: "info" 16 | retention: "120h" 17 | resources: 18 | limits: 19 | cpu: 1000m 20 | memory: 500Mi 21 | requests: 22 | cpu: 50m 23 | memory: 50Mi 24 | routePrefix: "/" 25 | securityContext: 26 | fsGroup: 2000 27 | runAsGroup: 2000 28 | runAsNonRoot: true 29 | runAsUser: 1000 30 | portName: web 31 | --- 32 | apiVersion: v1 33 | kind: ServiceAccount 34 | metadata: 35 | name: custom-alertmanager 36 | namespace: cattle-prometheus -------------------------------------------------------------------------------- /prod-app-deployment/infra/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | 6 | variable "aws_secret_key" { 7 | type = string 8 | description = "AWS secret key used to create AWS infrastructure" 9 | } 10 | 11 | variable "digitalocean_token" { 12 | type = string 13 | description = "API token for DigitalOcean" 14 | } 15 | 16 | variable "aws_region" { 17 | type = string 18 | 
description = "AWS region used for all resources" 19 | default = "eu-central-1" 20 | } 21 | 22 | variable "instance_type" { 23 | type = string 24 | description = "Instance type used for all EC2 instances" 25 | default = "t3a.xlarge" 26 | } 27 | 28 | variable "ssh_key_file_name" { 29 | type = string 30 | description = "File path and name of SSH private key used for infrastructure and RKE" 31 | default = "~/.ssh/id_rsa" 32 | } 33 | -------------------------------------------------------------------------------- /modules/demo-workloads/nginx-with-pvc/deployment-direct.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-direct 5 | labels: 6 | app: nginx-direct 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: nginx-direct 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx-direct 16 | spec: 17 | containers: 18 | - name: nginx-direct 19 | image: nginx:latest 20 | ports: 21 | - containerPort: 80 22 | name: http 23 | protocol: TCP 24 | readinessProbe: 25 | httpGet: 26 | port: http 27 | path: / 28 | volumeMounts: 29 | - mountPath: /usr/share/nginx/html 30 | name: nginx-www 31 | volumes: 32 | - name: nginx-www 33 | nfs: 34 | path: /mnt/sharedfolder/nginx-direct 35 | server: 172.31.38.123 36 | -------------------------------------------------------------------------------- /downstream-azure-windows/userdata/server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export DEBIAN_FRONTEND=noninteractive 4 | curl -sL https://releases.rancher.com/install-docker/${docker_version}.sh | sh 5 | sudo usermod -aG docker ${username} 6 | 7 | cat <<'EOF' | sudo tee /etc/docker/daemon.json > /dev/null 8 | { 9 | "log-driver": "json-file", 10 | "log-opts": { 11 | "max-size": "100m", 12 | "max-file": "3" 13 | } 14 | } 15 | EOF 16 | 17 | sudo systemctl restart docker 18 | 19 | docker pull stedolan/jq 20 | 21 | ipData=$(curl -H "Metadata: true" http://169.254.169.254/metadata/instance/network?api-version=2019-06-01 \ 22 | | docker run --rm -i stedolan/jq .interface[0].ipv4.ipAddress[0]) 23 | 24 | publicIP=$(echo $ipData | docker run --rm -i stedolan/jq -r .publicIpAddress) 25 | privateIP=$(echo $ipData | docker run --rm -i stedolan/jq -r .privateIpAddress) 26 | 27 | ${register_command} --address $publicIP --internal-address $privateIP --etcd --controlplane --worker 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /rkevms/variables.tf: -------------------------------------------------------------------------------- 1 | # Required 2 | variable "aws_access_key" { 3 | type = string 4 | description = "AWS access key used to create infrastructure" 5 | } 6 | 7 | # Required 8 | variable "aws_secret_key" { 9 | type = string 10 | description = "AWS secret key used to create AWS infrastructure" 11 | } 12 | 13 | variable "aws_region" { 14 | type = string 15 | description = "AWS region used for all resources" 16 | default = "eu-central-1" 17 | } 18 | 19 | variable "prefix" { 20 | type = string 21 | description = "Prefix added to names of all resources" 22 | default = "bhofmann" 23 | } 24 | 25 | variable "instance_type" { 26 | type = string 27 | description = "Instance type used for all EC2 instances" 28 | default = "t3a.xlarge" 29 | } 30 | 31 | variable "ssh_key_file_name" { 32 | type = string 33 | description = "File path and name of SSH private key used for infrastructure and RKE" 34 | 
default = "~/.ssh/id_rsa" 35 | } 36 | -------------------------------------------------------------------------------- /modules/demo-workloads/demo-shop/ingressgateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: frontend-gateway 5 | namespace: shop 6 | spec: 7 | selector: 8 | istio: ingressgateway 9 | servers: 10 | - hosts: 11 | - shop-istio.plgrnd.be 12 | port: 13 | name: http 14 | number: 80 15 | protocol: HTTP 16 | tls: 17 | httpsRedirect: true 18 | - port: 19 | number: 443 20 | name: https 21 | protocol: HTTPS 22 | tls: 23 | mode: SIMPLE 24 | credentialName: shop-tls-certificate-certs 25 | hosts: 26 | - shop-istio.plgrnd.be 27 | --- 28 | apiVersion: networking.istio.io/v1alpha3 29 | kind: VirtualService 30 | metadata: 31 | name: frontend-ingress 32 | namespace: shop 33 | spec: 34 | hosts: 35 | - shop-istio.plgrnd.be 36 | gateways: 37 | - frontend-gateway 38 | http: 39 | - route: 40 | - destination: 41 | host: frontend 42 | port: 43 | number: 80 44 | -------------------------------------------------------------------------------- /rancher-azure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "azure_subscription_id" { 2 | type = string 3 | description = "Azure subscription id under which resources will be provisioned" 4 | } 5 | variable "azure_client_id" { 6 | type = string 7 | description = "Azure client id used to create resources" 8 | } 9 | variable "azure_client_secret" { 10 | type = string 11 | description = "Client secret used to authenticate with Azure apis" 12 | } 13 | variable "azure_tenant_id" { 14 | type = string 15 | description = "Azure tenant id used to create resources" 16 | } 17 | variable "azure_location" { 18 | type = string 19 | description = "Azure location used for all resources" 20 | default = "East US" 21 | } 22 | variable "prefix" { 23 | type = string 24 | default = "bhofmann" 25 | } 26 | variable "ssh_key_file_name" { 27 | type = string 28 | description = "File path and name of SSH private key used for infrastructure and RKE" 29 | default = "~/.ssh/id_rsa" 30 | } -------------------------------------------------------------------------------- /downstream-harvester-do/README.md: -------------------------------------------------------------------------------- 1 | # downstream-harvester-do 2 | 3 | Creates a downstream cluster on digital ocean, with nested virtualization and installs harvester. 4 | 5 | ## Installation 6 | 7 | Before applying the module run 8 | 9 | ``` 10 | touch out/kube_conig_demo.yaml 11 | ``` 12 | 13 | Run terraform 14 | 15 | ``` 16 | terraform init 17 | terraform apply 18 | ``` 19 | 20 | ## Harvester login credentials 21 | 22 | admin/password 23 | 24 | ## Images 25 | 26 | * https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img 27 | * https://download.opensuse.org/tumbleweed/iso/openSUSE-MicroOS-DVD-x86_64-Current.iso 28 | 29 | ## Connect to vms via ssh 30 | 31 | Add SSH key before creating a VM! 
32 | 33 | ``` 34 | ssh -J root@KUBERNETES_NODE_IP ubuntu@VM_IP 35 | ``` 36 | 37 | ## cloud-config to set user password 38 | 39 | ``` 40 | #cloud-config 41 | password: password 42 | chpasswd: {expire: false} 43 | ssh_pwauth: true 44 | ``` 45 | 46 | ## Cleanup 47 | 48 | ``` 49 | terraform destroy 50 | ``` -------------------------------------------------------------------------------- /private_registry/install_harbor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | $(terraform output -state=terraform.tfstate -json "lb" | jq -r '"export LB=\(.)"') 7 | 8 | k3sup install \ 9 | --ip $IP3 \ 10 | --user ubuntu \ 11 | --cluster \ 12 | --k3s-extra-args "--node-external-ip ${IP3}" \ 13 | --k3s-channel v1.19 14 | 15 | mv kubeconfig kubeconfig_harbor 16 | 17 | export KUBECONFIG=$(pwd)/kubeconfig_harbor 18 | export HELM_EXPERIMENTAL_OCI=1 19 | export ENCODED_DIGITALOCEAN_TOKEN=$(awk -F "=" '/digitalocean_token/ {print $2}' terraform.tfvars | tr -d '" \n' | base64 -w 0) 20 | 21 | make -C ../modules/demo-workloads -e install-cert-manager 22 | 23 | kubectl rollout status deployment -n cert-manager cert-manager 24 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 25 | 26 | make -C ../modules/demo-workloads -e install-harbor-standalone 27 | 28 | watch kubectl get pods,ingress -A -------------------------------------------------------------------------------- /prod-app-deployment/fleet-prepared/ghost/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: ghost 2 | 3 | helm: 4 | chart: github.com/bashofmann/rancher-demo-setups/prod-app-deployment/charts/ghost 5 | releaseName: ghost 6 | values: 7 | autoscaling: 8 | enabled: false 9 | minReplicas: 2 10 | maxReplicas: 5 11 | resources: 12 | limits: 13 | cpu: 100m 14 | memory: 256Mi 15 | requests: 16 | cpu: 100m 17 | memory: 256Mi 18 | storage: 19 | enabled: false 20 | mysqlcluster: 21 | enabled: false 22 | password: abcdefghi 23 | rootPassword: abcdefghi 24 | ingress: 25 | enabled: true 26 | annotations: 27 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 28 | hosts: 29 | - host: ghost.k8sdemo.plgrnd.be 30 | paths: 31 | - path: / 32 | pathType: ImplementationSpecific 33 | tls: 34 | - secretName: ghost-tls 35 | hosts: 36 | - ghost.k8sdemo.plgrnd.be 37 | -------------------------------------------------------------------------------- /vms_lb_dns/install_rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export KUBECONFIG=$(pwd)/kubeconfig 6 | 7 | helm repo add jetstack https://charts.jetstack.io 8 | 9 | helm upgrade --install \ 10 | cert-manager jetstack/cert-manager \ 11 | --namespace cert-manager \ 12 | --set installCRDs=true \ 13 | --version v1.11.1 --create-namespace 14 | 15 | kubectl rollout status deployment -n cert-manager cert-manager 16 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 17 | helm repo add rancher-prime https://charts.rancher.com/server-charts/prime 18 | 19 | helm upgrade --install rancher rancher-latest/rancher \ 20 | --namespace cattle-system \ 21 | --version v2.7.5 \ 22 | --set hostname=rancher.plgrnd.be --create-namespace \ 23 | --set ingress.tls.source=letsEncrypt 24 | 25 | kubectl apply -f clusterissuer.yaml 26 | 27 | watch "kubectl get secret 
--namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}' && kubectl get pods,ingress,certificates -A" 28 | -------------------------------------------------------------------------------- /downstream-aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | variable "aws_secret_key" { 6 | type = string 7 | description = "AWS secret key used to create AWS infrastructure" 8 | } 9 | variable "aws_region" { 10 | type = string 11 | description = "AWS region used for all resources" 12 | default = "eu-central-1" 13 | } 14 | variable "digitalocean_token" { 15 | type = string 16 | } 17 | variable "email" { 18 | type = string 19 | } 20 | variable "rancher_url" { 21 | type = string 22 | } 23 | variable "rancher_access_key" { 24 | type = string 25 | } 26 | variable "rancher_secret_key" { 27 | type = string 28 | } 29 | variable "prefix" { 30 | type = string 31 | description = "Prefix added to names of all resources" 32 | default = "bhofmann" 33 | } 34 | variable "docker_version" { 35 | type = string 36 | description = "Docker version to install on nodes" 37 | default = "19.03" 38 | } -------------------------------------------------------------------------------- /vms_lb_dns/install_k3s_sles.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | ssh -o StrictHostKeyChecking=no ec2-user@$IP0 "sudo mkdir -p /var/lib/rancher/k3s/server/manifests" 8 | dd if=traefik-config.yaml | ssh -o StrictHostKeyChecking=no ec2-user@$IP0 sudo dd of=/var/lib/rancher/k3s/server/manifests/traefik-config.yaml 9 | 10 | k3sup install \ 11 | --ip $IP0 \ 12 | --user ec2-user \ 13 | --cluster \ 14 | --k3s-extra-args "--node-external-ip ${IP0}" \ 15 | --k3s-channel v1.24 16 | 17 | k3sup join \ 18 | --ip $IP1 \ 19 | --user ec2-user \ 20 | --server-user ec2-user \ 21 | --server-ip $IP0 \ 22 | --server \ 23 | --k3s-extra-args "--node-external-ip ${IP1}" \ 24 | --k3s-channel v1.24 25 | 26 | k3sup join \ 27 | --ip $IP2 \ 28 | --user ec2-user \ 29 | --server-user ec2-user \ 30 | --server-ip $IP0 \ 31 | --server \ 32 | --k3s-extra-args "--node-external-ip ${IP2}" \ 33 | --k3s-channel v1.24 34 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | 6 | # Required 7 | variable "aws_secret_key" { 8 | type = string 9 | description = "AWS secret key used to create AWS infrastructure" 10 | } 11 | 12 | variable "aws_region" { 13 | type = string 14 | description = "AWS region used for all resources" 15 | default = "eu-central-1" 16 | } 17 | 18 | variable "vpc_cidr" { 19 | description = "CIDR for the whole VPC" 20 | default = "10.0.0.0/16" 21 | } 22 | 23 | variable "public_subnet_cidr" { 24 | description = "CIDR for the Public Subnet" 25 | default = "10.0.0.0/24" 26 | } 27 | 28 | variable "private_subnet_cidr" { 29 | description = "CIDR for the Private Subnet" 30 | default = "10.0.1.0/24" 31 | } 32 | 33 | variable "ssh_key_file_name" { 34 | type = string 35 | description = "File path 
and name of SSH private key used for infrastructure and RKE" 36 | default = "~/.ssh/id_rsa" 37 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | 6 | # Required 7 | variable "aws_secret_key" { 8 | type = string 9 | description = "AWS secret key used to create AWS infrastructure" 10 | } 11 | 12 | variable "aws_region" { 13 | type = string 14 | description = "AWS region used for all resources" 15 | default = "eu-central-1" 16 | } 17 | 18 | variable "vpc_cidr" { 19 | description = "CIDR for the whole VPC" 20 | default = "10.0.0.0/16" 21 | } 22 | 23 | variable "public_subnet_cidr" { 24 | description = "CIDR for the Public Subnet" 25 | default = "10.0.0.0/24" 26 | } 27 | 28 | variable "private_subnet_cidr" { 29 | description = "CIDR for the Private Subnet" 30 | default = "10.0.1.0/24" 31 | } 32 | 33 | variable "ssh_key_file_name" { 34 | type = string 35 | description = "File path and name of SSH private key used for infrastructure and RKE" 36 | default = "~/.ssh/id_rsa" 37 | } -------------------------------------------------------------------------------- /downstream-fleet/k3s.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "k3s_0" { 2 | provisioner "local-exec" { 3 | command = "bash install_k3s.sh && mv kubeconfig kubeconfig_0" 4 | environment = { 5 | IP = aws_instance.cluster_one.public_ip 6 | } 7 | } 8 | provisioner "local-exec" { 9 | when = destroy 10 | command = "rm kubeconfig_0" 11 | } 12 | } 13 | resource "null_resource" "k3s_1" { 14 | provisioner "local-exec" { 15 | command = "bash install_k3s.sh && mv kubeconfig kubeconfig_1" 16 | environment = { 17 | IP = aws_instance.cluster_two.public_ip 18 | } 19 | } 20 | provisioner "local-exec" { 21 | when = destroy 22 | command = "rm kubeconfig_1" 23 | } 24 | } 25 | resource "null_resource" "k3s_2" { 26 | provisioner "local-exec" { 27 | command = "bash install_k3s.sh && mv kubeconfig kubeconfig_2" 28 | environment = { 29 | IP = aws_instance.cluster_three.public_ip 30 | } 31 | } 32 | provisioner "local-exec" { 33 | when = destroy 34 | command = "rm kubeconfig_2" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /modules/demo-workloads/neuvector/configure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | cd "$(dirname "$0")" 3 | 4 | API_URL="" 5 | ADMIN_PASSWORD="admin" 6 | 7 | TOKEN_JSON=$(curl -H "Content-Type: application/json" -d "{\"password\":{\"username\":\"admin\",\"password\":\"${ADMIN_PASSWORD}\"}}" "${API_URL}/v1/auth") 8 | TOKEN=$(echo ${TOKEN_JSON} | jq -r '.token.token') 9 | 10 | curl -k -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" "${API_URL}/v1/scan/scanner" 11 | 12 | # Get all federated clusters 13 | joint_cluster_ids=$(curl -H 'Content-Type: application/json' -H "X-Auth-Token: ${TOKEN}" -X GET "${API_URL}/v1/fed/member" | jq -r '.joint_clusters[].id') 14 | 15 | # For each cluster, get a list of workloads including a scan summary 16 | for id in $joint_cluster_ids; do 17 | curl -H 'Content-Type: application/json' -H "X-Auth-Token: ${TOKEN}" -X GET "${API_URL}/v1/fed/cluster/${id}/v1/workload?view=pod" --compressed | jq '.' 
18 | done 19 | 20 | # Log out 21 | curl -s -H 'Content-Type: application/json' -H "X-Auth-Token: ${TOKEN}" -X DELETE "${API_URL}/v1/auth" 22 | -------------------------------------------------------------------------------- /network-policies-demo/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: web-application 5 | --- 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: network-policy-test 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | labels: 15 | app: httpbin 16 | name: httpbin 17 | namespace: web-application 18 | spec: 19 | replicas: 1 20 | selector: 21 | matchLabels: 22 | app: httpbin 23 | template: 24 | metadata: 25 | labels: 26 | app: httpbin 27 | spec: 28 | containers: 29 | - name: httpbin 30 | image: kennethreitz/httpbin 31 | ports: 32 | - containerPort: 80 33 | readinessProbe: 34 | httpGet: 35 | path: / 36 | port: 80 37 | --- 38 | apiVersion: v1 39 | kind: Service 40 | metadata: 41 | name: httpbin 42 | namespace: web-application 43 | spec: 44 | selector: 45 | app: httpbin 46 | type: ClusterIP 47 | ports: 48 | - protocol: TCP 49 | port: 90 50 | targetPort: 80 51 | -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | 6 | # Required 7 | variable "aws_secret_key" { 8 | type = string 9 | description = "AWS secret key used to create AWS infrastructure" 10 | } 11 | 12 | variable "aws_region" { 13 | type = string 14 | description = "AWS region used for all resources" 15 | default = "eu-central-1" 16 | } 17 | 18 | variable "vpc_cidr" { 19 | description = "CIDR for the whole VPC" 20 | default = "10.0.0.0/16" 21 | } 22 | 23 | variable "public_subnet_cidr" { 24 | description = "CIDR for the Public Subnet" 25 | default = "10.0.0.0/24" 26 | } 27 | 28 | variable "private_subnet_cidr" { 29 | description = "CIDR for the Private Subnet" 30 | default = "10.0.1.0/24" 31 | } 32 | 33 | variable "ssh_key_file_name" { 34 | type = string 35 | description = "File path and name of SSH private key used for infrastructure and RKE" 36 | default = "~/.ssh/id_rsa" 37 | } -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | apiVersion: autoscaling/v2beta1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ include "ghost.fullname" . }} 6 | labels: 7 | {{- include "ghost.labels" . | nindent 4 }} 8 | spec: 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: {{ include "ghost.fullname" . 
}} 13 | minReplicas: {{ .Values.autoscaling.minReplicas }} 14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 15 | metrics: 16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 17 | - type: Resource 18 | resource: 19 | name: cpu 20 | targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 21 | {{- end }} 22 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 23 | - type: Resource 24 | resource: 25 | name: memory 26 | targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 27 | {{- end }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /vms_lb_dns/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | 6 | variable "aws_secret_key" { 7 | type = string 8 | description = "AWS secret key used to create AWS infrastructure" 9 | } 10 | 11 | variable "digitalocean_token" { 12 | type = string 13 | description = "API token for DigitalOcean" 14 | } 15 | 16 | variable "aws_region" { 17 | type = string 18 | description = "AWS region used for all resources" 19 | default = "eu-central-1" 20 | } 21 | 22 | variable "prefix" { 23 | type = string 24 | description = "Prefix added to names of all resources" 25 | default = "bhofmann" 26 | } 27 | 28 | variable "instance_type" { 29 | type = string 30 | description = "Instance type used for all EC2 instances" 31 | default = "t3a.2xlarge" 32 | } 33 | 34 | variable "ssh_key_file_name" { 35 | type = string 36 | description = "File path and name of SSH private key used for infrastructure and RKE" 37 | default = "~/.ssh/id_rsa" 38 | } 39 | -------------------------------------------------------------------------------- /downstream-do/node-templates.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cloud_credential" "do" { 2 | name = "${var.prefix}-do" 3 | 4 | digitalocean_credential_config { 5 | access_token = var.digitalocean_token 6 | } 7 | } 8 | 9 | locals { 10 | os_version = "ubuntu-22-04-x64" 11 | } 12 | 13 | resource "rancher2_node_template" "do" { 14 | name = "${var.prefix}-do" 15 | 16 | cloud_credential_id = rancher2_cloud_credential.do.id 17 | engine_install_url = "https://releases.rancher.com/install-docker/${var.docker_version}.sh" 18 | 19 | digitalocean_config { 20 | image = local.os_version 21 | region = "fra1" 22 | size = "s-8vcpu-32gb" 23 | userdata = "" 24 | } 25 | } 26 | 27 | resource "rancher2_node_template" "dow" { 28 | name = "${var.prefix}-dow" 29 | 30 | cloud_credential_id = rancher2_cloud_credential.do.id 31 | engine_install_url = "https://releases.rancher.com/install-docker/${var.docker_version}.sh" 32 | 33 | digitalocean_config { 34 | image = local.os_version 35 | region = "fra1" 36 | size = "s-4vcpu-8gb" 37 | userdata = "" 38 | } 39 | } 40 | 41 | 42 | -------------------------------------------------------------------------------- /downstream-harvester-do/sshkey.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_daemonset" "ssh-key" { 2 | depends_on = [ 3 | local_file.kube_config 4 | ] 5 | metadata { 6 | name = "ssh-key" 7 | } 8 | spec { 9 | selector { 10 | match_labels = { 11 | app = "ssh-key" 12 | } 13 | } 14 | template { 15 | metadata { 16 | labels = { 17 | app = "ssh-key" 18 | } 19 | } 20 | spec { 21 | init_container { 22 | 
name = "ssh-key" 23 | image = "alpine" 24 | command = ["/bin/sh", 25 | "-xc", "mkdir -p /host/root/.ssh && echo '${file("${var.ssh_key_file_name}.pub")}' > /host/root/.ssh/authorized_keys"] 26 | volume_mount { 27 | mount_path = "/host/root" 28 | name = "root-home" 29 | } 30 | } 31 | container { 32 | name = "sleep" 33 | image = "rancher/pause:3.2" 34 | } 35 | volume { 36 | name = "root-home" 37 | host_path { 38 | path = "/root" 39 | } 40 | } 41 | } 42 | } 43 | } 44 | } -------------------------------------------------------------------------------- /ioloadtest/storageos/cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "storageos.com/v1" 2 | kind: StorageOSCluster 3 | metadata: 4 | name: "storageos" 5 | namespace: "storageos-operator" 6 | spec: 7 | secretRefName: "storageos-api" # Reference from the Secret created in the previous step 8 | secretRefNamespace: "storageos-operator" # Namespace of the Secret 9 | namespace: "kube-system" 10 | k8sDistro: "rancher" 11 | disableScheduler: true 12 | kvBackend: 13 | #address: 'storageos-etcd-client.etcd:2379' # Example address, change for your etcd endpoint 14 | address: '172.16.133.218:2379,172.16.133.205:2379,172.16.133.194:2379' # You can set ETCD server ips 15 | csi: 16 | enable: true 17 | deploymentStrategy: deployment 18 | enableControllerPublishCreds: true 19 | enableNodePublishCreds: true 20 | enableProvisionCreds: true 21 | resources: 22 | requests: 23 | memory: "512Mi" 24 | # nodeSelectorTerms: 25 | # - matchExpressions: 26 | # - key: "node-role.kubernetes.io/worker" # Compute node label will vary according to your installation 27 | # operator: In 28 | # values: 29 | # - "true" -------------------------------------------------------------------------------- /downstream-vsphere/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rancher_url" { 2 | type = string 3 | } 4 | variable "rancher_access_key" { 5 | type = string 6 | } 7 | variable "rancher_secret_key" { 8 | type = string 9 | } 10 | variable "prefix" { 11 | type = string 12 | description = "Prefix added to names of all resources" 13 | default = "bhofmann" 14 | } 15 | variable "vm_template_name" { 16 | type = string 17 | } 18 | variable "vsphere_datacenter" { 19 | type = string 20 | } 21 | variable "vsphere_cluster" { 22 | type = string 23 | default = "" 24 | } 25 | variable "vsphere_resource_pool" { 26 | type = string 27 | default = "" 28 | } 29 | variable "vsphere_datastore" { 30 | type = string 31 | } 32 | variable "vsphere_network" { 33 | type = string 34 | } 35 | variable "vcenter_user" { 36 | type = string 37 | } 38 | variable "vcenter_password" { 39 | type = string 40 | } 41 | variable "vcenter_server" { 42 | type = string 43 | } 44 | variable "ssh_key_file_name" { 45 | type = string 46 | description = "File path and name of SSH private key used for infrastructure and RKE" 47 | default = "~/.ssh/id_rsa" 48 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/userdata/cluster_vms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | cat <<'EOF' | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null 4 | Acquire::http::Proxy "http://${proxy_private_ip}:8888/"; 5 | Acquire::https::Proxy "http://${proxy_private_ip}:8888/"; 6 | EOF 7 | 8 | export HTTP_PROXY=http://${proxy_private_ip}:8888 9 | export HTTPS_PROXY=http://${proxy_private_ip}:8888 10 | 11 | curl -sL 
https://releases.rancher.com/install-docker/19.03.sh | sh 12 | sudo usermod -aG docker ubuntu 13 | 14 | sudo mkdir -p /etc/systemd/system/docker.service.d 15 | cat <<'EOF' | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null 16 | [Service] 17 | Environment="HTTP_PROXY=http://${proxy_private_ip}:8888" 18 | Environment="HTTPS_PROXY=http://${proxy_private_ip}:8888" 19 | Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" 20 | EOF 21 | 22 | cat <<'EOF' | sudo tee /etc/docker/daemon.json > /dev/null 23 | { 24 | "log-driver": "json-file", 25 | "log-opts": { 26 | "max-size": "100m", 27 | "max-file": "3" 28 | } 29 | } 30 | EOF 31 | 32 | sudo systemctl daemon-reload 33 | sudo systemctl restart docker 34 | -------------------------------------------------------------------------------- /rkevms/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "3.37.0" 6 | hashes = [ 7 | "h1:RvLGIfRZfbzY58wUja9B6CvGdgVVINy7zLVBdLqIelA=", 8 | "zh:064c9b21bcd69be7a8631ccb3eccb8690c6a9955051145920803ef6ce6fc06bf", 9 | "zh:277dd05750187a41282cf6e066e882eac0dd0056e3211d125f94bf62c19c4b8b", 10 | "zh:47050211f72dcbf3d99c82147abd2eefbb7238efb94d5188979f60de66c8a3df", 11 | "zh:4a4e0d070399a050847545721dae925c192a2d6354802fdfbea73769077acca5", 12 | "zh:4cbc46f79239c85d69389f9e91ca9a9ebf6a8a937cfada026c5a037fd09130fb", 13 | "zh:6548dcb1ac4a388ed46034a5317fa74b3b0b0f68eec03393f2d4d09342683f95", 14 | "zh:75b4a82596aa525d95b0b2847fe648368c6e2b054059c4dc4dcdee01d374b592", 15 | "zh:75cf5cc674b61c82300667a82650f56722618b119ab0526b47b5ecbb4bbf49d0", 16 | "zh:93c896682359039960c38eb5a4b29d1cc06422f228db0572b90330427e2a21ec", 17 | "zh:c7256663aedbc9de121316b6d0623551386a476fc12b8eb77e88532ce15de354", 18 | "zh:e995c32f49c23b5938200386e08b2a3fd69cf5102b5299366c0608bbeac68429", 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /modules/demo-workloads/neuvector/values.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | replicas: 1 3 | ingress: 4 | enabled: true 5 | host: neuvector-api.cluster-one.plgrnd.be 6 | path: "/" 7 | annotations: 8 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 9 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 10 | tls: true 11 | secretName: neuvector-api-tls-secret 12 | apisvc: 13 | type: ClusterIP 14 | ranchersso: 15 | enabled: true 16 | secret: 17 | enabled: true 18 | data: 19 | sysinitcfg.yaml: 20 | Cluster_Name: cluster-one 21 | federation: 22 | mastersvc: 23 | type: ClusterIP 24 | ingress: 25 | enabled: true 26 | host: neuvector-master.cluster-one.plgrnd.be 27 | path: "/" 28 | annotations: 29 | cert-manager.io/cluster-issuer: letsencrypt-http-prod 30 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 31 | tls: true 32 | secretName: neuvector-master-tls-secret 33 | 34 | cve: 35 | scanner: 36 | replicas: 1 37 | 38 | k3s: 39 | enabled: true 40 | 41 | global: 42 | cattle: 43 | url: https://rancher.plgrnd.be/ 44 | -------------------------------------------------------------------------------- /vspherevms/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 
2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/vsphere" { 5 | version = "2.0.1" 6 | hashes = [ 7 | "h1:fyi+5l52d341puFghX1Qt1K2UVu/3YtGJKOxPhKyrxA=", 8 | "zh:0262afdc6e929b7de7be330de3556cb754fb3ba389134a097496743733c4ad66", 9 | "zh:0fd94409ddc72aefa143bad2e9cee80f82edb9c93b70d4c5a4b875f478413fc1", 10 | "zh:140012cb8618bf56da7c05699d9b1610e7d4a29989046cafcd636ab4a1c4ebea", 11 | "zh:26e9b583574c2b88a1530b95cd331a35e93377e8c8b5cb81b79aa1cbee835508", 12 | "zh:3b0d91191a0f9990f0c91ee1a67e2764ad584df96a67df01d306186e80d4dca6", 13 | "zh:65b10ac0192b5054774b3292f59bbd550ebf567aa512cd9400cb2c9a0c7019fb", 14 | "zh:69c4807dcb93dbcda0fed712e78704a6ea4fb57e4ff6de3150458cd20e05f4c9", 15 | "zh:a4cff51137ceb4e5a69642dbd5ead87957e006cd07695da96bf5676ada987f89", 16 | "zh:d2105cc6a37ef8757cfdc31d1d4b2def6ff63032fd2032a30914b0e5974eab06", 17 | "zh:d7ea1ffe6a4ded4fcc40bd4237926c6700d06114500bb8fbbeddd7c50b5adbfc", 18 | "zh:f1599b1525de74f45069f6655ae0737891d0502229e132cd81d8c717f6825fb0", 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /rancher-azure/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/azurerm" { 5 | version = "2.61.0" 6 | hashes = [ 7 | "h1:efJD4eyFEhALYYKn1tS6v26shkJdf0d2kF1E87dDuRM=", 8 | "zh:011075f7a9239e3f4e592f7ace8f40aa1af60e300b97a7d97d3834ecb3a2e27c", 9 | "zh:0f6bb1f5b742360b77125c7704d2890f1377db30f62188800a54000e3bc6c764", 10 | "zh:353723efc426f48d3f5d49ce0fb52a78c72102f7cb6707c50ee300701a6aa08e", 11 | "zh:443d822d7f51327bd5315594c14099f9ea8ccd89968529f7e0337dc201ab789d", 12 | "zh:47f508eb252194e6ee71154e7df88d0cc35709a0263a2c66fb93e62b3a213928", 13 | "zh:7c522a59851482b65336a710f2ad25d69000ea2c69eebe942e48fa60f6423b88", 14 | "zh:a25fa3560ac72836a4e99f43b6814a2eb3f08016c3caf841247d512a7f2e8243", 15 | "zh:b5c5a92cb9f063cfea7b664c09b8acb7958f5a007f56f03a3c7b8c9f0902bf04", 16 | "zh:c123c5d33b40325396fbf9bd29e789881900eb6f24246a5fec30d314269f8f80", 17 | "zh:c935e9681267c052df53537e7c24afac34ec55325a4909b534846374b751b854", 18 | "zh:ff6e772af934ffb47c3bdcb36b0c21e20e7b3f3b9e91064477deea8f6c56ce2c", 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /k3svms/variables.tf: -------------------------------------------------------------------------------- 1 | # Required 2 | variable "aws_access_key" { 3 | type = string 4 | description = "AWS access key used to create infrastructure" 5 | } 6 | 7 | # Required 8 | variable "aws_secret_key" { 9 | type = string 10 | description = "AWS secret key used to create AWS infrastructure" 11 | } 12 | 13 | variable "aws_region" { 14 | type = string 15 | description = "AWS region used for all resources" 16 | default = "eu-central-1" 17 | } 18 | 19 | variable "prefix" { 20 | type = string 21 | description = "Prefix added to names of all resources" 22 | default = "bhofmann" 23 | } 24 | 25 | variable "instance_type" { 26 | type = string 27 | description = "Instance type used for all EC2 instances" 28 | default = "t3a.medium" 29 | } 30 | 31 | variable "ssh_key_file_name" { 32 | type = string 33 | description = "File path and name of SSH private key used for infrastructure and RKE" 34 | default = "~/.ssh/id_rsa" 35 | } 36 | 37 | variable "vm_count" { 38 | type = number 39 | default = 2 40 | } 41 | 42 
| # Local variables used to reduce repetition 43 | locals { 44 | node_username = "ubuntu" 45 | } 46 | -------------------------------------------------------------------------------- /aws-cloud-provider-instance-profile/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "3.52.0" 6 | hashes = [ 7 | "h1:Fy/potyWfS8NVumHqWi6STgaQUX66diUmgZDfFNBeXU=", 8 | "zh:04a4f8a1b34292fd6a72c1efe03f6f10186ecbdc318df36d462d0be1c21ce72d", 9 | "zh:0601006f14f437489902555720dd8fb4e67450356438bab64b61cf6d0e1af681", 10 | "zh:14214e996b8db0a2038b74a2ddbea7356b3e53f73003cde2c9069294d9a6c421", 11 | "zh:17d1ecc280d776271b0fc0fd6a4033933be8e67eb6a39b7bfb3c242cd218645f", 12 | "zh:247ae4bc3b52fba96ed1593e7b23d62da0d2c99498fc0d968fcf28020df3c3aa", 13 | "zh:2e0432fabeb5e44d756a5566168768f1b6dea3cc0e5650fac966820e90d18367", 14 | "zh:34f6f95b88c5d8c105d9a3b7d2712e7df1181948bfbef33bb6a87d7a77c20c0d", 15 | "zh:3de6bf02b9499bf8dc13843da72a03db5ae8188b8157f0e7b3d5bf1d7cd1ac8b", 16 | "zh:43198a223ea6d6dfb82deac62b29181c3be18dc77b9ef9f8d44c32b08e44ea5c", 17 | "zh:a7de44c9445c100a2823c371df03fcaa9ecb1642750ccdc02294fa6cd1095859", 18 | "zh:c3c44bd07e5b6cdb776ff674e39feb708ba3ee3d0dff2c88d1d5db323094d942", 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /modules/demo-workloads/mysql-cluster/cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mysql-secret 5 | namespace: default 6 | type: Opaque 7 | data: 8 | # root password is required to be specified 9 | # iJKpC3OIJB3ozyIp 10 | ROOT_PASSWORD: aUpLcEMzT0lKQjNvenlJcA== 11 | # application credentials that will be created at cluster bootstrap 12 | # wordpress 13 | DATABASE: d29yZHByZXNz 14 | # wordpress 15 | USER: d29yZHByZXNz 16 | # 0lYNAYzayGoIUCfA 17 | PASSWORD: MGxZTkFZemF5R29JVUNmQQ== 18 | --- 19 | apiVersion: mysql.presslabs.org/v1alpha1 20 | kind: MysqlCluster 21 | metadata: 22 | name: db 23 | namespace: default 24 | spec: 25 | replicas: 3 26 | secretName: mysql-secret 27 | mysqlVersion: "5.7" 28 | minAvailable: "50%" 29 | 30 | mysqlConf: 31 | max_connections: "200" 32 | innodb-buffer-pool-size: "256M" 33 | tmp_table_size: 256M 34 | max_heap_table_size: 256M 35 | 36 | podSpec: 37 | resources: 38 | requests: 39 | memory: 256Mi 40 | cpu: 200m 41 | 42 | volumeSpec: 43 | persistentVolumeClaim: 44 | accessModes: [ "ReadWriteOnce" ] 45 | resources: 46 | requests: 47 | storage: 1Gi -------------------------------------------------------------------------------- /downstream-k3s/install_k3s.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | k3sup install \ 6 | --ip $IP0 \ 7 | --user ubuntu \ 8 | --cluster \ 9 | --k3s-extra-args "--disable local-storage --node-external-ip ${IP0}" \ 10 | --k3s-channel v1.21 11 | 12 | k3sup join \ 13 | --ip $IP1 \ 14 | --user ubuntu \ 15 | --server-user ubuntu \ 16 | --server-ip $IP0 \ 17 | --server \ 18 | --k3s-extra-args "--disable local-storage --node-external-ip ${IP1}" \ 19 | --k3s-channel v1.21 20 | 21 | k3sup join \ 22 | --ip $IP2 \ 23 | --user ubuntu \ 24 | --server-user ubuntu \ 25 | --server-ip $IP0 \ 26 | --server \ 27 | --k3s-extra-args "--disable local-storage --node-external-ip 
${IP2}" \ 28 | --k3s-channel v1.21 29 | 30 | k3sup join \ 31 | --ip $IP3 \ 32 | --user ubuntu \ 33 | --server-user ubuntu \ 34 | --server-ip $IP0 \ 35 | --server \ 36 | --k3s-extra-args "--disable local-storage --node-external-ip ${IP3}" \ 37 | --k3s-channel v1.21 38 | 39 | k3sup join \ 40 | --ip $IP4 \ 41 | --user ubuntu \ 42 | --server-user ubuntu \ 43 | --server-ip $IP0 \ 44 | --server \ 45 | --k3s-extra-args "--disable local-storage --node-external-ip ${IP4}" \ 46 | --k3s-channel v1.21 47 | -------------------------------------------------------------------------------- /k3svms/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.1.0" 6 | hashes = [ 7 | "h1:0C8acviZs0ttKv+vo6gbl49n6L+sQBIDgGdMmkrSAGE=", 8 | "h1:zKrFf3a4xjClMyvAcF2zDeqLfS9EdKlz56C76CpRYyU=", 9 | "zh:0e4143cf20943e0efd96805fe69b5527dd89a023fa67f39c5f4128e5ca736e91", 10 | "zh:0f208f3497a2bc977204d195085466804f7c6c9eaa1a3cf864ab2631adf683dd", 11 | "zh:2bcfcaad7957504d7572063fc9178a2f4636ad98f24cdd5c74d4ffcc750db5a6", 12 | "zh:38100b0cddc1716f2a58d93e55a34272862ffe571439b6d472af26d79a2b5a12", 13 | "zh:3bcf9b33dd9d44e9c9562ed45b05511c65ef35496e5a48f58aa31427a76e037f", 14 | "zh:6a808deb14ef7b7f8e4f87ceb996bfac88a99d654489eb99d0f2325a0e7b3c09", 15 | "zh:81b49e8f8d3e8ec220c206c2f9af83455f1b674481d11ffd279897a2972ec66b", 16 | "zh:a37a637c48cd7be608ce248bbed717d154e70b328fccc31ae29ec94f158d64cd", 17 | "zh:bdefee374253e800402c5f2ef4637836ba7d6c6889a6c8bb4ffd0602e95b8877", 18 | "zh:cdc2df5a3bd5cdeff497572a74c023e102e087dd39610afeef27b1c3d15541a0", 19 | "zh:f3fc038d953b35f4ed3572a71d92151ed99d56a9bc3a3eaa6670be6120e30588", 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /private_registry/sync_rancher_images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export KUBECONFIG=$(pwd)/kubeconfig_harbor 6 | 7 | export harbor_pw=$(kubectl get secret -n harbor harbor-harbor-core -o jsonpath="{.data.HARBOR_ADMIN_PASSWORD}" | base64 --decode) 8 | 9 | docker login registry.plgrnd.be -u admin -p $harbor_pw 10 | 11 | export RANCHER_VERSION=2.5.8 12 | mkdir -p rancher_images 13 | wget -O rancher_images/rancher-images.txt https://github.com/rancher/rancher/releases/download/v${RANCHER_VERSION}/rancher-images.txt 14 | wget -O rancher_images/rancher-load-images.sh https://github.com/rancher/rancher/releases/download/v${RANCHER_VERSION}/rancher-load-images.sh 15 | wget -O rancher_images/rancher-save-images.sh https://github.com/rancher/rancher/releases/download/v${RANCHER_VERSION}/rancher-save-images.sh 16 | 17 | chmod +x rancher_images/*.sh 18 | 19 | cd rancher_images 20 | 21 | ./rancher-save-images.sh --image-list ./rancher-images.txt 22 | 23 | curl https://registry.plgrnd.be/api/v2.0/projects -X POST --data "@../harbor_rancher_project.json" -H 'Content-Type: application/json' -u admin:${harbor_pw} 24 | 25 | ./rancher-load-images.sh --image-list ./rancher-images.txt --registry registry.plgrnd.be 26 | 27 | -------------------------------------------------------------------------------- /downstream-k3s/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used 
to create infrastructure" 4 | } 5 | variable "aws_secret_key" { 6 | type = string 7 | description = "AWS secret key used to create AWS infrastructure" 8 | } 9 | variable "aws_region" { 10 | type = string 11 | description = "AWS region used for all resources" 12 | default = "eu-central-1" 13 | } 14 | variable "digitalocean_token" { 15 | type = string 16 | } 17 | variable "email" { 18 | type = string 19 | } 20 | variable "rancher_url" { 21 | type = string 22 | } 23 | variable "rancher_access_key" { 24 | type = string 25 | } 26 | variable "rancher_secret_key" { 27 | type = string 28 | } 29 | variable "prefix" { 30 | type = string 31 | description = "Prefix added to names of all resources" 32 | default = "bhofmann" 33 | } 34 | variable "instance_type" { 35 | type = string 36 | description = "Instance type used for all EC2 instances" 37 | default = "t3a.xlarge" 38 | } 39 | variable "ssh_key_file_name" { 40 | type = string 41 | description = "File path and name of SSH private key used for infrastructure and RKE" 42 | default = "~/.ssh/id_rsa" 43 | } -------------------------------------------------------------------------------- /private_registry/variables.tf: -------------------------------------------------------------------------------- 1 | # Required 2 | variable "aws_access_key" { 3 | type = string 4 | description = "AWS access key used to create infrastructure" 5 | } 6 | 7 | # Required 8 | variable "aws_secret_key" { 9 | type = string 10 | description = "AWS secret key used to create AWS infrastructure" 11 | } 12 | 13 | # Required 14 | variable "digitalocean_token" { 15 | type = string 16 | description = "API token for DigitalOcean" 17 | } 18 | 19 | variable "aws_region" { 20 | type = string 21 | description = "AWS region used for all resources" 22 | default = "eu-central-1" 23 | } 24 | 25 | variable "prefix" { 26 | type = string 27 | description = "Prefix added to names of all resources" 28 | default = "bhofmann" 29 | } 30 | 31 | variable "instance_type" { 32 | type = string 33 | description = "Instance type used for all EC2 instances" 34 | default = "t3a.medium" 35 | } 36 | 37 | variable "ssh_key_file_name" { 38 | type = string 39 | description = "File path and name of SSH private key used for infrastructure and RKE" 40 | default = "~/.ssh/id_rsa" 41 | } 42 | 43 | 44 | # Local variables used to reduce repetition 45 | locals { 46 | node_username = "ubuntu" 47 | } 48 | -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: ghost 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "4.24.0" 25 | -------------------------------------------------------------------------------- /gke-test/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/rancher/rancher2" { 5 | version = "1.22.2" 6 | constraints = "1.22.2" 7 | hashes = [ 8 | "h1:li+b8fyJ+4DBBRE4rhAvFkm1xQ1aQEduWo2zMlHhHDQ=", 9 | "zh:1cfe57e114f15db4bee6d5ed2f15c4712dbdd03e523ade72898fce838c849bc5", 10 | "zh:220079903c21bbe1bccab5f166c082909b4eb9cec04554df37201ee9ec792e59", 11 | "zh:361ef564ef748162e1f4f1d8590d1ef1ffec1d9ab11ffcda621cc2be8a49a013", 12 | "zh:3a2f4cd19fefc7ed4bc12f41d7b206068bb797eb5be032b06f0fe5563c2783d1", 13 | "zh:47560323ca26193dbf09f70cce2025fdfb19e06a2cf497ac2523a9fb084031ca", 14 | "zh:4d905397320d3b9c20edad57bd87c54bbdd0c4a49298a3f3a7b69b9dfd013b2d", 15 | "zh:a816d940ae6b6ac91e0c9fe46aca6bb76c0542fc2e160c92e4602fee92bfd91d", 16 | "zh:b7a2c6d49f823796c5fa64e3119e39798550287c4ec89e844bdebc6b7c3b0e7a", 17 | "zh:cc12278062a6e241dafb83d85f7bd8b26a6b08cdadd87b5489a02d2111e41319", 18 | "zh:dc96b3e655217ae51abb88febfe840ddd3e25e26409abebc215fda5a78c23f8f", 19 | "zh:e0ee416c2db05b0ce9e35692b685285c7cdc45fa13bb7b1063b56f188e97c717", 20 | "zh:e1185a1d89736c8061557d9e28e391372eaf0d00f32f5efe6a9977c79c69a6c4", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/userdata/proxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | sudo apt-get update 4 | 5 | sudo apt-get install -y tinyproxy 6 | 7 | cat <<'EOF' | sudo tee /etc/tinyproxy/tinyproxy.conf > /dev/null 8 | User tinyproxy 9 | Group tinyproxy 10 | 11 | Port 8888 12 | 13 | Timeout 600 14 | 15 | DefaultErrorFile "/usr/share/tinyproxy/default.html" 16 | 17 | StatFile "/usr/share/tinyproxy/stats.html" 18 | 19 | Logfile "/var/log/tinyproxy/tinyproxy.log" 20 | 21 | LogLevel Info 22 | 23 | PidFile "/run/tinyproxy/tinyproxy.pid" 24 | 25 | MaxClients 100 26 | 27 | MinSpareServers 5 28 | MaxSpareServers 20 29 | 30 | StartServers 10 31 | 32 | MaxRequestsPerChild 0 33 | 34 | Allow 127.0.0.1 35 | Allow 10.0.0.0/8 36 | 37 | ViaProxyName "tinyproxy" 38 | 39 | ConnectPort 80 40 | ConnectPort 443 41 | ConnectPort 563 42 | 43 | EOF 44 | 45 | sudo service tinyproxy restart 46 | 47 | curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" 48 | chmod +x ./kubectl 49 | sudo mv ./kubectl /usr/local/bin/kubectl 50 | 51 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 52 | chmod +x get_helm.sh 53 | sudo ./get_helm.sh 54 | 55 | -------------------------------------------------------------------------------- /downstream-aks/.terraform.lock.hcl: -------------------------------------------------------------------------------- 
1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/rancher/rancher2" { 5 | version = "1.25.0" 6 | constraints = "1.25.0" 7 | hashes = [ 8 | "h1:pv1IDzQI6rjCy1sM3GC9KYzVTSnCk+x91ykbWsaJ1p0=", 9 | "zh:2a2b9eb0599253c1564bff1998dc4049cddee6cb143d595a6225925aca0a1e61", 10 | "zh:2f9527c0df13cbbd79a6c29126bfdb08eb329a82a13416e22a7361d37b2b2afe", 11 | "zh:49563ec4b8fba24012c280bb5ed4a196377f8ecbc8ce5f33e897be4054a5849f", 12 | "zh:4e1707f7b97a8cae059a8294957510b7e2bff47360d2b4bba6c9e9dfabe7942c", 13 | "zh:6ac711001d29c5d71f91f6d48eefe536a830d56c74c6a469a9ccdd206e3c2e9c", 14 | "zh:7e77cb8b6dd69fbf3de8e3966a9978a762462b3910694c7ab80fc58e84050bac", 15 | "zh:814685a725079cf96ecfbfdcd9f0dff1624bd700cbf0ec85f271136cbaaeeed5", 16 | "zh:89d731d38aafe8742869e2295a9d7a3634ec2e9d865d441792f0e87f19aa0050", 17 | "zh:ca30ef1d051db2fd8ebd2584ec3a5764474f59ede1e05c149e6adf7f8a4ea087", 18 | "zh:cd16ea9e613786f6f4cf596a954dd7c38256c7c1d8c76804f2bc9d61ba3f6b9e", 19 | "zh:e0994a7e3ddd566086ba287b598c7e684e1cf373a04d5d75294dc76d3b6e1ee7", 20 | "zh:f0bae38481d8a962ab4b1cba14f2e93b404f565f5eea7cd29cb0e37e21de6dc8", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /downstream-eks/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/rancher/rancher2" { 5 | version = "1.17.1" 6 | constraints = "1.17.1" 7 | hashes = [ 8 | "h1:lbCBMuSrh2mc+bsKO6RC17lMvtMQ6dvLvmHXBWeOBX0=", 9 | "zh:38ed6da3970b44234b295af2efe945ff719ea34027a190f9a37c4207a331a58e", 10 | "zh:4d7928109393e58b9f010eb7916f7b20da0aabef1a8725e3876dfa4c519bf49b", 11 | "zh:681a88234810c5cf444850c717cb6e842606ac9980303b72cc49c5455ce5ebfd", 12 | "zh:6d57c1e344ddea7d125203add6cce2e06af3eb64efeb2a9168a178238938b7c3", 13 | "zh:6d7fc944c216d9fe905e3c9b1c91e38b566828d19bf375008f1e65e68cc2aeff", 14 | "zh:7724b06054dc5a624caeeff37807c71031de78a26781d5517e423784d791263b", 15 | "zh:9238b514c5372c1286b39dd2b8b56d157d02084de905b9b663e4978c69f7b2b8", 16 | "zh:a7c45ffb42109ded2bf2cd8a52bdfc09a9e58d7d89a3d9f1bfe2153234f88a6b", 17 | "zh:c39dc21508b577b89c4d587945f94d92059fb4c3345d35c994fa9a724f23ceb7", 18 | "zh:db58bde23ba61ce33d074ed52dffb4ac247f5fbc849be770743522cd88402653", 19 | "zh:ef8044b8a801a609427f782ab8bc7d38f83551bd90e814ffa8209b5066319bc0", 20 | "zh:fcb96721f4c2dc67e383c2aeca910c43bea0776f911e5592fd25a62c8295c642", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /app-in-existing-cluster/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/rancher/rancher2" { 5 | version = "1.17.1" 6 | constraints = "1.17.1" 7 | hashes = [ 8 | "h1:lbCBMuSrh2mc+bsKO6RC17lMvtMQ6dvLvmHXBWeOBX0=", 9 | "zh:38ed6da3970b44234b295af2efe945ff719ea34027a190f9a37c4207a331a58e", 10 | "zh:4d7928109393e58b9f010eb7916f7b20da0aabef1a8725e3876dfa4c519bf49b", 11 | "zh:681a88234810c5cf444850c717cb6e842606ac9980303b72cc49c5455ce5ebfd", 12 | "zh:6d57c1e344ddea7d125203add6cce2e06af3eb64efeb2a9168a178238938b7c3", 13 | "zh:6d7fc944c216d9fe905e3c9b1c91e38b566828d19bf375008f1e65e68cc2aeff", 14 | "zh:7724b06054dc5a624caeeff37807c71031de78a26781d5517e423784d791263b", 15 | "zh:9238b514c5372c1286b39dd2b8b56d157d02084de905b9b663e4978c69f7b2b8", 16 | "zh:a7c45ffb42109ded2bf2cd8a52bdfc09a9e58d7d89a3d9f1bfe2153234f88a6b", 17 | "zh:c39dc21508b577b89c4d587945f94d92059fb4c3345d35c994fa9a724f23ceb7", 18 | "zh:db58bde23ba61ce33d074ed52dffb4ac247f5fbc849be770743522cd88402653", 19 | "zh:ef8044b8a801a609427f782ab8bc7d38f83551bd90e814ffa8209b5066319bc0", 20 | "zh:fcb96721f4c2dc67e383c2aeca910c43bea0776f911e5592fd25a62c8295c642", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/mysqlcluster.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.mysqlcluster.enabled }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "ghost.fullname" . }}-db 6 | labels: 7 | {{- include "ghost.labels" . | nindent 4 }} 8 | type: Opaque 9 | data: 10 | ROOT_PASSWORD: {{ .Values.mysqlcluster.rootPassword | b64enc }} 11 | DATABASE: {{ .Values.mysqlcluster.db | b64enc }} 12 | USER: {{ .Values.mysqlcluster.user | b64enc }} 13 | PASSWORD: {{ .Values.mysqlcluster.password | b64enc }} 14 | --- 15 | apiVersion: mysql.presslabs.org/v1alpha1 16 | kind: MysqlCluster 17 | metadata: 18 | name: {{ include "ghost.fullname" . }}-db 19 | labels: 20 | {{- include "ghost.labels" . | nindent 4 }} 21 | spec: 22 | replicas: 2 23 | secretName: {{ include "ghost.fullname" . 
}}-db 24 | mysqlVersion: "5.7" 25 | minAvailable: "50%" 26 | 27 | mysqlConf: 28 | max_connections: "200" 29 | innodb-buffer-pool-size: "256M" 30 | tmp_table_size: 256M 31 | max_heap_table_size: 256M 32 | 33 | podSpec: 34 | resources: 35 | requests: 36 | memory: 256Mi 37 | cpu: 200m 38 | 39 | volumeSpec: 40 | persistentVolumeClaim: 41 | accessModes: [ "ReadWriteOnce" ] 42 | resources: 43 | requests: 44 | storage: 2Gi 45 | {{- end }} -------------------------------------------------------------------------------- /k3svms/install_centos.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | k3sup install \ 8 | --ip $IP0 \ 9 | --user centos \ 10 | --cluster \ 11 | --k3s-extra-args "--node-external-ip ${IP0}" \ 12 | --k3s-channel latest 13 | 14 | k3sup join \ 15 | --ip $IP1 \ 16 | --user centos \ 17 | --server-user centos \ 18 | --server-ip $IP0 \ 19 | --server \ 20 | --k3s-extra-args "--node-external-ip ${IP1}" \ 21 | --k3s-channel latest 22 | 23 | k3sup join \ 24 | --ip $IP2 \ 25 | --user centos \ 26 | --server-user centos \ 27 | --server-ip $IP0 \ 28 | --server \ 29 | --k3s-extra-args "--node-external-ip ${IP2}" \ 30 | --k3s-channel latest 31 | 32 | export KUBECONFIG=kubeconfig 33 | 34 | helm install \ 35 | cert-manager jetstack/cert-manager \ 36 | --namespace cert-manager \ 37 | --set installCRDs=true \ 38 | --version 1.7.1 --create-namespace 39 | 40 | kubectl rollout status deployment -n cert-manager cert-manager 41 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 42 | 43 | helm install rancher rancher-latest/rancher \ 44 | --namespace cattle-system \ 45 | --version 2.6.4 \ 46 | --set hostname=rancher.${IP0}.sslip.io --create-namespace 47 | 48 | watch kubectl get pods,ingress -A -------------------------------------------------------------------------------- /app-in-existing-cluster/app.tf: -------------------------------------------------------------------------------- 1 | //data "rancher2_cluster" "local" { 2 | // name = "bhofmann-demo" 3 | //} 4 | resource "rancher2_cluster" "foo-imported" { 5 | name = "bhofmann-imported" 6 | description = "Foo rancher2 imported cluster" 7 | agent_env_vars { 8 | name = "name1" 9 | value = "value1" 10 | } 11 | agent_env_vars { 12 | name = "name2" 13 | value = "value2" 14 | } 15 | } 16 | //resource "rancher2_project" "grafana" { 17 | // cluster_id = data.rancher2_cluster.local.id 18 | // name = "grafana" 19 | //} 20 | // 21 | //resource "rancher2_namespace" "grafana" { 22 | // name = "grafana" 23 | // project_id = rancher2_project.grafana.id 24 | //} 25 | 26 | //resource "rancher2_catalog_v2" "grafana" { 27 | // cluster_id = data.rancher2_cluster.local.id 28 | // name = "grafana" 29 | // git_repo = "https://grafana.github.io/helm-charts" 30 | // git_branch = "main" 31 | //} 32 | 33 | //resource "rancher2_cluster_sync" "sync" { 34 | // cluster_id = data.rancher2_cluster.local.id 35 | // wait_catalogs = true 36 | //} 37 | // 38 | //resource "rancher2_app_v2" "rancher-cis-benchmark" { 39 | // cluster_id = rancher2_cluster_sync.sync.cluster_id 40 | // name = "grafana" 41 | // namespace = rancher2_namespace.grafana.name 42 | // repo_name = rancher2_catalog_v2.grafana.name 43 | // chart_name = "grafana" 44 | // chart_version = "6.6.3" 45 | //} -------------------------------------------------------------------------------- 
/modules/demo-workloads/nexus/values.yaml: -------------------------------------------------------------------------------- 1 | nexusProxy: 2 | enabled: false 3 | env: 4 | nexusDockerHost: nexus-registry.k8s-demo.plgrnd.be 5 | nexusHttpHost: nexus.k8s-demo.plgrnd.be 6 | 7 | service: 8 | enabled: true 9 | type: ClusterIP 10 | ports: 11 | - name: docker 12 | port: 5003 13 | targetPort: 5003 14 | 15 | ingress: 16 | enabled: true 17 | annotations: 18 | cert-manager.io/cluster-issuer: letsencrypt-prod 19 | nginx.ingress.kubernetes.io/proxy-body-size: 700m 20 | rules: 21 | - host: nexus-registry.k8s-demo.plgrnd.be 22 | http: 23 | paths: 24 | - backend: 25 | serviceName: sonatype-nexus-service 26 | servicePort: 5003 27 | path: / 28 | 29 | deploymentStrategy: 30 | type: Recreate 31 | 32 | persistence: 33 | enabled: true 34 | storageSize: 8Gi 35 | 36 | resources: 37 | requests: 38 | cpu: 250m 39 | # Based on https://support.sonatype.com/hc/en-us/articles/115006448847#mem 40 | # and https://twitter.com/analytically/status/894592422382063616: 41 | # Xms == Xmx 42 | # Xmx <= 4G 43 | # MaxDirectMemory >= 2G 44 | # Xmx + MaxDirectMemory <= RAM * 2/3 (hence the request for 4800Mi) 45 | # MaxRAMFraction=1 is not being set as it would allow the heap 46 | # to use all the available memory. 47 | memory: 4800Mi -------------------------------------------------------------------------------- /proxysetup-aws-k3s/network.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "default" { 2 | cidr_block = var.vpc_cidr 3 | enable_dns_hostnames = true 4 | tags = { 5 | Name = "bhofmann-test-vpc" 6 | } 7 | } 8 | 9 | resource "aws_internet_gateway" "default" { 10 | vpc_id = aws_vpc.default.id 11 | } 12 | 13 | /* 14 | Public Subnet 15 | */ 16 | resource "aws_subnet" "eu-central-1a-public" { 17 | vpc_id = aws_vpc.default.id 18 | 19 | cidr_block = var.public_subnet_cidr 20 | availability_zone = "eu-central-1a" 21 | 22 | tags = { 23 | Name = "bhofmann Public Subnet" 24 | } 25 | } 26 | 27 | resource "aws_route_table" "eu-central-1a-public" { 28 | vpc_id = aws_vpc.default.id 29 | 30 | route { 31 | cidr_block = "0.0.0.0/0" 32 | gateway_id = aws_internet_gateway.default.id 33 | } 34 | 35 | tags = { 36 | Name = "bhofmann Public Subnet" 37 | } 38 | } 39 | 40 | resource "aws_route_table_association" "eu-central-1a-public" { 41 | subnet_id = aws_subnet.eu-central-1a-public.id 42 | route_table_id = aws_route_table.eu-central-1a-public.id 43 | } 44 | 45 | /* 46 | Private Subnet 47 | */ 48 | resource "aws_subnet" "eu-central-1a-private" { 49 | vpc_id = aws_vpc.default.id 50 | 51 | cidr_block = var.private_subnet_cidr 52 | availability_zone = "eu-central-1a" 53 | 54 | tags = { 55 | Name = "bhofmann Private Subnet" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /proxysetup-aws-rke/network.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "default" { 2 | cidr_block = var.vpc_cidr 3 | enable_dns_hostnames = true 4 | tags = { 5 | Name = "bhofmann-test-vpc" 6 | } 7 | } 8 | 9 | resource "aws_internet_gateway" "default" { 10 | vpc_id = aws_vpc.default.id 11 | } 12 | 13 | /* 14 | Public Subnet 15 | */ 16 | resource "aws_subnet" "eu-central-1a-public" { 17 | vpc_id = aws_vpc.default.id 18 | 19 | cidr_block = var.public_subnet_cidr 20 | availability_zone = "eu-central-1a" 21 | 22 | tags = { 23 | Name = "bhofmann Public Subnet" 24 | } 25 | } 26 | 27 | resource "aws_route_table" 
"eu-central-1a-public" { 28 | vpc_id = aws_vpc.default.id 29 | 30 | route { 31 | cidr_block = "0.0.0.0/0" 32 | gateway_id = aws_internet_gateway.default.id 33 | } 34 | 35 | tags = { 36 | Name = "bhofmann Public Subnet" 37 | } 38 | } 39 | 40 | resource "aws_route_table_association" "eu-central-1a-public" { 41 | subnet_id = aws_subnet.eu-central-1a-public.id 42 | route_table_id = aws_route_table.eu-central-1a-public.id 43 | } 44 | 45 | /* 46 | Private Subnet 47 | */ 48 | resource "aws_subnet" "eu-central-1a-private" { 49 | vpc_id = aws_vpc.default.id 50 | 51 | cidr_block = var.private_subnet_cidr 52 | availability_zone = "eu-central-1a" 53 | 54 | tags = { 55 | Name = "bhofmann Private Subnet" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /modules/demo-workloads/opni/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | disableUsage: false 4 | 5 | gateway: 6 | enabled: true 7 | # Service type for the external gateway APIs 8 | serviceType: ClusterIP 9 | # Gateway hostname (required if gateway.enabled is true) 10 | hostname: "opni.plgrnd.be" 11 | # Auth provider config (required if gateway.enabled is true) 12 | auth: 13 | # Noauth provider: 14 | provider: noauth 15 | noauth: 16 | grafanaHostname: grafana.plgrnd.be 17 | # Openid provider: 18 | # provider: openid 19 | # openid: 20 | # {} # see docs 21 | 22 | # Alerting 23 | alerting: 24 | enabled: false 25 | s3: 26 | internal: {} 27 | 28 | opni-prometheus-crd: 29 | enabled: false # set to false if `opni-agent.kube-prometheus-stack.enabled` is true 30 | 31 | opni-agent: 32 | enabled: true 33 | address: opni 34 | fullnameOverride: opni-agent 35 | bootstrapInCluster: 36 | enabled: true 37 | managementAddress: opni-internal:11090 38 | agent: 39 | version: v2 40 | kube-prometheus-stack: 41 | enabled: true 42 | disableUsage: false 43 | 44 | kube-prometheus-stack: 45 | grafana: 46 | enabled: false # disable the default Grafana deployment 47 | prometheus: 48 | enabled: false # disable the default Prometheus deployment 49 | alertmanager: 50 | enabled: false # disable the default Alertmanager deployment 51 | -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/network.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "default" { 2 | cidr_block = var.vpc_cidr 3 | enable_dns_hostnames = true 4 | tags = { 5 | Name = "bhofmann-test-vpc" 6 | } 7 | } 8 | 9 | resource "aws_internet_gateway" "default" { 10 | vpc_id = aws_vpc.default.id 11 | } 12 | 13 | /* 14 | Public Subnet 15 | */ 16 | resource "aws_subnet" "eu-central-1a-public" { 17 | vpc_id = aws_vpc.default.id 18 | 19 | cidr_block = var.public_subnet_cidr 20 | availability_zone = "eu-central-1a" 21 | 22 | tags = { 23 | Name = "bhofmann Public Subnet" 24 | } 25 | } 26 | 27 | resource "aws_route_table" "eu-central-1a-public" { 28 | vpc_id = aws_vpc.default.id 29 | 30 | route { 31 | cidr_block = "0.0.0.0/0" 32 | gateway_id = aws_internet_gateway.default.id 33 | } 34 | 35 | tags = { 36 | Name = "bhofmann Public Subnet" 37 | } 38 | } 39 | 40 | resource "aws_route_table_association" "eu-central-1a-public" { 41 | subnet_id = aws_subnet.eu-central-1a-public.id 42 | route_table_id = aws_route_table.eu-central-1a-public.id 43 | } 44 | 45 | /* 46 | Private Subnet 47 | */ 48 | resource "aws_subnet" "eu-central-1a-private" { 49 | vpc_id = aws_vpc.default.id 50 | 51 | cidr_block 
= var.private_subnet_cidr 52 | availability_zone = "eu-central-1a" 53 | 54 | tags = { 55 | Name = "bhofmann Private Subnet" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /k3svms/install_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | k3sup install \ 8 | --ip $IP0 \ 9 | --user ubuntu \ 10 | --cluster \ 11 | --k3s-extra-args "--node-external-ip ${IP0}" \ 12 | --k3s-channel latest 13 | 14 | k3sup join \ 15 | --ip $IP1 \ 16 | --user ubuntu \ 17 | --server-user ubuntu \ 18 | --server-ip $IP0 \ 19 | --server \ 20 | --k3s-extra-args "--node-external-ip ${IP1}" \ 21 | --k3s-channel latest 22 | 23 | k3sup join \ 24 | --ip $IP2 \ 25 | --user ubuntu \ 26 | --server-user ubuntu \ 27 | --server-ip $IP0 \ 28 | --server \ 29 | --k3s-extra-args "--node-external-ip ${IP2}" \ 30 | --k3s-channel latest 31 | 32 | mv kubeconfig kubeconfig_rancher 33 | 34 | export KUBECONFIG=kubeconfig_rancher 35 | 36 | helm upgrade --install \ 37 | cert-manager jetstack/cert-manager \ 38 | --namespace cert-manager \ 39 | --set installCRDs=true \ 40 | --version 1.7.1 --create-namespace 41 | 42 | kubectl rollout status deployment -n cert-manager cert-manager 43 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 44 | 45 | helm upgrade --install rancher rancher-latest/rancher \ 46 | --namespace cattle-system \ 47 | --version 2.6.4 \ 48 | --set hostname=rancher.${IP0}.sslip.io --create-namespace 49 | 50 | watch kubectl get pods,ingress -A 51 | -------------------------------------------------------------------------------- /k3svms/install_suse.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | k3sup install \ 8 | --ip $IP0 \ 9 | --user ec2-user \ 10 | --cluster \ 11 | --k3s-extra-args "--node-external-ip ${IP0}" \ 12 | --k3s-channel v1.24 13 | 14 | k3sup join \ 15 | --ip $IP1 \ 16 | --user ec2-user \ 17 | --server-user ec2-user \ 18 | --server-ip $IP0 \ 19 | --server \ 20 | --k3s-extra-args "--node-external-ip ${IP1}" \ 21 | --k3s-channel v1.24 22 | 23 | #k3sup join \ 24 | # --ip $IP2 \ 25 | # --user ec2-user \ 26 | # --server-user ec2-user \ 27 | # --server-ip $IP0 \ 28 | # --server \ 29 | # --k3s-extra-args "--node-external-ip ${IP2}" \ 30 | # --k3s-channel v1.24 31 | 32 | #mv kubeconfig kubeconfig_rancher 33 | # 34 | #export KUBECONFIG=kubeconfig_rancher 35 | # 36 | #helm upgrade --install \ 37 | # cert-manager jetstack/cert-manager \ 38 | # --namespace cert-manager \ 39 | # --set installCRDs=true \ 40 | # --version 1.7.1 --create-namespace 41 | # 42 | #kubectl rollout status deployment -n cert-manager cert-manager 43 | #kubectl rollout status deployment -n cert-manager cert-manager-webhook 44 | # 45 | #helm upgrade --install rancher rancher-latest/rancher \ 46 | # --namespace cattle-system \ 47 | # --version 2.6.4 \ 48 | # --set hostname=rancher.${IP0}.sslip.io --create-namespace 49 | # 50 | #watch kubectl get pods,ingress -A 51 | -------------------------------------------------------------------------------- /prod-app-deployment/01-install_k3s_sles.sh: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | $(terraform output -state=infra/terraform.tfstate -json node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 6 | 7 | ssh -o StrictHostKeyChecking=no ec2-user@$IP0 "sudo mkdir -p /var/lib/rancher/k3s/server/manifests" 8 | dd if=k3s/traefik-config.yaml | ssh -o StrictHostKeyChecking=no ec2-user@$IP0 sudo dd of=/var/lib/rancher/k3s/server/manifests/traefik-config.yaml 9 | 10 | k3sup install \ 11 | --ip $IP0 \ 12 | --user ec2-user \ 13 | --cluster \ 14 | --k3s-extra-args "--node-external-ip ${IP0}" \ 15 | --k3s-channel v1.21 16 | 17 | k3sup join \ 18 | --ip $IP1 \ 19 | --user ec2-user \ 20 | --server-user ec2-user \ 21 | --server-ip $IP0 \ 22 | --server \ 23 | --k3s-extra-args "--node-external-ip ${IP1}" \ 24 | --k3s-channel v1.21 25 | 26 | k3sup join \ 27 | --ip $IP2 \ 28 | --user ec2-user \ 29 | --server-user ec2-user \ 30 | --server-ip $IP0 \ 31 | --server \ 32 | --k3s-extra-args "--node-external-ip ${IP2}" \ 33 | --k3s-channel v1.21 34 | 35 | k3sup join \ 36 | --ip $IP3 \ 37 | --user ec2-user \ 38 | --server-user ec2-user \ 39 | --server-ip $IP0 \ 40 | --k3s-extra-args "--node-external-ip ${IP3}" \ 41 | --k3s-channel v1.21 42 | 43 | k3sup join \ 44 | --ip $IP4 \ 45 | --user ec2-user \ 46 | --server-user ec2-user \ 47 | --server-ip $IP0 \ 48 | --k3s-extra-args "--node-external-ip ${IP4}" \ 49 | --k3s-channel v1.21 -------------------------------------------------------------------------------- /downstream-k3s/awsvms.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "quickstart_key_pair" { 2 | key_name_prefix = "${var.prefix}-k3s-" 3 | public_key = file("${var.ssh_key_file_name}.pub") 4 | } 5 | 6 | resource "aws_security_group" "rancher_sg_allowall" { 7 | name = "${var.prefix}-k3sdown-allowall" 8 | description = "Rancher quickstart - allow all traffic" 9 | 10 | ingress { 11 | from_port = "0" 12 | to_port = "0" 13 | protocol = "-1" 14 | cidr_blocks = ["0.0.0.0/0"] 15 | } 16 | 17 | egress { 18 | from_port = "0" 19 | to_port = "0" 20 | protocol = "-1" 21 | cidr_blocks = ["0.0.0.0/0"] 22 | } 23 | } 24 | 25 | resource "aws_instance" "k3s" { 26 | count = 5 27 | ami = data.aws_ami.ubuntu.id 28 | instance_type = var.instance_type 29 | 30 | key_name = aws_key_pair.quickstart_key_pair.key_name 31 | security_groups = [aws_security_group.rancher_sg_allowall.name] 32 | 33 | root_block_device { 34 | volume_size = 80 35 | } 36 | provisioner "remote-exec" { 37 | inline = [ 38 | "cloud-init status --wait" 39 | ] 40 | 41 | connection { 42 | type = "ssh" 43 | host = self.public_ip 44 | user = "ubuntu" 45 | private_key = file(var.ssh_key_file_name) 46 | } 47 | } 48 | tags = { 49 | Name = "${var.prefix}-k3s-downstream-${count.index}" 50 | Owner = "bhofmann" 51 | DoNotDelete = "true" 52 | } 53 | } 54 | 55 | 56 | -------------------------------------------------------------------------------- /prod-app-deployment/infra/lb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_elb" "k8sdemo-lb" { 2 | name = "k8sdemo-lb" 3 | subnets = [aws_subnet.k8sdemo-subnet.id] 4 | security_groups = [aws_security_group.k8sdemo-sg.id] 5 | 6 | listener { 7 | instance_port = 80 8 | instance_protocol = "tcp" 9 | lb_port = 80 10 | lb_protocol = "tcp" 11 | } 12 | 13 | listener { 14 | instance_port = 443 15 | instance_protocol = "tcp" 16 | lb_port = 443 17 | lb_protocol = "tcp" 18 | } 19 | 20 | health_check { 21 | healthy_threshold = 2 22 | unhealthy_threshold = 2 23 | timeout 
= 3 24 | target = "TCP:80" 25 | interval = 30 26 | } 27 | 28 | instances = [ 29 | aws_instance.k8sdemo-vm[0].id, 30 | aws_instance.k8sdemo-vm[1].id, 31 | aws_instance.k8sdemo-vm[2].id, 32 | aws_instance.k8sdemo-vm[3].id, 33 | aws_instance.k8sdemo-vm[4].id, 34 | ] 35 | cross_zone_load_balancing = true 36 | idle_timeout = 400 37 | connection_draining = true 38 | connection_draining_timeout = 400 39 | 40 | tags = { 41 | Name = "k8sdemo-vmlb-lb" 42 | } 43 | } 44 | 45 | data "digitalocean_domain" "zone" { 46 | name = "plgrnd.be" 47 | } 48 | 49 | resource "digitalocean_record" "rancher" { 50 | domain = data.digitalocean_domain.zone.name 51 | type = "CNAME" 52 | name = "*.k8sdemo" 53 | value = "${aws_elb.k8sdemo-lb.dns_name}." 54 | ttl = 60 55 | } -------------------------------------------------------------------------------- /downstream-harvester-do/demo-cluster.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cluster" "harvester" { 2 | depends_on = [ 3 | rancher2_node_template.harvester 4 | ] 5 | name = "${var.prefix}-harvester" 6 | description = "Cluster for harvester demo" 7 | cluster_auth_endpoint { 8 | enabled = true 9 | } 10 | rke_config { 11 | kubernetes_version = "v1.19.7-rancher1-1" 12 | addons = file("cni_multus_flannel.yaml") 13 | network { 14 | plugin = "none" 15 | } 16 | services { 17 | kubelet { 18 | extra_args = { 19 | cni-conf-dir = "/etc/cni/net.d" 20 | cni-bin-dir = "/opt/cni/bin" 21 | } 22 | } 23 | } 24 | } 25 | } 26 | 27 | resource "rancher2_node_pool" "harvester" { 28 | cluster_id = rancher2_cluster.harvester.id 29 | name = "harvester" 30 | hostname_prefix = "${var.prefix}-harvester" 31 | node_template_id = rancher2_node_template.harvester.id 32 | quantity = 3 33 | control_plane = true 34 | etcd = true 35 | worker = true 36 | } 37 | 38 | resource "rancher2_cluster_sync" "harvester" { 39 | cluster_id = rancher2_cluster.harvester.id 40 | 41 | node_pool_ids = [ 42 | rancher2_node_pool.harvester.id, 43 | ] 44 | 45 | state_confirm = 4 46 | } 47 | 48 | resource "local_file" "kube_config" { 49 | filename = "out/kube_config_demo.yaml" 50 | content = rancher2_cluster_sync.harvester.kube_config 51 | file_permission = "0600" 52 | } 53 | -------------------------------------------------------------------------------- /downstream-azure-windows/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rancher_url" { 2 | type = string 3 | } 4 | variable "rancher_admin_token" { 5 | type = string 6 | default = null 7 | } 8 | variable "rancher_access_key" { 9 | type = string 10 | default = null 11 | } 12 | variable "rancher_secret_key" { 13 | type = string 14 | default = null 15 | } 16 | variable "azure_subscription_id" { 17 | type = string 18 | description = "Azure subscription id under which resources will be provisioned" 19 | } 20 | 21 | variable "azure_client_id" { 22 | type = string 23 | description = "Azure client id used to create resources" 24 | } 25 | 26 | variable "azure_client_secret" { 27 | type = string 28 | description = "Client secret used to authenticate with Azure apis" 29 | } 30 | 31 | variable "azure_tenant_id" { 32 | type = string 33 | description = "Azure tenant id used to create resources" 34 | } 35 | 36 | variable "azure_location" { 37 | type = string 38 | description = "Azure location used for all resources" 39 | default = "East US" 40 | } 41 | variable "admin_password" { 42 | type = string 43 | } 44 | variable "prefix" { 45 | type = string 46 | default = 
"bhofmann" 47 | } 48 | variable "ssh_key_file_name" { 49 | type = string 50 | description = "File path and name of SSH private key used for infrastructure and RKE" 51 | default = "~/.ssh/id_rsa" 52 | } 53 | variable "docker_version" { 54 | type = string 55 | description = "Docker version to install on nodes" 56 | default = "19.03" 57 | } -------------------------------------------------------------------------------- /downstream-vsphere/node-templates.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cloud_credential" "vcenter" { 2 | name = "vcenter" 3 | 4 | vsphere_credential_config { 5 | vcenter = var.vcenter_server 6 | username = var.vcenter_user 7 | password = var.vcenter_password 8 | } 9 | } 10 | 11 | resource "rancher2_node_template" "controlplane" { 12 | name = "controlplane" 13 | description = "Template for control plane nodes" 14 | 15 | cloud_credential_id = rancher2_cloud_credential.vcenter.id 16 | 17 | vsphere_config { 18 | clone_from = var.vm_template_name 19 | creation_type = "template" 20 | datacenter = var.vsphere_datacenter 21 | datastore = var.vsphere_datastore 22 | pool = var.vsphere_resource_pool 23 | network = [ 24 | var.vsphere_network 25 | ] 26 | cpu_count = 2 27 | memory_size = 4096 28 | cfgparam = [ 29 | "disk.enableUUID=TRUE" 30 | ] 31 | } 32 | } 33 | 34 | resource "rancher2_node_template" "worker" { 35 | name = "worker" 36 | description = "Template for worker nodes" 37 | 38 | cloud_credential_id = rancher2_cloud_credential.vcenter.id 39 | 40 | vsphere_config { 41 | clone_from = var.vm_template_name 42 | creation_type = "template" 43 | datacenter = var.vsphere_datacenter 44 | datastore = var.vsphere_datastore 45 | pool = var.vsphere_resource_pool 46 | network = [ 47 | var.vsphere_network 48 | ] 49 | cpu_count = 4 50 | memory_size = 16384 51 | cfgparam = [ 52 | "disk.enableUUID=TRUE" 53 | ] 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /k3svms/data.tf: -------------------------------------------------------------------------------- 1 | # Data for AWS module 2 | 3 | # AWS data 4 | # ---------------------------------------------------------- 5 | 6 | # Use latest Ubuntu 20.04 AMI 7 | data "aws_ami" "ubuntu" { 8 | most_recent = true 9 | owners = ["099720109477"] # Canonical 10 | 11 | filter { 12 | name = "name" 13 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 14 | } 15 | 16 | filter { 17 | name = "virtualization-type" 18 | values = ["hvm"] 19 | } 20 | } 21 | 22 | data "aws_ami" "sles" { 23 | owners = ["013907871322"] 24 | most_recent = true 25 | 26 | filter { 27 | name = "name" 28 | values = ["suse-sles-15-sp2*"] 29 | } 30 | 31 | filter { 32 | name = "architecture" 33 | values = ["x86_64"] 34 | } 35 | 36 | filter { 37 | name = "root-device-type" 38 | values = ["ebs"] 39 | } 40 | } 41 | 42 | data "aws_ami" "opensuse" { 43 | owners = ["679593333241"] 44 | most_recent = true 45 | 46 | filter { 47 | name = "name" 48 | values = ["openSUSE-Leap-15.4*"] 49 | } 50 | 51 | filter { 52 | name = "architecture" 53 | values = ["x86_64"] 54 | } 55 | 56 | filter { 57 | name = "root-device-type" 58 | values = ["ebs"] 59 | } 60 | } 61 | 62 | #data "aws_ami" "rhel" { 63 | # most_recent = true 64 | # owners = ["309956199498"] # RedHat 65 | # 66 | # filter { 67 | # name = "name" 68 | # values = ["RHEL-8.3_HVM-*-x86_64-0-Hourly2-GP2"] 69 | # } 70 | # 71 | # filter { 72 | # name = "virtualization-type" 73 | # values = ["hvm"] 74 | # } 75 | #} 76 | 
-------------------------------------------------------------------------------- /modules/demo-workloads/external-monitoring/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: edge-clusters 5 | namespace: cattle-monitoring-system 6 | spec: 7 | endpoints: 8 | - interval: 1m 9 | honorLabels: true 10 | scrapeTimeout: 55s 11 | path: "/metrics" 12 | port: node-exporter 13 | - interval: 1m 14 | honorLabels: true 15 | scrapeTimeout: 55s 16 | path: "/metrics" 17 | port: kube-state-metrics 18 | namespaceSelector: 19 | matchNames: 20 | - cattle-monitoring-system 21 | selector: 22 | matchLabels: 23 | cluster: edge-clusters 24 | --- 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | name: edge-clusters 29 | labels: 30 | cluster: edge-clusters 31 | namespace: cattle-monitoring-system 32 | spec: 33 | type: ExternalName 34 | externalName: external 35 | clusterIP: "" 36 | ports: 37 | - port: 9100 38 | targetPort: 9100 39 | name: node-exporter 40 | protocol: TCP 41 | - port: 30080 42 | targetPort: 30080 43 | name: kube-state-metrics 44 | protocol: TCP 45 | --- 46 | apiVersion: v1 47 | kind: Endpoints 48 | metadata: 49 | name: edge-clusters 50 | labels: 51 | cluster: edge-clusters 52 | namespace: cattle-monitoring-system 53 | subsets: 54 | - addresses: 55 | - ip: 56 | - ip: 57 | - ip: 58 | ports: 59 | - port: 9100 60 | name: node-exporter 61 | protocol: TCP 62 | - port: 30080 63 | name: kube-state-metrics 64 | protocol: TCP -------------------------------------------------------------------------------- /rkevms/awsvms.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "quickstart_key_pair" { 2 | key_name_prefix = "${var.prefix}-k3s-" 3 | public_key = file("${var.ssh_key_file_name}.pub") 4 | } 5 | 6 | resource "aws_security_group" "rancher_sg_allowall" { 7 | name = "${var.prefix}-aws-allowall" 8 | description = "Rancher quickstart - allow all traffic" 9 | 10 | ingress { 11 | from_port = "0" 12 | to_port = "0" 13 | protocol = "-1" 14 | cidr_blocks = ["0.0.0.0/0"] 15 | } 16 | 17 | egress { 18 | from_port = "0" 19 | to_port = "0" 20 | protocol = "-1" 21 | cidr_blocks = ["0.0.0.0/0"] 22 | } 23 | } 24 | 25 | resource "aws_instance" "rancher-cluster" { 26 | count = 3 27 | ami = data.aws_ami.rhel.id 28 | instance_type = var.instance_type 29 | 30 | key_name = aws_key_pair.quickstart_key_pair.key_name 31 | security_groups = [aws_security_group.rancher_sg_allowall.name] 32 | 33 | tags = { 34 | Name = "${var.prefix}-rc-${count.index}" 35 | Owner = "bhofmann" 36 | DoNotDelete = "true" 37 | } 38 | 39 | user_data = templatefile("../userdata/server.sh", { 40 | docker_version = "20.10" 41 | username = "ec2-user" 42 | }) 43 | 44 | root_block_device { 45 | volume_size = 80 46 | } 47 | 48 | provisioner "remote-exec" { 49 | inline = [ 50 | "cloud-init status --wait" 51 | ] 52 | 53 | connection { 54 | type = "ssh" 55 | host = self.public_ip 56 | user = "ec2-user" 57 | private_key = file(var.ssh_key_file_name) 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /downstream-fleet/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "sles_central" { 2 | provider = aws.aws_eu_central 3 | owners = ["013907871322"] 4 | most_recent = true 5 | 6 | filter { 7 | name = "name" 8 | values = ["suse-sles-15-sp2*"] 9 | } 10 | 
11 | filter { 12 | name = "architecture" 13 | values = ["x86_64"] 14 | } 15 | 16 | filter { 17 | name = "root-device-type" 18 | values = ["ebs"] 19 | } 20 | } 21 | data "aws_ami" "sles_arm_central" { 22 | provider = aws.aws_eu_central 23 | owners = ["013907871322"] 24 | most_recent = true 25 | 26 | filter { 27 | name = "name" 28 | values = ["suse-sles-15-sp2*"] 29 | } 30 | 31 | filter { 32 | name = "architecture" 33 | values = ["arm64"] 34 | } 35 | 36 | filter { 37 | name = "root-device-type" 38 | values = ["ebs"] 39 | } 40 | } 41 | data "aws_ami" "sles_west" { 42 | provider = aws.aws_eu_west 43 | owners = ["013907871322"] 44 | most_recent = true 45 | 46 | filter { 47 | name = "name" 48 | values = ["suse-sles-15-sp2*"] 49 | } 50 | 51 | filter { 52 | name = "architecture" 53 | values = ["x86_64"] 54 | } 55 | 56 | filter { 57 | name = "root-device-type" 58 | values = ["ebs"] 59 | } 60 | } 61 | data "aws_ami" "sles_arm_west" { 62 | provider = aws.aws_eu_west 63 | owners = ["013907871322"] 64 | most_recent = true 65 | 66 | filter { 67 | name = "name" 68 | values = ["suse-sles-15-sp2*"] 69 | } 70 | 71 | filter { 72 | name = "architecture" 73 | values = ["arm64"] 74 | } 75 | 76 | filter { 77 | name = "root-device-type" 78 | values = ["ebs"] 79 | } 80 | } -------------------------------------------------------------------------------- /ioloadtest/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: loadtest-config 5 | namespace: loadtest 6 | data: 7 | # Directory to save the output data (It's not a fio parameter). 8 | gen-data.output-dir: '/app/benchmarks' 9 | # Prefix filename with this directory (see FILENAME). 10 | gen-data.directory: '/app/data' 11 | # The block size in bytes used for I/O units. Default: 4096. 12 | gen-data.block-size: '4096' 13 | # If value is true, use non-buffered I/O. Default: false. 14 | gen-data.direct: '1' 15 | # Fio normally makes up a filename based on the job name. 16 | gen-data.filename: 'fio-test.bin' 17 | # Defines how the job issues I/O to the file. 18 | # libaio Linux native asynchronous I/O. 19 | gen-data.io-engine: 'libaio' 20 | # By default, fio will log an entry in the iops, latency, or bw log 21 | # for every I/O that completes. When writing to the disk log, that 22 | # can quickly grow to a very large size. Setting this option makes 23 | # fio average the each log entry over the specified period of time, 24 | # reducing the resolution of the log. 25 | gen-data.log-avg-msec: '1000' 26 | # Seed the random number generator used for random I/O patterns in a 27 | # predictable way so the pattern is repeatable across runs. 28 | # Default: true. 29 | gen-data.rand-repeat: '1' 30 | # Tell fio to terminate processing after the specified period of time. 31 | gen-data.runtime: '120' 32 | # How much data are we going to be reading/writing. 33 | gen-data.size: '50Mi' 34 | # If set, fio will run for the duration of the runtime specified 35 | # even if the file(s) are completely read or written 36 | gen-data.time-based: '1' -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 
3 | */}} 4 | {{- define "ghost.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "ghost.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "ghost.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "ghost.labels" -}} 37 | helm.sh/chart: {{ include "ghost.chart" . }} 38 | {{ include "ghost.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "ghost.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "ghost.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /downstream-harvester-do/harvester.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_catalog" "harvester" { 2 | name = "harvester" 3 | url = "https://github.com/rancher/harvester" 4 | version = "helm_v3" 5 | scope = "cluster" 6 | cluster_id = rancher2_cluster_sync.harvester.id 7 | } 8 | 9 | resource "rancher2_catalog" "longhorn" { 10 | name = "longhorn" 11 | url = "https://charts.longhorn.io" 12 | version = "helm_v3" 13 | scope = "cluster" 14 | cluster_id = rancher2_cluster_sync.harvester.id 15 | } 16 | 17 | resource "rancher2_project" "harvester" { 18 | name = "harvester" 19 | cluster_id = rancher2_cluster.harvester.id 20 | } 21 | 22 | resource "rancher2_namespace" "longhorn" { 23 | name = "longhorn-system" 24 | project_id = rancher2_project.harvester.id 25 | } 26 | 27 | resource "rancher2_namespace" "harvester" { 28 | name = "harvester-system" 29 | project_id = rancher2_project.harvester.id 30 | } 31 | 32 | resource "rancher2_app" "longhorn" { 33 | catalog_name = "${rancher2_cluster.harvester.id}:${rancher2_catalog.longhorn.name}" 34 | name = "longhorn-system" 35 | project_id = rancher2_project.harvester.id 36 | template_name = "longhorn" 37 | template_version = "1.1.0" 38 | target_namespace = rancher2_namespace.longhorn.id 39 | } 40 | 41 | resource "rancher2_app" "harvester" { 42 | depends_on = [ 43 | rancher2_app.longhorn 44 | ] 45 | catalog_name = "${rancher2_cluster.harvester.id}:${rancher2_catalog.harvester.name}" 46 | name = "harvester-system" 47 | project_id = rancher2_project.harvester.id 48 | template_name = "harvester" 49 | template_version = "0.1.0" 50 | target_namespace = rancher2_namespace.harvester.id 51 | } -------------------------------------------------------------------------------- 
/downstream-aws/node-templates.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cloud_credential" "aws" { 2 | name = "${var.prefix}-aws" 3 | 4 | amazonec2_credential_config { 5 | access_key = var.aws_access_key 6 | secret_key = var.aws_secret_key 7 | } 8 | } 9 | 10 | resource "rancher2_node_template" "controlplane" { 11 | name = "${var.prefix}-aws-controlplane" 12 | description = "Template for control plane nodes" 13 | 14 | cloud_credential_id = rancher2_cloud_credential.aws.id 15 | engine_install_url = "https://releases.rancher.com/install-docker/${var.docker_version}.sh" 16 | 17 | amazonec2_config { 18 | ami = data.aws_ami.ubuntu.id 19 | region = var.aws_region 20 | security_group = [aws_security_group.rancher_sg_allowall.name] 21 | subnet_id = "" 22 | vpc_id = "" 23 | zone = "a" 24 | root_size = "10" 25 | instance_type = "t3a.medium" 26 | iam_instance_profile = aws_iam_instance_profile.rke_profile.name 27 | } 28 | } 29 | 30 | resource "rancher2_node_template" "worker" { 31 | name = "${var.prefix}-aws-worker" 32 | description = "Template for worker nodes" 33 | 34 | cloud_credential_id = rancher2_cloud_credential.aws.id 35 | engine_install_url = "https://releases.rancher.com/install-docker/${var.docker_version}.sh" 36 | 37 | amazonec2_config { 38 | ami = data.aws_ami.ubuntu.id 39 | region = var.aws_region 40 | security_group = [aws_security_group.rancher_sg_allowall.name] 41 | subnet_id = "" 42 | vpc_id = "" 43 | zone = "a" 44 | root_size = "20" 45 | instance_type = "t3a.xlarge" 46 | iam_instance_profile = aws_iam_instance_profile.rke_profile.name 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /modules/demo-workloads/bookinfo/ingressgateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: bookinfo-gateway 5 | namespace: bookinfo 6 | spec: 7 | selector: 8 | istio: ingressgateway # use istio default controller 9 | servers: 10 | - port: 11 | number: 80 12 | name: http 13 | protocol: HTTP 14 | tls: 15 | httpsRedirect: true 16 | hosts: 17 | - bookinfo.k8s-demo.plgrnd.be 18 | - port: 19 | number: 443 20 | name: https 21 | protocol: HTTPS 22 | tls: 23 | mode: SIMPLE 24 | credentialName: bookinfo-tls-certificate-certs 25 | hosts: 26 | - bookinfo.k8s-demo.plgrnd.be 27 | --- 28 | apiVersion: networking.istio.io/v1alpha3 29 | kind: VirtualService 30 | metadata: 31 | name: bookinfo 32 | namespace: bookinfo 33 | spec: 34 | hosts: 35 | - bookinfo.istio.plgrnd.be 36 | gateways: 37 | - bookinfo-gateway 38 | http: 39 | - match: 40 | - uri: 41 | exact: / 42 | - uri: 43 | exact: /productpage 44 | - uri: 45 | prefix: /static 46 | - uri: 47 | exact: /login 48 | - uri: 49 | exact: /logout 50 | - uri: 51 | prefix: /api/v1/products 52 | route: 53 | - destination: 54 | host: productpage 55 | port: 56 | number: 9080 57 | 58 | --- 59 | apiVersion: networking.istio.io/v1alpha3 60 | kind: VirtualService 61 | metadata: 62 | name: bookinfo 63 | namespace: bookinfo 64 | spec: 65 | hosts: 66 | - productpage 67 | http: 68 | - match: 69 | - header: 70 | exact: 71 | - FOO: bar 72 | route: 73 | - destination: 74 | host: productpage-v1 75 | port: 76 | number: 9080 77 | - route: 78 | - destination: 79 | host: productpage-v2 80 | port: 81 | number: 9080 -------------------------------------------------------------------------------- /prod-app-deployment/charts-prepared/ghost/templates/NOTES.txt: 
-------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range $host := .Values.ingress.hosts }} 4 | {{- range .paths }} 5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} 6 | {{- end }} 7 | {{- end }} 8 | {{- else if contains "NodePort" .Values.service.type }} 9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ghost.fullname" . }}) 10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 11 | echo http://$NODE_IP:$NODE_PORT 12 | {{- else if contains "LoadBalancer" .Values.service.type }} 13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 14 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "ghost.fullname" . }}' 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "ghost.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo http://$SERVICE_IP:{{ .Values.service.port }} 17 | {{- else if contains "ClusterIP" .Values.service.type }} 18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "ghost.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 20 | echo "Visit http://127.0.0.1:8080 to use your application" 21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /downstream-do/demo-cluster.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cluster" "do" { 2 | depends_on = [ 3 | rancher2_node_template.do 4 | ] 5 | name = "${var.prefix}-do" 6 | cluster_auth_endpoint { 7 | enabled = true 8 | } 9 | rke_config { 10 | kubernetes_version = "v1.19.7-rancher1-1" 11 | services { 12 | kubelet { 13 | extra_args = { 14 | cni-conf-dir = "/etc/cni/net.d" 15 | cni-bin-dir = "/opt/cni/bin" 16 | } 17 | } 18 | } 19 | } 20 | } 21 | 22 | resource "rancher2_node_pool" "do-cp" { 23 | cluster_id = rancher2_cluster.do.id 24 | name = "do-cp" 25 | hostname_prefix = "${var.prefix}-doc" 26 | node_template_id = rancher2_node_template.do.id 27 | quantity = 3 28 | control_plane = true 29 | etcd = true 30 | worker = false 31 | } 32 | 33 | resource "rancher2_node_pool" "do-w-1" { 34 | cluster_id = rancher2_cluster.do.id 35 | name = "do-1w" 36 | hostname_prefix = "${var.prefix}-do1w" 37 | node_template_id = rancher2_node_template.dow.id 38 | quantity = 2 39 | control_plane = false 40 | etcd = false 41 | worker = true 42 | labels = { 43 | version = "1" 44 | } 45 | 46 | # lifecycle { 47 | # create_before_destroy = true 48 | # replace_triggered_by = [ 49 | # rancher2_node_template.dow, 50 | # ] 51 | # } 52 | } 53 | 54 | resource "rancher2_cluster_sync" "harvester" { 55 | cluster_id = rancher2_cluster.do.id 56 | 57 | node_pool_ids = [ 58 | rancher2_node_pool.do-cp.id, 59 | rancher2_node_pool.do-w-1.id, 60 | ] 61 | 62 | state_confirm = 4 63 | } 64 | 65 | resource "local_file" "kube_config" { 66 | 
filename = "out/kube_config_demo.yaml" 67 | content = rancher2_cluster_sync.harvester.kube_config 68 | file_permission = "0600" 69 | } 70 | -------------------------------------------------------------------------------- /downstream-vsphere/demo-cluster.tf: -------------------------------------------------------------------------------- 1 | resource "rancher2_cluster" "demo" { 2 | name = "${var.prefix}-demo" 3 | description = "Cluster for demos" 4 | cluster_auth_endpoint { 5 | enabled = true 6 | } 7 | rke_config { 8 | kubernetes_version = "v1.18.12-rancher1-1" 9 | network { 10 | plugin = "canal" 11 | } 12 | services { 13 | etcd { 14 | creation = "6h" 15 | retention = "24h" 16 | } 17 | } 18 | upgrade_strategy { 19 | drain = false 20 | max_unavailable_worker = "20%" 21 | } 22 | // todo add vsphere cloud provider 23 | // cloud_provider { 24 | // aws_cloud_provider { 25 | // } 26 | // } 27 | } 28 | } 29 | 30 | resource "rancher2_node_pool" "demo-control" { 31 | cluster_id = rancher2_cluster.demo.id 32 | name = "demo-control" 33 | hostname_prefix = "${var.prefix}-demo-control" 34 | node_template_id = rancher2_node_template.controlplane.id 35 | quantity = 3 36 | control_plane = true 37 | etcd = true 38 | worker = false 39 | } 40 | 41 | resource "rancher2_node_pool" "demo-worker" { 42 | cluster_id = rancher2_cluster.demo.id 43 | name = "demo-worker" 44 | hostname_prefix = "${var.prefix}-demo-worker" 45 | node_template_id = rancher2_node_template.worker.id 46 | quantity = 4 47 | control_plane = false 48 | etcd = false 49 | worker = true 50 | } 51 | 52 | resource "rancher2_cluster_sync" "demo" { 53 | cluster_id = rancher2_cluster.demo.id 54 | node_pool_ids = [ 55 | rancher2_node_pool.demo-control.id, 56 | rancher2_node_pool.demo-worker.id 57 | ] 58 | } 59 | 60 | resource "local_file" "kube_config" { 61 | filename = "out/kube_config_demo.yml" 62 | content = rancher2_cluster.demo.kube_config 63 | file_permission = "0600" 64 | } 65 | -------------------------------------------------------------------------------- /istio-multicluster/sleep.yaml: -------------------------------------------------------------------------------- 1 | # Copyright Istio Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | ################################################################################################## 16 | # Sleep service 17 | ################################################################################################## 18 | apiVersion: v1 19 | kind: ServiceAccount 20 | metadata: 21 | name: sleep 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: sleep 27 | labels: 28 | app: sleep 29 | service: sleep 30 | spec: 31 | ports: 32 | - port: 80 33 | name: http 34 | selector: 35 | app: sleep 36 | --- 37 | apiVersion: apps/v1 38 | kind: Deployment 39 | metadata: 40 | name: sleep 41 | spec: 42 | replicas: 1 43 | selector: 44 | matchLabels: 45 | app: sleep 46 | template: 47 | metadata: 48 | labels: 49 | app: sleep 50 | spec: 51 | serviceAccountName: sleep 52 | containers: 53 | - name: sleep 54 | # radial/busyboxplus:curl 55 | image: governmentpaas/curl-ssl 56 | command: ["/bin/sleep", "3650d"] 57 | imagePullPolicy: IfNotPresent 58 | volumeMounts: 59 | - mountPath: /etc/sleep/tls 60 | name: secret-volume 61 | volumes: 62 | - name: secret-volume 63 | secret: 64 | secretName: sleep-secret 65 | optional: true 66 | --- -------------------------------------------------------------------------------- /proxysetup-k3s-to-rancher-through-proxy/rancher.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "rancher_sg_allowall" { 2 | name = "bhofmann-rancher-allowall-k3s-proxy" 3 | description = "Rancher quickstart - allow all traffic" 4 | 5 | ingress { 6 | from_port = "0" 7 | to_port = "0" 8 | protocol = "-1" 9 | cidr_blocks = ["0.0.0.0/0"] 10 | } 11 | 12 | egress { 13 | from_port = "0" 14 | to_port = "0" 15 | protocol = "-1" 16 | cidr_blocks = ["0.0.0.0/0"] 17 | } 18 | 19 | tags = { 20 | Creator = "rancher-quickstart" 21 | } 22 | } 23 | 24 | resource "aws_instance" "rancher_server" { 25 | ami = data.aws_ami.ubuntu.id 26 | instance_type = "t3a.medium" 27 | 28 | key_name = aws_key_pair.ssh_key_pair.key_name 29 | security_groups = [aws_security_group.rancher_sg_allowall.name] 30 | 31 | user_data = templatefile("userdata/rancher_server.sh", { 32 | }) 33 | 34 | root_block_device { 35 | volume_size = 16 36 | } 37 | 38 | provisioner "remote-exec" { 39 | inline = [ 40 | "echo 'Waiting for cloud-init to complete...'", 41 | "cloud-init status --wait > /dev/null", 42 | "echo 'Completed cloud-init!'", 43 | "helm repo add rancher-latest https://releases.rancher.com/server-charts/latest", 44 | "helm --kubeconfig /etc/rancher/k3s/k3s.yaml upgrade --install rancher rancher-latest/rancher --namespace cattle-system --version 2.6.4 --set hostname=${join(".", ["rancher", self.public_ip, "nip.io"])} --create-namespace || true", 45 | "kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml rollout status deployment -n cattle-system rancher" 46 | ] 47 | 48 | connection { 49 | type = "ssh" 50 | host = self.public_ip 51 | user = "ubuntu" 52 | private_key = file(var.ssh_key_file_name) 53 | } 54 | } 55 | 56 | tags = { 57 | Name = "bhofmann-rancher-server" 58 | Creator = "rancher-quickstart" 59 | } 60 | } -------------------------------------------------------------------------------- /proxysetup-aws-rke/rancher.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "rancher" { 2 | depends_on = [ 3 | aws_instance.proxy, 4 | aws_instance.cluster_vms 5 | ] 6 | provisioner "remote-exec" { 7 | inline = [ 8 | "rke up --config rke-cluster.yaml", 9 | "helm repo add rancher-latest 
https://releases.rancher.com/server-charts/latest", 10 | "helm repo add jetstack https://charts.jetstack.io", 11 | "kubectl --kubeconfig kube_config_rke-cluster.yaml create namespace cert-manager || true", 12 | "kubectl --kubeconfig kube_config_rke-cluster.yaml create namespace cattle-system || true", 13 | "helm --kubeconfig kube_config_rke-cluster.yaml upgrade --install cert-manager jetstack/cert-manager --namespace cert-manager --version 1.7.1 --set installCRDs=true --set http_proxy=http://${aws_instance.proxy.private_ip}:8888 --set https_proxy=http://${aws_instance.proxy.private_ip}:8888 --set no_proxy=127.0.0.0/8\\\\,10.0.0.0/8\\\\,172.16.0.0/12\\\\,192.168.0.0/16", 14 | "kubectl --kubeconfig kube_config_rke-cluster.yaml rollout status deployment -n cert-manager cert-manager", 15 | "kubectl --kubeconfig kube_config_rke-cluster.yaml rollout status deployment -n cert-manager cert-manager-webhook", 16 | "sleep 60", // hack: wait until webhook certificate was created 17 | "helm --kubeconfig kube_config_rke-cluster.yaml upgrade --install rancher rancher-latest/rancher --version v2.6.4 --namespace cattle-system --set hostname=${local.rancher_hostname} --set proxy=http://${aws_instance.proxy.private_ip}:8888 --set noProxy=127.0.0.0/8\\\\,10.0.0.0/8\\\\,172.16.0.0/12\\\\,192.168.0.0/16\\\\,.svc\\\\,.cluster.local", 18 | "kubectl --kubeconfig kube_config_rke-cluster.yaml rollout status deployment -n cattle-system rancher", 19 | ] 20 | 21 | connection { 22 | type = "ssh" 23 | host = aws_instance.proxy.public_ip 24 | user = "ubuntu" 25 | private_key = file(var.ssh_key_file_name) 26 | } 27 | } 28 | } -------------------------------------------------------------------------------- /gke-test/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | rancher2 = { 4 | source = "rancher/rancher2" 5 | version = "1.22.2" 6 | } 7 | } 8 | } 9 | provider "rancher2" { 10 | api_url = var.rancher_url 11 | insecure = true 12 | access_key = var.rancher_access_key 13 | secret_key = var.rancher_secret_key 14 | } 15 | 16 | resource "rancher2_cloud_credential" "foo-google" { 17 | name = "bhofmann-google" 18 | description = "Terraform cloudCredential acceptance test" 19 | google_credential_config { 20 | auth_encoded_json = file(var.gcp_credentials_file) 21 | } 22 | } 23 | 24 | resource "rancher2_cluster" "foo" { 25 | name = "bhofmann-gke" 26 | description = "Terraform GKE cluster" 27 | 28 | gke_config_v2 { 29 | name = "foo" 30 | google_credential_secret = rancher2_cloud_credential.foo-google.id 31 | project_id = "rancher-dev" 32 | zone = "us-central1-c" 33 | kubernetes_version = "1.21.5-gke.1302" 34 | network = "projects/rancher-dev/global/networks/bhofmann-vpc-test" 35 | subnetwork = "projects/rancher-dev/regions/us-central1/subnetworks/bhofmann-nodes" 36 | 37 | ip_allocation_policy { 38 | cluster_secondary_range_name = "pods" 39 | services_secondary_range_name = "services" 40 | } 41 | 42 | master_authorized_networks_config { 43 | cidr_blocks { 44 | cidr_block = "10.0.0.0/8" 45 | display_name = "testblock" 46 | } 47 | enabled = true 48 | } 49 | 50 | private_cluster_config { 51 | master_ipv4_cidr_block = "10.200.221.208/28" 52 | enable_private_endpoint = true 53 | enable_private_nodes = true 54 | } 55 | 56 | node_pools { 57 | initial_node_count = 1 58 | max_pods_constraint = 110 59 | name = "testnodepool" 60 | version = "1.21.5-gke.1302" 61 | } 62 | } 63 | } -------------------------------------------------------------------------------- 
/proxysetup-aws-k3s/README.md: -------------------------------------------------------------------------------- 1 | # Rancher setup behind HTTP proxy on AWS 2 | 3 | This Terraform example sets up Rancher in a private AWS subnet that only has Internet connectivity through an HTTP proxy. 4 | 5 | The following resources will be set up: 6 | 7 | * One VPC 8 | * One public subnet with an Internet Gateway 9 | * One private subnet without an Internet Gateway 10 | * One proxy VM in the public subnet with the following tools installed 11 | * OpenVPN Server 12 | * Tinyproxy 13 | * Nginx 14 | * A security group for the proxy VM that allows 15 | * Incoming SSH traffic from all networks (port 22) 16 | * Incoming VPN traffic from all networks to the OpenVPN server (port 1194) 17 | * Incoming HTTP and HTTPS traffic from all networks to Nginx (ports 80 and 443) 18 | * Outgoing HTTP and HTTPS traffic to the internet (ports 80 and 443) 19 | * Outgoing HTTP and HTTPS traffic to the ingress controller on cluster VMs in the private subnet (ports 80 and 443) 20 | * Outgoing HTTPS traffic to the Kubernetes apiserver on cluster VMs in the private subnet (port 6443) 21 | * Outgoing SSH traffic to the whole VPC (port 22) 22 | * Incoming proxy traffic from the whole VPC to Tinyproxy (port 8888) 23 | * One cluster VM in the private subnet 24 | * A security group for the cluster VMs that allows 25 | * Incoming SSH traffic from the public subnet (port 22) 26 | * Incoming HTTPS traffic to the Kubernetes apiserver from the public subnet (port 6443) 27 | * Incoming HTTP and HTTPS traffic to the ingress controller from the public subnet (ports 80 and 443) 28 | * Incoming and outgoing traffic on all ports between the cluster VMs in the private subnet 29 | * Outgoing traffic to Tinyproxy in the public subnet (port 8888) 30 | 31 | On the cluster VMs in the private subnet, the following will then be set up: 32 | 33 | * A K3s Kubernetes cluster will be created and configured to pull images through Tinyproxy (a minimal sketch of such a proxy configuration is appended at the end of this document) 34 | * cert-manager will be installed with Tinyproxy configured 35 | * Rancher will be installed with Tinyproxy configured 36 | -------------------------------------------------------------------------------- /proxysetup-aws-k3s/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates.
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "3.25.0" 6 | hashes = [ 7 | "h1:oyaXLqVhtPnHDnwk2yU9qP4dTgfPHyuo27mX/ljeCTQ=", 8 | "zh:2d3c65461bc63ec39bce7b5afdbed9a3b4dd5c2c8ee94616ad1866e24cf9b8f0", 9 | "zh:2fb2ea6ccac30b909b603e183433737a30c58ec1f9a6a8b5565f0f051490c07a", 10 | "zh:31a5f192c8cf29fb677cd639824f9a685578a2564c6b790517db33ea56229045", 11 | "zh:437a12cf9a4d7bc92c9bf14ee7e224d5d3545cbd2154ba113ae82c4bb68edc27", 12 | "zh:4bbdc3155a5dea90b2d50adfa460b0759c4dd959efaf7f66b2a0385a53b469b2", 13 | "zh:63a8cd523ba31358692a34a06e111d88769576ac6d0e5adad8e0b4ae0a2d8882", 14 | "zh:c4301ce86e8cb2c464949bb99e729ffe7b0c55eaf34b82ba526bb5039bca36f3", 15 | "zh:c97b84861c6c550b8d2feb12d089660fffbf51dc7d660dcc9d54d4a7b3c2c882", 16 | "zh:d6a103570e2d5c387b068fac4b88654dfa21d44ca1bdfa4bc8ab94c88effd71a", 17 | "zh:f08cf2faf960a8ca374ac860f37c31c88ed2bab460116ac74678e0591babaac5", 18 | ] 19 | } 20 | 21 | provider "registry.terraform.io/hashicorp/null" { 22 | version = "3.0.0" 23 | hashes = [ 24 | "h1:V1tzrSG6t3e7zWvUwRbGbhsWU2Jd/anrJpOl9XM+R/8=", 25 | "zh:05fb7eab469324c97e9b73a61d2ece6f91de4e9b493e573bfeda0f2077bc3a4c", 26 | "zh:1688aa91885a395c4ae67636d411475d0b831e422e005dcf02eedacaafac3bb4", 27 | "zh:24a0b1292e3a474f57c483a7a4512d797e041bc9c2fbaac42fe12e86a7fb5a3c", 28 | "zh:2fc951bd0d1b9b23427acc93be09b6909d72871e464088171da60fbee4fdde03", 29 | "zh:6db825759425599a326385a68acc6be2d9ba0d7d6ef587191d0cdc6daef9ac63", 30 | "zh:85985763d02618993c32c294072cc6ec51f1692b803cb506fcfedca9d40eaec9", 31 | "zh:a53186599c57058be1509f904da512342cfdc5d808efdaf02dec15f0f3cb039a", 32 | "zh:c2e07b49b6efa676bdc7b00c06333ea1792a983a5720f9e2233db27323d2707c", 33 | "zh:cdc8fe1096103cf5374751e2e8408ec4abd2eb67d5a1c5151fe2c7ecfd525bef", 34 | "zh:dbdef21df0c012b0d08776f3d4f34eb0f2f229adfde07ff252a119e52c0f65b7", 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /proxysetup-aws-rke/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "3.23.0" 6 | hashes = [ 7 | "h1:tSznQxPJvolDnmqqaTK9SsJ0bluTws7OAWcnc1t0ABs=", 8 | "zh:30b0733027c00472618da998bc77967c692e238ae117c07e046fdd7336b83fa3", 9 | "zh:3677550a8bef8e01c67cb615407dc8a69d32f4e36017033cd6f71a831c99d5de", 10 | "zh:3c2fb4c14bfd43cf20ee25d0068ce09f1d48758408b8f1c88a096cea243612b3", 11 | "zh:5577543322003693c4fe24a69ed0d47e58f867426fd704fac94cf5c16d3d6153", 12 | "zh:6771f09d76ad01ffc04baa3bce7a3eed09f6a8a949274ffbd9d11756a58a4329", 13 | "zh:7a57b79d304d17cf52ee3ddce91679f6b4289c5bdda2e31b763bf7d512e542d9", 14 | "zh:815fb027e17bfe754b05367d20bd0694726a95a99b81e8d939ddd44e2b1f05a9", 15 | "zh:a3d67db5ec0f4e9750eb19676a9a1aff36b0721e276a4ba789f42b991bf5951c", 16 | "zh:cd67ff33860ad578172c19412ce608ba818e7590083197df2b793f870d6f50a3", 17 | "zh:fbe0835055d1260fb77ad19a32a8726248ba7ac187f6c463ded90737b4cea8e6", 18 | ] 19 | } 20 | 21 | provider "registry.terraform.io/hashicorp/null" { 22 | version = "3.0.0" 23 | hashes = [ 24 | "h1:V1tzrSG6t3e7zWvUwRbGbhsWU2Jd/anrJpOl9XM+R/8=", 25 | "zh:05fb7eab469324c97e9b73a61d2ece6f91de4e9b493e573bfeda0f2077bc3a4c", 26 | "zh:1688aa91885a395c4ae67636d411475d0b831e422e005dcf02eedacaafac3bb4", 27 | "zh:24a0b1292e3a474f57c483a7a4512d797e041bc9c2fbaac42fe12e86a7fb5a3c", 28 | "zh:2fc951bd0d1b9b23427acc93be09b6909d72871e464088171da60fbee4fdde03", 29 | "zh:6db825759425599a326385a68acc6be2d9ba0d7d6ef587191d0cdc6daef9ac63", 30 | "zh:85985763d02618993c32c294072cc6ec51f1692b803cb506fcfedca9d40eaec9", 31 | "zh:a53186599c57058be1509f904da512342cfdc5d808efdaf02dec15f0f3cb039a", 32 | "zh:c2e07b49b6efa676bdc7b00c06333ea1792a983a5720f9e2233db27323d2707c", 33 | "zh:cdc8fe1096103cf5374751e2e8408ec4abd2eb67d5a1c5151fe2c7ecfd525bef", 34 | "zh:dbdef21df0c012b0d08776f3d4f34eb0f2f229adfde07ff252a119e52c0f65b7", 35 | ] 36 | } 37 | --------------------------------------------------------------------------------