├── depends ├── sub │ ├── fleet.yaml │ ├── service.yaml │ └── deployment.yaml └── main │ ├── fleet.yaml │ ├── service.yaml │ └── deployment.yaml ├── edge-cluster └── .gitkeep ├── fleet-examples ├── .gitkeep ├── navlink │ ├── fleet.yaml │ └── navlink.yaml ├── longhorn │ └── fleet.yaml ├── hello-world-with-overlay │ ├── fleet.yaml │ ├── overlays │ │ └── green-color │ │ │ └── deployment_patch.yaml │ ├── service.yaml │ └── deployment.yaml └── tetris │ └── fleet.yaml ├── fleet-upgrades └── .gitkeep ├── monitoring-cluster ├── .gitkeep └── edgme-monitoring-servicemonitor │ └── servicemonitor.yaml ├── psp ├── templates │ ├── fleet.yaml │ └── template.yaml └── constraints │ ├── fleet.yaml │ └── constraint.yaml ├── kasten-k10-demo ├── navlink │ ├── fleet.yaml │ └── navlink.yaml ├── example-workload │ ├── fleet.yaml │ ├── 01-namespace.yaml │ ├── backup-policy.yaml │ └── nginx-deployment.yaml ├── longhorn │ └── fleet.yaml └── kasten-k10 │ └── fleet.yaml ├── susexchange └── rancher-demo │ ├── fleet.yaml │ ├── service.yaml │ └── deployment.yaml ├── fleet-examples-prepared ├── demo-app │ ├── fleet.yaml │ └── manifests │ │ ├── logging.yaml │ │ └── workload.yaml ├── prom-example-app │ ├── fleet.yaml │ └── manifests │ │ ├── podmonitor.yaml │ │ ├── servicemonitor.yaml │ │ └── workload.yaml ├── logging │ └── fleet.yaml ├── logging-crd │ └── fleet.yaml ├── metal-lb-config │ ├── metal-lb-config │ │ ├── Chart.yaml │ │ ├── values.yaml │ │ ├── .helmignore │ │ └── templates │ │ │ ├── config.yaml │ │ │ └── _helpers.tpl │ └── fleet.yaml ├── monitoring-crd │ └── fleet.yaml ├── rancher-demo │ └── fleet.yaml ├── hello-world-with-overlay │ ├── fleet.yaml │ ├── overlays │ │ └── green-color │ │ │ └── deployment_patch.yaml │ ├── service.yaml │ └── deployment.yaml ├── beta-ingress │ └── ingress.yaml ├── hello-world │ ├── service.yaml │ └── deployment.yaml ├── netdata │ └── fleet.yaml ├── ingress-cluster-labels │ └── fleet.yaml ├── cluster-label-policy │ ├── constraint.yaml │ └── template.yaml ├── upgrades │ └── plan.yaml ├── edge-monitoring │ └── edge-monitoring.yaml ├── loki │ └── fleet.yaml ├── ingress-target-customization │ └── fleet.yaml ├── edgme-monitoring-servicemonitor │ ├── fedservicemonitor.yaml │ └── servicemonitor.yaml └── monitoring │ └── fleet.yaml ├── .gitignore ├── cicd ├── test │ └── namespace.yaml └── rancher-demo │ └── fleet.yaml ├── get_env.sh ├── storage ├── longhorn │ └── fleet.yaml ├── nginx │ ├── pvc.yaml │ ├── service.yaml │ └── deployment.yaml └── kubeview │ └── fleet.yaml ├── run_on.sh ├── valuesfrom ├── secret │ └── fleet.yaml ├── configmap │ └── fleet.yaml └── configs │ ├── secret.yaml │ └── configmap.yaml ├── fleet-examples-local └── do-demo-cluster │ ├── nodeconfig.yaml │ └── cluster.yaml ├── terraform-setup ├── provider.tf ├── output.tf ├── data.tf ├── variables.tf ├── lb.tf ├── main.tf └── .terraform.lock.hcl ├── README.md └── Makefile /depends/sub/fleet.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /edge-cluster/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fleet-examples/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fleet-upgrades/.gitkeep: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /monitoring-cluster/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /psp/templates/fleet.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fleet-examples/navlink/fleet.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kasten-k10-demo/navlink/fleet.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /susexchange/rancher-demo/fleet.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fleet-examples-prepared/demo-app/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: default -------------------------------------------------------------------------------- /depends/main/fleet.yaml: -------------------------------------------------------------------------------- 1 | dependsOn: 2 | - name: fleet-depends-sub -------------------------------------------------------------------------------- /fleet-examples-prepared/prom-example-app/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: default -------------------------------------------------------------------------------- /psp/constraints/fleet.yaml: -------------------------------------------------------------------------------- 1 | dependsOn: 2 | - name: psp-repo-psp-templates -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.tfvars 2 | *.tfstate 3 | *.tfstate.* 4 | .terraform 5 | kubeconfig* -------------------------------------------------------------------------------- /cicd/test/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: created-by-fleet 5 | -------------------------------------------------------------------------------- /kasten-k10-demo/example-workload/fleet.yaml: -------------------------------------------------------------------------------- 1 | dependsOn: 2 | - name: kasten-k10-demo-kasten-k10-demo-kasten-k10 -------------------------------------------------------------------------------- /kasten-k10-demo/example-workload/01-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: app 5 | labels: 6 | k10.kasten.io/appNamespace: app -------------------------------------------------------------------------------- /get_env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | $(terraform output -state=terraform-setup/terraform.tfstate -json all_node_ips | jq -r 'keys[] as $k | "export IP\($k)=\(.[$k])"') 4 | 
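5 | # The jq pipeline above emits one "export IPn=<address>" statement per node in the Terraform state, and the surrounding $( ) executes them. 6 | # This file therefore has to be sourced, not executed, for IP0, IP1, ... to be visible in the calling shell (run_on.sh and the Makefile do exactly that): 7 | # 8 | #   source get_env.sh 9 | #   echo "${IP0}"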
-------------------------------------------------------------------------------- /fleet-examples/navlink/navlink.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ui.cattle.io/v1 2 | kind: NavLink 3 | metadata: 4 | name: testtwo 5 | spec: 6 | target: _blank 7 | toURL: https://google.com 8 | -------------------------------------------------------------------------------- /storage/longhorn/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: longhorn-system 2 | helm: 3 | releaseName: longhorn 4 | chart: longhorn 5 | repo: https://charts.longhorn.io 6 | version: 1.1.1 7 | -------------------------------------------------------------------------------- /fleet-examples/longhorn/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: longhorn-system 2 | helm: 3 | releaseName: longhorn 4 | chart: longhorn 5 | repo: https://charts.longhorn.io 6 | version: 1.4.1 7 | -------------------------------------------------------------------------------- /kasten-k10-demo/longhorn/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: longhorn-system 2 | helm: 3 | releaseName: longhorn 4 | chart: longhorn 5 | repo: https://charts.longhorn.io 6 | version: 1.4.1 7 | -------------------------------------------------------------------------------- /fleet-examples-prepared/logging/fleet.yaml: -------------------------------------------------------------------------------- 1 | 2 | defaultNamespace: rancher-logging 3 | helm: 4 | repo: https://charts.rancher.io 5 | chart: rancher-logging 6 | releaseName: rancher-logging 7 | -------------------------------------------------------------------------------- /run_on.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | NODE_ID=$1 4 | CMD="${@:2}" 5 | 6 | source get_env.sh 7 | 8 | var="IP${NODE_ID}" 9 | NODE_IP=${!var} 10 | 11 | ssh ec2-user@"${NODE_IP}" ${CMD} 12 | -------------------------------------------------------------------------------- /fleet-examples-prepared/logging-crd/fleet.yaml: -------------------------------------------------------------------------------- 1 | 2 | defaultNamespace: rancher-logging 3 | helm: 4 | releaseName: rancher-logging-crd 5 | repo: https://charts.rancher.io 6 | chart: rancher-logging-crd -------------------------------------------------------------------------------- /fleet-examples-prepared/metal-lb-config/metal-lb-config/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: metal-lb-config 3 | description: metal-lb-config 4 | type: application 5 | version: 0.1.0 6 | appVersion: "0.1.0" 7 | -------------------------------------------------------------------------------- /fleet-examples-prepared/metal-lb-config/metal-lb-config/values.yaml: -------------------------------------------------------------------------------- 1 | addressPool: "" 2 | addressPoolId: "" 3 | 4 | potentialPools: 5 | pool-1: "192.168.1.240-192.168.1.250" 6 | pool-2: "192.168.2.240-192.168.2.250" -------------------------------------------------------------------------------- /fleet-examples-prepared/monitoring-crd/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: cattle-monitoring-system 2 | helm: 3 | releaseName: rancher-monitoring-crd 4 | 
chart: rancher-monitoring-crd 5 | repo: https://charts.rancher.io 6 | -------------------------------------------------------------------------------- /fleet-examples/hello-world-with-overlay/fleet.yaml: -------------------------------------------------------------------------------- 1 | targetCustomizations: 2 | - name: west 3 | yaml: 4 | overlays: 5 | - green-color 6 | clusterSelector: 7 | matchLabels: 8 | location: west -------------------------------------------------------------------------------- /fleet-examples-prepared/rancher-demo/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: rancher-demo 2 | 3 | helm: 4 | chart: rancher-demo 5 | repo: https://bashofmann.github.io/demo-charts 6 | releaseName: rancher-demo 7 | version: 2.0.0 8 | -------------------------------------------------------------------------------- /fleet-examples-prepared/hello-world-with-overlay/fleet.yaml: -------------------------------------------------------------------------------- 1 | targetCustomizations: 2 | - name: west 3 | yaml: 4 | overlays: 5 | - green-color 6 | clusterSelector: 7 | matchLabels: 8 | location: west -------------------------------------------------------------------------------- /fleet-examples/hello-world-with-overlay/overlays/green-color/deployment_patch.yaml: -------------------------------------------------------------------------------- 1 | spec: 2 | template: 3 | spec: 4 | containers: 5 | - name: hello-world 6 | env: 7 | - name: COW_COLOR 8 | value: green -------------------------------------------------------------------------------- /fleet-examples-prepared/hello-world-with-overlay/overlays/green-color/deployment_patch.yaml: -------------------------------------------------------------------------------- 1 | spec: 2 | template: 3 | spec: 4 | containers: 5 | - name: hello-world 6 | env: 7 | - name: COW_COLOR 8 | value: green -------------------------------------------------------------------------------- /cicd/rancher-demo/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: rancher-demo 2 | 3 | helm: 4 | chart: rancher-demo 5 | repo: https://bashofmann.github.io/demo-charts 6 | releaseName: rancher-demo 7 | version: 2.5.1 8 | values: 9 | app: 10 | title: Demo 11 | -------------------------------------------------------------------------------- /kasten-k10-demo/kasten-k10/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: k10 2 | 3 | helm: 4 | chart: k10 5 | repo: https://partner-charts.rancher.io/ 6 | releaseName: k10 7 | version: 5.5.801 8 | 9 | dependsOn: 10 | - name: kasten-k10-demo-kasten-k10-demo-longhorn 11 | -------------------------------------------------------------------------------- /fleet-examples/tetris/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: default 2 | 3 | helm: 4 | chart: tetris 5 | repo: https://rancher.github.io/rodeo 6 | version: 0.1.9 7 | releaseName: tetris 8 | values: 9 | service: 10 | type: NodePort 11 | ingress: 12 | enabled: false -------------------------------------------------------------------------------- /storage/nginx/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-www 5 | labels: 6 | app: nginx 7 | spec: 8 | storageClassName: longhorn 9 | accessModes: 10 | - 
ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 10Mi -------------------------------------------------------------------------------- /kasten-k10-demo/navlink/navlink.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ui.cattle.io/v1 2 | kind: NavLink 3 | metadata: 4 | name: kasten-k10 5 | spec: 6 | label: Kasten K10 7 | target: _blank 8 | toService: 9 | name: gateway 10 | namespace: k10 11 | path: k10/ 12 | port: "8000" 13 | scheme: http -------------------------------------------------------------------------------- /psp/constraints/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sPSPPrivilegedContainer 3 | metadata: 4 | name: psp-privileged-container 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | excludedNamespaces: ["kube-system"] -------------------------------------------------------------------------------- /depends/sub/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world-sub 5 | namespace: default 6 | spec: 7 | type: NodePort 8 | selector: 9 | app: hello-world-sub 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web 14 | nodePort: 30002 -------------------------------------------------------------------------------- /depends/main/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world-main 5 | namespace: default 6 | spec: 7 | type: NodePort 8 | selector: 9 | app: hello-world-main 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web 14 | nodePort: 30001 -------------------------------------------------------------------------------- /susexchange/rancher-demo/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | type: NodePort 8 | selector: 9 | app: hello-world 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web 14 | nodePort: 30001 -------------------------------------------------------------------------------- /fleet-examples-prepared/beta-ingress/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: example-ingress 5 | spec: 6 | rules: 7 | - http: 8 | paths: 9 | - path: /bar 10 | backend: 11 | serviceName: bar-service 12 | servicePort: 5678 -------------------------------------------------------------------------------- /fleet-examples-prepared/hello-world/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | type: NodePort 8 | selector: 9 | app: hello-world 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web 14 | nodePort: 30001 -------------------------------------------------------------------------------- /valuesfrom/secret/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: default 2 | helm: 3 | releaseName: rancher-demo-secret 4 | repo: https://rancher.github.io/rodeo 5 | chart: rancher-demo 6 | valuesFrom: 7 | - 
secretKeyRef: 8 | name: rancher-demo-secret-values 9 | namespace: default 10 | key: values.yaml -------------------------------------------------------------------------------- /fleet-examples/hello-world-with-overlay/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | type: NodePort 8 | selector: 9 | app: hello-world 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web 14 | nodePort: 30001 -------------------------------------------------------------------------------- /storage/nginx/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 80 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | nodePort: 30888 15 | selector: 16 | app: nginx -------------------------------------------------------------------------------- /valuesfrom/configmap/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: default 2 | helm: 3 | releaseName: rancher-demo-configmap 4 | repo: https://rancher.github.io/rodeo 5 | chart: rancher-demo 6 | valuesFrom: 7 | - configMapKeyRef: 8 | name: rancher-demo-configmap-values 9 | namespace: default 10 | key: values.yaml -------------------------------------------------------------------------------- /fleet-examples-prepared/hello-world-with-overlay/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | type: NodePort 8 | selector: 9 | app: hello-world 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: web 14 | nodePort: 30001 -------------------------------------------------------------------------------- /valuesfrom/configs/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: rancher-demo-secret-values 5 | namespace: default 6 | stringData: 7 | values.yaml: | 8 | replicaCount: 3 9 | ingress: 10 | enabled: false 11 | service: 12 | type: NodePort 13 | app: 14 | localization: 15 | title: SECRET -------------------------------------------------------------------------------- /fleet-examples-prepared/prom-example-app/manifests/podmonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: example-app-pod-monitor 5 | namespace: example 6 | labels: 7 | team: frontend 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: example-app 12 | podMetricsEndpoints: 13 | - port: web -------------------------------------------------------------------------------- /valuesfrom/configs/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: rancher-demo-configmap-values 5 | namespace: default 6 | data: 7 | values.yaml: | 8 | replicaCount: 2 9 | ingress: 10 | enabled: false 11 | service: 12 | type: NodePort 13 | app: 14 | localization: 15 | title: CONFIGMAP -------------------------------------------------------------------------------- 
/fleet-examples-prepared/prom-example-app/manifests/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: example-app-service-monitor 5 | namespace: example 6 | labels: 7 | team: frontend 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: example-app 12 | endpoints: 13 | - port: web -------------------------------------------------------------------------------- /kasten-k10-demo/example-workload/backup-policy.yaml: -------------------------------------------------------------------------------- 1 | kind: Policy 2 | apiVersion: config.kio.kasten.io/v1alpha1 3 | metadata: 4 | name: nginx-backup 5 | namespace: app 6 | spec: 7 | frequency: "@onDemand" 8 | selector: 9 | matchExpressions: 10 | - key: k10.kasten.io/appNamespace 11 | operator: In 12 | values: 13 | - app 14 | actions: 15 | - action: backup -------------------------------------------------------------------------------- /kasten-k10-demo/example-workload/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: nginx 6 | name: nginx 7 | namespace: app 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - image: nginx 19 | name: nginx -------------------------------------------------------------------------------- /fleet-examples-prepared/netdata/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: netdata 2 | 3 | helm: 4 | chart: github.com/netdata/helmchart/charts/netdata 5 | releaseName: netdata 6 | values: 7 | service: 8 | type: NodePort 9 | ingress: 10 | enabled: false 11 | 12 | targetCustomizations: 13 | - name: arm 14 | clusterGroup: arm 15 | helm: 16 | values: 17 | service: 18 | type: ClusterIP -------------------------------------------------------------------------------- /fleet-examples-prepared/ingress-cluster-labels/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: rancher-demo-two 2 | 3 | helm: 4 | chart: rancher-demo 5 | repo: https://bashofmann.github.io/demo-charts 6 | releaseName: app-two 7 | version: 2.5.1 8 | values: 9 | clusterName: global.fleet.clusterLabels.management.cattle.io/cluster-display-name 10 | ingress: 11 | enabled: true 12 | host: app-two.{{ .Values.clusterName }}.plgrnd.be 13 | -------------------------------------------------------------------------------- /fleet-examples-prepared/metal-lb-config/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: metallb-system 2 | 3 | helm: 4 | chart: ./metal-lb-config 5 | releaseName: metal-lb-config 6 | values: 7 | # OPTION 1: the address pool is added as a label to the cluster 8 | addressPool: global.fleet.clusterLabels.address-pool 9 | 10 | # OPTION 2: configure an address pool id as a label 11 | addressPoolId: global.fleet.clusterLabels.address-pool-id 12 | -------------------------------------------------------------------------------- /fleet-examples-local/do-demo-cluster/nodeconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rke-machine-config.cattle.io/v1 2 | kind: DigitaloceanConfig 3 | metadata: 4 | name: do-demo-cluster-nodes 5 | namespace: fleet-default 6
| monitoring: false 7 | privateNetworking: false 8 | region: sfo3 9 | size: s-2vcpu-4gb 10 | sshKeyContents: null 11 | sshKeyFingerprint: null 12 | sshPort: "22" 13 | sshUser: root 14 | backups: false 15 | image: ubuntu-20-04-x64 16 | ipv6: false -------------------------------------------------------------------------------- /fleet-examples-prepared/cluster-label-policy/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sRequiredUniqueLabels 3 | metadata: 4 | name: all-must-unique-pool-id 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["provisioning.cattle.io"] 9 | kinds: ["Cluster"] 10 | parameters: 11 | message: "All clusters must have a unique, valid `pool-id` label" 12 | labels: 13 | - key: pool-id 14 | allowedRegex: "^pool-[0-9]+$" -------------------------------------------------------------------------------- /fleet-examples-prepared/upgrades/plan.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: upgrade.cattle.io/v1 2 | kind: Plan 3 | metadata: 4 | name: server-plan 5 | namespace: system-upgrade 6 | spec: 7 | concurrency: 1 8 | cordon: true 9 | nodeSelector: 10 | matchExpressions: 11 | - key: node-role.kubernetes.io/master 12 | operator: In 13 | values: 14 | - "true" 15 | serviceAccountName: system-upgrade 16 | upgrade: 17 | image: rancher/k3s-upgrade 18 | version: v1.21.5+k3s1 -------------------------------------------------------------------------------- /terraform-setup/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | digitalocean = { 7 | source = "digitalocean/digitalocean" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | 13 | provider "aws" { 14 | access_key = var.aws_access_key 15 | secret_key = var.aws_secret_key 16 | region = var.aws_region 17 | } 18 | 19 | provider "digitalocean" { 20 | token = var.digitalocean_token 21 | } -------------------------------------------------------------------------------- /fleet-examples-prepared/metal-lb-config/metal-lb-config/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /storage/kubeview/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: kubeview 2 | 3 | helm: 4 | chart: kubeview 5 | repo: https://benc-uk.github.io/kubeview/charts 6 | version: 0.1.20 7 | releaseName: kubeview 8 | values: 9 | ingress: 10 | enabled: true 11 | hosts: 12 | - host: nip.io 13 | paths: 14 | - / 15 | diff: 16 | comparePatches: 17 | - apiVersion: extensions/v1beta1 18 | kind: Ingress 19 | name: kubeview 20 | namespace: kubeview 21 | operations: 22 | - {"op":"remove", "path":"/spec/rules"} 23 | -------------------------------------------------------------------------------- /fleet-examples-prepared/metal-lb-config/metal-lb-config/templates/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: {{ .Release.Namespace }} 5 | name: {{ include "metal-lb-config.name" . }} 6 | data: 7 | config: | 8 | address-pools: 9 | - name: default 10 | protocol: layer2 11 | addresses: 12 | {{- if .Values.addressPool }} 13 | - {{ .Values.addressPool }} 14 | {{- end }} 15 | {{- if .Values.addressPoolId }} 16 | - {{ index .Values.potentialPools .Values.addressPoolId }} 17 | {{- end }} -------------------------------------------------------------------------------- /terraform-setup/output.tf: -------------------------------------------------------------------------------- 1 | output "x86_node_ips" { 2 | value = aws_instance.x86_vms.*.public_ip 3 | } 4 | output "arm_node_ips" { 5 | value = aws_instance.arm_vms.*.public_ip 6 | } 7 | output "rancher_domain" { 8 | value = digitalocean_record.rancher.fqdn 9 | } 10 | output "rancher_cluster_ips" { 11 | value = [ 12 | aws_instance.x86_vms.0.public_ip, 13 | aws_instance.x86_vms.1.public_ip, 14 | aws_instance.x86_vms.2.public_ip, 15 | ] 16 | } 17 | output "all_node_ips" { 18 | value = concat( 19 | aws_instance.x86_vms.*.public_ip, 20 | aws_instance.arm_vms.*.public_ip, 21 | ) 22 | } -------------------------------------------------------------------------------- /storage/nginx/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:latest 20 | ports: 21 | - containerPort: 80 22 | name: http 23 | protocol: TCP 24 | volumeMounts: 25 | - mountPath: /usr/share/nginx/html 26 | name: nginx-www 27 | volumes: 28 | - name: nginx-www 29 | persistentVolumeClaim: 30 | claimName: nginx-www 31 | -------------------------------------------------------------------------------- /terraform-setup/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "sles_x86" { 2 | owners = ["013907871322"] 3 | most_recent = true 4 | 5 | filter { 6 | name = "name" 7 | values = ["suse-sles-15-sp2*"] 8 | } 9 | 10 | filter { 11 | name = "architecture" 12 | values = ["x86_64"] 13 | } 14 | 
15 | filter { 16 | name = "root-device-type" 17 | values = ["ebs"] 18 | } 19 | } 20 | 21 | data "aws_ami" "sles_arm" { 22 | owners = ["013907871322"] 23 | most_recent = true 24 | 25 | filter { 26 | name = "name" 27 | values = ["suse-sles-15-sp2*"] 28 | } 29 | 30 | filter { 31 | name = "architecture" 32 | values = ["arm64"] 33 | } 34 | 35 | filter { 36 | name = "root-device-type" 37 | values = ["ebs"] 38 | } 39 | } -------------------------------------------------------------------------------- /depends/sub/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-world-sub 5 | namespace: default 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: hello-world-sub 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-world-sub 15 | spec: 16 | containers: 17 | - name: hello-world-sub 18 | image: bashofmann/rancher-demo:1.0.0 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 8080 22 | name: web 23 | protocol: TCP 24 | env: 25 | - name: COW_COLOR 26 | value: purple 27 | readinessProbe: 28 | httpGet: 29 | port: web 30 | path: / -------------------------------------------------------------------------------- /depends/main/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-world-main 5 | namespace: default 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: hello-world-main 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-world-main 15 | spec: 16 | containers: 17 | - name: hello-world-main 18 | image: bashofmann/rancher-demo:1.0.0 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 8080 22 | name: web 23 | protocol: TCP 24 | env: 25 | - name: COW_COLOR 26 | value: purple 27 | readinessProbe: 28 | httpGet: 29 | port: web 30 | path: / -------------------------------------------------------------------------------- /susexchange/rancher-demo/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: hello-world 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-world 15 | spec: 16 | containers: 17 | - name: hello-world 18 | image: bashofmann/rancher-demo:1.0.0 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 8080 22 | name: web 23 | protocol: TCP 24 | env: 25 | - name: COW_COLOR 26 | value: blue 27 | readinessProbe: 28 | httpGet: 29 | port: web 30 | path: / 31 | -------------------------------------------------------------------------------- /fleet-examples-prepared/hello-world/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: hello-world 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-world 15 | spec: 16 | containers: 17 | - name: hello-world 18 | image: bashofmann/rancher-demo:1.0.0 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 8080 22 | name: web 23 | protocol: TCP 24 | env: 25 | - name: COW_COLOR 26 | value: purple 27 | readinessProbe: 28 | httpGet: 29 | port: web 30 | path: / 
-------------------------------------------------------------------------------- /fleet-examples-prepared/prom-example-app/manifests/workload.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: example-app 5 | namespace: example 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: example-app 11 | template: 12 | metadata: 13 | labels: 14 | app: example-app 15 | spec: 16 | containers: 17 | - name: example-app 18 | image: fabxc/instrumented_app 19 | ports: 20 | - name: web 21 | containerPort: 8080 22 | --- 23 | kind: Service 24 | apiVersion: v1 25 | metadata: 26 | name: example-app 27 | namespace: example 28 | labels: 29 | app: example-app 30 | spec: 31 | selector: 32 | app: example-app 33 | ports: 34 | - name: web 35 | port: 8080 36 | 37 | -------------------------------------------------------------------------------- /fleet-examples/hello-world-with-overlay/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: hello-world 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-world 15 | spec: 16 | containers: 17 | - name: hello-world 18 | image: bashofmann/rancher-demo:1.0.0 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 8080 22 | name: web 23 | protocol: TCP 24 | env: 25 | - name: COW_COLOR 26 | value: blue 27 | readinessProbe: 28 | httpGet: 29 | port: web 30 | path: / 31 | -------------------------------------------------------------------------------- /fleet-examples-prepared/hello-world-with-overlay/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-world 5 | namespace: default 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: hello-world 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-world 15 | spec: 16 | containers: 17 | - name: hello-world 18 | image: bashofmann/rancher-demo:1.0.0 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 8080 22 | name: web 23 | protocol: TCP 24 | env: 25 | - name: COW_COLOR 26 | value: purple 27 | readinessProbe: 28 | httpGet: 29 | port: web 30 | path: / -------------------------------------------------------------------------------- /fleet-examples-prepared/demo-app/manifests/logging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.banzaicloud.io/v1beta1 2 | kind: Output 3 | metadata: 4 | name: loki-output 5 | spec: 6 | loki: 7 | url: http://loki.loki.svc:3100 8 | configure_kubernetes_labels: true 9 | buffer: 10 | timekey: 5s 11 | timekey_wait: 2s 12 | timekey_use_utc: true 13 | --- 14 | apiVersion: logging.banzaicloud.io/v1beta1 15 | kind: Flow 16 | metadata: 17 | name: loki-flow 18 | spec: 19 | filters: 20 | - tag_normaliser: {} 21 | - parser: 22 | remove_key_name_field: true 23 | reserve_data: true 24 | parse: 25 | type: none 26 | match: 27 | - select: 28 | labels: 29 | app.kubernetes.io/name: loki-example-app 30 | localOutputRefs: 31 | - loki-output 32 | -------------------------------------------------------------------------------- /fleet-examples-prepared/edge-monitoring/edge-monitoring.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | --- 6 | apiVersion: helm.cattle.io/v1 7 | kind: HelmChart 8 | metadata: 9 | name: node-exporter 10 | namespace: kube-system 11 | spec: 12 | chart: prometheus-node-exporter 13 | repo: https://prometheus-community.github.io/helm-charts 14 | targetNamespace: monitoring 15 | valuesContent: |- 16 | service: 17 | type: NodePort 18 | nodePort: 30091 19 | --- 20 | apiVersion: helm.cattle.io/v1 21 | kind: HelmChart 22 | metadata: 23 | name: kube-state-metrics 24 | namespace: kube-system 25 | spec: 26 | chart: kube-state-metrics 27 | repo: https://prometheus-community.github.io/helm-charts 28 | targetNamespace: monitoring 29 | valuesContent: |- 30 | service: 31 | type: NodePort 32 | nodePort: 30080 -------------------------------------------------------------------------------- /fleet-examples-local/do-demo-cluster/cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: provisioning.cattle.io/v1 2 | kind: Cluster 3 | metadata: 4 | name: do-demo-cluster 5 | namespace: fleet-default 6 | spec: 7 | cloudCredentialSecretName: cattle-global-data:cc-2s57m 8 | kubernetesVersion: 1.21.8+rke2r2 9 | rkeConfig: 10 | machinePools: 11 | - controlPlaneRole: true 12 | etcdRole: true 13 | workerRole: true 14 | quantity: 3 15 | name: do-demo-cluster-nodes 16 | machineConfigRef: 17 | kind: DigitaloceanConfig 18 | name: do-demo-cluster-nodes 19 | paused: false 20 | controlPlaneConfig: 21 | cni: calico 22 | upgradeStrategy: 23 | controlPlaneDrainOptions: 24 | enabled: false 25 | workerDrainOptions: 26 | enabled: false 27 | workerConcurrency: "10%" 28 | controlPlaneConcurrency: "10%" -------------------------------------------------------------------------------- /terraform-setup/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_access_key" { 2 | type = string 3 | description = "AWS access key used to create infrastructure" 4 | } 5 | variable "aws_secret_key" { 6 | type = string 7 | description = "AWS secret key used to create AWS infrastructure" 8 | } 9 | variable "aws_region" { 10 | type = string 11 | description = "AWS region used for all resources" 12 | default = "eu-central-1" 13 | } 14 | variable "digitalocean_token" { 15 | type = string 16 | description = "API token for DigitalOcean" 17 | } 18 | variable "ssh_key_file_name" { 19 | type = string 20 | description = "File path and name of SSH private key used for infrastructure and RKE" 21 | default = "~/.ssh/id_rsa" 22 | } 23 | variable "prefix" { 24 | type = string 25 | description = "Prefix added to names of all resources" 26 | default = "bhofmann" 27 | } -------------------------------------------------------------------------------- /fleet-examples-prepared/loki/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: loki 2 | helm: 3 | releaseName: loki 4 | repo: https://kubernetes-charts.banzaicloud.com 5 | chart: loki 6 | diff: 7 | comparePatches: 8 | - apiVersion: policy/v1beta1 9 | kind: PodSecurityPolicy 10 | name: loki 11 | operations: 12 | - {"op":"remove", "path":"/spec/hostIPC"} 13 | - {"op":"remove", "path":"/spec/hostNetwork"} 14 | - {"op":"remove", "path":"/spec/hostPID"} 15 | - {"op":"remove", "path":"/spec/privileged"} 16 | - apiVersion: apps/v1 17 | kind: StatefulSet 18 | name: loki 19 | namespace: loki 20 | operations: 21 | - {"op":"remove", 
"path":"/spec/template/spec/containers/0/volumeMounts/1/subPath"} 22 | - {"op":"remove", "path":"/spec/template/spec/containers/0/env"} 23 | - {"op":"remove", "path":"/spec/template/spec/nodeSelector"} 24 | - {"op":"remove", "path":"/spec/template/spec/tolerations"} 25 | -------------------------------------------------------------------------------- /fleet-examples-prepared/ingress-target-customization/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: rancher-demo 2 | 3 | helm: 4 | chart: rancher-demo 5 | repo: https://bashofmann.github.io/demo-charts 6 | releaseName: app-one 7 | version: 2.5.1 8 | values: 9 | ingress: 10 | enabled: true 11 | 12 | targetCustomizations: 13 | - name: bhofmann-fleet-0 14 | helm: 15 | values: 16 | ingress: 17 | host: app-one.bhofmann-fleet-0.plgrnd.be 18 | clusterSelector: 19 | matchLabels: 20 | management.cattle.io/cluster-display-name: bhofmann-fleet-0 21 | - name: bhofmann-fleet-1 22 | helm: 23 | values: 24 | ingress: 25 | host: app-one.bhofmann-fleet-1.plgrnd.be 26 | clusterSelector: 27 | matchLabels: 28 | management.cattle.io/cluster-display-name: bhofmann-fleet-1 29 | - name: bhofmann-fleet-2 30 | helm: 31 | values: 32 | ingress: 33 | host: app-one.bhofmann-fleet-2.plgrnd.be 34 | clusterSelector: 35 | matchLabels: 36 | management.cattle.io/cluster-display-name: bhofmann-fleet-2 -------------------------------------------------------------------------------- /fleet-examples-prepared/edgme-monitoring-servicemonitor/fedservicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: external-prometheus 5 | namespace: cattle-monitoring-system 6 | spec: 7 | endpoints: 8 | - interval: 1m 9 | honorLabels: true 10 | scrapeTimeout: 55s 11 | path: "/federate" 12 | port: prometheus-nodeport 13 | namespaceSelector: 14 | matchNames: 15 | - cattle-monitoring-system 16 | selector: 17 | matchLabels: 18 | app: external-prometheus 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: cluster-one 24 | labels: 25 | app: external-prometheus 26 | namespace: cattle-monitoring-system 27 | spec: 28 | type: ExternalName 29 | externalName: 18.156.119.236 30 | clusterIP: "" 31 | ports: 32 | - port: 3000 33 | targetPort: 3000 34 | name: prometheus-nodeport 35 | protocol: TCP 36 | --- 37 | apiVersion: v1 38 | kind: Endpoints 39 | metadata: 40 | name: cluster-one 41 | labels: 42 | app: external-prometheus 43 | namespace: cattle-monitoring-system 44 | subsets: 45 | - addresses: 46 | - ip: 18.156.119.236 47 | ports: 48 | - port: 3000 49 | name: prometheus-nodeport 50 | protocol: TCP -------------------------------------------------------------------------------- /terraform-setup/lb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_elb" "rancher-server-lb" { 2 | name = "${var.prefix}-rancher-server-lb" 3 | availability_zones = aws_instance.x86_vms[*].availability_zone 4 | 5 | listener { 6 | instance_port = 80 7 | instance_protocol = "tcp" 8 | lb_port = 80 9 | lb_protocol = "tcp" 10 | } 11 | 12 | listener { 13 | instance_port = 443 14 | instance_protocol = "tcp" 15 | lb_port = 443 16 | lb_protocol = "tcp" 17 | } 18 | 19 | health_check { 20 | healthy_threshold = 2 21 | unhealthy_threshold = 2 22 | timeout = 3 23 | target = "TCP:80" 24 | interval = 30 25 | } 26 | 27 | instances = [ 28 | aws_instance.x86_vms[0].id, 29 | 
aws_instance.x86_vms[1].id, 30 | aws_instance.x86_vms[2].id 31 | ] 32 | cross_zone_load_balancing = true 33 | idle_timeout = 400 34 | connection_draining = true 35 | connection_draining_timeout = 400 36 | 37 | tags = { 38 | Name = "${var.prefix}-rancher-server-lb" 39 | } 40 | } 41 | 42 | data "digitalocean_domain" "rancher" { 43 | name = "plgrnd.be" 44 | } 45 | 46 | resource "digitalocean_record" "rancher" { 47 | domain = data.digitalocean_domain.rancher.name 48 | type = "CNAME" 49 | name = "rancher-demo" 50 | value = "${aws_elb.rancher-server-lb.dns_name}." 51 | ttl = 60 52 | } 53 | -------------------------------------------------------------------------------- /terraform-setup/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "ssh_key_pair" { 2 | key_name_prefix = "${var.prefix}-rancher-k3s-fleet-" 3 | public_key = file("${var.ssh_key_file_name}.pub") 4 | } 5 | 6 | # Security group to allow all traffic 7 | resource "aws_security_group" "sg_allowall" { 8 | name = "${var.prefix}-rancher-k3s-fleet-allowall" 9 | 10 | ingress { 11 | from_port = "0" 12 | to_port = "0" 13 | protocol = "-1" 14 | cidr_blocks = ["0.0.0.0/0"] 15 | } 16 | 17 | egress { 18 | from_port = "0" 19 | to_port = "0" 20 | protocol = "-1" 21 | cidr_blocks = ["0.0.0.0/0"] 22 | } 23 | } 24 | 25 | resource "aws_instance" "x86_vms" { 26 | count = 5 27 | ami = data.aws_ami.sles_x86.id 28 | instance_type = "t3a.xlarge" 29 | 30 | key_name = aws_key_pair.ssh_key_pair.key_name 31 | security_groups = [aws_security_group.sg_allowall.name] 32 | 33 | root_block_device { 34 | volume_size = 80 35 | } 36 | 37 | tags = { 38 | Name = "${var.prefix}-rancher-k3s-fleet-x86" 39 | } 40 | } 41 | 42 | resource "aws_instance" "arm_vms" { 43 | count = 1 44 | ami = data.aws_ami.sles_arm.id 45 | instance_type = "a1.medium" 46 | 47 | key_name = aws_key_pair.ssh_key_pair.key_name 48 | security_groups = [aws_security_group.sg_allowall.name] 49 | 50 | root_block_device { 51 | volume_size = 80 52 | } 53 | 54 | tags = { 55 | Name = "${var.prefix}-rancher-k3s-fleet-arm" 56 | } 57 | } -------------------------------------------------------------------------------- /psp/templates/template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8spspprivilegedcontainer 5 | annotations: 6 | description: >- 7 | Controls the ability of any container to enable privileged mode. 8 | Corresponds to the `privileged` field in a PodSecurityPolicy. For more 9 | information, see 10 | https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged 11 | spec: 12 | crd: 13 | spec: 14 | names: 15 | kind: K8sPSPPrivilegedContainer 16 | validation: 17 | openAPIV3Schema: 18 | description: >- 19 | Controls the ability of any container to enable privileged mode. 20 | Corresponds to the `privileged` field in a PodSecurityPolicy. 
For more 21 | information, see 22 | https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged 23 | targets: 24 | - target: admission.k8s.gatekeeper.sh 25 | rego: | 26 | package k8spspprivileged 27 | violation[{"msg": msg, "details": {}}] { 28 | c := input_containers[_] 29 | c.securityContext.privileged 30 | msg := sprintf("Privileged container is not allowed: %v, securityContext: %v", [c.name, c.securityContext]) 31 | } 32 | input_containers[c] { 33 | c := input.review.object.spec.containers[_] 34 | } 35 | input_containers[c] { 36 | c := input.review.object.spec.initContainers[_] 37 | } -------------------------------------------------------------------------------- /fleet-examples-prepared/demo-app/manifests/workload.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: loki-example-app 6 | name: loki-example-app 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: loki-example-app 12 | template: 13 | metadata: 14 | labels: 15 | app.kubernetes.io/name: loki-example-app 16 | spec: 17 | containers: 18 | - name: loki-example-app 19 | image: chrisurwin/rancher-demo 20 | ports: 21 | - name: http 22 | containerPort: 8080 23 | protocol: TCP 24 | livenessProbe: 25 | httpGet: 26 | path: / 27 | port: http 28 | readinessProbe: 29 | httpGet: 30 | path: / 31 | port: http 32 | --- 33 | apiVersion: v1 34 | kind: Service 35 | metadata: 36 | name: loki-example-app 37 | labels: 38 | app.kubernetes.io/name: loki-example-app 39 | spec: 40 | selector: 41 | app.kubernetes.io/name: loki-example-app 42 | ports: 43 | - protocol: TCP 44 | port: 8080 45 | targetPort: 8080 46 | name: http 47 | --- 48 | apiVersion: networking.k8s.io/v1beta1 49 | kind: Ingress 50 | metadata: 51 | name: loki-example-app 52 | spec: 53 | rules: 54 | - host: xip.io 55 | http: 56 | paths: 57 | - path: / 58 | pathType: ImplementationSpecific 59 | backend: 60 | serviceName: loki-example-app 61 | servicePort: 8080 -------------------------------------------------------------------------------- /fleet-examples-prepared/monitoring/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: cattle-monitoring-system 2 | helm: 3 | releaseName: rancher-monitoring 4 | repo: https://charts.rancher.io 5 | chart: rancher-monitoring 6 | diff: 7 | comparePatches: 8 | - apiVersion: admissionregistration.k8s.io/v1beta1 9 | kind: MutatingWebhookConfiguration 10 | name: rancher-monitoring-admission 11 | operations: 12 | - {"op":"remove", "path":"/webhooks/0/failurePolicy"} 13 | - {"op":"remove", "path":"/webhooks/0/rules/0/scope"} 14 | - {"op":"remove", "path":"/admission-prometheusrules/mutate"} 15 | - apiVersion: admissionregistration.k8s.io/v1beta1 16 | kind: ValidatingWebhookConfiguration 17 | name: rancher-monitoring-admission 18 | operations: 19 | - {"op":"remove", "path":"/webhooks/0/failurePolicy"} 20 | - {"op":"remove", "path":"/webhooks/0/rules/0/scope"} 21 | - {"op":"remove", "path":"/admission-prometheusrules/validate"} 22 | - apiVersion: policy/v1beta1 23 | kind: PodSecurityPolicy 24 | operations: 25 | - {"op":"remove", "path":"/spec/hostIPC"} 26 | - {"op":"remove", "path":"/spec/hostNetwork"} 27 | - {"op":"remove", "path":"/spec/hostPID"} 28 | - {"op":"remove", "path":"/spec/privileged"} 29 | - {"op":"remove", "path":"/spec/readOnlyRootFilesystem"} 30 | - apiVersion: apps/v1 31 | kind: Deployment 32 | name: 
rancher-monitoring-grafana 33 | namespace: cattle-monitoring-system 34 | operations: 35 | - {"op":"remove", "path":"/spec/template/spec/containers/0/env/0/value"} 36 | - apiVersion: apps/v1 37 | kind: Deployment 38 | operations: 39 | - {"op":"remove", "path":"/spec/template/spec/hostNetwork"} 40 | - {"op":"remove", "path":"/spec/template/spec/nodeSelector"} 41 | - {"op":"remove", "path":"/spec/template/spec/priorityClassName"} 42 | - {"op":"remove", "path":"/spec/template/spec/tolerations"} 43 | - apiVersion: v1 44 | kind: ServiceAccount 45 | operations: 46 | - {"op":"remove", "path":"/imagePullSecrets"} 47 | -------------------------------------------------------------------------------- /fleet-examples-prepared/metal-lb-config/metal-lb-config/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "metal-lb-config.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "metal-lb-config.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "metal-lb-config.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "metal-lb-config.labels" -}} 37 | helm.sh/chart: {{ include "metal-lb-config.chart" . }} 38 | {{ include "metal-lb-config.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "metal-lb-config.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "metal-lb-config.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "metal-lb-config.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "metal-lb-config.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /terraform-setup/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/digitalocean/digitalocean" { 5 | version = "2.19.0" 6 | hashes = [ 7 | "h1:XcLoKA2a1MR4foSFeFbJgCADRvsXtN9SyIiJX0fSGys=", 8 | "zh:01cee85343dee2dfc01499e19ef4e56b0c9260eee0a47329231cf500c07b4386", 9 | "zh:099eeddf9baf9c282430231da501a8b96b3fb28507ce3b78e3a185cc9d4b3860", 10 | "zh:191e090e8553355d91842163737d71051aeb499c8ddb23d2e8aae9dab2f8a1a5", 11 | "zh:25356abb47769270730b0ddb0a3eb89aec637395cdcb77c309d23e55839e4461", 12 | "zh:28876afb75ba5367d20e508e05c7657f90922142ff80d8a81a4d68b3381adb86", 13 | "zh:404a304e37c3dec8017318b16ab701553e5242dc2460211346a9dd39242709a6", 14 | "zh:40f53111b01fc78fdc7a6ba47a80d51c9a45e77e5b7d7d5bcae3a0c6f58ffbdf", 15 | "zh:48f212068234df3dcfe5544c96b10403b15a190203742756d7d0573ee0857c17", 16 | "zh:5189fe4fffdbff5c280f6741f55b2de9cb2b8c653cda0b2339c28cd1e3bc7884", 17 | "zh:a7d5840ca789a03a285c67d2838af4d8687c99f3e8fac4ce56fcd23802a66156", 18 | "zh:c0bd3c4555e5d7e6c96d3add3ddd8e41aa0df9e4a4518ad3b7f1d726a4e0a9f4", 19 | "zh:d70a903a6d75533aa4713e255c9c967ec453195f2209439981f015f203805a6e", 20 | "zh:db8110736bd47f99213d72309ebb720718a80b15ddd46e34a8ee9b2125903079", 21 | "zh:e2180f334506601e0a6af8863159cc719ce584fdb23bd45ddc120f33d22cec19", 22 | "zh:eb515a24d231e7f1ef344b9b88fa2071f760ec34fbb47d80bbacdf7e35f3daca", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/hashicorp/aws" { 27 | version = "4.8.0" 28 | hashes = [ 29 | "h1:W2cPGKmqkPbTc91lu42QeC3RFBqB5TnRnS3IxNME2FM=", 30 | "zh:16cbdbc03ad13358d12433e645e2ab5a615e3a3662a74e3c317267c9377713d8", 31 | "zh:1d813c5e6c21fe370652495e29f783db4e65037f913ff0d53d28515c36fbb70a", 32 | "zh:31ad8282e31d0fac62e96fc2321a68ad4b92ab90f560be5f875d1b01a493e491", 33 | "zh:5099a9e699784cabb5686d2cb52ca910f9c697e977c654ecedd196e838387623", 34 | "zh:5758cbb813091db8573f27bba37c48f63ba95f2104f3bc49f13131e3c305b848", 35 | "zh:67ea77fb00bf0a09e712f5259a7acb494ce503a34809b7919996744fd92e3312", 36 | "zh:72c87be5d1f7917d4281c14a3335a9ec3cd57bf63d95a440faa7035248083dcd", 37 | "zh:79005154b9f5eccc1580e0eb803f0dfee68ba856703ef6489719cb014a3c2b18", 38 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 39 | "zh:d27f9a8b5b30883a3e45f77506391524df0c66a76c3bc71f7236c3fc81d0597d", 40 | "zh:e2985563dc652cf9b10420bc62f0a710308ef5c31e46b94c8ea10b8f27fa1ef3", 41 | "zh:f11bb34ee0dad4bc865db51e7e299a4f030c5e9f6b6080d611797cc99deeb40a", 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Code examples for a talk about Multi-Cluster Kubernetes Management with Rancher, K3S and Fleet 2 | 3 | ## Installation steps 4 | 5 | 1. Set up infrastructure 6 | 7 | Fill out `terraform-setup/terraform.tfvars` with AWS and DigitalOcean credentials. 8 | 9 | ``` 10 | make step_01 11 | ``` 12 | 13 | 2. Create a 3-node HA cluster for Rancher 14 | 15 | ``` 16 | make step_02 17 | ``` 18 | 19 | 3. Install cert-manager and Rancher 20 | 21 | ``` 22 | make step_03 23 | ``` 24 | 25 | 4. Create 3 single-node downstream k3s clusters 26 | 27 | ``` 28 | make step_04 29 | ``` 30 | 31 | ### Configure Rancher 32 | 33 | Go to https://rancher-demo.plgrnd.be/login and set the admin password and server URL. 34 | 35 | ### Add 3 downstream clusters to Rancher 36 | 37 | "Add Cluster" -> "Register an existing Kubernetes cluster" -> "Other Cluster" 38 | 39 | Add a "group" label with value "amd" or "arm". 40 | 41 | To register each cluster, run 42 | 43 | ``` 44 | ./run_on.sh [3-5] kubectl apply .... 45 | ``` 46 | 47 | Wait until all clusters are ready. 48 | 49 | ### Configure Fleet 50 | 51 | Go to the "Cluster Explorer" of the local cluster -> "Continuous Delivery" 52 | 53 | Add arm and amd Cluster Groups matching the cluster labels from above in "fleet-default". 54 | 55 | ## Use Fleet 56 | 57 | Download the kubeconfig of each cluster ("Download KubeConfig" in the Rancher UI). 58 | 59 | Commands to watch the clusters: 60 | 61 | ``` 62 | watch kubectl --kubeconfig kubeconfig_cluster_one --insecure-skip-tls-verify get nodes,pods -A 63 | watch kubectl --kubeconfig kubeconfig_cluster_two --insecure-skip-tls-verify get nodes,pods -A 64 | watch kubectl --kubeconfig kubeconfig_cluster_three --insecure-skip-tls-verify get nodes,pods -A 65 | ``` 66 | 67 | ### Upgrade all clusters 68 | 69 | Add a Git repo to deploy the system-upgrade-controller: 70 | Repo: https://github.com/rancher/system-upgrade-controller 71 | Path: manifests 72 | All clusters. 73 | 74 | Add a Git repo to deploy the upgrade Plan: 75 | Repo: https://github.com/bashofmann/rancher-k3s-fleet-examples 76 | Path: fleet-upgrades 77 | Only amd clusters. 78 | 79 | * Deploy upgrade plan 80 | 81 | Add a Git repo to deploy the rest: 82 | Repo: https://github.com/bashofmann/rancher-k3s-fleet-examples 83 | Path: fleet-examples 84 | All clusters.
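85 | 86 | Each of these Git repos can also be created declaratively instead of through the UI. A minimal sketch of the equivalent Fleet `GitRepo` resource for the upgrade-Plan repo above (the resource name is an arbitrary choice; the amd cluster group is the one created under "Configure Fleet"): 87 | 88 | ``` 89 | apiVersion: fleet.cattle.io/v1alpha1 90 | kind: GitRepo 91 | metadata: 92 | name: fleet-upgrades 93 | namespace: fleet-default 94 | spec: 95 | repo: https://github.com/bashofmann/rancher-k3s-fleet-examples 96 | paths: 97 | - fleet-upgrades 98 | targets: 99 | - clusterGroup: amd 100 | ```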
40 | 41 | To register each cluster, run 42 | 43 | ``` 44 | ./run_on.sh [3-5] kubectl apply .... 45 | ``` 46 | 47 | Wait until all clusters are ready. 48 | 49 | ### Configure Fleet 50 | 51 | Go to the "Cluster Explorer" of the local cluster -> "Continuous Delivery". 52 | 53 | In the "fleet-default" workspace, add "arm" and "amd" Cluster Groups matching the cluster labels from above. 54 | 55 | ## Use Fleet 56 | 57 | Use "Download KubeConfig" in Rancher to fetch a kubeconfig for each downstream cluster (saved here as `kubeconfig_cluster_one`, `kubeconfig_cluster_two` and `kubeconfig_cluster_three`). 58 | 59 | Commands to watch the clusters: 60 | 61 | ``` 62 | watch kubectl --kubeconfig kubeconfig_cluster_one --insecure-skip-tls-verify get nodes,pods -A 63 | watch kubectl --kubeconfig kubeconfig_cluster_two --insecure-skip-tls-verify get nodes,pods -A 64 | watch kubectl --kubeconfig kubeconfig_cluster_three --insecure-skip-tls-verify get nodes,pods -A 65 | ``` 66 | 67 | ### Upgrade all clusters 68 | 69 | Add a Git repo to deploy the system-upgrade-controller: 70 | Repo: https://github.com/rancher/system-upgrade-controller 71 | Path: manifests 72 | Targets: all clusters. 73 | 74 | Add a Git repo to deploy the upgrade Plan: 75 | Repo: https://github.com/bashofmann/rancher-k3s-fleet-examples 76 | Path: fleet-upgrades 77 | Targets: only amd clusters. 78 | 79 | * Deploy the upgrade plan 80 | 81 | Add a Git repo to deploy the rest of the examples: 82 | Repo: https://github.com/bashofmann/rancher-k3s-fleet-examples 83 | Path: fleet-examples 84 | Targets: all clusters. (These GitRepos can also be created from the CLI; see the sketch after this README.) 85 | 86 | ### Deploy applications 87 | 88 | * Add the hello-world example 89 | * Open the web app on its NodePort 90 | * Update the hello-world example with overlays 91 | * Show that the color changed on the arm cluster 92 | * Deploy netdata 93 | * Change the GitRepo cluster selector 94 | 95 | ## Cleanup 96 | 97 | To remove everything, run 98 | 99 | ``` 100 | make destroy 101 | ```
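A minimal declarative equivalent for one of the GitRepos above (a sketch: the GitRepo name is illustrative, and it assumes the "amd" Cluster Group from the Fleet configuration step exists in `fleet-default`; Fleet's `GitRepo` resource lives in the `fleet.cattle.io/v1alpha1` API group):

```
# Sketch: create the upgrade-plan GitRepo with kubectl instead of the Rancher UI.
# Mirrors the "fleet-upgrades" entry above and targets only the "amd" cluster group.
kubectl --kubeconfig kubeconfig -n fleet-default apply -f - <<EOF
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: fleet-upgrades
spec:
  repo: https://github.com/bashofmann/rancher-k3s-fleet-examples
  paths:
  - fleet-upgrades
  targets:
  - clusterGroup: amd
EOF
```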
-------------------------------------------------------------------------------- /fleet-examples-prepared/cluster-label-policy/template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8srequireduniquelabels 5 | annotations: 6 | description: Requires all resources to contain a specified label with a value 7 | matching a provided regular expression, and requires that value to be unique. 8 | spec: 9 | crd: 10 | spec: 11 | names: 12 | kind: K8sRequiredUniqueLabels 13 | validation: 14 | # Schema for the `parameters` field 15 | openAPIV3Schema: 16 | type: object 17 | properties: 18 | message: 19 | type: string 20 | labels: 21 | type: array 22 | items: 23 | type: object 24 | properties: 25 | key: 26 | type: string 27 | allowedRegex: 28 | type: string 29 | targets: 30 | - target: admission.k8s.gatekeeper.sh 31 | rego: | 32 | package k8srequireduniquelabels 33 | get_message(parameters, _default) = msg { 34 | not parameters.message 35 | msg := _default 36 | } 37 | get_message(parameters, _default) = msg { 38 | msg := parameters.message 39 | } 40 | identical(obj, review) { 41 | obj.metadata.namespace == review.object.metadata.namespace 42 | obj.metadata.name == review.object.metadata.name 43 | } 44 | violation[{"msg": msg, "details": {"missing_labels": missing}}] { 45 | provided := {label | input.review.object.metadata.labels[label]} 46 | required := {label | label := input.parameters.labels[_].key} 47 | missing := required - provided 48 | count(missing) > 0 49 | def_msg := sprintf("you must provide labels: %v", [missing]) 50 | msg := get_message(input.parameters, def_msg) 51 | } 52 | violation[{"msg": msg, "details": {"not_matching_labels": value}}] { 53 | value := input.review.object.metadata.labels[key] 54 | expected := input.parameters.labels[_] 55 | expected.key == key 56 | # do not match if allowedRegex is not defined, or is an empty string 57 | expected.allowedRegex != "" 58 | not re_match(expected.allowedRegex, value) 59 | def_msg := sprintf("Label <%v: %v> does not satisfy allowed regex: %v", [key, value, expected.allowedRegex]) 60 | msg := get_message(input.parameters, def_msg) 61 | } 62 | violation[{"msg": msg}] { 63 | kind := input.review.kind.kind 64 | group := input.review.kind.group 65 | value := input.review.object.metadata.labels[key] 66 | other := data.inventory.namespace[_][group][kind][name] 67 | other.metadata.labels[key] == value 68 | not identical(other, input.review) 69 | msg := sprintf("%v label %v conflicts with an existing %v <%v>", [kind, key, kind, value]) 70 | } -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | 3 | K3S_TOKEN="VA87qPxet2SB8BDuLPWfU2xnPUSoETYF" 4 | 5 | export KUBECONFIG=kubeconfig 6 | 7 | destroy: 8 | cd terraform-setup && terraform destroy -auto-approve && rm terraform.tfstate terraform.tfstate.backup 9 | 10 | all: step_01 step_02 step_03 step_04 11 | 12 | step_01: 13 | echo "Creating infrastructure" 14 | cd terraform-setup && terraform init && terraform apply -auto-approve 15 | 16 | step_02: 17 | echo "Creating k3s cluster on x86 vms 0,1,2" 18 | source get_env.sh && ssh -o StrictHostKeyChecking=no ec2-user@$${IP0} "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 INSTALL_K3S_EXEC='server' K3S_TOKEN=$(K3S_TOKEN) K3S_KUBECONFIG_MODE=644 K3S_CLUSTER_INIT=1 sh -" 19 | source get_env.sh && ssh -o StrictHostKeyChecking=no ec2-user@$${IP1} "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 INSTALL_K3S_EXEC='server' K3S_TOKEN=$(K3S_TOKEN) K3S_URL=https://$${IP0}:6443 sh - " 20 | source get_env.sh && ssh -o StrictHostKeyChecking=no ec2-user@$${IP2} "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 INSTALL_K3S_EXEC='server' K3S_TOKEN=$(K3S_TOKEN) K3S_URL=https://$${IP0}:6443 sh - " 21 | source get_env.sh && scp -o StrictHostKeyChecking=no
ec2-user@$${IP0}:/etc/rancher/k3s/k3s.yaml kubeconfig 22 | source get_env.sh && sed -i "s/127.0.0.1/$${IP0}/g" kubeconfig 23 | 24 | print_step_02: 25 | echo "Creating k3s cluster on x86 vms 0,1,2" 26 | source get_env.sh && echo "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 INSTALL_K3S_EXEC='server' K3S_TOKEN=$(K3S_TOKEN) K3S_KUBECONFIG_MODE=644 K3S_CLUSTER_INIT=1 sh -" 27 | source get_env.sh && echo "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 INSTALL_K3S_EXEC='server' K3S_TOKEN=$(K3S_TOKEN) K3S_URL=https://$${IP0}:6443 sh - " 28 | source get_env.sh && echo "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 INSTALL_K3S_EXEC='server' K3S_TOKEN=$(K3S_TOKEN) K3S_URL=https://$${IP0}:6443 sh - " 29 | 30 | kubeconfigs: 31 | source get_env.sh && scp -o StrictHostKeyChecking=no ec2-user@$${IP0}:/etc/rancher/k3s/k3s.yaml kubeconfig 32 | source get_env.sh && sed -i "s/127.0.0.1/$${IP0}/g" kubeconfig 33 | touch kubeconfig_cluster_one 34 | touch kubeconfig_cluster_two 35 | touch kubeconfig_cluster_three 36 | 37 | step_03: 38 | echo "Installing cert-manager and Rancher" 39 | helm repo update 40 | helm upgrade --install \ 41 | cert-manager jetstack/cert-manager \ 42 | --namespace cert-manager \ 43 | --version v1.7.1 --create-namespace --set installCRDs=true 44 | kubectl rollout status deployment -n cert-manager cert-manager 45 | kubectl rollout status deployment -n cert-manager cert-manager-webhook 46 | helm upgrade --install rancher rancher-latest/rancher \ 47 | --namespace cattle-system \ 48 | --version 2.6.4 \ 49 | --set hostname=rancher-demo.plgrnd.be --create-namespace 50 | kubectl rollout status deployment -n cattle-system rancher 51 | kubectl -n cattle-system wait --for=condition=ready certificate/tls-rancher-ingress 52 | 53 | step_04: 54 | source get_env.sh 55 | echo "Creating downstream k3s clusters" 56 | source get_env.sh && ssh -o StrictHostKeyChecking=no ec2-user@$${IP3} "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 K3S_KUBECONFIG_MODE=644 sh -" 57 | source get_env.sh && ssh -o StrictHostKeyChecking=no ec2-user@$${IP4} "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 K3S_KUBECONFIG_MODE=644 sh -" 58 | source get_env.sh && ssh -o StrictHostKeyChecking=no ec2-user@$${IP5} "curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.21 K3S_KUBECONFIG_MODE=644 sh -" 59 | touch kubeconfig_cluster_one 60 | touch kubeconfig_cluster_two 61 | touch kubeconfig_cluster_three -------------------------------------------------------------------------------- /monitoring-cluster/edgme-monitoring-servicemonitor/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: edge-clusters 5 | namespace: cattle-monitoring-system 6 | labels: 7 | cluster: edge-clusters 8 | spec: 9 | endpoints: 10 | - interval: 1m 11 | honorLabels: true 12 | scrapeTimeout: 55s 13 | path: "/metrics" 14 | port: node-exporter 15 | metricRelabelings: 16 | - sourceLabels: 17 | - service 18 | targetLabel: cluster 19 | - interval: 1m 20 | honorLabels: true 21 | scrapeTimeout: 55s 22 | path: "/metrics" 23 | port: kube-state-metrics 24 | metricRelabelings: 25 | - sourceLabels: 26 | - service 27 | targetLabel: cluster 28 | namespaceSelector: 29 | matchNames: 30 | - cattle-monitoring-system 31 | selector: 32 | matchLabels: 33 | cluster: edge-clusters 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: cluster-one 39 | labels: 40 | cluster: 
edge-clusters 41 | namespace: cattle-monitoring-system 42 | spec: 43 | type: ExternalName 44 | externalName: 18.156.134.94 45 | clusterIP: "" 46 | ports: 47 | - port: 9100 48 | targetPort: 9100 49 | name: node-exporter 50 | protocol: TCP 51 | - port: 30080 52 | targetPort: 30080 53 | name: kube-state-metrics 54 | protocol: TCP 55 | --- 56 | apiVersion: v1 57 | kind: Endpoints 58 | metadata: 59 | name: cluster-one 60 | labels: 61 | cluster: edge-clusters 62 | namespace: cattle-monitoring-system 63 | subsets: 64 | - addresses: 65 | - ip: 18.156.134.94 66 | ports: 67 | - port: 9100 68 | name: node-exporter 69 | protocol: TCP 70 | - port: 30080 71 | name: kube-state-metrics 72 | protocol: TCP 73 | --- 74 | apiVersion: v1 75 | kind: Service 76 | metadata: 77 | name: cluster-two 78 | labels: 79 | cluster: edge-clusters 80 | namespace: cattle-monitoring-system 81 | spec: 82 | type: ExternalName 83 | externalName: 34.241.125.82 84 | clusterIP: "" 85 | ports: 86 | - port: 9100 87 | targetPort: 9100 88 | name: node-exporter 89 | protocol: TCP 90 | - port: 30080 91 | targetPort: 30080 92 | name: kube-state-metrics 93 | protocol: TCP 94 | --- 95 | apiVersion: v1 96 | kind: Endpoints 97 | metadata: 98 | name: cluster-two 99 | labels: 100 | cluster: edge-clusters 101 | namespace: cattle-monitoring-system 102 | subsets: 103 | - addresses: 104 | - ip: 34.241.125.82 105 | ports: 106 | - port: 9100 107 | name: node-exporter 108 | protocol: TCP 109 | - port: 30080 110 | name: kube-state-metrics 111 | protocol: TCP 112 | --- 113 | apiVersion: v1 114 | kind: Service 115 | metadata: 116 | name: cluster-three 117 | labels: 118 | cluster: edge-clusters 119 | namespace: cattle-monitoring-system 120 | spec: 121 | type: ExternalName 122 | externalName: 18.198.2.200 123 | clusterIP: "" 124 | ports: 125 | - port: 9100 126 | targetPort: 9100 127 | name: node-exporter 128 | protocol: TCP 129 | - port: 30080 130 | targetPort: 30080 131 | name: kube-state-metrics 132 | protocol: TCP 133 | --- 134 | apiVersion: v1 135 | kind: Endpoints 136 | metadata: 137 | name: cluster-three 138 | labels: 139 | cluster: edge-clusters 140 | namespace: cattle-monitoring-system 141 | subsets: 142 | - addresses: 143 | - ip: 18.198.2.200 144 | ports: 145 | - port: 9100 146 | name: node-exporter 147 | protocol: TCP 148 | - port: 30080 149 | name: kube-state-metrics 150 | protocol: TCP 151 | --- 152 | apiVersion: v1 153 | kind: Service 154 | metadata: 155 | name: cluster-four 156 | labels: 157 | cluster: edge-clusters 158 | namespace: cattle-monitoring-system 159 | spec: 160 | type: ExternalName 161 | externalName: 3.250.58.172 162 | clusterIP: "" 163 | ports: 164 | - port: 9100 165 | targetPort: 9100 166 | name: node-exporter 167 | protocol: TCP 168 | - port: 30080 169 | targetPort: 30080 170 | name: kube-state-metrics 171 | protocol: TCP 172 | --- 173 | apiVersion: v1 174 | kind: Endpoints 175 | metadata: 176 | name: cluster-four 177 | labels: 178 | cluster: edge-clusters 179 | namespace: cattle-monitoring-system 180 | subsets: 181 | - addresses: 182 | - ip: 3.250.58.172 183 | ports: 184 | - port: 9100 185 | name: node-exporter 186 | protocol: TCP 187 | - port: 30080 188 | name: kube-state-metrics 189 | protocol: TCP -------------------------------------------------------------------------------- /fleet-examples-prepared/edgme-monitoring-servicemonitor/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: 
edge-clusters 5 | namespace: cattle-monitoring-system 6 | labels: 7 | cluster: edge-clusters 8 | spec: 9 | endpoints: 10 | - interval: 1m 11 | honorLabels: true 12 | scrapeTimeout: 55s 13 | path: "/metrics" 14 | port: node-exporter 15 | metricRelabelings: 16 | - sourceLabels: 17 | - service 18 | targetLabel: cluster 19 | - interval: 1m 20 | honorLabels: true 21 | scrapeTimeout: 55s 22 | path: "/metrics" 23 | port: kube-state-metrics 24 | metricRelabelings: 25 | - sourceLabels: 26 | - service 27 | targetLabel: cluster 28 | namespaceSelector: 29 | matchNames: 30 | - cattle-monitoring-system 31 | selector: 32 | matchLabels: 33 | cluster: edge-clusters 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: cluster-one 39 | labels: 40 | cluster: edge-clusters 41 | namespace: cattle-monitoring-system 42 | spec: 43 | type: ExternalName 44 | externalName: 18.156.119.236 45 | clusterIP: "" 46 | ports: 47 | - port: 9100 48 | targetPort: 9100 49 | name: node-exporter 50 | protocol: TCP 51 | - port: 30080 52 | targetPort: 30080 53 | name: kube-state-metrics 54 | protocol: TCP 55 | --- 56 | apiVersion: v1 57 | kind: Endpoints 58 | metadata: 59 | name: cluster-one 60 | labels: 61 | cluster: edge-clusters 62 | namespace: cattle-monitoring-system 63 | subsets: 64 | - addresses: 65 | - ip: 18.156.119.236 66 | ports: 67 | - port: 9100 68 | name: node-exporter 69 | protocol: TCP 70 | - port: 30080 71 | name: kube-state-metrics 72 | protocol: TCP 73 | --- 74 | apiVersion: v1 75 | kind: Service 76 | metadata: 77 | name: cluster-two 78 | labels: 79 | cluster: edge-clusters 80 | namespace: cattle-monitoring-system 81 | spec: 82 | type: ExternalName 83 | externalName: 54.246.50.116 84 | clusterIP: "" 85 | ports: 86 | - port: 9100 87 | targetPort: 9100 88 | name: node-exporter 89 | protocol: TCP 90 | - port: 30080 91 | targetPort: 30080 92 | name: kube-state-metrics 93 | protocol: TCP 94 | --- 95 | apiVersion: v1 96 | kind: Endpoints 97 | metadata: 98 | name: cluster-two 99 | labels: 100 | cluster: edge-clusters 101 | namespace: cattle-monitoring-system 102 | subsets: 103 | - addresses: 104 | - ip: 54.246.50.116 105 | ports: 106 | - port: 9100 107 | name: node-exporter 108 | protocol: TCP 109 | - port: 30080 110 | name: kube-state-metrics 111 | protocol: TCP 112 | --- 113 | apiVersion: v1 114 | kind: Service 115 | metadata: 116 | name: cluster-three 117 | labels: 118 | cluster: edge-clusters 119 | namespace: cattle-monitoring-system 120 | spec: 121 | type: ExternalName 122 | externalName: 3.66.225.46 123 | clusterIP: "" 124 | ports: 125 | - port: 9100 126 | targetPort: 9100 127 | name: node-exporter 128 | protocol: TCP 129 | - port: 30080 130 | targetPort: 30080 131 | name: kube-state-metrics 132 | protocol: TCP 133 | --- 134 | apiVersion: v1 135 | kind: Endpoints 136 | metadata: 137 | name: cluster-three 138 | labels: 139 | cluster: edge-clusters 140 | namespace: cattle-monitoring-system 141 | subsets: 142 | - addresses: 143 | - ip: 3.66.225.46 144 | ports: 145 | - port: 9100 146 | name: node-exporter 147 | protocol: TCP 148 | - port: 30080 149 | name: kube-state-metrics 150 | protocol: TCP 151 | --- 152 | apiVersion: v1 153 | kind: Service 154 | metadata: 155 | name: cluster-four 156 | labels: 157 | cluster: edge-clusters 158 | namespace: cattle-monitoring-system 159 | spec: 160 | type: ExternalName 161 | externalName: 34.244.139.148 162 | clusterIP: "" 163 | ports: 164 | - port: 9100 165 | targetPort: 9100 166 | name: node-exporter 167 | protocol: TCP 168 | - port: 30080 169 | targetPort: 
30080 170 | name: kube-state-metrics 171 | protocol: TCP 172 | --- 173 | apiVersion: v1 174 | kind: Endpoints 175 | metadata: 176 | name: cluster-four 177 | labels: 178 | cluster: edge-clusters 179 | namespace: cattle-monitoring-system 180 | subsets: 181 | - addresses: 182 | - ip: 34.244.139.148 183 | ports: 184 | - port: 9100 185 | name: node-exporter 186 | protocol: TCP 187 | - port: 30080 188 | name: kube-state-metrics 189 | protocol: TCP --------------------------------------------------------------------------------
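Both `servicemonitor.yaml` variants above follow the same pattern: for each edge cluster, an `ExternalName` Service plus a manually maintained `Endpoints` object let the central Prometheus scrape node-exporter (port 9100) and the kube-state-metrics NodePort (30080) across cluster boundaries, while the `metricRelabelings` rewrite the `service` label into a `cluster` label. A quick smoke test before wiring up the ServiceMonitor — a sketch, assuming the edge node IPs from the manifest are reachable from where you run it:

```
# Sketch: confirm an edge endpoint actually serves metrics before Prometheus scrapes it.
# 18.156.119.236 is cluster-one's IP from the manifest above; substitute your own.
curl -sf http://18.156.119.236:9100/metrics | head    # node-exporter
curl -sf http://18.156.119.236:30080/metrics | head   # kube-state-metrics (NodePort)
```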