├── README.md
├── harvester_rancher.sh
├── classifications.sh
├── stigs_tldr.md
├── rke2.sh
├── deprecated_harv.sh
└── functions.sh

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# My Full Demo Stack

These scripts simplify building a demo stack for play and profit. With them you will get:

* [DigitalOcean](https://digitalocean.com) - VMs
* [Rocky Linux](https://rockylinux.org/)
* [RKE2](https://docs.rke2.io/) - RKE2 Kube / [K3s](http://k3s.io) - K3s Kube
* [Rancher](https://rancher.com/products/rancher) - Rancher Cluster Manager
* [Longhorn](https://longhorn.io) - Stateful storage
* [Minio](https://min.io) - S3 object store
* [Gitea](https://gitea.io/en-us/) - Version Control
* [KeyCloak](https://keycloak.org) - Authentication
* [Harbor](https://goharbor.io) - Registry

Please pay attention to the variables at the top of the scripts.

If you have any questions, please feel free to create an issue or email me at clemenko@gmail.com.
19 | -------------------------------------------------------------------------------- /harvester_rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # this does something with Rancher and Harester 3 | 4 | # rancher url 5 | rancher_url=rancher.rfed.io 6 | 7 | # harvester vip/url 8 | harvester_url=192.168.1.4 9 | 10 | # get rancher token 11 | token=$(curl -sk -X POST https://$rancher_urlv3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"Pa22word"}' | jq -r .token) 12 | 13 | # create havester connection 14 | curl -sk https://$rancher_urlv1/provisioning.cattle.io.clusters -H "Authorization: Bearer $token" -X POST -H 'content-type: application/json' -d '{"type":"provisioning.cattle.io.cluster","metadata":{"namespace":"fleet-default","name":"ms01","labels":{"provider.cattle.io":"harvester"}},"cachedHarvesterClusterVersion":"","spec":{"agentEnvVars":[]}}' 15 | 16 | # get client url 17 | client_url=$(kubectl get clusterregistrationtokens.management.cattle.io -n $(kubectl get ns | grep "c-m" | awk '{ print $1}') default-token -o json | jq -r .status.manifestUrl) 18 | 19 | # get harvester token 20 | token=$(curl -sk -X POST https://$harvester_url/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"Pa22word"}' | jq -r .token) 21 | 22 | # get kubeconfig for harvester 23 | curl -sk https://$harvester_url/v1/management.cattle.io.clusters/local?action=generateKubeconfig -H "Authorization: Bearer $token" -X POST -H 'content-type: application/json' | jq -r .config > $harvester_url.yaml 24 | 25 | # use kubeconfig for creating url 26 | export KUBECONFIG=$harvester_url.yaml 27 | 28 | # apply it 29 | cat < /dev/null 2>&1 30 | apiVersion: harvesterhci.io/v1beta1 31 | kind: Setting 32 | metadata: 33 | name: cluster-registration-url 34 | status: 35 | value: $client_url 36 | EOF 37 | 38 | # 
clean up 39 | unset KUBECONFIG 40 | rm -rf $harvester_url.yaml 41 | 42 | -------------------------------------------------------------------------------- /classifications.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # clemenko@gmail.com 3 | #here is how to use the API to push a logon banner as well as header and footers for classification. 4 | # https://www.astrouxds.com/components/classification-markings/ 5 | 6 | class=$1 7 | 8 | if [ -z $class ]; then 9 | echo "$RED [warn]$NORMAL Please ensure you have kubeconfig and classification to the command." 10 | echo " $BLUE Use:$NORMAL $0 " 11 | echo " $BLUE Use:$NORMAL $0 TS " 12 | exit 13 | fi 14 | 15 | # check for kubctl 16 | command -v kubectl >/dev/null 2>&1 || { echo -e "$RED" " ** Kubectl was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 17 | 18 | # check for kubeconfig 19 | if [ $(kubectl get ns cattle-system --no-headers | wc -l) != "1" ]; then echo -e "$RED" " ** kubeconfig was not found. Please install. 
** " "$NO_COLOR" >&2; exit 1; fi 20 | 21 | #gov logon message 22 | export govmessage=$(cat < /dev/null 2>&1 31 | apiVersion: management.cattle.io/v3 32 | kind: Setting 33 | metadata: 34 | name: ui-banners 35 | value: '{"bannerHeader":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerFooter":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"$govmessage","button":"Accept"},"showHeader":"true","showFooter":"true","showConsent":"true"}' 36 | EOF 37 | ;; 38 | 39 | TS | ts ) 40 | #top secret 41 | cat < /dev/null 2>&1 42 | apiVersion: management.cattle.io/v3 43 | kind: Setting 44 | metadata: 45 | name: ui-banners 46 | value: '{"bannerHeader":{"background":"#fce83a","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"TOP SECRET//SCI"},"bannerFooter":{"background":"#fce83a","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"TOP SECRET//SCI"},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"$govmessage","button":"Accept"},"showHeader":"true","showFooter":"true","showConsent":"true"}' 47 | EOF 48 | ;; 49 | 50 | clear ) 51 | cat < /dev/null 2>&1 52 | apiVersion: management.cattle.io/v3 53 | kind: Setting 54 | metadata: 55 | name: ui-banners 56 | value: 
'{"bannerHeader":{"background":"#ffffff","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":""},"bannerFooter":{"background":"#ffffff","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":""},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"","button":"Accept"},"showHeader":"false","showFooter":"false","showConsent":"false"}' 57 | EOF 58 | ;; 59 | 60 | *) echo "Usage: $0 {clear | TS | U }"; exit 1 61 | 62 | esac 63 | -------------------------------------------------------------------------------- /stigs_tldr.md: -------------------------------------------------------------------------------- 1 | # STIG's tl:dr 2 | 3 | Simply go to https://public.cyber.mil/stigs/downloads/ and search for `rancher`. 4 | 5 | Direct Downloads 6 | 7 | - https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_RGS_MCM_V1R3_STIG.zip 8 | - https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_RGS_RKE2_V1R5_STIG.zip 9 | - https://public.cyber.mil/stigs/srg-stig-tools/ 10 | 11 | ## RKE2 STIG tl:dr 12 | 13 | ### config.yaml 14 | 15 | `/etc/rancher/rke2/config.yaml` 16 | 17 | ```yaml 18 | profile: cis # for 1.28 and older cis-1.23 19 | selinux: true 20 | secrets-encryption: true 21 | token: bootstrapAllTheThings 22 | tls-san: 23 | - rke.rfed.io 24 | write-kubeconfig-mode: 0600 25 | use-service-account-credentials: true 26 | pod-security-admission-config-file: /etc/rancher/rke2/rancher-psact.yaml 27 | kube-controller-manager-arg: 28 | - bind-address=127.0.0.1 29 | - use-service-account-credentials=true 30 | - tls-min-version=VersionTLS12 31 | - tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 32 | kube-scheduler-arg: 33 | - tls-min-version=VersionTLS12 
34 | - tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 35 | kube-apiserver-arg: 36 | - tls-min-version=VersionTLS12 37 | - tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 38 | - authorization-mode=RBAC,Node 39 | - anonymous-auth=false 40 | - audit-policy-file=/etc/rancher/rke2/audit-policy.yaml 41 | - audit-log-mode=blocking-strict 42 | - audit-log-maxage=30 43 | - audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log 44 | kubelet-arg: 45 | - kube-reserved=cpu=400m,memory=1Gi 46 | - system-reserved=cpu=400m,memory=1Gi 47 | - protect-kernel-defaults=true 48 | - read-only-port=0 49 | - authorization-mode=Webhook 50 | - streaming-connection-idle-timeout=5m 51 | - max-pods=400 52 | ``` 53 | 54 | ### Audit Policy 55 | 56 | `/etc/rancher/rke2/audit-policy.yaml` 57 | ```yaml 58 | apiVersion: audit.k8s.io/v1 59 | kind: Policy 60 | metadata: 61 | name: rke2-audit-policy 62 | rules: 63 | - level: Metadata 64 | resources: 65 | - group: "" 66 | resources: ["secrets"] 67 | - level: RequestResponse 68 | resources: 69 | - group: "" 70 | resources: ["*"] 71 | ``` 72 | 73 | ### PSA 74 | 75 | Keep in mind that the namespace can be labeled for the correct PSP after it is created. 76 | 77 | `kubectl label ns $NAMESPACE pod-security.kubernetes.io/audit=privileged pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=privileged` 78 | 79 | OR add it to `/etc/rancher/rke2/rancher-psact.yaml`. 
80 | 81 | ```yaml 82 | apiVersion: apiserver.config.k8s.io/v1 83 | kind: AdmissionConfiguration 84 | plugins: 85 | - name: PodSecurity 86 | configuration: 87 | apiVersion: pod-security.admission.config.k8s.io/v1 88 | kind: PodSecurityConfiguration 89 | defaults: 90 | enforce: "restricted" 91 | enforce-version: "latest" 92 | audit: "restricted" 93 | audit-version: "latest" 94 | warn: "restricted" 95 | warn-version: "latest" 96 | exemptions: 97 | usernames: [] 98 | runtimeClasses: [] 99 | namespaces: [calico-apiserver, 100 | calico-system, 101 | carbide-docs-system, 102 | carbide-stigatron-system, 103 | cattle-alerting, 104 | cattle-csp-adapter-system, 105 | cattle-elemental-system, 106 | cattle-epinio-system, 107 | cattle-externalip-system, 108 | cattle-fleet-local-system, 109 | cattle-fleet-system, 110 | cattle-gatekeeper-system, 111 | cattle-global-data, 112 | cattle-global-nt, 113 | cattle-impersonation-system, 114 | cattle-istio, 115 | cattle-istio-system, 116 | cattle-logging, 117 | cattle-logging-system, 118 | cattle-monitoring-system, 119 | cattle-neuvector-system, 120 | cattle-prometheus, 121 | cattle-provisioning-capi-system, 122 | cattle-resources-system, 123 | cattle-sriov-system, 124 | cattle-system, 125 | cattle-ui-plugin-system, 126 | cattle-windows-gmsa-system, 127 | cert-manager, 128 | cis-operator-system, 129 | fleet-default, 130 | fleet-local, 131 | ingress-nginx, 132 | istio-system, 133 | kube-node-lease, 134 | kube-public, 135 | kube-system, 136 | longhorn-system, 137 | rancher-alerting-drivers, 138 | security-scan, 139 | tigera-operator, 140 | neuvector, 141 | flask, 142 | ghost, 143 | kubecon, 144 | minio, 145 | whoami, 146 | harbor, 147 | gitea, 148 | tailscale, 149 | gitness, 150 | stackrox, 151 | keycloak] 152 | ``` 153 | 154 | ## Rancher STIG tl:dr 155 | 156 | From 157 | - https://ranchermanager.docs.rancher.com/v2.8/how-to-guides/advanced-user-guides/enable-api-audit-log 158 | - 
https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/installation-references/helm-chart-options#advanced-options 159 | 160 | ```bash 161 | helm upgrade -i rancher rancher-latest/rancher -n cattle-system --create-namespace --set hostname=rancher.$domain --set bootstrapPassword=bootStrapAllTheThings --set auditLog.level=2 --set auditLog.destination=hostPath --set auditLog.hostPath=/var/log/rancher/audit --set auditLog.maxAge=30 --set antiAffinity=required -------------------------------------------------------------------------------- /rke2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # https://github.com/clemenko/rke2 4 | # this script assumes digitalocean is setup with DNS. 5 | # you need doctl, kubectl, uuid, jq, k3sup, pdsh and curl installed. 6 | # clemenko@gmail.com 7 | 8 | ################################### 9 | # edit varsw 10 | ################################### 11 | set -e 12 | num=3 13 | password=Pa22word 14 | zone=nyc1 15 | size=s-4vcpu-8gb 16 | # s-8vcpu-16gb 17 | domain=rfed.io 18 | 19 | #image=ubuntu-22-04-x64 20 | image=rockylinux-9-x64 21 | 22 | # rancher / k8s 23 | prefix=rke # no rke k3s 24 | k8s_version=stable #latest 25 | # curl -s https://update.rke2.io/v1-release/channels | jq '.data[] | select(.id=="stable") | .latest' 26 | 27 | ###### NO MOAR EDITS ####### 28 | #export PDSH_RCMD_TYPE=ssh 29 | 30 | #better error checking 31 | command -v doctl >/dev/null 2>&1 || { fatal "Doctl was not found. Please install" ; } 32 | command -v curl >/dev/null 2>&1 || { fatal "Curl was not found. Please install" ; } 33 | command -v jq >/dev/null 2>&1 || { fatal "Jq was not found. Please install" ; } 34 | command -v pdsh >/dev/null 2>&1 || { fatal "Pdsh was not found. Please install" ; } 35 | command -v k3sup >/dev/null 2>&1 || { fatal "K3sup was not found. Please install" ; } 36 | command -v kubectl >/dev/null 2>&1 || { fatal "Kubectl was not found. 
Please install" ; } 37 | 38 | #### doctl_list #### 39 | function dolist () { doctl compute droplet list --no-header|grep $prefix |sort -k 2; } 40 | 41 | source functions.sh 42 | 43 | # update helm 44 | helm repo update > /dev/null 2>&1 45 | 46 | ################################# up ################################ 47 | function up () { 48 | build_list="" 49 | # helm repo update > /dev/null 2>&1 50 | 51 | if [[ -n "$(dolist)" ]]; then 52 | fatal "Warning - cluster already detected..." 53 | exit 54 | fi 55 | 56 | #rando list generation 57 | for i in $(seq 1 $num); do build_list="$build_list $prefix$i"; done 58 | 59 | #build VMS 60 | echo -e -n " - building vms -$build_list" 61 | doctl compute droplet create $build_list --region $zone --image $image --size $size --ssh-keys 30:98:4f:c5:47:c2:88:28:fe:3c:23:cd:52:49:51:01 --wait > /dev/null 2>&1 || fatal "vms did not build" 62 | info_ok 63 | 64 | #check for SSH 65 | echo -e -n " - checking for ssh " 66 | for ext in $(dolist | awk '{print $3}'); do 67 | until [ $(ssh -o ConnectTimeout=1 root@$ext 'exit' 2>&1 | grep 'timed out\|refused' | wc -l) = 0 ]; do echo -e -n "." ; sleep 5; done 68 | done 69 | info_ok 70 | 71 | #get ips 72 | host_list=$(dolist | awk '{printf $3","}' | sed 's/,$//') 73 | server=$(dolist | sed -n 1p | awk '{print $3}') 74 | worker_list=$(dolist | sed 1d | awk '{printf $3","}' | sed 's/,$//') 75 | 76 | #update DNS 77 | echo -e -n " - updating dns" 78 | doctl compute domain records create $domain --record-type A --record-name $prefix --record-ttl 60 --record-data $server > /dev/null 2>&1 79 | doctl compute domain records create $domain --record-type CNAME --record-name "*" --record-ttl 60 --record-data $prefix.$domain. 
> /dev/null 2>&1 80 | info_ok 81 | 82 | sleep 10 83 | 84 | #host modifications 85 | if [[ "$image" = *"ubuntu"* ]]; then 86 | echo -e -n " - adding os packages" 87 | pdsh -l root -w $host_list 'mkdir -p /opt/kube; systemctl stop ufw; systemctl disable ufw; echo -e "PubkeyAcceptedKeyTypes=+ssh-rsa" >> /etc/ssh/sshd_config; systemctl restart sshd; export DEBIAN_FRONTEND=noninteractive; apt update; apt install nfs-common -y; #apt upgrade -y; apt autoremove -y' > /dev/null 2>&1 88 | info_ok 89 | fi 90 | 91 | if [[ "$image" = *"centos"* || "$image" = *"rocky"* || "$image" = *"alma"* ]]; then centos_packages; fi 92 | 93 | #kernel tuning from functions 94 | kernel 95 | 96 | #or deploy k3s 97 | if [ "$prefix" != k3s ] && [ "$prefix" != rke ]; then exit; fi 98 | 99 | if [ "$prefix" = k3s ]; then 100 | echo -e -n " - deploying k3s" 101 | k3sup install --ip $server --user root --cluster --k3s-extra-args '' --k3s-channel $k8s_version --local-path ~/.kube/config > /dev/null 2>&1 102 | # --k3s-extra-args '--disable traefik' 103 | 104 | for workeri in $(dolist | sed 1d | awk '{print $3}'); do 105 | k3sup join --ip $workeri --server-ip $server --user root --k3s-extra-args '' --k3s-channel $k8s_version > /dev/null 2>&1 106 | done 107 | 108 | info_ok 109 | fi 110 | 111 | #or deploy rke2 112 | if [ "$prefix" = rke ]; then 113 | echo -e -n "$BLUE" "deploying rke2" "$NO_COLOR" 114 | 115 | # systemctl disable nm-cloud-setup.service nm-cloud-setup.timer 116 | 117 | ssh root@$server 'mkdir -p /var/lib/rancher/rke2/server/manifests/ /etc/rancher/rke2/; useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U; echo -e "apiVersion: audit.k8s.io/v1\nkind: Policy\nmetadata:\n name: rke2-audit-policy\nrules:\n - level: Metadata\n resources:\n - group: \"\"\n resources: [\"secrets\"]\n - level: RequestResponse\n resources:\n - group: \"\"\n resources: [\"*\"]" > /etc/rancher/rke2/audit-policy.yaml; echo -e "#profile: cis\n#selinux: true\nsecrets-encryption: true\ntoken: 
bootstrapAllTheThings\ntls-san:\n- rke."'$domain'"\nwrite-kubeconfig-mode: 0600\n#pod-security-admission-config-file: /etc/rancher/rke2/rancher-psact.yaml\nkube-controller-manager-arg:\n- bind-address=127.0.0.1\n- use-service-account-credentials=true\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\nkube-scheduler-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\nkube-apiserver-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- authorization-mode=RBAC,Node\n- anonymous-auth=false\n- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml\n- audit-log-mode=blocking-strict\n- audit-log-maxage=30\nkubelet-arg:\n- kube-reserved=cpu=400m,memory=1Gi\n- system-reserved=cpu=400m,memory=1Gi\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook\n- streaming-connection-idle-timeout=5m\n- max-pods=400" > /etc/rancher/rke2/config.yaml; curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/rancher-psact.yaml -o /etc/rancher/rke2/rancher-psact.yaml ; echo -e "apiVersion: helm.cattle.io/v1\nkind: HelmChartConfig\nmetadata:\n name: rke2-ingress-nginx\n namespace: kube-system\nspec:\n valuesContent: |-\n controller:\n config:\n use-forwarded-headers: true\n extraArgs:\n enable-ssl-passthrough: true" > /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$k8s_version' sh - ; systemctl enable --now rke2-server.service' > /dev/null 2>&1 118 | 119 | sleep 15 120 | 121 | pdsh -l root -w $worker_list 'curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$k8s_version' INSTALL_RKE2_TYPE=agent sh - && echo -e 
"selinux: true\nserver: https://"'$server'":9345\ntoken: bootstrapAllTheThings\nprofile: cis\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook" > /etc/rancher/rke2/config.yaml; systemctl enable --now rke2-agent.service' > /dev/null 2>&1 122 | 123 | ssh root@$server cat /etc/rancher/rke2/rke2.yaml | sed -e "s/127.0.0.1/$server/g" > ~/.kube/config 124 | chmod 0600 ~/.kube/config 125 | 126 | info_ok 127 | fi 128 | 129 | echo -e -n " - cluster active " 130 | sleep 10 131 | until [ $(kubectl get node|grep NotReady|wc -l) = 0 ]; do echo -e -n "."; sleep 2; done 132 | sleep 10 133 | info_ok 134 | } 135 | 136 | ############################## kill ################################ 137 | #remove the vms 138 | function kill () { 139 | 140 | if [ ! -z $(dolist | awk '{printf $3","}' | sed 's/,$//') ]; then 141 | echo -e -n " killing it all " 142 | for i in $(dolist | awk '{print $2}'); do doctl compute droplet delete --force $i; done 143 | for i in $(dolist | awk '{print $3}'); do ssh-keygen -q -R $i > /dev/null 2>&1; done 144 | for i in $(doctl compute domain records list $domain|grep $prefix |awk '{print $1}'); do doctl compute domain records delete -f $domain $i; done 145 | until [ $(dolist | wc -l | sed 's/ //g') == 0 ]; do echo -e -n "."; sleep 2; done 146 | for i in $(doctl compute volume list --no-header |awk '{print $1}'); do doctl compute volume delete -f $i; done 147 | 148 | rm -rf *.txt *.log *.zip *.pub env.* certs backup.tar ~/.kube/config central* sensor* *token kubeconfig *TOKEN 149 | 150 | else 151 | echo -e -n " no cluster found " 152 | fi 153 | 154 | info_ok 155 | } 156 | 157 | case "$1" in 158 | up) up;; 159 | kill) kill;; 160 | px) portworx;; 161 | dolist) dolist;; 162 | keycloak) keycloak;; 163 | longhorn) longhorn;; 164 | rancher) rancher;; 165 | demo) demo;; 166 | fleet) fleet;; 167 | *) usage;; 168 | esac 169 | -------------------------------------------------------------------------------- 
/deprecated_harv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # this script assumes you have harvester setup 4 | # you need harvester, kubectl, uuid, jq, k3sup, pdsh and curl installed. 5 | # clemenko@gmail.com 6 | 7 | ################################### 8 | # edit varsw 9 | ################################### 10 | set -e 11 | num=6 12 | zone=nyc1 13 | size=s-4vcpu-8gb-amd 14 | key=30:98:4f:c5:47:c2:88:28:fe:3c:23:cd:52:49:51:01 15 | image=rockylinux-9-x64 16 | 17 | export password=Pa22word 18 | export domain=rfed.io 19 | 20 | template=rocky 21 | 22 | # rancher / k8s 23 | prefix=rke- # no rke k3s 24 | rke2_channel=v1.27 #latest 25 | export TOKEN=fuzzybunnyslippers 26 | 27 | # Carbide creds 28 | export CARBIDE=false # or true to enable carbide 29 | export CARBIDEUSER=andy-clemenko-read-token 30 | #export CARBIDEPASS= # set on the command line 31 | 32 | ###### NO MOAR EDITS ####### 33 | export RED='\x1b[0;31m' 34 | export GREEN='\x1b[32m' 35 | export BLUE='\x1b[34m' 36 | export YELLOW='\x1b[33m' 37 | export NO_COLOR='\x1b[0m' 38 | export PDSH_RCMD_TYPE=ssh 39 | 40 | #better error checking 41 | command -v curl >/dev/null 2>&1 || { echo -e "$RED" " ** Curl was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 42 | command -v jq >/dev/null 2>&1 || { echo -e "$RED" " ** Jq was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 43 | command -v pdsh >/dev/null 2>&1 || { echo -e "$RED" " ** Pdsh was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 44 | command -v kubectl >/dev/null 2>&1 || { echo -e "$RED" " ** Kubectl was not found. Please install. 
** " "$NO_COLOR" >&2; exit 1; } 45 | 46 | #### doctl_list #### 47 | #function dolist () { harvester vm |grep -v NAME | grep Run | grep $prefix| awk '{print $1" "$2" "$6" "$4" "$5}'; } 48 | function dolist () { doctl compute droplet list --no-header|grep $prefix |sort -k 2; } 49 | 50 | source functions.sh 51 | 52 | ################################# up ################################ 53 | function up () { 54 | build_list="" 55 | # helm repo update > /dev/null 2>&1 56 | 57 | if [ ! -z $(dolist) ]; then 58 | echo -e "$RED" "Warning - cluster already detected..." "$NO_COLOR" 59 | exit 60 | fi 61 | 62 | #build VMS 63 | echo -e -n " building vms -$build_list " 64 | #harvester vm create --template $template --count $num rke > /dev/null 2>&1 65 | #until [ $(dolist | grep "192.168" | wc -l) = $num ]; do echo -e -n "." ; sleep 2; done 66 | #sleep 10 67 | for i in $(seq 1 $num); do build_list="$build_list $prefix$i"; done 68 | doctl compute droplet create $build_list --region $zone --image $image --size $size --ssh-keys $key --wait --droplet-agent=false > /dev/null 2>&1 69 | 70 | echo -e "$GREEN" "ok" "$NO_COLOR" 71 | 72 | #check for SSH 73 | echo -e -n " checking for ssh " 74 | for ext in $(dolist | awk '{print $3}'); do 75 | until [ $(ssh -o ConnectTimeout=1 root@$ext 'exit' 2>&1 | grep 'timed out\|refused' | wc -l) = 0 ]; do echo -e -n "." 
; sleep 5; done 76 | done 77 | echo -e "$GREEN" "ok" "$NO_COLOR" 78 | 79 | #get ips 80 | host_list=$(dolist | awk '{printf $3","}' | sed 's/,$//') 81 | server=$(dolist | sed -n 1p | awk '{print $3}') 82 | worker_list=$(dolist | sed 1d | awk '{printf $3","}' | sed 's/,$//') 83 | 84 | # update node list 85 | node1=$(dolist | sed -n 1p | awk '{print $3}') 86 | node2=$(dolist | sed -n 2p | awk '{print $3}') 87 | node3=$(dolist | sed -n 3p | awk '{print $3}') 88 | worker_list=$(dolist | sed '1,3d' | awk '{printf $3","}' | sed -e 's/,$//') 89 | 90 | # update DNS 91 | echo -e -n " updating dns" 92 | doctl compute domain records create $domain --record-type A --record-name $prefix"1" --record-ttl 60 --record-data $node1 > /dev/null 2>&1 93 | doctl compute domain records create $domain --record-type A --record-name rke --record-ttl 60 --record-data $node1 > /dev/null 2>&1 94 | doctl compute domain records create $domain --record-type A --record-name rke --record-ttl 60 --record-data $node2 > /dev/null 2>&1 95 | doctl compute domain records create $domain --record-type A --record-name rke --record-ttl 60 --record-data $node3 > /dev/null 2>&1 96 | doctl compute domain records create $domain --record-type CNAME --record-name "*" --record-ttl 60 --record-data rke.$domain. 
> /dev/null 2>&1 97 | echo -e "$GREEN" "ok" "$NO_COLOR" 98 | 99 | sleep 10 100 | 101 | centos_packages 102 | 103 | #carbide_reg 104 | 105 | kernel 106 | 107 | #or deploy k3s 108 | if [ "$prefix" != rke- ]; then exit; fi 109 | 110 | echo -e -n " deploying rke2 " 111 | ssh root@$node1 'mkdir -p /etc/rancher/rke2/ /var/lib/rancher/rke2/server/manifests/; useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U; echo -e "apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n- level: RequestResponse" > /etc/rancher/rke2/audit-policy.yaml; echo -e "#profile: cis-1.6\nselinux: true\ntoken: '$TOKEN'\nsecrets-encryption: true\ntls-san:\n- rke.'$domain'\nwrite-kubeconfig-mode: 0600\nuse-service-account-credentials: true\nkube-controller-manager-arg:\n- bind-address=127.0.0.1\n- use-service-account-credentials=true\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\nkube-apiserver-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- authorization-mode=RBAC,Node\n- anonymous-auth=false\n- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml\n- audit-log-mode=blocking-strict\n- audit-log-maxage=30\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook\n- streaming-connection-idle-timeout=5m" > /etc/rancher/rke2/config.yaml ; echo -e "apiVersion: helm.cattle.io/v1\nkind: HelmChartConfig\nmetadata:\n name: rke2-ingress-nginx\n namespace: kube-system\nspec:\n valuesContent: |-\n controller:\n config:\n use-forwarded-headers: true\n extraArgs:\n enable-ssl-passthrough: true" > /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$rke2_channel' sh - && 
systemctl enable --now rke2-server.service' > /dev/null 2>&1 112 | 113 | sleep 10 114 | 115 | pdsh -l root -w $node2,$node3 'mkdir -p /etc/rancher/rke2/ /var/lib/rancher/rke2/server/manifests/; useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U; echo -e "apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n- level: RequestResponse" > /etc/rancher/rke2/audit-policy.yaml; echo -e "server: https://'$node1':9345\ntoken: '$TOKEN'\n#profile: cis-1.6\nselinux: true\nsecrets-encryption: true\ntls-san:\n- rke.'$domain'\nwrite-kubeconfig-mode: 0600\nkube-controller-manager-arg:\n- bind-address=127.0.0.1\n- use-service-account-credentials=true\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\nkube-apiserver-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- authorization-mode=RBAC,Node\n- anonymous-auth=false\n- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml\n- audit-log-mode=blocking-strict\n- audit-log-maxage=30\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook" > /etc/rancher/rke2/config.yaml ; echo -e "apiVersion: helm.cattle.io/v1\nkind: HelmChartConfig\nmetadata:\n name: rke2-ingress-nginx\n namespace: kube-system\nspec:\n valuesContent: |-\n controller:\n config:\n use-forwarded-headers: true\n extraArgs:\n enable-ssl-passthrough: true" > /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$rke2_channel' sh - && systemctl enable --now rke2-server.service' > /dev/null 2>&1 116 | 117 | sleep 10 118 | 119 | pdsh -l root -w $worker_list 'yum install -y 
http://dl.rockylinux.org/pub/rocky/9.1/AppStream/x86_64/os/Packages/c/container-selinux-2.189.0-1.el9.noarch.rpm; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$rke2_channel' INSTALL_RKE2_TYPE=agent sh - && mkdir -p /etc/rancher/rke2/ && echo -e "selinux: true\nserver: https://rke.'$domain':9345\ntoken: '$TOKEN'\nwrite-kubeconfig-mode: 0600\n#profile: cis-1.6\nkube-apiserver-arg:\n- authorization-mode=RBAC,Node\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook" > /etc/rancher/rke2/config.yaml && systemctl enable --now rke2-agent.service' > /dev/null 2>&1 120 | 121 | ssh root@$server cat /etc/rancher/rke2/rke2.yaml | sed -e "s/127.0.0.1/$server/g" > ~/.kube/config 122 | chmod 0600 ~/.kube/config 123 | 124 | echo -e "$GREEN" "ok" "$NO_COLOR" 125 | 126 | echo -e -n " - cluster active " 127 | sleep 5 128 | until [ $(kubectl get node|grep NotReady|wc -l) = 0 ]; do echo -e -n "."; sleep 2; done 129 | echo -e "$GREEN" "ok" "$NO_COLOR" 130 | } 131 | 132 | ############################## kill ################################ 133 | #remove the vms 134 | function kill () { 135 | 136 | if [ ! 
-z $(dolist | awk '{printf $3","}' | sed 's/,$//') ]; then 137 | echo -e -n " killing it all " 138 | harvester vm delete $(harvester vm |grep -v NAME | grep $prefix | awk '{printf $2" "}') > /dev/null 2>&1 139 | for i in $(dolist | awk '{print $3}'); do ssh-keygen -q -R $i > /dev/null 2>&1; done 140 | for i in $(doctl compute domain records list $domain|grep rke |awk '{print $1}'); do doctl compute domain records delete -f $domain $i; done 141 | until [ $(dolist | wc -l | sed 's/ //g') == 0 ]; do echo -e -n "."; sleep 2; done 142 | for i in $(doctl compute volume list --no-header |awk '{print $1}'); do doctl compute volume delete -f $i; done 143 | 144 | rm -rf *.txt *.log *.zip *.pub env.* certs backup.tar ~/.kube/config central* sensor* *token kubeconfig *TOKEN 145 | 146 | else 147 | echo -e -n " no cluster found " 148 | fi 149 | 150 | echo -e "$GREEN" "ok" "$NO_COLOR" 151 | } 152 | 153 | case "$1" in 154 | up) up;; 155 | tl) up && traefik && longhorn;; 156 | kill) kill;; 157 | rox) rox;; 158 | neu) neu;; 159 | dolist) dolist;; 160 | traefik) traefik;; 161 | keycloak) keycloak;; 162 | longhorn) longhorn;; 163 | rancher) rancher;; 164 | demo) demo;; 165 | fleet) fleet;; 166 | *) usage;; 167 | esac 168 | -------------------------------------------------------------------------------- /functions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # functions 4 | # color 5 | export RED='\x1b[0;31m' 6 | export GREEN='\x1b[32m' 7 | export BLUE='\x1b[34m' 8 | export YELLOW='\x1b[33m' 9 | export NO_COLOR='\x1b[0m' 10 | 11 | # set functions for debugging/logging 12 | function info { echo -e "$GREEN[info]$NO_COLOR $1" ; } 13 | function warn { echo -e "$YELLOW[warn]$NO_COLOR $1" ; } 14 | function fatal { echo -e "$RED[error]$NO_COLOR $1" ; exit 1 ; } 15 | function info_ok { echo -e "$GREEN"" ok""$NO_COLOR" ; } 16 | 17 | #gov logon message 18 | export govmessage=$(cat < 
/etc/NetworkManager/conf.d/rke2-canal.conf; yum install -y nfs-utils cryptsetup iscsi-initiator-utils iptables-services iptables-utils device-mapper-multipath; systemctl enable --now iscsid; yum update openssh -y; #yum update -y' > /dev/null 2>&1 48 | info_ok 49 | } 50 | 51 | ############################# kernel ################################ 52 | function kernel () { 53 | #kernel tuning 54 | echo -e -n " - updating kernel settings" 55 | pdsh -l root -w $host_list 'cat << EOF >> /etc/sysctl.conf 56 | # SWAP settings 57 | vm.swappiness=0 58 | vm.panic_on_oom=0 59 | vm.overcommit_memory=1 60 | kernel.panic=10 61 | kernel.panic_on_oops=1 62 | vm.max_map_count = 262144 63 | net.ipv4.ip_local_port_range=1024 65000 64 | net.core.somaxconn=10000 65 | net.ipv4.tcp_tw_reuse=1 66 | net.ipv4.tcp_fin_timeout=15 67 | net.core.somaxconn=4096 68 | net.core.netdev_max_backlog=4096 69 | net.core.rmem_max=536870912 70 | net.core.wmem_max=536870912 71 | net.ipv4.tcp_max_syn_backlog=20480 72 | net.ipv4.tcp_max_tw_buckets=400000 73 | net.ipv4.tcp_no_metrics_save=1 74 | net.ipv4.tcp_rmem=4096 87380 268435456 75 | net.ipv4.tcp_wmem=4096 87380 268435456 76 | net.ipv4.tcp_syn_retries=2 77 | net.ipv4.tcp_synack_retries=2 78 | net.ipv4.neigh.default.gc_thresh1=8096 79 | net.ipv4.neigh.default.gc_thresh2=12288 80 | net.ipv4.neigh.default.gc_thresh3=16384 81 | net.ipv4.tcp_keepalive_time=600 82 | net.ipv4.ip_forward=1 83 | fs.inotify.max_user_instances=8192 84 | fs.inotify.max_user_watches=1048576 85 | net.ipv6.conf.all.disable_ipv6 = 1 86 | net.ipv6.conf.default.disable_ipv6 = 1 87 | EOF 88 | sysctl -p' > /dev/null 2>&1 89 | info_ok 90 | } 91 | 92 | ################################ portworx ############################## 93 | function portworx () { 94 | 95 | # from https://gist.github.com/clemenko/00dcbb344476aafda18dbae207952d71 96 | 97 | # add volumes 98 | echo -e -n " - px - checking volumes" 99 | if [ "$(doctl compute volume list --no-header | wc -l | xargs )" != 0 ]; then echo -e -n " 
"$GREEN"- detected -"; 100 | 101 | else 102 | echo -e -n " - adding" 103 | for num in 1 2 3; do 104 | doctl compute volume-action attach $(doctl compute volume create port$num --region nyc1 --size 60GiB | grep -v ID| awk '{print $1}') $(doctl compute droplet list | grep rke$num | awk '{print $1}') > /dev/null 2>&1 105 | done 106 | fi 107 | 108 | info_ok 109 | 110 | echo -e -n " - px - adding operator and storagecluster - "$RED"can take about 15 min"$NO_COLOR"" 111 | # operator 112 | echo -e -n " ." 113 | kubectl apply -f 'https://install.portworx.com/3.3?comp=pxoperator&kbver=1.31.0&ns=portworx' > /dev/null 2>&1 114 | sleep 15 115 | echo -e -n " ." 116 | kubectl wait --for condition=containersready -n portworx pod --all > /dev/null 2>&1 117 | 118 | # StorageCluster spec 119 | echo -e -n " ." 120 | kubectl apply -f 'https://install.portworx.com/3.3?operator=true&mc=false&kbver=1.31.0&ns=portworx&b=true&iop=6&c=px-cluster1&stork=true&csi=true&mon=true&tel=false&st=k8s&promop=true' > /dev/null 2>&1 121 | sleep 60 122 | echo -e -n " ." 123 | kubectl wait --for condition=Ready -n portworx pod --all --timeout=60000s > /dev/null 2>&1 124 | 125 | # make a default storage class 126 | echo -e -n " ." 
127 | until [ $(kubectl get sc | grep px-csi | wc -l | xargs) = 8 ]; do sleep 5; done 128 | kubectl patch storageclass px-csi-db -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' > /dev/null 2>&1 129 | 130 | info_ok 131 | 132 | echo -e -n " - px - adding central" 133 | 134 | helm upgrade -i px-central px-central --repo http://charts.portworx.io/ -n px-central --create-namespace --set persistentStorage.enabled=true,persistentStorage.storageClassName="px-csi-db",service.pxCentralUIServiceType="ClusterIP",pxbackup.enabled=true,pxmonitor.enabled=false,installCRDs=true > /dev/null 2>&1 135 | 136 | until [ $(kubectl get pod -n px-central | wc -l | xargs ) -gt 16 ]; do sleep 5; echo -e -n "."; done 137 | 138 | cat < /dev/null 2>&1 139 | apiVersion: networking.k8s.io/v1 140 | kind: Ingress 141 | metadata: 142 | name: px-central-ui 143 | namespace: px-central 144 | spec: 145 | rules: 146 | - host: central.$domain 147 | http: 148 | paths: 149 | - backend: 150 | service: 151 | name: px-central-ui 152 | port: 153 | number: 80 154 | path: / 155 | pathType: ImplementationSpecific 156 | EOF 157 | 158 | info_ok 159 | 160 | echo -e -n " - px - adding grafana" 161 | 162 | export PX_URL="https://docs.portworx.com/samples/portworx-enterprise/k8s/pxc" 163 | 164 | # create config maps 165 | kubectl create configmap -n portworx grafana-dashboard-config --from-literal=grafana-dashboard-config.yaml="$(curl -sk $PX_URL/grafana-dashboard-config.yaml)" > /dev/null 2>&1 166 | kubectl create configmap -n portworx grafana-source-config --from-literal=grafana-dashboard-config.yaml="$(curl -sk $PX_URL/grafana-datasource.yaml)" > /dev/null 2>&1 167 | 168 | # dashboards 169 | kubectl -n portworx create configmap grafana-dashboards \ 170 | --from-literal=portworx-cluster-dashboard.json="$(curl -sk $PX_URL/portworx-cluster-dashboard.json)" \ 171 | --from-literal=portworx-performance-dashboard.json="$(curl -sk $PX_URL/portworx-performance-dashboard.json)" \ 172 | 
--from-literal=portworx-node-dashboard.json="$(curl -sk $PX_URL/portworx-node-dashboard.json)" \ 173 | --from-literal=portworx-volume-dashboard.json="$(curl -sk $PX_URL/portworx-volume-dashboard.json)" \ 174 | --from-literal=portworx-etcd-dashboard.json="$(curl -sk $PX_URL/portworx-etcd-dashboard.json)" > /dev/null 2>&1 175 | 176 | # install with ingress 177 | cat << EOF | kubectl apply -n portworx -f - > /dev/null 2>&1 178 | apiVersion: v1 179 | kind: Service 180 | metadata: 181 | name: grafana 182 | labels: 183 | app: grafana 184 | spec: 185 | type: ClusterIP 186 | ports: 187 | - port: 3000 188 | selector: 189 | app: grafana 190 | --- 191 | apiVersion: apps/v1 192 | kind: Deployment 193 | metadata: 194 | name: grafana 195 | labels: 196 | app: grafana 197 | spec: 198 | replicas: 1 199 | selector: 200 | matchLabels: 201 | app: grafana 202 | template: 203 | metadata: 204 | labels: 205 | app: grafana 206 | spec: 207 | containers: 208 | - image: grafana/grafana 209 | name: grafana 210 | imagePullPolicy: IfNotPresent 211 | volumeMounts: 212 | - name: grafana-dash-config 213 | mountPath: /etc/grafana/provisioning/dashboards 214 | - name: dashboard-templates 215 | mountPath: /var/lib/grafana/dashboards 216 | - name: grafana-source-config 217 | mountPath: /etc/grafana/provisioning/datasources 218 | volumes: 219 | - name: grafana-source-config 220 | configMap: 221 | name: grafana-source-config 222 | - name: grafana-dash-config 223 | configMap: 224 | name: grafana-dashboard-config 225 | - name: dashboard-templates 226 | configMap: 227 | name: grafana-dashboards 228 | --- 229 | apiVersion: networking.k8s.io/v1 230 | kind: Ingress 231 | metadata: 232 | name: grafana 233 | spec: 234 | rules: 235 | - host: grafana.$domain 236 | http: 237 | paths: 238 | - backend: 239 | service: 240 | name: grafana 241 | port: 242 | number: 3000 243 | path: / 244 | pathType: ImplementationSpecific 245 | EOF 246 | 247 | info_ok 248 | 249 | 250 | echo -e -n " - px - webservices up" 251 | until [[ 
"$(curl -skL -H "Content-Type: application/json" -o /dev/null -w '%{http_code}' https://central.$domain )" == "200" ]]; do echo -e -n .; sleep 5; done

until [[ "$(curl -skL -H "Content-Type: application/json" -o /dev/null -w '%{http_code}' https://grafana.$domain )" == "200" ]]; do echo -e -n .; sleep 5; done

echo -e ""

info "navigate to - "$BLUE"https://central.$domain "$GREEN"admin / admin"$NO_COLOR""
info "navigate to - "$BLUE"https://grafana.$domain "$GREEN"admin / admin"$NO_COLOR""

}

################################ rancher ##############################
# Deploy cert-manager + Rancher on the current cluster, then bootstrap it:
# set the admin password, mint an API token, set server-url, opt out of
# telemetry and install the classification banners.
function rancher () {

# build a cluster first if there are no droplets and no reachable kube API
if [[ -z $(dolist | awk '{printf $3","}' | sed 's/,$//') ]] && ! kubectl get node > /dev/null 2>&1 ; then
  echo -e "$BLUE" "Building cluster first." "$NO_COLOR"
  up && longhorn
fi

echo -e "$BLUE" "deploying rancher" "$NO_COLOR"

echo -e -n " - helm - cert-manager"
helm upgrade -i cert-manager cert-manager --repo https://charts.jetstack.io -n cert-manager --create-namespace --set crds.enabled=true > /dev/null 2>&1

info_ok

echo -e -n " - helm - rancher"

# custom TLS certs
kubectl create ns cattle-system > /dev/null 2>&1
kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1
kubectl -n cattle-system create secret generic tls-ca --from-file=/Users/clemenko/Dropbox/work/rfed.me/io/cacerts.pem > /dev/null 2>&1

# NOTE: dropped the duplicated "--set antiAffinity=required" (it was passed twice)
helm upgrade -i rancher rancher --repo https://releases.rancher.com/server-charts/latest -n cattle-system --create-namespace --set hostname=rancher.$domain --set bootstrapPassword=bootStrapAllTheThings --set replicas=1 --set auditLog.level=2 --set auditLog.destination=hostPath --set auditLog.hostPath=/var/log/rancher/audit --set auditLog.maxAge=30 --set antiAffinity=required --set ingress.tls.source=secret --set ingress.tls.secretName=tls-rancher-ingress --set privateCA=true --set 'extraEnv[0].name=CATTLE_FEATURES' --set 'extraEnv[0].value=ui-sql-cache=true' > /dev/null 2>&1

info_ok

# wait for rancher's local auth provider to answer
echo -e -n " - waiting for rancher"
until [ $(curl -sk https://rancher.$domain/v3-public/authproviders | grep local | wc -l ) = 1 ]; do
  sleep 2; echo -e -n "."; done

info_ok

echo -e -n " - bootstrapping"
# NOTE(review): "cat < /dev/null 2>&1" looks like a mangled
# "cat <<EOF | kubectl apply -f - > /dev/null 2>&1" (the same pattern the
# repo uses elsewhere) -- confirm against the upstream script
cat < /dev/null 2>&1
apiVersion: management.cattle.io/v3
kind: Setting
metadata:
  name: password-min-length
  namespace: cattle-system
value: "8"
EOF

#set password
token=$(curl -sk -X POST https://rancher.$domain/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"bootStrapAllTheThings"}' | jq -r .token)

curl -sk https://rancher.$domain/v3/users?action=changepassword -H 'content-type: application/json' -H "Authorization: Bearer $token" -d '{"currentPassword":"bootStrapAllTheThings","newPassword":"'$password'"}' > /dev/null 2>&1

# long-lived token for the remaining API calls
api_token=$(curl -sk https://rancher.$domain/v3/token -H 'content-type: application/json' -H "Authorization: Bearer $token" -d '{"type":"token","description":"automation"}' | jq -r .token)

curl -sk https://rancher.$domain/v3/settings/server-url -H 'content-type: application/json' -H "Authorization: Bearer $api_token" -X PUT -d '{"name":"server-url","value":"https://rancher.'$domain'"}' > /dev/null 2>&1

curl -sk https://rancher.$domain/v3/settings/telemetry-opt -X PUT -H 'content-type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $api_token" -d '{"value":"out"}' > /dev/null 2>&1

info_ok

# class banners
cat < /dev/null 2>&1
apiVersion: management.cattle.io/v3
kind: Setting
metadata:
  name: ui-banners
value: '{"bannerHeader":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerFooter":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"$govmessage","button":"Accept"},"showHeader":"true","showFooter":"true","showConsent":"true"}'
EOF

}

################################ longhorn ##############################
# Install Longhorn with its ingress and wait until all pods are Running.
function longhorn () {
echo -e -n " - longhorn"

# to http basic auth --> https://longhorn.io/docs/1.4.1/deploy/accessing-the-ui/longhorn-ingress/

helm upgrade -i longhorn longhorn --repo https://charts.longhorn.io -n longhorn-system --create-namespace --set ingress.enabled=true,ingress.host=longhorn.$domain > /dev/null 2>&1

#,defaultSettings.allowCollectingLonghornUsageMetrics=false,persistence.defaultDataLocality="best-effort" --set persistence.dataEngine=v2 --set defaultSettings.v2DataEngine=true --set defaultSettings.v1DataEngine=false > /dev/null 2>&1

sleep 5

# wait for longhorn to initialize
until [ $(kubectl get pod -n longhorn-system | grep -v 'Running\|NAME' | wc -l) = 0 ] && [ "$(kubectl get pod -n longhorn-system | wc -l)" -gt 19 ] ; do echo -e -n "."
; sleep 2; done
# testing out ` kubectl wait --for condition=containersready -n longhorn-system pod --all`

if [ "$prefix" = k3s ]; then kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' > /dev/null 2>&1; fi

# add encryption per volume storage class
kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/master/longhorn_encryption.yml > /dev/null 2>&1

info_ok
}

############################# fleet ################################
# Create the DigitalOcean credential secret and register the fleet gitrepo
# so downstream clusters can be provisioned.
function fleet () {
echo -e -n " fleet-ing"
# for downstream clusters
# kubectl create secret -n cattle-global-data generic awscred --from-literal=amazonec2credentialConfig-defaultRegion=us-east-1 --from-literal=amazonec2credentialConfig-accessKey=${AWS_ACCESS_KEY} --from-literal=amazonec2credentialConfig-secretKey=${AWS_SECRET_KEY} > /dev/null 2>&1

kubectl create secret -n cattle-global-data generic docreds --from-literal=digitaloceancredentialConfig-accessToken=${DO_TOKEN} > /dev/null 2>&1

kubectl apply -f https://raw.githubusercontent.com/clemenko/fleet/main/gitrepo.yml > /dev/null 2>&1

info_ok
}

############################# demo ################################
# Install demo workloads: whoami, flask, minio (s3.$domain / minio.$domain)
# and gitea (git.$domain), then mirror the fleet repo from GitHub into gitea.
function demo () {
echo -e " demo-ing"

echo -e -n " - whoami ";kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/master/whoami.yml > /dev/null 2>&1; info_ok

echo -e -n " - flask ";kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/master/flask_simple_nginx.yml > /dev/null 2>&1; info_ok

echo -e -n " - minio"
# NOTE: dropped the duplicated "--set mode=standalone" (it was passed twice)
helm upgrade -i minio minio --repo https://charts.min.io -n minio --set rootUser=admin,rootPassword=$password --create-namespace --set mode=standalone --set resources.requests.memory=1Gi --set persistence.size=10Gi --set ingress.enabled=true --set ingress.hosts[0]=s3.$domain --set consoleIngress.enabled=true --set consoleIngress.hosts[0]=minio.$domain --set ingress.annotations."nginx\.ingress\.kubernetes\.io/proxy-body-size"="1024m" --set consoleIngress.annotations."nginx\.ingress\.kubernetes\.io/proxy-body-size"="1024m" --set image.repository=cgr.dev/chainguard/minio --set image.tag=latest > /dev/null 2>&1
info_ok

# echo -e -n " - harbor"
# helm upgrade -i harbor harbor --repo https://helm.goharbor.io -n harbor --create-namespace --set expose.tls.certSource=secret --set expose.tls.secret.secretName=tls-ingress --set expose.tls.enabled=false --set expose.tls.auto.commonName=harbor.$domain --set expose.ingress.hosts.core=harbor.$domain --set persistence.enabled=false --set harborAdminPassword=$password --set externalURL=http://harbor.$domain --set notary.enabled=false > /dev/null 2>&1;
# info_ok

echo -e -n " - gitea"
helm upgrade -i gitea oci://registry-1.docker.io/giteacharts/gitea -n gitea --create-namespace --set gitea.admin.password=$password --set gitea.admin.username=gitea --set persistence.size=500Mi --set ingress.enabled=true --set ingress.hosts[0].host=git.$domain --set ingress.hosts[0].paths[0].path=/ --set ingress.hosts[0].paths[0].pathType=Prefix --set gitea.config.server.DOMAIN=git.$domain --set postgresql-ha.enabled=false --set valkey-cluster.enabled=false --set gitea.config.database.DB_TYPE=sqlite3 --set gitea.config.session.PROVIDER=memory --set gitea.config.cache.ADAPTER=memory --set gitea.config.queue.TYPE=level > /dev/null 2>&1

# mirror github
# NOTE(review): 'grep ""' matches every line, so this waits for a 1-line
# page -- the original pattern was probably an html tag that got stripped
# somewhere; confirm the intended readiness marker
until [ $(curl -s http://git.$domain/explore/repos| grep "" | wc -l) = 1 ]; do sleep 2; echo -n "."; done

sleep 5

curl -X POST http://git.$domain/api/v1/repos/migrate -H 'accept: application/json' -H 'authorization: Basic Z2l0ZWE6UGEyMndvcmQ=' -H 'Content-Type: application/json' -d '{ "clone_addr": "https://github.com/clemenko/fleet", "repo_name": "fleet","repo_owner": "gitea"}' > /dev/null 2>&1
info_ok

# echo -e -n " - tailscale "
# curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/tailscale.yaml | sed -e "s/XXX/$TAILSCALE_ID/g" -e "s/ZZZ/$TAILSCALE_TOKEN/g" | kubectl apply -f - > /dev/null 2>&1
# info_ok
}

################################ keycloak ##############################
# Deploy Keycloak, create the rancher realm/client/mappers/groups/users,
# then wire Rancher's OIDC auth to it. (continues past this chunk)
function keycloak () {

KEY_URL=keycloak.$domain
RANCHER_URL=rancher.$domain

echo -e " keycloaking"
echo -e -n " - deploying"

kubectl create ns keycloak > /dev/null 2>&1
kubectl -n keycloak create secret tls tls-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1
# kubectl -n keycloak create secret generic tls-ca --from-file=/Users/clemenko/Dropbox/work/rfed.me/io/cacerts.pem > /dev/null 2>&1

curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/keycloak.yml | sed "s/rfed.xx/$domain/g" | kubectl apply -f - > /dev/null 2>&1

info_ok

echo -e -n " - waiting for keycloak"

until [ $(curl -sk https://$KEY_URL/ | grep "Administration Console" | wc -l) = 1 ]; do echo -e -n "."
; sleep 2; done 418 | info_ok 419 | 420 | echo -e -n " - adding realm and client" 421 | 422 | # get auth token - notice keycloak's password 423 | export key_token=$(curl -sk -X POST https://$KEY_URL/realms/master/protocol/openid-connect/token -d 'client_id=admin-cli&username=admin&password='$password'&credentialId=&grant_type=password' | jq -r .access_token) 424 | 425 | # add realm 426 | curl -sk -X POST https://$KEY_URL/admin/realms -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"enabled":true,"id":"rancher","realm":"rancher"}' 427 | 428 | # add client 429 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"enabled":true,"attributes":{},"redirectUris":[],"clientId":"rancher","protocol":"openid-connect","publicClient": false,"redirectUris":["https://'$RANCHER_URL'/verify-auth"]}' 430 | 431 | # get client id 432 | export client_id=$(curl -sk https://$KEY_URL/admin/realms/rancher/clients/ -H "authorization: Bearer $key_token" | jq -r '.[] | select(.clientId=="rancher") | .id') 433 | 434 | # get client_secret 435 | export client_secret=$(curl -sk https://$KEY_URL/admin/realms/rancher/clients/$client_id/client-secret -H "authorization: Bearer $key_token" | jq -r .value) 436 | 437 | # add mappers 438 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients/$client_id/protocol-mappers/models -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"protocol":"openid-connect","config":{"full.path":"true","id.token.claim":"false","access.token.claim":"false","userinfo.token.claim":"true","claim.name":"groups"},"name":"Groups Mapper","protocolMapper":"oidc-group-membership-mapper"}' 439 | 440 | curl -sk -X POST 
https://$KEY_URL/admin/realms/rancher/clients/$client_id/protocol-mappers/models -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"protocol":"openid-connect","config":{"id.token.claim":"false","access.token.claim":"true","included.client.audience":"rancher"},"name":"Client Audience","protocolMapper":"oidc-audience-mapper"}' 441 | 442 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients/$client_id/protocol-mappers/models -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"protocol":"openid-connect","config":{"full.path":"true","id.token.claim":"true","access.token.claim":"true","userinfo.token.claim":"true","claim.name":"full_group_path"},"name":"Group Path","protocolMapper":"oidc-group-membership-mapper"}' 443 | 444 | # add realm-managementview-users 445 | # get role id 446 | # role_ID=$(curl -sk -X GET https://$KEY_URL/admin/realms/rancher/roles -H "authorization: Bearer $key_token" | jq -r '.[] | select(.name=="default-roles-rancher") | .id') 447 | 448 | # curl -sk https://$KEY_URL/admin/realms/rancher/roles-by-id/$role_ID/composites -H "authorization: Bearer $key_token" -d '[{"id":"d8ef39c5-c8b6-4bcc-8010-7244b7e5cf4a","name":"view-users","description":"${role_view-users}"}]' 449 | 450 | # add groups admin / dev 451 | curl -k https://$KEY_URL/admin/realms/rancher/groups -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"name":"devs"}' 452 | 453 | curl -k https://$KEY_URL/admin/realms/rancher/groups -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"name":"admins"}' 454 | 455 | 456 | # add keycloak user clemenko / Pa22word 457 | curl -k 'https://'$KEY_URL'/admin/realms/rancher/users' -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d 
'{"enabled":true,"attributes":{},"groups":["/devs"],"credentials":[{"type":"password","value":"'$password'","temporary":false}],"username":"clemenko","emailVerified":"","firstName":"Andy","lastName":"Clemenko"}' 458 | 459 | # add keycloak user admin / Pa22word 460 | curl -k 'https://'$KEY_URL'/admin/realms/rancher/users' -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"enabled":true,"attributes":{},"groups":["/admins", "/devs"],"credentials":[{"type":"password","value":"'$password'","temporary":false}],"username":"admin","emailVerified":"","firstName":"Admin","lastName":"Clemenko"}' 461 | 462 | info_ok 463 | 464 | echo -e -n " - configuring rancher" 465 | # configure rancher 466 | token=$(curl -sk -X POST https://$RANCHER_URL/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"'$password'"}' | jq -r .token) 467 | 468 | api_token=$(curl -sk https://$RANCHER_URL/v3/token -H 'content-type: application/json' -H "Authorization: Bearer $token" -d '{"type":"token","description":"automation"}' | jq -r .token) 469 | 470 | curl -sk -X PUT https://$RANCHER_URL/v3/keyCloakOIDCConfigs/keycloakoidc?action=testAndEnable -H 'accept: application/json' -H 'accept-language: en-US,en;q=0.9' -H 'content-type: application/json;charset=UTF-8' -H 'content-type: application/json' -H "Authorization: Bearer $api_token" -X PUT -d '{"enabled":true,"id":"keycloakoidc","name":"keycloakoidc","type":"keyCloakOIDCConfig","accessMode":"unrestricted","rancherUrl":"https://rancher.'$domain'/verify-auth","scope":"openid profile email","clientId":"rancher","clientSecret":"'$client_secret'","issuer":"https://keycloak.'$domain'/realms/rancher","authEndpoint":"https://'$KEY_URL'/realms/rancher/protocol/openid-connect/auth/"}' > /dev/null 2>&1 471 | 472 | # login with keycloak user - manual 473 | 474 | info_ok 475 | 476 | } 477 | 478 | # PSA notes 479 | # kubectl label ns spark 
pod-security.kubernetes.io/audit=privileged pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=privileged --------------------------------------------------------------------------------