├── README.md ├── classifications.sh ├── deprecated_harv.sh ├── functions.sh ├── harvester_rancher.sh ├── rke2.sh └── stigs_tldr.md /README.md: -------------------------------------------------------------------------------- 1 | # My Full Demo Stack 2 | 3 | The script is meant to simplify the building of a demo stack for play and profit. With both you will get : 4 | 5 | * [DigitalOcean](https://digitalocean.com) - VMs 6 | * [Rocky Linux](https://rockylinux.org/) 7 | * [RKE2](https://docs.rke2.io/) - RKE2 Kube / [K3s](http://k3s.io) - K3s Kube 8 | * [Rancher](https://rancher.com/products/rancher) - Rancher Cluster Manager 9 | * [Longhorn](https://longhorn.io) - Stateful storage 10 | * [Minio](https://Minio.io) - S3 object store 11 | * [Gitea](https://gitea.io/en-us/) - Version Control 12 | * [KeyCloak](https://keycloak.org) - Authentication 13 | * [Harbor](https://goharbor.io) - Registry 14 | 15 | 16 | Please pay attention to the variables at the top of the scripts. 17 | 18 | Any questions please feel free to create an issue or email me at clemenko@gmail.com. 19 | -------------------------------------------------------------------------------- /classifications.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # clemenko@gmail.com 3 | #here is how to use the API to push a logon banner as well as header and footers for classification. 4 | # https://www.astrouxds.com/components/classification-markings/ 5 | 6 | class=$1 7 | 8 | if [ -z $class ]; then 9 | echo "$RED [warn]$NORMAL Please ensure you have kubeconfig and classification to the command." 10 | echo " $BLUE Use:$NORMAL $0 " 11 | echo " $BLUE Use:$NORMAL $0 TS " 12 | exit 13 | fi 14 | 15 | # check for kubctl 16 | command -v kubectl >/dev/null 2>&1 || { echo -e "$RED" " ** Kubectl was not found. Please install. 
** " "$NO_COLOR" >&2; exit 1; } 17 | 18 | # check for kubeconfig 19 | if [ $(kubectl get ns cattle-system --no-headers | wc -l) != "1" ]; then echo -e "$RED" " ** kubeconfig was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; fi 20 | 21 | #gov logon message 22 | export govmessage=$(cat < /dev/null 2>&1 31 | apiVersion: management.cattle.io/v3 32 | kind: Setting 33 | metadata: 34 | name: ui-banners 35 | value: '{"bannerHeader":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerFooter":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"$govmessage","button":"Accept"},"showHeader":"true","showFooter":"true","showConsent":"true"}' 36 | EOF 37 | ;; 38 | 39 | TS | ts ) 40 | #top secret 41 | cat < /dev/null 2>&1 42 | apiVersion: management.cattle.io/v3 43 | kind: Setting 44 | metadata: 45 | name: ui-banners 46 | value: '{"bannerHeader":{"background":"#fce83a","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"TOP SECRET//SCI"},"bannerFooter":{"background":"#fce83a","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"TOP SECRET//SCI"},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"$govmessage","button":"Accept"},"showHeader":"true","showFooter":"true","showConsent":"true"}' 47 | EOF 48 | ;; 49 | 50 | clear ) 51 | cat < /dev/null 2>&1 52 | apiVersion: management.cattle.io/v3 53 | kind: Setting 54 | 
metadata: 55 | name: ui-banners 56 | value: '{"bannerHeader":{"background":"#ffffff","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":""},"bannerFooter":{"background":"#ffffff","color":"#000000","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":""},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"","button":"Accept"},"showHeader":"false","showFooter":"false","showConsent":"false"}' 57 | EOF 58 | ;; 59 | 60 | *) echo "Usage: $0 {clear | TS | U }"; exit 1 61 | 62 | esac 63 | -------------------------------------------------------------------------------- /deprecated_harv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # this script assumes you have harvester setup 4 | # you need harvester, kubectl, uuid, jq, k3sup, pdsh and curl installed. 
5 | # clemenko@gmail.com 6 | 7 | ################################### 8 | # edit vars 9 | ################################### 10 | set -e 11 | num=6 12 | zone=nyc1 13 | size=s-4vcpu-8gb-amd 14 | key=30:98:4f:c5:47:c2:88:28:fe:3c:23:cd:52:49:51:01 15 | image=rockylinux-9-x64 16 | 17 | export password=Pa22word 18 | export domain=rfed.io 19 | 20 | template=rocky 21 | 22 | # rancher / k8s 23 | prefix=rke- # no rke k3s 24 | rke2_channel=v1.27 #latest 25 | export TOKEN=fuzzybunnyslippers 26 | 27 | # Carbide creds 28 | export CARBIDE=false # or true to enable carbide 29 | export CARBIDEUSER=andy-clemenko-read-token 30 | #export CARBIDEPASS= # set on the command line 31 | 32 | ###### NO MOAR EDITS ####### 33 | export RED='\x1b[0;31m' 34 | export GREEN='\x1b[32m' 35 | export BLUE='\x1b[34m' 36 | export YELLOW='\x1b[33m' 37 | export NO_COLOR='\x1b[0m' 38 | export PDSH_RCMD_TYPE=ssh 39 | 40 | #better error checking 41 | command -v curl >/dev/null 2>&1 || { echo -e "$RED" " ** Curl was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 42 | command -v jq >/dev/null 2>&1 || { echo -e "$RED" " ** Jq was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 43 | command -v pdsh >/dev/null 2>&1 || { echo -e "$RED" " ** Pdsh was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 44 | command -v kubectl >/dev/null 2>&1 || { echo -e "$RED" " ** Kubectl was not found. Please install. ** " "$NO_COLOR" >&2; exit 1; } 45 | 46 | #### doctl_list #### 47 | #function dolist () { harvester vm |grep -v NAME | grep Run | grep $prefix| awk '{print $1" "$2" "$6" "$4" "$5}'; } 48 | function dolist () { doctl compute droplet list --no-header|grep $prefix |sort -k 2; } 49 | 50 | source functions.sh 51 | 52 | ################################# up ################################ 53 | function up () { 54 | build_list="" 55 | # helm repo update > /dev/null 2>&1 56 | 57 | if [ -n "$(dolist)" ]; then 58 | echo -e "$RED" "Warning - cluster already detected..." 
"$NO_COLOR" 59 | exit 60 | fi 61 | 62 | #build VMS 63 | echo -e -n " building vms -$build_list " 64 | #harvester vm create --template $template --count $num rke > /dev/null 2>&1 65 | #until [ $(dolist | grep "192.168" | wc -l) = $num ]; do echo -e -n "." ; sleep 2; done 66 | #sleep 10 67 | for i in $(seq 1 $num); do build_list="$build_list $prefix$i"; done 68 | doctl compute droplet create $build_list --region $zone --image $image --size $size --ssh-keys $key --wait --droplet-agent=false > /dev/null 2>&1 69 | 70 | echo -e "$GREEN" "ok" "$NO_COLOR" 71 | 72 | #check for SSH 73 | echo -e -n " checking for ssh " 74 | for ext in $(dolist | awk '{print $3}'); do 75 | until [ $(ssh -o ConnectTimeout=1 root@$ext 'exit' 2>&1 | grep 'timed out\|refused' | wc -l) = 0 ]; do echo -e -n "." ; sleep 5; done 76 | done 77 | echo -e "$GREEN" "ok" "$NO_COLOR" 78 | 79 | #get ips 80 | host_list=$(dolist | awk '{printf $3","}' | sed 's/,$//') 81 | server=$(dolist | sed -n 1p | awk '{print $3}') 82 | worker_list=$(dolist | sed 1d | awk '{printf $3","}' | sed 's/,$//') 83 | 84 | # update node list 85 | node1=$(dolist | sed -n 1p | awk '{print $3}') 86 | node2=$(dolist | sed -n 2p | awk '{print $3}') 87 | node3=$(dolist | sed -n 3p | awk '{print $3}') 88 | worker_list=$(dolist | sed '1,3d' | awk '{printf $3","}' | sed -e 's/,$//') 89 | 90 | # update DNS 91 | echo -e -n " updating dns" 92 | doctl compute domain records create $domain --record-type A --record-name $prefix"1" --record-ttl 60 --record-data $node1 > /dev/null 2>&1 93 | doctl compute domain records create $domain --record-type A --record-name rke --record-ttl 60 --record-data $node1 > /dev/null 2>&1 94 | doctl compute domain records create $domain --record-type A --record-name rke --record-ttl 60 --record-data $node2 > /dev/null 2>&1 95 | doctl compute domain records create $domain --record-type A --record-name rke --record-ttl 60 --record-data $node3 > /dev/null 2>&1 96 | doctl compute domain records create $domain 
--record-type CNAME --record-name "*" --record-ttl 60 --record-data rke.$domain. > /dev/null 2>&1 97 | echo -e "$GREEN" "ok" "$NO_COLOR" 98 | 99 | sleep 10 100 | 101 | centos_packages 102 | 103 | #carbide_reg 104 | 105 | kernel 106 | 107 | #or deploy k3s 108 | if [ "$prefix" != rke- ]; then exit; fi 109 | 110 | echo -e -n " deploying rke2 " 111 | ssh root@$node1 'mkdir -p /etc/rancher/rke2/ /var/lib/rancher/rke2/server/manifests/; useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U; echo -e "apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n- level: RequestResponse" > /etc/rancher/rke2/audit-policy.yaml; echo -e "#profile: cis-1.6\nselinux: true\ntoken: '$TOKEN'\nsecrets-encryption: true\ntls-san:\n- rke.'$domain'\nwrite-kubeconfig-mode: 0600\nuse-service-account-credentials: true\nkube-controller-manager-arg:\n- bind-address=127.0.0.1\n- use-service-account-credentials=true\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\nkube-apiserver-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- authorization-mode=RBAC,Node\n- anonymous-auth=false\n- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml\n- audit-log-mode=blocking-strict\n- audit-log-maxage=30\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook\n- streaming-connection-idle-timeout=5m" > /etc/rancher/rke2/config.yaml ; echo -e "apiVersion: helm.cattle.io/v1\nkind: HelmChartConfig\nmetadata:\n name: rke2-ingress-nginx\n namespace: kube-system\nspec:\n valuesContent: |-\n controller:\n config:\n use-forwarded-headers: true\n extraArgs:\n enable-ssl-passthrough: true" > /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml; curl 
-sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$rke2_channel' sh - && systemctl enable --now rke2-server.service' > /dev/null 2>&1 112 | 113 | sleep 10 114 | 115 | pdsh -l root -w $node2,$node3 'mkdir -p /etc/rancher/rke2/ /var/lib/rancher/rke2/server/manifests/; useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U; echo -e "apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n- level: RequestResponse" > /etc/rancher/rke2/audit-policy.yaml; echo -e "server: https://'$node1':9345\ntoken: '$TOKEN'\n#profile: cis-1.6\nselinux: true\nsecrets-encryption: true\ntls-san:\n- rke.'$domain'\nwrite-kubeconfig-mode: 0600\nkube-controller-manager-arg:\n- bind-address=127.0.0.1\n- use-service-account-credentials=true\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\nkube-apiserver-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n- authorization-mode=RBAC,Node\n- anonymous-auth=false\n- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml\n- audit-log-mode=blocking-strict\n- audit-log-maxage=30\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook" > /etc/rancher/rke2/config.yaml ; echo -e "apiVersion: helm.cattle.io/v1\nkind: HelmChartConfig\nmetadata:\n name: rke2-ingress-nginx\n namespace: kube-system\nspec:\n valuesContent: |-\n controller:\n config:\n use-forwarded-headers: true\n extraArgs:\n enable-ssl-passthrough: true" > /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$rke2_channel' sh - && systemctl enable --now rke2-server.service' > /dev/null 2>&1 116 | 117 | sleep 10 118 | 119 | pdsh -l root -w $worker_list 'yum install -y 
http://dl.rockylinux.org/pub/rocky/9.1/AppStream/x86_64/os/Packages/c/container-selinux-2.189.0-1.el9.noarch.rpm; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$rke2_channel' INSTALL_RKE2_TYPE=agent sh - && mkdir -p /etc/rancher/rke2/ && echo -e "selinux: true\nserver: https://rke.'$domain':9345\ntoken: '$TOKEN'\nwrite-kubeconfig-mode: 0600\n#profile: cis-1.6\nkube-apiserver-arg:\n- authorization-mode=RBAC,Node\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook" > /etc/rancher/rke2/config.yaml && systemctl enable --now rke2-agent.service' > /dev/null 2>&1 120 | 121 | ssh root@$server cat /etc/rancher/rke2/rke2.yaml | sed -e "s/127.0.0.1/$server/g" > ~/.kube/config 122 | chmod 0600 ~/.kube/config 123 | 124 | echo -e "$GREEN" "ok" "$NO_COLOR" 125 | 126 | echo -e -n " - cluster active " 127 | sleep 5 128 | until [ $(kubectl get node|grep NotReady|wc -l) = 0 ]; do echo -e -n "."; sleep 2; done 129 | echo -e "$GREEN" "ok" "$NO_COLOR" 130 | } 131 | 132 | ############################## kill ################################ 133 | #remove the vms 134 | function kill () { 135 | 136 | if [ ! 
-z $(dolist | awk '{printf $3","}' | sed 's/,$//') ]; then 137 | echo -e -n " killing it all " 138 | harvester vm delete $(harvester vm |grep -v NAME | grep $prefix | awk '{printf $2" "}') > /dev/null 2>&1 139 | for i in $(dolist | awk '{print $3}'); do ssh-keygen -q -R $i > /dev/null 2>&1; done 140 | for i in $(doctl compute domain records list $domain|grep rke |awk '{print $1}'); do doctl compute domain records delete -f $domain $i; done 141 | until [ $(dolist | wc -l | sed 's/ //g') == 0 ]; do echo -e -n "."; sleep 2; done 142 | for i in $(doctl compute volume list --no-header |awk '{print $1}'); do doctl compute volume delete -f $i; done 143 | 144 | rm -rf *.txt *.log *.zip *.pub env.* certs backup.tar ~/.kube/config central* sensor* *token kubeconfig *TOKEN 145 | 146 | else 147 | echo -e -n " no cluster found " 148 | fi 149 | 150 | echo -e "$GREEN" "ok" "$NO_COLOR" 151 | } 152 | 153 | case "$1" in 154 | up) up;; 155 | tl) up && traefik && longhorn;; 156 | kill) kill;; 157 | rox) rox;; 158 | neu) neu;; 159 | dolist) dolist;; 160 | traefik) traefik;; 161 | keycloak) keycloak;; 162 | longhorn) longhorn;; 163 | rancher) rancher;; 164 | demo) demo;; 165 | fleet) fleet;; 166 | *) usage;; 167 | esac 168 | -------------------------------------------------------------------------------- /functions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # functions 4 | # color 5 | export RED='\x1b[0;31m' 6 | export GREEN='\x1b[32m' 7 | export BLUE='\x1b[34m' 8 | export YELLOW='\x1b[33m' 9 | export NO_COLOR='\x1b[0m' 10 | 11 | # set functions for debugging/logging 12 | function info { echo -e "$GREEN[info]$NO_COLOR $1" ; } 13 | function warn { echo -e "$YELLOW[warn]$NO_COLOR $1" ; } 14 | function fatal { echo -e "$RED[error]$NO_COLOR $1" ; exit 1 ; } 15 | function info_ok { echo -e "$GREEN" "ok" "$NO_COLOR" ; } 16 | 17 | #gov logon message 18 | export govmessage=$(cat < 
/etc/NetworkManager/conf.d/rke2-canal.conf; yum install -y nfs-utils cryptsetup iscsi-initiator-utils; systemctl enable --now iscsid; yum update openssh -y; #yum update -y' > /dev/null 2>&1 48 | echo -e "$GREEN" "ok" "$NO_COLOR" 49 | } 50 | 51 | ############################# kernel ################################ 52 | function kernel () { 53 | #kernel tuning 54 | echo -e -n " - updating kernel settings" 55 | pdsh -l root -w $host_list 'cat << EOF >> /etc/sysctl.conf 56 | # SWAP settings 57 | vm.swappiness=0 58 | vm.panic_on_oom=0 59 | vm.overcommit_memory=1 60 | kernel.panic=10 61 | kernel.panic_on_oops=1 62 | vm.max_map_count = 262144 63 | net.ipv4.ip_local_port_range=1024 65000 64 | net.core.somaxconn=10000 65 | net.ipv4.tcp_tw_reuse=1 66 | net.ipv4.tcp_fin_timeout=15 67 | net.core.somaxconn=4096 68 | net.core.netdev_max_backlog=4096 69 | net.core.rmem_max=536870912 70 | net.core.wmem_max=536870912 71 | net.ipv4.tcp_max_syn_backlog=20480 72 | net.ipv4.tcp_max_tw_buckets=400000 73 | net.ipv4.tcp_no_metrics_save=1 74 | net.ipv4.tcp_rmem=4096 87380 268435456 75 | net.ipv4.tcp_wmem=4096 87380 268435456 76 | net.ipv4.tcp_syn_retries=2 77 | net.ipv4.tcp_synack_retries=2 78 | net.ipv4.neigh.default.gc_thresh1=8096 79 | net.ipv4.neigh.default.gc_thresh2=12288 80 | net.ipv4.neigh.default.gc_thresh3=16384 81 | net.ipv4.tcp_keepalive_time=600 82 | net.ipv4.ip_forward=1 83 | fs.inotify.max_user_instances=8192 84 | fs.inotify.max_user_watches=1048576 85 | net.ipv6.conf.all.disable_ipv6 = 1 86 | net.ipv6.conf.default.disable_ipv6 = 1 87 | EOF 88 | sysctl -p' > /dev/null 2>&1 89 | echo -e "$GREEN" "ok" "$NO_COLOR" 90 | } 91 | 92 | ################################ rancher ############################## 93 | function rancher () { 94 | 95 | if [[ -z $(dolist | awk '{printf $3","}' | sed 's/,$//') ]] && ! kubectl get node > /dev/null 2>&1 ; then 96 | echo -e "$BLUE" "Building cluster first." 
"$NO_COLOR" 97 | up && longhorn 98 | fi 99 | 100 | echo -e "$BLUE" "deploying rancher" "$NO_COLOR" 101 | #helm repo add rancher-latest https://releases.rancher.com/server-charts/latest --force-update > /dev/null 2>&1 102 | #helm repo add jetstack https://charts.jetstack.io --force-update > /dev/null 2>&1 103 | 104 | echo -e -n " - helm - cert-manager " 105 | helm upgrade -i cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set crds.enabled=true > /dev/null 2>&1 106 | 107 | echo -e "$GREEN" "ok" "$NO_COLOR" 108 | 109 | echo -e -n " - helm - rancher " 110 | 111 | # custom TLS certs 112 | kubectl create ns cattle-system > /dev/null 2>&1 113 | # kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=tls.crt --key=tls.key 114 | kubectl -n cattle-system create secret tls tls-rancher-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1 115 | # kubectl -n cattle-system create secret generic tls-ca --from-file=cacerts.pem 116 | kubectl -n cattle-system create secret generic tls-ca --from-file=/Users/clemenko/Dropbox/work/rfed.me/io/cacerts.pem > /dev/null 2>&1 117 | # kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=cacerts.pem 118 | 119 | helm upgrade -i rancher rancher-latest/rancher -n cattle-system --create-namespace --set hostname=rancher.$domain --set bootstrapPassword=bootStrapAllTheThings --set replicas=1 --set auditLog.level=2 --set auditLog.destination=hostPath --set auditLog.hostPath=/var/log/rancher/audit --set auditLog.maxAge=30 --set antiAffinity=required --set ingress.tls.source=secret --set ingress.tls.secretName=tls-rancher-ingress --set privateCA=true --set 'extraEnv[0].name=CATTLE_FEATURES' --set 'extraEnv[0].value=ui-sql-cache=true' > /dev/null 2>&1 120 | 121 | echo -e "$GREEN" "ok" "$NO_COLOR" 122 | 123 | # wait for rancher 124 | echo -e -n " - 
waiting for rancher " 125 | until [ $(curl -sk https://rancher.$domain/v3-public/authproviders | grep local | wc -l ) = 1 ]; do 126 | sleep 2 127 | echo -e -n "." 128 | done 129 | echo -e "$GREEN" "ok" "$NO_COLOR" 130 | 131 | echo -e -n " - bootstrapping " 132 | cat < /dev/null 2>&1 133 | apiVersion: management.cattle.io/v3 134 | kind: Setting 135 | metadata: 136 | name: password-min-length 137 | namespace: cattle-system 138 | value: "8" 139 | EOF 140 | 141 | #set password 142 | token=$(curl -sk -X POST https://rancher.$domain/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"bootStrapAllTheThings"}' | jq -r .token) 143 | 144 | curl -sk https://rancher.$domain/v3/users?action=changepassword -H 'content-type: application/json' -H "Authorization: Bearer $token" -d '{"currentPassword":"bootStrapAllTheThings","newPassword":"'$password'"}' > /dev/null 2>&1 145 | 146 | api_token=$(curl -sk https://rancher.$domain/v3/token -H 'content-type: application/json' -H "Authorization: Bearer $token" -d '{"type":"token","description":"automation"}' | jq -r .token) 147 | 148 | curl -sk https://rancher.$domain/v3/settings/server-url -H 'content-type: application/json' -H "Authorization: Bearer $api_token" -X PUT -d '{"name":"server-url","value":"https://rancher.'$domain'"}' > /dev/null 2>&1 149 | 150 | curl -sk https://rancher.$domain/v3/settings/telemetry-opt -X PUT -H 'content-type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $api_token" -d '{"value":"out"}' > /dev/null 2>&1 151 | echo -e "$GREEN" "ok" "$NO_COLOR" 152 | 153 | # class banners 154 | cat < /dev/null 2>&1 155 | apiVersion: management.cattle.io/v3 156 | kind: Setting 157 | metadata: 158 | name: ui-banners 159 | value: 
'{"bannerHeader":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerFooter":{"background":"#007a33","color":"#ffffff","textAlignment":"center","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":null,"text":"UNCLASSIFIED//FOUO"},"bannerConsent":{"background":"#ffffff","color":"#000000","textAlignment":"left","fontWeight":null,"fontStyle":null,"fontSize":"14px","textDecoration":false,"text":"$govmessage","button":"Accept"},"showHeader":"true","showFooter":"true","showConsent":"true"}' 160 | EOF 161 | 162 | } 163 | 164 | ################################ longhorn ############################## 165 | function longhorn () { 166 | echo -e -n " - longhorn " 167 | # helm repo add longhorn https://charts.longhorn.io --force-update 168 | 169 | # to http basic auth --> https://longhorn.io/docs/1.4.1/deploy/accessing-the-ui/longhorn-ingress/ 170 | 171 | helm upgrade -i longhorn longhorn/longhorn -n longhorn-system --create-namespace --set ingress.enabled=true --set ingress.host=longhorn.$domain --set defaultSettings.storageMinimalAvailablePercentage=25 --set defaultSettings.storageOverProvisioningPercentage=200 --set defaultSettings.allowCollectingLonghornUsageMetrics=false --set persistence.defaultDataLocality="best-effort" > /dev/null 2>&1 #--set defaultSettings.v2DataEngine=true #--set defaultSettings.v1DataEngine=false 172 | 173 | sleep 5 174 | 175 | #wait for longhorn to initiaize 176 | until [ $(kubectl get pod -n longhorn-system | grep -v 'Running\|NAME' | wc -l) = 0 ] && [ "$(kubectl get pod -n longhorn-system | wc -l)" -gt 19 ] ; do echo -e -n "." 
; sleep 2; done 177 | # testing out ` kubectl wait --for condition=containersready -n longhorn-system pod --all` 178 | 179 | kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' > /dev/null 2>&1 180 | if [ "$prefix" = k3s ]; then kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' > /dev/null 2>&1; fi 181 | 182 | # add encryption per volume storage class 183 | kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/master/longhorn_encryption.yml > /dev/null 2>&1 184 | 185 | echo -e "$GREEN" "ok" "$NO_COLOR" 186 | } 187 | 188 | ################################ neu ############################## 189 | function neu () { 190 | echo -e -n " - neuvector " 191 | 192 | # helm repo add neuvector https://neuvector.github.io/neuvector-helm/ --force-update 193 | 194 | # custom TLS certs 195 | kubectl create ns neuvector > /dev/null 2>&1 196 | kubectl -n neuvector create secret tls tls-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1 197 | 198 | kubectl -n neuvector create secret tls tls-fed-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1 199 | 200 | cat < /dev/null 2>&1 201 | apiVersion: v1 202 | kind: ConfigMap 203 | metadata: 204 | name: neuvector-init 205 | namespace: neuvector 206 | data: 207 | sysinitcfg.yaml: | 208 | always_reload: true 209 | Cluster_Name: neuvector.$domain 210 | No_Telemetry_Report: true 211 | Scan_Config: 212 | Auto_Scan: true 213 | Scanner_Autoscale: 214 | Min_Pods: 1 215 | Max_Pods: 3 216 | userinitcfg.yaml: | 217 | always_reload: true 218 | users: 219 | - Fullname: admin 220 | Password: $password 221 | Role: admin 222 | Timeout: 3600 223 | EOF 224 | 225 | export 
govmessage_html="PGI+WW91IGFyZSBhY2Nlc3NpbmcgYSBVLlMuIEdvdmVybm1lbnQgKFVTRykgSW5mb3JtYXRpb24gU3lzdGVtIChJUykgdGhhdCBpcyBwcm92aWRlZCBmb3IgVVNHLWF1dGhvcml6ZWQgdXNlIG9ubHkuPC9iPjxicj4KPGJyPgpCeSB1c2luZyB0aGlzIElTICh3aGljaCBpbmNsdWRlcyBhbnkgZGV2aWNlIGF0dGFjaGVkIHRvIHRoaXMgSVMpLCB5b3UgY29uc2VudCB0byB0aGUgZm9sbG93aW5nIGNvbmRpdGlvbnM6PGJyPgo8YnI+Ci1UaGUgVVNHIHJvdXRpbmVseSBpbnRlcmNlcHRzIGFuZCBtb25pdG9ycyBjb21tdW5pY2F0aW9ucyBvbiB0aGlzIElTIGZvciBwdXJwb3NlcyBpbmNsdWRpbmcsIGJ1dCBub3QgbGltaXRlZCB0bywgcGVuZXRyYXRpb24gdGVzdGluZywgQ09NU0VDIG1vbml0b3JpbmcsIG5ldHdvcmsgb3BlcmF0aW9ucyBhbmQgZGVmZW5zZSwgcGVyc29ubmVsIG1pc2NvbmR1Y3QgKFBNKSwgbGF3IGVuZm9yY2VtZW50IChMRSksIGFuZCBjb3VudGVyaW50ZWxsaWdlbmNlIChDSSkgaW52ZXN0aWdhdGlvbnMuPGJyPgo8YnI+Ci1BdCBhbnkgdGltZSwgdGhlIFVTRyBtYXkgaW5zcGVjdCBhbmQgc2VpemUgZGF0YSBzdG9yZWQgb24gdGhpcyBJUy48YnI+Cjxicj4KLUNvbW11bmljYXRpb25zIHVzaW5nLCBvciBkYXRhIHN0b3JlZCBvbiwgdGhpcyBJUyBhcmUgbm90IHByaXZhdGUsIGFyZSBzdWJqZWN0IHRvIHJvdXRpbmUgbW9uaXRvcmluZywgaW50ZXJjZXB0aW9uLCBhbmQgc2VhcmNoLCBhbmQgbWF5IGJlIGRpc2Nsb3NlZCBvciB1c2VkIGZvciBhbnkgVVNHLWF1dGhvcml6ZWQgcHVycG9zZS48YnI+Cjxicj4KLVRoaXMgSVMgaW5jbHVkZXMgc2VjdXJpdHkgbWVhc3VyZXMgKGUuZy4sIGF1dGhlbnRpY2F0aW9uIGFuZCBhY2Nlc3MgY29udHJvbHMpIHRvIHByb3RlY3QgVVNHIGludGVyZXN0cy0tbm90IGZvciB5b3VyIHBlcnNvbmFsIGJlbmVmaXQgb3IgcHJpdmFjeS48YnI+Cjxicj4KLU5vdHdpdGhzdGFuZGluZyB0aGUgYWJvdmUsIHVzaW5nIHRoaXMgSVMgZG9lcyBub3QgY29uc3RpdHV0ZSBjb25zZW50IHRvIFBNLCBMRSBvciBDSSBpbnZlc3RpZ2F0aXZlIHNlYXJjaGluZyBvciBtb25pdG9yaW5nIG9mIHRoZSBjb250ZW50IG9mIHByaXZpbGVnZWQgY29tbXVuaWNhdGlvbnMsIG9yIHdvcmsgcHJvZHVjdCwgcmVsYXRlZCB0byBwZXJzb25hbCByZXByZXNlbnRhdGlvbiBvciBzZXJ2aWNlcyBieSBhdHRvcm5leXMsIHBzeWNob3RoZXJhcGlzdHMsIG9yIGNsZXJneSwgYW5kIHRoZWlyIGFzc2lzdGFudHMuIFN1Y2ggY29tbXVuaWNhdGlvbnMgYW5kIHdvcmsgcHJvZHVjdCBhcmUgcHJpdmF0ZSBhbmQgY29uZmlkZW50aWFsLiBTZWUgVXNlciBBZ3JlZW1lbnQgZm9yIGRldGFpbHMuIDxicj4gCg==" 226 | 227 | # clear 228 | # helm upgrade -i neuvector -n neuvector neuvector/core --create-namespace --set k3s.enabled=true --set 
controller.pvc.enabled=true --set controller.pvc.capacity=500Mi --set internal.certmanager.enabled=true --set manager.ingress.enabled=true --set manager.ingress.host=neuvector.$domain --set manager.ingress.tls=true --set manager.ingress.secretName=tls-ingress --set controller.federation.mastersvc.ingress.enabled=true --set controller.federation.mastersvc.ingress.host=nv-api.rfed.io --set controller.federation.mastersvc.ingress.tls=true --set controller.federation.mastersvc.ingress.secretName=tls-ingress --set controller.federation.mastersvc.type=ClusterIP > /dev/null 2>&1 229 | 230 | # Unclass 231 | helm upgrade -i neuvector -n neuvector neuvector/core --create-namespace --set controller.pvc.enabled=true --set controller.pvc.capacity=500Mi --set manager.env.envs[0].name=CUSTOM_PAGE_HEADER_COLOR --set manager.env.envs[0].value="#007a33" --set manager.env.envs[1].name=CUSTOM_PAGE_HEADER_CONTENT --set manager.env.envs[1].value="VU5DTEFTU0lGSUVELy9GT1VPCg==" --set manager.env.envs[2].name=CUSTOM_PAGE_FOOTER_COLOR --set manager.env.envs[2].value="#007a33" --set manager.env.envs[3].name=CUSTOM_PAGE_FOOTER_CONTENT --set manager.env.envs[3].value="VU5DTEFTU0lGSUVELy9GT1VPCg==" --set manager.env.envs[4].name=CUSTOM_EULA_POLICY --set manager.env.envs[4].value=$govmessage_html --set manager.ingress.enabled=true --set manager.ingress.host=neuvector.$domain --set manager.ingress.tls=true --set manager.ingress.secretName=tls-ingress > /dev/null 2>&1 232 | 233 | # federation 234 | # --set controller.federation.mastersvc.ingress.enabled=true --set controller.federation.mastersvc.ingress.host=nv-api.rfed.io --set controller.federation.mastersvc.ingress.tls=true --set controller.federation.mastersvc.ingress.secretName=tls-fed-ingress --set controller.federation.mastersvc.type=ClusterIP 235 | 236 | # TS 237 | # helm upgrade -i neuvector -n neuvector neuvector/core --create-namespace --set k3s.enabled=true --set controller.pvc.enabled=true --set controller.pvc.capacity=500Mi --set 
internal.certmanager.enabled=true --set manager.env.envs[0].name=CUSTOM_PAGE_HEADER_COLOR --set manager.env.envs[0].value="#fce83a" --set manager.env.envs[1].name=CUSTOM_PAGE_HEADER_CONTENT --set manager.env.envs[1].value="VE9QIFNFQ1JFVC8vU0NJCg==" --set manager.env.envs[2].name=CUSTOM_PAGE_FOOTER_COLOR --set manager.env.envs[2].value="#fce83a" --set manager.env.envs[3].name=CUSTOM_PAGE_FOOTER_CONTENT --set manager.env.envs[3].value="VE9QIFNFQ1JFVC8vU0NJCg==" --set manager.env.envs[4].name=CUSTOM_EULA_POLICY --set manager.env.envs[4].value=$govmessage_html --set manager.ingress.enabled=true --set manager.ingress.host=neuvector.$domain --set manager.ingress.secretName=tls-ingress --set controller.federation.mastersvc.ingress.enabled=true --set controller.federation.mastersvc.ingress.host=nv-api.rfed.io --set controller.federation.mastersvc.ingress.tls=true --set controller.federation.mastersvc.ingress.secretName=tls-ingress --set controller.federation.mastersvc.type=ClusterIP > /dev/null 2>&1 238 | 239 | until [[ "$(curl -skL -H "Content-Type: application/json" -o /dev/null -w '%{http_code}' https://neuvector.$domain/auth -d '{"isRancherSSOUrl":false, "username": "admin", "password": "'$password'"}')" == "200" ]]; do echo -e -n .; sleep 1; done 240 | 241 | TOKEN=$(curl -sk -H "Content-Type: application/json" https://neuvector.$domain/auth -d '{"isRancherSSOUrl":false, "username": "admin", "password": "'$password'"}' | jq -r .token.token) 242 | 243 | curl -sk -H "Content-Type: application/json" -H 'Token: '$TOKEN https://neuvector.$domain/eula -d '{"accepted":true}' > /dev/null 2>&1 244 | 245 | echo -e "$GREEN" "ok" "$NO_COLOR" 246 | 247 | # federation managed 248 | # helm upgrade -i neuvector -n neuvector neuvector/core --create-namespace --set k3s.enabled=true --set manager.ingress.enabled=true --set manager.ingress.host=neuvector2.rfed.io --set manager.ingress.tls=true --set manager.ingress.secretName=tls-ingress --set 
controller.federation.managedsvc.ingress.enabled=true --set controller.federation.managedsvc.ingress.host=nv-down1.rfed.io --set controller.federation.managedsvc.ingress.tls=true --set controller.federation.managedsvc.ingress.secretName=tls-ingress --set controller.federation.managedsvc.type=ClusterIP 249 | 250 | # https://gist.github.com/clemenko/385d6ce697e1f7a4601dbfc24d9a87e2 251 | } 252 | 253 | ############################# fleet ################################ 254 | function fleet () { 255 | echo -e -n " fleet-ing " 256 | # for downstream clusters 257 | # kubectl create secret -n cattle-global-data generic awscred --from-literal=amazonec2credentialConfig-defaultRegion=us-east-1 --from-literal=amazonec2credentialConfig-accessKey=${AWS_ACCESS_KEY} --from-literal=amazonec2credentialConfig-secretKey=${AWS_SECRET_KEY} > /dev/null 2>&1 258 | 259 | kubectl create secret -n cattle-global-data generic docreds --from-literal=digitaloceancredentialConfig-accessToken=${DO_TOKEN} > /dev/null 2>&1 260 | 261 | kubectl apply -f https://raw.githubusercontent.com/clemenko/fleet/main/gitrepo.yml > /dev/null 2>&1 262 | echo -e "$GREEN""ok" "$NO_COLOR" 263 | } 264 | 265 | ############################# demo ################################ 266 | function demo () { 267 | echo -e " demo-ing " 268 | 269 | # echo -e -n " - whoami ";kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/master/whoami.yml > /dev/null 2>&1; echo -e "$GREEN""ok" "$NO_COLOR" 270 | 271 | echo -e -n " - flask ";kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/master/flask_simple_nginx.yml > /dev/null 2>&1; echo -e "$GREEN""ok" "$NO_COLOR" 272 | 273 | # echo -e -n " - ghost ";kubectl apply -f https://raw.githubusercontent.com/clemenko/k8s_yaml/refs/heads/master/ghost.yaml > /dev/null 2>&1; echo -e "$GREEN""ok" "$NO_COLOR" 274 | 275 | echo -e -n " - minio " 276 | helm repo add minio https://charts.min.io/ --force-update 277 | #kubectl create ns minio > /dev/null 2>&1 278 | # 
kubectl -n minio create secret tls tls-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1 279 | 280 | helm upgrade -i minio minio/minio -n minio --set rootUser=admin,rootPassword=$password --create-namespace --set mode=standalone --set resources.requests.memory=1Gi --set persistence.size=10Gi --set mode=standalone --set ingress.enabled=true --set ingress.hosts[0]=s3.$domain --set consoleIngress.enabled=true --set consoleIngress.hosts[0]=minio.$domain --set ingress.annotations."nginx\.ingress\.kubernetes\.io/proxy-body-size"="1024m" --set consoleIngress.annotations."nginx\.ingress\.kubernetes\.io/proxy-body-size"="1024m" > /dev/null 2>&1 281 | echo -e "$GREEN""ok" "$NO_COLOR" 282 | # --set consoleIngress.tls[0].secretName=tls-ingress --set ingress.tls[0].secretName=tls-ingress 283 | 284 | echo -e -n " - gitness " 285 | curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/gitness.yaml | sed "s/rfed.xx/$domain/g" | kubectl apply -f - > /dev/null 2>&1 286 | echo -e "$GREEN""ok" "$NO_COLOR" 287 | 288 | echo -e -n " - harbor " 289 | # helm repo add harbor https://helm.goharbor.io --force-update 290 | kubectl create ns harbor > /dev/null 2>&1 291 | kubectl -n harbor create secret tls tls-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > /dev/null 2>&1 292 | 293 | helm upgrade -i harbor harbor/harbor -n harbor --create-namespace --set expose.tls.certSource=secret --set expose.tls.secret.secretName=tls-ingress --set expose.tls.enabled=false --set expose.tls.auto.commonName=harbor.$domain --set expose.ingress.hosts.core=harbor.$domain --set persistence.enabled=false --set harborAdminPassword=$password --set externalURL=http://harbor.$domain --set notary.enabled=false > /dev/null 2>&1; 294 | echo -e "$GREEN""ok" "$NO_COLOR" 295 | 296 | echo -e -n " - gitea " 297 | helm 
upgrade -i gitea oci://registry-1.docker.io/giteacharts/gitea -n gitea --create-namespace --set gitea.admin.password=$password --set gitea.admin.username=gitea --set persistence.size=500Mi --set ingress.enabled=true --set ingress.hosts[0].host=git.$domain --set ingress.hosts[0].paths[0].path=/ --set ingress.hosts[0].paths[0].pathType=Prefix --set gitea.config.server.DOMAIN=git.$domain --set postgresql-ha.enabled=false --set redis-cluster.enabled=false --set gitea.config.database.DB_TYPE=sqlite3 --set gitea.config.session.PROVIDER=memory --set gitea.config.cache.ADAPTER=memory --set gitea.config.queue.TYPE=level > /dev/null 2>&1 298 | 299 | # mirror github 300 | until [ $(curl -s http://git.$domain/explore/repos| grep "" | wc -l) = 1 ]; do sleep 2; echo -n "."; done 301 | 302 | sleep 5 303 | 304 | curl -X POST http://git.$domain/api/v1/repos/migrate -H 'accept: application/json' -H 'authorization: Basic Z2l0ZWE6UGEyMndvcmQ=' -H 'Content-Type: application/json' -d '{ "clone_addr": "https://github.com/clemenko/fleet", "repo_name": "fleet","repo_owner": "gitea"}' > /dev/null 2>&1 305 | echo -e "$GREEN""ok" "$NO_COLOR" 306 | 307 | # echo -e -n " - tailscale " 308 | # curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/tailscale.yaml | sed -e "s/XXX/$TAILSCALE_ID/g" -e "s/ZZZ/$TAILSCALE_TOKEN/g" | kubectl apply -f - > /dev/null 2>&1 309 | # echo -e "$GREEN""ok" "$NO_COLOR" 310 | } 311 | 312 | ################################ keycloak ############################## 313 | # helm repo add bitnami https://charts.bitnami.com/bitnami --force-update 314 | 315 | function keycloak () { 316 | 317 | KEY_URL=keycloak.$domain 318 | RANCHER_URL=rancher.$domain 319 | 320 | echo -e " keycloaking" 321 | echo -e -n " - deploying " 322 | 323 | kubectl create ns keycloak > /dev/null 2>&1 324 | kubectl -n keycloak create secret tls tls-ingress --cert=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.cert --key=/Users/clemenko/Dropbox/work/rfed.me/io/star.rfed.io.key > 
/dev/null 2>&1 325 | # kubectl -n keycloak create secret generic tls-ca --from-file=/Users/clemenko/Dropbox/work/rfed.me/io/cacerts.pem > /dev/null 2>&1 326 | 327 | curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/keycloak.yml | sed "s/rfed.xx/$domain/g" | kubectl apply -f - > /dev/null 2>&1 328 | 329 | #helm upgrade -i keycloak bitnami/keycloak --namespace keycloak --create-namespace --set auth.adminUser=admin --set auth.adminPassword=Pa22word > /dev/null 2>&1 330 | # --set ingress.enabled=true --set ingress.hostname=keycloak.$domain --set ingress.tls=true --set tls.enabled=true --set httpRelativePath="/" 331 | 332 | echo -e "$GREEN""ok" "$NO_COLOR" 333 | 334 | echo -e -n " - waiting for keycloak " 335 | 336 | until [ $(curl -sk https://$KEY_URL/ | grep "Administration Console" | wc -l) = 1 ]; do echo -e -n "." ; sleep 2; done 337 | echo -e "$GREEN"" ok" "$NO_COLOR" 338 | 339 | echo -e -n " - adding realm and client " 340 | 341 | # get auth token - notice keycloak's password 342 | export key_token=$(curl -sk -X POST https://$KEY_URL/realms/master/protocol/openid-connect/token -d 'client_id=admin-cli&username=admin&password='$password'&credentialId=&grant_type=password' | jq -r .access_token) 343 | 344 | # add realm 345 | curl -sk -X POST https://$KEY_URL/admin/realms -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"enabled":true,"id":"rancher","realm":"rancher"}' 346 | 347 | # add client 348 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"enabled":true,"attributes":{},"redirectUris":[],"clientId":"rancher","protocol":"openid-connect","publicClient": false,"redirectUris":["https://'$RANCHER_URL'/verify-auth"]}' 349 | #,"implicitFlowEnabled":true 350 | 351 | # get client id 352 | export client_id=$(curl 
-sk https://$KEY_URL/admin/realms/rancher/clients/ -H "authorization: Bearer $key_token" | jq -r '.[] | select(.clientId=="rancher") | .id') 353 | 354 | # get client_secret 355 | export client_secret=$(curl -sk https://$KEY_URL/admin/realms/rancher/clients/$client_id/client-secret -H "authorization: Bearer $key_token" | jq -r .value) 356 | 357 | # add mappers 358 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients/$client_id/protocol-mappers/models -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"protocol":"openid-connect","config":{"full.path":"true","id.token.claim":"false","access.token.claim":"false","userinfo.token.claim":"true","claim.name":"groups"},"name":"Groups Mapper","protocolMapper":"oidc-group-membership-mapper"}' 359 | 360 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients/$client_id/protocol-mappers/models -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"protocol":"openid-connect","config":{"id.token.claim":"false","access.token.claim":"true","included.client.audience":"rancher"},"name":"Client Audience","protocolMapper":"oidc-audience-mapper"}' 361 | 362 | curl -sk -X POST https://$KEY_URL/admin/realms/rancher/clients/$client_id/protocol-mappers/models -H "authorization: Bearer $key_token" -H 'accept: application/json, text/plain, */*' -H 'content-type: application/json;charset=UTF-8' -d '{"protocol":"openid-connect","config":{"full.path":"true","id.token.claim":"true","access.token.claim":"true","userinfo.token.claim":"true","claim.name":"full_group_path"},"name":"Group Path","protocolMapper":"oidc-group-membership-mapper"}' 363 | 364 | # add realm-managementview-users 365 | # get role id 366 | # role_ID=$(curl -sk -X GET https://$KEY_URL/admin/realms/rancher/roles -H "authorization: Bearer $key_token" | jq -r '.[] | 
select(.name=="default-roles-rancher") | .id') 367 | 368 | # curl -sk https://$KEY_URL/admin/realms/rancher/roles-by-id/$role_ID/composites -H "authorization: Bearer $key_token" -d '[{"id":"d8ef39c5-c8b6-4bcc-8010-7244b7e5cf4a","name":"view-users","description":"${role_view-users}"}]' 369 | 370 | # add groups admin / dev 371 | curl -k https://$KEY_URL/admin/realms/rancher/groups -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"name":"devs"}' 372 | 373 | curl -k https://$KEY_URL/admin/realms/rancher/groups -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"name":"admins"}' 374 | 375 | 376 | # add keycloak user clemenko / Pa22word 377 | curl -k 'https://'$KEY_URL'/admin/realms/rancher/users' -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"enabled":true,"attributes":{},"groups":["/devs"],"credentials":[{"type":"password","value":"'$password'","temporary":false}],"username":"clemenko","emailVerified":"","firstName":"Andy","lastName":"Clemenko"}' 378 | 379 | # add keycloak user admin / Pa22word 380 | curl -k 'https://'$KEY_URL'/admin/realms/rancher/users' -H 'Content-Type: application/json' -H "authorization: Bearer $key_token" -d '{"enabled":true,"attributes":{},"groups":["/admins", "/devs"],"credentials":[{"type":"password","value":"'$password'","temporary":false}],"username":"admin","emailVerified":"","firstName":"Admin","lastName":"Clemenko"}' 381 | 382 | echo -e "$GREEN""ok" "$NO_COLOR" 383 | 384 | echo -e -n " - configuring rancher " 385 | # configure rancher 386 | token=$(curl -sk -X POST https://$RANCHER_URL/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"'$password'"}' | jq -r .token) 387 | 388 | api_token=$(curl -sk https://$RANCHER_URL/v3/token -H 'content-type: application/json' -H "Authorization: Bearer $token" -d '{"type":"token","description":"automation"}' | jq -r .token) 389 | 390 | curl -sk -X 
PUT https://$RANCHER_URL/v3/keyCloakOIDCConfigs/keycloakoidc?action=testAndEnable -H 'accept: application/json' -H 'accept-language: en-US,en;q=0.9' -H 'content-type: application/json;charset=UTF-8' -H 'content-type: application/json' -H "Authorization: Bearer $api_token" -X PUT -d '{"enabled":true,"id":"keycloakoidc","name":"keycloakoidc","type":"keyCloakOIDCConfig","accessMode":"unrestricted","rancherUrl":"https://rancher.'$domain'/verify-auth","scope":"openid profile email","clientId":"rancher","clientSecret":"'$client_secret'","issuer":"https://keycloak.'$domain'/realms/rancher","authEndpoint":"https://'$KEY_URL'/realms/rancher/protocol/openid-connect/auth/"}' > /dev/null 2>&1 391 | 392 | # login with keycloak user - manual 393 | 394 | echo -e "$GREEN""ok" "$NO_COLOR" 395 | 396 | } 397 | 398 | ################################ stackrox ############################## 399 | function rox () { 400 | # https://github.com/stackrox/stackrox#quick-installation-using-helm 401 | # helm repo add stackrox https://raw.githubusercontent.com/stackrox/helm-charts/main/opensource/ --force-update 402 | 403 | echo -e -n " - stackrox " 404 | 405 | helm upgrade -i stackrox-central-services stackrox/stackrox-central-services -n stackrox --create-namespace --set central.adminPassword.value=$password --set central.resources.requests.memory=1Gi --set central.resources.limits.memory=2Gi --set central.db.resources.requests.memory=1Gi --set central.db.resources.limits.memory=2Gi --set scanner.autoscaling.disable=true --set scanner.replicas=1 --set scanner.resources.requests.memory=500Mi --set scanner.resources.limits.memory=2500Mi --set central.resources.requests.cpu=1 --set central.resources.limits.cpu=1 --set central.db.resources.requests.cpu=500m --set central.db.resources.limits.cpu=1 --set central.persistence.none=true --set central.db.persistence.persistentVolumeClaim.size=1Gi > /dev/null 2>&1 406 | 407 | #--set central.exposure.loadBalancer.enabled=true 408 | 409 | until [ $(kubectl 
get pod -n stackrox |grep Running| wc -l) = 4 ] ; do echo -e -n "." ; sleep 2; done 410 | 411 | cat <<EOF | kubectl apply -f - > /dev/null 2>&1 412 | apiVersion: networking.k8s.io/v1 413 | kind: Ingress 414 | metadata: 415 | name: stackrox 416 | namespace: stackrox 417 | annotations: 418 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 419 | spec: 420 | rules: 421 | - host: stackrox.$domain 422 | http: 423 | paths: 424 | - path: / 425 | pathType: Prefix 426 | backend: 427 | service: 428 | name: central 429 | port: 430 | number: 443 431 | EOF 432 | sleep 5 433 | 434 | export ROX_API_TOKEN=$(curl -sk -X POST -u admin:$password https://stackrox.$domain/v1/apitokens/generate -d '{"name":"admin","role":null,"roles":["Admin"]}'| jq -r .token) 435 | 436 | curl -ks https://stackrox.$domain/v1/cluster-init/init-bundles -H 'accept: application/json, text/plain, */*' -H "authorization: Bearer $ROX_API_TOKEN" -H 'content-type: application/json' -d '{"name":"rke2"}' |jq -r .helmValuesBundle | base64 -D > stackrox-init-bundle.yaml 437 | 438 | helm upgrade --install --create-namespace -n stackrox stackrox-secured-cluster-services stackrox/stackrox-secured-cluster-services -f stackrox-init-bundle.yaml --set clusterName=rke2 --set centralEndpoint="central.stackrox.svc:443" --set sensor.resources.requests.memory=500Mi --set sensor.resources.requests.cpu=500m --set sensor.resources.limits.memory=500Mi --set sensor.resources.limits.cpu=500m > /dev/null 2>&1 439 | 440 | rm -rf stackrox-init-bundle.yaml 441 | 442 | echo -e "$GREEN"" ok" "$NO_COLOR" 443 | } 444 | 445 | 446 | # PSA notes 447 | # kubectl label ns spark pod-security.kubernetes.io/audit=privileged pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=privileged -------------------------------------------------------------------------------- /harvester_rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # this does something with 
Rancher and Harvester 3 | 4 | # rancher url 5 | rancher_url=rancher.rfed.io 6 | 7 | # harvester vip/url 8 | harvester_url=192.168.1.4 9 | 10 | # get rancher token 11 | token=$(curl -sk -X POST https://$rancher_url/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"Pa22word"}' | jq -r .token) 12 | 13 | # create harvester connection 14 | curl -sk https://$rancher_url/v1/provisioning.cattle.io.clusters -H "Authorization: Bearer $token" -X POST -H 'content-type: application/json' -d '{"type":"provisioning.cattle.io.cluster","metadata":{"namespace":"fleet-default","name":"ms01","labels":{"provider.cattle.io":"harvester"}},"cachedHarvesterClusterVersion":"","spec":{"agentEnvVars":[]}}' 15 | 16 | # get client url 17 | client_url=$(kubectl get clusterregistrationtokens.management.cattle.io -n $(kubectl get ns | grep "c-m" | awk '{ print $1}') default-token -o json | jq -r .status.manifestUrl) 18 | 19 | # get harvester token 20 | token=$(curl -sk -X POST https://$harvester_url/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d '{"username":"admin","password":"Pa22word"}' | jq -r .token) 21 | 22 | # get kubeconfig for harvester 23 | curl -sk https://$harvester_url/v1/management.cattle.io.clusters/local?action=generateKubeconfig -H "Authorization: Bearer $token" -X POST -H 'content-type: application/json' | jq -r .config > $harvester_url.yaml 24 | 25 | # use kubeconfig for creating url 26 | export KUBECONFIG=$harvester_url.yaml 27 | 28 | # apply it 29 | cat <<EOF | kubectl --insecure-skip-tls-verify apply -f - > /dev/null 2>&1 30 | apiVersion: harvesterhci.io/v1beta1 31 | kind: Setting 32 | metadata: 33 | name: cluster-registration-url 34 | status: 35 | value: $client_url 36 | EOF 37 | 38 | # clean up 39 | unset KUBECONFIG 40 | rm -rf $harvester_url.yaml 41 | 42 | -------------------------------------------------------------------------------- /rke2.sh:
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # https://github.com/clemenko/rke2 4 | # this script assumes digitalocean is setup with DNS. 5 | # you need doctl, kubectl, uuid, jq, k3sup, pdsh and curl installed. 6 | # clemenko@gmail.com 7 | 8 | ################################### 9 | # edit vars 10 | ################################### 11 | set -e 12 | num=3 13 | password=Pa22word 14 | zone=nyc1 15 | size=s-4vcpu-8gb 16 | # s-8vcpu-16gb 17 | key=30:98:4f:c5:47:c2:88:28:fe:3c:23:cd:52:49:51:01 18 | domain=rfed.io 19 | 20 | #image=ubuntu-22-04-x64 21 | image=rockylinux-9-x64 22 | 23 | # rancher / k8s 24 | prefix=rke # no rke k3s 25 | k8s_version=v1.31 #latest 26 | # curl -s https://update.rke2.io/v1-release/channels | jq '.data[] | select(.id=="stable") | .latest' 27 | 28 | ###### NO MOAR EDITS ####### 29 | export PDSH_RCMD_TYPE=ssh 30 | 42 | source functions.sh 43 | 31 | #better error checking 32 | command -v doctl >/dev/null 2>&1 || { fatal "Doctl was not found. Please install" ; } 33 | command -v curl >/dev/null 2>&1 || { fatal "Curl was not found. Please install" ; } 34 | command -v jq >/dev/null 2>&1 || { fatal "Jq was not found. Please install" ; } 35 | command -v pdsh >/dev/null 2>&1 || { fatal "Pdsh was not found. Please install" ; } 36 | command -v k3sup >/dev/null 2>&1 || { fatal "K3sup was not found. Please install" ; } 37 | command -v kubectl >/dev/null 2>&1 || { fatal "Kubectl was not found. Please install" ; } 38 | 39 | #### doctl_list #### 40 | function dolist () { doctl compute droplet list --no-header|grep "$prefix" |sort -k 2; } 41 | 44 | # update helm 45 | helm repo update > /dev/null 2>&1 46 | 47 | ################################# up ################################ 48 | function up () { 49 | build_list="" 50 | # helm repo update > /dev/null 2>&1 51 | 52 | if [ -n "$(dolist)" ]; then 53 | fatal "Warning - cluster already detected..."
54 | exit 55 | fi 56 | 57 | #rando list generation 58 | for i in $(seq 1 $num); do build_list="$build_list $prefix$i"; done 59 | 60 | #build VMS 61 | echo -e -n " - building vms -$build_list" 62 | doctl compute droplet create $build_list --region $zone --image $image --size $size --ssh-keys $key --wait > /dev/null 2>&1 || fatal "vms did not build" 63 | info_ok 64 | 65 | #check for SSH 66 | echo -e -n " - checking for ssh " 67 | for ext in $(dolist | awk '{print $3}'); do 68 | until [ $(ssh -o ConnectTimeout=1 root@$ext 'exit' 2>&1 | grep 'timed out\|refused' | wc -l) = 0 ]; do echo -e -n "." ; sleep 5; done 69 | done 70 | info_ok 71 | 72 | #get ips 73 | host_list=$(dolist | awk '{printf $3","}' | sed 's/,$//') 74 | server=$(dolist | sed -n 1p | awk '{print $3}') 75 | worker_list=$(dolist | sed 1d | awk '{printf $3","}' | sed 's/,$//') 76 | 77 | #update DNS 78 | echo -e -n " - updating dns" 79 | doctl compute domain records create $domain --record-type A --record-name $prefix --record-ttl 60 --record-data $server > /dev/null 2>&1 80 | doctl compute domain records create $domain --record-type CNAME --record-name "*" --record-ttl 60 --record-data $prefix.$domain. 
> /dev/null 2>&1 81 | info_ok 82 | 83 | sleep 10 84 | 85 | #host modifications 86 | if [[ "$image" = *"ubuntu"* ]]; then 87 | echo -e -n " - adding os packages" 88 | pdsh -l root -w $host_list 'mkdir -p /opt/kube; systemctl stop ufw; systemctl disable ufw; echo -e "PubkeyAcceptedKeyTypes=+ssh-rsa" >> /etc/ssh/sshd_config; systemctl restart sshd; export DEBIAN_FRONTEND=noninteractive; apt update; apt install nfs-common -y; #apt upgrade -y; apt autoremove -y' > /dev/null 2>&1 89 | info_ok 90 | fi 91 | 92 | if [[ "$image" = *"centos"* || "$image" = *"rocky"* || "$image" = *"alma"* ]]; then centos_packages; fi 93 | 94 | #kernel tuning from functions 95 | kernel 96 | 97 | #or deploy k3s 98 | if [ "$prefix" != k3s ] && [ "$prefix" != rke ]; then exit; fi 99 | 100 | if [ "$prefix" = k3s ]; then 101 | echo -e -n " - deploying k3s" 102 | k3sup install --ip $server --user root --cluster --k3s-extra-args '' --k3s-channel $k8s_version --local-path ~/.kube/config > /dev/null 2>&1 103 | # --k3s-extra-args '--disable traefik' 104 | 105 | for workeri in $(dolist | sed 1d | awk '{print $3}'); do 106 | k3sup join --ip $workeri --server-ip $server --user root --k3s-extra-args '' --k3s-channel $k8s_version > /dev/null 2>&1 107 | done 108 | 109 | info_ok 110 | fi 111 | 112 | #or deploy rke2 113 | if [ "$prefix" = rke ]; then 114 | echo -e -n "$BLUE" "deploying rke2" "$NO_COLOR" 115 | 116 | # systemctl disable nm-cloud-setup.service nm-cloud-setup.timer 117 | 118 | ssh root@$server 'mkdir -p /var/lib/rancher/rke2/server/manifests/ /etc/rancher/rke2/; useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U; echo -e "apiVersion: audit.k8s.io/v1\nkind: Policy\nmetadata:\n name: rke2-audit-policy\nrules:\n - level: Metadata\n resources:\n - group: \"\"\n resources: [\"secrets\"]\n - level: RequestResponse\n resources:\n - group: \"\"\n resources: [\"*\"]" > /etc/rancher/rke2/audit-policy.yaml; echo -e "#profile: cis\n#selinux: true\nsecrets-encryption: true\ntoken: 
bootstrapAllTheThings\ntls-san:\n- rke."'$domain'"\nwrite-kubeconfig-mode: 0600\n#pod-security-admission-config-file: /etc/rancher/rke2/rancher-psact.yaml\nkube-controller-manager-arg:\n- bind-address=127.0.0.1\n- use-service-account-credentials=true\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\nkube-scheduler-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\nkube-apiserver-arg:\n- tls-min-version=VersionTLS12\n- tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\n- authorization-mode=RBAC,Node\n- anonymous-auth=false\n- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml\n- audit-log-mode=blocking-strict\n- audit-log-maxage=30\nkubelet-arg:\n- kube-reserved=cpu=400m,memory=1Gi\n- system-reserved=cpu=400m,memory=1Gi\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook\n- streaming-connection-idle-timeout=5m\n- max-pods=400" > /etc/rancher/rke2/config.yaml; curl -s https://raw.githubusercontent.com/clemenko/k8s_yaml/master/rancher-psact.yaml -o /etc/rancher/rke2/rancher-psact.yaml ; curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL='$k8s_version' sh - ; systemctl enable --now rke2-server.service' > /dev/null 2>&1 119 | 120 | # ssl-passthrough 121 | # echo -e "apiVersion: helm.cattle.io/v1\nkind: HelmChartConfig\nmetadata:\n name: rke2-ingress-nginx\n namespace: kube-system\nspec:\n valuesContent: |-\n controller:\n config:\n use-forwarded-headers: true\n extraArgs:\n enable-ssl-passthrough: true" > /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml; 122 | 123 | # INSTALL_RKE2_VERSION=v1.24.4+rke2r1 124 | 125 | sleep 15 126 | 127 | pdsh -l root -w $worker_list 'curl -sfL 
https://get.rke2.io | INSTALL_RKE2_CHANNEL='$k8s_version' INSTALL_RKE2_TYPE=agent sh - && echo -e "selinux: true\nserver: https://"'$server'":9345\ntoken: bootstrapAllTheThings\nprofile: cis\nkubelet-arg:\n- protect-kernel-defaults=true\n- read-only-port=0\n- authorization-mode=Webhook" > /etc/rancher/rke2/config.yaml; systemctl enable --now rke2-agent.service' > /dev/null 2>&1 128 | 129 | ssh root@$server cat /etc/rancher/rke2/rke2.yaml | sed -e "s/127.0.0.1/$server/g" > ~/.kube/config 130 | chmod 0600 ~/.kube/config 131 | 132 | info_ok 133 | fi 134 | 135 | echo -e -n " - cluster active " 136 | sleep 10 137 | until [ $(kubectl get node|grep NotReady|wc -l) = 0 ]; do echo -e -n "."; sleep 2; done 138 | sleep 10 139 | info_ok 140 | } 141 | 142 | ############################## kill ################################ 143 | #remove the vms 144 | function kill () { 145 | 146 | if [ -n "$(dolist | awk '{printf $3","}' | sed 's/,$//')" ]; then 147 | echo -e -n " killing it all " 148 | for i in $(dolist | awk '{print $2}'); do doctl compute droplet delete --force $i; done 149 | for i in $(dolist | awk '{print $3}'); do ssh-keygen -q -R $i > /dev/null 2>&1; done 150 | for i in $(doctl compute domain records list $domain|grep "$prefix" |awk '{print $1}'); do doctl compute domain records delete -f $domain $i; done 151 | until [ $(dolist | wc -l | sed 's/ //g') == 0 ]; do echo -e -n "."; sleep 2; done 152 | for i in $(doctl compute volume list --no-header |awk '{print $1}'); do doctl compute volume delete -f $i; done 153 | 154 | rm -rf *.txt *.log *.zip *.pub env.* certs backup.tar ~/.kube/config central* sensor* *token kubeconfig *TOKEN 155 | 156 | else 157 | echo -e -n " no cluster found " 158 | fi 159 | 160 | info_ok 161 | } 162 | 163 | case "$1" in 164 | up) up;; 165 | kill) kill;; 166 | neu) neu;; 167 | dolist) dolist;; 168 | keycloak) keycloak;; 169 | longhorn) longhorn;; 170 | rancher) rancher;; 171 | demo) demo;; 172 | fleet) fleet;; 173 | rox) rox;; 174 | *) usage;; 175 |
esac 176 | -------------------------------------------------------------------------------- /stigs_tldr.md: -------------------------------------------------------------------------------- 1 | # STIG's tl:dr 2 | 3 | Simply go to https://public.cyber.mil/stigs/downloads/ and search for `rancher`. 4 | 5 | Direct Downloads 6 | 7 | - https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_RGS_MCM_V1R3_STIG.zip 8 | - https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_RGS_RKE2_V1R5_STIG.zip 9 | - https://public.cyber.mil/stigs/srg-stig-tools/ 10 | 11 | ## RKE2 STIG tl:dr 12 | 13 | ### config.yaml 14 | 15 | `/etc/rancher/rke2/config.yaml` 16 | 17 | ```yaml 18 | profile: cis # for 1.28 and older cis-1.23 19 | selinux: true 20 | secrets-encryption: true 21 | token: bootstrapAllTheThings 22 | tls-san: 23 | - rke.rfed.io 24 | write-kubeconfig-mode: 0600 25 | use-service-account-credentials: true 26 | pod-security-admission-config-file: /etc/rancher/rke2/rancher-psact.yaml 27 | kube-controller-manager-arg: 28 | - bind-address=127.0.0.1 29 | - use-service-account-credentials=true 30 | - tls-min-version=VersionTLS12 31 | - tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 32 | kube-scheduler-arg: 33 | - tls-min-version=VersionTLS12 34 | - tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 35 | kube-apiserver-arg: 36 | - tls-min-version=VersionTLS12 37 | - tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 38 | - authorization-mode=RBAC,Node 39 | - anonymous-auth=false 40 | - audit-policy-file=/etc/rancher/rke2/audit-policy.yaml 41 | - audit-log-mode=blocking-strict 42 | - audit-log-maxage=30 43 | - audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log 44 | kubelet-arg: 45 | - kube-reserved=cpu=400m,memory=1Gi 46 | - 
system-reserved=cpu=400m,memory=1Gi 47 | - protect-kernel-defaults=true 48 | - read-only-port=0 49 | - authorization-mode=Webhook 50 | - streaming-connection-idle-timeout=5m 51 | - max-pods=400 52 | ``` 53 | 54 | ### Audit Policy 55 | 56 | `/etc/rancher/rke2/audit-policy.yaml` 57 | ```yaml 58 | apiVersion: audit.k8s.io/v1 59 | kind: Policy 60 | metadata: 61 | name: rke2-audit-policy 62 | rules: 63 | - level: Metadata 64 | resources: 65 | - group: "" 66 | resources: ["secrets"] 67 | - level: RequestResponse 68 | resources: 69 | - group: "" 70 | resources: ["*"] 71 | ``` 72 | 73 | ### PSA 74 | 75 | Keep in mind that the namespace can be labeled for the correct PSP after it is created. 76 | 77 | `kubectl label ns $NAMESPACE pod-security.kubernetes.io/audit=privileged pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=privileged` 78 | 79 | OR add it to `/etc/rancher/rke2/rancher-psact.yaml`. 80 | 81 | ```yaml 82 | apiVersion: apiserver.config.k8s.io/v1 83 | kind: AdmissionConfiguration 84 | plugins: 85 | - name: PodSecurity 86 | configuration: 87 | apiVersion: pod-security.admission.config.k8s.io/v1 88 | kind: PodSecurityConfiguration 89 | defaults: 90 | enforce: "restricted" 91 | enforce-version: "latest" 92 | audit: "restricted" 93 | audit-version: "latest" 94 | warn: "restricted" 95 | warn-version: "latest" 96 | exemptions: 97 | usernames: [] 98 | runtimeClasses: [] 99 | namespaces: [calico-apiserver, 100 | calico-system, 101 | carbide-docs-system, 102 | carbide-stigatron-system, 103 | cattle-alerting, 104 | cattle-csp-adapter-system, 105 | cattle-elemental-system, 106 | cattle-epinio-system, 107 | cattle-externalip-system, 108 | cattle-fleet-local-system, 109 | cattle-fleet-system, 110 | cattle-gatekeeper-system, 111 | cattle-global-data, 112 | cattle-global-nt, 113 | cattle-impersonation-system, 114 | cattle-istio, 115 | cattle-istio-system, 116 | cattle-logging, 117 | cattle-logging-system, 118 | cattle-monitoring-system, 119 | 
cattle-neuvector-system, 120 | cattle-prometheus, 121 | cattle-provisioning-capi-system, 122 | cattle-resources-system, 123 | cattle-sriov-system, 124 | cattle-system, 125 | cattle-ui-plugin-system, 126 | cattle-windows-gmsa-system, 127 | cert-manager, 128 | cis-operator-system, 129 | fleet-default, 130 | fleet-local, 131 | ingress-nginx, 132 | istio-system, 133 | kube-node-lease, 134 | kube-public, 135 | kube-system, 136 | longhorn-system, 137 | rancher-alerting-drivers, 138 | security-scan, 139 | tigera-operator, 140 | neuvector, 141 | flask, 142 | ghost, 143 | kubecon, 144 | minio, 145 | whoami, 146 | harbor, 147 | gitea, 148 | tailscale, 149 | gitness, 150 | stackrox, 151 | keycloak] 152 | ``` 153 | 154 | ## Rancher STIG tl:dr 155 | 156 | From 157 | - https://ranchermanager.docs.rancher.com/v2.8/how-to-guides/advanced-user-guides/enable-api-audit-log 158 | - https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/installation-references/helm-chart-options#advanced-options 159 | 160 | ```bash 161 | helm upgrade -i rancher rancher-latest/rancher -n cattle-system --create-namespace --set hostname=rancher.$domain --set bootstrapPassword=bootStrapAllTheThings --set auditLog.level=2 --set auditLog.destination=hostPath --set auditLog.hostPath=/var/log/rancher/audit --set auditLog.maxAge=30 --set antiAffinity=required --------------------------------------------------------------------------------