├── README.md ├── deploy-eck.sh ├── deploy-eck.sh.20220824 ├── deploy-elastic-airgapped.sh ├── deploy-elastic.sh ├── deploy-elastic.sh-fleet-notoken ├── deploy-elastic.sh.20220521 ├── deploy-elastick8s.sh ├── expectdo.exp ├── forking.sh ├── gcp-ecklab.sh ├── gcp-postinstall-ecklab.sh ├── gcp-postinstall-pre.ps1 ├── gcp-postinstall.ps1 ├── gcp-postinstall.sh ├── gcp-postinstall.sh.20240519 ├── gcp.sh ├── gke.sh ├── gke.sh-backup-20221207 ├── kube-ecklab.sh ├── kube.sh ├── kube.sh.20240519 ├── minio.sh ├── mywiki.sh ├── netbackup-split-per-OS.sh ├── netinfo.sh ├── runscript.sh ├── serverinfo.sh ├── shard_counter.sh ├── supportlab-clean.sh ├── wildcard.sh ├── windows-finish.ps1 └── wwn.sh /README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jlim0930/scripts/f6a582e9711e578ba6f4b094b1338e62d790bf40/README.md -------------------------------------------------------------------------------- /deploy-eck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # justin lim 4 | # 5 | # version 1.0 6 | 7 | ################## 8 | # 9 | # this script is no longer being maintained. 
Please use the new script curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/deploy-elastick8s.sh -o deploy-elastick8s.sh 10 | # 11 | ################# 12 | # curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/deploy-eck.sh -o deploy-eck.sh 13 | # 14 | # NOTES 15 | # eck 1.2+ 16 | # start of elasticsearchRef 17 | # ES 6.8+ & 7.1+ 18 | # Beats 7.0+ 19 | # entsearch 7.7+ 20 | # all-in-one operator 21 | # eck 1.4+ 22 | # ES 7.17.3+ 23 | # eck 1.6+ 24 | # elastic-agent 7.10+ 25 | # elastic map server 7.11+ 26 | # eck 1.7+ 27 | # crds.yaml & operator.yaml 28 | # fleet 7.14 29 | # sidecar container stack monitoring started with ES 7.14 30 | # eck 1.9+ 31 | # helm 3.2.0 32 | # eck 2.1+ 33 | # kibana configuration becomre longer and more specific for fleet server 34 | # eck 2.2 35 | # ES 8.0+ 36 | 37 | # Starting 7.17 stack container image changed to ubuntu - some fixes are needed due to this 38 | 39 | 40 | # set WORKDIR 41 | WORKDIR="${HOME}/eckstack" 42 | 43 | ############################################################################################################### 44 | # colors 45 | red=`tput setaf 1` 46 | green=`tput setaf 2` 47 | blue=`tput setaf 4` 48 | reset=`tput sgr0` 49 | 50 | ############################################################################################################### 51 | # help 52 | help() 53 | { 54 | echo "" 55 | echo "${green}This script is limited to ECK Operator 1.4.0+ & Stack 7.10.0+." 56 | echo "${green} - various commands have additional limitations that will be listed below." 
57 | echo "" 58 | echo "${green}USAGE:${reset} ./`basename $0` command STACKversion ECKversion" 59 | echo "" 60 | echo "${blue}COMMANDS:${reset}" 61 | echo " ${green}operator${reset} - will just stand up the operator only and apply a trial license" 62 | echo " ${green}stack|start|build${reset} - will stand up the ECK Operator, elasticsearch, & kibana with CLUSTER name : ${blue}eck-lab${reset}" 63 | echo " ${green}dedicated${reset} - will stand up the ECK Operator, elasticsearch, & kibana with CLUSTER name : ${blue}eck-lab${reset} with 3 dedicated masters and 3 dedicated data nodes" 64 | echo " ${green}beats${reset} - will stand up the basic stack + filebeat, metricbeat, packetbeat, & heartbeat" 65 | echo " ${green}monitor1${reset} - will stand up the basic stack named ${blue}eck-lab${reset} and a monitoring stack named ${blue}eck-lab-monitor${reset}, filebeat, & metricbeat as PODS to report stack monitoring to ${blue}eck-lab-monitor${reset}" 66 | echo " ${green}monitor2${reset} - will be the same as ${blue}monitor1${reset} however both filebeat & metricbeat will be a sidecar container inside of elasticsearch & kibana Pods. Limited to ECK ${blue}1.7.0+${reset} & STACK ${blue}7.14.0+${reset}" 67 | echo " ${green}fleet${reset} - will stand up the basic stack + FLEET Server & elastic-agent as DaemonSet on each ECK node." 
###############################################################################
# helper functions

# cleanup
# Deletes every kubernetes resource created from the yaml manifests stored in
# ${WORKDIR} (newest file first so dependents are removed before what they
# reference), then removes ${WORKDIR} itself.
# Globals read: WORKDIR, green, blue, reset
cleanup()
{
  # make sure to name all yaml files as .yaml so that it can be picked up during cleanup
  echo ""
  echo "${green}********** Cleaning up **********${reset}"
  echo ""

  for item in `ls -1t ${WORKDIR}/*.yaml 2>/dev/null`
  do
    echo "${green}[DEBUG]${reset} DELETING Resources for: ${blue}${item}${reset}"
    kubectl delete -f "${item}" > /dev/null 2>&1
  done

  rm -rf "${WORKDIR}" > /dev/null 2>&1
  echo ""
  echo "${green}[DEBUG]${reset} All cleanedup"
  echo ""
} # end of cleanup function

# createsummary CLUSTERNAME
# Waits for the elastic-user secret and the LoadBalancer endpoints of
# elasticsearch & kibana for cluster $1, appends them to ./notes, and for the
# primary cluster (eck-lab) extracts the CA certificate to ./ca.crt.
# NOTE(review): assumes the current directory is ${WORKDIR} (checkdir cd's
# there) so "notes"/"ca.crt" land in the work directory — confirm call order.
createsummary()
{
  unset PASSWORD
  while [ "${PASSWORD}" = "" ]
  do
    PASSWORD=$(kubectl get secret ${1}-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')
    echo "${green}[DEBUG]${reset} Grabbing elastic password for ${1}: ${blue}${PASSWORD}${reset}"
  done
  echo "${1} elastic password: ${PASSWORD}" >> notes

  unset ESIP
  while [ "${ESIP}" = "" ]
  do
    ESIP=`kubectl get service | grep ${1}-es-http | awk '{ print $4 }'`
    echo "${green}[DEBUG]${reset} Grabbing elasticsearch endpoint for ${1}: ${blue}https://${ESIP}:9200${reset}"
    sleep 2   # throttle the poll instead of hammering the API server
  done
  echo "${1} elasticsearch endpoint: https://${ESIP}:9200" >> notes

  unset KIBANAIP
  # BUGFIX: the second test duplicated the first (""/"" copy-paste). A
  # LoadBalancer service without an external IP reports "<pending>" in the
  # EXTERNAL-IP column, so keep waiting in that case too.
  while [ "${KIBANAIP}" = "" -o "${KIBANAIP}" = "<pending>" ]
  do
    KIBANAIP=`kubectl get service | grep ${1}-kb-http | awk '{ print $4 }'`
    echo "${green}[DEBUG]${reset} Grabbing kibana endpoint for ${1}: ${blue}https://${KIBANAIP}:5601${reset}"
    sleep 2
  done
  echo "${1} kibana endpoint: https://${KIBANAIP}:5601" >> notes

  if [ "${1}" = "eck-lab" ]; then
    kubectl get secrets ${1}-es-http-certs-public -o jsonpath="{.data.ca\.crt}" | base64 -d > ca.crt
  fi

  echo ""
}

# summary
# Prints a cluster overview plus the endpoint/password info collected in
# ${WORKDIR}/notes, colorizing the value half of each "key: value" line.
summary()
{
  echo ""
  echo "${green}[SUMMARY]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset}"
  echo ""
  kubectl get all
  echo ""
  echo "${green}[SUMMARY]${reset} STACK INFO:"
  # -r keeps backslashes literal; IFS= preserves leading whitespace
  while IFS= read -r line
  do
    string1=`echo $line | awk -F": " '{ print $1 }'`
    string2=`echo $line | awk -F": " '{ print $2 }'`
    echo "${string1}: ${blue}${string2}${reset}"
  done < "${WORKDIR}/notes"

  echo ""
  echo "${green}[SUMMARY]${reset} ${blue}ca.crt${reset} is located in ${blue}${WORKDIR}/ca.crt${reset}"
  echo ""
  echo "${green}[NOTE]${reset} If you missed the summary its also in ${blue}${WORKDIR}/notes${reset}"
  echo "${green}[NOTE]${reset} You can start logging into kibana but please give things few minutes for proper startup and letting components settle down."
  echo ""
}

# checkjq - abort unless the jq binary is installed and executable
checkjq()
{
  if ! [ -x "$(command -v jq)" ]; then
    echo "${red}[DEBUG]${reset} jq is not installed. Please install jq and try again"
    # BUGFIX: bare "exit" returned the echo's status (0); signal failure
    exit 1
  fi
} # end of checkjq

# checkkubectl - abort unless kubectl is installed AND can reach a cluster
checkkubectl()
{
  if [ `kubectl version 2>/dev/null | grep -c "Client Version"` -lt 1 ]; then
    echo "${red}[DEBUG]${reset} kubectl is not installed. Please install kubectl and try again"
    exit 1
  fi
  if [ `kubectl version 2>/dev/null | grep -c "Server Version"` -lt 1 ]; then
    echo "${red}[DEBUG]${reset} kubectl is not connecting to any kubernetes environment"
    echo "${red}[DEBUG]${reset} if you did not setup your k8s environment. Please configure your kubernetes environment and try again"
    exit 1
  fi
} # end checkubectl

# checkversion X.Y.Z
# Emits a zero-padded integer (e.g. 1.7.0 -> 1007000000) so versions can be
# compared numerically with -lt / -ge.
checkversion()
{
  echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'
} # end of checkversion function
# checkdir
# Aborts if ${WORKDIR} already exists (a previous run must be cleaned up
# first); otherwise creates the work directory layout, cd's into it, and
# records the requested stack/operator versions in VERSION / ECKVERSION.
# Globals read: WORKDIR, VERSION, ECKVERSION, red, blue, reset
checkdir()
{
  # check to see if the directory exists should not since this is the start
  if [ -d "${WORKDIR}" ]; then
    echo "${red}[DEBUG]${reset} Looks like ${WORKDIR} already exists."
    echo "${red}[DEBUG]${reset} Please run ${blue}`basename $0` cleanup${reset} before trying again"
    echo ""
    help
    # BUGFIX: bare "exit" returned the echo's status (0); signal failure
    exit 1
  fi # end of if to check if WORKDIR exist

  # create directorys and files
  mkdir -p "${WORKDIR}"
  # BUGFIX: an unchecked cd would leave later file writes in the wrong place
  cd "${WORKDIR}" || exit 1
  mkdir temp
  echo ${VERSION} > VERSION
  echo ${ECKVERSION} > ECKVERSION

} # checkdir

# checkhealth TYPE NAME
# Polls `kubectl get TYPE` every 20 seconds until the row whose first column
# is NAME reports "green" in its HEALTH column (column 2).
# NOTE(review): no timeout — the user is expected to CTRL-C on a hang, as the
# progress message says.
checkhealth() {
  sleep 3
  while true
  do
    if [ "`kubectl get ${1} | grep "${2} " | awk '{ print $2 }'`" = "green" ]; then
      sleep 2
      echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} is ${green}HEALTHY${reset}"
      echo ""
      kubectl get ${1}
      echo ""
      break
    else
      echo "${red}[DEBUG]${reset} ${1} is starting. Checking again in 20 seconds. If this does not finish in few minutes something is wrong. CTRL-C please"
      sleep 20
    fi
  done
} # end checkhealth
get all-in-one.yaml - check network/version?" 253 | echo "" 254 | help 255 | exit 256 | fi 257 | else # if eckversion is not less than 1.7.0 258 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading crds: crds.yaml" 259 | if curl -fsSL https://download.elastic.co/downloads/eck/${ECKVERSION}/crds.yaml -o crds.yaml; then 260 | kubectl create -f crds.yaml > /dev/null 2>&1 261 | else 262 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get crds.yaml - check network/version?" 263 | echo "" 264 | help 265 | exit 266 | fi 267 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading operator: operator.yaml" 268 | if curl -fsSL https://download.elastic.co/downloads/eck/${ECKVERSION}/operator.yaml -o operator.yaml; then 269 | kubectl create -f operator.yaml > /dev/null 2>&1 270 | else 271 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get operator.yaml - check network/version?" 272 | echo "" 273 | help 274 | exit 275 | fi 276 | fi 277 | 278 | while true 279 | do 280 | if [ "`kubectl -n elastic-system get pod | grep elastic-operator | awk '{ print $3 }'`" = "Running" ]; then 281 | sleep 2 282 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} OPERATOR is ${green}HEALTHY${reset}" 283 | echo "" 284 | kubectl -n elastic-system get all 285 | echo "" 286 | break 287 | else 288 | echo "${red}[DEBUG]${reset} ECK Operator is starting. Checking again in 20 seconds. If the operator does not goto Running status in few minutes something is wrong. 
CTRL-C please" 289 | # kubectl -n elastic-system get pod 290 | echo "" 291 | sleep 20 292 | fi 293 | done 294 | 295 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Creating license.yaml" 296 | # apply trial licence 297 | cat >>license.yaml< /dev/null 2>&1 310 | # sleep 30 311 | # kubectl -n elastic-system get configmap elastic-licensing -o json | jq -r '.data' 312 | } # end of operator 313 | 314 | ############################################################################################################### 315 | # stack 316 | stack() 317 | { 318 | echo "" 319 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} CLUSTER ${blue}${1}${reset} **************${reset}" 320 | echo "" 321 | 322 | # create elasticsearch.yaml 323 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml" 324 | cat >> elasticsearch-${1}.yaml < /dev/null 2>&1 357 | 358 | # checkeshealth 359 | checkhealth "elasticsearch" "${1}" 360 | 361 | # create kibana.yaml 362 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating kibana.yaml" 363 | cat >> kibana-${1}.yaml < /dev/null 2>&1 386 | 387 | #checkkbhealth 388 | checkhealth "kibana" "${1}" 389 | 390 | createsummary ${1} 391 | 392 | } # end of stack 393 | 394 | ############################################################################################################### 395 | # dedicated 396 | dedicated() 397 | { 398 | echo "" 399 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} CLUSTER ${blue}${1}${reset} with DEDICATED masters and data nodes **************${reset}" 400 | echo "" 401 | 402 | # create elasticsearch.yaml 403 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating 
elasticsearch.yaml" 404 | cat >> elasticsearch-${1}.yaml < /dev/null 2>&1 453 | 454 | # checkeshealth 455 | checkhealth "elasticsearch" "${1}" 456 | 457 | # create kibana.yaml 458 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating kibana.yaml" 459 | cat >> kibana-${1}.yaml < /dev/null 2>&1 482 | 483 | #checkkbhealth 484 | checkhealth "kibana" "${1}" 485 | 486 | createsummary ${1} 487 | 488 | } # end of dedicated 489 | 490 | ############################################################################################################### 491 | # filebeat autodiscover & metricbeat hosts as daemonset onto k8s hosts 492 | beats() 493 | { 494 | echo "" 495 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} with BEATS **************${reset}" 496 | echo "" 497 | 498 | # Create and apply metricbeat-rbac 499 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Creating BEATS crds" 500 | cat >> beats-crds.yaml< /dev/null 2>&1 601 | 602 | # Create and apply metricbeat-rbac 603 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Creating BEATS" 604 | cat >> beats.yaml< /dev/null 2>&1 838 | 839 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} filebeat, metricbeat, packetbeat, & heartbeat deployed" 840 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Please wait a few minutes for the beats to become healthy. 
(it will restart 3-4 times before it becomes healthy) & for the data to start showing" 841 | #sleep 30 842 | #echo "" 843 | #kubectl get daemonset 844 | echo "" 845 | 846 | } 847 | 848 | ############################################################################################################### 849 | # stack monitoring - beats in pods 850 | monitor1() 851 | { 852 | echo "" 853 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} Stack Monitoring with BEATS in Pods **************${reset}" 854 | echo "" 855 | 856 | # remove labels from eck-lab-montor pods 857 | # is this needed? 858 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Removing scrape label from monitoring pods" 859 | for item in `kubectl get pods --no-headers -o custom-columns=":metadata.name" | grep eck-lab-monitor` 860 | do 861 | kubectl label pod ${item} scrape- > /dev/null 2>&1 862 | done 863 | sleep 10 864 | 865 | # Create and apply monitor1.yaml 866 | cat >> monitor1.yaml< /dev/null 2>&1 1081 | 1082 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Stack monitoring with BEATS in PODS deployed" 1083 | 1084 | #echo "" 1085 | #kubectl get daemonset 1086 | echo "" 1087 | } 1088 | ############################################################################################################### 1089 | # stack monitoring - side car 1090 | monitor2() 1091 | { 1092 | echo "" 1093 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} Stack Monitoring with BEATS in sidecar containers **************${reset}" 1094 | echo "" 1095 | 1096 | # create elasticsearch-eck-lab.yaml 1097 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml" 1098 | cat >> monitor2.yaml < /dev/null 2>&1 1163 | 1164 | #checkkbhealth 1165 | checkhealth 
"kibana" "${1}" 1166 | 1167 | createsummary "${1}" 1168 | echo "" 1169 | 1170 | # notes 1171 | # you can create a normal deployment and patch it using kubectl patch kibana eck-lab --type merge -p '{"spec":{"monitoring":{"logs":{"elasticsearchRefs":[{"name":"eck-lab-monitor"}]},"metrics":{"elasticsearchRefs":[{"name":"eck-lab-monitor"}]}}}}' to change it to sidecar monitoring 1172 | # 1173 | 1174 | } 1175 | ############################################################################################################### 1176 | # fleet server 1177 | fleet() 1178 | { 1179 | echo "" 1180 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} Fleet Server & elastic-agent **************${reset}" 1181 | echo "" 1182 | 1183 | # patch kibana 1184 | echo "${green}[DEBUG]${reset} Patching kibana to set fleet settings" 1185 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "2.1.0") ]; then 1186 | kubectl patch kibana eck-lab --type merge -p '{"spec":{"config":{"xpack.fleet.agentPolicies":[{"is_default_fleet_server":true,"name":"Default Fleet Server on ECK policy","package_policies":[{"name":"fleet_server-1","package":{"name":"fleet_server"}}]},{"is_default":true,"name":"Default Elastic Agent on ECK policy","package_policies":[{"name":"system-1","package":{"name":"system"}},{"name":"kubernetes-1","package":{"name":"kubernetes"}}],"unenroll_timeout":900}],"xpack.fleet.agents.elasticsearch.host":"https://eck-lab-es-http.default.svc:9200","xpack.fleet.agents.fleet_server.hosts":["https://fleet-server-agent-http.default.svc:8220"],"xpack.fleet.packages":[{"name":"kubernetes","version":"latest"}]}}}' 1187 | elif [ $(checkversion $ECKVERSION) -ge $(checkversion "2.1.0") ]; then 1188 | kubectl patch kibana eck-lab --type merge -p '{"spec":{"config":{"xpack.fleet.agentPolicies":[{"id":"eck-fleet-server","is_default_fleet_server":true,"monitoring_enabled":["logs","metrics"],"name":"Fleet Server on ECK 
policy","namespace":"default","package_policies":[{"id":"fleet_server-1","name":"fleet_server-1","package":{"name":"fleet_server"}}]},{"id":"eck-agent","is_default":true,"monitoring_enabled":["logs","metrics"],"name":"Elastic Agent on ECK policy","namespace":"default","package_policies":[{"name":"system-1","package":{"name":"system"}},{"name":"kubernetes-1","package":{"name":"kubernetes"}}],"unenroll_timeout":900}],"xpack.fleet.agents.elasticsearch.host":"https://eck-lab-es-http.default.svc:9200","xpack.fleet.agents.fleet_server.hosts":["https://fleet-server-agent-http.default.svc:8220"],"xpack.fleet.packages":[{"name":"system","version":"latest"},{"name":"elastic_agent","version":"latest"},{"name":"fleet_server","version":"latest"},{"name":"kubernetes","version":"0.14.0"}]}}}' > /dev/null 2>&1 1189 | fi 1190 | echo "${green}[DEBUG]${reset} Sleeping for 60 seconds to wait for kibana to be updated with the patch" 1191 | sleep 60 & # no healthchecks on fleet so just going to sleep for 60 1192 | while kill -0 $! >/dev/null 2>&1 1193 | do 1194 | echo -n "." 
1195 | sleep 2 1196 | done 1197 | echo "" 1198 | 1199 | # create fleet-server.yaml 1200 | echo "${green}[DEBUG]${reset} Creating fleet.yaml" 1201 | cat >> fleet.yaml<> fleet.yaml<> fleet.yaml< /dev/null 2>&1 1449 | 1450 | # checkfleethealth 1451 | checkhealth "agent" "elastic-agent" 1452 | 1453 | # get fleet url 1454 | unset FLEETIP 1455 | while [ "${FLEETIP}" = "" -o "${FLEETIP}" = "" ] 1456 | do 1457 | FLEETIP=`kubectl get service | grep fleet-server-agent-http | awk '{ print $4 }'` 1458 | echo "${green}[DEBUG]${reset} Grabbing Fleet Server endpoint (external): ${blue}https://${FLEETIP}:8220${reset}" 1459 | sleep 2 1460 | done 1461 | echo "${1} Fleet Server endpoint: https://${FLEETIP}:8220" >> notes 1462 | 1463 | 1464 | #### 1465 | # things needed 1466 | # fleet ip -> FLEETIP 1467 | # es ip -> ESIP 1468 | # fingerprint 1469 | # ## fingerprint FINGERPRINT=`openssl x509 -fingerprint -sha256 -noout -in ${WORKDIR}/ca.crt | awk -F"=" {' print $2 '} | sed s/://g` 1470 | 1471 | # for Fleet Server 8.2+ - Add external output with fingerprint and verification_mode 1472 | if [ $(checkversion $VERSION) -ge $(checkversion "8.2.0") ]; then 1473 | 1474 | echo "${green}[DEBUG]${reset} Waiting 30 seconds for fleet server to calm down to set the external output" 1475 | sleep 30 & 1476 | while kill -0 $! >/dev/null 2>&1 1477 | do 1478 | echo -n "." 
1479 | sleep 2 1480 | done 1481 | echo "" 1482 | 1483 | # need to set fleet server url 1484 | generate_post_data() 1485 | { 1486 | cat </dev/null 2>&1 1497 | 1498 | sleep 10 1499 | 1500 | # generate fingerprint 1501 | FINGERPRINT=`openssl x509 -fingerprint -sha256 -noout -in ${WORKDIR}/ca.crt | awk -F"=" {' print $2 '} | sed s/://g` 1502 | 1503 | generate_post_data() 1504 | { 1505 | cat </dev/null 2>&1 1522 | 1523 | sleep 10 1524 | 1525 | # Lets go ahead and create an External agent policy 1526 | # get id for the external output 1527 | EXTID=`curl -s -k -u "elastic:${PASSWORD}" https://${KIBANAIP}:5601/api/fleet/outputs | jq -r '.items[]| select(.name=="external")|.id'` 1528 | 1529 | 1530 | generate_post_data() 1531 | { 1532 | cat </dev/null 2>&1 1548 | 1549 | sleep 10 1550 | 1551 | echo "${green}[DEBUG]${reset} Output: external created. You can use this output for elastic-agent from outside of k8s cluster." 1552 | echo "${green}[DEBUG]${reset} Please create a new agent policy using the external output if you want to use elastic-agent from outside of k8s cluster." 1553 | echo "${green}[DEBUG]${reset} Please use https://${FLEETIP}:8220 with --insecure to register your elastic-agent if you are coming from outside of k8s cluster." 1554 | echo "" 1555 | 1556 | fi # end if for fleet server 8.2+ external output 1557 | 1558 | # for Fleet Server 8.1 - 1 output no changes needed 1559 | 1560 | # for Fleet Server 8.0 - 1 output sometimes the output is not set correctly. going to fix 1561 | if [ $(checkversion $VERSION) -ge $(checkversion "8.0.0") ] && [ $(checkversion $VERSION) -lt $(checkversion "8.2.0") ]; then 1562 | 1563 | echo "${green}[DEBUG]${reset} Waiting 30 seconds for fleet server to calm down to set the output" 1564 | sleep 30 & 1565 | while kill -0 $! >/dev/null 2>&1 1566 | do 1567 | echo -n "." 
###############################################################################
# ckversion
# Validates the requested STACK (${VERSION}) and ECK operator (${ECKVERSION})
# versions before anything is deployed:
#   - enforces the script minimums (stack 7.10.0+, operator 1.4.0+)
#   - probes download.elastic.co to confirm the operator version exists
#     (all-in-one.yaml for operators < 1.7.0, crds.yaml for >= 1.7.0)
#   - refuses stack 8.x on operators older than 2.2.0
# Calls help and exits non-zero on any violation.
ckversion() {
  # manually limiting elasticsearch version to 7.10.0 or greater
  if [ $(checkversion $VERSION) -lt $(checkversion "7.10.0") ]; then
    echo "${red}[DEBUG]${reset} Script is limited to stack version 7.10.0 and higher"
    echo ""
    help
    # BUGFIX: bare "exit" returned the echo's status (0); signal failure
    exit 1
  fi

  # manually limiting eck version to 1.4 or greater
  if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.4.0") ]; then
    echo "${red}[DEBUG]${reset} Script is limited to operator 1.4.0 and higher"
    echo ""
    help
    exit 1
  else
    if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") ]; then
      # pre-1.7.0 operators ship as a single all-in-one manifest
      if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/all-in-one.yaml -o /dev/null; then
        echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version validated."
      else
        echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version is invalid."
        echo ""
        help
        exit 1
      fi
    else
      # 1.7.0+ operators split into crds.yaml + operator.yaml; probing
      # crds.yaml is enough to prove the version exists
      if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/crds.yaml -o /dev/null; then
        echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version validated."
      else
        echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version is invalid."
        echo ""
        help
        exit 1
      fi
    fi
  fi

  # stack 8.x requires operator 2.2.0+
  if [ $(checkversion $ECKVERSION) -lt $(checkversion "2.2.0") -a $(checkversion $VERSION) -ge $(checkversion "8.0.0") ]; then
    echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Can not run 8.x. Please use operator 2.2.0+"
    echo ""
    help
    exit 1
  fi
  echo "${green}[DEBUG]${reset} This might take a while. In another window you can ${blue}watch -n2 kubectl get all${reset} or ${blue}kubectl get events -w${reset} to watch the stack being stood up"
  echo ""
} # end ckversion
VERSION=${2} 1708 | ECKVERSION=${3} 1709 | ckversion 1710 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") -o $(checkversion $VERSION) -lt $(checkversion "7.14.0") ]; then 1711 | echo "${red}[DEBUG]${reset} Sidecar stack monitoring started with ECK 1.7.0 & STACK 7.14.0. Please run cleanup and re-run wiht ECK operator 1.7.0+/Stack 7.14.0+" 1712 | echo "" 1713 | help 1714 | exit 1715 | else 1716 | checkdir 1717 | operator 1718 | stack "eck-lab-monitor" 1719 | monitor2 "eck-lab" 1720 | summary 1721 | fi 1722 | ;; 1723 | # snapshot) 1724 | # snapshot ${2} ${3} 1725 | # ;; 1726 | fleet) 1727 | VERSION=${2} 1728 | ECKVERSION=${3} 1729 | ckversion 1730 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") -o $(checkversion $VERSION) -lt $(checkversion "7.14.0") ]; then 1731 | echo "${red}[DEBUG]${reset} Fleet server started with ECK 1.7.0 and STACK 7.14.0. Please run cleanup and re-run with ECK operator 1.7.0+/Stack 7.14.0+" 1732 | echo "" 1733 | help 1734 | exit 1735 | else 1736 | checkdir 1737 | operator 1738 | stack "eck-lab" 1739 | fleet "eck-lab" 1740 | summary 1741 | fi 1742 | ;; 1743 | cleanup|clean|teardown|stop) 1744 | cleanup 1745 | exit 1746 | ;; 1747 | info|summary|detail) 1748 | summary 1749 | ;; 1750 | *) 1751 | help 1752 | exit 1753 | ;; 1754 | esac 1755 | -------------------------------------------------------------------------------- /deploy-eck.sh.20220824: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # justin lim 4 | # 5 | # version 1.0 6 | 7 | # curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/deploy-eck.sh -o deploy-eck.sh 8 | # 9 | # NOTES 10 | # eck 1.2+ 11 | # start of elasticsearchRef 12 | # ES 6.8+ & 7.1+ 13 | # Beats 7.0+ 14 | # entsearch 7.7+ 15 | # all-in-one operator 16 | # eck 1.4+ 17 | # ES 7.17.3+ 18 | # eck 1.6+ 19 | # elastic-agent 7.10+ 20 | # elastic map server 7.11+ 21 | # eck 1.7+ 22 | # crds.yaml & operator.yaml 23 | # 
fleet 7.14 24 | # sidecar container stack monitoring started with ES 7.14 25 | # eck 1.9+ 26 | # helm 3.2.0 27 | # eck 2.1+ 28 | # kibana configuration becomre longer and more specific for fleet server 29 | # eck 2.2 30 | # ES 8.0+ 31 | 32 | # Starting 7.17 stack container image changed to ubuntu - some fixes are needed due to this 33 | 34 | 35 | # set WORKDIR 36 | WORKDIR="${HOME}/eckstack" 37 | 38 | ############################################################################################################### 39 | # colors 40 | red=`tput setaf 1` 41 | green=`tput setaf 2` 42 | blue=`tput setaf 4` 43 | reset=`tput sgr0` 44 | 45 | ############################################################################################################### 46 | # help 47 | help() 48 | { 49 | echo "" 50 | echo "${green}This script is limited to ECK Operator 1.4.0+ & Stack 7.10.0+." 51 | echo "${green} - various commands have additional limitations that will be listed below." 52 | echo "" 53 | echo "${green}USAGE:${reset} ./`basename $0` command STACKversion ECKversion" 54 | echo "" 55 | echo "${blue}COMMANDS:${reset}" 56 | echo " ${green}operator${reset} - will just stand up the operator only and apply a trial license" 57 | echo " ${green}stack|start|build${reset} - will stand up the ECK Operator, elasticsearch, & kibana with CLUSTER name : ${blue}eck-lab${reset}" 58 | echo " ${green}beats${reset} - will stand up the basic stack + filebeat, metricbeat, packetbeat, & heartbeat" 59 | echo " ${green}monitor1${reset} - will stand up the basic stack named ${blue}eck-lab${reset} and a monitoring stack named ${blue}eck-lab-monitor${reset}, filebeat, & metricbeat as PODS to report stack monitoring to ${blue}eck-lab-monitor${reset}" 60 | echo " ${green}monitor2${reset} - will be the same as ${blue}monitor1${reset} however both filebeat & metricbeat will be a sidecar container inside of elasticsearch & kibana Pods. 
Limited to ECK ${blue}1.7.0+${reset} & STACK ${blue}7.14.0+${reset}" 61 | echo " ${green}fleet${reset} - will stand up the basic stack + FLEET Server & elastic-agent as DaemonSet on each ECK node." 62 | echo "" 63 | echo " ${green}cleanup${reset} - will delete all the resources including the ECK operator" 64 | echo "" 65 | echo "${green}EXAMPLE: ${reset}./`basename $0` fleet 8.2.0 2.2.0" 66 | echo "" 67 | echo "All yaml files will be stored in ${blue}~/eckstack${reset}" 68 | echo " ${blue}~/eckstack/notes${reset} will contain all endpoint and password information" 69 | echo " ${blue}~/eckstack/ca.crt${reset} will be the CA used to sign the public certificate" 70 | echo "" 71 | } # end of help 72 | 73 | ############################################################################################################### 74 | # functions 75 | 76 | # cleanup function 77 | cleanup() 78 | { 79 | # make sure to name all yaml files as .yaml so that it can be picked up during cleanup 80 | echo "" 81 | echo "${green}********** Cleaning up **********${reset}" 82 | echo "" 83 | 84 | for item in `ls -1t ${WORKDIR}/*.yaml 2>/dev/null` 85 | do 86 | echo "${green}[DEBUG]${reset} DELETING Resources for: ${blue}${item}${reset}" 87 | kubectl delete -f ${item} > /dev/null 2>&1 88 | done 89 | 90 | rm -rf ${WORKDIR} > /dev/null 2>&1 91 | echo "" 92 | echo "${green}[DEBUG]${reset} All cleanedup" 93 | echo "" 94 | } # end of cleanup function 95 | 96 | createsummary() 97 | { 98 | unset PASSWORD 99 | while [ "${PASSWORD}" = "" ] 100 | do 101 | PASSWORD=$(kubectl get secret ${1}-es-elastic-user -o go-template='{{.data.elastic | base64decode}}') 102 | echo "${green}[DEBUG]${reset} Grabbing elastic password for ${1}: ${blue}${PASSWORD}${reset}" 103 | done 104 | echo "${1} elastic password: ${PASSWORD}" >> notes 105 | 106 | unset ESIP 107 | while [ "${ESIP}" = "" ] 108 | do 109 | ESIP=`kubectl get service | grep ${1}-es-http | awk '{ print $4 }'` 110 | echo "${green}[DEBUG]${reset} Grabbing 
elasticsearch endpoint for ${1}: ${blue}https://${ESIP}:9200${reset}" 111 | done 112 | echo "${1} elasticsearch endpoint: https://${ESIP}:9200" >> notes 113 | 114 | unset KIBANAIP 115 | while [ "${KIBANAIP}" = "" -o "${KIBANAIP}" = "" ] 116 | do 117 | KIBANAIP=`kubectl get service | grep ${1}-kb-http | awk '{ print $4 }'` 118 | echo "${green}[DEBUG]${reset} Grabbing kibana endpoint for ${1}: ${blue}https://${KIBANAIP}:5601${reset}" 119 | sleep 2 120 | done 121 | echo "${1} kibana endpoint: https://${KIBANAIP}:5601" >> notes 122 | 123 | if [ "${1}" = "eck-lab" ]; then 124 | kubectl get secrets ${1}-es-http-certs-public -o jsonpath="{.data.ca\.crt}" | base64 -d > ca.crt 125 | fi 126 | 127 | echo "" 128 | } 129 | 130 | summary() 131 | { 132 | echo "" 133 | echo "${green}[SUMMARY]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset}" 134 | echo "" 135 | kubectl get all 136 | echo "" 137 | echo "${green}[SUMMARY]${reset} STACK INFO:" 138 | while read line 139 | do 140 | string1=`echo $line | awk -F": " '{ print $1 }'` 141 | string2=`echo $line | awk -F": " '{ print $2 }'` 142 | echo "${string1}: ${blue}${string2}${reset}" 143 | done < ${WORKDIR}/notes 144 | 145 | # cat ${WORKDIR}/notes 146 | 147 | #echo "${green}[SUMMARY]${reset} ${1} elastic user password: ${blue}`cat ${WORKDIR}/notes | grep 'ECK-LAB elastic password' | awk '{ print $NF }'`${reset}" 148 | #echo "${green}[SUMMARY]${reset} ${1} elasticsearch endpoint: ${blue}`cat ${WORKDIR}/notes | grep 'ECK-LAB elasticsearch endpoint' | awk '{ print $NF }'`${reset}" 149 | #echo "${green}[SUMMARY]${reset} ECK-LAB kibana endpoint: ${blue}`cat ${WORKDIR}/notes | grep 'ECK-LAB kibana endpoint' | awk '{ print $NF }'`${reset}" 150 | echo "" 151 | echo "${green}[SUMMARY]${reset} ${blue}ca.crt${reset} is located in ${blue}${WORKDIR}/ca.crt${reset}" 152 | #echo "${green}[SUMMARY]${reset} EXAMPLE: ${blue}curl --cacert ${WORKDIR}/ca.crt -u \"elastic:${PASSWORD}\" https://${ESIP}:9200${reset}" 153 | # curl 
--cacert ${WORKDIR}/ca.crt -u "elastic:${PASSWORD}" https://${ESIP}:9200 154 | echo "" 155 | echo "${green}[NOTE]${reset} If you missed the summary its also in ${blue}${WORKDIR}/notes${reset}" 156 | echo "${green}[NOTE]${reset} You can start logging into kibana but please give things few minutes for proper startup and letting components settle down." 157 | echo "" 158 | } 159 | 160 | # check jq 161 | checkjq() 162 | { 163 | if ! [ -x "$(command -v jq)" ]; then 164 | echo "${red}[DEBUG]${reset} jq is not installed. Please install jq and try again" 165 | exit 166 | fi 167 | } # end of checkjq 168 | 169 | # check kubectl 170 | checkkubectl() 171 | { 172 | if [ `kubectl version 2>/dev/null | grep -c "Client Version"` -lt 1 ]; then 173 | echo "${red}[DEBUG]${reset} kubectl is not installed. Please install kubectl and try again" 174 | exit 175 | fi 176 | if [ `kubectl version 2>/dev/null | grep -c "Server Version"` -lt 1 ]; then 177 | echo "${red}[DEBUG]${reset} kubectl is not connecting to any kubernetes environment" 178 | echo "${red}[DEBUG]${reset} if you did not setup your k8s environment. Please configure your kubernetes environment and try again" 179 | exit 180 | fi 181 | } # end checkubectl 182 | 183 | # function used for version checking and comparing 184 | checkversion() 185 | { 186 | echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }' 187 | } # end of checkversion function 188 | 189 | # check directory exist 190 | checkdir() 191 | { 192 | # check to see if the directory exists should not since this is the start 193 | if [ -d ${WORKDIR} ]; then 194 | echo "${red}[DEBUG]${reset} Looks like ${WORKDIR} already exists." 
195 | echo "${red}[DEBUG]${reset} Please run ${blue}`basename $0` cleanup${reset} before trying again" 196 | echo "" 197 | help 198 | exit 199 | fi # end of if to check if WORKDIR exist 200 | 201 | # create directorys and files 202 | mkdir -p ${WORKDIR} 203 | cd ${WORKDIR} 204 | mkdir temp 205 | echo ${VERSION} > VERSION 206 | echo ${ECKVERSION} > ECKVERSION 207 | 208 | } # checkdir 209 | 210 | # check health of various things 211 | checkhealth() { 212 | sleep 3 213 | while true 214 | do 215 | if [ "`kubectl get ${1} | grep "${2} " | awk '{ print $2 }'`" = "green" ]; then 216 | sleep 2 217 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} is ${green}HEALTHY${reset}" 218 | echo "" 219 | kubectl get ${1} 220 | echo "" 221 | break 222 | else 223 | echo "${red}[DEBUG]${reset} ${1} is starting. Checking again in 20 seconds. If this does not finish in few minutes something is wrong. CTRL-C please" 224 | #echo "" 225 | #kubectl get ${1} 226 | #echo "" 227 | #kubectl get pods | grep "${2} " 228 | #echo "" 229 | sleep 20 230 | fi 231 | done 232 | } # end checkhealth 233 | 234 | ############################################################################################################### 235 | # operator 236 | operator() 237 | { 238 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} OPERATOR **************${reset}" 239 | echo "" 240 | # all version checks complete & directory structures created starting operator 241 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") ]; then # if version is less than 1.7.0 242 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading operator: all-in-one.yaml" 243 | if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/all-in-one.yaml -o all-in-one.yaml; then # if curl is successful 244 | kubectl apply -f all-in-one.yaml > /dev/null 2>&1 245 | else 246 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to 
get all-in-one.yaml - check network/version?" 247 | echo "" 248 | help 249 | exit 250 | fi 251 | else # if eckversion is not less than 1.7.0 252 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading crds: crds.yaml" 253 | if curl -fsSL https://download.elastic.co/downloads/eck/${ECKVERSION}/crds.yaml -o crds.yaml; then 254 | kubectl create -f crds.yaml > /dev/null 2>&1 255 | else 256 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get crds.yaml - check network/version?" 257 | echo "" 258 | help 259 | exit 260 | fi 261 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading operator: operator.yaml" 262 | if curl -fsSL https://download.elastic.co/downloads/eck/${ECKVERSION}/operator.yaml -o operator.yaml; then 263 | kubectl create -f operator.yaml > /dev/null 2>&1 264 | else 265 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get operator.yaml - check network/version?" 266 | echo "" 267 | help 268 | exit 269 | fi 270 | fi 271 | 272 | while true 273 | do 274 | if [ "`kubectl -n elastic-system get pod | grep elastic-operator | awk '{ print $3 }'`" = "Running" ]; then 275 | sleep 2 276 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} OPERATOR is ${green}HEALTHY${reset}" 277 | echo "" 278 | kubectl -n elastic-system get all 279 | echo "" 280 | break 281 | else 282 | echo "${red}[DEBUG]${reset} ECK Operator is starting. Checking again in 20 seconds. If the operator does not goto Running status in few minutes something is wrong. 
CTRL-C please" 283 | # kubectl -n elastic-system get pod 284 | echo "" 285 | sleep 20 286 | fi 287 | done 288 | 289 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Creating license.yaml" 290 | # apply trial licence 291 | cat >>license.yaml< /dev/null 2>&1 304 | # sleep 30 305 | # kubectl -n elastic-system get configmap elastic-licensing -o json | jq -r '.data' 306 | } # end of operator 307 | 308 | ############################################################################################################### 309 | # stack 310 | stack() 311 | { 312 | echo "" 313 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} CLUSTER ${blue}${1}${reset} **************${reset}" 314 | echo "" 315 | 316 | # create elasticsearch.yaml 317 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml" 318 | cat >> elasticsearch-${1}.yaml < /dev/null 2>&1 351 | 352 | # checkeshealth 353 | checkhealth "elasticsearch" "${1}" 354 | 355 | # create kibana.yaml 356 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating kibana.yaml" 357 | cat >> kibana-${1}.yaml < /dev/null 2>&1 380 | 381 | #checkkbhealth 382 | checkhealth "kibana" "${1}" 383 | 384 | createsummary ${1} 385 | 386 | } # end of stack 387 | 388 | ############################################################################################################### 389 | # filebeat autodiscover & metricbeat hosts as daemonset onto k8s hosts 390 | beats() 391 | { 392 | echo "" 393 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} with BEATS **************${reset}" 394 | echo "" 395 | 396 | # Create and apply metricbeat-rbac 397 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Creating BEATS crds" 398 | cat 
>> beats-crds.yaml< /dev/null 2>&1 499 | 500 | # Create and apply metricbeat-rbac 501 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Creating BEATS" 502 | cat >> beats.yaml< /dev/null 2>&1 736 | 737 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} filebeat, metricbeat, packetbeat, & heartbeat deployed" 738 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Please wait a few minutes for the beats to become healthy. (it will restart 3-4 times before it becomes healthy) & for the data to start showing" 739 | #sleep 30 740 | #echo "" 741 | #kubectl get daemonset 742 | echo "" 743 | 744 | } 745 | 746 | ############################################################################################################### 747 | # stack monitoring - beats in pods 748 | monitor1() 749 | { 750 | echo "" 751 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} Stack Monitoring with BEATS in Pods **************${reset}" 752 | echo "" 753 | 754 | # remove labels from eck-lab-montor pods 755 | # is this needed? 
756 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Removing scrape label from monitoring pods" 757 | for item in `kubectl get pods --no-headers -o custom-columns=":metadata.name" | grep eck-lab-monitor` 758 | do 759 | kubectl label pod ${item} scrape- > /dev/null 2>&1 760 | done 761 | sleep 10 762 | 763 | # Create and apply monitor1.yaml 764 | cat >> monitor1.yaml< /dev/null 2>&1 979 | 980 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Stack monitoring with BEATS in PODS deployed" 981 | 982 | #echo "" 983 | #kubectl get daemonset 984 | echo "" 985 | } 986 | ############################################################################################################### 987 | # stack monitoring - side car 988 | monitor2() 989 | { 990 | echo "" 991 | echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${BLUE}${VERSION}${green} Stack Monitoring with BEATS in sidecar containers **************${reset}" 992 | echo "" 993 | 994 | # create elasticsearch-eck-lab.yaml 995 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml" 996 | cat >> monitor2.yaml < /dev/null 2>&1 1061 | 1062 | #checkkbhealth 1063 | checkhealth "kibana" "${1}" 1064 | 1065 | createsummary "${1}" 1066 | echo "" 1067 | 1068 | # notes 1069 | # you can create a normal deployment and patch it using kubectl patch kibana eck-lab --type merge -p '{"spec":{"monitoring":{"logs":{"elasticsearchRefs":[{"name":"eck-lab-monitor"}]},"metrics":{"elasticsearchRefs":[{"name":"eck-lab-monitor"}]}}}}' to change it to sidecar monitoring 1070 | # 1071 | 1072 | } 1073 | ############################################################################################################### 1074 | # fleet server 1075 | fleet() 1076 | { 1077 | echo "" 1078 | echo "${green} ********** Deploying ECK 
${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} Fleet Server & elastic-agent **************${reset}" 1079 | echo "" 1080 | 1081 | # patch kibana 1082 | echo "${green}[DEBUG]${reset} Patching kibana to set fleet settings" 1083 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "2.1.0") ]; then 1084 | kubectl patch kibana eck-lab --type merge -p '{"spec":{"config":{"xpack.fleet.agentPolicies":[{"is_default_fleet_server":true,"name":"Default Fleet Server on ECK policy","package_policies":[{"name":"fleet_server-1","package":{"name":"fleet_server"}}]},{"is_default":true,"name":"Default Elastic Agent on ECK policy","package_policies":[{"name":"system-1","package":{"name":"system"}},{"name":"kubernetes-1","package":{"name":"kubernetes"}}],"unenroll_timeout":900}],"xpack.fleet.agents.elasticsearch.host":"https://eck-lab-es-http.default.svc:9200","xpack.fleet.agents.fleet_server.hosts":["https://fleet-server-agent-http.default.svc:8220"],"xpack.fleet.packages":[{"name":"kubernetes","version":"latest"}]}}}' 1085 | elif [ $(checkversion $ECKVERSION) -ge $(checkversion "2.1.0") ]; then 1086 | kubectl patch kibana eck-lab --type merge -p '{"spec":{"config":{"xpack.fleet.agentPolicies":[{"id":"eck-fleet-server","is_default_fleet_server":true,"monitoring_enabled":["logs","metrics"],"name":"Fleet Server on ECK policy","namespace":"default","package_policies":[{"id":"fleet_server-1","name":"fleet_server-1","package":{"name":"fleet_server"}}]},{"id":"eck-agent","is_default":true,"monitoring_enabled":["logs","metrics"],"name":"Elastic Agent on ECK 
policy","namespace":"default","package_policies":[{"name":"system-1","package":{"name":"system"}},{"name":"kubernetes-1","package":{"name":"kubernetes"}}],"unenroll_timeout":900}],"xpack.fleet.agents.elasticsearch.host":"https://eck-lab-es-http.default.svc:9200","xpack.fleet.agents.fleet_server.hosts":["https://fleet-server-agent-http.default.svc:8220"],"xpack.fleet.packages":[{"name":"system","version":"latest"},{"name":"elastic_agent","version":"latest"},{"name":"fleet_server","version":"latest"},{"name":"kubernetes","version":"0.14.0"}]}}}' > /dev/null 2>&1 1087 | fi 1088 | echo "${green}[DEBUG]${reset} Sleeping for 60 seconds to wait for kibana to be updated with the patch" 1089 | sleep 60 & # no healthchecks on fleet so just going to sleep for 60 1090 | while kill -0 $! >/dev/null 2>&1 1091 | do 1092 | echo -n "." 1093 | sleep 2 1094 | done 1095 | echo "" 1096 | 1097 | # create fleet-server.yaml 1098 | echo "${green}[DEBUG]${reset} Creating fleet.yaml" 1099 | cat >> fleet.yaml<> fleet.yaml<> fleet.yaml< /dev/null 2>&1 1347 | 1348 | # checkfleethealth 1349 | checkhealth "agent" "elastic-agent" 1350 | 1351 | # get fleet url 1352 | unset FLEETIP 1353 | while [ "${FLEETIP}" = "" -o "${FLEETIP}" = "" ] 1354 | do 1355 | FLEETIP=`kubectl get service | grep fleet-server-agent-http | awk '{ print $4 }'` 1356 | echo "${green}[DEBUG]${reset} Grabbing Fleet Server endpoint (external): ${blue}https://${FLEETIP}:8220${reset}" 1357 | sleep 2 1358 | done 1359 | echo "${1} Fleet Server endpoint: https://${FLEETIP}:8220" >> notes 1360 | 1361 | 1362 | #### 1363 | # things needed 1364 | # fleet ip -> FLEETIP 1365 | # es ip -> ESIP 1366 | # fingerprint 1367 | # ## fingerprint FINGERPRINT=`openssl x509 -fingerprint -sha256 -noout -in ${WORKDIR}/ca.crt | awk -F"=" {' print $2 '} | sed s/://g` 1368 | 1369 | # for Fleet Server 8.2+ - Add external output with fingerprint and verification_mode 1370 | if [ $(checkversion $VERSION) -ge $(checkversion "8.2.0") ]; then 1371 | 1372 | echo 
"${green}[DEBUG]${reset} Waiting 30 seconds for fleet server to calm down to set the external output" 1373 | sleep 30 & 1374 | while kill -0 $! >/dev/null 2>&1 1375 | do 1376 | echo -n "." 1377 | sleep 2 1378 | done 1379 | echo "" 1380 | 1381 | # need to set fleet server url 1382 | generate_post_data() 1383 | { 1384 | cat </dev/null 2>&1 1395 | 1396 | sleep 10 1397 | 1398 | # generate fingerprint 1399 | FINGERPRINT=`openssl x509 -fingerprint -sha256 -noout -in ${WORKDIR}/ca.crt | awk -F"=" {' print $2 '} | sed s/://g` 1400 | 1401 | generate_post_data() 1402 | { 1403 | cat </dev/null 2>&1 1420 | 1421 | sleep 10 1422 | 1423 | # Lets go ahead and create an External agent policy 1424 | # get id for the external output 1425 | EXTID=`curl -s -k -u "elastic:${PASSWORD}" https://${KIBANAIP}:5601/api/fleet/outputs | jq -r '.items[]| select(.name=="external")|.id'` 1426 | 1427 | 1428 | generate_post_data() 1429 | { 1430 | cat </dev/null 2>&1 1446 | 1447 | sleep 10 1448 | 1449 | echo "${green}[DEBUG]${reset} Output: external created. You can use this output for elastic-agent from outside of k8s cluster." 1450 | echo "${green}[DEBUG]${reset} Please create a new agent policy using the external output if you want to use elastic-agent from outside of k8s cluster." 1451 | echo "${green}[DEBUG]${reset} Please use https://${FLEETIP}:8220 with --insecure to register your elastic-agent if you are coming from outside of k8s cluster." 1452 | echo "" 1453 | 1454 | fi # end if for fleet server 8.2+ external output 1455 | 1456 | # for Fleet Server 8.1 - 1 output no changes needed 1457 | 1458 | # for Fleet Server 8.0 - 1 output sometimes the output is not set correctly. going to fix 1459 | if [ $(checkversion $VERSION) -ge $(checkversion "8.0.0") ] && [ $(checkversion $VERSION) -lt $(checkversion "8.2.0") ]; then 1460 | 1461 | echo "${green}[DEBUG]${reset} Waiting 30 seconds for fleet server to calm down to set the output" 1462 | sleep 30 & 1463 | while kill -0 $! 
>/dev/null 2>&1 1464 | do 1465 | echo -n "." 1466 | sleep 2 1467 | done 1468 | echo "" 1469 | 1470 | generate_post_data() 1471 | { 1472 | cat </dev/null 2>&1 1489 | 1490 | 1491 | fi # end if for fleet server 8.0-8.1 1492 | 1493 | # for Fleet Server < 8.0 only 1 output can be set - do not need to do anything 1494 | 1495 | } # end fleet-server 1496 | 1497 | 1498 | ############################################################################################################### 1499 | # enterprisesearch 1500 | 1501 | ############################################################################################################### 1502 | # maps server 1503 | 1504 | ############################################################################################################### 1505 | # main script 1506 | 1507 | # manually checking versions and limiting version if not cleanup 1508 | if [ "${1}" = "operator" ]; then 1509 | ECKVERSION=${2} 1510 | elif [[ "${1}" != @(cleanup|info|summary|detail) ]]; then 1511 | VERSION=${2} 1512 | ECKVERSION=${3} 1513 | # manually limiting elasticsearch version to 7.10.0 or greater 1514 | if [ $(checkversion $VERSION) -lt $(checkversion "7.10.0") ]; then 1515 | echo "${red}[DEBUG]${reset} Script is limited to stack version 7.10.0 and higher" 1516 | echo "" 1517 | help 1518 | exit 1519 | fi 1520 | 1521 | # manually limiting eck version to 1.4 or greater 1522 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.4.0") ]; then 1523 | echo "${red}[DEBUG]${reset} Script is limited to operator 1.4.0 and higher" 1524 | echo "" 1525 | help 1526 | exit 1527 | else 1528 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") ]; then 1529 | if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/all-in-one.yaml -o /dev/null; then 1530 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version validated." 1531 | else 1532 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version is invalid." 
1533 | echo "" 1534 | help 1535 | exit 1536 | fi 1537 | elif [ $(checkversion $ECKVERSION) -ge $(checkversion "1.7.0") ]; then 1538 | if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/crds.yaml -o /dev/null; then 1539 | echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version validated." 1540 | else 1541 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version is invalid." 1542 | echo "" 1543 | help 1544 | exit 1545 | fi 1546 | fi 1547 | fi 1548 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "2.2.0") -a $(checkversion $VERSION) -ge $(checkversion "8.0.0") ]; then 1549 | echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Can not run 8.x. Please use operator 2.2.0+" 1550 | echo "" 1551 | help 1552 | exit 1553 | fi 1554 | echo "${green}[DEBUG]${reset} This might take a while. In another window you can ${blue}watch -n2 kubectl get all${reset} or ${blue}kubectl get events -w${reset} to watch the stack being stood up" 1555 | echo "" 1556 | fi 1557 | 1558 | # preflight checks before creating directories 1559 | 1560 | checkjq 1561 | checkkubectl 1562 | 1563 | case ${1} in 1564 | operator) 1565 | checkdir 1566 | operator 1567 | ;; 1568 | build|start|stack) 1569 | checkdir 1570 | operator 1571 | stack "eck-lab" 1572 | summary 1573 | ;; 1574 | beats|beat) 1575 | checkdir 1576 | operator 1577 | stack "eck-lab" 1578 | beats "eck-lab" 1579 | summary 1580 | ;; 1581 | monitor1) 1582 | checkdir 1583 | operator 1584 | stack "eck-lab" 1585 | stack "eck-lab-monitor" 1586 | monitor1 "eck-lab" 1587 | summary 1588 | ;; 1589 | monitor2) 1590 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") -o $(checkversion $VERSION) -lt $(checkversion "7.14.0") ]; then 1591 | echo "${red}[DEBUG]${reset} Sidecar stack monitoring started with ECK 1.7.0 & STACK 7.14.0. 
Please run cleanup and re-run wiht ECK operator 1.7.0+/Stack 7.14.0+" 1592 | echo "" 1593 | help 1594 | exit 1595 | else 1596 | checkdir 1597 | operator 1598 | stack "eck-lab-monitor" 1599 | monitor2 "eck-lab" 1600 | summary 1601 | fi 1602 | ;; 1603 | # snapshot) 1604 | # snapshot ${2} ${3} 1605 | # ;; 1606 | fleet) 1607 | if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") -o $(checkversion $VERSION) -lt $(checkversion "7.14.0") ]; then 1608 | echo "${red}[DEBUG]${reset} Fleet server started with ECK 1.7.0 and STACK 7.14.0. Please run cleanup and re-run with ECK operator 1.7.0+/Stack 7.14.0+" 1609 | echo "" 1610 | help 1611 | exit 1612 | else 1613 | checkdir 1614 | operator 1615 | stack "eck-lab" 1616 | fleet "eck-lab" 1617 | summary 1618 | fi 1619 | ;; 1620 | cleanup|clean|teardown|stop) 1621 | cleanup 1622 | exit 1623 | ;; 1624 | info|summary|detail) 1625 | summary 1626 | ;; 1627 | *) 1628 | help 1629 | exit 1630 | ;; 1631 | esac 1632 | -------------------------------------------------------------------------------- /expectdo.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | # 3 | # 0.3 additional optimizations and fixes 4 | # added paths 5 | # removed the 2nd prompt during spawn 6 | # 7 | 8 | # SET VARS 9 | set prompt "\\$|#|>" 10 | set passprompt "\[Pp\]assword:" 11 | set timedout "" 12 | set closed "" 13 | 14 | 15 | # GET VARS 16 | ## set timeout to forever 17 | set timeout -1 18 | 19 | ## get ssh username as $userid 20 | stty echo 21 | send_user "\nSSH USERNAME : " 22 | expect_user -re "(.*)\n" 23 | set userid $expect_out(1,string) 24 | 25 | ## get ssh password as $pass without echo to console 26 | stty -echo 27 | send_user "SSH PASSWORD : " 28 | expect_user -re "(.*)\n" 29 | set pass $expect_out(1,string) 30 | send_user "\n" 31 | 32 | ## get filename for the file containing the list 33 | stty echo 34 | send_user "LIST FILE : " 35 | expect_user -re "(.*)\n" 36 | set filename 
$expect_out(1,string) 37 | if ![file exists $filename] { 38 | error "File $filename does not exist" 39 | } 40 | 41 | ## get the command that needs to be ran 42 | stty echo 43 | send_user "COMMAND : " 44 | expect_user -re "(.*)\n" 45 | set cmd $expect_out(1,string) 46 | send_user "\n" 47 | 48 | set timeout 5 49 | 50 | # load the list 51 | set fid [open $filename] 52 | set content [read -nonewline $fid] 53 | close $fid 54 | set hosts [split $content "\n"] 55 | 56 | 57 | foreach host $hosts { 58 | set skip 0 59 | send_user "\n\[\*DEBUG\] ====================================> $host \n" 60 | # log_user 0 # enable this and line 104 if you want to reduce logging 61 | spawn -noecho ssh -q -t -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $userid@$host 62 | set timeout 10 63 | expect { 64 | $passprompt { 65 | send "$pass\r" 66 | expect -re $prompt 67 | } 68 | -re $prompt { 69 | } 70 | timeout { 71 | send_user "\n\[\*DEBUG\] ====================================> $host did not respond\n" 72 | lappend timedout $host 73 | set skip 1 74 | } 75 | eof { 76 | send_user "\n\[\*DEBUG\] ====================================> $host closed!\n" 77 | lappend closed $host 78 | set skip 1 79 | } 80 | } 81 | if {$skip != 1} { 82 | expect -re $prompt 83 | send "export PATH; PATH=\$PATH:/bin:/usr/bin:/usr/local/bin:/sbin:/usr/sbin:/usr/local/sbin:/opt/csw/bin:/opt/csw/sbin:/usr/sfw/bin:/usr/sfw/sbin\n" 84 | expect -re $prompt 85 | send "$cmd\n" 86 | # log_user 1 87 | expect { 88 | -re $passprompt { send "$pass\r" } 89 | -re $prompt { } 90 | } 91 | expect -re $prompt 92 | send "exit" 93 | } 94 | } 95 | 96 | # send outputs 97 | send_user "\n\[\*DEBUG\] ====================================> DONE\n" 98 | 99 | send_user "\[\*DEBUG\] ========[llength $timedout] hosts timed-out\n" 100 | foreach host $timedout { 101 | send_user "---- $host\n" 102 | } 103 | 104 | send_user "\[\*DEBUG\] ========[llength $closed] hosts closed early\n" 105 | foreach host $closed { 106 | send_user "---- 
$host\n" 107 | } 108 | 109 | 110 | exit -------------------------------------------------------------------------------- /forking.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # this script will launch MAXFORK amount of processes against a list 4 | # monitor it and wait for it to finish before launching additional 5 | 6 | MAXFORK=20 7 | CMD="xxxxxxx' 8 | DEAD_ONLY=0 9 | 10 | [ $# -eq 0 ] && echo "Usage: $0 [-d] list_file" && exit 1 11 | 12 | [$1 = "-d" ] && DEAD_ONLY=1 && shift 1 13 | 14 | i=0 15 | for host in `cat $1` 16 | do 17 | ( 18 | eval $CMD 19 | ) & 20 | 21 | i=$((i+1)) 22 | while [ "$(jobs -r -p | wc -l)" -ge $MAXFORK ]; do 23 | sleep 1 24 | done 25 | done 26 | 27 | if [ -n "$(jobs -r -p)" ]; then 28 | wait 29 | fi 30 | -------------------------------------------------------------------------------- /gcp-ecklab.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ## Creates GCP instances 4 | 5 | # --------------EDIT information below 6 | 7 | ### PERSONAL ################### 8 | gcp_name="$(whoami | sed $'s/[^[:alnum:]\t]//g')-ecklab" # GCP name will automatically set to your username with special chars removed-eck 9 | gcp_zone="us-central1-b" # GCP zone - select one that is close to you 10 | 11 | ### ORGANIZATION ############### 12 | 13 | gcp_project="elastic-support" 14 | machine_type="e2-standard-8" # GCP machine type - gcloud compute machine-types list 15 | boot_disk_type="pd-ssd" # disk type - gcloud compute disk-types list 16 | label="division=support,org=support,team=support,project=${gcp_name}" 17 | 18 | # -------- do not edit below 19 | 20 | 21 | # colors 22 | bold=`tput bold` 23 | red=`tput setaf 1` 24 | green=`tput setaf 2` 25 | blue=`tput setaf 14` 26 | reset=`tput sgr0` 27 | 28 | help() 29 | { 30 | echo "This script is to stand up a GCP environment in ${gcp_project} Project" 31 | echo "" 32 | echo "${green}Usage:${reset} 
./`basename $0` COMMAND" 33 | echo "${blue}COMMANDS${reset}" 34 | echo " ${green}create${reset} - Creates your GCP environment & run post-scripts(linux)" 35 | echo " defaults to rocky-linux-8-optimized-gcp if image is not specified" 36 | echo " ${green}find${reset} - Finds info about your GCP environment" 37 | echo " ${green}delete${reset} - Deletes your GCP environment" 38 | } # end help 39 | 40 | # find 41 | find() { 42 | # finds the info for your compute instance 43 | if [ $(gcloud compute instances list --project ${gcp_project} 2> /dev/null | grep ${gcp_name} | wc -l) -gt 0 ]; then 44 | echo "${green}[DEBUG]${reset} Instance(s) found" 45 | echo "" 46 | # gcloud compute instances list --project ${gcp_project} --filter="name:${gcp_name}" --format="table[box] (name, zone.basename(), machineType.basename(), status, networkInterfaces[0].networkIP, networkInterfaces[0].accessConfigs[0].natIP, disks.licenses)" 47 | gcloud compute instances list --project "${gcp_project}" --filter="name:${gcp_name}" --format="table[box](name, zone.basename(), machineType.basename(), status, networkInterfaces[0].networkIP, networkInterfaces[0].accessConfigs[0].natIP, disks[0].licenses[0].basename())" 48 | else 49 | echo "${red}[DEBUG]${reset} You dont have any instances running" 50 | fi 51 | } # end find 52 | 53 | delete() { 54 | if [ $(gcloud compute instances list --project ${gcp_project} 2> /dev/null | grep ${gcp_name} | wc -l) -gt 0 ]; then 55 | for instance in $(gcloud compute instances list --project ${gcp_project} | grep ${gcp_name} | awk {' print $1 '}) 56 | do 57 | echo "${green}[DEBUG]${reset} Deleting ${instance}" 58 | gcloud compute instances delete ${instance} --project ${gcp_project} --zone=${gcp_zone} --quiet 59 | done 60 | else 61 | echo "${red}[DEBUG]${reset} Instance ${gcp_name} not found" 62 | fi 63 | } # end delete 64 | 65 | 66 | # create 67 | create() 68 | { 69 | # find_image ${image} 70 | echo "${green}[DEBUG]${reset} Creating instance ${blue}${gcp_name}${reset} 
with ${blue}${image}${reset}" 71 | #gcloud compute instances create ${gcp_name} \ 72 | # --quiet \ 73 | # --labels ${label} \ 74 | # --project=${gcp_project} \ 75 | # --zone=${gcp_zone} \ 76 | # --machine-type=${machine_type} \ 77 | # --network-interface=network-tier=PREMIUM,subnet=default \ 78 | # --maintenance-policy=MIGRATE \ 79 | # --provisioning-model=STANDARD \ 80 | # --tags=http-server,https-server \ 81 | # --stack-type=IPV4_IPV6 \ 82 | # --create-disk=auto-delete=yes,boot=yes,device-name=${gcp_name},image=projects/${PROJECT}/global/images/${IMAGE},mode=rw,type=projects/elastic-support/zones/${gcp_zone}/diskTypes/${boot_disk_type} \ 83 | # --metadata=startup-script='#!/bin/sh 84 | # if [ ! -f /ran_startup ]; then 85 | # curl -s https://raw.githubusercontent.com/jlim0930/scripts/master/gcp-postinstall-ecklab.sh | sh 86 | # fi 87 | # ' 88 | 89 | gcloud compute instances create ${gcp_name} \ 90 | --quiet \ 91 | --project ${gcp_project} \ 92 | --image-family rocky-linux-8-optimized-gcp \ 93 | --image-project rocky-linux-cloud \ 94 | --zone ${gcp_zone} \ 95 | --labels ${label} \ 96 | --machine-type ${machine_type} \ 97 | --tags=http-server,https-server \ 98 | --metadata=startup-script='#!/bin/sh 99 | if [ ! 
-f /ran_startup ]; then 100 | curl -s https://raw.githubusercontent.com/jlim0930/scripts/master/gcp-postinstall-ecklab.sh | sh 101 | fi 102 | ' 103 | echo "" 104 | 105 | sleep 2 106 | } # end create 107 | 108 | 109 | ## main body 110 | case ${1} in 111 | create|start) 112 | image="rocky-linux-8-optimized-gcp" 113 | echo "${green}[DEBUG]${reset} ${blue}${image}${reset} instance starting on ${blue}${machine_type}${reset} in ${blue}${gcp_zone}${reset}" 114 | create ${image} 115 | 116 | echo "" 117 | echo "====================================================================================================================================" 118 | echo "" 119 | echo "${blue}[DEBUG]${reset} ${bold}There is a post install script running and it will reboot the instance once complete, usually in about 3-5 minutes.${reset}" 120 | echo "${green}[DEBUG]${reset} Please ${blue}gcloud compute ssh ${gcp_name} [--zone ${gcp_zone}]${reset}." 121 | echo "" 122 | ;; 123 | find|info|status|check) 124 | find 125 | ;; 126 | delete|cleanup|stop) 127 | delete 128 | ;; 129 | *) 130 | help 131 | ;; 132 | esac 133 | -------------------------------------------------------------------------------- /gcp-postinstall-ecklab.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # shell script to run for GCP environments 4 | # to finish off compute installs 5 | 6 | 7 | if [ -z "$SCRIPT" ] 8 | then 9 | script /tmp/post-install.txt /bin/sh -c "$0 $*" 10 | exit 0 11 | fi 12 | 13 | # check for a flag and exit 14 | if [ -f /ran_startup ]; then 15 | exit; 16 | fi 17 | 18 | function distro() { 19 | if [ -f /etc/os-release ]; then 20 | source /etc/os-release 21 | echo $ID 22 | else 23 | uname 24 | fi 25 | } 26 | 27 | 28 | # make /etc/hosts entry 29 | cat >> /etc/hosts < /etc/dnf/dnf.conf <> /etc/xrdp/xrdp.ini 70 | 71 | systemctl daemon-reload 72 | systemctl enable xrdp 73 | systemctl start xrdp 74 | 75 | # disable services 76 | for service in auditd 
firewalld mdmonitor postfix bluetooth 77 | do 78 | systemctl disable ${service} 79 | done 80 | 81 | # install helm 82 | curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash 83 | 84 | # install some scripts for lab 85 | curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/deploy-elastick8s.sh -o /usr/local/bin/deploy-elastick8s.sh 86 | curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/kube-ecklab.sh -o /usr/local/bin/kube.sh 87 | chmod +x /usr/local/bin/*.sh 88 | 89 | yum update -y 90 | 91 | 92 | 93 | 94 | echo "done" > /ran_startup 95 | reboot 96 | 97 | -------------------------------------------------------------------------------- /gcp-postinstall-pre.ps1: -------------------------------------------------------------------------------- 1 | $text = @" 2 | powershell.exe -ExecutionPolicy Bypass -NoProfile -Command "& {iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/jlim0930/scripts/master/gcp-postinstall.ps1'))}" 3 | "@ 4 | 5 | Add-Content -Path "C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Startup\startup.bat" -Value $text -NoNewline -Force 6 | -------------------------------------------------------------------------------- /gcp-postinstall.ps1: -------------------------------------------------------------------------------- 1 | # execution policy 2 | Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser -Force 3 | 4 | # fix for ssl 5 | [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12 6 | 7 | # install scoop 8 | # irm get.scoop.sh | iex 9 | iex "& {$(irm get.scoop.sh)} -RunAsAdmin" 10 | 11 | scoop install sudo 12 | 13 | scoop install 7zip git 14 | scoop bucket add extras 15 | 16 | scoop install putty firefox mobaxterm 7zip 17 | -------------------------------------------------------------------------------- /gcp-postinstall.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # shell script to run for GCP environments 4 | # to finish off compute installs 5 | 6 | 7 | if [ -z "$SCRIPT" ] 8 | then 9 | script /tmp/post-install.txt /bin/sh -c "$0 $*" 10 | exit 0 11 | fi 12 | 13 | # check for a flag and exit 14 | if [ -f /ran_startup ]; then 15 | exit; 16 | fi 17 | 18 | function distro() { 19 | if [ -f /etc/os-release ]; then 20 | source /etc/os-release 21 | echo $ID 22 | else 23 | uname 24 | fi 25 | } 26 | 27 | 28 | # if OS is RHEL based 29 | if [[ $(distro) = @(centos|rhel|rocky|alma|fedora) ]]; then 30 | 31 | # disable selinux 32 | sed -i 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux 33 | setenforce Permissive 34 | 35 | # create elasticsearch repo 36 | rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch 37 | cat >> /etc/yum.repos.d/elasticsearch.repo< /etc/dnf/dnf.conf<> /etc/sysctl.d/20-elastic.conf<> /etc/sysctl.d/20-elastic.conf< /ran_startup 147 | reboot 148 | 149 | -------------------------------------------------------------------------------- /gcp-postinstall.sh.20240519: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # shell script to run for GCP environments 4 | # to finish off compute installs 5 | 6 | # create a flag file and check for it 7 | if [ -f /ran_startup ]; then 8 | exit; 9 | fi 10 | 11 | # if OS is RHEL based 12 | if [[ `cat /etc/os-release | grep ^ID` =~ "centos" ]] || [[ `cat /etc/os-release | grep ^ID` =~ "rhel" ]] || [[ `cat /etc/os-release | grep ^ID` =~ "rocky" ]] || [[ `cat /etc/os-release | grep ^ID` =~ "alma" ]]; then 13 | # disable selinux 14 | sed -i 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux 15 | setenforce Permissive 16 | 17 | # create elasticsearch repo 18 | rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch 19 | cat >> /etc/yum.repos.d/elasticsearch.repo<> 
/etc/sysctl.d/20-elastic.conf<> /etc/sysctl.d/20-elastic.conf< /ran_startup 111 | reboot -------------------------------------------------------------------------------- /gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ## Creates GCP instances 4 | 5 | # -------------- EDIT information below 6 | 7 | ### ORGANIZATION ############### 8 | 9 | gcp_project="elastic-support" 10 | REGION="us-central1" 11 | # gcp_zone="us-central1-b" # GCP zone - select one that is close to you 12 | machine_type="e2-standard-4" # GCP machine type - gcloud compute machine-types list 13 | boot_disk_type="pd-ssd" # disk type - gcloud compute disk-types list 14 | label="division=support,org=support,team=support,project=gcp-lab" 15 | 16 | # -------------- Do not edit below 17 | 18 | ### PERSONAL ################### 19 | gcp_name="$(whoami | sed $'s/[^[:alnum:]\t]//g')-lab" 20 | 21 | # colors 22 | red=$(tput setaf 1) 23 | green=$(tput setaf 2) 24 | blue=$(tput setaf 14) 25 | reset=$(tput sgr0) 26 | 27 | # Function to display help 28 | help() { 29 | cat << EOF 30 | This script is to stand up a GCP environment in ${gcp_project} Project 31 | 32 | ${green}Usage:${reset} ./$(basename "$0") COMMAND 33 | ${blue}COMMANDS${reset} 34 | ${green}create|start${reset} - Creates your GCP environment & runs post-scripts (Linux) 35 | ${green}find|info|status|check${reset} - Finds info about your GCP environment 36 | ${green}delete|cleanup|stop${reset} - Deletes your GCP environment 37 | EOF 38 | } 39 | 40 | debug() { 41 | echo "${green}[DEBUG]${reset} $1" 42 | } 43 | 44 | debugr() { 45 | echo "${red}[DEBUG]${reset} $1" 46 | } 47 | 48 | # load image list 49 | load_image_list() { 50 | debug "Generating a list of supported images" 51 | image_list=$(gcloud compute images list --format="table(name, family, selfLink)" --filter="-name=sql AND -name=sap" | grep -v arm | grep "\-cloud" | sort) 52 | IFS=$'\n' read -r -d '' -a images <<< "$image_list" 53 
| 54 | if [ -z "$image_list" ]; then 55 | debugr "No images found with the specified filters." 56 | exit 1 57 | fi 58 | 59 | families=($(echo "$image_list" | awk '{print $2}' | sort -u)) 60 | } # end 61 | 62 | select_image() { 63 | debug "Select an image family:" 64 | original_columns=$COLUMNS 65 | COLUMNS=1 66 | select selected_family in "${families[@]}"; do 67 | if [ -n "$selected_family" ]; then 68 | selected_image=$(echo "$image_list" | grep "$selected_family " | head -n 1) 69 | selected_image_name=$(echo "$selected_image" | awk '{print $1}') 70 | selected_project=$(echo "$selected_image" | awk '{print $3}') 71 | break 72 | else 73 | debugr "Invalid selection. Please try again." 74 | fi 75 | done 76 | COLUMNS=$original_columns 77 | } 78 | 79 | 80 | # find 81 | find_instances() { 82 | instance_count=$(gcloud compute instances list --project "${gcp_project}" --filter="name:${gcp_name}" --format="value(name)" | wc -l) 83 | if [ "$instance_count" -gt 0 ]; then 84 | debug "Instance(s) found" 85 | gcloud compute instances list --project "${gcp_project}" --filter="name:${gcp_name}" --format="table[box](name, zone.basename(), machineType.basename(), status, networkInterfaces[0].networkIP, networkInterfaces[0].accessConfigs[0].natIP, disks[0].licenses[0].basename())" 86 | else 87 | debugr "No instances found" 88 | fi 89 | } 90 | 91 | delete_instances() { 92 | instancelist=$(gcloud compute instances list --project "${gcp_project}" --filter="name:${gcp_name}" --format="value(name,zone)") 93 | if [ -z "$instancelist" ]; then 94 | debugr "No instances found with name ${gcp_name}" 95 | return 0 96 | fi 97 | 98 | # Iterate over the list of instances and delete them 99 | while read -r instance_name instance_zone; do 100 | debug "Deleting instance ${blue}$instance_name${reset} in zone ${blue}${instance_zone}${reset}..." 
101 | gcloud compute instances delete "$instance_name" --zone="$instance_zone" --delete-disks all --quiet 102 | done <<< "$instancelist" 103 | } 104 | 105 | # delete_instances() { 106 | # instance_count=$(gcloud compute instances list --project "${gcp_project}" --filter="name:${gcp_name}" --format="value(name)" | wc -l) 107 | # if [ "$instance_count" -gt 0 ]; then 108 | # debug "Deleting instances" 109 | # gcloud compute instances delete $(gcloud compute instances list --project "${gcp_project}" --filter="name:${gcp_name}" --format="value(name)") --project "${gcp_project}" --zone="${gcp_zone}" --quiet 110 | # else 111 | # debugr "No instances found with name ${gcp_name}" 112 | # fi 113 | # } 114 | 115 | get_random_zone() { 116 | local zone_array=($zones) 117 | local zone_count=${#zone_array[@]} 118 | local random_index=$((RANDOM % zone_count)) 119 | local selected_zone=${zone_array[$random_index]} 120 | 121 | echo $selected_zone 122 | } 123 | 124 | create_instances() { 125 | read -p "${green}[DEBUG]${reset} Please input the number of instances [1]: " max 126 | max="${max:-1}" 127 | 128 | load_image_list 129 | 130 | zones=$(gcloud compute zones list --filter="region:(${REGION})" --format="value(name)") 131 | 132 | 133 | for count in $(seq 1 "$max"); do 134 | select_image 135 | gcp_zone=$(get_random_zone) 136 | echo "" 137 | debug "Creating instance ${blue}${gcp_name}-${count}${reset} with image ${blue}${selected_image_name}${reset}" 138 | echo "" 139 | if [ -z "$(echo ${selected_image_name} | grep "window")" ]; then 140 | gcloud compute instances create ${gcp_name}-${count} \ 141 | --quiet \ 142 | --labels ${label} \ 143 | --project=${gcp_project} \ 144 | --zone=${gcp_zone} \ 145 | --machine-type=${machine_type} \ 146 | --network-interface=network-tier=PREMIUM,subnet=default \ 147 | --maintenance-policy=MIGRATE \ 148 | --provisioning-model=STANDARD \ 149 | --tags=http-server,https-server \ 150 | --stack-type=IPV4_IPV6 \ 151 | 
--create-disk=auto-delete=yes,boot=yes,device-name=${gcp_name}-${count},image=projects/${selected_project}/global/images/${selected_image_name},mode=rw,type=projects/elastic-support/zones/${gcp_zone}/diskTypes/${boot_disk_type} \ 152 | --metadata=startup-script='#!/usr/bin/env bash 153 | if [ ! -f /ran_startup ]; then 154 | curl -s https://raw.githubusercontent.com/jlim0930/scripts/master/gcp-postinstall.sh | bash 155 | fi' >/dev/null 2>&1 156 | echo "" 157 | else 158 | gcloud compute instances create ${gcp_name}-${count} \ 159 | --quiet \ 160 | --labels ${label} \ 161 | --project=${gcp_project} \ 162 | --zone=${gcp_zone} \ 163 | --machine-type=${machine_type} \ 164 | --network-interface=network-tier=PREMIUM,subnet=default \ 165 | --maintenance-policy=MIGRATE \ 166 | --provisioning-model=STANDARD \ 167 | --tags=http-server,https-server \ 168 | --stack-type=IPV4_IPV6 \ 169 | --create-disk=auto-delete=yes,boot=yes,device-name=${gcp_name}-${count},image=projects/${selected_project}/global/images/${selected_image_name},mode=rw,type=projects/elastic-support/zones/${gcp_zone}/diskTypes/${boot_disk_type} >/dev/null 2>&1 170 | echo "" 171 | fi 172 | done 173 | 174 | find_instances 175 | 176 | cat << EOF 177 | 178 | ==================================================================================================================================== 179 | 180 | ${green}[DEBUG]${reset} For ${blue}linux${reset} instances: 181 | ${green}[DEBUG]${reset} Please ${blue}gcloud compute ssh ${gcp_name}-X [--zone ${gcp_zone}]${reset}. 182 | ${green}[DEBUG]${reset} There is a post install script running and it will reboot the instance once complete, usually in about 3-5 minutes. 
183 | 184 | ${green}[DEBUG]${reset} For ${blue}windows${reset} instances: Please create your password ${blue}gcloud compute reset-windows-password ${gcp_name}-X[--zone ${gcp_zone}]${reset} 185 | ${green}[DEBUG]${reset} Please open powershell(non-admin) and run the following lines to install mobaxterm/firefox/powertoys/other tools: 186 | ${blue}[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12${reset} 187 | ${blue}iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/jlim0930/scripts/master/gcp-postinstall.ps1'))${reset} 188 | EOF 189 | } 190 | 191 | ## main body 192 | case ${1} in 193 | create|start) 194 | find_instances 195 | create_instances 196 | ;; 197 | find|info|status|check) 198 | find_instances 199 | ;; 200 | delete|cleanup|stop) 201 | delete_instances 202 | ;; 203 | *) 204 | help 205 | ;; 206 | esac 207 | 208 | 209 | -------------------------------------------------------------------------------- /gke.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ===== User Configurable Variables ===== 4 | gke_project="elastic-support-k8s-dev" 5 | gke_region="us-central1" 6 | gke_machine_type="e2-standard-4" 7 | label="division=support,org=support,team=support,project=gkelab" 8 | gke_cluster_nodes="1" 9 | 10 | # ======================================= 11 | gke_cluster_name="$(whoami | sed $'s/[^[:alnum:]\t]//g')-gkelab" 12 | kubectl_url_base="https://dl.k8s.io/release/$(curl -s https://dl.k8s.io/release/stable.txt)/bin" 13 | kubectl_install_path="/usr/local/bin/kubectl" 14 | 15 | ### colors 16 | red=$(tput setaf 1) 17 | green=$(tput setaf 2) 18 | blue=$(tput setaf 14) 19 | reset=$(tput sgr0) 20 | 21 | # Function to display help 22 | help() { 23 | cat << EOF 24 | This script is to stand up a GKE environment in ${gcp_project} Project 25 | 26 | ${green}Usage:${reset} ./$(basename "$0") COMMAND 27 | ${blue}COMMANDS${reset} 28 | 
${green}create|start|deploy${reset} - Creates your GCP environment & runs post-scripts (Linux) 29 | ${green}find|check|info|status${reset} - Finds info about your GCP environment 30 | ${green}delete|cleanup|stop${reset} - Deletes your GCP environment 31 | EOF 32 | } 33 | 34 | # Helper function to display debug messages 35 | debug() { 36 | echo "${green}[DEBUG]${reset} $1" 37 | } 38 | 39 | debugr() { 40 | echo "${red}[DEBUG]${reset} $1" 41 | } 42 | 43 | checkkubectl() { 44 | if ! command -v kubectl &>/dev/null; then 45 | debugr "kubectl not found. Installing." 46 | case ${OS} in 47 | "linux") os_path="linux/amd64" ;; 48 | "macos-x86_64") os_path="darwin/amd64" ;; 49 | "macos-arm64") os_path="darwin/arm64" ;; 50 | *) echo "${red}[ERROR]${reset} Unsupported OS: ${OS}"; return 1 ;; 51 | esac 52 | 53 | # Download kubectl 54 | curl -LO "${kubectl_url_base}/${os_path}/kubectl" 55 | 56 | # Install kubectl and clean up 57 | sudo install kubectl "${kubectl_install_path}" && rm -f kubectl 58 | else 59 | debug "kubectl found." 60 | fi 61 | } 62 | 63 | find_cluster() { 64 | if gcloud container clusters list --project "${gke_project}" 2> /dev/null| grep -q "${gke_cluster_name}"; then 65 | debug "Cluster ${gke_cluster_name} exists." 66 | echo "" 67 | gcloud container clusters list --project "${gke_project}" 2> /dev/null| grep -E "STATUS|${gke_cluster_name}" 68 | exit 69 | else 70 | debugr "Cluster ${gke_cluster_name} not found." 
71 | fi 72 | } 73 | 74 | start_cluster() { 75 | find_cluster 76 | debug "Creating cluster ${gke_cluster_name}" 77 | echo "" 78 | 79 | gcloud container clusters create "${gke_cluster_name}" \ 80 | --labels="${label}" \ 81 | --project="${gke_project}" \ 82 | --region="${gke_region}" \ 83 | --num-nodes="${gke_cluster_nodes}" \ 84 | --machine-type="${gke_machine_type}" \ 85 | --disk-type="pd-ssd" \ 86 | --disk-size="100" \ 87 | --image-type="COS_CONTAINERD" \ 88 | --release-channel="stable" \ 89 | --max-pods-per-node="110" \ 90 | --cluster-ipv4-cidr="/17" \ 91 | --services-ipv4-cidr="/22" \ 92 | --enable-ip-alias \ 93 | --enable-autorepair 94 | 95 | debug "Configuring kubectl context for ${gke_cluster_name}" 96 | gcloud container clusters get-credentials "${gke_cluster_name}" --region="${gke_region}" --project="${gke_project}" 97 | 98 | debug "Adding gcloud RBAC for cluster admin role" 99 | kubectl create clusterrolebinding cluster-admin-binding \ 100 | --clusterrole=cluster-admin \ 101 | --user="$(gcloud auth list --filter=status:ACTIVE --format="value(account)")" 102 | } 103 | 104 | delete_cluster() { 105 | if gcloud container clusters list --project "${gke_project}" 2> /dev/null| grep -q "${gke_cluster_name}"; then 106 | debug "Removing kubectl context" 107 | kubectl config unset current-context 108 | kubectl config delete-context "gke_${gke_project}_${gke_region}_${gke_cluster_name}" 109 | 110 | debug "Deleting ${gke_cluster_name}" 111 | gcloud container clusters delete "${gke_cluster_name}" --project="${gke_project}" --region="${gke_region}" --quiet 112 | else 113 | debugr "Cluster ${gke_cluster_name} not found" 114 | fi 115 | } 116 | 117 | # ===== Main Script ===== 118 | OS=$(uname -s) 119 | case ${OS} in 120 | "Linux") 121 | OS="linux" 122 | ;; 123 | "Darwin") 124 | OS="macos-$(uname -m)" 125 | ;; 126 | *) 127 | debugr "This script only supports macOS and Linux" 128 | exit 1 129 | ;; 130 | esac 131 | 132 | case ${1} in 133 | start|deploy|create) 134 | 
checkkubectl 135 | start_cluster 136 | ;; 137 | find|check|info|status) 138 | find_cluster 139 | ;; 140 | delete|cleanup|stop) 141 | delete_cluster 142 | ;; 143 | *) 144 | help 145 | exit 1 146 | ;; 147 | esac 148 | -------------------------------------------------------------------------------- /gke.sh-backup-20221207: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Creates a GKE cluster 4 | # ------- EDIT information below to customize for your needs 5 | gke_cluster_name="justinlim-gke" # name of your k8s cluster 6 | gke_project="elastic-support-k8s-dev" # project that you are linked to 7 | 8 | #gke_zone="us-central1-c" # zone 9 | gke_region="us-central1" # region 10 | gke_cluster_nodes="1" # number of cluster 11 | gke_machine_type="e2-standard-4" # node machine type 12 | # gke_cluster_node_vCPUs="4" # vCPUs for node 13 | # gke_cluster_node_RAM="16384" 14 | 15 | # -------- do not edit below 16 | 17 | # colors 18 | red=`tput setaf 1` 19 | green=`tput setaf 2` 20 | blue=`tput setaf 4` 21 | reset=`tput sgr0` 22 | 23 | 24 | # help function 25 | help() { 26 | echo "This script is to stand up a GKE environment in ${gke_project}" 27 | echo "" 28 | echo "${green}Usage:${reset} ./`basename $0` COMMAND" 29 | echo "${blue}COMMANDS${reset}" 30 | echo " ${green}start${reset} - Starts your GKE environment" 31 | echo " ${green}find${reset} - Searchs for your deployment" 32 | echo " ${green}delete${reset} - Deletes your GKE environment" 33 | 34 | } # end help 35 | 36 | # check for kubectl and install it - will need sudo 37 | checkkubectl() { 38 | if ! [ -x "$(command -v kubectl)" ]; then 39 | echo "${red}[DEBUG]${reset} kubectl not found. Installing." 40 | if [ $OS == "linux" ]; then 41 | echo "${green}[DEBUG]${reset} Linux found." 
42 | curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 43 | sudo install kubectl /usr/local/bin/kubectl 44 | rm -rf kubectl >/dev/null 2>&1 45 | elif [ ${OS} == "macos-x86_64" ]; then 46 | echo "${gree}[DEBUG]${reset} macOS x86_64 found." 47 | curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl" 48 | sudo install kubectl /usr/local/bin/kubectl 49 | rm -rf kubectl >/dev/null 2>&1 50 | elif [ ${OS} == "macos-arm64" ]; then 51 | echo "${gree}[DEBUG]${reset} macOS arm64 found." 52 | curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl" 53 | sudo install kubectl /usr/local/bin/kubectl 54 | rm -rf kubectl >/dev/null 2>&1 55 | fi 56 | else 57 | echo "${green}[DEBUG]${reset} kubectl found." 58 | fi 59 | } # end checkkubectl 60 | 61 | # find function 62 | find() { 63 | if [ $(gcloud container clusters list 2> /dev/null --project ${gke_project} | grep ${gke_cluster_name} | wc -l) -gt 0 ]; then 64 | echo "${green}[DEBUG]${reset} Cluster ${gke_cluster_name} exists." 65 | echo "" 66 | gcloud container clusters list --project ${gke_project} | egrep "STATUS|${gke_cluster_name}" 67 | exit 68 | else 69 | echo "${green}[DEBUG]${reset} Cluster ${gke_cluster_name} not found." 
70 | fi 71 | } # end find 72 | 73 | # start the deloyment 74 | start() { 75 | find 76 | echo "${green}[DEBUG]${reset} Creating cluster ${gke_cluster_name}" 77 | echo "" 78 | 79 | gcloud container clusters create "${gke_cluster_name}" \ 80 | --project "${gke_project}" \ 81 | --region "${gke_region}" \ 82 | --num-nodes "${gke_cluster_nodes}" \ 83 | --machine-type "${gke_machine_type}" \ 84 | --disk-type "pd-ssd" \ 85 | --disk-size "100" \ 86 | --image-type "COS_CONTAINERD" \ 87 | --release-channel "stable" \ 88 | --max-pods-per-node "110" \ 89 | --enable-ip-alias \ 90 | --enable-autoscaling \ 91 | --min-nodes "1" \ 92 | --max-nodes "2" \ 93 | --addons HorizontalPodAutoscaling,HttpLoadBalancing,GcePersistentDiskCsiDriver \ 94 | --autoscaling-profile optimize-utilization \ 95 | --enable-autorepair 96 | 97 | echo "" 98 | echo "${green}[DEBUG]${reset} Configure kubectl context for ${gke_cluster_name}" 99 | gcloud container clusters get-credentials ${gke_cluster_name} --region ${gke_region} --project ${gke_project} 100 | 101 | echo "${green}[DEBUG]${reset} Adding gcloud RBAC for cluster admin role" 102 | kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud auth list --filter=status:ACTIVE --format="value(account)") 103 | } # end start 104 | 105 | # delete | cleanup 106 | delete() { 107 | if [ $(gcloud container clusters list 2> /dev/null --project ${gke_project} | grep ${gke_cluster_name} | wc -l) -gt 0 ]; then 108 | echo "${green}[DEBUG]${reset} Deleting ${gke_cluster_name}" 109 | gcloud container clusters delete ${gke_cluster_name} --project ${gke_project} --region ${gke_region} --quiet; 110 | 111 | echo "${green}[DEBUG]${reset} Remove kubectl context" 112 | kubectl config unset contexts.${gke_cluster_name} 113 | else 114 | echo "${red}[DEBUG]${reset} Cluster ${gke_cluster_name} not found" 115 | fi 116 | } # end delete 117 | 118 | OS=`uname -s` 119 | case ${OS} in 120 | "Linux") 121 | OS="linux" 122 | ;; 123 | "Darwin") 124 
| if [ `uname -m` == "x86_64" ]; then 125 | OS="macos-x86_64" 126 | elif [ `uname -m` == "arm64" ]; then 127 | OS="macos-arm64" 128 | fi 129 | ;; 130 | *) 131 | echo "${red}[DEBUG]${reset} This script only supports macOS and linux" 132 | exit 133 | ;; 134 | esac 135 | 136 | case ${1} in 137 | deploy|start) 138 | checkkubectl 139 | start 140 | ;; 141 | find|check|info|status) 142 | find 143 | ;; 144 | cleanup|delete|stop) 145 | delete 146 | ;; 147 | *) 148 | help 149 | exit 150 | ;; 151 | esac 152 | -------------------------------------------------------------------------------- /kube-ecklab.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # justin lim 4 | 5 | # $ curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/kube.sh -o kube.sh 6 | 7 | # This script will have options to start up minikube or clean it up. 8 | # options: start|stop|delete 9 | # if minikube & kubectl is not installed it will automatically install it. 10 | # will enable metallb and add IP pool with minikube ip network and pool of 150-175 11 | # sudo access is required for some instances 12 | # tested on linux and macOS 13 | # 14 | 15 | ################# 16 | 17 | ## User configurable variables 18 | CPU="" # Please change this to the # of cores you want minikube to use. If not set it will use half of your total core count 19 | MEM="" # Please change this to the amount of memory to give to minikube. If not set it will use half of your memory up to 16GB max 20 | # HDD="" # Please change this to the amount of hdd space to give for minikube. 
default is 20,000MB 21 | 22 | VERSION="v1.33.0" 23 | 24 | SHELL=`env | grep SHELL | awk -F"/" '{ print $NF }'` 25 | 26 | ## vars 27 | 28 | # colors 29 | red=`tput setaf 1` 30 | green=`tput setaf 2` 31 | blue=`tput setaf 4` 32 | reset=`tput sgr0` 33 | 34 | # if root exit 35 | if [ `id -u` -eq 0 ]; then 36 | echo "${red}[DEBUG]${reset}Please do not run as root" 37 | exit 38 | fi 39 | 40 | # set CPU 41 | if [ -z ${CPU} ]; then 42 | temp=`nproc` 43 | CPU=`echo $((${temp}-1))` 44 | fi 45 | 46 | # set MEM 47 | if [ -z ${MEM} ]; then 48 | temp=`free -m | grep Mem | awk {' print $2 '}` 49 | value=`echo $((${temp}-4096))` 50 | if [ ${value} -gt "16384" ]; then 51 | MEM="16384" 52 | else 53 | MEM="${value}" 54 | fi 55 | fi 56 | 57 | 58 | ## functions 59 | 60 | # function help 61 | function help() { 62 | echo -e "${green}Usage:{$reset} ./`basename $0` [start|stop|delete]" 63 | exit 64 | } 65 | 66 | # function check and install minikube 67 | function checkminikube() { 68 | if ! { [ -x "$(command -v minikube)" ] && [ `minikube version | grep version | awk {' print $3 '}` = "${VERSION}" ]; } then 69 | echo "${red}[DEBUG]${reset} minikube not found or wrong version. Installing." 70 | curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-linux-amd64 71 | sudo install minikube-linux-amd64 /usr/local/bin/minikube 72 | rm -rf minikube-linux-amd64 >/dev/null 2>&1 73 | else 74 | echo "${green}[DEBUG]${reset} minikube found." 75 | fi 76 | } 77 | 78 | # function check and install kubectl 79 | function checkkubectl() { 80 | if ! [ -x "$(command -v kubectl)" ]; then 81 | echo "${red}[DEBUG]${reset} kubectl not found. Installing." 82 | curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 83 | sudo install kubectl /usr/local/bin/kubectl 84 | rm -rf kubectl >/dev/null 2>&1 85 | else 86 | echo "${green}[DEBUG]${reset} kubectl found." 
87 | fi 88 | } 89 | 90 | # function delete the local minikube and kubectl in /usr/local/bin 91 | function deletemk() { 92 | # if [ -x "$(command -v kubectl)" ]; then 93 | # rm -rf $(command -v kubectl) 94 | # echo "${green}[DEBUG]${reset} Deleted kubectl" 95 | # fi 96 | if [ -x "$(command -v minikube)" ]; then 97 | rm -rf $(command -v minikube) 98 | echo "${green}[DEBUG]${reset} Deleted minikube" 99 | fi 100 | } 101 | 102 | # function check docker 103 | function checkdocker() { 104 | docker info >/dev/null 2>&1 105 | if [ $? -ne 0 ]; then 106 | echo "${red}[DEBUG]${reset} Docker is not running or installed or your not part of the docker group. Please fix" 107 | exit 108 | fi 109 | } 110 | 111 | # function start 112 | function build() { 113 | echo "${green}[DEBUG]${reset} CPU will be set to ${CPU} cores" 114 | minikube config set cpus ${CPU} 115 | echo "${green}[DEBUG]${reset} MEM will be set to ${MEM}mb" 116 | minikube config set memory ${MEM} 117 | 118 | # adding host entries onto the host 119 | echo "192.168.49.170 kibana.eck.lab" > /etc/hosts 120 | # adding host entries for minikube 121 | mkdir -p ~/.minikube/files/etc 122 | echo "127.0.0.1 localhost" > ~/.minikube/files/etc/hosts 123 | echo "192.168.49.170 kibana.eck.lab" >> ~/.minikube/files/etc/hosts 124 | 125 | minikube start --driver=docker 126 | minikube addons enable metallb 127 | baseip=`minikube ip | cut -d"." 
--------------------------------------------------------------------------------
/kube.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# NOTE(review): was "#!/bin/sh" but the script uses bash-only features
# ("function", "==" tests, "source <(...)") - declared bash explicitly.

# justin lim

# $ curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/kube.sh -o kube.sh

# Start up minikube or clean it up.  options: start|stop|delete
# If minikube & kubectl are not installed they are installed automatically.
# Enables metallb with an IP pool of <minikube network>.150-175.
# sudo access is required for some steps.  Tested on linux and macOS.

#################

## User configurable variables
CPU=""  # cores for minikube; empty = half of the machine's cores
MEM=""  # memory (MB) for minikube; empty = half of RAM, capped at 16384
# HDD="" # disk space for minikube. minikube's own default is 20,000MB

VERSION="v1.33.0"

# Name of the login shell (bash/zsh/...) - used for kubectl completion.
# Renamed from SHELL so the standard SHELL env var is not clobbered.
MYSHELL=`env | grep SHELL | awk -F"/" '{ print $NF }'`

## vars

# colors
red=`tput setaf 1`
green=`tput setaf 2`
blue=`tput setaf 4`
reset=`tput sgr0`

# Get OS & default CPU & MEM
OS=`uname -s`
case ${OS} in
  "Linux")
    OS="linux"
    if [ -z "${CPU}" ]; then
      temp=`nproc`
      CPU=$((temp/2))
    fi
    if [ -z "${MEM}" ]; then
      temp=`free -m | grep Mem | awk '{ print $2 }'`
      value=$((temp/2))
      if [ "${value}" -gt "16384" ]; then
        MEM="16384"
      else
        MEM="${value}"
      fi
    fi
    ;;
  "Darwin")
    if [ `uname -m` == "x86_64" ]; then
      OS="macos-x86_64"
    elif [ `uname -m` == "arm64" ]; then
      OS="macos-arm64"
    fi
    if [ -z "${CPU}" ]; then
      temp=`sysctl -n hw.ncpu`
      CPU=$((temp/2))
    fi
    if [ -z "${MEM}" ]; then
      # minikube runs inside docker on macOS, so size against docker's memory
      value=`docker system info --format '{{.MemTotal}}' | grep -o '[0-9]*' | awk '{printf "%.0f\n", $1/1024/1024}'`
      if [ "${value}" -gt "16384" ]; then
        MEM="16384"
      else
        MEM="${value}"
      fi
    fi
    ;;
  *)
    echo "${red}[DEBUG]${reset} This script only supports macOS and linux"
    exit
    ;;
esac

## functions

# print usage and exit (was "Usage:{$reset}" - misplaced brace)
function help() {
  echo -e "${green}Usage:${reset} ./`basename $0` [start|stop|delete]"
  exit
}

# install minikube when missing or at the wrong version
function checkminikube() {
  if ! { [ -x "$(command -v minikube)" ] && [ `minikube version | grep version | awk '{ print $3 }'` = "${VERSION}" ]; } then
    echo "${red}[DEBUG]${reset} minikube not found or wrong version. Installing."
    if [ $OS == "linux" ]; then
      echo "${green}[DEBUG]${reset} Linux found."
      curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-linux-amd64
      sudo install minikube-linux-amd64 /usr/local/bin/minikube
      rm -rf minikube-linux-amd64 >/dev/null 2>&1
    elif [ ${OS} == "macos-x86_64" ]; then
      echo "${green}[DEBUG]${reset} macOS x86_64 found."   # was ${gree} - undefined
      curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-darwin-amd64
      sudo install minikube-darwin-amd64 /usr/local/bin/minikube
      rm -rf minikube-darwin-amd64 >/dev/null 2>&1
    elif [ ${OS} == "macos-arm64" ]; then
      echo "${green}[DEBUG]${reset} macOS arm64 found."
      curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-darwin-arm64
      sudo install minikube-darwin-arm64 /usr/local/bin/minikube
      rm -rf minikube-darwin-arm64 >/dev/null 2>&1
    fi
  else
    echo "${green}[DEBUG]${reset} minikube found."
  fi
}

# install kubectl when missing (tracks the current stable release)
function checkkubectl() {
  if ! [ -x "$(command -v kubectl)" ]; then
    echo "${red}[DEBUG]${reset} kubectl not found. Installing."
    if [ $OS == "linux" ]; then
      echo "${green}[DEBUG]${reset} Linux found."
      curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
      sudo install kubectl /usr/local/bin/kubectl
      rm -rf kubectl >/dev/null 2>&1
    elif [ ${OS} == "macos-x86_64" ]; then
      echo "${green}[DEBUG]${reset} macOS x86_64 found."   # was ${gree} - undefined
      curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
      sudo install kubectl /usr/local/bin/kubectl
      rm -rf kubectl >/dev/null 2>&1
    elif [ ${OS} == "macos-arm64" ]; then
      echo "${green}[DEBUG]${reset} macOS arm64 found."    # was ${gree} - undefined
      curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl"
      sudo install kubectl /usr/local/bin/kubectl
      rm -rf kubectl >/dev/null 2>&1
    fi
  else
    echo "${green}[DEBUG]${reset} kubectl found."
  fi
}

# delete the local minikube binary (kubectl removal intentionally disabled)
function deletemk() {
  # if [ -x "$(command -v kubectl)" ]; then
  #   rm -rf $(command -v kubectl)
  #   echo "${green}[DEBUG]${reset} Deleted kubectl"
  # fi
  if [ -x "$(command -v minikube)" ]; then
    rm -rf $(command -v minikube)
    echo "${green}[DEBUG]${reset} Deleted minikube"
  fi
}

# make sure docker is usable (linux only; macOS is handled by minikube itself)
function checkdocker() {
  if [ ${OS} == "macos-x86_64" ] || [ ${OS} == "macos-arm64" ]; then
    echo "${green}[DEBUG]${reset} Docker found"
  else
    docker info >/dev/null 2>&1
    if [ $? -ne 0 ]; then
      echo "${red}[DEBUG]${reset} Docker is not running or installed or your not part of the docker group. Please fix"
      exit
    fi
  fi
}

# start minikube and configure the metallb LoadBalancer pool
function build() {
  echo "${green}[DEBUG]${reset} CPU will be set to ${CPU} cores"
  minikube config set cpus ${CPU}
  echo "${green}[DEBUG]${reset} MEM will be set to ${MEM}mb"
  minikube config set memory ${MEM}
  # minikube config set disk-size ${HDD}
  if [ ${OS} == "linux" ]; then
    minikube start --driver=docker
  else
    minikube start
  fi
  minikube addons enable metallb
  baseip=`minikube ip | cut -d"." -f1-3`
  startip="${baseip}.150"
  endip="${baseip}.175"
  # NOTE(review): the ConfigMap body was lost in this copy of the file; this is
  # the standard metallb layer2 address-pool config - confirm against upstream.
  cat > /tmp/metallb-config.yaml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - ${startip}-${endip}
EOF
  kubectl apply -f /tmp/metallb-config.yaml >/dev/null 2>&1
  rm -rf /tmp/metallb-config.yaml >/dev/null 2>&1
  echo "${green}[DEBUG]${reset} minikube IP is: `minikube ip`"
  echo "${green}[DEBUG]${reset} LoadBalancer Pool: ${startip} - ${endip}"
  source <(kubectl completion ${MYSHELL})
}

## script

case ${1} in
  build|start)
    checkminikube
    checkkubectl
    checkdocker
    echo "${green}[DEBUG]${reset} build minikube"
    build
    ;;
  stop)
    echo "${green}[DEBUG]${reset} Stopping minikube"
    minikube stop
    ;;
  upgrade)
    deletemk
    checkminikube
    checkkubectl
    echo "${green}[DEBUG]${reset} minikube and kubectl upgraded"
    ;;
  delete|cleanup)
    echo "${green}[DEBUG]${reset} Deleting minikube"
    minikube delete
    rm -rf ${HOME}/.minikube >/dev/null 2>&1
    rm -rf ${HOME}/.kube >/dev/null 2>&1
    ;;
  *)
    help
    ;;
esac
--------------------------------------------------------------------------------
/kube.sh.20240519:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# justin lim

# $ curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/kube.sh -o kube.sh

# Start up minikube or clean it up.  options: start|stop|delete
# If minikube & kubectl are not installed they are installed automatically.
# Enables metallb with an IP pool of <minikube network>.150-175.
# sudo access is required for some steps.  Tested on linux and macOS.
#
# for linux it will use the docker driver
# for macOS it will use the hyperkit driver
#################

## User configurable variables
CPU=""  # cores for minikube; empty = half of the machine's cores
MEM=""  # memory (MB) for minikube; empty = half of RAM, capped at 16384
# HDD="" # disk space for minikube. minikube's own default is 20,000MB
VERSION="v1.33.0"
# login shell name used for kubectl completion; renamed from SHELL so the
# standard SHELL env var is not clobbered
MYSHELL=`env | grep SHELL | awk -F"/" '{ print $NF }'`

## vars

# colors
red=`tput setaf 1`
green=`tput setaf 2`
blue=`tput setaf 4`
reset=`tput sgr0`

# Get OS & default CPU & MEM
OS=`uname -s`
case ${OS} in
  "Linux")
    OS="linux"
    if [ -z "${CPU}" ]; then
      temp=`nproc`
      CPU=`echo "${temp}/2" | bc`
    fi
    if [ -z "${MEM}" ]; then
      temp=`free -m | awk '/Mem\:/ { print $2 }'`
      value=`echo "${temp}/2" | bc`
      if [ "${value}" -gt "16384" ]; then
        MEM="16384"
      else
        MEM="${value}"
      fi
    fi
    ;;
  "Darwin")
    if [ `uname -m` == "x86_64" ]; then
      OS="macos-x86_64"
    elif [ `uname -m` == "arm64" ]; then
      OS="macos-arm64"
    fi
    if [ -z "${CPU}" ]; then
      temp=`sysctl -n hw.ncpu`
      CPU=`echo "${temp}/2" | bc`
    fi
    if [ -z "${MEM}" ]; then
      # value=`echo "${temp}/2097152" | bc`
      # minikube runs inside docker on macOS, so size against docker's memory
      value=`docker system info --format '{{.MemTotal}}' | grep -o '[0-9]*' | awk '{printf "%.0f\n", $1/1024/1024}'`
      if [ "${value}" -gt "16384" ]; then
        MEM="16384"
      else
        MEM="${value}"
      fi
    fi
    ;;
  *)
    echo "${red}[DEBUG]${reset} This script only supports macOS and linux"
    exit
    ;;
esac

## functions

# print usage and exit (was "Usage:{$reset}" - misplaced brace)
function help() {
  echo -e "${green}Usage:${reset} ./`basename $0` [start|stop|delete]"
  exit
}

# install minikube when missing or at the wrong version
function checkminikube() {
  if ! { [ -x "$(command -v minikube)" ] && [ `minikube version | grep version | awk '{ print $3 }'` = "${VERSION}" ]; } then
    echo "${red}[DEBUG]${reset} minikube not found or wrong version. Installing."
    if [ $OS == "linux" ]; then
      echo "${green}[DEBUG]${reset} Linux found."
      curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-linux-amd64
      sudo install minikube-linux-amd64 /usr/local/bin/minikube
      rm -rf minikube-linux-amd64 >/dev/null 2>&1
    elif [ ${OS} == "macos-x86_64" ]; then
      echo "${green}[DEBUG]${reset} macOS x86_64 found."   # was ${gree} - undefined
      curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-darwin-amd64
      sudo install minikube-darwin-amd64 /usr/local/bin/minikube
      rm -rf minikube-darwin-amd64 >/dev/null 2>&1
    elif [ ${OS} == "macos-arm64" ]; then
      echo "${green}[DEBUG]${reset} macOS arm64 found."
      curl -LO -s https://storage.googleapis.com/minikube/releases/${VERSION}/minikube-darwin-arm64
      sudo install minikube-darwin-arm64 /usr/local/bin/minikube
      rm -rf minikube-darwin-arm64 >/dev/null 2>&1
    fi
  else
    echo "${green}[DEBUG]${reset} minikube found."
  fi
}

# install kubectl when missing (tracks the current stable release)
function checkkubectl() {
  if ! [ -x "$(command -v kubectl)" ]; then
    echo "${red}[DEBUG]${reset} kubectl not found. Installing."
    if [ $OS == "linux" ]; then
      echo "${green}[DEBUG]${reset} Linux found."
      curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
      sudo install kubectl /usr/local/bin/kubectl
      rm -rf kubectl >/dev/null 2>&1
    elif [ ${OS} == "macos-x86_64" ]; then
      echo "${green}[DEBUG]${reset} macOS x86_64 found."   # was ${gree} - undefined
      curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
      sudo install kubectl /usr/local/bin/kubectl
      rm -rf kubectl >/dev/null 2>&1
    elif [ ${OS} == "macos-arm64" ]; then
      echo "${green}[DEBUG]${reset} macOS arm64 found."    # was ${gree} - undefined
      curl -LO -s "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl"
      sudo install kubectl /usr/local/bin/kubectl
      rm -rf kubectl >/dev/null 2>&1
    fi
  else
    echo "${green}[DEBUG]${reset} kubectl found."
  fi
}

# delete the local minikube and kubectl in /usr/local/bin
function deletemk() {
  if [ -x "$(command -v kubectl)" ]; then
    rm -rf $(command -v kubectl)
    echo "${green}[DEBUG]${reset} Deleted kubectl"
  fi
  if [ -x "$(command -v minikube)" ]; then
    rm -rf $(command -v minikube)
    echo "${green}[DEBUG]${reset} Deleted minikube"
  fi
}

# make sure docker is usable (linux only; macOS uses hyperkit)
function checkdocker() {
  if [ ${OS} == "macos-x86_64" ] || [ ${OS} == "macos-arm64" ]; then
    echo "${green}[DEBUG]${reset} macos found will use hyperkit instead of docker"
  else
    docker info >/dev/null 2>&1
    if [ $? -ne 0 ]; then
      echo "${red}[DEBUG]${reset} Docker is not running or installed or your not part of the docker group. Please fix"
      exit
    fi
  fi
}

# start minikube and configure the metallb LoadBalancer pool
function build() {
  echo "${green}[DEBUG]${reset} CPU will be set to ${CPU} cores"
  minikube config set cpus ${CPU}
  echo "${green}[DEBUG]${reset} MEM will be set to ${MEM}mb"
  minikube config set memory ${MEM}
  # minikube config set disk-size ${HDD}
  if [ ${OS} == "linux" ]; then
    minikube start --driver=docker
  else
    minikube start
  fi
  minikube addons enable metallb
  baseip=`minikube ip | cut -d"." -f1-3`
  startip="${baseip}.150"
  endip="${baseip}.175"
  # NOTE(review): the ConfigMap body was lost in this copy of the file; this is
  # the standard metallb layer2 address-pool config - confirm against upstream.
  cat > /tmp/metallb-config.yaml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - ${startip}-${endip}
EOF
  kubectl apply -f /tmp/metallb-config.yaml >/dev/null 2>&1
  rm -rf /tmp/metallb-config.yaml >/dev/null 2>&1
  echo "${green}[DEBUG]${reset} minikube IP is: `minikube ip`"
  echo "${green}[DEBUG]${reset} LoadBalancer Pool: ${startip} - ${endip}"
  source <(kubectl completion ${MYSHELL})
}

## script

case ${1} in
  build|start)
    checkminikube
    checkkubectl
    checkdocker
    echo "${green}[DEBUG]${reset} build minikube"
    build
    ;;
  stop)
    echo "${green}[DEBUG]${reset} Stopping minikube"
    minikube stop
    ;;
  upgrade)
    deletemk
    checkminikube
    checkkubectl
    echo "${green}[DEBUG]${reset} minikube and kubectl upgraded"
    ;;
  delete|cleanup)
    echo "${green}[DEBUG]${reset} Deleting minikube"
    minikube delete
    rm -rf ${HOME}/.minikube >/dev/null 2>&1
    rm -rf ${HOME}/.kube >/dev/null 2>&1
    ;;
  *)
    help
    ;;
esac
--------------------------------------------------------------------------------
/minio.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# min.io script to stand up docker-ized mini.io server on localhost and have
# the endpoint exposed as http://IP:9000
# will create a minio directory for the data directory.

# find IP
if [ "`uname -s`" != "Darwin" ]; then
  IP=`hostname -I | awk '{ print $1 }'`
else
  IP=`ifconfig | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{ print $2 }'`
fi

help() {
  echo -e "./`basename $0` command"
  echo -e "\tCOMMANDS"
  echo -e "\t\tbuild - Fresh install - it will build a new minio docker instance named myminio"
  echo -e "\t\t\t\tmyminio directory will be created in your homedir"
  echo -e "\t\tstart - Start myminio container if stopped"
  echo -e "\t\tstop - Stop myminio container"
  echo -e "\t\tcleanup - Stops myminio container and delete it and delete ${HOME}/myminio"
}

checkdocker() {
  # check to ensure docker is running and you can run docker commands
  docker info >/dev/null 2>&1
  if [ $? -ne 0 ]; then
    echo "[DEBUG] Docker is not running or you are not part of the docker group"
    exit
  fi
}

makehome() {
  # create the data directory when it does not yet exist.
  # (was `if [ -f ${HOME}/myminio ]` - that only created the directory when a
  #  *file* of that name already existed, so a fresh host got no data dir)
  if [ ! -d ${HOME}/myminio ]; then
    mkdir -p ${HOME}/myminio
    echo "Created minio directory"
  fi
}

pullminio() {
  # pull the minio image only when it is not already present locally
  docker image inspect minio/minio:latest > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    echo "[DEBUG] Pulling minio image.. might take a while"
    docker pull minio/minio:latest
  else
    echo "[DEBUG] Using existing minio image"
  fi
}

build() {
  makehome
  pullminio   # was defined but never called - pre-pull so docker run reports progress
  docker run -d -p 9000:9000 \
    --user $(id -u):$(id -g) \
    --name myminio \
    -e "MINIO_ROOT_USER=minio" \
    -e "MINIO_ROOT_PASSWORD=minio123" \
    -v ${HOME}/myminio:/data \
    minio/minio server /data
  if [ $? -eq 0 ]; then
    # (fixed "mmyinio" typo in the message)
    echo "[DEBUG] myminio started. http://localhost:9000 or http://${IP}:9000. access_key: minio secret_key: minio123"
    echo ""
    echo "[DEBUG] Please visit https://dl.minio.io/client/mc/release/ and download the mc client for your machine and chmod a+x mc and place it in your path"
    echo "[DEBUG] Add myminio server into mc: mc config host add myminio http://127.0.0.1:9000 minio minio123"
    echo "[DEBUG] mc commands are located on https://dl.minio.io/client/mc/release/"
    echo "[DEBUG] For quick use: Create Bucket: mc mb myminio/bucketname"
    echo "[DEBUG] You can use s3cmd as well."

  fi
}

start() {
  # check to see if container exists and if not build it and start it
  if [ "$(docker ps | grep -c myminio)" -eq 1 ]; then
    echo "[DEBUG] myminio is already running"
  elif [ "$(docker ps -a | grep -c myminio)" -eq 1 ]; then
    echo "[DEBUG] myminio found and starting container"
    docker start myminio
  else
    echo "[DEBUG] myminio container doesnt exist. Building"
    build
  fi
}

stop() {
  if [ "$(docker ps | grep myminio)" ]; then
    docker stop myminio
    echo "[DEBUG] Stopping myminio container"
  else
    echo "[DEBUG] myminio was not running"
  fi
}

cleanup() {
  stop
  if [ "$(docker ps -aq -f status=exited -f name=myminio)" ]; then
    docker rm myminio
  fi
  rm -rf ${HOME}/myminio
  echo "[DEBUG] Deleted ${HOME}/myminio"
}

# modes
case $1 in
  build)
    checkdocker
    build
    ;;
  start)
    checkdocker
    start
    ;;
  stop)
    stop
    ;;
  cleanup)
    cleanup
    ;;
  *)
    help
    ;;
esac
--------------------------------------------------------------------------------
/mywiki.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# This script will use linuxserver.io dokuwiki container and manage your
# content on a github repo.
#
# Please create a github repo named wiki and set it to private
#   https://docs.github.com/en/github/getting-started-with-github/create-a-repo
# setup ssh keys for github so that this script can use it to get/push content
#   https://docs.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh
#
# This script will create ${HOME}/wiki to store your content
# Container name will be wiki

# Editable variables
GHUSERNAME=""
PORT="9090"
TLSPORT="9091"

# colors
red=`tput setaf 1`
green=`tput setaf 2`
reset=`tput sgr0`

# assign vars
userID=$(id -u)
groupID=$(id -g)
DATE=$(date +"%Y%m%d-%H%M")

# functions

# clone the repo on first run and make sure the wiki container is running
build() {
  if [ ! -d ${HOME}/wiki ]; then
    echo "${green}[DEBUG]${reset} Creating ${HOME}/wiki"
    mkdir -p ${HOME}/wiki > /dev/null 2>&1
    git clone git@github.com:${GHUSERNAME}/wiki.git ${HOME}/wiki
    if [ $? -ne 0 ]; then
      echo "${red}[DEBUG]${reset} Unable to clone from github. Exiting...."
      exit
    fi
  fi
  if [ $(docker ps | grep -c wiki) -ge 1 ]; then
    echo "${green}[DEBUG]${reset} wiki container is already running"
  elif [ $(docker ps -a | grep -c wiki) -ge 1 ]; then
    echo "${green}[DEBUG]${reset} wiki container exits but is not running. Starting container"
    docker start wiki >/dev/null 2>&1
  elif [ $(docker ps -a | grep -c wiki) -eq 0 ]; then
    echo "${green}[DEBUG]${reset} Creating wiki container"
    # PGID (was GUID, which linuxserver images ignore) and TZ with a "/"
    # (was TZ=America\Chicago which the shell collapses to "AmericaChicago")
    docker run -d --name=wiki -e PUID=${userID} -e PGID=${groupID} -e TZ=America/Chicago -p ${PORT}:80 -p ${TLSPORT}:443 -v ${HOME}/wiki:/config --restart unless-stopped ghcr.io/linuxserver/dokuwiki >/dev/null 2>&1
  fi
}

stopcontainer() {
  if [ $(docker ps -a | grep -c wiki) -ge 1 ]; then
    echo "${green}[DEBUG]${reset} Stopping wiki container"
    docker stop wiki >/dev/null 2>&1
  else
    echo "${red}[DEBUG]${reset} container is not running. Nothing to stop"
  fi
}

# commit and push local wiki content to the github repo
gitpush () {
  if [ ! -d ${HOME}/wiki ]; then
    echo "${red}[DEBUG]${reset} ${HOME}/wiki does not exist so nothing to push."
    exit
  fi
  cd ${HOME}/wiki
  if [ ! -f ${HOME}/wiki/.gitignore ]; then
    # NOTE(review): the original .gitignore heredoc body was lost in this copy
    # of the file; ignoring dokuwiki's volatile runtime dirs is the usual
    # choice - confirm against the upstream script.
    cat > ${HOME}/wiki/.gitignore<<EOF
data/cache/
data/index/
data/locks/
data/tmp/
EOF
  fi
  # NOTE(review): the add/commit/push sequence below was also lost in this
  # copy and is reconstructed - confirm against the upstream script.
  git add -A >/dev/null 2>&1
  git commit -m "${DATE}" >/dev/null 2>&1
  git push >/dev/null 2>&1
  echo "${green}[DEBUG]${reset} Pushed ${HOME}/wiki to github"
}

# pull the latest wiki content from the github repo
gitpull() {
  if [ ! -d ${HOME}/wiki ]; then
    echo "${red}[DEBUG]${reset} ${HOME}/wiki does not exist. Please run build first."
    exit
  fi
  cd ${HOME}/wiki
  git pull >/dev/null 2>&1
  echo "${green}[DEBUG]${reset} Pulled content from github into ${HOME}/wiki"
}

cleanup() {
  echo "${green}[DEBUG]${reset} Stopping container"
  docker stop wiki >/dev/null 2>&1
  echo "${green}[DEBUG]${reset} Removing container"
  docker rm wiki >/dev/null 2>&1
  echo "${green}[DEBUG]${reset} Removing ${HOME}/wiki directory"
  rm -rf ${HOME}/wiki
}

case $1 in
  build)
    build
    ;;
  start)
    build
    ;;
  pull)
    gitpull
    ;;
  push)
    gitpush
    ;;
  stop)
    stopcontainer
    ;;
  cleanup)
    cleanup
    ;;
  *)
    echo "${green}Usage:${reset} ./`basename $0` command"
    echo "  ${green}build${reset} - Create ${HOME}/wiki and git clone"
    echo "  ${green}push${reset} - push contents to git repo"
    echo "  ${green}pull${reset} - pull contents from git repo"
    echo "  ${green}start${reset} - init and start container"
    echo "  ${green}stop${reset} - stops wiki container"
    echo "  ${green}cleanup${reset} - stops and removes container and ${HOME}/wiki"
    echo ""
    echo "Please make sure edit the editable section in the script"
    echo "Please create your github repo first"
    ;;
esac
--------------------------------------------------------------------------------
/netbackup-split-per-OS.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# was "#!/bin/sh" - "read -p" is a bashism

# This script will seperate the big tarballs for NBU into individual clients for each OS.
# Please make sure that there is enough space on the drive to perform the seperation.
# Copy this script and the 2 tarballs into a directory and run the script.

# set directories
TMP=nbutmp
# absolute script dir, then work from it so the relative tar/gzip paths and the
# ${BASEDIR}-prefixed paths always agree (the original mixed bare relative and
# ${BASEDIR}-prefixed paths, which broke when run from another directory)
BASEDIR=$(cd "$(dirname "$0")" && pwd)
cd "${BASEDIR}" || exit 1

# take input
read -p "Version: " VERSION
echo ""
echo "[DEBUG] Version $VERSION entered."

CLIENT1="NetBackup_${VERSION}_CLIENTS1.tar.gz"
CLIENT2="NetBackup_${VERSION}_CLIENTS2.tar.gz"
LOC1="NetBackup_${VERSION}_CLIENTS1/NBClients/anb/Clients/usr/openv/netbackup/client"
LOC2="NetBackup_${VERSION}_CLIENTS2/NBClients/anb/Clients/usr/openv/netbackup/client"

if [ ! -f $BASEDIR/$CLIENT1 ]; then
  echo "[DEBUG] File ${CLIENT1} does not exist. Please copy over and start over again."
  exit
else
  echo "[DEBUG] File ${CLIENT1} found."
fi

if [ ! -f $BASEDIR/$CLIENT2 ]; then
  echo "[DEBUG] File ${CLIENT2} does not exist. Please copy over and start over again."
  exit
else
  echo "[DEBUG] File ${CLIENT2} found."
fi

echo "[DEBUG] Please make sure that you have enough space to perform the seperation. This process will require 3x the size of both tarballs to seperate."


if [ -d ${BASEDIR}/${TMP} ]; then
  echo "[DEBUG] Removing old temp directory."
  rm -rf ${BASEDIR}/${TMP}
fi

echo "[DEBUG] Creating temp directory."
mkdir ${BASEDIR}/${TMP}

if tar -zxf ${CLIENT1} ; then
  echo "[DEBUG] File ${CLIENT1} untarred removing source."
  rm ${CLIENT1}
  mv ${BASEDIR}/${LOC1}/* ${BASEDIR}/${TMP}/
else
  echo "[DEBUG] Problem untaring ${CLIENT1}... exiting"
  exit
fi

if tar -zxf ${CLIENT2} ; then
  echo "[DEBUG] File ${CLIENT2} untarred removing source."
  rm ${CLIENT2}
  mv ${BASEDIR}/${LOC2}/* ${BASEDIR}/${TMP}/
else
  echo "[DEBUG] Problem untaring ${CLIENT2}... exiting"
  exit
fi


echo "[DEBUG] Creating tarball for HP-UX-IA64"
mv ${BASEDIR}/${TMP}/HP-UX-IA64/ ${BASEDIR}/${LOC1}/
tar -h -cf NetBackup_${VERSION}_specific.HP-UX-IA64.tar NetBackup_${VERSION}_CLIENTS1/
gzip NetBackup_${VERSION}_specific.HP-UX-IA64.tar
rm -rf ${BASEDIR}/${LOC1}/HP-UX-IA64

echo "[DEBUG] Creating tarball for INTEL"
mv ${BASEDIR}/${TMP}/INTEL/ ${BASEDIR}/${LOC1}/
tar -h -cf NetBackup_${VERSION}_specific.INTEL-FreeBSD6.0.tar NetBackup_${VERSION}_CLIENTS1/
gzip NetBackup_${VERSION}_specific.INTEL-FreeBSD6.0.tar
rm -rf ${BASEDIR}/${LOC1}/INTEL

echo "[DEBUG] Creating tarball for MACINTOSH"
mv ${BASEDIR}/${TMP}/MACINTOSH/ ${BASEDIR}/${LOC1}/
tar -h -cf NetBackup_${VERSION}_specific.MACINTOSH-MacOSX10.6.tar NetBackup_${VERSION}_CLIENTS1/
gzip NetBackup_${VERSION}_specific.MACINTOSH-MacOSX10.6.tar
rm -rf ${BASEDIR}/${LOC1}/MACINTOSH

echo "[DEBUG] Creating tarball for RS6000"
mv ${BASEDIR}/${TMP}/RS6000/ ${BASEDIR}/${LOC1}/
tar -h -cf NetBackup_${VERSION}_specific.RS6000-AIX6.tar NetBackup_${VERSION}_CLIENTS1/
gzip NetBackup_${VERSION}_specific.RS6000-AIX6.tar
rm -rf ${BASEDIR}/${LOC1}/RS6000

# Solaris and Linux get one tarball per sub-flavor directory.
# (added the ${BASEDIR} prefixes the original omitted here)
mv ${BASEDIR}/${TMP}/Solaris/ ${BASEDIR}/${LOC1}/
for dir in `ls -1 ${BASEDIR}/${LOC1}/Solaris/`
do
  echo "[DEBUG] Creating tarball for Solaris.${dir}"
  tar -h -cf NetBackup_${VERSION}_specific.Solaris.${dir}.tar NetBackup_${VERSION}_CLIENTS1/Doc/ NetBackup_${VERSION}_CLIENTS1/LICENSE NetBackup_${VERSION}_CLIENTS1/NBClients/anb/Clients/usr/openv/netbackup/client/Solaris/${dir}/ NetBackup_${VERSION}_CLIENTS1/NBClients/catalog/ NetBackup_${VERSION}_CLIENTS1/VSM_README NetBackup_${VERSION}_CLIENTS1/install
  gzip NetBackup_${VERSION}_specific.Solaris.${dir}.tar
done

mv ${BASEDIR}/${TMP}/Linux/ ${BASEDIR}/${LOC2}/
for dir in `ls -1 ${BASEDIR}/${LOC2}/Linux/`
do
  echo "[DEBUG] Creating tarball for LINUX.${dir}"
  tar -h -cf NetBackup_${VERSION}_specific.Linux.${dir}.tar NetBackup_${VERSION}_CLIENTS2/Doc/ NetBackup_${VERSION}_CLIENTS2/LICENSE NetBackup_${VERSION}_CLIENTS2/NBClients/anb/Clients/usr/openv/netbackup/client/Linux/${dir}/ NetBackup_${VERSION}_CLIENTS2/NBClients/catalog/ NetBackup_${VERSION}_CLIENTS2/VSM_README NetBackup_${VERSION}_CLIENTS2/install
  gzip NetBackup_${VERSION}_specific.Linux.${dir}.tar
done

echo "[ INFO ] Cleanup..."
rm -rf ${BASEDIR}/${TMP}
rm -rf ${BASEDIR}/NetBackup_${VERSION}_CLIENTS1
rm -rf ${BASEDIR}/NetBackup_${VERSION}_CLIENTS2


echo "[DEBUG] DONE!"
ls -la
--------------------------------------------------------------------------------
/netinfo.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# this script will plumb up all the interfaces and grab its information
# version 0.2 - 2012-11-13 completely rewritten
# version 0.1 - inital

HOSTNAME=`hostname`
echo $HOSTNAME
echo ""

format="%10s%19s%16s%16s%8s%8s%12s%8s %-s\n"
printf "$format" "INTERFACE" "MAC" "IP" "NETMASK" "SLAVE?" "LINK" "SPEED" "DUPLEX"
printf "$format" "---------" "---" "--" "-------" "------" "----" "-----" "------"

# every broadcast-capable interface on the box
LIST=`ip link show | grep BROADCAST | awk '{ print $2 }' | awk -F: '{ print $1 }'`


for INTERFACE in $LIST
do
  # configure the interface up so link state can be probed
  ifconfig $INTERFACE up

  # find the link status; bonded slaves report via the master
  LINK="no"
  if [ `ip link show | grep $INTERFACE | grep -c MASTER` -eq 1 ]; then
    LINK="bonded"
  else
    LINK=`ethtool $INTERFACE | grep Link | awk '{ print $3 }'`
  fi

  # if link is up find speed and duplex
  SPEED="n/a"
  DUPLEX="n/a"
  if [ "$LINK" = "yes" ]; then   # was `==` - not valid under /bin/sh
    SPEED=`ethtool $INTERFACE | grep Speed | awk '{ print $2 }'`
    DUPLEX=`ethtool $INTERFACE | grep Duplex | awk '{ print $2 }'`
  fi

  # MAC address; bonding slaves expose their permanent MAC only in /proc
  MAC=`ifconfig $INTERFACE | grep HWaddr | awk '{ print $5 }'`
  if [ `ip link show | grep $INTERFACE | grep -c SLAVE` -eq 1 ]; then
    MAC=`cat /proc/net/bonding/bond* | egrep "Slave Interface|Permanent" | sed 'N;s/\n/ /' | grep $INTERFACE | awk '{ print $7 }'`
  fi

  # IP address & netmask
  if [ `ifconfig $INTERFACE | grep -c 'inet addr'` -lt 1 ]; then
    IP="none "
    NETMASK="none"
  else
    IP=`ifconfig $INTERFACE | grep "inet addr" | awk '{ print $2 }' | awk -F: '{ print $2 }'`
    NETMASK=`ifconfig $INTERFACE | grep "inet addr" | awk '{ print $4 }' | awk -F: '{ print $2 }'`
  fi

  # SLAVE ?
  if [ `ip link show | grep $INTERFACE | grep -c SLAVE` -eq 1 ]; then
    SLAVE=`ip link show | grep $INTERFACE | awk '{ print $9 }'`
  else
    SLAVE="no"
  fi

  printf "$format" $INTERFACE $MAC $IP $NETMASK $SLAVE $LINK $SPEED $DUPLEX
done

echo ""
# print routing table
netstat -rn
--------------------------------------------------------------------------------
/runscript.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# this script will run the command specified in CMD against a list of
# hosts in list_file and take logs while it runs
# it will exit if the list is empty

CMD="xxxxx"

[ $# = 0 ] && echo "$0 list_file" && exit 1

# was `cat $i` - $i is never set, so the loop body never ran; the host list
# is the first positional argument
for host in `cat $1`
do
  echo "$host: " | tee -a $$.out
  ssh $host "$CMD" | tee -a $$.out
  echo
done
--------------------------------------------------------------------------------
/serverinfo.sh:
--------------------------------------------------------------------------------
#!/bin/sh
#
# get various server information (RHEL-era hardware/OS inventory dump)
# getting vars
#
# CPU INFO (grep file args directly - the `cat |` forms were useless-use-of-cat)
CPUMODEL=`grep "model name" /proc/cpuinfo | uniq | awk -F: '{ print $2 }' | sed 's/ *//g'`
PHYSICALCPU=`grep "physical id" /proc/cpuinfo | sort -n | uniq | grep -c id`
CORESPERCPU=`grep "core id" /proc/cpuinfo | sort -n | uniq | grep -c id`
#
# MEMORY INFO (MB)
PHYSICALMEMORY=`free -m | grep Mem | awk '{ print $2 }'`
SWAPMEMORY=`free -m | grep Swap | awk '{ print $2 }'`
#
# chassis
SERIAL=`dmidecode | grep "Serial Number" | head -1 | sed 's/Serial Number: //g' | sed 's/\t//g'`
#
# DISK - guess the boot disk from the /boot mount and strip the partition digit
# NOTE(review): `sed 's/1//g'` removes *every* "1" in the device name, which
# mangles names like /dev/sda11 or nvme0n1p1 - works only for classic sdX1.
LOCALDISK=`mount | grep boot | awk '{ print $1 }' | sed 's/1//g'`
TOTALLOCALDISK=`fdisk -l | grep "$LOCALDISK:" | awk -F: '{ print $2 }' | awk -F, '{ print $1 }'`
# MISC
ARCH=`uname -m`
KERNEL=`uname -r`
TZ=`date | awk '{ print $5 }'`
#
# rhel4 or higher ("Nahant" is RHEL4's code name)
RHEL4=0
if [ `grep -c Nahant /etc/redhat-release` = 1 ]
then
  RHEL4=1
fi

echo "HOSTNAME : "
hostname
echo ""
echo "SERIAL NUMBER : $SERIAL"
echo ""
echo "CPU MODEL : $CPUMODEL"
echo "NUMBER OF PHYSICAL SOCKETS : $PHYSICALCPU"
echo "NUMBER OF CORES PER CPU : $CORESPERCPU"
echo ""
echo "PHYSICAL MEMORY : $PHYSICALMEMORY"
echo "SWAP MEMORY : $SWAPMEMORY"
echo ""
echo "Arch : $ARCH"
echo "Kernel : $KERNEL"
echo "Release : "
cat /etc/redhat-release
echo "Time Zone : $TZ"
echo ""
echo "resolv.conf"
cat /etc/resolv.conf
echo ""
echo "NTP servers"
grep server /etc/ntp.conf | grep -v ^#
echo ""
echo "Total Local Disk : $TOTALLOCALDISK"
echo ""
echo "WWN : "
if [ $RHEL4 -eq 1 ]
then
  # RHEL4: qla2xxx exposes port names under /proc
  cat /proc/scsi/qla2xxx/* | grep adapter-port | awk -F= '{ print $2 }' | cut -c 1-16 | sed 'N;s/\n/\,/'
else
  # RHEL5+: sysfs fc_host entries
  for i in `ls -1 /sys/class/fc_host/`; do cat /sys/class/fc_host/$i/port_name | sed 's/0x//g'; done | sed 'N;s/\n/\,/'
fi
echo ""
if [ $RHEL4 -eq 1 ]
then
  echo "Powerpath :"
  powermt display dev=all
else
  echo "multipath :"
  multipath -l
fi
echo ""
echo "pvs"
pvs
echo "vgs"
vgs
echo "lvs"
lvs
echo "df -h"
df -h
echo "/etc/fstab"
cat /etc/fstab
echo ""
echo "Interfaces :"
ifconfig -a | grep addr
cat /proc/net/bonding/bond*
netstat -rn
echo ""
echo ""
echo ""
echo ""
--------------------------------------------------------------------------------
/shard_counter.sh:
--------------------------------------------------------------------------------
#!/bin/sh


# get total number of shards and size per instance from cat_shards.txt
# (output of GET _cat/shards?v)
#
#

# colors
red=`tput setaf 1`
green=`tput setaf 2`
blue=`tput setaf 14`
reset=`tput sgr0`

# help
if [ -z "${1}" ]; then
  echo "${green}Usage :${reset} ${0} /path/cat_shards.txt"
  exit
fi

# read "<number><unit>" sizes (b/kb/mb/gb) on stdin and print the total in GB
sum_and_convert_to_gb() {
  total_size_bytes=0

  while read line; do
    size=$(echo $line | sed -E 's/[^0-9.]//g')
    unit=$(echo $line | sed -E 's/[^bkmg]//g')

    case $unit in
      "b")  total_size_bytes=$(echo "$total_size_bytes + $size" | bc) ;;
      "kb") total_size_bytes=$(echo "$total_size_bytes + ($size * 1024)" | bc) ;;
      "mb") total_size_bytes=$(echo "$total_size_bytes + ($size * 1024 * 1024)" | bc) ;;
      "gb") total_size_bytes=$(echo "$total_size_bytes + ($size * 1024 * 1024 * 1024)" | bc) ;;
    esac

  done

  total_size_gb=$(echo "scale=2; $total_size_bytes / 1024 / 1024 / 1024" | bc)
  echo $total_size_gb
}

fmt="%-40s%-10s%-12s\n"
# the value below is computed in GB - the header used to say SIZE(mb)
printf "$fmt" "INSTANCE" "SHARDS" "SIZE(gb)"

# iterate unique node names (last column), skipping the header row.
# Match the node column *exactly*: the previous `grep -c "${instance}"` also
# counted rows whose node name merely contains this one as a substring.
for instance in `awk '{ print $NF }' "${1}" | grep -v "^node$" | sort | uniq`
do
  shards=`awk -v n="${instance}" '$NF == n' "${1}" | wc -l`
  # NOTE(review): column $7 is assumed to be the store size in this capture's
  # _cat/shards layout - confirm against the file's header row
  size=`awk -v n="${instance}" '$NF == n { print $7 }' "${1}" | sum_and_convert_to_gb`


  printf "$fmt" "${instance}" "${shards}" "${size}"
done
gcloud compute ssh ${name} --zone=${zone} --command="docker image prune -a -f" & 15 | # gcloud compute instances add-labels ${name} --zone=${zone} --labels="${label}" 16 | done 17 | -------------------------------------------------------------------------------- /wildcard.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This scrip will create a CA cert and key and a wildcard cert 4 | 5 | # CONFIG 6 | COUNTRY="US" # COUNTY 7 | STATE="TX" # STATE 8 | LOCALITY="AUSTIN" # LOCALITY 9 | ORGANIZATION="HOME" # ORGANIZATION NAME 10 | ORGANIZATIONUNIT="LIM" # ORGANIZATION UNIT 11 | COMMONNAME="device" # COMMON NAME 12 | SAN="DNS.1 = *.lim.home 13 | DNS.2 = localhost 14 | IP.1 = 127.0.0.1 15 | IP.2 = 192.168.1.1 16 | IP.3 = 192.168.1.2 17 | IP.4 = 192.168.1.3 18 | IP.5 = 192.168.1.4 19 | IP.6 = 192.168.1.5 20 | IP.7 = 192.168.1.10 21 | IP.8 = 192.168.1.254 22 | IP.9 = 192.168.1.253 23 | IP.10 = 192.168.1.252" 24 | 25 | 26 | # Do not change anything below 27 | 28 | # set -x 29 | 30 | 31 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 32 | NAME=${1:-localhost} 33 | SUBJECT="/C=${COUNTRY}/ST=${STATE}/L=${LOCALITY}/O=${ORGANIZATION}/OU=${ORGANIZATIONUNIT}/CN=${COMMONNAME}" 34 | 35 | # Create CA Key 36 | CA_KEY=${DIR}/ca.key 37 | PASS=`openssl rand -base64 48` 38 | echo ${PASS} > ${DIR}/ca.passphrase 39 | [ ! 
-f ${CA_KEY} ] || rm -rf ${CA_KEY} 40 | openssl genrsa -des3 -passout pass:${PASS} -out ${CA_KEY} 41 | 42 | # Create non Crypt CA Key 43 | CA_NONCRYPT_KEY=${DIR}/ca.noncrypt.key 44 | openssl rsa -passin pass:${PASS} -in ${CA_KEY} -out ${CA_NONCRYPT_KEY} 45 | 46 | # Create CA Cert 47 | CA_CERT=${DIR}/ca.crt 48 | [ -f ${CA_CERT} ] || rm -rf ${CA_CERT} 49 | openssl req -x509 -new -nodes -key ${CA_KEY} -sha256 -days 3650 -passin pass:${PASS} -out ${CA_CERT} -subj ${SUBJECT} 50 | #openssl req -x509 -new -nodes -key ${CA_NONCRYPT_KEY} -sha256 -days 365 -out ${CA_CERT} -subj ${SUBJECT} 51 | 52 | # Create wildcard key 53 | WILD_KEY=${DIR}/wildcard.key 54 | [ -f ${WILD_KEY} ] || rm -rf ${WILD_KEY} 55 | openssl genrsa -out ${WILD_KEY} 2048 56 | 57 | # Create wildcard CSR 58 | CSRCONFIG=${DIR}/csrconfig.cnf 59 | WILD_CSR=wildcard.csr 60 | [ ! -f ${CSRCONFIG} ] || rm -rf ${CSRCONFIG} 61 | cat > ${CSRCONFIG}< ${CERTCONFIG}< ${CSRCONFIG}< ${CERTCONFIG}< "${Env:Temp}\openssh.ps1" 37 | Start-Process -FilePath "PowerShell" -ArgumentList "${Env:Temp}\openssh.ps1" -Verb RunAs -Wait -WindowStyle Hidden 38 | Remove-Item -Path "${Env:Temp}\openssh.ps1" -Force 39 | 40 | # Configure git 41 | scoop install 7zip git 42 | reg import "C:\Users\$env:USERNAME\scoop\apps\7zip\current\install-context.reg" 43 | 44 | 45 | # Set Timezone 46 | Set-TimeZone -Name "Central Standard Time" 47 | 48 | # Add Language 49 | Install-Language ko-KR -AsJob 50 | 51 | # Change User variables 52 | [Environment]::SetEnvironmentVariable("TEMP", "C:\temptemp", "User") 53 | [Environment]::SetEnvironmentVariable("TMP", "C:\temptemp", "User") 54 | 55 | # Change System variables 56 | sudo [Environment]::SetEnvironmentVariable("TMP", "C:\temptemp", "Machine") 57 | sudo [Environment]::SetEnvironmentVariable("TEMP", "C:\temptemp", "Machine") 58 | 59 | # scoop stuff 60 | scoop install curl grep sed less touch 61 | 62 | # Add Buckets 63 | scoop bucket add extras 64 | scoop bucket add nonportable 65 | scoop bucket add java 66 | 
scoop bucket add nerd-fonts 67 | scoop bucket add nirsoft 68 | 69 | # scoop packages 70 | scoop install firefox 71 | scoop install brave 72 | scoop install mobaxterm 73 | scoop install putty 74 | scoop install totalcommander 75 | scoop install bitwarden 76 | scoop install sharex 77 | scoop install vscode 78 | reg import "C:\Users\$env:USERNAME\scoop\apps\vscode\current\install-associations.reg" 79 | scoop install notepadplusplus 80 | reg import "C:\Users\$env:USERNAME\scoop\apps\notepadplusplus\current\install-context.reg" 81 | scoop install k-lite-codec-pack-standard-np 82 | scoop install winrar 83 | scoop install vlc 84 | scoop install revouninstaller 85 | scoop install spotify 86 | scoop install foxit-pdf-reader 87 | scoop install unlocker 88 | scoop install winaero-tweaker 89 | scoop install powertoys 90 | scoop install Noto-CJK-Mega-OTC 91 | scoop isntall clink 92 | scoop install sysinternals 93 | 94 | # winget packages 95 | winget install --id="AOMEI.PartitionAssistant" -e 96 | winget install --id="Microsoft.DotNet.DesktopRuntime.3_1" -e 97 | winget install --id="Microsoft.DotNet.DesktopRuntime.5" -e 98 | winget install --id="Microsoft.DotNet.DesktopRuntime.6" -e 99 | winget install --id="Microsoft.DotNet.DesktopRuntime.7" -e 100 | winget install --id="Google.Chrome" -e 101 | 102 | # wsl 103 | Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Windows-Subsystem-Linux 104 | scoop bucket add wsl https://github.com/KNOXDEV/wsl 105 | 106 | 107 | # Done 108 | Write-Output "Install complete! Please reboot your machine/worksation!" 
#!/bin/sh

# 0.2 - changed up the script a bit and added some other checks
# justin
#
# Print the WWN (World Wide Name) of every Fibre Channel HBA on this host.
# Handles both the legacy 2.4-kernel qlogic layout (/proc/scsi/qla*) and
# the modern sysfs layout (/sys/class/fc_host).
# NOTE(review): run as root so lspci can enumerate all devices; otherwise
# the HBA count may be incomplete.

HOSTNAME=$(hostname)

# Count FC adapters. 2>/dev/null keeps the script quiet (and COUNT set to
# 0 rather than empty) on boxes where lspci is unavailable.
COUNT=$(lspci 2>/dev/null | grep -c -i "Fibre Channel")

# Nothing to report when no HBA is present.
if [ "${COUNT:-0}" -eq 0 ]; then
  echo "No HBAs found on $HOSTNAME"
  exit 0
fi

# Detect the legacy 2.4 qlogic /proc interface; /proc/scsi does not exist
# on modern kernels, hence the stderr redirect.
FLAG=0
if [ "$(ls -1 /proc/scsi 2>/dev/null | grep -c qla)" -ge 1 ]; then
  FLAG=1
fi

# output
echo "$HOSTNAME contains $COUNT HBA : "

if [ "$FLAG" -eq 1 ]; then
  # 2.4 qlogic: the WWN is the first 16 hex chars of the adapter-port value.
  cat /proc/scsi/qla2xxx/* | grep adapter-port | awk -F= '{ print $2 }' | cut -c 1-16
else
  # sysfs: one fc_host entry per port; strip the 0x prefix from port_name.
  for host in /sys/class/fc_host/*; do
    [ -e "$host" ] || continue
    echo "WWN : $(sed 's/0x//g' "$host/port_name") PORT STATUS : $(cat "$host/port_state")"
  done
fi