├── LICENSE ├── README.md ├── cloud ├── aws │ └── create_delete_eks.sh ├── azure │ ├── pipelines │ │ └── examp01.yml │ └── scripts │ │ ├── create_aks.sh │ │ ├── create_aks_cilium.sh │ │ └── create_aks_cilium_byocni.sh └── gcp │ ├── create_delete_k8s.py │ ├── create_delete_k8s.sh │ ├── create_k8s.go │ └── delete_k8s.go ├── deployments ├── app_diagnostic.yml ├── auth_data.yml ├── auth_gateway.yml ├── cleanup_daemonset.yml ├── clusterip_svc.yml ├── create_namespace.yml ├── create_ns_limit_range.yml ├── create_ns_limits.yml ├── host-storage-pv.yml ├── host-storage-pvc.yml ├── init_container_pod.yml ├── init_svc_example.yml ├── iperf3.yml ├── iperf3_daemonset.yml ├── localdisk.yml ├── maintain_pod.yml ├── multi_container_pod.yml ├── network_policy_01.yml ├── network_policy_02.yml ├── network_policy_example.yml ├── nginx_pod.yml ├── nodeport_svc.yml ├── pod_reader_role.yml ├── pod_reader_rolebinding.yml ├── pv-pod.yml ├── pv_example.yml ├── pv_pod_example.yml ├── pvc_example.yml ├── serviceaccount_example.yml ├── shared_data_pod.yml └── storage_class.yml ├── docker ├── create_Dockerfile.py ├── create_Dockerfile.sh ├── docker-remove-unused-images.sh ├── docker-remove-unused-volumes.sh ├── dockerfiles │ ├── kubectl │ │ ├── 01 │ │ │ └── Dockerfile │ │ └── 02 │ │ │ └── Dockerfile │ ├── nginx │ │ └── Dockerfile │ └── traefik │ │ └── Dockerfile └── remove_dangling_images.sh └── scripts ├── backup_restore_etcd.sh ├── check_ingress.sh ├── check_ingress_02.sh ├── cilium_connectivity_test.sh ├── cilium_enable_ingress.sh ├── cleanup_empty_ns.sh ├── cleanup_unused_secrets.sh ├── cluster_health.sh ├── containers_images_list.sh ├── create_aks_cilium.sh ├── create_aks_cilium_byocni.sh ├── deployment-health.sh ├── enable_ports_rke2.sh ├── enable_ports_rke2_func.sh ├── get_pod_ip.sh ├── inject_secrets.sh ├── install_cilium_talos.sh ├── install_extras.sh ├── install_k8s_calico.sh ├── install_k8s_cilium.sh ├── k8s_cheat_sheet.sh ├── k8s_limits_requests.py ├── k8s_probes.py ├── 
k8s_shortcuts.sh ├── label_k8s_resources.sh ├── list_k8s_secrets.sh ├── pod_resource_req.sh ├── pods_limit_check.sh ├── pvc_pv_cleanup.sh ├── top_pods_nodes.sh ├── troubleshoot_app.sh ├── update_aks_sp.sh ├── update_azureCNI_to_cilium.sh ├── update_hashi_keys.sh └── upgrade_k8s.sh /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Ghassan Malke 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes automated tasks 2 | 3 | ![image](https://github.com/gma1k/scripts/assets/138721734/558562f1-9a24-4143-96c9-6651641c50e2) 4 | 5 | A repository of bash, python, and go scripts for automating various tasks on Kubernetes, such as managing clusters, nodes, pods, services, deployments, etc. These scripts are useful for saving time and effort when working with Kubernetes on different cloud providers such as AWS, Azure, and GCP. 6 | 7 | ## Table of Contents 8 | 9 | - [Technologies](#technologies) 10 | - [Features](#features) 11 | - [Usage](#usage) 12 | - [License](#license) 13 | 14 | ## Technologies 15 | 16 | - k8s 17 | - Cloud: AWS, Azure and GCP 18 | - Bash 19 | - Python 20 | - Go 21 | 22 | ## Features 23 | 24 | - **Kubernetes management tasks**: Useful scripts for managing random tasks on k8s cluster. 25 | - **Kubernetes deployment examples**: Examples of Kubernetes deployment files for deploying applications on Kubernetes. 26 | - **Easy to use**: The scripts are designed to be simple and intuitive to use, with clear instructions and options. 27 | 28 | ## Usage 29 | Can be executed directly from the command line. 
30 | 31 | ``` 32 | git clone https://github.com/gma1k/k8s.git 33 | cd k8s 34 | ``` 35 | 36 | ## License 37 | 38 | [MIT](LICENSE) 39 | -------------------------------------------------------------------------------- /cloud/aws/create_delete_eks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | create_cluster() { 4 | echo "Please enter the following values separated by commas:" 5 | echo "Cluster name, region, node type" 6 | read -p "-> " input 7 | 8 | IFS=',' read -r cluster region node <<< "$input" 9 | 10 | if [ -z "$cluster" ] || [ -z "$region" ] || [ -z "$node" ]; then 11 | echo "Invalid input format" 12 | exit 1 13 | fi 14 | 15 | echo "Creating cluster..." 16 | aws eks create-cluster --name "$cluster" --region "$region" --nodegroup-name "$cluster-nodes" --node-type "$node" --nodes 2 17 | 18 | echo "Cluster created: $cluster" 19 | } 20 | 21 | delete_cluster() { 22 | echo "Please enter the following values separated by commas:" 23 | echo "Cluster name, region" 24 | read -p "-> " input 25 | 26 | IFS=',' read -r cluster region <<< "$input" 27 | 28 | if [ -z "$cluster" ] || [ -z "$region" ]; then 29 | echo "Invalid input format" 30 | exit 1 31 | fi 32 | 33 | echo "Deleting cluster..." 
34 | aws eks delete-cluster --name "$cluster" --region "$region" 35 | 36 | echo "Cluster deleted: $cluster" 37 | } 38 | 39 | echo "Please choose an option:" 40 | echo "1) Create a cluster" 41 | echo "2) Delete a cluster" 42 | read -p "-> " option 43 | 44 | case $option in 45 | 1) create_cluster ;; 46 | 2) delete_cluster ;; 47 | *) echo "Invalid option" ;; 48 | esac 49 | -------------------------------------------------------------------------------- /cloud/azure/pipelines/examp01.yml: -------------------------------------------------------------------------------- 1 | name: $(Date:yyyyMMdd)$(Rev:.r) 2 | variables: 3 | vmImageName: Ubuntu-18.04 4 | dockerfilePath: Dockerfile 5 | dockerRegistryServiceConnection: topaasnsco 6 | tag: latest 7 | trigger: 8 | - master 9 | stages: 10 | - stage: Build 11 | displayName: Build and push stage 12 | jobs: 13 | - job: Build 14 | displayName: Build job 15 | pool: 16 | vmImage: $(vmImageName) 17 | steps: 18 | - task: Docker@2 19 | displayName: Build and push an image to container registry 20 | inputs: 21 | command: buildAndPush 22 | repository: $(imageRepository) 23 | dockerfile: $(dockerfilePath) 24 | containerRegistry: $(dockerRegistryServiceConnection) 25 | tags: | 26 | $(tag) 27 | -------------------------------------------------------------------------------- /cloud/azure/scripts/create_aks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | az group create --name aks-rg --location westeurope 4 | 5 | az network vnet create --resource-group aks-rg --name aks-vnet --address-prefixes 10.0.0.0/8 --subnet-name aks-subnet --subnet-prefix 10.240.0.0/16 6 | 7 | az ad sp create-for-rbac --skip-assignment --name aks-sp 8 | AKS_SP_ID=$(az ad sp show --id http://aks-sp --query appId -o tsv) 9 | AKS_SP_SECRET=$(az ad sp credential reset --name http://aks-sp --query password -o tsv) 10 | 11 | AKS_SUBNET_ID=$(az network vnet subnet show --resource-group aks-rg --vnet-name aks-vnet 
--name aks-subnet --query id -o tsv) 12 | 13 | az aks create --resource-group aks-rg --name aks-west-cluster --location westeurope --node-count 3 --min-count 1 --max-count 5 --enable-cluster-autoscaler --network-plugin azure --vnet-subnet-id $AKS_SUBNET_ID --service-principal $AKS_SP_ID --client-secret $AKS_SP_SECRET --enable-aad 14 | 15 | az aks get-credentials --resource-group aks-rg --name aks-west-cluster 16 | 17 | az group create --name aks-east-rg --location eastus 18 | 19 | az network vnet create --resource-group aks-east-rg --name aks-east-vnet --address-prefixes 10.1.0.0/8 --subnet-name aks-east-subnet --subnet-prefix 10.241.0.0/16 20 | 21 | AKS_EAST_SUBNET_ID=$(az network vnet subnet show --resource-group aks-east-rg --vnet-name aks-east-vnet --name aks-east-subnet --query id -o tsv) 22 | 23 | az aks create --resource-group aks-east-rg --name aks-east-cluster --location eastus --node-count 3 --min-count 1 --max-count 5 --enable-cluster-autoscaler --network-plugin azure --vnet-subnet-id $AKS_EAST_SUBNET_ID --service-principal $AKS_SP_ID --client-secret $AKS_SP_SECRET 24 | 25 | az aks get-credentials -g aks-east-rg -n aks-east-cluster 26 | 27 | az aks list -o table 28 | -------------------------------------------------------------------------------- /cloud/azure/scripts/create_aks_cilium.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Create a resource group 4 | create_resource_group() { 5 | echo "Creating resource group $resourceGroupName..." 6 | az group create --name "$resourceGroupName" --location "$location" 7 | } 8 | 9 | # Create a virtual network 10 | create_virtual_network() { 11 | echo "Creating virtual network $vnetName..." 
12 | az network vnet create -g "$resourceGroupName" --location "$location" \ 13 | --name "$vnetName" --address-prefixes "$vnetAddressPrefix" -o none 14 | az network vnet subnet create -g "$resourceGroupName" --vnet-name "$vnetName" \ 15 | --name nodesubnet --address-prefixes "$nodesubnetAddressPrefix" -o none 16 | az network vnet subnet create -g "$resourceGroupName" --vnet-name "$vnetName" \ 17 | --name podsubnet --address-prefixes "$podsubnetAddressPrefix" -o none 18 | } 19 | 20 | # Create an AKS cluster with Azure CNI Overlay networking 21 | create_aks_overlay() { 22 | echo "Creating AKS cluster with Azure CNI Overlay networking..." 23 | az aks create -n "$clusterName" -g "$resourceGroupName" -l "$location" \ 24 | --network-plugin azure --network-plugin-mode overlay \ 25 | --pod-cidr 192.168.0.0/16 --network-dataplane cilium 26 | } 27 | 28 | # Create an AKS cluster with Azure CNI using a virtual network 29 | create_aks_vnet() { 30 | echo "Creating AKS cluster with Azure CNI using a virtual network..." 31 | az aks create -n "$clusterName" -g "$resourceGroupName" -l "$location" \ 32 | --max-pods 250 --network-plugin azure \ 33 | --vnet-subnet-id "/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Network/virtualNetworks/$vnetName/subnets/nodesubnet" \ 34 | --pod-subnet-id "/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Network/virtualNetworks/$vnetName/subnets/podsubnet" \ 35 | --network-dataplane cilium 36 | } 37 | 38 | # Main script 39 | echo "Choose an option:" 40 | echo "1. Assign IP addresses from an overlay network" 41 | echo "2. 
Assign IP addresses from a virtual network" 42 | read -p "Enter your choice (1 or 2): " option 43 | 44 | case "$option" in 45 | 1) 46 | read -p "Enter AKS cluster name: " clusterName 47 | read -p "Enter resource group name: " resourceGroupName 48 | read -p "Enter location: " location 49 | if [[ -z "$clusterName" || -z "$resourceGroupName" || -z "$location" ]]; then 50 | echo "Error: All input fields are required." 51 | exit 1 52 | fi 53 | create_resource_group 54 | create_aks_overlay 55 | ;; 56 | 2) 57 | read -p "Enter AKS cluster name: " clusterName 58 | read -p "Enter resource group name: " resourceGroupName 59 | read -p "Enter location: " location 60 | read -p "Enter subscription ID: " subscriptionId 61 | read -p "Enter virtual network name: " vnetName 62 | read -p "Enter virtual network address prefix (e.g., 10.0.0.0/8): " vnetAddressPrefix 63 | read -p "Enter nodesubnet address prefix (e.g., 10.240.0.0/16): " nodesubnetAddressPrefix 64 | read -p "Enter podsubnet address prefix (e.g., 10.241.0.0/16): " podsubnetAddressPrefix 65 | if [[ -z "$clusterName" || -z "$resourceGroupName" || -z "$location" || -z "$subscriptionId" || -z "$vnetName" || -z "$vnetAddressPrefix" || -z "$nodesubnetAddressPrefix" || -z "$podsubnetAddressPrefix" ]]; then 66 | echo "Error: All input fields are required." 67 | exit 1 68 | fi 69 | create_resource_group 70 | create_virtual_network 71 | create_aks_vnet 72 | ;; 73 | *) 74 | echo "Invalid choice. Please select 1 or 2." 
75 | ;; 76 | esac 77 | -------------------------------------------------------------------------------- /cloud/azure/scripts/create_aks_cilium_byocni.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add AKS extension 4 | add_aks_extension() { 5 | az extension add --name aks-preview 6 | az extension update --name aks-preview 7 | } 8 | 9 | # Register AKS feature 10 | register_aks_feature() { 11 | az feature register --namespace "Microsoft.ContainerService" --name "KubeProxyConfigurationPreview" 12 | az provider register --namespace Microsoft.ContainerService 13 | } 14 | 15 | # Create a resource group 16 | create_resource_group() { 17 | az group create --name "$resource_group" --location $location 18 | } 19 | 20 | # Create a virtual network 21 | create_virtual_network() { 22 | echo "Creating virtual network $vnet_name..." 23 | az network vnet create -g "$resource_group" --location "$location" \ 24 | --name "$vnet_name" --address-prefixes "$vnetAddressPrefix" --subnet-name "$subnet_name" -o none 25 | az network vnet subnet create -g "$resource_group" --vnet-name "$vnet_name" \ 26 | --name nodesubnet --address-prefixes "$nodesubnetAddressPrefix" -o none 27 | az network vnet subnet create -g "$resource_group" --vnet-name "$vnet_name" \ 28 | --name podsubnet --address-prefixes "$podsubnetAddressPrefix" -o none 29 | } 30 | 31 | # Create an AKS cluster 32 | create_aks_cluster() { 33 | az aks create --resource-group "$resource_group" --name "$cluster_name" --location "$location" --network-plugin none --vnet-subnet-id "/subscriptions/$subscriptionId/resourceGroups/$resource_group/providers/Microsoft.Network/virtualNetworks/$vnet_name/subnets/$subnet_name" 34 | } 35 | 36 | # Install Helm 37 | install_helm() { 38 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 39 | chmod +x get_helm.sh 40 | ./get_helm.sh 41 | } 42 | 43 | # Configure Helm 44 | configure_helm() { 45 | 
kubectl create serviceaccount tiller --namespace kube-system 46 | kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller 47 | helm init --service-account tiller 48 | } 49 | 50 | # Install Cilium 51 | install_cilium() { 52 | helm install cilium cilium/cilium --version 1.14.0 \ 53 | --namespace kube-system \ 54 | --set kubeProxyReplacement=true \ 55 | --set k8sServiceHost="$api_server_ip" \ 56 | --set k8sServicePort="$api_server_port" \ 57 | --set aksbyocni.enabled=true \ 58 | --set nodeinit.enabled=true \ 59 | --set hubble.enabled=true 60 | } 61 | 62 | # Main script 63 | add_aks_extension 64 | register_aks_feature 65 | 66 | read -p "Enter a unique resource group name: " resource_group 67 | read -p "Enter a unique AKS cluster name: " cluster_name 68 | read -p "Enter location: " location 69 | read -p "Enter subscription ID: " subscriptionId 70 | read -p "Enter a VNet name: " vnet_name 71 | read -p "Enter a subnet name: " subnet_name 72 | read -p "Enter vnet address prefix (e.g., 10.0.0.0/8): " vnetAddressPrefix 73 | read -p "Enter nodesubnet address prefix (e.g., 10.240.0.0/16): " nodesubnetAddressPrefix 74 | read -p "Enter podsubnet address prefix (e.g., 10.241.0.0/16): " podsubnetAddressPrefix 75 | 76 | create_resource_group "$resource_group" 77 | create_virtual_network "$resource_group" "$vnet_name" "$subnet_name" 78 | create_aks_cluster "$resource_group" "$cluster_name" "$vnet_name" "$subnet_name" 79 | install_helm 80 | configure_helm 81 | 82 | # Get API server IP and port 83 | api_server_ip=$(kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}' | cut -d':' -f2 | cut -d'/' -f3) 84 | api_server_port=$(kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}' | cut -d':' -f3) 85 | 86 | install_cilium "$api_server_ip" "$api_server_port" 87 | 88 | echo "AKS cluster with 
Cilium CNI has been set up successfully!" 89 | -------------------------------------------------------------------------------- /cloud/gcp/create_delete_k8s.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import subprocess 5 | 6 | def create_cluster(): 7 | print("Please enter the following values separated by commas:") 8 | print("Project ID, cluster name, zone, number of nodes") 9 | input = input("-> ") 10 | 11 | values = input.split(",") 12 | if len(values) != 4: 13 | print("Invalid input format") 14 | exit(1) 15 | 16 | project = values[0] 17 | cluster = values[1] 18 | zone = values[2] 19 | nodes = values[3] 20 | 21 | print("Creating cluster...") 22 | subprocess.run(["gcloud", "container", "clusters", "create", cluster, "--project", project, "--zone", zone, "--num-nodes", nodes, "--quiet"]) 23 | 24 | print(f"Cluster created: {cluster}") 25 | 26 | def delete_cluster(): 27 | print("Please enter the following values separated by commas:") 28 | print("Project ID, cluster name, zone") 29 | input = input("-> ") 30 | 31 | values = input.split(",") 32 | if len(values) != 3: 33 | print("Invalid input format") 34 | exit(1) 35 | 36 | project = values[0] 37 | cluster = values[1] 38 | zone = values[2] 39 | 40 | print("Deleting cluster...") 41 | subprocess.run(["gcloud", "container", "clusters", "delete", cluster, "--project", project, "--zone", zone, "--quiet"]) 42 | 43 | print(f"Cluster deleted: {cluster}") 44 | 45 | print("Please choose an option:") 46 | print("1) Create a cluster") 47 | print("2) Delete a cluster") 48 | option = input("-> ") 49 | 50 | if option == "1": 51 | create_cluster() 52 | elif option == "2": 53 | delete_cluster() 54 | else: 55 | print("Invalid option") 56 | -------------------------------------------------------------------------------- /cloud/gcp/create_delete_k8s.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 
| 3 | create_cluster() { 4 | echo "Please enter the following values separated by commas:" 5 | echo "Project ID, cluster name, zone, number of nodes" 6 | read -p "-> " input 7 | 8 | IFS=',' read -r project cluster zone nodes <<< "$input" 9 | 10 | if [ -z "$project" ] || [ -z "$cluster" ] || [ -z "$zone" ] || [ -z "$nodes" ]; then 11 | echo "Invalid input format" 12 | exit 1 13 | fi 14 | 15 | echo "Creating cluster..." 16 | gcloud container clusters create "$cluster" --project "$project" --zone "$zone" --num-nodes "$nodes" --quiet 17 | 18 | echo "Cluster created: $cluster" 19 | } 20 | 21 | delete_cluster() { 22 | echo "Please enter the following values separated by commas:" 23 | echo "Project ID, cluster name, zone" 24 | read -p "-> " input 25 | 26 | IFS=',' read -r project cluster zone <<< "$input" 27 | 28 | if [ -z "$project" ] || [ -z "$cluster" ] || [ -z "$zone" ]; then 29 | echo "Invalid input format" 30 | exit 1 31 | fi 32 | 33 | echo "Deleting cluster..." 34 | gcloud container clusters delete "$cluster" --project "$project" --zone "$zone" --quiet 35 | 36 | echo "Cluster deleted: $cluster" 37 | } 38 | 39 | echo "Please choose an option:" 40 | echo "1) Create a cluster" 41 | echo "2) Delete a cluster" 42 | read -p "-> " option 43 | 44 | case $option in 45 | 1) create_cluster ;; 46 | 2) delete_cluster ;; 47 | *) echo "Invalid option" ;; 48 | esac 49 | -------------------------------------------------------------------------------- /cloud/gcp/create_k8s.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "log" 8 | "os" 9 | "strings" 10 | 11 | container "cloud.google.com/go/container/apiv1" 12 | containerpb "google.golang.org/genproto/googleapis/container/v1" 13 | ) 14 | 15 | func main() { 16 | // Create a context 17 | ctx := context.Background() 18 | 19 | // Create a container client 20 | client, err := container.NewClusterManagerClient(ctx) 21 | if err != 
nil { 22 | log.Fatal(err) 23 | } 24 | defer client.Close() 25 | 26 | // Ask for user input 27 | reader := bufio.NewReader(os.Stdin) 28 | fmt.Println("Please enter the following values separated by commas:") 29 | fmt.Println("Project ID, cluster name, zone, number of nodes") 30 | fmt.Print("-> ") 31 | input, err := reader.ReadString('\n') 32 | if err != nil { 33 | log.Fatal(err) 34 | } 35 | 36 | // Split the input by commas 37 | input = strings.TrimSpace(input) 38 | values := strings.Split(input, ",") 39 | if len(values) != 4 { 40 | log.Fatal("Invalid input format") 41 | } 42 | 43 | // Assign the values to variables 44 | projectID := values[0] 45 | clusterName := values[1] 46 | zone := values[2] 47 | numNodes := values[3] 48 | 49 | // Create a request to create a cluster 50 | req := &containerpb.CreateClusterRequest{ 51 | ProjectId: projectID, 52 | Zone: zone, 53 | Cluster: &containerpb.Cluster{ 54 | Name: clusterName, 55 | InitialNodeCount: numNodes, 56 | NodeConfig: &containerpb.NodeConfig{ 57 | MachineType: "n1-standard-1", 58 | }, 59 | }, 60 | } 61 | 62 | // Call the create cluster method 63 | fmt.Println("Creating cluster...") 64 | op, err := client.CreateCluster(ctx, req) 65 | if err != nil { 66 | log.Fatal(err) 67 | } 68 | 69 | // Wait for the operation to finish 70 | fmt.Println("Waiting for operation to finish...") 71 | resp, err := op.Wait(ctx) 72 | if err != nil { 73 | log.Fatal(err) 74 | } 75 | 76 | // Print the response 77 | fmt.Printf("Cluster created: %v\n", resp) 78 | } 79 | -------------------------------------------------------------------------------- /cloud/gcp/delete_k8s.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "log" 8 | "os" 9 | "strings" 10 | 11 | container "cloud.google.com/go/container/apiv1" 12 | containerpb "google.golang.org/genproto/googleapis/container/v1" 13 | ) 14 | 15 | func main() { 16 | // Create a context 17 | ctx 
:= context.Background() 18 | 19 | // Create a container client 20 | client, err := container.NewClusterManagerClient(ctx) 21 | if err != nil { 22 | log.Fatal(err) 23 | } 24 | defer client.Close() 25 | 26 | // Ask for user input 27 | reader := bufio.NewReader(os.Stdin) 28 | fmt.Println("Please enter the following values separated by commas:") 29 | fmt.Println("Project ID, cluster name, zone") 30 | fmt.Print("-> ") 31 | input, err := reader.ReadString('\n') 32 | if err != nil { 33 | log.Fatal(err) 34 | } 35 | 36 | // Split the input by commas 37 | input = strings.TrimSpace(input) 38 | values := strings.Split(input, ",") 39 | if len(values) != 3 { 40 | log.Fatal("Invalid input format") 41 | } 42 | 43 | // Assign the values to variables 44 | projectID := values[0] 45 | clusterName := values[1] 46 | zone := values[2] 47 | 48 | // Create a request to delete a cluster 49 | req := &containerpb.DeleteClusterRequest{ 50 | Name: fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectID, zone, clusterName), 51 | } 52 | 53 | // Call the delete cluster method 54 | fmt.Println("Deleting cluster...") 55 | op, err := client.DeleteCluster(ctx, req) 56 | if err != nil { 57 | log.Fatal(err) 58 | } 59 | 60 | // Wait for the operation to finish 61 | fmt.Println("Waiting for operation to finish...") 62 | err = op.Wait(ctx) 63 | if err != nil { 64 | log.Fatal(err) 65 | } 66 | 67 | // Print the result 68 | fmt.Printf("Cluster deleted: %s\n", clusterName) 69 | } 70 | -------------------------------------------------------------------------------- /deployments/app_diagnostic.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: app-diagnostic 5 | spec: 6 | containers: 7 | - name: app-diagnostic 8 | image: acgorg/app-diagnostic:1 9 | ports: 10 | - containerPort: 80 11 | -------------------------------------------------------------------------------- /deployments/auth_data.yml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: auth-data 5 | namespace: dev-ns 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: auth-data 11 | template: 12 | metadata: 13 | labels: 14 | app: auth-data 15 | spec: 16 | nodeSelector: 17 | external-auth-services: "true" 18 | containers: 19 | - name: nginx 20 | image: nginx:1.19.1 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /deployments/auth_gateway.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: auth-gateway 5 | namespace: dev-ns 6 | spec: 7 | nodeSelector: 8 | external-auth-services: "true" 9 | containers: 10 | - name: nginx 11 | image: nginx:1.19.1 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /deployments/cleanup_daemonset.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: beebox-cleanup 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: beebox-cleanup 9 | template: 10 | metadata: 11 | labels: 12 | app: beebox-cleanup 13 | spec: 14 | containers: 15 | - name: busybox 16 | image: busybox:1.27 17 | command: ['sh', '-c', 'while true; do rm -rf /beebox-temp/*; sleep 60; done'] 18 | volumeMounts: 19 | - name: beebox-tmp 20 | mountPath: /beebox-temp 21 | volumes: 22 | - name: beebox-tmp 23 | hostPath: 24 | path: /etc/beebox/tmp 25 | -------------------------------------------------------------------------------- /deployments/clusterip_svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: user-db-svc 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: user-db 9 | 
ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | -------------------------------------------------------------------------------- /deployments/create_namespace.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: dev-ns-app 6 | labels: 7 | app: dev-app 8 | env: dev 9 | --- 10 | apiVersion: v1 11 | kind: ResourceQuota 12 | metadata: 13 | name: dev-app-qouta 14 | namespace: dev-ns-app 15 | spec: 16 | hard: 17 | limits.cpu: "10" 18 | limits.memory: "24Gi" 19 | -------------------------------------------------------------------------------- /deployments/create_ns_limit_range.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: dev-ns-app 6 | labels: 7 | app: dev-app 8 | env: dev 9 | spec: 10 | hard: 11 | limits.cpu: "10" 12 | limits.memory: "24Gi" 13 | resources: 14 | requests: 15 | cpu: "6" 16 | memory: "16Gi" 17 | limits: 18 | cpu: "10" 19 | memory: "24Gi" 20 | --- 21 | apiVersion: v1 22 | kind: LimitRange 23 | metadata: 24 | name: dev-app-limit-range 25 | namespace: dev-ns-app 26 | spec: 27 | limits: 28 | - type: Pod 29 | min: 30 | cpu: "500m" 31 | memory: "256Mi" 32 | max: 33 | cpu: "8" 34 | memory: "20Gi" 35 | - type: Container 36 | min: 37 | cpu: "100m" 38 | memory: "128Mi" 39 | max: 40 | cpu: "8" 41 | memory: "20Gi" 42 | default: 43 | cpu: "4" 44 | memory: "8Gi" 45 | defaultRequest: 46 | cpu: "500m" 47 | memory: "256Mi" 48 | -------------------------------------------------------------------------------- /deployments/create_ns_limits.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev-ns-app 5 | labels: 6 | app: dev-app 7 | env: dev 8 | spec: 9 | hard: 10 | limits.cpu: "10" 11 | limits.memory: "24Gi" 12 | resources: 13 | requests: 14 | cpu: 
"6" 15 | memory: "16Gi" 16 | limits: 17 | cpu: "10" 18 | memory: "24Gi" 19 | -------------------------------------------------------------------------------- /deployments/host-storage-pv.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: host-storage-pv 5 | spec: 6 | storageClassName: localdisk 7 | persistentVolumeReclaimPolicy: Recycle 8 | capacity: 9 | storage: 1Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | hostPath: 13 | path: /etc/data 14 | -------------------------------------------------------------------------------- /deployments/host-storage-pvc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: host-storage-pvc 5 | namespace: auth 6 | spec: 7 | storageClassName: localdisk 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 100Mi 13 | -------------------------------------------------------------------------------- /deployments/init_container_pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: init-container-pod 5 | namespace: dev-ns 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx:1.19.1 10 | initContainers: 11 | - name: init-svc 12 | image: busybox:1.27 13 | command: ['sh', '-c', 'until nslookup shipping-svc; do echo waiting for shipping-svc; sleep 2; done'] 14 | -------------------------------------------------------------------------------- /deployments/init_svc_example.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: init-svc 5 | spec: 6 | selector: 7 | app: init-svc 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 80 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: 
init-backend 17 | labels: 18 | app: init-svc 19 | spec: 20 | containers: 21 | - name: nginx 22 | image: nginx:1.19.1 23 | -------------------------------------------------------------------------------- /deployments/iperf3.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: iperf3-deployment 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: iperf3 10 | template: 11 | metadata: 12 | labels: 13 | app: iperf3 14 | spec: 15 | containers: 16 | - name: iperf3 17 | image: leodotcloud/swiss-army-knife 18 | ports: 19 | - containerPort: 5201 20 | -------------------------------------------------------------------------------- /deployments/iperf3_daemonset.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: iperf3-daemonset 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: iperf3 9 | template: 10 | metadata: 11 | labels: 12 | app: iperf3 13 | spec: 14 | containers: 15 | - name: iperf3 16 | image: leodotcloud/swiss-army-knife 17 | ports: 18 | - containerPort: 5201 19 | -------------------------------------------------------------------------------- /deployments/localdisk.yml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: localdisk 5 | provisioner: kubernetes.io/no-provisioner 6 | allowVolumeExpansion: true 7 | -------------------------------------------------------------------------------- /deployments/maintain_pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: maintenance-pod 5 | spec: 6 | containers: 7 | - name: busybox 8 | image: busybox 9 | command: ['sh', '-c', 'while true; do echo Success! 
>> /output/output.txt; sleep 5; done'] 10 | 11 | volumeMounts: 12 | - name: output-vol 13 | mountPath: /output 14 | 15 | volumes: 16 | - name: output-vol 17 | hostPath: 18 | path: /var/data 19 | -------------------------------------------------------------------------------- /deployments/multi_container_pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: multi-container-pod 5 | namespace: dev-ns 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | ports: 11 | - containerPort: 80 12 | - name: redis 13 | image: redis 14 | ports: 15 | - containerPort: 6379 16 | - name: couchbase 17 | image: couchbase 18 | ports: 19 | - containerPort: 8091 20 | - containerPort: 8092 21 | - containerPort: 8093 22 | - containerPort: 8094 23 | - containerPort: 11210 24 | -------------------------------------------------------------------------------- /deployments/network_policy_01.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np 5 | namespace: foo 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: app 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | -------------------------------------------------------------------------------- /deployments/network_policy_02.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: np-users-backend-80 5 | namespace: backend 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | ingress: 11 | - from: 12 | - namespaceSelector: 13 | matchLabels: 14 | app: backend 15 | ports: 16 | - protocol: TCP 17 | port: 80 18 | -------------------------------------------------------------------------------- /deployments/network_policy_example.yml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: nginx-policy 5 | namespace: dev-ns 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: nginx 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | ingress: 14 | - from: 15 | - podSelector: 16 | matchLabels: 17 | role: frontend 18 | ports: 19 | - protocol: TCP 20 | port: 80 21 | egress: [] 22 | -------------------------------------------------------------------------------- /deployments/nginx_pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: nginx 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx 18 | ports: 19 | - containerPort: 80 20 | -------------------------------------------------------------------------------- /deployments/nodeport_svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: web-frontend-svc 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: web-frontend 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | nodePort: 30080 14 | -------------------------------------------------------------------------------- /deployments/pod_reader_role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | namespace: dev-ns 5 | name: pod-reader 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "pods/log"] 9 | verbs: ["get", "watch", "list"] 10 | -------------------------------------------------------------------------------- /deployments/pod_reader_rolebinding.yml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: pod-reader 5 | namespace: dev-ns 6 | subjects: 7 | - kind: User 8 | name: dev 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: pod-reader 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /deployments/pv-pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pv-pod 5 | namespace: auth 6 | spec: 7 | containers: 8 | - name: busybox 9 | image: busybox 10 | command: ['sh', '-c', 'while true; do echo success > /output/output.log; sleep 5; done'] 11 | volumeMounts: 12 | - name: pv-storage 13 | mountPath: /output 14 | volumes: 15 | - name: pv-storage 16 | persistentVolumeClaim: 17 | claimName: host-storage-pvc 18 | -------------------------------------------------------------------------------- /deployments/pv_example.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: host-pv 5 | spec: 6 | storageClassName: localdisk 7 | persistentVolumeReclaimPolicy: Recycle 8 | capacity: 9 | storage: 1Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | hostPath: 13 | path: /var/output 14 | -------------------------------------------------------------------------------- /deployments/pv_pod_example.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pv-pod 5 | spec: 6 | containers: 7 | - name: busybox 8 | image: busybox 9 | command: ['sh', '-c', 'while true; do echo Success! 
> /output/success.txt; sleep 5; done'] 10 | volumes: 11 | - name: pv-storage 12 | persistentVolumeClaim: 13 | claimName: host-pvc 14 | volumeMounts: 15 | - name: pv-storage 16 | mountPath: /output 17 | -------------------------------------------------------------------------------- /deployments/pvc_example.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: host-pvc 5 | spec: 6 | storageClassName: localdisk 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 100Mi 12 | -------------------------------------------------------------------------------- /deployments/serviceaccount_example.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-pod 5 | spec: 6 | serviceAccountName: my-service-account 7 | containers: 8 | - name: my-app 9 | image: public.ecr.aws/nginx/nginx:X.XX 10 | -------------------------------------------------------------------------------- /deployments/shared_data_pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: shared-data-pod 5 | spec: 6 | containers: 7 | - name: busybox1 8 | image: busybox 9 | command: ['sh', '-c', 'while true; do echo Success! 
>> /output/output.txt; sleep 5; done'] 10 | volumeMounts: 11 | - name: shared-vol 12 | mountPath: /output 13 | - name: busybox2 14 | image: busybox 15 | command: ['sh', '-c', 'while true; do cat /input/output.txt; sleep 5; done'] 16 | volumeMounts: 17 | - name: shared-vol 18 | mountPath: /input 19 | volumes: 20 | - name: shared-vol 21 | emptyDir: {} 22 | -------------------------------------------------------------------------------- /deployments/storage_class.yml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: localdisk 5 | provisioner: kubernetes.io/no-provisioner 6 | allowVolumeExpansion: true 7 | -------------------------------------------------------------------------------- /docker/create_Dockerfile.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # chmod +x create_Dockerfile.py 3 | # Usage: ./create_Dockerfile.py 4 | # Optional arguments for HTTP_PROXY and HTTPS_PROXY values 5 | # Usage: ./create_dockerfile.py --http-proxy http://proxy.example.com:8080 --https-proxy https://proxy.example.com:8080 6 | 7 | import subprocess 8 | import argparse 9 | 10 | def check_input(instruction): 11 | valid_instructions = ["FROM", "COPY", "RUN", "CMD"] 12 | return instruction in valid_instructions 13 | 14 | def build_image(image_name): 15 | result = subprocess.run(["docker", "build", "-t", image_name, "-f", "Dockerfile", "."]) 16 | if result.returncode == 0: 17 | print(f"Docker image {image_name} built successfully.") 18 | exit(0) 19 | else: 20 | print("Docker build failed.") 21 | answer = input("Do you want to try again? 
(y/n): ") 22 | if answer == "y": 23 | build_image(image_name) 24 | else: 25 | exit(1) 26 | 27 | with open("Dockerfile", "w") as f: 28 | f.write("") 29 | 30 | parser = argparse.ArgumentParser(description="Create a dockerfile and build a docker image") 31 | 32 | parser.add_argument("--http-proxy", help="set HTTP_PROXY value") 33 | parser.add_argument("--https-proxy", help="set HTTPS_PROXY value") 34 | 35 | args = parser.parse_args() 36 | 37 | if args.http_proxy: 38 | with open("Dockerfile", "a") as f: 39 | f.write(f"ENV HTTP_PROXY {args.http_proxy}\n") 40 | 41 | if args.https_proxy: 42 | with open("Dockerfile", "a") as f: 43 | f.write(f"ENV HTTPS_PROXY {args.https_proxy}\n") 44 | 45 | while True: 46 | instruction = input("Enter a dockerfile instruction or 'done' to finish: ") 47 | if instruction == "done": 48 | print("Dockerfile completed.") 49 | choice = input("Do you want to build the image? (y/n): ") 50 | if choice == "y": 51 | image_name = input("Enter the image name: ") 52 | build_image(image_name) 53 | elif choice == "n": 54 | print("You can build the image later with 'docker build -t -f Dockerfile .'") 55 | exit(0) 56 | else: 57 | print("Invalid choice. Please enter y or n.") 58 | exit(1) 59 | else: 60 | if check_input(instruction): 61 | arguments = input(f"Enter the arguments for {instruction}: ") 62 | with open("Dockerfile", "a") as f: 63 | f.write(f"{instruction} {arguments}\n") 64 | else: 65 | print("Invalid instruction. Please enter one of FROM, COPY, RUN, or CMD.") 66 | continue 67 | -------------------------------------------------------------------------------- /docker/create_Dockerfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_input() { 4 | case $1 in 5 | FROM|COPY|RUN|CMD) 6 | return 0 7 | ;; 8 | *) 9 | return 1 10 | ;; 11 | esac 12 | } 13 | 14 | build_image() { 15 | read -p "Enter the image name: " image_name 16 | docker build -t $image_name -f Dockerfile . 17 | if [ $? 
-eq 0 ]; then 18 | echo "Docker image $image_name built successfully." 19 | exit 0 20 | else 21 | echo "Docker build failed." 22 | read -p "Do you want to try again? (y/n): " answer 23 | if [ $answer = "y" ]; then 24 | build_image 25 | else 26 | exit 1 27 | fi 28 | fi 29 | } 30 | 31 | echo "" > Dockerfile 32 | 33 | read -p "Enter the HTTP_PROXY value or leave empty to skip: " http_proxy 34 | 35 | case $http_proxy in 36 | "") 37 | echo "Skipping HTTP_PROXY." 38 | ;; 39 | *) 40 | echo "ENV HTTP_PROXY $http_proxy" > Dockerfile 41 | ;; 42 | esac 43 | 44 | read -p "Enter the HTTPS_PROXY value or leave empty to skip: " https_proxy 45 | 46 | case $https_proxy in 47 | "") 48 | echo "Skipping HTTPS_PROXY." 49 | ;; 50 | *) 51 | echo "ENV HTTPS_PROXY $https_proxy" >> Dockerfile 52 | ;; 53 | esac 54 | 55 | while true; do 56 | read -p "Enter a dockerfile instruction or 'done' to finish: " instruction 57 | if [ $instruction = "done" ]; then 58 | echo "Dockerfile completed." 59 | read -p "Do you want to build the image? (y/n): " choice 60 | case $choice in 61 | y) 62 | build_image 63 | ;; 64 | n) 65 | echo "You can build the image later with 'docker build -t -f Dockerfile .'" 66 | exit 0 67 | ;; 68 | *) 69 | echo "Invalid choice. Please enter y or n." 70 | exit 1 71 | ;; 72 | esac 73 | else 74 | check_input $instruction 75 | if [ $? -eq 0 ]; then 76 | read -p "Enter the arguments for $instruction: " arguments 77 | echo "$instruction $arguments" >> Dockerfile 78 | else 79 | echo "Invalid instruction. Please enter one of FROM, COPY, RUN, or CMD." 
80 | continue 81 | fi 82 | fi 83 | done 84 | -------------------------------------------------------------------------------- /docker/docker-remove-unused-images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker images --quiet --filter=dangling=true | xargs --no-run-if-empty docker rmi 4 | -------------------------------------------------------------------------------- /docker/docker-remove-unused-volumes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker volume ls -qf dangling=true | xargs --no-run-if-empty docker volume rm 4 | -------------------------------------------------------------------------------- /docker/dockerfiles/kubectl/01/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | ARG KUBECTL_VERSION=1.15.6 3 | ARG HELM_VERSION=3.0.2 4 | ARG HELM_DIFF_VERSION=v3.0.0-rc.7 5 | ARG HELMFILE_VERSION=0.98.1 6 | ARG EKSCTL_VERSION=0.11.1 7 | ENV NODE_VERSION=12.6.0 8 | ENV HELM_FILE_NAME helm-v${HELM_VERSION}-linux-amd64.tar.gz 9 | WORKDIR / 10 | RUN apt-get update -qq && \ 11 | apt-get install -qqy --no-install-recommends \ 12 | apt-transport-https \ 13 | build-essential \ 14 | curl \ 15 | ca-certificates \ 16 | coreutils \ 17 | git \ 18 | gnupg \ 19 | gettext \ 20 | lsb-release \ 21 | python3 \ 22 | python3-pip \ 23 | python3-setuptools \ 24 | rlwrap \ 25 | vim \ 26 | nano \ 27 | groff \ 28 | jq 29 | RUN curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/microsoft.asc.gpg > /dev/null 30 | RUN AZ_REPO=$(lsb_release -cs) && echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $AZ_REPO main" | tee /etc/apt/sources.list.d/azure-cli.list 31 | RUN apt-get update -qq && apt-get install -qqy azure-cli \ 32 | && rm -rf /var/lib/apt/lists/* 33 | ADD 
https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl /usr/local/bin/kubectl 34 | RUN chmod +x /usr/local/bin/kubectl 35 | ADD https://get.helm.sh/${HELM_FILE_NAME} /tmp 36 | RUN tar -zxvf /tmp/${HELM_FILE_NAME} -C /tmp \ 37 | && mv /tmp/linux-amd64/helm /bin/helm \ 38 | && rm -rf /tmp/* 39 | RUN helm plugin install https://github.com/databus23/helm-diff --version ${HELM_DIFF_VERSION} 40 | ADD https://github.com/roboll/helmfile/releases/download/v${HELMFILE_VERSION}/helmfile_linux_amd64 /usr/local/bin/helmfile 41 | RUN chmod 0755 /usr/local/bin/helmfile 42 | ADD https://github.com/weaveworks/eksctl/releases/download/${EKSCTL_VERSION}/eksctl_Linux_amd64.tar.gz /tmp 43 | RUN tar -zxvf /tmp/eksctl_Linux_amd64.tar.gz -C /tmp \ 44 | && mv /tmp/eksctl /bin/eksctl \ 45 | && rm -rf /tmp/* 46 | RUN pip3 install --upgrade --no-cache-dir awscli 47 | ENV PATH $PATH:/root/google-cloud-sdk/bin 48 | ADD https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator /usr/local/bin/aws-iam-authenticator 49 | RUN chmod +x /usr/local/bin/aws-iam-authenticator 50 | RUN curl -Lo yaml2json https://github.com/wakeful/yaml2json/releases/latest/download/yaml2json-linux-amd64 && chmod +x yaml2json && mv yaml2json /usr/local/bin/ 51 | CMD "/bin/bash" 52 | -------------------------------------------------------------------------------- /docker/dockerfiles/kubectl/02/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | ARG KUBECTL_VERSION=1.15.6 3 | ARG HELM_VERSION=3.0.2 4 | ARG HELM_DIFF_VERSION=v3.0.0-rc.7 5 | ARG HELMFILE_VERSION=0.98.1 6 | ARG EKSCTL_VERSION=0.11.1 7 | ENV NODE_VERSION=12.6.0 8 | ENV HELM_FILE_NAME helm-v${HELM_VERSION}-linux-amd64.tar.gz 9 | WORKDIR / 10 | RUN apt-get update -qq && \ 11 | apt-get install -qqy --no-install-recommends \ 12 | apt-transport-https \ 13 | build-essential \ 14 | curl \ 15 | ca-certificates \ 16 | 
coreutils \ 17 | git \ 18 | gnupg \ 19 | gettext \ 20 | lsb-release \ 21 | python3 \ 22 | python3-pip \ 23 | python3-setuptools \ 24 | rlwrap \ 25 | vim \ 26 | nano \ 27 | groff \ 28 | jq && \ 29 | curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/microsoft.asc.gpg > /dev/null && \ 30 | AZ_REPO=$(lsb_release -cs) && echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $AZ_REPO main" | tee /etc/apt/sources.list.d/azure-cli.list && \ 31 | apt-get update -qq && apt-get install -qqy azure-cli && rm -rf /var/lib/apt/lists/* && \ 32 | curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && chmod +x /usr/local/bin/kubectl && \ 33 | curl -Lo /tmp/${HELM_FILE_NAME} https://get.helm.sh/${HELM_FILE_NAME} && tar -zxvf /tmp/${HELM_FILE_NAME} -C /tmp && mv /tmp/linux-amd64/helm /bin/helm && rm -rf /tmp/* && \ 34 | helm plugin install https://github.com/databus23/helm-diff --version ${HELM_DIFF_VERSION} && \ 35 | curl -Lo /usr/local/bin/helmfile https://github.com/roboll/helmfile/releases/download/v${HELMFILE_VERSION}/helmfile_linux_amd64 && chmod +x /usr/local/bin/helmfile && \ 36 | curl -Lo /tmp/eksctl_Linux_amd64.tar.gz https://github.com/weaveworks/eksctl/releases/download/${EKSCTL_VERSION}/eksctl_Linux_amd64.tar.gz && tar -zxvf /tmp/eksctl_Linux_amd64.tar.gz -C /tmp && mv /tmp/eksctl /bin/eksctl && rm -rf /tmp/* && \ 37 | pip3 install --upgrade --no-cache-dir awscli && \ 38 | ENV PATH $PATH:/root/google-cloud-sdk/bin && \ 39 | curl -Lo /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator && chmod +x /usr/local/bin/aws-iam-authenticator && \ 40 | curl -Lo /usr/local/bin/yaml2json https://github.com/wakeful/yaml2json/releases/latest/download/yaml2json-linux-amd64 && chmod +x yaml2json 41 | CMD "/bin/bash" 42 | 
-------------------------------------------------------------------------------- /docker/dockerfiles/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | curl \ 5 | iproute2 \ 6 | iptables \ 7 | iputils-ping \ 8 | net-tools \ 9 | procps 10 | 11 | RUN groupadd -r nginx && useradd --no-log-init -r -g nginx nginx 12 | 13 | USER nginx 14 | 15 | COPY --chown=nginx:nginx . /opt/nginx-ingress-controller 16 | RUN chmod -R 755 /opt/nginx-ingress-controller 17 | 18 | ENV KUBERNETES_SERVICE_HOST=kubernetes.default.svc.cluster.local \ 19 | KUBERNETES_SERVICE_PORT=443 \ 20 | POD_NAME=nginx-ingress-controller \ 21 | POD_NAMESPACE=default 22 | 23 | EXPOSE 80 443 24 | 25 | WORKDIR /opt/nginx-ingress-controller 26 | 27 | CMD ["/opt/nginx-ingress-controller/nginx-ingress"] 28 | -------------------------------------------------------------------------------- /docker/dockerfiles/traefik/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | curl \ 5 | iproute2 \ 6 | iptables \ 7 | iputils-ping \ 8 | net-tools \ 9 | procps 10 | 11 | RUN groupadd -r traefik && useradd --no-log-init -r -g traefik traefik 12 | 13 | USER traefik 14 | 15 | COPY --chown=traefik:traefik . 
/opt/traefik-ingress-controller 16 | RUN chmod -R 755 /opt/traefik-ingress-controller 17 | 18 | ENV KUBERNETES_SERVICE_HOST=kubernetes.default.svc.cluster.local \ 19 | KUBERNETES_SERVICE_PORT=443 \ 20 | POD_NAME=traefik-ingress-controller \ 21 | POD_NAMESPACE=default 22 | 23 | EXPOSE 80 443 24 | 25 | WORKDIR /opt/traefik-ingress-controller 26 | 27 | CMD ["/opt/traefik-ingress-controller/traefik"] 28 | -------------------------------------------------------------------------------- /docker/remove_dangling_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | # List dangling Docker images 6 | list_dangling_images() { 7 | docker images --quiet --filter=dangling=true 8 | } 9 | 10 | # Remove dangling images 11 | remove_dangling_images() { 12 | local image_ids="$1" 13 | if [ -n "$image_ids" ]; then 14 | echo "Removing dangling images..." 15 | echo "$image_ids" | xargs --no-run-if-empty docker rmi 16 | else 17 | echo "No dangling images found." 18 | fi 19 | } 20 | -------------------------------------------------------------------------------- /scripts/backup_restore_etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Perform etcd backup 4 | perform_backup() { 5 | echo "Performing etcd backup..." 
6 | read -p "Enter etcd endpoints (e.g., https://10.0.1.101:2379): " endpoints 7 | read -p "Enter path to etcd CA certificate (e.g., /home/k8s_user/etcd-certs/etcd-ca.pem): " cacert 8 | read -p "Enter path to etcd server certificate (e.g., /home/k8s_user/etcd-certs/etcd.crt): " cert 9 | read -p "Enter path to etcd server key (e.g., /home/k8s_user/etcd-certs/etcd.key): " key 10 | read -p "Enter backup file path (e.g., /home/cloud_user/etcd_backup.db): " backup_file 11 | 12 | ETCDCTL_API=3 etcdctl snapshot save "$backup_file" \ 13 | --endpoints="$endpoints" \ 14 | --cacert="$cacert" \ 15 | --cert="$cert" \ 16 | --key="$key" 17 | 18 | echo "Backup completed successfully." 19 | } 20 | 21 | # Perform etcd restore 22 | perform_restore() { 23 | echo "Performing etcd restore..." 24 | read -p "Enter initial cluster configuration (e.g., etcd-restore=https://10.0.1.101:2380): " initial_cluster 25 | read -p "Enter initial advertise peer URLs (e.g., https://10.0.1.101:2380): " advertise_urls 26 | read -p "Enter the name for the restored cluster (e.g., etcd-restore): " cluster_name 27 | read -p "Enter backup file path (e.g., /home/cloud_user/etcd_backup.db): " backup_file 28 | read -p "Enter data directory path (e.g., /var/lib/etcd): " data_dir 29 | 30 | sudo ETCDCTL_API=3 etcdctl snapshot restore "$backup_file" \ 31 | --initial-cluster "$initial_cluster" \ 32 | --initial-advertise-peer-urls "$advertise_urls" \ 33 | --name "$cluster_name" \ 34 | --data-dir "$data_dir" 35 | 36 | sudo chown -R etcd:etcd "$data_dir" 37 | sudo systemctl start etcd 38 | 39 | echo "Restore completed successfully." 40 | } 41 | 42 | # Prompt to preform a action 43 | echo "Choose an action:" 44 | echo "1. Backup" 45 | echo "2. Restore" 46 | read -p "Enter your choice (1 or 2): " choice 47 | 48 | case "$choice" in 49 | 1) 50 | perform_backup 51 | ;; 52 | 2) 53 | perform_restore 54 | ;; 55 | *) 56 | echo "Invalid choice. Exiting." 
57 | exit 1 58 | ;; 59 | esac 60 | -------------------------------------------------------------------------------- /scripts/check_ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to test ingress 3 | set -euf -o pipefail 4 | 5 | # Retrieve ingress hostnames 6 | get_ingress_hostnames() { 7 | kubectl get ing --all-namespaces | tail -n +2 | awk '{print $3}' | cut -d',' -f1 8 | } 9 | 10 | # Check ingress endpoints 11 | check_ingress_endpoints() { 12 | local hostname="$1" 13 | echo "Checking $hostname.." 14 | curl -sLI -w "HTTP Response: %{http_code}\n" "https://$hostname" -o /dev/null 15 | echo "" 16 | } 17 | 18 | # Process ingress endpoints 19 | process_ingress_endpoints() { 20 | local ingress_list=($(get_ingress_hostnames)) 21 | 22 | for ingress in "${ingress_list[@]}"; do 23 | check_ingress_endpoints "$ingress" 24 | done 25 | } 26 | -------------------------------------------------------------------------------- /scripts/check_ingress_02.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # List namespaces 4 | list_namespaces() { 5 | kubectl get namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 6 | } 7 | 8 | # Test ingress with curl 9 | test_ingress() { 10 | local namespace="$1" 11 | local ingress_host 12 | 13 | # Get ingress details for the specified namespace 14 | ingress_host=$(kubectl get ingress -n "$namespace" -o jsonpath='{.spec.rules[0].host}') 15 | 16 | if [[ -n "$ingress_host" ]]; then 17 | echo "Testing ingress for namespace $namespace (Host: $ingress_host)" 18 | 19 | # Check if ingress uses HTTPS 20 | if kubectl get ingress -n "$namespace" -o jsonpath='{.spec.tls[0].hosts[0]}' >/dev/null 2>&1; then 21 | curl -kL "https://$ingress_host" 22 | fi 23 | else 24 | echo "No ingress found for namespace $namespace." 25 | fi 26 | } 27 | 28 | # Main menu 29 | echo "Choose an option:" 30 | echo "1. 
Test ingress on all namespaces" 31 | echo "2. Test ingress on a specific namespace" 32 | read -p "Enter your choice (1 or 2): " user_choice 33 | 34 | case "$user_choice" in 35 | 1) 36 | echo "Testing ingress on all namespaces:" 37 | for ns in $(list_namespaces); do 38 | test_ingress "$ns" 39 | done 40 | ;; 41 | 2) 42 | echo "Available namespaces:" 43 | list_namespaces 44 | read -p "Enter the namespace to test ingress: " chosen_namespace 45 | test_ingress "$chosen_namespace" 46 | ;; 47 | *) 48 | echo "Invalid choice. Exiting." 49 | ;; 50 | esac 51 | -------------------------------------------------------------------------------- /scripts/cilium_connectivity_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | # Test all connectivities 5 | test_all_connectivity() { 6 | echo "Testing all connectivities..." 7 | cilium connectivity test 8 | echo "** Results: **" 9 | results=$(cilium connectivity test 2>&1) 10 | echo "$results" 11 | if [[ $? -eq 0 ]]; then 12 | echo "All connectivities passed!" 13 | else 14 | echo "Failed to test all connectivities!" 15 | exit 1 16 | fi 17 | } 18 | 19 | # Test specific connectivity 20 | test_specific_connectivity() { 21 | echo "Enter the name of the specific connectivity test: " 22 | read -r test_name 23 | if [[ -z "$test_name" ]]; then 24 | echo "Error: Please enter a valid test name." 25 | exit 1 26 | fi 27 | echo "Testing connectivity for '$test_name'..." 28 | cilium connectivity test --test "$test_name" 29 | echo "** Results: **" 30 | results=$(cilium connectivity test --test "$test_name" 2>&1) 31 | echo "$results" 32 | if [[ $? -eq 0 ]]; then 33 | echo "Connectivity test for '$test_name' passed!" 34 | else 35 | echo "Failed to test connectivity for '$test_name'!" 
36 | exit 1 37 | fi 38 | } 39 | 40 | # Menu options 41 | echo "Select an option:" 42 | echo " 1) Test all connectivities" 43 | echo " 2) Test specific connectivity" 44 | read -r choice 45 | 46 | # Process user choice 47 | if [[ $choice -eq 1 ]]; then 48 | test_all_connectivity 49 | elif [[ $choice -eq 2 ]]; then 50 | test_specific_connectivity 51 | else 52 | echo "Invalid choice. Please enter 1 or 2." 53 | exit 1 54 | fi 55 | -------------------------------------------------------------------------------- /scripts/cilium_enable_ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CILIUM_VERSION="1.17.4" 4 | NAMESPACE="kube-system" 5 | 6 | upgrade_cilium() { 7 | echo "Upgrading Cilium to version $CILIUM_VERSION in namespace $NAMESPACE..." 8 | helm upgrade cilium cilium/cilium \ 9 | --version "$CILIUM_VERSION" \ 10 | --namespace "$NAMESPACE" \ 11 | --reuse-values \ 12 | --set ingressController.enabled=true \ 13 | --set ingressController.loadbalancerMode=dedicated 14 | } 15 | 16 | restart_cilium_operator() { 17 | echo "Restarting Cilium operator in namespace $NAMESPACE..." 18 | kubectl -n "$NAMESPACE" rollout restart deployment/cilium-operator 19 | } 20 | 21 | restart_cilium_ds() { 22 | echo "Restarting Cilium DaemonSet in namespace $NAMESPACE..." 
23 | kubectl -n "$NAMESPACE" rollout restart ds/cilium 24 | } 25 | 26 | main() { 27 | upgrade_cilium 28 | restart_cilium_operator 29 | restart_cilium_ds 30 | } 31 | 32 | main 33 | -------------------------------------------------------------------------------- /scripts/cleanup_empty_ns.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | # List empty namespaces 5 | list_empty_namespaces() { 6 | kubectl get ns --no-headers -o custom-columns=":metadata.name" | while read -r namespace; do 7 | if kubectl get all -n "$namespace" 2>&1 | grep -q "No"; then 8 | echo "Empty namespace: $namespace" 9 | fi 10 | done 11 | } 12 | 13 | # Delete empty namespaces 14 | delete_empty_namespaces() { 15 | echo "Choose an option:" 16 | echo "1. Delete all empty namespaces" 17 | echo "2. Delete specific empty namespaces" 18 | read -p "Enter your choice: " choice 19 | 20 | case "$choice" in 21 | 1) 22 | kubectl get ns --no-headers -o custom-columns=":metadata.name" | while read -r namespace; do 23 | if kubectl get all -n "$namespace" 2>&1 | grep -q "No"; then 24 | kubectl delete namespace "$namespace" 25 | fi 26 | done 27 | ;; 28 | 2) 29 | read -p "Enter namespaces to delete (comma-separated): " namespaces 30 | IFS=',' read -ra ns_array <<< "$namespaces" 31 | for ns in "${ns_array[@]}"; do 32 | kubectl delete namespace "$ns" 33 | done 34 | ;; 35 | *) 36 | echo "Invalid choice. Exiting." 37 | ;; 38 | esac 39 | } 40 | 41 | # Main menu 42 | echo "Options Menu:" 43 | echo "1. List empty namespaces" 44 | echo "2. Delete empty namespaces" 45 | read -p "Enter your choice: " main_choice 46 | 47 | case "$main_choice" in 48 | 1) 49 | list_empty_namespaces 50 | ;; 51 | 2) 52 | delete_empty_namespaces 53 | ;; 54 | *) 55 | echo "Invalid choice. Exiting." 
56 | ;; 57 | esac 58 | -------------------------------------------------------------------------------- /scripts/cleanup_unused_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | # List all namespaces 5 | list_namespaces() { 6 | kubectl get namespaces -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' 7 | } 8 | 9 | # Get unused secrets 10 | get_unused_secrets() { 11 | local namespace=$1 12 | 13 | envSecrets=$(kubectl get pods -n $namespace -o jsonpath='{.items[*].spec.containers[*].env[*].valueFrom.secretKeyRef.name}' | xargs -n1) 14 | envSecrets2=$(kubectl get pods -n $namespace -o jsonpath='{.items[*].spec.containers[*].envFrom[*].secretRef.name}' | xargs -n1) 15 | volumeSecrets=$(kubectl get pods -n $namespace -o jsonpath='{.items[*].spec.volumes[*].secret.secretName}' | xargs -n1) 16 | pullSecrets=$(kubectl get pods -n $namespace -o jsonpath='{.items[*].spec.imagePullSecrets[*].name}' | xargs -n1) 17 | tlsSecrets=$(kubectl get ingress -n $namespace -o jsonpath='{.items[*].spec.tls[*].secretName}' | xargs -n1) 18 | SASecrets=$(kubectl get secrets -n $namespace --field-selector=type=kubernetes.io/service-account-token -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | xargs -n1) 19 | 20 | usedSecrets=$(echo "$envSecrets\n$envSecrets2\n$volumeSecrets\n$pullSecrets\n$tlsSecrets\n$SASecrets" | sort | uniq) 21 | allSecrets=$(kubectl get secrets -n $namespace -o jsonpath='{.items[*].metadata.name}' | xargs -n1 | sort | uniq) 22 | 23 | unusedSecrets=$(diff <(echo "$usedSecrets") <(echo "$allSecrets") | grep '>' | awk '{print $2}') 24 | echo "$unusedSecrets" 25 | } 26 | 27 | # Delete unused secrets 28 | delete_unused_secrets() { 29 | local namespace=$1 30 | unusedSecrets=$(get_unused_secrets $namespace) 31 | if [ -z "$unusedSecrets" ]; then 32 | echo "No unused secrets found in namespace $namespace." 
33 | return 34 | fi 35 | 36 | echo "Unused secrets in namespace $namespace:" 37 | echo "$unusedSecrets" 38 | read -p "Do you want to delete these secrets? (y/n): " confirm 39 | if [ "$confirm" == "y" ]; then 40 | for secret in $unusedSecrets; do 41 | kubectl delete secret $secret -n $namespace 42 | done 43 | echo "Unused secrets deleted." 44 | else 45 | echo "Deletion aborted." 46 | fi 47 | } 48 | 49 | # Menu 50 | echo "Select an option:" 51 | echo "1) Clean up unused secrets in a specific namespace" 52 | echo "2) Clean up unused secrets in all namespaces" 53 | echo "3) Exit" 54 | read -p "Enter your choice: " choice 55 | 56 | case $choice in 57 | 1) 58 | echo "Available namespaces:" 59 | list_namespaces 60 | read -p "Enter the namespace: " namespace 61 | delete_unused_secrets $namespace 62 | ;; 63 | 2) 64 | for namespace in $(list_namespaces); do 65 | delete_unused_secrets $namespace 66 | done 67 | ;; 68 | 3) 69 | exit 0 70 | ;; 71 | *) 72 | echo "Invalid choice, exiting." 73 | ;; 74 | esac 75 | -------------------------------------------------------------------------------- /scripts/cluster_health.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to check the health status of the cluster and report the objects and resources 4 | 5 | # Colors 6 | RED='\033[0;31m' 7 | GREEN='\033[0;32m' 8 | YELLOW='\033[0;33m' 9 | BLUE='\033[0;36m' 10 | PLAIN='\033[0m' 11 | bold=$(tput bold) 12 | normal=$(tput sgr0) 13 | 14 | deploy="$2" 15 | namespace="$1" 16 | 17 | cluster_objects() { 18 | echo -e "\e[44mCollecting Information from the Cluster:\e[21m" 19 | deployments=$(kubectl get deployment --all-namespaces | grep -v NAMESPACE | wc -l) 20 | pods=$(kubectl get po --all-namespaces | grep -v NAMESPACE | wc -l) 21 | services=$(kubectl get svc --all-namespaces | grep -v NAMESPACE | wc -l) 22 | ingresses=$(kubectl get ing --all-namespaces | grep -v NAMESPACE | wc -l) 23 | statefulset=$(kubectl get statefulset 
--all-namespaces | grep -v NAMESPACE | wc -l) 24 | postgresql=$(kubectl get postgresql --all-namespaces | grep -v NAMESPACE | wc -l) 25 | daemonset=$(kubectl get daemonset --all-namespaces | grep -v NAMESPACE | wc -l) 26 | replicaset=$(kubectl get rs --all-namespaces | grep -v NAMESPACE | wc -l) 27 | serviceaccount=$(kubectl get sa --all-namespaces | grep -v NAMESPACE | wc -l) 28 | storageclass=$(kubectl get sc --all-namespaces | grep -v NAMESPACE | wc -l) 29 | PodDistrubtion=$(kubectl get pdb --all-namespaces | grep -v NAMESPACE | wc -l) 30 | CustomResources=$(kubectl get crd --all-namespaces | grep -v NAMESPACE | wc -l) 31 | cronjobs=$(kubectl get cronjobs --all-namespaces | grep -v NAMESPACE | wc -l) 32 | persistancevolumes=$(kubectl get pv --all-namespaces | grep -v NAMESPACE | wc -l) 33 | volumeclaims=$(kubectl get pvc --all-namespaces | grep -v NAMESPACE | wc -l) 34 | hpa=$(kubectl get hpa --all-namespaces | grep -v NAMESPACE | wc -l) 35 | echo -e "\e[1m\e[39mCluster Resources:\e[21m" 36 | echo -e "${BLUE}"Deployments" :${GREEN}$deployments" 37 | echo -e "${BLUE}"Services" :${GREEN}$services" 38 | echo -e "${BLUE}"Ingresses" :${GREEN}$ingresses" 39 | echo -e "${BLUE}"StatefulSets" :${GREEN}$statefulset" 40 | echo -e "${BLUE}"Pods" :${GREEN}$pods" 41 | echo -e "${BLUE}"DaemonSets" :${GREEN}$daemonset" 42 | echo -e "${BLUE}"ReplicaSets" :${GREEN}$replicaset" 43 | echo -e "${BLUE}"StorageClasses" :${GREEN}$storageclass" 44 | echo -e "${BLUE}"CronJobs" :${GREEN}$cronjobs" 45 | echo -e "${BLUE}"PostgreSQL" :${GREEN}$postgresql" 46 | echo -e "${BLUE}"CustomResources" :${GREEN}$CustomResources" 47 | echo -e "${BLUE}"HorizontalPodAutoscaler" :${GREEN}$hpa" 48 | echo -e "${BLUE}"PersistanceVolumes" :${GREEN}$persistancevolumes" 49 | echo -e "${BLUE}"VolumeClaims" :${GREEN}$volumeclaims" 50 | 51 | } 52 | 53 | cluster_nodes() { 54 | nodes=$(kubectl get nodes | grep -v NAME | wc -l) 55 | worker=$(kubectl get nodes | grep -v NAME | grep worker | wc -l) 56 | 
master=$(kubectl get nodes | grep -v NAME | grep master | wc -l) 57 | node_status=$(for i in $(kubectl get node | grep -v NAME | awk {'print $2'} | sort -u); do echo "$i";done) 58 | echo -e "\e[1m\e[39mCluster Node Status:\e[21m" 59 | echo -e "${BLUE}"ALL Nodes" :${GREEN}$nodes" 60 | echo -e "${BLUE}"Worker Nodes" :${GREEN}$worker" 61 | echo -e "${BLUE}"Master Nodes" :${GREEN}$master" 62 | echo -e "${BLUE}"Nodes Status" :${GREEN}$node_status" 63 | echo -e "\e[1m\e[39mNodes Conditions:\e[21m" 64 | echo -e "${BLUE}$(kubectl describe node | grep kubelet | awk {'print $15'} | sort -u)" 65 | echo -e "\e[1m\e[39mPods Per Node:\e[21m" 66 | for node in $(kubectl get node | grep -v NAME | awk {'print $1'}) 67 | do pod_per_node=$(kubectl get pods --all-namespaces --field-selector spec.nodeName=$node -o wide | wc -l) 68 | echo -e "${BLUE}"$node" \t :${GREEN}$pod_per_node" 69 | done 70 | # Nodes Per AZ 71 | a=$(kubectl get node -l failure-domain.beta.kubernetes.io/zone=eu-central-1a | grep -v NAME | grep -v master | wc -l) 72 | b=$(kubectl get node -l failure-domain.beta.kubernetes.io/zone=eu-central-1b | grep -v NAME | grep -v master | wc -l) 73 | c=$(kubectl get node -l failure-domain.beta.kubernetes.io/zone=eu-central-1c | grep -v NAME | grep -v master | wc -l) 74 | echo -e "\e[1m\e[39mWorker Nodes per AZ:\e[21m" 75 | echo -e "${BLUE}"eu-central-1a" \t :${GREEN}$a" 76 | echo -e "${BLUE}"eu-central-1b" \t :${GREEN}$b" 77 | echo -e "${BLUE}"eu-central-1c" \t :${GREEN}$c" 78 | #Node Types 79 | types=$(kubectl describe node | grep beta.kubernetes.io/instance-type | cut -d"=" -f2 | sort | uniq -c | awk -F$'\t' {'print $2 $1'}) 80 | echo -e "\e[1m\e[39mCluster Node Types:\e[21m" 81 | echo -e "\e[34m$types" 82 | } 83 | 84 | pod_with_issues() { 85 | echo -e "\e[1m\e[39mPods not in Running or Completed State:\e[21m" 86 | kubectl get pods --all-namespaces --field-selector=status.phase!=Running | grep -v Completed 87 | } 88 | 89 | top_mem_pods() { 90 | echo -e "\e[1m\e[39mTop Pods 
According to Memory Limits:\e[21m" 91 | for node in $(kubectl get node | awk {'print $1'} | grep -v NAME) 92 | do kubectl describe node $node | sed -n "/Non-terminated Pods/,/Allocated resources/p"| grep -P -v "terminated|Allocated|Namespace" 93 | done | grep '[0-9]G' | awk -v OFS=' \t' '{if ($9 >= '2Gi') print "\033[0;36m"$2," ", "\033[0;31m"$9}' | sort -k2 -r | column -t 94 | 95 | } 96 | top_cpu_pods() { 97 | echo -e "\e[1m\e[39mTop Pods According to CPU Limits:\e[21m" 98 | for node in $(kubectl get node | awk {'print $1'} | grep -v NAME) 99 | do kubectl describe node $node | sed -n "/Non-terminated Pods/,/Allocated resources/p" | grep -P -v "terminated|Allocated|Namespace" 100 | done | awk -v OFS=' \t' '{if ($5 ~/^[2-9]+$/) print "\033[0;36m"$2, "\033[0;31m"$5}' | sort -k2 -r | column -t 101 | } 102 | 103 | clear 104 | cluster_objects 105 | cluster_nodes 106 | pod_with_issues 107 | top_mem_pods 108 | top_cpu_pods 109 | -------------------------------------------------------------------------------- /scripts/containers_images_list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Argument 1: Task -t 3 | # Argument 2: Task number (1, 2, 3, 4) 4 | # Example 1: ./kubectl_list_containers_images.sh -t 1 5 | # Example 2: ./kubectl_list_containers_images.sh to list availble tasks 6 | # Reference: https://kubernetes.io/docs/tasks/access-application-cluster/list-all-running-container-images/ 7 | set -euf -o pipefail 8 | 9 | usage() { 10 | echo "Usage: $0 [-t task]" 11 | echo "Available tasks are:" 12 | echo "1 - List all Container images" 13 | echo "2 - List Container images by Pod" 14 | echo "3 - List Container images filtering by Pod label" 15 | echo "4 - List Container images filtering by Pod namespace" 16 | exit 1 17 | } 18 | 19 | while getopts ":t:" opt; do 20 | case $opt in 21 | t) 22 | task=$OPTARG 23 | ;; 24 | \?) 
25 | echo "Invalid option: -$OPTARG" >&2 26 | usage 27 | ;; 28 | :) 29 | echo "Option -$OPTARG requires an argument." >&2 30 | usage 31 | ;; 32 | esac 33 | done 34 | 35 | shift $((OPTIND-1)) 36 | 37 | if [ -z "$task" ]; then 38 | echo "No task specified. Please choose one of the available tasks." 39 | usage 40 | else 41 | PS3="Please enter your choice: " 42 | select namespace in "All namespaces" $(kubectl get ns | awk 'NR>1 {print $1}') 43 | do 44 | case $namespace in 45 | "") 46 | echo "Invalid choice. Please try again." 47 | ;; 48 | "All namespaces") 49 | echo "You chose all namespaces." 50 | namespace="" 51 | break 52 | ;; 53 | *) 54 | echo "You chose $namespace." 55 | break 56 | ;; 57 | esac 58 | done 59 | 60 | case $task in 61 | 1) 62 | if [ -z "$namespace" ]; then # If namespace is empty 63 | echo "Listing all container images." 64 | kubectl get pods -o jsonpath="{.items[*].spec.containers[*].image}" |\ 65 | tr -s '[[:space:]]' '\n' |\ 66 | sort |\ 67 | uniq -c 68 | else 69 | echo "Listing all container images in namespace $namespace." 70 | kubectl get pods -n $namespace -o jsonpath="{.items[*].spec.containers[*].image}" |\ 71 | tr -s '[[:space:]]' '\n' |\ 72 | sort |\ 73 | uniq -c 74 | fi 75 | break 76 | ;; 77 | 2) 78 | if [ -z "$namespace" ]; then 79 | echo "Listing container images by pod." 80 | kubectl get pods -o jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |\ 81 | sort 82 | else # 83 | echo "Listing container images by pod in namespace $namespace." 84 | kubectl get pods -n $namespace -o jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |\ 85 | sort 86 | fi 87 | break 88 | ;; 89 | 3) 90 | if [ -z "$namespace" ]; then 91 | echo "No namespace specified. Please choose a namespace first." 
92 | break 93 | else 94 | PS3="Please enter your choice: " 95 | select label in $(kubectl get pods -n $namespace --show-labels | awk 'NR>1 {print $NF}' | awk -F, '{for (i=1;i<=NF;i++) print $i}' | sort | uniq) 96 | do 97 | case $label in 98 | "") 99 | echo "Invalid choice. Please try again." 100 | ;; 101 | *) 102 | echo "You chose $label." 103 | label=$(echo $label | cut -d'=' -f1) # Get the name of the label 104 | break 105 | ;; 106 | esac 107 | done 108 | echo "Listing container images filtering by pod label $label in namespace $namespace." 109 | kubectl get pods -n $namespace -o jsonpath="{.items[*].spec.containers[*].image}" -l $label 110 | fi 111 | break 112 | ;; 113 | 4) 114 | if [ -z "$namespace" ]; then # If namespace is empty 115 | echo "No namespace specified. Listing container images filtering by pod namespace." 116 | kubectl get pods -o jsonpath="{.items[*].spec.containers[*].image}" 117 | else 118 | echo "Listing container images filtering by pod namespace in namespace $namespace." 119 | kubectl get pods -n $namespace -o jsonpath="{.items[*].spec.containers[*].image}" 120 | fi 121 | break 122 | ;; 123 | *) 124 | echo "Invalid task: $task. Please choose one of the available tasks." 125 | usage 126 | ;; 127 | esac 128 | fi 129 | -------------------------------------------------------------------------------- /scripts/create_aks_cilium.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Create a resource group 4 | create_resource_group() { 5 | echo "Creating resource group $resourceGroupName..." 6 | az group create --name "$resourceGroupName" --location "$location" 7 | } 8 | 9 | # Create a virtual network 10 | create_virtual_network() { 11 | echo "Creating virtual network $vnetName..." 
12 | az network vnet create -g "$resourceGroupName" --location "$location" \ 13 | --name "$vnetName" --address-prefixes "$vnetAddressPrefix" -o none 14 | az network vnet subnet create -g "$resourceGroupName" --vnet-name "$vnetName" \ 15 | --name nodesubnet --address-prefixes "$nodesubnetAddressPrefix" -o none 16 | az network vnet subnet create -g "$resourceGroupName" --vnet-name "$vnetName" \ 17 | --name podsubnet --address-prefixes "$podsubnetAddressPrefix" -o none 18 | } 19 | 20 | # Create an AKS cluster with Azure CNI Overlay networking 21 | create_aks_overlay() { 22 | echo "Creating AKS cluster with Azure CNI Overlay networking..." 23 | az aks create -n "$clusterName" -g "$resourceGroupName" -l "$location" \ 24 | --network-plugin azure --network-plugin-mode overlay \ 25 | --pod-cidr 192.168.0.0/16 --network-dataplane cilium 26 | } 27 | 28 | # Create an AKS cluster with Azure CNI using a virtual network 29 | create_aks_vnet() { 30 | echo "Creating AKS cluster with Azure CNI using a virtual network..." 31 | az aks create -n "$clusterName" -g "$resourceGroupName" -l "$location" \ 32 | --max-pods 250 --network-plugin azure \ 33 | --vnet-subnet-id "/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Network/virtualNetworks/$vnetName/subnets/nodesubnet" \ 34 | --pod-subnet-id "/subscriptions/$subscriptionId/resourceGroups/$resourceGroupName/providers/Microsoft.Network/virtualNetworks/$vnetName/subnets/podsubnet" \ 35 | --network-dataplane cilium 36 | } 37 | 38 | # Main script 39 | echo "Choose an option:" 40 | echo "1. Assign IP addresses from an overlay network" 41 | echo "2. 
Assign IP addresses from a virtual network" 42 | read -p "Enter your choice (1 or 2): " option 43 | 44 | case "$option" in 45 | 1) 46 | read -p "Enter AKS cluster name: " clusterName 47 | read -p "Enter resource group name: " resourceGroupName 48 | read -p "Enter location: " location 49 | if [[ -z "$clusterName" || -z "$resourceGroupName" || -z "$location" ]]; then 50 | echo "Error: All input fields are required." 51 | exit 1 52 | fi 53 | create_resource_group 54 | create_aks_overlay 55 | ;; 56 | 2) 57 | read -p "Enter AKS cluster name: " clusterName 58 | read -p "Enter resource group name: " resourceGroupName 59 | read -p "Enter location: " location 60 | read -p "Enter subscription ID: " subscriptionId 61 | read -p "Enter virtual network name: " vnetName 62 | read -p "Enter virtual network address prefix (e.g., 10.0.0.0/8): " vnetAddressPrefix 63 | read -p "Enter nodesubnet address prefix (e.g., 10.240.0.0/16): " nodesubnetAddressPrefix 64 | read -p "Enter podsubnet address prefix (e.g., 10.241.0.0/16): " podsubnetAddressPrefix 65 | if [[ -z "$clusterName" || -z "$resourceGroupName" || -z "$location" || -z "$subscriptionId" || -z "$vnetName" || -z "$vnetAddressPrefix" || -z "$nodesubnetAddressPrefix" || -z "$podsubnetAddressPrefix" ]]; then 66 | echo "Error: All input fields are required." 67 | exit 1 68 | fi 69 | create_resource_group 70 | create_virtual_network 71 | create_aks_vnet 72 | ;; 73 | *) 74 | echo "Invalid choice. Please select 1 or 2." 
75 | ;; 76 | esac 77 | -------------------------------------------------------------------------------- /scripts/create_aks_cilium_byocni.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add AKS extension 4 | add_aks_extension() { 5 | az extension add --name aks-preview 6 | az extension update --name aks-preview 7 | } 8 | 9 | # Register AKS feature 10 | register_aks_feature() { 11 | az feature register --namespace "Microsoft.ContainerService" --name "KubeProxyConfigurationPreview" 12 | az provider register --namespace Microsoft.ContainerService 13 | } 14 | 15 | # Create a resource group 16 | create_resource_group() { 17 | az group create --name "$resource_group" --location $location 18 | } 19 | 20 | # Create a virtual network 21 | create_virtual_network() { 22 | echo "Creating virtual network $vnet_name..." 23 | az network vnet create -g "$resource_group" --location "$location" \ 24 | --name "$vnet_name" --address-prefixes "$vnetAddressPrefix" --subnet-name "$subnet_name" -o none 25 | az network vnet subnet create -g "$resource_group" --vnet-name "$vnet_name" \ 26 | --name nodesubnet --address-prefixes "$nodesubnetAddressPrefix" -o none 27 | az network vnet subnet create -g "$resource_group" --vnet-name "$vnet_name" \ 28 | --name podsubnet --address-prefixes "$podsubnetAddressPrefix" -o none 29 | } 30 | 31 | # Create an AKS cluster 32 | create_aks_cluster() { 33 | az aks create --resource-group "$resource_group" --name "$cluster_name" --location "$location" --network-plugin none --vnet-subnet-id "/subscriptions/$subscriptionId/resourceGroups/$resource_group/providers/Microsoft.Network/virtualNetworks/$vnet_name/subnets/$subnet_name" 34 | } 35 | 36 | # Install Helm 37 | install_helm() { 38 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 39 | chmod +x get_helm.sh 40 | ./get_helm.sh 41 | } 42 | 43 | # Configure Helm 44 | configure_helm() { 45 | kubectl 
create serviceaccount tiller --namespace kube-system 46 | kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller 47 | helm init --service-account tiller 48 | } 49 | 50 | # Install Cilium 51 | install_cilium() { 52 | helm install cilium cilium/cilium --version 1.14.0 \ 53 | --namespace kube-system \ 54 | --set kubeProxyReplacement=true \ 55 | --set k8sServiceHost="$api_server_ip" \ 56 | --set k8sServicePort="$api_server_port" \ 57 | --set aksbyocni.enabled=true \ 58 | --set nodeinit.enabled=true \ 59 | --set hubble.enabled=true 60 | } 61 | 62 | # Main script 63 | add_aks_extension 64 | register_aks_feature 65 | 66 | read -p "Enter a unique resource group name: " resource_group 67 | read -p "Enter a unique AKS cluster name: " cluster_name 68 | read -p "Enter location: " location 69 | read -p "Enter subscription ID: " subscriptionId 70 | read -p "Enter a VNet name: " vnet_name 71 | read -p "Enter a subnet name: " subnet_name 72 | read -p "Enter vnet address prefix (e.g., 10.0.0.0/8): " vnetAddressPrefix 73 | read -p "Enter nodesubnet address prefix (e.g., 10.240.0.0/16): " nodesubnetAddressPrefix 74 | read -p "Enter podsubnet address prefix (e.g., 10.241.0.0/16): " podsubnetAddressPrefix 75 | 76 | create_resource_group "$resource_group" 77 | create_virtual_network "$resource_group" "$vnet_name" "$subnet_name" 78 | create_aks_cluster "$resource_group" "$cluster_name" "$vnet_name" "$subnet_name" 79 | install_helm 80 | configure_helm 81 | 82 | # Get API server IP and port 83 | api_server_ip=$(kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}' | cut -d':' -f2 | cut -d'/' -f3) 84 | api_server_port=$(kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}' | cut -d':' -f3) 85 | 86 | install_cilium "$api_server_ip" "$api_server_port" 87 | 88 | echo "AKS cluster with Cilium CNI 
has been set up successfully!" 89 | -------------------------------------------------------------------------------- /scripts/deployment-health.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Colors 4 | RED='\033[0;31m' 5 | GREEN='\033[0;32m' 6 | YELLOW='\033[0;33m' 7 | BLUE='\033[0;36m' 8 | PLAIN='\033[0m' 9 | bold=$(tput bold) 10 | normal=$(tput sgr0) 11 | 12 | deploy="$2" 13 | namespace="$1" 14 | 15 | if [ $# -ne 2 ]; then 16 | echo "usage: $0 " 17 | exit 1 18 | fi 19 | 20 | var=$(kubectl get deployment -n ${namespace} --output=json ${deploy} | \ 21 | jq -j '.spec.selector.matchLabels | to_entries | .[] | "\(.key)=\(.value),"') 22 | selector=${var%?} 23 | 24 | pod_status() { 25 | no_of_pods=$(kubectl get po -n $namespace -l $selector | grep -v NAME | wc -l) 26 | if [[ $no_of_pods -eq 0 ]] 27 | then 28 | echo "Deployment $deploy has 0 replicas" 29 | exit 0 30 | fi 31 | pods_status=$(for i in $(kubectl get po -n $namespace -l $selector | grep -v NAME | awk {'print $3'} | sort -u); do echo "$i";done) 32 | restart_count=$(kubectl get po -n $namespace -l $selector | grep -v NAME | awk {'print $4'} | grep -v RESTARTS | sort -ur | awk 'FNR <= 1') 33 | echo -e "${BLUE}"Number of Pods" :${GREEN}$no_of_pods" 34 | echo -e "${BLUE}"Pods Status" :${GREEN}$pods_status" 35 | echo -e "${BLUE}"MAX Pod Restart Count" :${GREEN}$restart_count" 36 | readiness() { 37 | r=$(kubectl get po -n $namespace | grep $deploy | grep -vE '1/1|2/2|3/3|4/4|5/5|6/6|7/7' &> /dev/null ) 38 | if [[ $? 
-ne 0 ]] 39 | then 40 | echo -e "${BLUE}"Readiness" :${GREEN}"ALL Pods are Ready"" 41 | else 42 | echo -e "${BLUE}"Readiness" :${RED}"You have some Pods not ready "" 43 | fi 44 | } 45 | readiness 46 | 47 | } 48 | pod_distribution() { 49 | echo -e "\e[1m\e[39mPod Distribution per Node\e[21m" 50 | for nodes in $(kubectl get po -n $namespace -l $selector -o wide | grep $deploy | awk {'print $7'} | sort -u) 51 | do 52 | echo -e "${BLUE}$nodes \t \t :${GREEN}$(kubectl describe node $nodes | grep $deploy | wc -l)" 53 | done 54 | echo -e "\e[1m\e[39mNode Distribution per Availability Zone\e[21m" 55 | node_dist=$(for node in $(kubectl get po -n $namespace -l $selector -o wide | grep $deploy | awk {'print $7'} | sort -u) 56 | do kubectl get node --show-labels $node 57 | done | awk {'print $6'} | grep -v LABELS) 58 | a=$(echo $node_dist | grep -o eu-central-1a | wc -l) 59 | b=$(echo $node_dist | grep -o eu-central-1b | wc -l) 60 | c=$(echo $node_dist | grep -o eu-central-1c | wc -l) 61 | echo -e "${BLUE}"eu-central-1a" \t \t :${GREEN}$a" 62 | echo -e "${BLUE}"eu-central-1b" \t \t :${GREEN}$b" 63 | echo -e "${BLUE}"eu-central-1c" \t \t :${GREEN}$c" 64 | 65 | } 66 | 67 | pod_utilization() { 68 | 69 | cpulimit=$(kubectl describe node | grep $(kubectl get po -n ${namespace} -l ${selector} | grep -v NAME | \ 70 | awk {'print $1'} | head -n1) | awk {'print $5'} | grep -Ev "^$" | sort -u | \ 71 | awk '{ if ($0 ~ /[0-9]*m/) print $0; else print $0*1000;}' | sed 's/[^0-9]*//g') 72 | 73 | memlimit=$(kubectl describe node | grep $(kubectl get po -n ${namespace} -l ${selector} | grep -v NAME | \ 74 | awk {'print $1'} | head -n1) | awk {'print $9'} | grep -Ev "^$" | sort -u | \ 75 | awk '{ if ($0 ~ /[0-9]*Gi/) print $0*1024; else if ($0 ~ /[0-9]*G/) print $0*1000; \ 76 | else if ($0 ~ /[0-9]*M/ || $0 ~ /[0-9]*Mi/) print $0 ; else print $0}' | sed 's/[^0-9]*//g') 77 | dcores=$(kubectl top pods -n $namespace | grep $deploy | awk {'print $2'} | sed 's/[^0-9]*//g' | awk '{n += $1}; END{print 
n}') 78 | dmem=$(kubectl top pods -n $namespace | grep $deploy | awk {'print $3'} | sed 's/[^0-9]*//g' | awk '{n += $1}; END{print n}') 79 | 80 | 81 | if [ $cpulimit -eq 0 ] 82 | then 83 | echo -e "\e[1m\e[33mWARN: Pods do not have CPU Limits\e[21m" 84 | else 85 | echo -e "\e[1m\e[39mAverage Utilization \e[21m" 86 | deploymentcpu=$(bc <<< "scale=2;$dcores/($cpulimit*$no_of_pods)*100") 87 | echo -e "${BLUE}"CPU Utilization" :${GREEN}$deploymentcpu%" 88 | if [ $memlimit -ne 0 ] 89 | then 90 | deploymentmem=$(bc <<< "scale=2;$dmem/($memlimit*$no_of_pods)*100") 91 | echo -e "${BLUE}"Memory Utilization" :${GREEN}$deploymentmem%" 92 | fi 93 | echo -e "\e[1m\e[39mTop Pods CPU Utilization\e[21m" 94 | kubectl top pods -n $namespace -l $selector | grep -v NAME| \ 95 | awk 'FNR <= 5' | awk {'print $1,$2'}| awk '$2=($2/'$cpulimit')*100"%"' | \ 96 | awk '{printf $1 " " "%0.2f\n",$2}' | sort -k2 -r | \ 97 | awk -v OFS='\t' '{if ($2 >= 80) print "\033[0;36m"$1," ", "\033[0;31m"":"$2"%"; else print "\033[0;36m"$1," ","\033[0;32m"":"$2"%";}' 98 | fi 99 | if [ $memlimit -eq 0 ] 100 | then 101 | echo -e "\e[1m\e[33mWARN: Pods do not have Memory Limits\e[21m" 102 | else 103 | echo -e "\e[1m\e[39mTop Pods Memory Utilization\e[21m" 104 | kubectl top pods -n $namespace -l $selector | grep -v NAME | \ 105 | awk 'FNR <= 5' | awk {'print $1,$3'} | awk '$2=($2/'$memlimit')*100"%"' | \ 106 | awk '{printf $1 " " "%0.2f\n",$2}' | sort -k2 -r | \ 107 | awk -v OFS=' \t' '{if ($2 >= 80) print "\033[0;36m"$1," ", "\033[0;31m"":"$2"%"; else print "\033[0;36m"$1," ","\033[0;32m"":"$2"%";}' 108 | fi 109 | } 110 | 111 | clear 112 | kubectl get deploy $deploy -n $namespace &> /dev/null 113 | status=$? 114 | if [ $status -ne 0 ]; then 115 | echo -e "Deployment $deploy not exist. 
\nPlease make sure you provide the correct deployment name and the correct namespace" 116 | exit $status 117 | fi 118 | echo -e "\e[1m\e[39mChecking Deployment $deploy...\e[21m" 119 | pod_status 120 | pod_utilization 121 | pod_distribution 122 | -------------------------------------------------------------------------------- /scripts/enable_ports_rke2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # TCP ports 4 | ALLOWED_PORTS=(6443 9100 8080 4245 9345 6443 6444 10250 10259 10257 2379 2380 9796 19090 9090 6942 9091 4244 4240 80 443 9963 9964 8081 8082 7000 9001 6379 9121 8084 6060 6061 6062 9879 9890 9891 9892 9893 9962 9966) 5 | for port in "${ALLOWED_PORTS[@]}"; do 6 | sudo firewall-cmd --add-port="$port/tcp" --permanent 7 | done 8 | 9 | # UDP ports (for GENEVE overlay) 10 | UDP_PORTS=(8472 4789 6081 51871 53 55355 58467 41637 39291 38519 46190) 11 | for port in "${UDP_PORTS[@]}"; do 12 | sudo firewall-cmd --add-port="$port/udp" --permanent 13 | done 14 | 15 | # Reload the firewall rules 16 | sudo firewall-cmd --reload 17 | -------------------------------------------------------------------------------- /scripts/enable_ports_rke2_func.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add TCP ports 4 | add_tcp_ports() { 5 | local ports=("$@") 6 | for port in "${ports[@]}"; do 7 | sudo firewall-cmd --add-port="$port/tcp" --permanent 8 | done 9 | } 10 | 11 | # Add UDP ports 12 | add_udp_ports() { 13 | local ports=("$@") 14 | for port in "${ports[@]}"; do 15 | sudo firewall-cmd --add-port="$port/udp" --permanent 16 | done 17 | } 18 | 19 | # Reload the firewall rules 20 | reload_firewall() { 21 | sudo firewall-cmd --reload 22 | } 23 | 24 | # Define allowed ports 25 | ALLOWED_TCP_PORTS=(6443 9100 8080 4245 9345 6443 6444 10250 10259 10257 2379 2380 9796 19090 9090 6942 9091 4244 4240 80 443 9963 9964 8081 8082 7000 9001 6379 9121 8084 6060 6061 
6062 9879 9890 9891 9892 9893 9962 9966) 26 | ALLOWED_UDP_PORTS=(8472 4789 6081 51871 53 55355 58467 41637 39291 38519 46190) 27 | 28 | # Add allowed ports 29 | add_tcp_ports "${ALLOWED_TCP_PORTS[@]}" 30 | add_udp_ports "${ALLOWED_UDP_PORTS[@]}" 31 | 32 | # Reload firewall rules 33 | reload_firewall 34 | -------------------------------------------------------------------------------- /scripts/get_pod_ip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | POD2=$(kubectl get pod pod-worker2 --template '{{.status.podIP}}') 4 | echo $POD2 5 | -------------------------------------------------------------------------------- /scripts/inject_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Iterate list of all secrets in AWS Secrets Manager and inject into cluster as k8s secrets 4 | # USAGE: ./inject_secrets.sh secret_prefix cluster namespace region profile 5 | # USAGE EXMP: ./inject_secrets.sh myapp/dev foocluster app us-west-2 profilename 6 | 7 | if [[ $# -ne 5 ]] ; then 8 | echo "usage: $0 secret_prefix cluster namespace region profile" >&2 9 | exit 2 10 | fi 11 | 12 | secret_prefix=$1 13 | cluster=$2 14 | namespace=$3 15 | AWS_REGION=$4 16 | AWS_PROFILE=$5 17 | 18 | kubectl_ver=$(kubectl version --client=true -o json | jq -rj '.clientVersion | .major, ".", .minor') 19 | dry_run_flag="--dry-run" 20 | if [[ "$ver_major" -gt "1" ]] || [[ "$ver_minor" -gt "17" ]]; then 21 | dry_run_flag="--dry-run=client" 22 | fi 23 | 24 | echo "Injecting all secrets under ${secret_prefix} from AWS Secrets Manager into cluster ${cluster}, namespace ${namespace}" 25 | 26 | secret_count=0 27 | 28 | for secret_name in $(aws secretsmanager list-secrets --profile ${AWS_PROFILE} --region ${AWS_REGION} --query 'SecretList[?Name!=`null`]|[?starts_with(Name, `'${secret_prefix}'`) == `true`].Name' --output text); do 29 | secret_count=$((secret_count+1)) 30 | 31 | if [[ 
$secret_name == "None" ]]; then 32 | echo "error: aws secrets manager list-secrets returned None." 33 | exit 1 34 | fi 35 | 36 | unset k8s_secret_name value 37 | 38 | echo "secret name: $secret_name" 39 | k8s_secret_name=$(echo ${secret_name#"$secret_prefix"/} | tr "/_" "-") 40 | if [[ -z $k8s_secret_name ]]; then 41 | echo "warning: k8s_secret_name empty for secret_name=$secret_name" 42 | fi 43 | 44 | value=$(aws secretsmanager get-secret-value --secret-id ${secret_name} --query 'SecretString' --output text --region ${AWS_REGION}) 45 | 46 | if [[ -z $value ]]; then 47 | echo "warning: secret value is empty for secret_name=${secret_name}. not injecting this secret." 48 | else 49 | if [[ ${secret_count} -eq 1 ]]; then 50 | # table header 51 | echo 52 | line=$(printf -- '=%.0s' {1..20}; echo "") 53 | printf "%-65s----> %s\n" "AWS Secret name" "k8s Secret Name" 54 | printf "%-70s %s\n" ${line} ${line} 55 | fi 56 | printf "%-70s %s\n" ${secret_name} ${k8s_secret_name} 57 | 58 | kubectl create secret generic ${k8s_secret_name} --from-literal=password=${value} -n ${namespace} ${dry_run_flag} -o yaml | kubectl apply -f - > /dev/null 59 | fi 60 | done 61 | 62 | unset value 63 | 64 | if [[ $secret_count -eq 0 ]]; then 65 | echo "No secrets found in AWS Secrets Manager for secret name prefix ${secret_prefix}." 66 | fi 67 | -------------------------------------------------------------------------------- /scripts/install_cilium_talos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | # Check if Helm is installed 5 | check_helm() { 6 | if ! command -v helm &> /dev/null; then 7 | echo "Error: Helm is not installed." 
8 | exit 1 9 | fi 10 | } 11 | 12 | # Add Cilium Helm repository 13 | add_cilium_repo() { 14 | helm repo add cilium https://helm.cilium.io/ 15 | helm repo update 16 | } 17 | 18 | # Install Cilium 19 | install_cilium() { 20 | helm install cilium cilium/cilium \ 21 | --version 1.16.1 \ 22 | --namespace kube-system \ 23 | --set ipam.mode=kubernetes \ 24 | --set kubeProxyReplacement=true \ 25 | --set securityContext.capabilities.ciliumAgent="{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}" \ 26 | --set securityContext.capabilities.cleanCiliumState="{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}" \ 27 | --set cgroup.autoMount.enabled=false \ 28 | --set cgroup.hostRoot=/sys/fs/cgroup \ 29 | --set k8sServiceHost=localhost \ 30 | --set k8sServicePort=7445 31 | } 32 | 33 | # Main functions 34 | check_helm 35 | add_cilium_repo 36 | install_cilium 37 | -------------------------------------------------------------------------------- /scripts/install_extras.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # A sudo check 4 | check_sudo() { 5 | if [ [ "$(id -u)" -ne 0 ]]; then 6 | echo "Please run this script with sudo privileges." 7 | exit 1 8 | fi 9 | } 10 | 11 | # Install podman 12 | install_podman() { 13 | echo "Installing podman..." 
14 | sudo apt-get update -y 15 | sudo apt-get install -y podman 16 | sudo mkdir -p /etc/containers 17 | sudo tee /etc/containers/containers.conf </dev/null | openssl dgst -sha256 -hex | sed 's/^.* //') 93 | ENDPOINT=$(kubectl cluster-info | grep master | awk '{print $NF}') 94 | echo "kubeadm join $ENDPOINT --token $TOKEN --discovery-token-ca-cert-hash sha256:$HASH" 95 | } 96 | 97 | # Run the functions to install k8s with Calico 98 | install_containerd 99 | disable_swap 100 | install_k8s_dependencies 101 | add_k8s_repository 102 | install_k8s 103 | install_calico 104 | print_join_command 105 | -------------------------------------------------------------------------------- /scripts/install_k8s_cilium.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # A sudo check 4 | check_sudo() { 5 | if [ [ "$(id -u)" -ne 0 ]]; then 6 | echo "Please run this script with sudo privileges." 7 | exit 1 8 | fi 9 | } 10 | 11 | # Install and configure containerd 12 | install_containerd() { 13 | # Load containerd modules 14 | cat </dev/null | openssl dgst -sha256 -hex | sed 's/^.* //') 91 | ENDPOINT=$(kubectl cluster-info | grep master | awk '{print $NF}') 92 | echo "kubeadm join $ENDPOINT --token $TOKEN --discovery-token-ca-cert-hash sha256:$HASH" 93 | } 94 | 95 | # Run the functions to install k8s with cilium 96 | install_containerd 97 | disable_swap 98 | install_k8s_dependencies 99 | add_k8s_repository 100 | install_k8s 101 | install_cilium 102 | print_join_command 103 | -------------------------------------------------------------------------------- /scripts/k8s_cheat_sheet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | KUBECTL="kubectl" 4 | CHEAT_SHEET="k8s-cheat-sheet.txt" 5 | RESOURCES=("pod" "deployment" "service" "configmap" "secret" "ingress" "node" "namespace") 6 | OPERATIONS=("get" "describe" "create" "delete" "edit" "apply" "logs" "exec") 7 | 8 | echo "# 
Kubernetes Cheat Sheet" > $CHEAT_SHEET 9 | echo "" >> $CHEAT_SHEET 10 | 11 | read -p "Enter the cluster name: " CLUSTER_NAME 12 | read -p "Enter the context name: " CONTEXT_NAME 13 | 14 | echo "## Cluster and Context" >> $CHEAT_SHEET 15 | echo "" >> $CHEAT_SHEET 16 | echo "\`\`\`bash" >> $CHEAT_SHEET 17 | echo "# Set the cluster entry in the kubeconfig" >> $CHEAT_SHEET 18 | echo "$KUBECTL config set-cluster $CLUSTER_NAME --server= --certificate-authority=" >> $CHEAT_SHEET 19 | echo "" >> $CHEAT_SHEET 20 | echo "# Set the user entry in the kubeconfig" >> $CHEAT_SHEET 21 | echo "$KUBECTL config set-credentials --client-certificate= --client-key=" >> $CHEAT_SHEET 22 | echo "" >> $CHEAT_SHEET 23 | echo "# Set the context entry in the kubeconfig" >> $CHEAT_SHEET 24 | echo "$KUBECTL config set-context $CONTEXT_NAME --cluster=$CLUSTER_NAME --user= --namespace=" >> $CHEAT_SHEET 25 | echo "" >> $CHEAT_SHEET 26 | echo "# Use the context" >> $CHEAT_SHEET 27 | echo "$KUBECTL config use-context $CONTEXT_NAME" >> $CHEAT_SHEET 28 | echo "\`\`\`" >> $CHEAT_SHEET 29 | echo "" >> $CHEAT_SHEET 30 | 31 | for RESOURCE in "${RESOURCES[@]}"; do 32 | echo "## Resource: $RESOURCE" >> $CHEAT_SHEET 33 | echo "" >> $CHEAT_SHEET 34 | for OPERATION in "${OPERATIONS[@]}"; do 35 | echo "### Operation: $OPERATION" >> $CHEAT_SHEET 36 | echo "" >> $CHEAT_SHEET 37 | echo "\`\`\`bash" >> $CHEAT_SHEET 38 | 39 | case "$OPERATION" in 40 | 41 | get) 42 | echo "# Get all ${RESOURCE}s in the current namespace" >> $CHEAT_SHEET 43 | echo "$KUBECTL get $RESOURCE" >> $CHEAT_SHEET 44 | echo "" >> $CHEAT_SHEET 45 | 46 | echo "# Get all ${RESOURCE}s in all namespaces" >> $CHEAT_SHEET 47 | echo "$KUBECTL get $RESOURCE --all-namespaces" >> $CHEAT_SHEET 48 | echo "" >> $CHEAT_SHEET 49 | 50 | echo "# Get a specific ${RESOURCE} by name in the current namespace" >> $CHEAT_SHEET 51 | echo "$KUBECTL get $RESOURCE " >> $CHEAT_SHEET 52 | echo "" >> $CHEAT_SHEET 53 | 54 | echo "# Get a specific ${RESOURCE} by name in a 
specific namespace" >> $CHEAT_SHEET
        echo "$KUBECTL get $RESOURCE <name> -n <namespace>" >> $CHEAT_SHEET
        ;;

      describe)
        echo "# Describe all ${RESOURCE}s in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL describe $RESOURCE" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Describe all ${RESOURCE}s in all namespaces" >> $CHEAT_SHEET
        echo "$KUBECTL describe $RESOURCE -A" >> $CHEAT_SHEET
        # Bug fix: several lines in this and the delete) arm redirected with
        # ">$ CHEATSHEETS" / ">>$ CHEATSHEETS", which writes to a literal file
        # named '$' and passes CHEATSHEETS as an echo argument. All output now
        # appends to $CHEAT_SHEET.
        echo "" >> $CHEAT_SHEET

        echo "# Describe a specific ${RESOURCE} by name in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL describe $RESOURCE <name>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Describe a specific ${RESOURCE} by name in a specific namespace" >> $CHEAT_SHEET
        echo "$KUBECTL describe $RESOURCE <name> -n <namespace>" >> $CHEAT_SHEET
        ;;

      create)
        echo "# Create a ${RESOURCE} from a YAML file in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL create -f <filename.yaml>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Create a ${RESOURCE} from a YAML file in a specific namespace" >> $CHEAT_SHEET
        echo "$KUBECTL create -n <namespace> -f <filename.yaml>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Create a ${RESOURCE} from a JSON file in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL create -f <filename.json>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Create a ${RESOURCE} from a JSON file in a specific namespace" >> $CHEAT_SHEET
        echo "$KUBECTL create -n <namespace> -f <filename.json>" >> $CHEAT_SHEET
        ;;

      delete)
        echo "# Delete all ${RESOURCE}s in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL delete $RESOURCE --all" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Delete all ${RESOURCE}s in all namespaces" >> $CHEAT_SHEET
        echo "$KUBECTL delete $RESOURCE -A --all" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Delete a specific ${RESOURCE} by name in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL delete $RESOURCE <name>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Delete a specific ${RESOURCE} by name in a specific namespace" >> $CHEAT_SHEET
        echo "$KUBECTL delete $RESOURCE <name> -n <namespace>" >> $CHEAT_SHEET
        ;;

      edit)
        echo "# Edit a ${RESOURCE} by name in the current namespace using the default editor" >> $CHEAT_SHEET
        echo "$KUBECTL edit $RESOURCE <name>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Edit a ${RESOURCE} by name in the current namespace using a specific editor" >> $CHEAT_SHEET
        echo "EDITOR=<editor> kubectl edit $RESOURCE <name>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Edit a ${RESOURCE} by name in a specific namespace using the default editor" >> $CHEAT_SHEET
        echo "$KUBECTL edit $RESOURCE <name> -n <namespace>" >> $CHEAT_SHEET
        ;;

      apply)
        echo "# Apply changes to a ${RESOURCE} from a YAML file in the current namespace" >> $CHEAT_SHEET
        echo "$KUBECTL apply -f <filename.yaml>" >> $CHEAT_SHEET
        echo "" >> $CHEAT_SHEET

        echo "# Apply changes to a ${RESOURCE} from a YAML file in a specific namespace" >> $CHEAT_SHEET
        echo "$KUBECTL apply -n <namespace> -f <filename.yaml>" >> $CHEAT_SHEET
        ;;

      logs)
        if [ "$RESOURCE" == "pod" ]; then # Logs only work for pods and containers

          read -p "Do you want to follow the logs? (y/n): " FOLLOW

          if [ "$FOLLOW" == "y" ]; then # Follow the logs

            read -p "Do you want to specify a container? 
(y/n): " CONTAINER

            if [ "$CONTAINER" == "y" ]; then # Specify a container

              read -p "Enter the container name: " CONTAINER_NAME

              echo "# Follow the logs of a specific container in a pod by name in the current namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs -f <pod-name> -c $CONTAINER_NAME" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

              echo "# Follow the logs of a specific container in a pod by name in a specific namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs <pod-name> -n <namespace> -f -c $CONTAINER_NAME" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET
              # Bug fix: a verbatim duplicate of the previous command (plus a
              # blank line) was emitted here; the duplicate has been removed.

            else # Don't specify a container

              echo "# Follow the logs of a pod by name in the current namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs -f <pod-name>" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

              echo "# Follow the logs of a pod by name in a specific namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs <pod-name> -n <namespace> -f" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

            fi

          else

            read -p "Do you want to specify a container? (y/n): " CONTAINER

            if [ "$CONTAINER" == "y" ]; then # Specify a container

              read -p "Enter the container name: " CONTAINER_NAME

              echo "# Print the logs of a specific container in a pod by name in the current namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs <pod-name> -c $CONTAINER_NAME" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

              echo "# Print the logs of a specific container in a pod by name in a specific namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs <pod-name> -n <namespace> -c $CONTAINER_NAME" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

            else

              echo "# Print the logs of a pod by name in the current namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs <pod-name>" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

              echo "# Print the logs of a pod by name in a specific namespace" >> $CHEAT_SHEET
              echo "$KUBECTL logs <pod-name> -n <namespace>" >> $CHEAT_SHEET
              echo "" >> $CHEAT_SHEET

            fi

          fi

        fi
        ;;

      exec)
        if [ "$RESOURCE" == "pod" ]; then # Exec only works for pods and containers

          read -p "Do you want to specify a container? (y/n): " CONTAINER

          if [ "$CONTAINER" == "y" ]; then # Specify a container

            read -p "Enter the container name: " CONTAINER_NAME

            echo "# Execute commands in a specific container in a pod by name in the current namespace" >> $CHEAT_SHEET
            echo "$KUBECTL exec <pod-name> -c $CONTAINER_NAME -- <command>" >> $CHEAT_SHEET
            echo "" >> $CHEAT_SHEET

            echo "# Execute commands in a specific container in a pod by name in a specific namespace" >> $CHEAT_SHEET
            echo "$KUBECTL exec <pod-name> -n <namespace> -c $CONTAINER_NAME -- <command>" >> $CHEAT_SHEET
            echo "" >> $CHEAT_SHEET

          else

            echo "# Execute commands in a pod by name in the current namespace" >> $CHEAT_SHEET
            echo "$KUBECTL exec <pod-name> -- <command>" >> $CHEAT_SHEET
            echo "" >> $CHEAT_SHEET

            echo "# Execute commands in a pod by name in a specific namespace" >> $CHEAT_SHEET
            echo "$KUBECTL exec <pod-name> -n <namespace> -- <command>" >> $CHEAT_SHEET
            echo "" >> $CHEAT_SHEET

          fi

        fi
        ;;

    esac

    # Bug fix: these two lines wrote to a literal file named '$'
    # (">>$ CHEATSHEETS" / ">$ CHEATSHEETS") instead of the cheat sheet.
    echo "\`\`\`" >> $CHEAT_SHEET
    echo "" >> $CHEAT_SHEET
  done
done

cat $CHEAT_SHEET
-------------------------------------------------------------------------------- /scripts/k8s_limits_requests.py: --------------------------------------------------------------------------------
import subprocess
import json
import datetime as dt

def execute_kube_command_json(command):
    # Run a shell command and parse its stdout as JSON.
    kube_command = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
    json_output = str(kube_command.stdout.decode("utf-8"))
    json_object = json.loads(json_output)
    return json_object

# Audit every namespace for containers missing resource requests/limits.
audit_report = {}
command = "kubectl get namespaces -o json"
json_object = execute_kube_command_json(command)
number_of_namespaces = len(json_object["items"])
processed_namespace_count = 1

for namespace in json_object["items"]:
    process_start = dt.datetime.now()
    print("Processing Namespace", 
processed_namespace_count, "of", number_of_namespaces) 20 | namespace_name=namespace["metadata"]["name"] 21 | command = "kubectl get pods -n {namespace} -o json".format(namespace=namespace_name) 22 | json_object = execute_kube_command_json(command) 23 | audit_report[namespace_name] = {} 24 | if not json_object["items"]: 25 | processed_namespace_count += 1 26 | process_end = dt.datetime.now() 27 | how_long_to_finish = float(number_of_namespaces - processed_namespace_count) / (process_end - process_start).total_seconds() / 60 28 | if how_long_to_finish < 1: 29 | how_long_to_finish * 60 30 | print(how_long_to_finish, "seconds") 31 | else: 32 | print(how_long_to_finish, "minutes") 33 | continue 34 | else: 35 | for pod in json_object["items"]: 36 | audit_report[namespace_name][pod["metadata"]["name"]] = {} 37 | for container in pod["spec"]["containers"]: 38 | audit_report[namespace_name][pod["metadata"]["name"]][container["name"]] = {} 39 | if "resources" in container: 40 | if "requests" in container["resources"] and "limits" in container["resources"]: 41 | audit_report[namespace_name][pod["metadata"]["name"]][container["name"]] = { 42 | "requests": "present", 43 | "limits": "present" 44 | } 45 | elif "limits" in container["resources"] and "requests" not in container["resources"]: 46 | audit_report[namespace_name][pod["metadata"]["name"]][container["name"]] = { 47 | "requests": "not present", 48 | "limits": "present" 49 | } 50 | elif "limits" not in container["resources"] and "requests" in container["resources"]: 51 | audit_report[namespace_name][pod["metadata"]["name"]][container["name"]] = { 52 | "requests": "present", 53 | "limits": "not present" 54 | } 55 | else: 56 | audit_report[namespace_name][pod["metadata"]["name"]][container["name"]] = { 57 | "requests": "not present", 58 | "limits": "not present" 59 | } 60 | else: 61 | audit_report[namespace_name][pod["metadata"]["name"]][container["name"]] = { 62 | "requests": "not present", 63 | "limits": "not present" 64 | 
} 65 | processed_namespace_count += 1 66 | process_end = dt.datetime.now() 67 | how_long_to_finish = float(number_of_namespaces - processed_namespace_count) / (process_end - process_start).total_seconds() / 60 68 | if how_long_to_finish < 1: 69 | how_long_to_finish * 60 70 | print(how_long_to_finish, "seconds") 71 | else: 72 | print(how_long_to_finish, "minutes") 73 | 74 | print(audit_report) 75 | -------------------------------------------------------------------------------- /scripts/k8s_probes.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import json 3 | import datetime as dt 4 | 5 | def execute_kube_command_json(command): 6 | kube_command = subprocess.run(command, stdout=subprocess.PIPE, shell=True) 7 | json_output = str(kube_command.stdout.decode("utf-8")) 8 | json_object = json.loads(json_output) 9 | return json_object 10 | 11 | def process_namespace(namespace): 12 | namespace_name = namespace["metadata"]["name"] 13 | command = f"kubectl get pods -n {namespace_name} -o json" 14 | json_object = execute_kube_command_json(command) 15 | audit_report_probes[namespace_name] = {} 16 | if not json_object["items"]: 17 | return 18 | for pod in json_object["items"]: 19 | audit_report_probes[namespace_name][pod["metadata"]["name"]] = {} 20 | for container in pod["spec"]["containers"]: 21 | audit_report_probes[namespace_name][pod["metadata"]["name"]][container["name"]] = { 22 | "livenessProbe": "present" if "livenessProbe" in container else "not present", 23 | "readinessProbe": "present" if "readinessProbe" in container else "not present" 24 | } 25 | 26 | audit_report_probes = {} 27 | command = "kubectl get namespaces -o json" 28 | json_object = execute_kube_command_json(command) 29 | number_of_namespaces = len(json_object["items"]) 30 | 31 | for processed_namespace_count, namespace in enumerate(json_object["items"], start=1): 32 | print(f"Processing Namespace {processed_namespace_count} of 
{number_of_namespaces}") 33 | process_start = dt.datetime.now() 34 | process_namespace(namespace) 35 | process_end = dt.datetime.now() 36 | how_long_to_finish = float(number_of_namespaces - processed_namespace_count) / (process_end - process_start).total_seconds() / 60 37 | if how_long_to_finish < 1: 38 | how_long_to_finish *= 60 39 | print(f"{how_long_to_finish:.2f} seconds") 40 | else: 41 | print(f"{how_long_to_finish:.2f} minutes") 42 | 43 | print(audit_report_probes) 44 | -------------------------------------------------------------------------------- /scripts/k8s_shortcuts.sh: -------------------------------------------------------------------------------- 1 | # Set up autocomplete in bash into the current shell 2 | source <(kubectl completion bash) 3 | echo "source <(kubectl completion bash)" >> ~/.bashrc # add autocomplete permanently to your bash shell. 4 | 5 | # set up autocomplete in zsh into the current shell 6 | source <(kubectl completion zsh) 7 | echo '[[ $commands[kubectl] ]] && source <(kubectl completion zsh)' >> ~/.zshrc # add autocomplete permanently to your zsh shell 8 | 9 | # kubectl plugins 10 | alias k=kubectl 11 | complete -F __start_kubectl k 12 | 13 | # Config view 14 | alias kcv="kubectl config view" 15 | alias kcc="kubectl config current-context" 16 | alias kcu="kubectl config use-context" 17 | alias kcs="kubectl config set-cluster" 18 | alias kcsc="kubectl config set-credentials" 19 | alias kcscn="kubectl config set-context --current --namespace" 20 | # short alias to set/show context/namespace (only works for bash and bash-compatible shells, current context to be set before using kn to set namespace) 21 | alias kx='f() { [ "$1" ] && kubectl config use-context $1 || kubectl config current-context ; } ; f' 22 | alias kn='f() { [ "$1" ] && kubectl config set-context --current --namespace $1 || kubectl config view --minify | grep namespace | cut -d" " -f6 ; } ; f' 23 | 24 | # Apply file yaml 25 | alias kaf='kubectl apply -f' 26 | 27 | # 
Drop into an interactive terminal on a container
alias keti='kubectl exec -ti'

# General aliases
alias kdel='kubectl delete'
alias kdelf='kubectl delete -f'

# Pod management
alias kgp='kubectl get pods'
alias kgpw='kgp --watch'
alias kgpwide='kgp -o wide'
alias kep='kubectl edit pods'
alias kdp='kubectl describe pods'
alias kdelp='kubectl delete pods'

# Service management.
alias kgs='kubectl get svc'
alias kgsw='kgs --watch'
alias kgswide='kgs -o wide'
alias kes='kubectl edit svc'
alias kds='kubectl describe svc'
alias kdels='kubectl delete svc'

# Namespace management
alias kgns='kubectl get namespaces'
alias kens='kubectl edit namespace'
alias kdns='kubectl describe namespace'
alias kdelns='kubectl delete namespace'

# ConfigMap management
alias kgcm='kubectl get configmaps'
alias kecm='kubectl edit configmap'
alias kdcm='kubectl describe configmap'
alias kdelcm='kubectl delete configmap'

# Secret management
alias kgsec='kubectl get secret'
alias kdsec='kubectl describe secret'
alias kdelsec='kubectl delete secret'

# Deployment management.
alias kgd='kubectl get deployment'
alias kgdw='kgd --watch'
alias kgdwide='kgd -o wide'
alias ked='kubectl edit deployment'
alias kdd='kubectl describe deployment'
alias kdeld='kubectl delete deployment'
alias ksd='kubectl scale deployment'
alias krsd='kubectl rollout status deployment'
# Force a rollout by bumping an env var on the given resource.
kres(){
  # Bug fix: quote "$@" and the command substitution so arguments containing
  # whitespace are passed through to kubectl intact.
  kubectl set env "$@" REFRESHED_AT="$(date +%Y%m%d%H%M%S)"
}

# Rollout management.
alias kru="kubectl rollout undo"
alias krp="kubectl rollout pause"
alias krr="kubectl rollout resume"
alias krh="kubectl rollout history"

# Set the image of a deployment
alias ksi="kubectl set image"

# Statefulset management.
# Bug fix: the statefulset aliases reused the svc names (kgs, kds), silently
# clobbering them, and `kds` was defined twice in a row so the describe alias
# was dead. Renamed to a distinct kXss family.
alias kgss="kubectl get statefulsets"
alias kgas="kubectl get statefulsets --all-namespaces"
alias kdss="kubectl describe statefulset"
alias kdelss="kubectl delete statefulset"
alias kss="kubectl scale statefulset"
alias kps="kubectl patch statefulset"

# Node Management
alias kgno='kubectl get nodes'
alias keno='kubectl edit node'
alias kdno='kubectl describe node'
alias kdelno='kubectl delete node'

# Port forwarding
alias kpf="kubectl port-forward"

# Tools for accessing all information
alias kga='kubectl get all'
alias kgaa='kubectl get all --all-namespaces'

# Logs
alias kl='kubectl logs'
alias klf='kubectl logs -f'

# Replace a resource by filename or stdin
alias kcr="k replace"

# Update a resource using strategic merge patch
alias kcp="k patch"

# Expose a resource as a new Kubernetes service
alias kce="k expose"

# Update the labels on a resource
alias kcl="k label"

# Set a new size for a Deployment, ReplicaSet, Replication Controller, or StatefulSet
# Bug fix: this alias was named kcs, which clobbered kcs="kubectl config set-cluster".
alias kscale="k scale"
-------------------------------------------------------------------------------- /scripts/label_k8s_resources.sh: --------------------------------------------------------------------------------
#!/bin/bash
set -eu

# Get the namespace
get_namespace() {
  read -p "Enter the Kubernetes namespace to label resources in: " NAMESPACE

  if ! kubectl get namespace "$NAMESPACE" > /dev/null 2>&1; then
    echo "Error: Namespace '$NAMESPACE' does not exist."
    exit 1
  fi
}

# Get the label
get_label() {
  read -p "Enter the label in the format key=value: " LABEL

  if [[ ! "$LABEL" =~ ^[a-zA-Z0-9._-]+=[a-zA-Z0-9._-]+$ ]]; then
    echo "Error: Label format is invalid. Use key=value format."
exit 1
  fi

  KEY=$(echo "$LABEL" | cut -d '=' -f 1)
  VALUE=$(echo "$LABEL" | cut -d '=' -f 2)
}

# Confirm the operation
confirm_operation() {
  echo "You are about to label all resources in the namespace '$NAMESPACE' with '$KEY=$VALUE'."
  read -p "Do you want to proceed? (yes/no): " CONFIRM
  if [[ "$CONFIRM" != "yes" ]]; then
    echo "Operation canceled."
    exit 0
  fi
}

# Label all resources in the namespace
label_resources() {
  RESOURCE_TYPES=$(kubectl api-resources --verbs=list --namespaced -o name)

  for RESOURCE_TYPE in $RESOURCE_TYPES; do
    echo "Labeling resources of type $RESOURCE_TYPE in namespace $NAMESPACE..."
    # Bug fix: under `set -e` a failed kubectl aborted the script before the
    # original `[[ $? -ne 0 ]]` check could run; test the command directly so
    # a failure only emits the warning and the loop continues.
    if ! kubectl label "$RESOURCE_TYPE" -n "$NAMESPACE" --all "$LABEL" --overwrite; then
      echo "Warning: Failed to label some resources of type $RESOURCE_TYPE."
    fi
  done

  echo "Labeling completed for namespace '$NAMESPACE' with label '$KEY=$VALUE'."
}

# Main script
get_namespace
get_label
confirm_operation
label_resources
-------------------------------------------------------------------------------- /scripts/list_k8s_secrets.sh: --------------------------------------------------------------------------------
#!/bin/bash

# This script lists Kubernetes Secrets currently in use by pods in a specified namespace.
# Usage:
#   ./list_k8s_secrets.sh
#   ./list_k8s_secrets.sh -t <namespace>

set -eu

# List namespaces
list_namespaces() {
  kubectl get namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | nl
}

# List secrets in use by pods in a given namespace
list_secrets_with_pods() {
  local namespace=$1
  pods=$(kubectl get pods -n "$namespace" -o json)
  secrets=$(echo "$pods" | jq -r '.items[] | select(.spec.containers[].env[]?.valueFrom.secretKeyRef.name != null) | .metadata.name as $pod | .spec.containers[].env[]?.valueFrom.secretKeyRef.name as $secret | "\($secret) \($pod)"' | sort | uniq)

  if [[ -z "$secrets" ]]; then
    echo "No secrets used by Pods in the current namespace: $namespace"
  else
    echo "Secrets and the Pods that use them in namespace: $namespace"
    echo "$secrets" | while read -r secret pod; do
      echo "Secret: $secret, Pod: $pod"
    done
  fi
}

# Main script
if [[ "$#" -eq 2 && "$1" == "-t" ]]; then
  namespace=$2
else
  echo "Available namespaces:"
  namespaces=$(kubectl get namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}')
  echo "$namespaces" | nl
  echo
  read -p "Enter the namespace number: " namespace_number

  total_namespaces=$(echo "$namespaces" | wc -l)
  if [[ "$namespace_number" -lt 1 || "$namespace_number" -gt "$total_namespaces" ]]; then
    echo "Invalid namespace number. Please try again."
exit 1
  fi

  namespace=$(echo "$namespaces" | sed -n "${namespace_number}p")
fi

echo "Listing secrets in use by pods in namespace: $namespace"
list_secrets_with_pods "$namespace"
-------------------------------------------------------------------------------- /scripts/pod_resource_req.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Check if resource requests are defined for a pod.
# Bug fix: kubectl exits 0 even when the jsonpath result is empty, so the
# original exit-status check classified every existing pod as having requests.
# Test the command's *output* for non-emptiness instead.
has_resource_requests() {
  local pod="$1"
  local namespace="$2"
  local requests
  requests=$(kubectl get pods "$pod" -n "$namespace" -o jsonpath='{.spec.containers[0].resources.requests}' 2>/dev/null)
  [[ -n "$requests" ]]
}

declare -a pods_with_resource_requests
declare -a pods_without_resource_requests

# Fetch all namespaces
while read -r namespace; do
  while read -r pod; do
    if has_resource_requests "$pod" "$namespace"; then
      pods_with_resource_requests+=("$pod,$namespace")
    else
      pods_without_resource_requests+=("$pod,$namespace")
    fi
  done < <(kubectl get pods -n "$namespace" -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}')
done < <(kubectl get namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}')

echo "Pods With Resource Requests"
printf '%s\n' "${pods_with_resource_requests[@]}"

echo -e "\n=========\n"

echo "Pods Without Resource Requests"
printf '%s\n' "${pods_without_resource_requests[@]}"
-------------------------------------------------------------------------------- /scripts/pods_limit_check.sh: --------------------------------------------------------------------------------
#!/bin/bash

pods_with_limit_checks=""
pods_without_limit_checks=""

for namespace in $(kubectl get namespaces | grep -v NAME | cut -d ' ' -f 1); do
  for pod in $(kubectl get pods -n "$namespace" | grep -v NAME | cut -d ' ' -f 1); do
    limit_check=$(kubectl get pods "$pod" -n "$namespace" -o yaml | grep limits)
    # Bug fixes: the original tested $limits_check (never set; every pod fell
    # into the "with limits" bucket) and the else branch appended to the
    # *with* accumulator instead of the *without* one.
    if [ -n "$limit_check" ]; then
      pods_with_limit_checks="$pods_with_limit_checks\n$pod,$namespace"
    else
      pods_without_limit_checks="$pods_without_limit_checks\n$pod,$namespace"
    fi
  done
done

echo "Pods With Limit Checks"
# -e so the accumulated \n separators render as newlines.
echo -e "$pods_with_limit_checks"

echo ""
echo ""
echo "========="
echo ""
echo ""

echo "Pods Without Limit Checks"
echo -e "$pods_without_limit_checks"
-------------------------------------------------------------------------------- /scripts/pvc_pv_cleanup.sh: --------------------------------------------------------------------------------
#!/bin/bash

# List unmounted PVCs
# NOTE(review): the grep pattern below appears to have lost its "<none>"
# literal to extraction (an unmounted PVC shows `Mounted By: <none>`);
# restored — confirm against the original repo.
list_unmounted_pvc() {
  echo "Unmounted PVCs:"
  kubectl describe -A pvc | grep -E "^Name:.*$|^Namespace:.*$|^Mounted By:.*$" | grep -B 2 "<none>" | grep -E "^Name:.*$|^Namespace:.*$"
}

# List unmounted PVs
list_unmounted_pv() {
  echo "Unmounted PVs:"
  kubectl get pv | grep Released
}

# Delete unmounted PVCs and PVs
delete_unmounted_resources() {
  echo "Deleting unmounted PVCs and PVs..."
  kubectl describe -A pvc | grep -E "^Name:.*$|^Namespace:.*$|^Mounted By:.*$" | grep -B 2 "<none>" | grep -E "^Name:.*$|^Namespace:.*$" | cut -f2 -d: | paste -d " " - - | xargs -n2 bash -c 'kubectl -n ${1} delete pvc ${0}'
  kubectl get pv | grep Released | awk '{print $1}' | xargs -I{} kubectl delete pv {}
  echo "Cleanup completed!"
}

# Main menu
while true; do
  echo "Choose an option:"
  echo "1. List unmounted PVCs"
  echo "2. List unmounted PVs"
  echo "3. Delete unmounted PVCs and PVs"
  echo "4. Exit"
  read -p "Enter your choice: " choice

  case "$choice" in
    1) list_unmounted_pvc ;;
    2) list_unmounted_pv ;;
    3) delete_unmounted_resources ;;
    4) echo "Exiting. Goodbye!"; exit ;;
    *) echo "Invalid choice. Please select a valid option."
;;
  esac
done
-------------------------------------------------------------------------------- /scripts/top_pods_nodes.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Show all namespaces in the cluster.
list_namespaces() {
  kubectl get namespaces
}

# Prompt for a namespace and show its pod resource usage.
top_pods() {
  local ns
  read -p "Enter the namespace name: " ns
  if kubectl get namespace "$ns" &>/dev/null; then
    kubectl top pods -n "$ns"
  else
    echo "Namespace '$ns' does not exist."
  fi
}

# Show node resource usage.
top_nodes() {
  kubectl top nodes
}

# Main menu
echo "Choose an option:"
echo "1. List namespaces"
echo "2. Display top pods for a namespace"
echo "3. Display top nodes"

read -p "Enter your choice: " choice

case "$choice" in
  1) list_namespaces ;;
  2) top_pods ;;
  3) top_nodes ;;
  *) echo "Invalid choice. Please select a valid option."
;;
esac
-------------------------------------------------------------------------------- /scripts/troubleshoot_app.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Check for the required arguments
check_arguments() {
  if [[ $# -lt 2 ]]; then
    echo "Usage: $0 <app_name> <namespace>"
    exit 1
  fi
}

# Display usage information
print_usage() {
  echo "Usage: $0 <app_name> <namespace>"
}

# Get pod details: one pod name per line for the given app label.
get_pod_details() {
  local app_name="$1"
  local namespace="$2"
  kubectl get pods -n "$namespace" -l app="$app_name" -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'
}

# Check pod health
check_pod_health() {
  local app_name="$1"
  local namespace="$2"
  # Reuse get_pod_details instead of duplicating its kubectl query.
  local pod_names
  pod_names=$(get_pod_details "$app_name" "$namespace")

  local all_pods_healthy=true
  while read -r pod_name; do
    local pod_status=$(kubectl get pod "$pod_name" -n "$namespace" -o jsonpath='{.status.phase}' 2>/dev/null)
    if [[ "$pod_status" != "Running" ]]; then
      echo "Pod '$pod_name' is not running!"
      all_pods_healthy=false
    fi
  done <<< "$pod_names"

  if [[ "$all_pods_healthy" == "true" ]]; then
    echo "All pods for '$app_name' seem healthy."
  fi
}

# Get pod logs
get_pod_logs() {
  local app_name="$1"
  local namespace="$2"
  local pod_names
  pod_names=$(get_pod_details "$app_name" "$namespace")

  while read -r pod_name; do
    kubectl logs "$pod_name" -n "$namespace"
  done <<< "$pod_names"
}

# Check resource utilization
check_resource_utilization() {
  local app_name="$1"
  local namespace="$2"

  echo "Events for $app_name in namespace $namespace:"
  kubectl get events -n "$namespace" --field-selector involvedObject.kind=Pod

  # Bug fix: --no-headers so the header row is not counted as an HPA.
  local hpa_count=$(kubectl get hpa -n "$namespace" -l app="$app_name" --no-headers 2>/dev/null | wc -l)
  echo "HPA count for $app_name in namespace $namespace: $hpa_count"
}

# Describe pods
describe_pods() {
  local app_name="$1"
  local namespace="$2"
  # Bug fix: the app name is a label value, not a pod name; select by label.
  kubectl describe pods -n "$namespace" -l app="$app_name"
}

# Main menu
main_menu() {
  echo "Choose an option:"
  echo "1. Get pod details"
  echo "2. Check pod health"
  echo "3. Get pod logs"
  echo "4. Check resource utilization"
  echo "5. Describe pods"
  echo "6. Exit"
  read -p "Enter your choice: " choice

  case "$choice" in
    1) get_pod_details "$app_name" "$namespace";;
    2) check_pod_health "$app_name" "$namespace";;
    3) get_pod_logs "$app_name" "$namespace";;
    4) check_resource_utilization "$app_name" "$namespace";;
    5) describe_pods "$app_name" "$namespace";;
    6) echo "Exiting..."; exit 0;;
    *) echo "Invalid choice. 
Please select a valid option.";;
  esac
}

# Main script
check_arguments "$@"
app_name="$1"
namespace="$2"
main_menu
-------------------------------------------------------------------------------- /scripts/update_aks_sp.sh: --------------------------------------------------------------------------------
#!/bin/bash

set -eu

# Bug fix: pre-initialise these so the `until [ -n ... ]` prompts and the
# SP_ID check below do not abort with "unbound variable" under `set -u`.
AKS_NAME=""
RG_NAME=""
SP_ID=""

# Check service principal expiration date
check_sp_expiration() {
  echo "Checking service principal expiration date..."
  until [ -n "$AKS_NAME" ]; do
    read -p "Enter AKS cluster name: " AKS_NAME
    [ -z "$AKS_NAME" ] && echo "AKS cluster name cannot be empty. Please enter a valid name."
  done

  until [ -n "$RG_NAME" ]; do
    read -p "Enter the resource group name: " RG_NAME
    [ -z "$RG_NAME" ] && echo "Resource group name cannot be empty. Please enter a valid name."
  done

  SP_ID=$(az aks show --resource-group "$RG_NAME" --name "$AKS_NAME" --query servicePrincipalProfile.clientId --output tsv)
  az ad app credential list --id "$SP_ID" --query "[].endDateTime" --output tsv
}

# Reset service principal credentials
reset_sp() {
  # Bug fix: SP_ID is discovered by check_sp_expiration; if the user picks
  # menu option 2 first it was unset (set -u abort). Look it up on demand.
  if [ -z "$SP_ID" ]; then
    check_sp_expiration
  fi
  echo "Resetting expired service principal credentials..."
  SP_SECRET=$(az ad app credential reset --id "$SP_ID" --query password -o tsv)
  az ad sp credential reset --id "$SP_ID" --password "$SP_SECRET"
}

# Main menu
while true; do
  echo "Select an option:"
  echo "1. Check service principal expiration date"
  echo "2. Reset service principal and update AKS cluster"
  echo "3. Exit"
  read -p "Enter your choice: " choice

  case "$choice" in
    1) check_sp_expiration ;;
    2) reset_sp ;;
    3) echo "Exiting. Goodbye!" && exit ;;
    *) echo "Invalid choice. Please select 1, 2, or 3." ;;
  esac
done
-------------------------------------------------------------------------------- /scripts/update_azureCNI_to_cilium.sh: --------------------------------------------------------------------------------
#!/bin/bash
set -eu

check_azure_cli_version() {
  local required_version="2.52.0"
  # Split declaration from assignment so a failing `az` is not masked by `local`.
  local installed_version
  installed_version=$(az --version | grep -oE '([0-9]+\.[0-9]+\.[0-9]+)')

  if [[ "$(printf '%s\n' "$required_version" "$installed_version" | sort -V | head -n1)" != "$required_version" ]]; then
    echo "Azure CLI version $required_version or later is required."
    read -p "Do you want to update Azure CLI? (y/n): " upgrade_choice
    if [[ "$upgrade_choice" == [Yy]* ]]; then
      echo "Updating Azure CLI..."
      az upgrade --yes
      echo "Azure CLI has been updated."
    else
      echo "Please update Azure CLI manually and then run this script again."
      exit 1
    fi
  fi
}

validate_cluster_and_resource_group() {
  local cluster_name="$1"
  local resource_group="$2"

  if ! az aks show -n "$cluster_name" -g "$resource_group" &>/dev/null; then
    echo "Error: Cluster '$cluster_name' in resource group '$resource_group' not found."
    exit 1
  fi
}

update_cluster_to_cilium() {
  read -p "Enter your AKS cluster name: " cluster_name
  read -p "Enter the resource group name where the cluster is located: " resource_group

  validate_cluster_and_resource_group "$cluster_name" "$resource_group"

  if az aks update -n "$cluster_name" -g "$resource_group" --network-dataplane cilium; then
    echo "The Azure CNI on '$cluster_name' has been updated to use Cilium dataplane."
  else
    exit_code=$?
    echo "Failed to update the Azure CNI on '$cluster_name'. Exit code: $exit_code"
    exit $exit_code
  fi
}

main_menu() {
  echo "=== AKS Cluster Update Script ==="
  echo "1. Check Azure CLI version"
  echo "2. 
# Display the menu once and dispatch the chosen action.
main_menu() {
  echo "=== AKS Cluster Update Script ==="
  echo "1. Check Azure CLI version"
  echo "2. Update AKS cluster to use Cilium"
  echo "3. Exit"
  read -rp "Enter your choice (1/2/3): " choice

  case "$choice" in
    1) check_azure_cli_version ;;
    2) update_cluster_to_cilium ;;
    3) echo "Exiting. Goodbye!" ; exit ;;
    *) echo "Invalid choice. Please select 1, 2, or 3." ;;
  esac
}

# Fix: the original defined main_menu but never invoked it, so running the
# script did nothing. Re-show the menu until the user picks "Exit".
while true; do
  main_menu
done

# --- /scripts/update_hashi_keys.sh --------------------------------------

# Vars:
VAULT_ADDR="https://vault.example.com"

# Prompt for a secret without echoing it; prints the value on stdout.
# Returns non-zero when stdin is closed so callers can abort instead of
# looping forever on EOF.
read_password() {
  local prompt=$1
  local password=""
  read -rs -p "$prompt" password || return 1
  printf '%s' "$password"
}

# Return 0 when the token can look itself up against Vault.
# Fix: the original echoed the token itself on both success and failure,
# leaking a secret to the terminal (and any captured logs).
validate_token() {
  local token=$1
  local response
  response=$(curl -s -o /dev/null -w "%{http_code}" \
    -H "X-Vault-Token: $token" "$VAULT_ADDR/v1/auth/token/lookup-self")
  if [ "$response" -eq 200 ]; then
    echo "Vault token is valid."
    return 0
  else
    echo "Invalid vault token (HTTP $response)."
    return 1
  fi
}

VAULT_TOKEN=$(read_password "Enter the vault token: ") || { echo "No token provided." >&2; exit 1; }
echo ""

while ! validate_token "$VAULT_TOKEN"; do
  VAULT_TOKEN=$(read_password "Enter a new vault token: ") || { echo "No token provided." >&2; exit 1; }
  echo ""
done
# Escape backslashes and double quotes so a secret can be embedded in a
# JSON string without breaking (or injecting into) the payload.
json_escape() {
  local s=$1
  s=${s//\\/\\\\}
  s=${s//\"/\\\"}
  printf '%s' "$s"
}

# Write a secret value to the KV v2 engine at secret/data/<key_name>.
# Fixes: the KV v2 endpoint expects the payload wrapped in a "data"
# object; the original sent {"value": ...}, which the v2 API rejects.
# The payload is piped to curl via stdin so the secret never appears in
# 'ps' output, and the value is JSON-escaped instead of interpolated raw.
update_key() {
  local key_name=$1
  local password=$2
  printf '{"data": {"value": "%s"}}' "$(json_escape "$password")" \
    | curl -s -X POST -H "X-Vault-Token: $VAULT_TOKEN" \
        -d @- "$VAULT_ADDR/v1/secret/data/$key_name"
}

# Return 0 when the key already exists in the KV store (HTTP 200).
validate_key() {
  local key_name=$1
  local response
  response=$(curl -s -o /dev/null -w "%{http_code}" \
    -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/secret/data/$key_name")
  if [ "$response" -eq 200 ]; then
    echo "Valid key name: $key_name"
    return 0
  else
    echo "Invalid key name: $key_name"
    return 1
  fi
}

# Return 0 when both passwords are identical.
compare_passwords() {
  local password1=$1
  local password2=$2
  if [[ "$password1" == "$password2" ]]; then
    echo "Passwords match"
    return 0
  else
    echo "Passwords do not match"
    return 1
  fi
}

read -rp "How many keys do you want to update? " num_keys

while ! [[ "$num_keys" =~ ^[1-9][0-9]*$ ]]; do
  echo "Invalid number of keys: $num_keys"
  # '|| exit 1' stops an infinite re-prompt loop when stdin is closed.
  read -rp "Enter a positive integer: " num_keys || exit 1
done

for ((i = 1; i <= num_keys; i++)); do
  read -rp "Enter a key name for key #$i: " key_name

  while ! validate_key "$key_name"; do
    read -rp "Enter a new key name for key #$i: " key_name || exit 1
  done

  password1=$(read_password "Enter a password for $key_name: ")
  echo ""
  password2=$(read_password "Re-enter the password for $key_name: ")
  echo ""

  while ! compare_passwords "$password1" "$password2"; do
    password1=$(read_password "Enter a new password for $key_name: ")
    echo ""
    password2=$(read_password "Re-enter the new password for $key_name: ")
    echo ""
  done

  update_key "$key_name" "$password1"
done

echo "All keys updated successfully"
#!/bin/bash
# /scripts/upgrade_k8s.sh
# Usage: ./upgrade_k8s.sh <node-name>
# Drains a node, upgrades the Kubernetes packages, and uncordons it.

set -eu

# Prompt for yes/no confirmation; prints "yes" or "no" on stdout.
# Defaults to "no" on empty input or closed stdin (safe under 'set -eu').
ask_yes_or_no() {
  local reply=""
  read -rp "$1 ([y]es or [N]o): " reply || true
  case "$(echo "$reply" | tr '[:upper:]' '[:lower:]')" in
    y|yes) echo "yes" ;;
    *) echo "no" ;;
  esac
}

# Prompt user for confirmation before proceeding
if [[ "no" == $(ask_yes_or_no "Are you sure you want to upgrade Kubernetes?") ]]; then
  echo "Upgrade cancelled."
  exit 1
fi

# Get current Kubernetes version
current_kube_version=$(kubeadm version -o short)
echo "Current Kubernetes version: $current_kube_version"

# Prompt user for desired Kubernetes versions.
# NOTE(review): apt normally requires the full package revision (e.g.
# "1.27.2-00"), not just the upstream version — confirm against your repo.
read -rp "Enter the desired kubeadm version (e.g., 1.27.2): " KUBEADM_VERSION
read -rp "Enter the desired kubelet version (e.g., 1.27.2): " KUBELET_VERSION
read -rp "Enter the desired kubectl version (e.g., 1.27.2): " KUBECTL_VERSION

# Evict workloads and mark the node unschedulable before upgrading.
# Fix: the original ran 'sudo kubectl' here but plain 'kubectl' elsewhere;
# sudo switches to root's kubeconfig and usually breaks cluster access, so
# kubectl is invoked consistently as the current user.
drain_node() {
  local node_name="$1"
  echo "Draining node $node_name"
  kubectl drain "$node_name" --ignore-daemonsets
}
# Upgrade the Kubernetes packages on this node and apply the node upgrade.
upgrade_node() {
  echo "Upgrading kubeadm, kubelet, and kubectl..."
  sudo apt-mark unhold kubeadm kubelet kubectl && \
  sudo apt-get update && \
  sudo apt-get install -y \
    "kubeadm=$KUBEADM_VERSION" \
    "kubelet=$KUBELET_VERSION" \
    "kubectl=$KUBECTL_VERSION" && \
  sudo apt-mark hold kubeadm kubelet kubectl
  # Fix: the original installed the new packages but never ran
  # 'kubeadm upgrade' or restarted the kubelet, leaving the node
  # half-upgraded. 'upgrade node' is correct for workers and secondary
  # control-plane nodes; run 'kubeadm upgrade apply <version>' manually
  # on the first control-plane node instead.
  sudo kubeadm upgrade node
  sudo systemctl daemon-reload
  sudo systemctl restart kubelet
  echo "Upgraded to versions:"
  kubeadm version
}

# Bring the node back online by marking it schedulable again.
uncordon_node() {
  local node_name="$1"
  echo "Bringing the node back online by marking it schedulable..."
  kubectl uncordon "$node_name"
}

# Main script: drain -> upgrade -> uncordon.
main() {
  # Fail with a usage message instead of a cryptic 'unbound variable'
  # error when the node name is missing (the script runs under 'set -eu').
  if [ "$#" -lt 1 ]; then
    echo "Usage: $0 <node-name>" >&2
    exit 2
  fi
  local node_name="$1"
  drain_node "$node_name"
  upgrade_node
  uncordon_node "$node_name"
}

main "$@"