├── cni ├── README.md └── install_calico.sh ├── security ├── README.md ├── postee │ ├── README.md │ └── install_postee.sh ├── trivy │ ├── README.md │ └── install_trivy.sh └── zero_trust │ └── README.md ├── calico_enterprise ├── .gitignore ├── cis_benchmark │ ├── cis.yaml │ └── README.md ├── README.md └── install_calico_enterprise.sh ├── .gitignore ├── doc ├── tutorials │ └── index.md ├── README.md ├── install │ ├── minikube.md │ ├── index.md │ └── openshift.md ├── integration │ └── index.md ├── operations │ ├── configuration.md │ └── faq.md └── index.md ├── hpa ├── sidecar │ ├── tcp-exporter │ │ ├── ocp │ │ │ ├── rockylinux.repo │ │ │ └── Dockerfile │ │ ├── Dockerfile.ubi │ │ ├── ocp-imagestream.yaml │ │ ├── Dockerfile │ │ ├── ocp-buildconfig.yaml │ │ ├── README.md │ │ └── tcp_exporter.py │ ├── conntrack-network-init │ │ ├── init.sh │ │ ├── ocp-imagestream.yaml │ │ ├── Dockerfile │ │ ├── ocp │ │ │ └── Dockerfile │ │ ├── ocp-buildconfig.yaml │ │ └── README.md │ ├── README.md │ ├── builder.sh │ ├── bootstrap.sh │ └── openshift.md ├── ocp │ ├── openshift-keda-controller.yaml │ ├── openshift-keda-operator-group.yaml │ ├── openshift-keda-subscription.yaml │ ├── README.md │ ├── configure_serviceaccount.sh │ └── configure_hpa.sh ├── frontend-ingress.yaml ├── recommendations-hpa.yaml ├── README.md ├── configure_hpa.sh ├── install_sidecar_init.sh └── install_scaled_objects.sh ├── ocp ├── user-workload-monitoring-config.yaml ├── cluster-monitoring-config.yaml ├── README.md └── setup_ocp.sh ├── monitoring ├── README.md ├── configure_grafana_dashboards.sh ├── configure_prometheus.sh └── dashboards │ └── boutique-grafana-dashboards.yaml ├── rke2 ├── README.md ├── remove_rke2.sh ├── stop_rke2.sh ├── localenv.sh └── setup_rke2.sh ├── ebpf ├── README.md ├── disable_ebpf.sh └── enable_ebpf.sh ├── kind ├── calico_cluster.yaml ├── configure_colima.sh ├── install_ingress.sh ├── ingress-dns-pod.yaml ├── quickstart_kind.sh ├── lima_default.yaml ├── docker.yaml └── README.md ├── app ├── 
README.md └── install_boutique.sh ├── rancher ├── README.md ├── install_rancher.sh ├── install_opa_gatekeeper.sh └── install_monitoring.sh ├── ingress ├── README.md └── nginx │ └── install_nginx_ingress.sh ├── microk8s ├── remove_microk8s.sh ├── README.md └── setup_microk8s.sh ├── quickstart.sh ├── quickstart_enterprise.sh ├── platform.sh ├── setup_k8s.sh ├── env.sh └── README.md /cni/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /security/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /security/postee/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /security/trivy/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /calico_enterprise/.gitignore: -------------------------------------------------------------------------------- 1 | tigera-pull-secret.json 2 | calico-enterprise-license.yaml 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | venv 2 | private 3 | ocp/debug.log 4 | app/microservices-demo 5 | ingress/nginx/kubernetes-ingress 6 | -------------------------------------------------------------------------------- /doc/tutorials/index.md: -------------------------------------------------------------------------------- 1 | # Tutorials 2 | 3 | A list of tutorials and examples using this full stack environment is located 4 | here in this directory. 
5 | -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/ocp/rockylinux.repo: -------------------------------------------------------------------------------- 1 | [rockylinux_base] 2 | name=Rocky Linux Base 3 | baseurl=http://dl.rockylinux.org/vault/rocky/8.5/BaseOS/x86_64/os/ 4 | enabled=1 5 | gpgcheck=0 -------------------------------------------------------------------------------- /hpa/sidecar/conntrack-network-init/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -o xtrace 3 | 4 | iptables -A INPUT -m conntrack --ctstate ESTABLISHED 5 | #nft add rule ip filter INPUT ct state established 6 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | # kubernetes-dev-env docs 2 | 3 | This folder contains documentation arranged accordingly for this project as a 4 | whole. Please see [index](index.md) as an entry point for all information. 
-------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/Dockerfile.ubi: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8/ubi 2 | 3 | RUN dnf install -y conntrack-tools python39 && dnf clean all 4 | COPY tcp_exporter.py / 5 | 6 | ENTRYPOINT ["python3", "/tcp_exporter.py"] 7 | CMD ["9100", "8080"] 8 | -------------------------------------------------------------------------------- /ocp/user-workload-monitoring-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: user-workload-monitoring-config 5 | namespace: openshift-user-workload-monitoring 6 | data: 7 | config.yaml: | 8 | prometheusOperator: 9 | logLevel: debug 10 | -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/ocp-imagestream.yaml: -------------------------------------------------------------------------------- 1 | kind: ImageStream 2 | apiVersion: image.openshift.io/v1 3 | metadata: 4 | annotations: 5 | openshift.io/display-name: tcp-exporter 6 | name: tcp-exporter 7 | namespace: default 8 | spec: 9 | lookupPolicy: 10 | local: true 11 | -------------------------------------------------------------------------------- /doc/install/minikube.md: -------------------------------------------------------------------------------- 1 | # Minikube 2 | 3 | The Kubernetes project includes a special interest group that makes Kubernetes 4 | available locally for learning and development. You can find a variety of 5 | instructions for various platforms on [minikube start](https://minikube.sigs.k8s.io/docs/start/). 
-------------------------------------------------------------------------------- /hpa/sidecar/conntrack-network-init/ocp-imagestream.yaml: -------------------------------------------------------------------------------- 1 | kind: ImageStream 2 | apiVersion: image.openshift.io/v1 3 | metadata: 4 | annotations: 5 | openshift.io/display-name: conntrack-network-init 6 | name: conntrack-network-init 7 | namespace: default 8 | spec: 9 | lookupPolicy: 10 | local: true -------------------------------------------------------------------------------- /hpa/ocp/openshift-keda-controller.yaml: -------------------------------------------------------------------------------- 1 | kind: KedaController 2 | apiVersion: keda.sh/v1alpha1 3 | metadata: 4 | name: keda 5 | namespace: openshift-keda 6 | spec: 7 | watchNamespace: '' 8 | operator: 9 | logLevel: info 10 | logEncoder: console 11 | metricsServer: 12 | logLevel: '0' 13 | serviceAccount: {} 14 | -------------------------------------------------------------------------------- /ocp/cluster-monitoring-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cluster-monitoring-config 5 | namespace: openshift-monitoring 6 | data: 7 | config.yaml: | 8 | enableUserWorkload: true 9 | prometheusK8s: 10 | retention: 24h 11 | resources: 12 | requests: 13 | cpu: 200m 14 | memory: 2Gi -------------------------------------------------------------------------------- /hpa/sidecar/conntrack-network-init/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | RUN apk add --no-cache \ 4 | iptables \ 5 | libcap && \ 6 | setcap CAP_NET_ADMIN,CAP_NET_RAW,CAP_DAC_READ_SEARCH,CAP_DAC_OVERRIDE=+ep "$(readlink -f `which iptables`)" 7 | 8 | COPY init.sh /usr/local/bin/ 9 | RUN chmod +x /usr/local/bin/init.sh 10 | 11 | ENTRYPOINT ["init.sh"] 
-------------------------------------------------------------------------------- /hpa/sidecar/conntrack-network-init/ocp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8 2 | 3 | RUN yum install -y iptables && \ 4 | yum clean all && \ 5 | setcap CAP_NET_ADMIN,CAP_NET_RAW,CAP_DAC_READ_SEARCH,CAP_DAC_OVERRIDE=+ep "$(readlink -f `which iptables`)" 6 | 7 | COPY init.sh /usr/local/bin/ 8 | RUN chmod +x /usr/local/bin/init.sh 9 | 10 | ENTRYPOINT ["init.sh"] -------------------------------------------------------------------------------- /calico_enterprise/cis_benchmark/cis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: projectcalico.org/v3 2 | kind: GlobalReport 3 | metadata: 4 | name: hourly-cis-results 5 | labels: 6 | deployment: production 7 | spec: 8 | reportType: cis-benchmark 9 | schedule: 0 * * * * 10 | cis: 11 | highThreshold: 100 12 | medThreshold: 50 13 | includeUnscoredTests: true 14 | numFailedTests: 5 15 | -------------------------------------------------------------------------------- /hpa/frontend-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: frontend-ingress 5 | spec: 6 | rules: 7 | - host: "boutique.test" 8 | http: 9 | paths: 10 | - pathType: Prefix 11 | path: "/" 12 | backend: 13 | service: 14 | name: frontend 15 | port: 16 | number: 80 -------------------------------------------------------------------------------- /hpa/ocp/openshift-keda-operator-group.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1 2 | kind: OperatorGroup 3 | metadata: 4 | annotations: 5 | olm.providedAPIs: 
ClusterTriggerAuthentication.v1alpha1.keda.sh,KedaController.v1alpha1.keda.sh,ScaledJob.v1alpha1.keda.sh,ScaledObject.v1alpha1.keda.sh,TriggerAuthentication.v1alpha1.keda.sh 6 | name: openshift-keda-og 7 | namespace: openshift-keda 8 | spec: {} -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | RUN apk add --no-cache \ 4 | conntrack-tools \ 5 | libcap \ 6 | curl && \ 7 | setcap CAP_NET_BIND_SERVICE=+eip "$(readlink -f `which python3`)" && \ 8 | setcap CAP_NET_ADMIN=+eip "$(readlink -f `which conntrack`)" 9 | 10 | COPY tcp_exporter.py / 11 | 12 | ENTRYPOINT ["python3", "/tcp_exporter.py"] 13 | CMD ["9100", "8080"] -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/ocp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8/python-39 2 | 3 | USER root 4 | COPY ./ocp/rockylinux.repo /etc/yum.repos.d/ 5 | 6 | RUN yum install -y conntrack-tools libcap && \ 7 | yum clean all && \ 8 | setcap CAP_NET_BIND_SERVICE=+eip "$(readlink -f `which python3`)" && \ 9 | setcap CAP_NET_ADMIN=+eip "$(readlink -f `which conntrack`)" 10 | 11 | COPY tcp_exporter.py / 12 | 13 | ENTRYPOINT ["python3", "/tcp_exporter.py"] 14 | CMD ["9100", "8080"] -------------------------------------------------------------------------------- /ocp/README.md: -------------------------------------------------------------------------------- 1 | # OpenShift Container Platform 2 | 3 | This module configures OpenShift Container Platform for use with the 4 | integrations and use cases in this repository. 
5 | 6 | ## Operations 7 | 8 | * Enable monitoring and allow user-defined project configuration 9 | * Configure restricted SCC with additional permissions 10 | 11 | ## References 12 | 13 | * [Enabling monitoring for user-defined projects](https://docs.openshift.com/container-platform/4.10/monitoring/enabling-monitoring-for-user-defined-projects.html) -------------------------------------------------------------------------------- /hpa/ocp/openshift-keda-subscription.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: Subscription 3 | metadata: 4 | labels: 5 | operators.coreos.com/openshift-custom-metrics-autoscaler-operator.openshift-keda: "" 6 | name: openshift-custom-metrics-autoscaler-operator 7 | namespace: openshift-keda 8 | spec: 9 | channel: stable 10 | installPlanApproval: Automatic 11 | name: openshift-custom-metrics-autoscaler-operator 12 | source: redhat-operators 13 | sourceNamespace: openshift-marketplace 14 | startingCSV: custom-metrics-autoscaler.v2.7.1 -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/ocp-buildconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.openshift.io/v1 2 | kind: BuildConfig 3 | metadata: 4 | name: tcp-exporter-image 5 | labels: 6 | app: tcp-exporter-image 7 | spec: 8 | source: 9 | type: Git 10 | git: 11 | uri: "https://github.com/jhcook/kubernetes-dev-env.git" 12 | ref: "devel" 13 | contextDir: "hpa/sidecar/tcp-exporter" 14 | strategy: 15 | type: Docker 16 | dockerStrategy: 17 | dockerfilePath: ocp/Dockerfile 18 | output: 19 | to: 20 | kind: ImageStreamTag 21 | name: tcp-exporter:latest -------------------------------------------------------------------------------- /calico_enterprise/cis_benchmark/README.md: -------------------------------------------------------------------------------- 1 | # CIS Benchmark 2 | 3 | 
## Introduction 4 | Use Calico Enterprise CIS benchmark compliance reports to assess compliance for all assets in a Kubernetes cluster. 5 | 6 | ## Configure 7 | 8 | ``` 9 | $ kubectl apply -f cis_benchmark/cis.yaml 10 | ... 11 | ``` 12 | 13 | ## References 14 | 15 | * [Configure CIS benchmark reports](https://docs.tigera.io/compliance/compliance-reports-cis) 16 | * Calico Enterprise [Advanced Compliance Controls](https://projectcalico.docs.tigera.io/security/calico-enterprise/compliance) 17 | * [CIS Benchmark](https://www.cisecurity.org/benchmark/kubernetes/) -------------------------------------------------------------------------------- /hpa/sidecar/conntrack-network-init/ocp-buildconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.openshift.io/v1 2 | kind: BuildConfig 3 | metadata: 4 | name: conntrack-network-init-image 5 | labels: 6 | app: conntrack-network-init-image 7 | spec: 8 | source: 9 | type: Git 10 | git: 11 | uri: "https://github.com/jhcook/kubernetes-dev-env.git" 12 | ref: "devel" 13 | contextDir: "hpa/sidecar/conntrack-network-init" 14 | strategy: 15 | type: Docker 16 | dockerStrategy: 17 | dockerfilePath: ocp/Dockerfile 18 | output: 19 | to: 20 | kind: ImageStreamTag 21 | name: conntrack-network-init:latest -------------------------------------------------------------------------------- /monitoring/README.md: -------------------------------------------------------------------------------- 1 | # Monitoring 2 | 3 | This directory contains code to configure Prometheus and install Grafana 4 | dashboards. 5 | 6 | ## Prometheus 7 | 8 | `configure_prometheus.sh` creates services, patches deployments, and creates 9 | ServiceMonitor(s) for NGINX and Calico. These metrics become available in 10 | Prometheus as metrics. These metrics are useful for Grafana dashboards, HPA, 11 | and Keda amongst others. 
12 | 13 | ## Grafana 14 | 15 | The `configure_grafana_dashboards.sh` is a simple convenience that iterates 16 | through the `dashboards` directory installing `.json` or `.yaml` files which 17 | are assumed to be Grafana dashboards. 18 | 19 | The dashboards provided by default are NGINX and Calico. -------------------------------------------------------------------------------- /hpa/recommendations-hpa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.github.io/ingress-nginx/user-guide/monitoring/ 3 | apiVersion: autoscaling/v2 4 | kind: HorizontalPodAutoscaler 5 | metadata: 6 | name: recommendationservice-hpa 7 | spec: 8 | scaleTargetRef: 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | name: recommendationservice 12 | minReplicas: 1 13 | maxReplicas: 10 14 | metrics: 15 | - type: Object 16 | object: 17 | metric: 18 | name: rate(nginx_ingress_controller_nginx_process_requests_total[5m]) 19 | describedObject: 20 | apiVersion: networking.k8s.io/v1 21 | kind: Ingress 22 | name: frontend-ingress 23 | target: 24 | type: Value 25 | value: "25" -------------------------------------------------------------------------------- /rke2/README.md: -------------------------------------------------------------------------------- 1 | # Multipass RKE2 2 | 3 | ## Introduction 4 | 5 | Get an instant Ubuntu VM with a single command. Multipass can launch and run 6 | virtual machines and configure them with cloud-init like a public cloud. 7 | 8 | https://multipass.run 9 | 10 | RKE2, also known as RKE Government, is Rancher's next-generation Kubernetes 11 | distribution. 12 | 13 | It is a fully conformant Kubernetes distribution that focuses on security and 14 | compliance within the U.S. Federal Government sector. 15 | 16 | https://docs.rke2.io 17 | 18 | ## Getting Started 19 | 20 | ``` 21 | $ bash rke2/setup_rke2.sh 22 | ... 
23 | ``` 24 | 25 | ## Clean Up 26 | 27 | When finished, you may delete all nodes, for example: 28 | 29 | ``` 30 | $ bash rke2/remove_rke2.sh 31 | ... 32 | ``` 33 | -------------------------------------------------------------------------------- /ebpf/README.md: -------------------------------------------------------------------------------- 1 | # eBPF 2 | 3 | Enable the eBPF dataplane on an existing cluster using Calico CNI. This 4 | requires the Calico CNI already installed. 5 | 6 | ## Introduction 7 | 8 | The eBPF dataplane mode has several advantages over standard Linux networking pipeline mode: 9 | 10 | It scales to higher throughput. 11 | It uses less CPU per GBit. 12 | It has native support for Kubernetes services (without needing kube-proxy) that: 13 | 14 | Reduces first packet latency for packets to services. 15 | Preserves external client source IP addresses all the way to the pod. 16 | Supports DSR (Direct Server Return) for more efficient service routing. 17 | Uses less CPU than kube-proxy to keep the dataplane in sync. 18 | 19 | ## Resources 20 | 21 | * [Enable the eBPF Dataplane](https://projectcalico.docs.tigera.io/maintenance/ebpf/enabling-ebpf) -------------------------------------------------------------------------------- /security/zero_trust/README.md: -------------------------------------------------------------------------------- 1 | # Zero Trust 2 | 3 | ## Introduction 4 | Zero Trust Networks are resilient even when attackers manage to breach applications or infrastructure. They make it hard for attackers to move laterally, and reconnaissance activities easier to spot. 5 | 6 | Organizations that embrace the change control model in this How-To will be able to tightly secure their network without imposing a drag on innovation in their applications. Security teams can be enablers of business value, not roadblocks. 
7 | 8 | ## References 9 | 10 | * Calico [Adopt Zero Trust](https://projectcalico.docs.tigera.io/security/adopt-zero-trust) 11 | * [Boutique Policies](https://raw.githubusercontent.com/tigera-solutions/aws-howdy-parter-calico-cloud/main/policies/boutique-policies.yaml) 12 | * [Rogue Demo](https://installer.calicocloud.io/rogue-demo.yaml) 13 | * [Allow DNS](https://raw.githubusercontent.com/tigera-solutions/aws-howdy-parter-calico-cloud/main/policies/allow-kubedns.yaml) -------------------------------------------------------------------------------- /doc/integration/index.md: -------------------------------------------------------------------------------- 1 | # Integration 2 | 3 | Documentation to integrate various components and applications to a variety of 4 | Kubernetes implementations is located in this folder. 5 | 6 | ## OpenShift 7 | 8 | OpenShift provides an opinionated set of objects, additional verbs, and secure 9 | implementation. 10 | 11 | ### Security Context Constraints 12 | 13 | In order to use the standard Google Boutique microservices demo 14 | application, the easiest method is to grant the default service account 15 | privileged access. 16 | 17 | ``` 18 | $ oc adm policy add-scc-to-user privileged system:serviceaccount:project1:default 19 | ``` 20 | 21 | ### HTTPS Filtering 22 | 23 | For those environments using HTTPS filtering, you will either need to get 24 | exceptions for appropriate remote mirrors or trust the proxy CA certificate. 
25 | It must be in PEM format, and this can be achieved as follows: 26 | 27 | ``` 28 | $ openssl x509 -inform DER -in cert.cer -outform PEM -out cert.pem 29 | ``` 30 | 31 | ## Resources 32 | 33 | * [Using pods in a privileged security context](https://docs.openshift.com/container-platform/4.11/cicd/pipelines/using-pods-in-a-privileged-security-context.html) -------------------------------------------------------------------------------- /hpa/ocp/README.md: -------------------------------------------------------------------------------- 1 | # OpenShift HPA 2 | 3 | As of OCP 4.11, automatically scaling pods based on custom metrics is provided 4 | by the [custom metrics autoscaler](https://docs.openshift.com/container-platform/4.11/nodes/pods/nodes-pods-autoscaling-custom.html) and is in _Tech Preview_. 5 | 6 | ## Operations 7 | 8 | An OpenShift environment needs to be available with`oc` and `kubectl` logged 9 | in to the kube-apiserver. 10 | 11 | ## Installation 12 | 13 | Create the following objects: 14 | 15 | * `openshift-keda` namespace. 16 | 17 | ``` 18 | oc create namespace openshift-keda --dry-run=client -o yaml | \ 19 | oc apply -f - 20 | ``` 21 | 22 | * Operator Group 23 | 24 | ``` 25 | $ oc create -f hpa/ocp/openshift-keda-operator-group.yaml -n openshift-keda 26 | ``` 27 | 28 | * Subscription 29 | 30 | ``` 31 | $ oc create -f hpa/ocp/openshift-keda-subscription.yaml -n openshift-keda 32 | ``` 33 | 34 | Finally, create the custom metrics autoscaler. 
35 | 36 | ``` 37 | $ oc create -f hpa/ocp/custom-metrics-autoscaler.yaml -n openshift-keda 38 | ``` 39 | 40 | ## References 41 | 42 | * [Linux Capabilities in OpenShift](https://cloud.redhat.com/blog/linux-capabilities-in-openshift) 43 | * [Linux Capabilities](https://linux.die.net/man/7/capabilities) -------------------------------------------------------------------------------- /kind/calico_cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # podSubnet should equal ${POD_NET_CIDR} 3 | kind: Cluster 4 | apiVersion: kind.x-k8s.io/v1alpha4 5 | name: calico-cluster 6 | featureGates: {} 7 | # Local registry 8 | containerdConfigPatches: 9 | - |- 10 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] 11 | endpoint = ["http://kind-registry:${DOCKER_REG_PORT}"] 12 | networking: 13 | # disableDefaultCNI: true 14 | apiServerAddress: "127.0.0.1" 15 | apiServerPort: 6443 16 | podSubnet: "172.16.0.0/16" 17 | serviceSubnet: "10.96.0.0/12" 18 | nodes: 19 | - role: control-plane 20 | image: kindest/node:v1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938b328f657c8e3f81ee0dfb9 21 | kubeadmConfigPatches: 22 | - | 23 | kind: InitConfiguration 24 | nodeRegistration: 25 | kubeletExtraArgs: 26 | node-labels: "ingress-ready=true" 27 | extraPortMappings: 28 | - containerPort: 80 29 | hostPort: 80 30 | protocol: TCP 31 | - containerPort: 443 32 | hostPort: 443 33 | protocol: TCP 34 | - role: worker 35 | image: kindest/node:v1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938b328f657c8e3f81ee0dfb9 36 | - role: worker 37 | image: kindest/node:v1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938b328f657c8e3f81ee0dfb9 -------------------------------------------------------------------------------- /hpa/sidecar/README.md: -------------------------------------------------------------------------------- 1 | # sidecar 2 | 3 | ## Introduction 4 | The code herein is used to create sidecar and init 
containers for various use 5 | cases. 6 | 7 | The `bootstrap.sh` script is used to create a local Docker registry 8 | for use with Minikube where these and other container images can be created 9 | and managed locally. It requires `limactl` available with Minikube running. 10 | 11 | The `builder.sh` script is used to iterate through the images in the registry 12 | and build those missing. The directories in this folder should be named after 13 | the image name for successful automatic build. 14 | 15 | ## TODO 16 | 17 | Migrate from legacy iptables to nf_tables. 18 | * https://gitlab.alpinelinux.org/alpine/aports/-/issues/14058 19 | * https://wiki.nftables.org/wiki-nftables/index.php/Moving_from_iptables_to_nftables 20 | 21 | ## Warning 22 | 23 | This code supports macOS. Support for other platforms is encouraged by PR. As 24 | such, by default, macOS users will need to disable AirPlay Receiver in System 25 | Preferences > Sharing since it collides with tcp:5000 which is used by default. 26 | 27 | If you experience issues with `bootstrap.sh` such as 28 | `proxy: unknown scheme: http`, unset proxy environment variables. 29 | 30 | ``` 31 | $ unset $(compgen -e | awk 'tolower($1)~/proxy/{printf"%s ",$1}') 32 | ``` -------------------------------------------------------------------------------- /app/README.md: -------------------------------------------------------------------------------- 1 | # Boutique 2 | 3 | This is Google's [Online Boutique](https://github.com/GoogleCloudPlatform/microservices-demo) which is a cloud-first microservices demo 4 | application.Online Boutique consists of an 11-tier microservices application. 5 | The application is a web-based e-commerce app where users can browse items, add 6 | them to the cart, and purchase them. 7 | 8 | Google uses this application to demonstrate use of technologies like 9 | Kubernetes/GKE, Istio, Stackdriver, and gRPC. This application works on any 10 | Kubernetes cluster, as well as Google Kubernetes Engine. 
It’s easy to deploy 11 | with little to no configuration. 12 | 13 | ## Introduction 14 | 15 | The Online Boutique is made availabe here to demonstrate and make Locust 16 | availabile for load testing. 17 | 18 | ## Getting Started 19 | 20 | With KUBECONFIG correctly set and a context using the specific cluster and 21 | namespace, you can install the demo with `bash app/install_boutique.sh`. 22 | 23 | Given the Kubenetes platform, ingress may be automatically configured. In case 24 | it is not, apply the ingress located in `hpa` directory in this repo. 25 | 26 | ``` 27 | $ kubectl apply -f hpa/frontend-ingress.yaml 28 | ingress.networking.k8s.io/frontend-ingress created 29 | ``` 30 | 31 | The demo is available on http://boutique.test if resolution is configured. -------------------------------------------------------------------------------- /hpa/sidecar/conntrack-network-init/README.md: -------------------------------------------------------------------------------- 1 | # Restore IPTables Rules 2 | 3 | In order for conntrack to work in the network stack, there needs to be a rule 4 | to enable this. 5 | 6 | ## Build and Install 7 | 8 | Build the handy little init container like so: 9 | 10 | ``` 11 | $ docker build -t localhost:${DOCKER_REG_PORT}/boutique/conntrack-network-init . 
12 | ``` 13 | 14 | And now push it to the registry: 15 | 16 | ``` 17 | $ docker push localhost:${DOCKER_REG_PORT}/boutique/conntrack-network-init:latest 18 | ``` 19 | 20 | Finally, pop an initcontainer stanza in the workload like so: 21 | 22 | ``` 23 | initContainers: 24 | - name: conntrack-networking 25 | image: localhost:${DOCKER_REG_PORT}/boutique/conntrack-network-init:latest 26 | resources: {} 27 | terminationMessagePath: /dev/termination-log 28 | terminationMessagePolicy: File 29 | imagePullPolicy: Always 30 | securityContext: 31 | capabilities: 32 | add: 33 | - NET_ADMIN 34 | - NET_RAW 35 | - DAC_READ_SEARCH 36 | - DAC_OVERRIDE 37 | privileged: true 38 | ``` 39 | 40 | ## Resources 41 | * https://venilnoronha.io/hand-crafting-a-sidecar-proxy-and-demystifying-istio 42 | * https://linux.die.net/man/7/capabilities -------------------------------------------------------------------------------- /doc/install/index.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Development Environment Installation 2 | 3 | Installing this product requires one to have `git` installed. There are several 4 | methods of doing so: 5 | * [Git SCM](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) 6 | * [Git SCM Downloads](https://git-scm.com/downloads) 7 | * [Atlassian](https://www.atlassian.com/git/tutorials/install-git) 8 | * [GitLab](https://docs.gitlab.com/ee/topics/git/how_to_install_git/) 9 | * [GitHub](https://github.com/git-guides/install-git) 10 | 11 | ## Install 12 | 13 | Installing the project is can be achieved either downloading or cloning the Git 14 | repository which can be found on [GitHub](https://github.com/jhcook/kubernetes-dev-env). 15 | 16 | ``` 17 | $ git clone https://github.com/jhcook/kubernetes-dev-env.git 18 | ``` 19 | 20 | or 21 | 22 | ``` 23 | $ gh repo clone jhcook/kubernetes-dev-env 24 | ``` 25 | 26 | ## Dependencies 27 | 28 | This project requires access to a Kubernetes cluster. 
This code supports 29 | instantiating [Minikube](https://minikube.sigs.k8s.io/docs/start/), [Kind](https://kind.sigs.k8s.io), and [OpenShift Local](https://console.redhat.com/openshift/create/local). 30 | 31 | ## Configuration 32 | 33 | Configuring the code is done via the `env.sh` file in the root folder. A 34 | description of each configuration item can be found in [configuration](./operations/configuration.md). 35 | -------------------------------------------------------------------------------- /rancher/README.md: -------------------------------------------------------------------------------- 1 | # Rancher 2 | 3 | ## Introduction 4 | 5 | Rancher is known early on in the Kubernetes ecosystem from Rancher Manager 6 | which deploys and manages multi-cluster apps. It unifies several other 7 | Kubernetes resources such as Prometheus and Grafana. 8 | 9 | The code in this directory focuses on deploying and configuring Rancher Manager 10 | for use with applications. Rancher Desktop can be used as a Kubernetes cluster 11 | provider. We will make efforts to distinguish Rancher Desktop when applicable. 12 | But, please assume the use of Rancher refers to Rancher Manager unless stated 13 | otherwise. 14 | 15 | ## Rancher Desktop 16 | 17 | While Rancher and Rancher Desktop share the Rancher name they do different things. Rancher Desktop is not Rancher on the Desktop. Rancher is a powerful solution to manage Kubernetes clusters. Rancher Desktop provides a local Kubernetes and container management platform. The two solutions complement each other. If you want to run Rancher on your local system, you can install Rancher into Rancher Desktop. 18 | 19 | ## Getting Started 20 | 21 | Installing Rancher requires a running Kubernetes cluster with adequate 22 | resources and no colliding installed resources. This can be achieved by running 23 | `install_rancher.sh` provided in this directory. 
24 | 25 | ## Resources 26 | * [Why Rancher](https://www.rancher.com/why-rancher) 27 | * [Rancher Desktop](https://docs.rancherdesktop.io/) -------------------------------------------------------------------------------- /ingress/README.md: -------------------------------------------------------------------------------- 1 | # Ingress 2 | 3 | An API object that manages external access to the services in a cluster, 4 | typically HTTP. 5 | 6 | ## Introduction 7 | 8 | Ingress exposes HTTP and HTTPS routes from outside the cluster to services 9 | within the cluster. Traffic routing is controlled by rules defined on the 10 | Ingress resource. 11 | 12 | You must have an Ingress controller to satisfy an Ingress. Only creating an 13 | Ingress resource has no effect. 14 | 15 | You may need to deploy an Ingress controller such as ingress-nginx. You can 16 | choose from a number of Ingress controllers. 17 | 18 | ## Getting Started 19 | 20 | If you are using `setup_k8s.sh` provided with Minikube, NGINX ingress is 21 | installed and configured for you. Other configurations for other providers are 22 | provided as necessary. 23 | 24 | ## NGINX 25 | 26 | ingress-nginx can be used for many use cases, inside various cloud providers, 27 | and supports a lot of configurations. `install_nginx_ingress.sh` in this 28 | directory installs the operator using Helm for supported platforms. 
 29 | 30 | ## References 31 | 32 | * [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) 33 | * [Kubernetes Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) 34 | * [NGINX Ingress](https://kubernetes.github.io/ingress-nginx/deploy/) 35 | * [Set up Ingress on Minikube with the NGINX Ingress Controller](https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/) 36 | -------------------------------------------------------------------------------- /ebpf/disable_ebpf.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Disable eBPF and switch to kube-proxy 24 | 25 | # shellcheck source=/dev/null 26 | . 
env.sh 27 | 28 | # Switch dataplane to Iptables 29 | kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}' 30 | 31 | # Reenable kube-proxy 32 | kubectl patch ds -n kube-system kube-proxy --type merge -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": null}}}}}' 33 | -------------------------------------------------------------------------------- /monitoring/configure_grafana_dashboards.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Create Grafana dashboards for additional services 24 | # 25 | # This exists as a wrapper for dashboards contained in files due to length. 
26 | # 27 | # Author: Justin Cook 28 | 29 | shopt -s nullglob 30 | 31 | # shellcheck source=/dev/null 32 | . env.sh 33 | 34 | for dashboard in monitoring/dashboards/*.{yaml,json} 35 | do 36 | printf "Applying Grafana dashboard: %s\n" "${dashboard}" 37 | kubectl apply -f "${dashboard}" 38 | done -------------------------------------------------------------------------------- /rke2/remove_rke2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Remove multipass rke2 nodes configured from this code base. 
24 | 25 | # shellcheck source=/dev/null 26 | source env.sh 27 | source "$(dirname "$0")/localenv.sh" 28 | 29 | set -o errexit 30 | 31 | $(which kubectl) config delete-context "${NAME:-generic}-rke2-cluster" || /usr/bin/true 32 | 33 | for node in $(multipass list --format=json | \ 34 | jq -r ".list[] | select(.name | match(\"${NAME:-generic}-rke2-*\")) | .name") 35 | do 36 | echo "Deleting: ${node}" 37 | multipass delete "${node}" --purge || /usr/bin/true 38 | done -------------------------------------------------------------------------------- /microk8s/remove_microk8s.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Remove microk8s configured from this code base. 24 | 25 | # shellcheck source=/dev/null 26 | . 
env.sh 27 | 28 | set -o errexit 29 | 30 | $(which kubectl) config delete-context microk8s-cluster || /usr/bin/true 31 | $(which kubectl) config delete-cluster microk8s-cluster || /usr/bin/true 32 | 33 | for node in $(multipass list --format=json | \ 34 | jq -r '.list[] | select(.name | match("microk8s-*")) | .name') 35 | do 36 | echo "Deleting: ${node}" 37 | multipass delete "${node}" --purge || /usr/bin/true 38 | done 39 | -------------------------------------------------------------------------------- /security/postee/install_postee.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install Postee operator in the postee namespace and configure. 
24 | # 25 | # Author: Justin Cook 26 | 27 | set -o errexit 28 | 29 | # shellcheck source=/dev/null 30 | . env.sh 31 | 32 | # Add the helm repo 33 | helm repo add aquasecurity https://aquasecurity.github.io/helm-charts/ 34 | helm repo update 35 | 36 | # Install the chart from the Aqua chart repository 37 | helm upgrade --install postee aquasecurity/postee \ 38 | --namespace postee --create-namespace 39 | 40 | # Wait for Trivy to become available 41 | kubectl rollout status statefulsets/postee -n postee 42 | kubectl rollout status deployment/posteeui -n postee -------------------------------------------------------------------------------- /rke2/stop_rke2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 
22 | # 23 | # Stop multipass rke2 nodes configured from this code base. 24 | 25 | # shellcheck source=/dev/null 26 | source env.sh 27 | source "$(dirname "$0")/localenv.sh" 28 | 29 | set -o errexit 30 | 31 | for node in $(${MULTIPASSCMD} list --format=json | \ 32 | jq -r ".list[] | select(.name | match(\"${NAME:-generic}-rke2-*\")) | .name") 33 | do 34 | STATE="$(${MULTIPASSCMD} list --format=json | \ 35 | jq -r ".list[] | select(.name | contains(\"${node}\")) | .state")" 36 | if [ "${STATE}" = "Running" ] 37 | then 38 | echo "Stopping: ${node}" 39 | multipass stop "${node}" 40 | else 41 | echo "Not Running: ${node}" 42 | fi 43 | done -------------------------------------------------------------------------------- /kind/configure_colima.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Configure Colima instance to workaround issues like x509 certificate 24 | # verification. 25 | # 26 | # Author: Justin Cook 27 | 28 | echo "Reconfiguring Docker daemon.json" 29 | read -r -d '' INSECURE_REGISTRIES < /etc/docker/daemon.json && sudo /etc/init.d/docker restart" 46 | 47 | echo "Wait for Kind to regain conscience" 48 | sleep 60 49 | -------------------------------------------------------------------------------- /security/trivy/install_trivy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install Trivy operator in the trivy-system namespace and configure it to 24 | # select all namespaces except kube-system and trivy-system. 25 | # 26 | # Author: Justin Cook 27 | 28 | set -o errexit 29 | 30 | # shellcheck source=/dev/null 31 | . env.sh 32 | 33 | # Add the helm repo 34 | helm repo add aqua https://aquasecurity.github.io/helm-charts/ 35 | helm repo update 36 | 37 | # Install the chart from the Aqua chart repository 38 | helm upgrade --install trivy-operator aqua/trivy-operator \ 39 | --namespace trivy-system \ 40 | --create-namespace \ 41 | --set="trivy.ignoreUnfixed=true" \ 42 | --version 0.12.1 43 | 44 | # Wait for Trivy to become available 45 | kubectl rollout status deploy/trivy-operator -n trivy-system -------------------------------------------------------------------------------- /kind/install_ingress.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 
14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Create an NGINX Ingress 24 | # Reference: https://kind.sigs.k8s.io/docs/user/ingress/ 25 | # 26 | # Author: Justin Cook 27 | 28 | # Create NGINX Ingress controller with complete rbac 29 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml 30 | 31 | # Patch the nginx-deployment to only run on ingress-ready=true labelled hosts 32 | kubectl patch deployments ingress-nginx-controller -n ingress-nginx -p \ 33 | '{"spec": {"template": {"spec": {"nodeSelector": {"ingress-ready": "true"}}}}}' 34 | 35 | # Wait for the controller to start 36 | kubectl wait --namespace ingress-nginx \ 37 | --for=condition=ready pod \ 38 | --selector=app.kubernetes.io/component=controller \ 39 | --timeout=90s 40 | -------------------------------------------------------------------------------- /calico_enterprise/README.md: -------------------------------------------------------------------------------- 1 | # Calico Enterprise 2 | 3 | ## Installation 4 | 5 | Place your pull secret and Calico Enterprise License in this directory using 6 | the following file names: 7 | 8 | * quay.io pull secret: `tigera-pull-secret.json` 9 | * Calico Enterprise License: `calico-enterprise-license.yaml` 10 | 11 | Git is configured to ignore these filenames. 
12 | 13 | Execute `calico_enterprise/install_calico_enterprise.sh` and after some time, 14 | you will be prompted to open a window to the UI: 15 | 16 | ``` 17 | Visit https://localhost:9443/ to login to the Calico Enterprise UI with token above. 18 | 19 | Forwarding from 127.0.0.1:9443 -> 9443 20 | Forwarding from [::1]:9443 -> 9443 21 | ``` 22 | 23 | ## Prometheus 24 | 25 | If Rancher is installed with monitoring, the Calico Enterprise installation 26 | will use the Rancher Prometheus operator to manage the AlertManager and 27 | Prometheus CRs in the `tigera-prometheus` namespace. 28 | 29 | If no Prometheus operator exists, or is in an unknown namespace, the pull 30 | secret and operator patch will not be successful. 31 | 32 | If other Prometheus instances will need to be deployed alongside Tigera, then 33 | modification to the operator args will be necessary. By default, they are the 34 | following: 35 | 36 | ``` 37 | - args: 38 | - --prometheus-config-reloader=quay.io/tigera/prometheus-config-reloader:v3.13.0 39 | - --config-reloader-memory-request=25Mi 40 | - --namespaces=tigera-prometheus 41 | - --cluster-domain=cluster.local 42 | ``` 43 | ## Post Configuration 44 | 45 | 46 | 47 | ## Resources 48 | * [Calico Enterprise Installation](https://docs.tigera.io/getting-started/kubernetes/generic-install) 49 | * [Calico Resources](https://docs.tigera.io/reference/resources/) 50 | * [Configure access to Calico Enterprise Manager UI](https://docs.tigera.io/getting-started/cnx/access-the-manager) 51 | * [Prometheus Support](https://docs.tigera.io/maintenance/monitor/support) 52 | -------------------------------------------------------------------------------- /hpa/README.md: -------------------------------------------------------------------------------- 1 | # HPA 2 | 3 | [Horizontal Pod Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) 4 | 5 | ## Introduction 6 | 7 | The Rancher 2.6.5 release has support for Kubernetes 1.23.x. 
As 8 | such, it is the selected version in this build. Kubernetes 1.23 is used and 9 | supports [`autoscaling/v2`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). 10 | 11 | In the interest of backward compatibility, Keda is used with ScaledObject(s) 12 | in the examples using the `sidecar` subdirectory. 13 | 14 | ## Prerequisites 15 | 16 | A Docker registry running on $(minikube ip):${DOCKER_REG_PORT} and accessible 17 | to localhost:${DOCKER_REG_PORT}. For more information, please see the `sidecar` 18 | subdirectory. 19 | 20 | ## Installation 21 | 22 | Execute `configure_hpa.sh` script in this directory. It will install Keda and 23 | create a frontend at http://boutique.test. 24 | 25 | Next, the sidecar and init containers need to be built and made available. The 26 | [method will vary based on platform](https://minikube.sigs.k8s.io/docs/handbook/pushing/). For macOS, please use [the code in 27 | this project](./sidecar/bootstrap.sh) to prepare and make available. 28 | Finally, patch the relevant deployments to use the containers and create 29 | ServiceMonitors for each. 30 | 31 | ``` 32 | $ bash hpa/sidecar/bootstrap.sh 33 | ... 34 | $ bash hpa/sidecar/builder.sh 35 | ... 36 | $ bash hpa/install_sidecar_init.sh 37 | ... 38 | ``` 39 | 40 | At this point, the pods are publishing metrics being scraped by Prometheus. 41 | The last bit is creating ScaledObjects Keda will use to modify workloads 42 | accordingly. 43 | 44 | ``` 45 | $ bash hpa/install_scaled_objects.sh 46 | ... 47 | ``` 48 | 49 | This creates a sidecar and an init container, and modifies the mock application (Boutique) 50 | with Locust for load testing. Upon successful execution, it will print 51 | instructions and make the Boutique available as `http://boutique.test`. 52 | 53 | The Locust interface is made available as `http://0.0.0.0:8089`. 
-------------------------------------------------------------------------------- /hpa/ocp/configure_serviceaccount.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Configure deployments to use service account 24 | # 25 | # References: 26 | # * https://www.nginx.com/blog/microservices-march-reduce-kubernetes-latency-with-autoscaling/ 27 | # * https://stackoverflow.com/questions/62578789/kubectl-patch-is-it-possible-to-add-multiple-values-to-an-array-within-a-sinlge 28 | # 29 | # Requires: oc 30 | # 31 | # Author: Justin Cook 32 | 33 | set -o errexit nounset 34 | 35 | # shellcheck source=/dev/null 36 | . 
env.sh 37 | 38 | # Patch each deployment with appropriate service account 39 | for deploy in $(kubectl get deploy -n "${PROJECT_NAMESPACE}" -o name | \ 40 | grep -E 'service$') 41 | do 42 | # Configure the deployment to use service account 43 | oc patch "${deploy}" -n "${PROJECT_NAMESPACE}" -p \ 44 | '{"spec": {"template": {"spec": {"serviceAccountName": "net-dac-cap-anyuid"}}}}' 45 | done -------------------------------------------------------------------------------- /hpa/sidecar/builder.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # 24 | 25 | set -o errexit nounset 26 | 27 | # shellcheck source=/dev/null 28 | . env.sh 29 | 30 | # Check availability of the required Docker images. 
If not available, build and 31 | # push to the local registry. If the registry is not available, exit on error. 32 | NIMAGES=(localhost:"${DOCKER_REG_PORT}"/boutique/conntrack-network-init \ 33 | localhost:"${DOCKER_REG_PORT}"/boutique/tcp-exporter) 34 | declare -a FIMAGES 35 | 36 | while IFS='' read -r line 37 | do 38 | FIMAGES+=("${line}") 39 | done < <(docker images | grep localhost:"${DOCKER_REG_PORT}"/boutique | awk '{print$1}') 40 | 41 | for image in "${NIMAGES[@]}" 42 | do 43 | if ! printf '%s' "${FIMAGES[@]}" | grep "${image}" &>/dev/null 44 | then 45 | printf "image: %s not found\n" "${image##*/}" 46 | cd "./hpa/sidecar/${image##*/}" || exit 47 | docker build -t "${image}" . 48 | docker push "${image}" 49 | cd "${OLDPWD}" || exit 50 | else 51 | printf "image: %s found\n" "${image##*/}" 52 | fi 53 | done 54 | -------------------------------------------------------------------------------- /hpa/configure_hpa.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Configure HPA using Keda 24 | # 25 | # References: 26 | # * https://www.nginx.com/blog/microservices-march-reduce-kubernetes-latency-with-autoscaling/ 27 | # * https://stackoverflow.com/questions/62578789/kubectl-patch-is-it-possible-to-add-multiple-values-to-an-array-within-a-sinlge 28 | # 29 | # Requires: jq, yq 30 | # 31 | # Author: Justin Cook 32 | 33 | set -o errexit nounset 34 | 35 | # shellcheck source=/dev/null 36 | . env.sh 37 | 38 | # Add the Keda Helm chart repository 39 | helm repo add kedacore https://kedacore.github.io/charts 40 | helm repo update 41 | 42 | # Create the keda namespace 43 | kubectl create namespace keda --dry-run=client -o yaml | \ 44 | kubectl apply -f - 45 | 46 | # Install Keda 47 | helm upgrade --install keda kedacore/keda --namespace keda 48 | 49 | # Wait for all the deployments to become available 50 | for deploy in $(kubectl get deploy -n keda -o name) 51 | do 52 | kubectl rollout status "${deploy}" -n keda 53 | done 54 | 55 | # Create an Ingress for the Boutique® 56 | kubectl apply -f hpa/frontend-ingress.yaml -n "${PROJECT_NAMESPACE}" 57 | -------------------------------------------------------------------------------- /rancher/install_rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies 
of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install Rancher in a Kubernetes cluster 24 | # 25 | # Author: Justin Cook 26 | 27 | set -o errexit 28 | 29 | # shellcheck source=/dev/null 30 | . env.sh 31 | 32 | # Apply cert-manager crds 33 | kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.10.2/cert-manager.crds.yaml 34 | 35 | # Add the appropriate Helm repos and update 36 | helm repo add jetstack https://charts.jetstack.io 37 | helm repo add rancher-stable https://releases.rancher.com/server-charts/stable 38 | helm repo update 39 | 40 | # Install cert-manager 41 | helm upgrade --install cert-manager jetstack/cert-manager \ 42 | --namespace cert-manager \ 43 | --create-namespace \ 44 | --version v1.10.2 45 | 46 | # Create the cattle-system namespace 47 | kubectl create namespace cattle-system --dry-run=client -o yaml | \ 48 | kubectl apply -f - 49 | 50 | # Install Rancher 51 | helm upgrade --install rancher rancher-stable/rancher \ 52 | --namespace cattle-system \ 53 | --set hostname=rancher.test \ 54 | --set bootstrapPassword=admin \ 55 | --version 2.7.1 56 | 57 | # Wait for Rancher to become available 58 | kubectl rollout status deploy/rancher -n cattle-system 59 | 
-------------------------------------------------------------------------------- /ebpf/enable_ebpf.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Configures an existing Kubernetes cluster with Calico CNI to use eBPF 24 | # https://projectcalico.docs.tigera.io/maintenance/ebpf/enabling-bpf 25 | # 26 | # Author: Justin Cook 27 | 28 | # shellcheck source=/dev/null 29 | . 
env.sh 30 | 31 | IFS=':' read -ra ENDPNT <<< "$(kubectl get endpoints kubernetes | awk '/^kubernetes/{print$2}')" 32 | 33 | cat </dev/null 49 | then 50 | # Forward port to the docker registry on Minikube 51 | printf "registry: localhost: not found: forwarding\n" 52 | docker run -d --network=host alpine ash -c \ 53 | "apk add socat && socat TCP-LISTEN:${DOCKER_REG_PORT},reuseaddr,fork TCP:$(minikube ip):5000" 54 | fi 55 | 56 | num_try=0 57 | while [ $num_try -le 20 ] 58 | do 59 | if ! curl --connect-timeout 3 http://localhost:"${DOCKER_REG_PORT}"/v2/_catalog &>/dev/null 60 | then 61 | printf "registry: waiting on localhost:%s\n" "${DOCKER_REG_PORT}" 62 | num_try=$((num_try+1)) 63 | sleep 5 64 | else 65 | printf "registry: localhost:%s} found\n" "${DOCKER_REG_PORT}" 66 | break 67 | fi 68 | done 69 | -------------------------------------------------------------------------------- /quickstart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Setup Minikube with Calico CNI, Rancher, install and configure monitoring,
# and install a boutique application for load testing.
#
# Author: Justin Cook

# BUG FIX: the original `set -o errexit nounset` enables only errexit; the
# trailing word "nounset" becomes positional parameter $1 and the option is
# silently ignored. Each option needs its own -o flag.
# NOTE(review): nounset now takes effect, so env.sh must define RUNTIME and
# RUNNING used below — confirm against env.sh.
set -o errexit -o nounset

# shellcheck source=/dev/null
. env.sh

if [ "${RUNTIME}" = "minikube" ] || [ "${RUNTIME}" = "rdctl" ]
then
  # Setup Kubernetes
  if [ "${RUNTIME}" = "minikube" ]
  then
    if [ "${RUNNING}" = "false" ]
    then
      bash setup_k8s.sh
    fi
    bash cni/install_calico.sh
  elif [ "${RUNTIME}" = "rdctl" ]
  then
    if [ "${RUNNING}" = "false" ]
    then
      rdctl start
      # Wait until the Rancher Desktop VM accepts shell commands
      while ! rdctl shell "id" 2>/dev/null
      do
        sleep 5
      done
      # Wait until the Kubernetes API answers
      while ! 
kubectl get nodes 2>/dev/null 53 | do 54 | sleep 5 55 | done 56 | fi 57 | fi 58 | 59 | # Install Rancher 60 | bash rancher/install_rancher.sh 61 | # Install and configure Prometheus metrics / Grafana dashboards 62 | bash rancher/install_monitoring.sh 63 | #bash monitoring/configure_prometheus.sh 64 | #bash monitoring/configure_grafana_dashboards.sh 65 | elif [ "${RUNTIME}" = crc ] 66 | then 67 | # Setup OpenShift Local 68 | bash ocp/setup_ocp.sh 69 | else 70 | >&2 echo "unknown runtime: ${RUNTIME}" 71 | exit 1 72 | fi 73 | 74 | # Install boutique 75 | bash app/install_boutique.sh 76 | 77 | if [ "${RUNTIME}" = "crc" ] 78 | then 79 | oc delete svc frontend-external 80 | oc expose svc frontend 81 | echo "Ensure http://frontend-boutique.apps-crc.testing resolves to $(crc ip)" 82 | fi -------------------------------------------------------------------------------- /quickstart_enterprise.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Setup Minikube with Rancher and install monitoring and Calico Enterprise. 24 | # Afterward, configuring Prometheus and Grafana and installing Boutique neeed 25 | # completed separately as they exit with runtime. 26 | # 27 | # References: 28 | # * https://kubernetes.github.io/ingress-nginx/deploy/ 29 | # 30 | # Author: Justin Cook 31 | 32 | set -o errexit 33 | 34 | # shellcheck source=/dev/null 35 | . env.sh 36 | 37 | trap "exit" INT 38 | 39 | # Configure Minikube 40 | #bash setup_k8s.sh 41 | 42 | # Configure Kind 43 | #colima start --cpu 6 --memory 28 --disk 100 --runtime containerd 44 | #bash kind/configure_colima.sh 45 | #kind create cluster --config kind/calico_cluster.yaml 46 | 47 | # Install Calico CNI 48 | bash install_calico.sh 49 | 50 | # Install NGINX Ingress Controller 51 | #kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.2/deploy/static/provider/cloud/deploy.yaml 52 | #kubectl wait --namespace ingress-nginx \ 53 | # --for=condition=ready pod \ 54 | # --selector=app.kubernetes.io/component=controller \ 55 | # --timeout=240s 56 | 57 | # Add ingress-dns if not on Minikube 58 | #kubectl apply -f kind/ingress-dns-pod.yaml 59 | 60 | # Install Rancher 61 | bash install_rancher.sh 62 | 63 | # Install Prometheus 64 | bash install_monitoring.sh 65 | 66 | # Install Calico 67 | bash calico_enterprise/install_calico_enterprise.sh 68 | 69 | # Configure Prometheus metrics / Grafana dashboards 70 | #bash monitoring/configure_prometheus.sh 71 | #bash monitoring/configure_grafana_dashboards.sh 72 | 73 | # Install boutique 74 | #bash install_boutique.sh 75 | 
-------------------------------------------------------------------------------- /hpa/sidecar/openshift.md: --------------------------------------------------------------------------------
# OpenShift Builds

The OpenShift method of creating builds and making them available via the
integrated registry is creating `BuildConfig`s and `ImageStream`s. Fetching source
from Git and the use of Dockerfiles are supported. As such, each image can be
made available by building from source.

Create the Kubernetes objects for each image. In the case of this module, each
directory has `ocp-*.yaml` files and can be easily created:

```
$ for f in hpa/sidecar/*/ocp-*.yaml ; do kubectl apply -f $f ; done
buildconfig.build.openshift.io/conntrack-network-init-image created
imagestream.image.openshift.io/conntrack-network-init created
buildconfig.build.openshift.io/tcp-exporter-image created
imagestream.image.openshift.io/tcp-exporter created
```

Once created, enable image lookup for all resources in the project with
`oc set image-lookup ...`.

Next, each build needs to be initiated using `oc start-build ...` like so:

```
oc start-build tcp-exporter-image -F
```

> Tip
> If you are using a proxy with HTTPS filtering, you will need to
> create exceptions for github.com and alpinelinux.org for this example. If you
> are unable to bypass the filtering, please see [this topic](https://access.redhat.com/solutions/6165352) or
> [this issue](https://github.com/alpinelinux/docker-alpine/issues/160#issuecomment-844325769) for assistance.
33 | 34 | ## Get the image registry URL 35 | 36 | ``` 37 | $ oc get route -A -o jsonpath='{range .items[*]}{.spec.host}{"\n"}{end}' | grep image-registry 38 | ``` 39 | 40 | ## Allow the insecure registry and others 41 | 42 | In case other registries need to be supported, the following is the process 43 | that should be followed. 44 | 45 | ``` 46 | $ oc patch --type=merge --patch='{ 47 | "spec": { 48 | "registrySources": { 49 | "insecureRegistries": [ 50 | "image-registry.openshift-image-registry.svc:5000" 51 | ] 52 | } 53 | } 54 | }' image.config.openshift.io/cluster 55 | ``` 56 | 57 | ``` 58 | $ ssh -i ~/.crc/machines/crc/id_ecdsa -o StrictHostKeyChecking=no core@$(crc ip) -p2222 59 | crc-master $ cat /etc/containers/registries.conf 60 | unqualified-search-registries = ['registry.access.redhat.com', 'docker.io'] 61 | 62 | [[registry]] 63 | location = "image-registry.openshift-image-registry.svc:5000" 64 | insecure = true 65 | blocked = false 66 | mirror-by-digest-only = false 67 | prefix = "" 68 | 69 | ``` 70 | 71 | ## References 72 | 73 | * [Adding an insecure registry](https://github.com/code-ready/crc/wiki/Adding-an-insecure-registry) 74 | * [Using image streams with Kubernetes resources](https://docs.openshift.com/container-platform/4.10/openshift_images/using-imagestreams-with-kube-resources.html) -------------------------------------------------------------------------------- /kind/ingress-dns-pod.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | --- 16 | apiVersion: v1 17 | kind: ServiceAccount 18 | metadata: 19 | name: minikube-ingress-dns 20 | namespace: kube-system 21 | labels: 22 | app: minikube-ingress-dns 23 | kubernetes.io/bootstrapping: rbac-defaults 24 | app.kubernetes.io/part-of: kube-system 25 | --- 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | kind: ClusterRole 28 | metadata: 29 | name: minikube-ingress-dns 30 | namespace: kube-system 31 | labels: 32 | app: minikube-ingress-dns 33 | kubernetes.io/bootstrapping: rbac-defaults 34 | app.kubernetes.io/part-of: kube-system 35 | gcp-auth-skip-secret: "true" 36 | rules: 37 | - apiGroups: 38 | - "" 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | --- 48 | apiVersion: rbac.authorization.k8s.io/v1 49 | kind: ClusterRoleBinding 50 | metadata: 51 | name: minikube-ingress-dns 52 | namespace: kube-system 53 | labels: 54 | app: minikube-ingress-dns 55 | kubernetes.io/bootstrapping: rbac-defaults 56 | app.kubernetes.io/part-of: kube-system 57 | roleRef: 58 | apiGroup: rbac.authorization.k8s.io 59 | kind: ClusterRole 60 | name: minikube-ingress-dns 61 | subjects: 62 | - kind: ServiceAccount 63 | name: minikube-ingress-dns 64 | namespace: kube-system 65 | --- 66 | apiVersion: v1 67 | kind: Pod 68 | metadata: 69 | name: kube-ingress-dns-minikube 70 | namespace: kube-system 71 | labels: 72 | app: minikube-ingress-dns 73 | app.kubernetes.io/part-of: kube-system 74 | spec: 75 | serviceAccountName: minikube-ingress-dns 76 | 
hostNetwork: true 77 | containers: 78 | - name: minikube-ingress-dns 79 | image: gcr.io/k8s-minikube/minikube-ingress-dns@sha256:4abe27f9fc03fedab1d655e2020e6b165faf3bf6de1088ce6cf215a75b78f05f 80 | imagePullPolicy: IfNotPresent 81 | ports: 82 | - containerPort: 53 83 | protocol: UDP 84 | env: 85 | - name: DNS_PORT 86 | value: "53" 87 | - name: POD_IP 88 | valueFrom: 89 | fieldRef: 90 | fieldPath: status.podIP 91 | -------------------------------------------------------------------------------- /doc/index.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Development Environment Docs 2 | 3 | Welcome to this project's documentation. 4 | 5 | Here you can access the complete documentation for this rich development 6 | toolset. 7 | 8 | ## Introduction 9 | 10 | The overall goal of this development environment is providing a full stack for 11 | the easy integration of Kubernetes into an ecosystem, developing applications 12 | with complete integration to Kubernetes, integrating major components into a 13 | Kubernetes deployment, and perfecting infrastructure as code. After all, every 14 | component herein is provisioned, installed, and configured as code. 15 | 16 | ## Installation 17 | 18 | Installation of this repository and dependencies can be found in 19 | [install documentation](./install/index.md). 20 | 21 | ## Major Components 22 | 23 | Each major component is made available in the root directory as an install 24 | script. For instance, `./install_calico.sh` installs the Calico CNI. Each 25 | subdirectory may contain experimental code, a specific collection of code, 26 | or an entire project that integrates with the full stack. 
27 | 28 | A list of components this project makes available and support are: 29 | * [Minikube](https://minikube.sigs.k8s.io/docs/) 30 | * [OpenShift Local](https://developers.redhat.com/products/openshift-local/overview) 31 | * [Calico CNI](https://github.com/projectcalico/calico) 32 | * [Rancher](https://rancher.com) 33 | * [Prometheus](https://github.com/prometheus-operator/prometheus-operator) 34 | * [Grafana](https://github.com/grafana-operator/grafana-operator) 35 | * Google's [Online Boutique](https://github.com/GoogleCloudPlatform/microservices-demo) 36 | 37 | Projects for displaying use cases and assisting with integration are: 38 | * [Monitoring](../monitoring/README.md) 39 | * [Horizontal pod autoscaling](../hpa/README.md) 40 | * [eBPF](../ebpf/README.md) 41 | * [Calico Enterprise](../calico_enterprise/README.md) 42 | 43 | ## Integration 44 | 45 | Information on integration with upstream components can be found in 46 | [integration](./integration/index.md). 47 | 48 | ## Tutorials 49 | 50 | Several use case tutorials can be found in [tutorials](./tutorials/index.md). 51 | 52 | ## Contributing 53 | 54 | Contributions are welcome. We ask everyone to follow the [code of conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/code_of_conduct.md) 55 | or you will be asked not or disallowed to participate. 56 | 57 | Contributing code is encouraged. We ask when submitting a pull request, that it 58 | contain complete documentation and be submitted to this repository's `devel` 59 | branch. 60 | 61 | Also, we encourage all integrations or use case implementations take advantage 62 | of Kubernetes capabilities such as monitoring, autoscaling, and observability. 63 | For each of these capabilities, please use any of this project's existing code. 
64 | -------------------------------------------------------------------------------- /rke2/localenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy 4 | # of this software and associated documentation files (the "Software"), to 5 | # deal in the Software without restriction, including without limitation the 6 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 | # sell copies of the Software, and to permit persons to whom the Software is 8 | # furnished to do so, subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in 11 | # all copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | # THE SOFTWARE. 
20 | # 21 | # Local variables for multipass-rke2 22 | # 23 | # Author(s): Sebastiaan van Steenis 24 | # Justin Cook 25 | 26 | ## Configurable settings 27 | # Name for the cluster/configuration files 28 | NAME="" 29 | # Ubuntu image to use (xenial/bionic/focal/jammy) 30 | IMAGE="jammy" 31 | # RKE2 channel 32 | RKE2_CHANNEL="stable" 33 | # RKE2 version 34 | #RKE2_VERSION="v1.24.12+rke2r1" 35 | # How many master nodes to create 36 | MASTER_NODE_COUNT="1" 37 | # How many compute nodes to create 38 | AGENT_NODE_COUNT="2" 39 | # How many CPUs to allocate to each machine 40 | MASTER_NODE_CPU="2" 41 | AGENT_NODE_CPU="2" 42 | # How much disk space to allocate to each master and compute node 43 | MASTER_DISK_SIZE="20G" 44 | AGENT_DISK_SIZE="40G" 45 | # How much memory to allocate to each machine 46 | MASTER_MEMORY_SIZE="4G" 47 | AGENT_MEMORY_SIZE="8G" 48 | # Preconfigured secret to join the cluster (or autogenerated if empty) 49 | # Note: in order to use this script multiple times to add nodes, this needs 50 | # to be configured. 51 | TOKEN="" 52 | # Hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server 53 | # TLS cert 54 | TLSSAN="rancher.test" 55 | ## End configurable settings 56 | # Where to store the rke2 cluster kubeconfig 57 | LOCALKUBECONFIG="${HOME}/.kube/config-${NAME}" 58 | 59 | if [ -n "${RKE2_VERSION:-}" ] 60 | then 61 | CLOUD_INIT_INSTALL="INSTALL_RKE2_VERSION=${RKE2_VERSION}" 62 | else 63 | CLOUD_INIT_INSTALL="INSTALL_RKE2_CHANNEL=${RKE2_CHANNEL}" 64 | fi 65 | 66 | # Set KUBECTLCMD to local binary if found in PATH. Otherwise, use the the 67 | # Multipass primary master. 
68 | if command -v kubectl >/dev/null 2>&1 69 | then 70 | KUBECTLCMD="$(command -v kubectl)" 71 | else 72 | KUBECTLCMD="${MULTIPASSCMD} exec ${NAME}-rke2-master-1 -- /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml" 73 | fi 74 | -------------------------------------------------------------------------------- /rancher/install_opa_gatekeeper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install OPA Gatekeeper using Rancher's Helm chart. It modifies Open Policy 24 | # Agent's upstream gatekeeper chart that provides policy-based control for 25 | # cloud native environments. 26 | # 27 | # Author: Justin Cook 28 | 29 | set -o errexit 30 | 31 | # shellcheck source=/dev/null 32 | . 
env.sh 33 | 34 | # Add the applicable Helm chards to the repo and update 35 | helm repo add rancher-monitoring-crd http://charts.rancher.io 36 | helm repo add rancher-monitoring http://charts.rancher.io 37 | helm repo update 38 | 39 | # Create the cattle-gatekeeper-system namespace 40 | kubectl create namespace cattle-gatekeeper-system --dry-run=client -o yaml | \ 41 | kubectl apply -f - 42 | 43 | # Install the required charts for rancher-monitoring which is just upstream 44 | # Prometheus and Grafana operators et al with a bit of configuration 45 | helm upgrade --install=true --namespace=cattle-gatekeeper-system --timeout=10m0s \ 46 | --values=https://raw.githubusercontent.com/rancher/charts/release-v2.7/charts/rancher-gatekeeper-crd/101.0.0%2Bup3.9.0/values.yaml \ 47 | --version=101.0.0+up3.9.0 --wait=true rancher-gatekeeper-crd \ 48 | http://charts.rancher.io/assets/rancher-gatekeeper-crd/rancher-gatekeeper-crd-101.0.0+up3.9.0.tgz 49 | helm upgrade --install=true --namespace=cattle-gatekeeper-system --timeout=10m0s \ 50 | --values=https://raw.githubusercontent.com/rancher/charts/release-v2.7/charts/rancher-gatekeeper/101.0.0%2Bup3.9.0/values.yaml \ 51 | --version=101.0.0+up3.9.0 --wait=true rancher-gatekeeper \ 52 | http://charts.rancher.io/assets/rancher-gatekeeper/rancher-gatekeeper-101.0.0+up3.9.0.tgz 53 | 54 | # Wait for all the deployments to become available 55 | for deploy in $(kubectl get deploy -n cattle-gatekeeper-system -o name) 56 | do 57 | kubectl rollout status "${deploy}" -n cattle-gatekeeper-system 58 | done -------------------------------------------------------------------------------- /ingress/nginx/install_nginx_ingress.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal 
in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install NGINX ingress controller 24 | # https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-helm/ 25 | # https://docs.rancherdesktop.io/how-to-guides/setup-NGINX-Ingress-Controller 26 | # 27 | # Author: Justin Cook 28 | 29 | set -o errexit 30 | 31 | # shellcheck source=/dev/null 32 | . env.sh 33 | 34 | ORIG_WRK_DIR="${PWD}" 35 | THIS_WRK_DIR="ingress/nginx" 36 | 37 | # Clone the NGINX controller repository for CRDs 38 | 39 | cd "${THIS_WRK_DIR}" 40 | if [ -d "kubernetes-ingress" ] 41 | then 42 | cd kubernetes-ingress 43 | git pull origin v3.0.2 44 | cd .. 45 | else 46 | git clone https://github.com/nginxinc/kubernetes-ingress.git --branch v3.0.2 47 | #cd kubernetes-ingress/deployments/helm-chart 48 | fi 49 | 50 | # Ignore the cloned directory above with this repo's git. 51 | if ! 
grep "^${THIS_WRK_DIR}/kubernetes-ingress$" "${ORIG_WRK_DIR}/.gitignore" 52 | then 53 | #shellcheck disable=SC2086 54 | if [ -n "$(tail -c1 ${ORIG_WRK_DIR}/.gitignore)" ] 55 | then 56 | echo "" >> "${ORIG_WRK_DIR}/.gitignore" 57 | fi 58 | echo "${THIS_WRK_DIR}/kubernetes-ingress" >> "${ORIG_WRK_DIR}/.gitignore" 59 | fi 60 | 61 | if [ "${RUNTIME}" = "rdctl" ] 62 | then 63 | helm upgrade --install ingress-nginx ingress-nginx \ 64 | --repo https://kubernetes.github.io/ingress-nginx \ 65 | --namespace ingress-nginx --create-namespace 66 | else 67 | # Add the helm repo 68 | helm repo add nginx-stable https://helm.nginx.com/stable 69 | helm repo update 70 | 71 | # Install the chart from the NGINX chart repository 72 | helm upgrade --install nginx-ingress nginx-stable/nginx-ingress \ 73 | --set rbac.create=true \ 74 | --namespace ingress-nginx \ 75 | --create-namespace 76 | fi 77 | 78 | # Wait for NGINX to become available 79 | kubectl rollout status deployment/ingress-nginx-controller -n ingress-nginx -------------------------------------------------------------------------------- /rancher/install_monitoring.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 
14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install Prometheus, Grafana, and configure the monitoring stack 24 | # 25 | # Resources: 26 | # * http://charts.rancher.io/index.yaml 27 | # * https://github.com/rancher/charts/tree/release-v2.7/charts/rancher-monitoring-crd 28 | # * https://github.com/rancher/charts/tree/release-v2.7/charts/rancher-monitoring 29 | # 30 | # Author: Justin Cook 31 | 32 | set -o errexit 33 | 34 | # shellcheck source=/dev/null 35 | . env.sh 36 | 37 | # Add the applicable Helm chards to the repo and update 38 | helm repo add rancher-monitoring-crd http://charts.rancher.io 39 | helm repo add rancher-monitoring http://charts.rancher.io 40 | helm repo update 41 | 42 | # Create the cattle-monitoring-system namespace 43 | kubectl create namespace cattle-monitoring-system --dry-run=client -o yaml | \ 44 | kubectl apply -f - 45 | 46 | # Install the required charts for rancher-monitoring which is just upstream 47 | # Prometheus and Grafana operators et al with a bit of configuration 48 | helm upgrade --install=true --namespace=cattle-monitoring-system --timeout=10m0s \ 49 | --values=https://raw.githubusercontent.com/rancher/charts/release-v2.7/charts/rancher-monitoring-crd/101.0.0%2Bup19.0.3/values.yaml \ 50 | --version=101.0.0+up19.0.3 --wait=true rancher-monitoring-crd \ 51 | http://charts.rancher.io/assets/rancher-monitoring-crd/rancher-monitoring-crd-101.0.0+up19.0.3.tgz 52 | helm upgrade --install=true --namespace=cattle-monitoring-system --timeout=10m0s \ 53 | 
--values=https://raw.githubusercontent.com/rancher/charts/release-v2.7/charts/rancher-monitoring/101.0.0%2Bup19.0.3/values.yaml \ 54 | --version=101.0.0+up19.0.3 --wait=true rancher-monitoring \ 55 | http://charts.rancher.io/assets/rancher-monitoring/rancher-monitoring-101.0.0+up19.0.3.tgz 56 | 57 | # Wait for all the deployments to become available 58 | for deploy in $(kubectl get deploy -n cattle-monitoring-system -o name) 59 | do 60 | kubectl rollout status "${deploy}" -n cattle-monitoring-system 61 | done -------------------------------------------------------------------------------- /doc/install/openshift.md: -------------------------------------------------------------------------------- 1 | # OpenShift Install 2 | 3 | Note: if you are using a proxy, you may experience issues such as 4 | `INFO response 500 500 Internal Server Error –`. In this case, need to 5 | investigate your environment, or if you have the luxury, completely unset proxy 6 | environment variables, e.g., `unset $(compgen -e | grep -i proxy)`. 7 | 8 | 1. [Download and install OpenShift Local](https://console.redhat.com/openshift/create/local) 9 | 10 | 2. [Setup and start OpenShift Local](https://access.redhat.com/documentation/en-us/red_hat_openshift_local/2.5/html/getting_started_guide/using_gsg) 11 | 12 | ## Quickstart 13 | 14 | Please have a look at [`setup_ocp.sh`](../../ocp/setup_ocp.sh). You once 15 | OpenShift Local is installed, you my `bash ./ocp/setup_ocp.sh` and the single- 16 | node cluster will be running locally. 17 | 18 | ``` 19 | $ crc setup 20 | ... 21 | $ crc config set memory 30208 22 | ... 23 | $ crc config set disk-size 100 24 | ... 25 | $ crc config set enable-cluster-monitoring true 26 | Successfully configured enable-cluster-monitoring to true 27 | $ crc start 28 | ... 29 | ``` 30 | 31 | After running, you will need to add `oc` to PATH and update the environment 32 | with the Podman data. 
33 | 34 | ``` 35 | $ eval $(crc oc-env) 36 | $ eval $(crc podman-env) 37 | ``` 38 | 39 | ## Monitoring 40 | 41 | Cluster monitoring is disabled by default in OpenShift Local. To enable, 42 | `crc config set enable-cluster-monitoring true` must be configured prior to 43 | starting the instance. [`setup_ocp.sh`](../../ocp/setup_ocp.sh) does this. 44 | 45 | Enable user workload monitoring, and set reasonable values for a development 46 | environment. 47 | 48 | ``` 49 | $ kubectl apply -f ./ocp/cluster-monitoring-config.yaml 50 | configmap/cluster-monitoring-config created 51 | ``` 52 | 53 | ## Troubleshooting 54 | 55 | To gain access to the machine, try the following ssh command: 56 | 57 | ``` 58 | $ ssh -i '~/.crc/machines/crc/id_ecdsa' -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -o ConnectTimeout=3 -p 2222 core@$(crc ip) 59 | Red Hat Enterprise Linux CoreOS 412.86.202303211731-0 60 | Part of OpenShift 4.12, RHCOS is a Kubernetes native operating system 61 | managed by the Machine Config Operator (`clusteroperator/machine-config`). 62 | 63 | WARNING: Direct SSH access to machines is not recommended; instead, 64 | make configuration changes via `machineconfig` objects: 65 | https://docs.openshift.com/container-platform/4.12/architecture/architecture-rhcos.html 66 | 67 | --- 68 | [core@crc-8tnb7-master-0 ~]$ 69 | ``` 70 | 71 | Accessing the OpenShift cluster by CLI on the machine can be achieved as 72 | follows: 73 | 74 | ``` 75 | [core@crc-8tnb7-master-0 ~]$ oc --context admin --cluster crc --kubeconfig /opt/kubeconfig get deploy -A 76 | ... 
77 | ``` 78 | ## References 79 | 80 | * [Installing OCP on any platform](https://docs.openshift.com/container-platform/4.10/installing/installing_platform_agnostic/installing-platform-agnostic.html) 81 | * [OpenShift Local Create](https://console.redhat.com/openshift/create/local) 82 | * [Platform agnostic installer](https://console.redhat.com/openshift/install/platform-agnostic) 83 | * [Starting monitoring](https://crc.dev/crc/#starting-monitoring_gsg) -------------------------------------------------------------------------------- /microk8s/README.md: -------------------------------------------------------------------------------- 1 | # MicroK8s 2 | 3 | ## Introduction 4 | A lightweight Kubernetes distro that claims zero-ops, and pure-upstream Kubernetes, 5 | from developer workstations to production. 6 | 7 | https://microk8s.io 8 | 9 | ## Quickstart 10 | To get up and running, ensure `microk8s` is installed with `multipass` and 11 | `jq`. If those applications are in your PATH, then execute the following 12 | script. The script is idempotent and can be run to create and join nodes: 13 | 14 | ``` 15 | $ bash microk8s/setup_microk8s.sh 16 | ... 17 | ``` 18 | 19 | The cluster can be destroyed with `bash microk8s/remove_microk8s.sh`. 20 | 21 | ## Getting Started 22 | On macOS: 23 | 24 | ``` 25 | $ brew install ubuntu/microk8s/microk8s 26 | ==> Tapping ubuntu/microk8s 27 | Cloning into '/usr/local/Homebrew/Library/Taps/ubuntu/homebrew-microk8s'... 28 | ... 29 | ``` 30 | 31 | Once installed, you may enable Kubernetes by simply executing the following: 32 | 33 | ``` 34 | $ microk8s install 35 | warning: "--mem" long option will be deprecated in favour of "--memory" in a future release.Please update any scripts, etc. 36 | Launched: microk8s-vm 37 | 2023-03-30T11:48:22+11:00 INFO Waiting for automatic snapd restart... 38 | microk8s (1.26/stable) v1.26.1 from Canonical✓ installed 39 | microk8s-integrator-macos 0.1 from Canonical✓ installed 40 | MicroK8s is up and running. 
See the available commands with `microk8s --help`. 41 | ``` 42 | 43 | Wait for Kubernetes to become ready. 44 | 45 | ``` 46 | $ microk8s status --wait-ready 47 | ``` 48 | 49 | Install the required services to support the development environment: 50 | 51 | ``` 52 | $ microk8s enable ingress dns registry 53 | Infer repository core for addon ingress 54 | Infer repository core for addon dns 55 | Infer repository core for addon registry 56 | ... 57 | ``` 58 | 59 | ## Kubernetes Version 60 | 61 | In order to install a specific version of Kubernetes on `microk8s`, you will 62 | need to configure snap on the virtual machine that has been created with 63 | install. 64 | 65 | ``` 66 | $ multipass shell microk8s-vm 67 | ... 68 | ubuntu@microk8s-vm:~$ sudo snap refresh microk8s --classic --channel=1.24/stable 69 | microk8s (1.24/stable) v1.24.12 from Canonical✓ refreshed 70 | ubuntu@microk8s-vm:~$ logout 71 | $ microk8s stop 72 | Stopped. 73 | $ microk8s start 74 | Started. 75 | ``` 76 | 77 | More information can be found on [snap channels here](https://microk8s.io/docs/setting-snap-channel). 78 | 79 | ## Clean Up 80 | When finished, you may delete all nodes, for example: 81 | 82 | ``` 83 | $ bash microk8s/remove_microk8s.sh 84 | ... 85 | ``` 86 | 87 | ## Troubleshooting 88 | 89 | If you have previously used `multipass`, you may find your previous 90 | configuration is incompatible with `microk8s`. In this situation, you need to 91 | refer to the relevant documentation for your platform. 92 | 93 | https://multipass.run/docs 94 | 95 | In some cases, you may have used a driver that is not default for the platform. 96 | This can be changed as an example for macOS: 97 | 98 | ``` 99 | $ multipass authenticate 100 | ... 
101 | $ sudo -E multipass set local.driver=hyperkit 102 | ``` -------------------------------------------------------------------------------- /app/install_boutique.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install Online Boutique for Kubernetes sample and load testing 24 | # 25 | # Author: Justin Cook 26 | 27 | set -o errexit 28 | 29 | # shellcheck source=/dev/null 30 | . env.sh 31 | 32 | ORIG_WRK_DIR="$(pwd)" 33 | THIS_WRK_DIR="app" 34 | 35 | # Check if the virtualenv exists. If not, create it. 36 | if [ ! 
-d "venv" ] 37 | then 38 | virtualenv venv 39 | fi 40 | 41 | # Activate the virtualenv and install locust 42 | # shellcheck source=/dev/null 43 | source ./venv/bin/activate 44 | pip install locust 45 | 46 | # Check if the boutique, aka microservices-demo, exists. If not, clone it. 47 | cd "${THIS_WRK_DIR}" 48 | if [ ! -d "microservices-demo" ] 49 | then 50 | git clone https://github.com/GoogleCloudPlatform/microservices-demo.git 51 | cd microservices-demo 52 | else 53 | cd microservices-demo 54 | git pull 55 | fi 56 | 57 | if ! grep "^${THIS_WRK_DIR}/microservices-demo$" "${ORIG_WRK_DIR}/.gitignore" 58 | then 59 | #shellcheck disable=SC2086 60 | if [ -n "$(tail -c1 ${ORIG_WRK_DIR}/.gitignore)" ] 61 | then 62 | echo "" >> "${ORIG_WRK_DIR}/.gitignore" 63 | fi 64 | echo "${THIS_WRK_DIR}/microservices-demo" >> "${ORIG_WRK_DIR}/.gitignore" 65 | fi 66 | 67 | # Install the boutique. 68 | kubectl apply -f ./release/kubernetes-manifests.yaml 69 | 70 | # Wait for the boutique to become available. 
71 | for deploy in $(kubectl get deploy -n default -o name) 72 | do 73 | kubectl rollout status "${deploy}" 74 | done 75 | 76 | if [ "${RUNTIME}" = "minikube" ] 77 | then 78 | BOUTIQUE="$(minikube ip):$(kubectl get service frontend-external -o \ 79 | jsonpath='{.spec.ports[*].nodePort}{"\n"}')" 80 | else 81 | # Get the IP:Port and display to the user 82 | #BOUTIQUE=$(kubectl get service frontend-external -o \ 83 | # jsonpath='{.spec.clusterIP}{":"}{.spec.ports[*].nodePort}{"\n"}') 84 | BOUTIQUE="127.0.0.1" 85 | printf "\nPlease forward service/frontend to localhost and for example:\n" 86 | fi 87 | 88 | printf "\n\nOpen browser to: " 89 | printf "http://%s\n\n" "${BOUTIQUE}" 90 | 91 | # Run locust 92 | cd src/loadgenerator || exit 93 | locust -------------------------------------------------------------------------------- /cni/install_calico.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022-2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Install the Tigera Calico operator and deploy Calico CNI 24 | # Watch pods until calico-kube-controllers is deployed 25 | # 26 | # Requires: kubectl 27 | # 28 | # Author: Justin Cook 29 | 30 | set -o errexit 31 | 32 | # shellcheck source=/dev/null 33 | . env.sh 34 | 35 | CALICOSOURCE="https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml" 36 | 37 | # Install Tigera operator. In order to compensate for previous runs, remove the 38 | # previous applied resources. This is not ideal, but Calico uses more space for 39 | # CRDs than is allowed by apply. 40 | # TODO: switch to Helm install https://docs.tigera.io/calico/latest/getting-started/kubernetes/helm 41 | kubectl create -f ${CALICOSOURCE} --dry-run=client -o yaml | \ 42 | kubectl delete -f - || /usr/bin/true 43 | 44 | kubectl create -f ${CALICOSOURCE} 45 | 46 | # Wait on the operator to run 47 | kubectl rollout status deploy/tigera-operator -n tigera-operator 48 | 49 | # Install Calico using Installation kind 50 | cat </dev/null && break 81 | sleep 1 82 | done 83 | 84 | # Display pods until calico-kube-controllers rolls out 85 | kubectl get pods -A -w & 86 | watch_pid="$!" 
87 | 88 | # Wait on calico-kube-controllers deployment 89 | kubectl rollout status deploy/calico-kube-controllers -n calico-system 90 | 91 | kill -15 ${watch_pid} 92 | wait ${watch_pid} || /usr/bin/true 93 | -------------------------------------------------------------------------------- /kind/quickstart_kind.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook and Kind Authors 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Get a kind cluster up and running with Colima and internal registry. 
24 | # 25 | # References: 26 | # * https://kind.sigs.k8s.io/docs/user/local-registry/ 27 | # * https://github.com/abiosoft/colima 28 | # 29 | # Requires: 30 | # * colima 31 | # * kind 32 | # * kubectl 33 | 34 | set -o errexit 35 | shopt -s expand_aliases 36 | 37 | # Setup Colima 38 | # This should already exist with kind cluster due to below issue 39 | # https://github.com/containerd/nerdctl/issues/349 40 | # --runtime containerd 41 | colima start --cpu 6 --memory 28 --disk 100 42 | 43 | # Alias docker to use lima nerdctl 44 | colima nerdctl install 45 | ln -s "$(which nerdctl)" docker 46 | PATH="$(pwd):$PATH" 47 | export PATH 48 | 49 | if ! grep 'alias docker="nerdctl"' "${HOME}/.bash_aliases" 2>/dev/null 50 | then 51 | echo 'alias docker="nerdctl"' >> "${HOME}/.bash_aliases" 52 | fi 53 | # shellcheck source=/dev/null 54 | source "${HOME}/.bash_aliases" 55 | 56 | # Wait on docker 57 | echo -n "Waiting on docker" 58 | while : 59 | do 60 | docker ps >/dev/null 2>&1 && break 61 | echo -n "." 
62 | sleep 2 63 | done 64 | echo 65 | 66 | # Create a registry container unless it already exists 67 | reg_name='kind-registry' 68 | reg_port='5001' 69 | if [ "$(docker inspect -f '{{.State.Running}}' \"${reg_name}\")" != 'true' ] 70 | then 71 | docker run -d --restart=always -p "127.0.0.1:${reg_port}:5000" \ 72 | --name "${reg_name}" registry:2 73 | fi 74 | 75 | # Create the kind cluster 76 | # https://github.com/containerd/nerdctl/issues/349 77 | kind create cluster --config kind/calico_cluster.yaml 78 | 79 | # connect the registry to the cluster network if not already connected 80 | if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' \"${reg_name}\")" = 'null' ] 81 | then 82 | docker network connect "kind" "${reg_name}" 83 | fi 84 | 85 | # Document the local registry 86 | # https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry 87 | cat < /usr/lib/systemd/system/docker.service 73 | # EOF 74 | 75 | probes: 76 | - mode: readiness 77 | description: Docker listening 78 | script: | 79 | #!/bin/bash 80 | set -eux -o pipefail 81 | if ! 
timeout 60s bash -c "until [ $(ss -Hl | awk '$5 == "*:2375"{print$2}') = "LISTEN" ]; do sleep 3; done"; then 82 | echo >&2 "Docker is not listening" 83 | exit 1 84 | fi 85 | hint: | 86 | Docker is not listening on tcp://0.0.0.0:2375 87 | 88 | cpuType: 89 | aarch64: null 90 | x86_64: null 91 | 92 | firmware: 93 | legacyBIOS: null 94 | 95 | video: 96 | display: null 97 | 98 | networks: 99 | 100 | propagateProxyEnv: null 101 | 102 | hostResolver: 103 | enabled: null 104 | ipv6: null 105 | hosts: 106 | # guest.name: 127.1.1.1 107 | # host.name: host.lima.internal 108 | # dns: 109 | # - 1.1.1.1 110 | # - 1.0.0.1 111 | 112 | # ===================================================================== # 113 | # GLOBAL DEFAULTS AND OVERRIDES 114 | # ===================================================================== # 115 | 116 | 117 | 118 | # ===================================================================== # 119 | # END OF TEMPLATE 120 | # ===================================================================== # 121 | -------------------------------------------------------------------------------- /kind/docker.yaml: -------------------------------------------------------------------------------- 1 | # Example to use Docker instead of containerd & nerdctl 2 | # $ limactl start ./docker.yaml 3 | # $ limactl shell docker docker run -it -v $HOME:$HOME --rm alpine 4 | 5 | # To run `docker` on the host (assumes docker-cli is installed): 6 | # $ export DOCKER_HOST=$(limactl list docker --format 'unix://{{.Dir}}/sock/docker.sock') 7 | # $ docker ... 8 | 9 | # This example requires Lima v0.8.0 or later 10 | images: 11 | # Try to use release-yyyyMMdd image if available. Note that release-yyyyMMdd will be removed after several months. 
12 | - location: "https://cloud-images.ubuntu.com/releases/22.04/release-20220712/ubuntu-22.04-server-cloudimg-amd64.img" 13 | arch: "x86_64" 14 | digest: "sha256:86481acb9dbd62e3e93b49eb19a40c66c8aa07f07eff10af20ddf355a317e29f" 15 | - location: "https://cloud-images.ubuntu.com/releases/22.04/release-20220712/ubuntu-22.04-server-cloudimg-arm64.img" 16 | arch: "aarch64" 17 | digest: "sha256:e1ce033239f0038dca5ef09e582762ba0d0dfdedc1d329bc51bb0e9f5057af9d" 18 | # Fallback to the latest release image. 19 | # Hint: run `limactl prune` to invalidate the cache 20 | - location: "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.img" 21 | arch: "x86_64" 22 | - location: "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-arm64.img" 23 | arch: "aarch64" 24 | 25 | mounts: 26 | - location: "~" 27 | - location: "/tmp/lima" 28 | writable: true 29 | # containerd is managed by Docker, not by Lima, so the values are set to false here. 30 | containerd: 31 | system: false 32 | user: false 33 | provision: 34 | - mode: system 35 | # This script defines the host.docker.internal hostname when hostResolver is disabled. 36 | # It is also needed for lima 0.8.2 and earlier, which does not support hostResolver.hosts. 37 | # Names defined in /etc/hosts inside the VM are not resolved inside containers when 38 | # using the hostResolver; use hostResolver.hosts instead (requires lima 0.8.3 or later). 
39 | script: | 40 | #!/bin/sh 41 | sed -i 's/host.lima.internal.*/host.lima.internal host.docker.internal/' /etc/hosts 42 | - mode: system 43 | script: | 44 | #!/bin/bash 45 | set -eux -o pipefail 46 | command -v docker >/dev/null 2>&1 && exit 0 47 | export DEBIAN_FRONTEND=noninteractive 48 | curl -fsSL https://get.docker.com | sh 49 | # NOTE: you may remove the lines below, if you prefer to use rootful docker, not rootless 50 | systemctl disable --now docker 51 | apt-get install -y uidmap dbus-user-session 52 | - mode: user 53 | script: | 54 | #!/bin/bash 55 | set -eux -o pipefail 56 | systemctl --user start dbus 57 | dockerd-rootless-setuptool.sh install 58 | docker context use rootless 59 | probes: 60 | - script: | 61 | #!/bin/bash 62 | set -eux -o pipefail 63 | if ! timeout 30s bash -c "until command -v docker >/dev/null 2>&1; do sleep 3; done"; then 64 | echo >&2 "docker is not installed yet" 65 | exit 1 66 | fi 67 | if ! timeout 30s bash -c "until pgrep rootlesskit; do sleep 3; done"; then 68 | echo >&2 "rootlesskit (used by rootless docker) is not running" 69 | exit 1 70 | fi 71 | hint: See "/var/log/cloud-init-output.log". in the guest 72 | hostResolver: 73 | # hostResolver.hosts requires lima 0.8.3 or later. Names defined here will also 74 | # resolve inside containers, and not just inside the VM itself. 
75 | hosts: 76 | host.docker.internal: host.lima.internal 77 | portForwards: 78 | - guestSocket: "/run/user/{{.UID}}/docker.sock" 79 | hostSocket: "{{.Dir}}/sock/docker.sock" 80 | message: | 81 | To run `docker` on the host (assumes docker-cli is installed), run the following commands: 82 | ------ 83 | docker context create lima-{{.Name}} --docker "host=unix://{{.Dir}}/sock/docker.sock" 84 | docker context use lima-{{.Name}} 85 | docker run hello-world 86 | ------ -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/README.md: -------------------------------------------------------------------------------- 1 | # tcp-exporter 2 | 3 | ## Introduction 4 | 5 | This sidecar uses conntrack or sampling to provide a count of TCP connections 6 | that are in the ESTABLISHED state. It provides Prometheus metrics scraped at the 7 | endpoint http://x.x.x.x:9100/metrics by default. 8 | 9 | These metrics provide insight and can be used, for instance, with Keda which 10 | provides a base or ScaledObject(s). 11 | 12 | ## Build 13 | 14 | In the `sidecar` (this) directory, you will notice a `Dockerfile` and 15 | `tcp_exporter.py`. These files can be used with `docker build ...` or another 16 | utility such as `podman build ...` to build an image. An example is displayed 17 | below. 18 | 19 | This example uses Colima on macOS: 20 | 21 | ``` 22 | $ limactl start default 23 | ... 24 | $ alias docker="lima nerdctl" 25 | ``` 26 | 27 | At this point, you will have a running environment one can use to build the 28 | artefact (image) and push to the registry on Minikube. You need to provide 29 | a route from your host to Minikube like so: 30 | 31 | ``` 32 | $ docker run --rm -it --network=host alpine ash -c \ 33 | "apk add socat && socat TCP-LISTEN:${DOCKER_REG_PORT},reuseaddr,fork TCP:$(minikube ip):${DOCKER_REG_PORT}" 34 | ... 35 | ``` 36 | 37 | Now, let's build and push the image to our local registry. 
You will need to 38 | leave the above running and switch to another window, or perhaps you're clever 39 | enough to have spawned a daemon: 40 | 41 | ``` 42 | $ docker build -t localhost:${DOCKER_REG_PORT}/boutique/tcp-exporter . 43 | [+] Building 3.2s (8/8) FINISHED 44 | ... 45 | unpacking localhost:${DOCKER_REG_PORT}/boutique/tcp-exporter:latest (sha256:94dc80bd667c6cad4e89e1ff4b31903447a98c63cb11ab2af9d098ae8a97db6b)...done 46 | $ docker push localhost:${DOCKER_REG_PORT}/boutique/tcp-exporter:latest 47 | INFO[0000] pushing as a reduced-platform image (application/vnd.docker.distribution.manifest 48 | ... 49 | elapsed: 6.2 s total: 20.0 M (3.2 MiB/s) 50 | $ docker push localhost:${DOCKER_REG_PORT}/boutique/tcp-exporter 51 | ... 52 | ``` 53 | 54 | ## Run 55 | 56 | The utility requires conntrack and CAP_NET_ADMIN capability to run efficiently 57 | and provide an accurate count. If this is not available, the utility will fall 58 | back to sampling from userspace which is less accurate although still useful. 59 | 60 | The code can be executed like the following for more insight: 61 | 62 | ``` 63 | $ ./tcp_exporter.py 64 | usage: tcp_exporter.py 65 | 66 | example: tcp_exporter.py 9100 8080 67 | ``` 68 | 69 | 70 | ``` 71 | $ python3 tcp_exporter.py 9100 8080 72 | Server started at localhost: 9100 73 | 127.0.0.1 - - [29/Mar/2022 10:20:23] "GET /metrics HTTP/1.1" 200 - 74 | ``` 75 | 76 | ## Container 77 | 78 | This solution was designed for Kubernetes, and as such is intended to be run in 79 | a sidecar along the primary application. If you've got the container pushed to 80 | a registry, you can patch your Deployment spec as follows to provide the 81 | metrics. 
82 | 83 | ``` 84 | # kubectl patch --patch-file does not accept here docs :-/ 85 | cat << EOF >/tmp/$$.tmp 86 | spec: 87 | template: 88 | spec: 89 | containers: 90 | - name: tcp-exporter 91 | image: localhost:${DOCKER_REG_PORT}/boutique/tcp-exporter:latest 92 | imagePullPolicy: IfNotPresent 93 | securityContext: 94 | capabilities: 95 | add: ["NET_ADMIN"] 96 | args: ["9100", "${SVCPORT}"] 97 | ports: 98 | - containerPort: 9100 99 | protocol: TCP 100 | EOF 101 | kubectl patch "${deploy}" -n default --patch-file /tmp/$$.tmp 102 | ``` 103 | For more information, and an example used with Keda for scaling, please see 104 | `hpa/configure_hpa.sh` in this repository. 105 | 106 | ## References 107 | 108 | * https://linux.die.net/man/7/capabilities -------------------------------------------------------------------------------- /platform.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # These are aliases that need to be sourced based on platform -- for sake of 24 | # simplicity. There are other variables here as well. 25 | # https://superuser.com/questions/708462/alias-scoping-in-bash-functions 26 | # 27 | # Author: Justin Cook 28 | 29 | if [ "${RUNTIME}" == "minikube" ] 30 | then 31 | # Installing behind a proxy or VPN can cause problems 32 | # https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/ 33 | # If a proxy is set, then ensure specific subnets to K8s bypass the proxy. 34 | if [ -n "${HTTPS_PROXY:-}" ] || [ -n "${HTTP_PROXY:-}" ] 35 | then 36 | for np in no_proxy NO_PROXY 37 | do 38 | # Use inline case statements since fallthrough with ;& is not supported 39 | # before Bash 4. 
40 | case ${!np:-} in 41 | (!(*"${SERVICECLUSTERIPRANGE}"*)) 42 | eval ${np}+=",${SERVICECLUSTERIPRANGE}" 43 | ;; 44 | esac 45 | case ${!np:-} in 46 | (!(*"${HOSTONLYCIDR}"*)) 47 | eval ${np}+=",${HOSTONLYCIDR}" 48 | ;; 49 | esac 50 | case ${!np:-} in 51 | (!(*"${MINIKUBEDOCKERCLST1}"*)) 52 | eval ${np}+=",${MINIKUBEDOCKERCLST1}" 53 | ;; 54 | esac 55 | case ${!np:-} in 56 | (!(*"${MINIKUBEKVM2DRIVER}"*)) 57 | eval ${np}+=",${MINIKUBEKVM2DRIVER}" 58 | ;; 59 | esac 60 | case ${!np:-} in 61 | (!(*"${MINIKUBENODENET}"*)) 62 | eval ${np}+=",${MINIKUBENODENET}" 63 | ;; 64 | esac 65 | done 66 | fi 67 | if minikube status 2>/dev/null 68 | then 69 | RUNNING=true 70 | else 71 | RUNNING=false 72 | fi 73 | alias kubectl="minikube kubectl --" 74 | elif [ "${RUNTIME}" = "crc" ] 75 | then 76 | if (which crc && crc status) 77 | then 78 | RUNNING=true 79 | #shellcheck disable=SC2046 80 | eval $(crc oc-env) 81 | fi 82 | alias kubectl="oc" 83 | elif [ "${RUNTIME}" = "rdctl" ] 84 | then 85 | if rdctl shell "id" 2>/dev/null 86 | then 87 | case :$PATH: in 88 | *:$HOME/.rd/bin:*) ;; 89 | *) export PATH=$HOME/.rd/bin:$PATH ;; 90 | esac 91 | RUNNING=true 92 | else 93 | RUNNING=false 94 | fi 95 | elif [ "${RUNTIME}" = "microk8s" ] 96 | then 97 | #alias kubectl="microk8s kubectl --" 98 | if microk8s status 2>/dev/null 99 | then 100 | RUNNING=true 101 | #export KUBECONFIG="${HOME}/.kube/config-microk8s" 102 | else 103 | RUNNING=false 104 | fi 105 | elif [ "${RUNTIME}" = "rke2" ] 106 | then 107 | alias rke2="multipass" 108 | MULTIPASSCMD="$(command -v multipass)" 109 | RUNNING=false 110 | else 111 | alias kubectl="kubectl --kubeconfig=kubeconfig --insecure-skip-tls-verify=true" 112 | #shellcheck disable=SC2034 113 | RUNNING=true 114 | fi -------------------------------------------------------------------------------- /setup_k8s.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # 
Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Setup a three-node Kubernetes cluster with ingress, ingress-dns, and 24 | # metrics-server with CNI plugin. Configure the pod network as ${POD_NET_CIDR}. 25 | # Use Kubernetes v1.23.4 as that is latest supported by Rancher. Finally, set 26 | # resolver to forward .test DNS queries to this cluster. 27 | # 28 | # This code needs permissions to configure DNS accordingly. It uses `sudo` to 29 | # make the necessary changes. The commands needing elevated permissions vary 30 | # by platform. 31 | # 32 | # If you are on Linux with NetworkManager, the changes aren't something to 33 | # worry about. Otherwise, the code assumes dnsmasq is available and attempts to 34 | # configure as necessary. 35 | # 36 | # Author: Justin Cook 37 | 38 | # shellcheck source=/dev/null 39 | . 
env.sh 40 | 41 | # The below options can be used with a docker provider such as lima/colima. 42 | # --driver=docker \ 43 | # --cache-images=true \ 44 | # --container-runtime=containerd \ 45 | 46 | #shellcheck disable=SC2140 47 | minikube --addons=ingress,ingress-dns,metrics-server,registry \ 48 | --insecure-registry="10.0.0.0/24" \ 49 | --network-plugin=cni \ 50 | --extra-config="kubeadm.pod-network-cidr=${POD_NET_CIDR}" \ 51 | --service-cluster-ip-range="${SERVICECLUSTERIPRANGE}" \ 52 | --memory=8g \ 53 | --kubernetes-version=v1.24.11 \ 54 | --nodes=3 \ 55 | --insecure-registry="ghcr.io","registry.k8s.io","k8s.gcr.io","gcr.io" \ 56 | start 57 | 58 | if [ ! -d "/etc/resolver" ] 59 | then 60 | sudo mkdir /etc/resolver 61 | fi 62 | 63 | PLATFORM=$(uname) 64 | case ${PLATFORM} in 65 | Darwin) 66 | printf "Configuring macOS to forward .test to Minikube\n" 67 | sudo bash -c "cat - > /etc/resolver/minikube-test < \ 88 | /etc/NetworkManager/dnsmasq.d/minikube.conf" 89 | sudo systemctl restart NetworkManager.service 90 | fi 91 | elif which resolvconf && systemctl status resolvconf.service 92 | then 93 | sudo bash -c "cat - > /etc/resolvconf/resolv.conf.d/base <&2 103 | printf "Please configure .test resolution to Minikube\n" 1>&2 104 | fi >/dev/null 105 | ;; 106 | *) 107 | printf "Unknown platform: %s" "${PLATFORM}\n" 1>&2 108 | printf "Unable to configure .test resolution\n" 1>&2 109 | exit 255 110 | ;; 111 | esac 112 | -------------------------------------------------------------------------------- /hpa/install_sidecar_init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Configure sidecar and init container for custom metrics 24 | # 25 | # References: 26 | # * https://www.nginx.com/blog/microservices-march-reduce-kubernetes-latency-with-autoscaling/ 27 | # * https://stackoverflow.com/questions/62578789/kubectl-patch-is-it-possible-to-add-multiple-values-to-an-array-within-a-sinlge 28 | # 29 | # Requires: jq, yq 30 | # 31 | # Author: Justin Cook 32 | 33 | set -o errexit nounset 34 | 35 | # shellcheck source=/dev/null 36 | . env.sh 37 | 38 | # Configure Docker image to use appropriate image name 39 | if ! ${IGNORE_DOCKER_CONFIG} 40 | then 41 | IMAGE_REGISTRY="localhost:${DOCKER_REG_PORT}/boutique/" 42 | else 43 | IMAGE_REGISTRY="" 44 | fi 45 | 46 | # Patch each deployment with custom exporter to make metrics available via 47 | # Prometheus. Patch the cooresponding service to include said endpoint 48 | # then follow up by creating a ServiceMonitor. Give yourself a pat on the back, 49 | # Prometheus is now collecting metrics from your clever exporter. 
50 | for deploy in $(kubectl get deploy -n "${PROJECT_NAMESPACE}" -o name | grep -E 'service$') 51 | do 52 | # shellcheck disable=SC2086 53 | SVCPORT="$(kubectl get svc ${deploy#*/} -n ${PROJECT_NAMESPACE} -o \ 54 | jsonpath='{.spec.ports[-1].port}' 2>/dev/null)" || continue 55 | 56 | printf "Patching service %s with TCP %s\n" "${deploy#*/}" "${SVCPORT}" 57 | kubectl patch svc "${deploy#*/}" -n "${PROJECT_NAMESPACE}" \ 58 | -p='{"spec": {"type": "ClusterIP","ports": [{"name": "prometheus","port": 9100,"protocol": "TCP","targetPort": 9100}]}}' 59 | 60 | kubectl patch svc "${deploy#*/}" -n "${PROJECT_NAMESPACE}" \ 61 | -p="{\"metadata\": {\"labels\": {\"k8s-app\": \"${deploy#*/}\"}}}" 62 | 63 | printf "Patching %s\n" "${deploy}" 64 | 65 | # Create a JSON patch for the tcp-exporter container 66 | TEPATCH=$(yq -o json -I0 <<-EOF 67 | name: tcp-exporter 68 | image: "${IMAGE_REGISTRY}tcp-exporter:latest" 69 | imagePullPolicy: Always 70 | securityContext: 71 | capabilities: 72 | drop: 73 | - all 74 | add: 75 | - NET_ADMIN 76 | args: ["9100", "${SVCPORT}"] 77 | ports: 78 | - containerPort: 9100 79 | protocol: TCP 80 | EOF 81 | ) 82 | 83 | # Create a JSON patch for the conntrack init container 84 | CIPATCH=$(yq -o json -I0 <<-EOF 85 | name: init-networking 86 | image: "${IMAGE_REGISTRY}conntrack-network-init:latest" 87 | resources: {} 88 | terminationMessagePath: /dev/termination-log 89 | terminationMessagePolicy: File 90 | imagePullPolicy: Always 91 | securityContext: 92 | capabilities: 93 | drop: 94 | - all 95 | add: 96 | - NET_ADMIN 97 | - NET_RAW 98 | - DAC_READ_SEARCH 99 | - DAC_OVERRIDE 100 | EOF 101 | ) 102 | 103 | # Apply the tcp-exporter and init container patches and enable shared process 104 | # namespace 105 | kubectl get "${deploy}" -n "${PROJECT_NAMESPACE}" -o json | \ 106 | jq ".spec.template.spec.containers[1] = ${TEPATCH}" | \ 107 | jq ".spec.template.spec.initContainers[0] = ${CIPATCH}" | \ 108 | jq '.spec.template.spec.shareProcessNamespace = true' | \ 
109 | kubectl apply -f - 110 | 111 | # Apply the ServiceMonitor to enable Prometheus scraping 112 | # working: namespace: ${PROMETHEUS_NS} 113 | kubectl apply -f - < /dev/null 2>&1 38 | then 39 | echo "command: ${cmd} could not be found" 40 | exit 1 41 | fi 42 | done 43 | 44 | NODECMDS=$(cat <<__CMD__ 45 | sudo snap install microk8s --classic --channel="${K8SVER}" 46 | sudo iptables -P FORWARD ACCEPT 47 | sudo usermod -a -G microk8s ubuntu 48 | newgrp microk8s 49 | __CMD__ 50 | ) 51 | 52 | # Install microk8s and wait until ready 53 | # The hard way: https://microk8s.io/docs/install-multipass 54 | # https://microk8s.io/docs/clustering 55 | # https://microk8s.io/docs/install-multipass 56 | for node in microk8s-vm{,-node{1,2}} 57 | do 58 | if ! multipass info "${node}" 2>/dev/null 59 | then 60 | echo "Creating: ${node}" 61 | multipass launch --name "${node}" --memory 8G --disk 40G 62 | fi 63 | 64 | # Remove motd from the master as it's too noisy 65 | # https://stackoverflow.com/questions/41706150/commenting-out-lines-in-a-file-using-a-bash-script 66 | echo "sudo sed -i '/^session optional pam_motd\.so/s/^/#/' /etc/pam.d/sshd" |\ 67 | multipass shell "${node}" | cat - >/dev/null 2>&1 68 | 69 | # Configure microk8s to use correct Kubernetes version 70 | # https://microk8s.io/docs/setting-snap-channel 71 | # Get the version of microk8s snap and configure if mismatched 72 | ver=$(echo "snap list microk8s" | multipass shell "${node}" | tail -n1 | awk '{print$4}') 73 | if [ "${ver:=0}" != "${K8SVER}" ] 74 | then 75 | echo "Configuring ${node} to use ${K8SVER}" 76 | echo "${NODECMDS}" | multipass shell "${node}" 77 | fi 78 | 79 | # Join node(s) to master and create a Kubernetes cluster 80 | if [ "${node}" != "microk8s-vm" ] 81 | then 82 | if "${KUBECTL}" get "node/${node}" 2>/dev/null ; then continue ; fi 83 | # Get the node's IP address 84 | NIP=$(multipass info "${node}" --format json | jq -r ".info.\"${node}\".ipv4[0]") 85 | # Add worker to /etc/hosts on master 86 | echo 
"sudo bash -c \"echo ${NIP} ${node} | cat - >>/etc/hosts\"" |\ 87 | multipass shell microk8s-vm 88 | # Get a token to add node on master 89 | microk8s add-node | grep "microk8s\ join\ .*\ --worker" |\ 90 | multipass shell "${node}" 91 | # Wait for the node to appear on the control plane 92 | while : 93 | do 94 | sleep 5 95 | if "${KUBECTL}" get node/"${node}" >/dev/null 2>&1 96 | then 97 | sleep 5 98 | break 99 | fi 100 | done 101 | else 102 | microk8s status --wait-ready 103 | # Helm addon is not reliable. So, export kubeconfig, merge with 104 | # existing, and set context. 105 | if [ ! -d "${HOME}/.kube" ] 106 | then 107 | mkdir "${HOME}/.kube" 108 | fi 109 | "${KUBECTL}" config delete-context microk8s-cluster || /usr/bin/true 110 | "${KUBECTL}" config delete-cluster microk8s-cluster || /usr/bin/true 111 | microk8s config > "${LOCALKUBECONFIG}" 112 | chmod 0600 "${LOCALKUBECONFIG}" 113 | export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}:${LOCALKUBECONFIG}" 114 | "${KUBECTL}" config view --flatten > "${KUBECONFIG%%:*}" 115 | "${KUBECTL}" config set-context microk8s-cluster --namespace default 116 | fi 117 | done 118 | 119 | # Wait for nodes to become ready 120 | "${KUBECTL}" wait --for=condition=Ready nodes --all --timeout=600s 121 | 122 | # Voila! 
Enable the correct services and wait for ingress to deploy 123 | echo "Enabling DNS, internal registry, and ingress" 124 | microk8s enable dns registry ingress 125 | "${KUBECTL}" rollout status ds/nginx-ingress-microk8s-controller -n ingress 126 | -------------------------------------------------------------------------------- /ocp/setup_ocp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022-2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 
22 | # 23 | # Setup OpenShift Local 24 | # 25 | # Requires: OpenShift Local installed and CRC running 26 | # 27 | # References: 28 | # * https://discussion.fedoraproject.org/t/recommended-way-of-adding-ca-certificates/15974/4 29 | # * https://docs.openshift.com/container-platform/4.11/networking/configuring-a-custom-pki.html 30 | # 31 | # Author: Justin Cook 32 | 33 | set -o errexit nounset 34 | 35 | # shellcheck source=/dev/null 36 | . env.sh 37 | 38 | # The PID placed in the background 39 | WPID=0 40 | 41 | cleanup() { 42 | kill -9 ${WPID} 2> >(printer) > >(printer) 43 | } 44 | trap cleanup EXIT 45 | 46 | smashit() { 47 | cleanup 48 | crc cleanup 49 | } 50 | trap smashit INT 51 | 52 | # Setup crc 53 | crc setup 54 | crc config set cpus 8 55 | crc config set memory 30208 56 | crc config set disk-size 100 57 | crc config set enable-cluster-monitoring true 58 | crc config set kubeadmin-password kubeadmin 59 | crc config set pull-secret-file "$(pwd)/private/pull-secret.txt" 60 | crc config set nameserver "$(awk '/^nameserver\ /{print$2}' /etc/resolv.conf)" 61 | 62 | # If using a proxy, ensure to configure CRC appropriately. 
63 | if [ -n "${http_proxy-}" ] 64 | then 65 | crc config set http-proxy "${http_proxy}" 66 | fi 67 | 68 | if [ -n "${https_proxy-}" ] 69 | then 70 | crc config set https-proxy "${https_proxy}" 71 | fi 72 | 73 | if [ -n "${no_proxy-}" ] 74 | then 75 | crc config set no-proxy "${no_proxy}" 76 | fi 77 | 78 | # If the proxy is filtering TLS, we need to insert the CA 79 | if [ -f "$(pwd)/private/cert.pem" ] 80 | then 81 | crc config set proxy-ca-file "$(pwd)/private/cert.pem" 82 | fi 83 | 84 | # Configure the pull-secret-file 85 | if [ -f "$(pwd)/private/pull-secret.txt" ] 86 | then 87 | crc config set pull-secret-file "$(pwd)/private/pull-secret.txt" 88 | fi 89 | 90 | SSH_COM="$(paste -s -d ' ' - << __EOF__ 91 | ssh 92 | -i ~/.crc/machines/crc/id_ecdsa 93 | -o StrictHostKeyChecking=no 94 | -o IdentitiesOnly=yes 95 | -o ConnectTimeout=3 96 | -p 2222 97 | core@\$(crc ip) 98 | __EOF__ 99 | )" 100 | 101 | # tl;dr: crc start is a long running process. So start in the background, 102 | # do some hack configuration that should be completely unnecessary, and wait. 103 | 104 | nohup crc start --log-level=debug >ocp/debug.log 2>&1 & 105 | WPID=$! 106 | 107 | # If cert.cer exists, then add it as a root ca on the host.
108 | # Check to see if the machine's key is available 109 | if [ -f "$(pwd)/private/cert.cer" ] 110 | then 111 | echo "Waiting for ${HOME}/.crc/machines/crc/id_ecdsa" 112 | until [ -f "${HOME}/.crc/machines/crc/id_ecdsa" ] 113 | do 114 | sleep 2 115 | done 116 | 117 | SSHCMD="$(eval echo "${SSH_COM}")" 118 | 119 | # Check if we can successfully connect 120 | echo "Waiting for connection to machine" 121 | until ${SSHCMD} "whoami" 2> >(printer) > >(printer) 122 | do 123 | sleep 2 124 | done 125 | 126 | # Check if already exists on the machine 127 | echo "Looking for cert.cer on machine" 128 | if ${SSHCMD} "sudo ls /etc/pki/ca-trust/source/anchors/devca.cer" \ 129 | 2> >(printer) > >(printer) 130 | then 131 | echo "Found cert.cer on machine" 132 | else 133 | # Copy cert.cer to the machine and restart update-ca-trust service 134 | echo "Copying cert.cer to machine" 135 | if < "$(pwd)/private/cert.cer" ${SSHCMD} "$(cat - << __EOF__ 136 | sudo bash -c "cat - >/etc/pki/ca-trust/source/anchors/devca.cer" 137 | sudo systemctl restart coreos-update-ca-trust.service 138 | #sudo systemctl restart crio 139 | #sudo systemctl restart kubelet 140 | __EOF__ 141 | )" 2> >(printer) > >(printer) 142 | then 143 | printer "cert.cer added to bundle\n" 144 | fi 145 | fi 146 | fi 147 | 148 | # Wait on `crc start` to complete 149 | echo "Waiting for crc to finish start" 150 | wait ${WPID} 151 | 152 | #shellcheck disable=SC2046 153 | eval $(crc oc-env) 154 | 155 | # Login to OpenShift 156 | oc login -u kubeadmin -p kubeadmin --insecure-skip-tls-verify=true \ 157 | https://api.crc.testing:6443 158 | 159 | # Enable cluster monitoring of user namespaces 160 | kubectl apply -f ocp/cluster-monitoring-config.yaml 161 | 162 | # Show the user credentials 163 | crc console --credentials 164 | -------------------------------------------------------------------------------- /env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # 
Copyright 2022-2023 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # These are entries to configure and modify the environment for code in this 24 | # project. 25 | # 26 | # Author: Justin Cook 27 | 28 | set -o nounset errexit 29 | shopt -s extglob 30 | shopt -s expand_aliases 31 | 32 | # Set the runtime. Supported options are minikube, crc, and rdctl. 33 | export RUNTIME="minikube" 34 | #export RUNTIME="crc" 35 | #export RUNTIME="rdctl" 36 | #export RUNTIME="microk8s" 37 | #export RUNTIME="rke2" 38 | 39 | # Set loglevel to screen. 
The valid options are INFO and DEBUG 40 | export LOGLEVEL="DEBUG" 41 | 42 | # Pod network CIDR 43 | export POD_NET_CIDR="172.16.0.0/16" 44 | 45 | # Minikube IP Network Subnets 46 | export SERVICECLUSTERIPRANGE="10.96.0.0/12" 47 | export HOSTONLYCIDR="192.168.59.0/24" 48 | export MINIKUBEKVM2DRIVER="192.168.39.0/24" 49 | export MINIKUBEDOCKERCLST1="192.168.49.0/24" 50 | export MINIKUBENODENET="192.168.205.0/24" 51 | 52 | # shellcheck source=platform.sh 53 | source platform.sh 54 | 55 | # Which namespace will the project reside? 56 | export PROJECT_NAMESPACE="boutique" 57 | 58 | # Is the environment running? 59 | export RUNNING=false 60 | 61 | # Trap and ignore signals as appropriate 62 | trap "" USR1 USR2 63 | 64 | _exit_() { 65 | local lc="$BASH_COMMAND" rc=$? 66 | if [ "${LOGLEVEL}" = "DEBUG" ] 67 | then 68 | echo "Exited with code [$rc]: [$lc]" 69 | fi 70 | tty -s 71 | } 72 | 73 | trap _exit_ EXIT 74 | 75 | if [ "${LOGLEVEL-}" = "DEBUG" ] 76 | then 77 | set -x 78 | fi 79 | 80 | # A utility function used to print in accordance with LOGLEVEL 81 | printer() { 82 | if [ -n "${1-}" ] 83 | then 84 | case "${LOGLEVEL-}" in 85 | "DEBUG") printf "%b" "${1}" ;; 86 | *) printf "%b" "${1}" 2>/dev/null ;; 87 | esac 88 | fi 89 | } 90 | 91 | # Check if all the necessary utilities are available or exit 92 | # `kubectl` is unnecessary in this context as it is later aliased 93 | check_dependencies() { 94 | for cmd in "${RUNTIME}" "helm" "git" "virtualenv" "yq" "jq" 95 | do 96 | if ! command -v "${cmd}" &> /dev/null 97 | then 98 | echo "command: ${cmd} could not be found" 99 | exit 1 100 | fi 101 | done 102 | } 103 | 104 | # Create the namespace 105 | create_namespace(){ 106 | if ${RUNNING} && [ "${RUNTIME}" = "crc" ] 107 | then 108 | if ! 
oc get project "${PROJECT_NAMESPACE}" 109 | then 110 | oc new-project "${PROJECT_NAMESPACE}" \ 111 | --description="This is the Google Boutique microservices demo" \ 112 | --display-name="Online Boutique" 113 | fi 114 | oc adm policy add-scc-to-user privileged system:serviceaccount:"${PROJECT_NAMESPACE}":default 115 | elif ${RUNNING} 116 | then 117 | kubectl create ns "${PROJECT_NAMESPACE}" --dry-run=client -o yaml | \ 118 | kubectl apply -f - 119 | fi 120 | } 121 | 122 | # Set the default namespace to PROJECT_NAMESPACE 123 | set_default_namespace() { 124 | if ${RUNNING} 125 | then 126 | kubectl config set-context --current --namespace="${PROJECT_NAMESPACE}" 127 | fi 128 | } 129 | 130 | # Prometheus namespace and service names 131 | set_prometheus_names() { 132 | if [ "${RUNTIME}" = "crc" ] 133 | then # OpenShift 134 | export PROMETHEUS_NS="openshift-monitoring" 135 | export PROMETHEUS_SVC="thanos-querier" 136 | export PROMPORT="9092" 137 | else # Rancher 138 | export PROMETHEUS_NS="cattle-monitoring-system" 139 | export PROMETHEUS_SVC="rancher-monitoring-prometheus" 140 | export PROMPORT="9090" 141 | fi 142 | } 143 | 144 | # Where is Docker available, and what port should be forwarded? 
145 | set_docker_env() { 146 | if ${RUNNING} && [ "${RUNTIME}" = "crc" ] 147 | then 148 | #shellcheck disable=SC2046 149 | eval $(crc podman-env) 150 | export IGNORE_DOCKER_CONFIG=true 151 | else 152 | export DOCKER_HOST="tcp://localhost:2375" 153 | export DOCKER_REG_PORT="5000" 154 | export IGNORE_DOCKER_CONFIG=false 155 | fi 156 | } 157 | 158 | CMDS=$(cat - <<__EOF__ 159 | check_dependencies 160 | create_namespace 161 | set_default_namespace 162 | set_prometheus_names 163 | set_docker_env 164 | __EOF__ 165 | ) 166 | 167 | for cmd in ${CMDS} 168 | do 169 | #shellcheck disable=SC2046 170 | eval $(printf '%s' "${cmd} ${REDIRECT-}") 171 | done 172 | -------------------------------------------------------------------------------- /hpa/install_scaled_objects.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # Configure HPA with Keda ScaledObjects 24 | # 25 | # References: 26 | # * https://www.nginx.com/blog/microservices-march-reduce-kubernetes-latency-with-autoscaling/ 27 | # * https://stackoverflow.com/questions/62578789/kubectl-patch-is-it-possible-to-add-multiple-values-to-an-array-within-a-sinlge 28 | # 29 | # Requires: kubectl 30 | # 31 | # Author: Justin Cook 32 | 33 | set -o errexit nounset 34 | 35 | # shellcheck source=/dev/null 36 | . env.sh 37 | 38 | # Create the ScaledObject(s) 39 | PROMHOST=$(kubectl get svc "${PROMETHEUS_SVC}" -n \ 40 | "${PROMETHEUS_NS}" -o jsonpath='{.spec.clusterIP}') 41 | 42 | kubectl apply -f - <` as the directory name. For instance, OpenShift is 24 | represented as `./ocp` and e.g., `hpa/ocp` throughout the project. 25 | 26 | ## Up and Running 27 | 28 | The following instructions have been wrapped and provided in `quickstart.sh`. 29 | It was developed and tested on macOS using hyperkit. It requires Internet 30 | connectivity, and requires just under ten minutes to complete on a 500Mbps 31 | connection. Hyperkit uses 30GiB of RAM for the default configuration. 32 | 33 | The code requires the following utilities to operate correctly. They are 34 | available with `brew`. 35 | 36 | * Minikube 37 | * kubectl 38 | * Helm 39 | * Git 40 | * Virtualenv 41 | * yq 42 | * jq 43 | 44 | ``` 45 | bash quickstart.sh 46 | ... 
47 | Open browser to: http://10.109.73.206:30875 48 | 49 | [2022-03-23 16:54:04,461] jcmmini1.local/INFO/locust.main: Starting web interface at http://0.0.0.0:8089 (accepting connections from all network interfaces) 50 | [2022-03-23 16:54:04,475] jcmmini1.local/INFO/locust.main: Starting Locust 2.8.4 51 | ``` 52 | 53 | Please note, `quickstart.sh` does not enable eBPF or install Keda and configure 54 | HPA with Prometheus metrics. It does, however, create a three node cluster; 55 | install Calico, Rancher, and the monitoring stack; configure the relevant 56 | Prometheus metrics and Grafana dashboards; and install Boutique. 57 | 58 | ## Demonstration of Calico CNI with eBPF the hard way 59 | 60 | Create a Minikube cluster enabling ingress and ingress-dns addon, set the 61 | cidr range to 172.16.0.0/16, and set the Kubernetes version. 62 | * https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/rancher-v2-6-3/ 63 | * https://minikube.sigs.k8s.io/docs/drivers/hyperkit/ 64 | 65 | ### Configure .test TLD to use Minikube 66 | * https://minikube.sigs.k8s.io/docs/handbook/addons/ingress-dns/ 67 | 68 | ``` 69 | bash setup_k8s.sh 70 | ``` 71 | 72 | ## Install Calico CNI 73 | * https://projectcalico.docs.tigera.io/getting-started/kubernetes/minikube 74 | 75 | ``` 76 | bash install_calico.sh 77 | ``` 78 | 79 | ## Add Nodes to Minikube 80 | 81 | ``` 82 | minikube config set memory 4096 83 | minikube node add --worker 84 | ``` 85 | ## Install Rancher 86 | * https://rancher.com/docs/rancher/v2.6/en/installation/install-rancher-on-k8s/ 87 | 88 | ``` 89 | bash install_rancher.sh 90 | ``` 91 | 92 | ## Install / Configure Prometheus and Grafana 93 | * https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/guides/customize-grafana/ 94 | * https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/guides/persist-grafana/ 95 | * https://www.tigera.io/blog/monitoring-calico-with-prometheus-and-grafana/ 96 | * 
https://www.tigera.io/blog/how-to-monitor-calicos-ebpf-data-plane-for-proactive-cluster-management/ 97 | * https://projectcalico.docs.tigera.io/maintenance/monitor/monitor-component-metrics 98 | 99 | ``` 100 | bash install_monitoring.sh 101 | ``` 102 | 103 | Add the services necessary and create the Prometheus service monitors for 104 | Calico. 105 | 106 | ``` 107 | bash monitoring/configure_prometheus.sh 108 | ... 109 | bash monitoring/configure_grafana_dashboards.sh 110 | ... 111 | ``` 112 | 113 | ## Enable Horizontal Pod Autoscaling 114 | * https://www.nginx.com/blog/microservices-march-reduce-kubernetes-latency-with-autoscaling/ 115 | 116 | This can be done at any time and is simply a demonstration of using Keda to 117 | scale workloads using Prometheus metrics. For more information, please see the 118 | `hpa` directory. 119 | 120 | ``` 121 | bash hpa/configure_hpa.sh 122 | ``` 123 | 124 | The script above creates an ingress for http://boutique.test which becomes 125 | available after the script runs successfully. 126 | 127 | ## Load Testing with Locust 128 | * https://cloud.google.com/service-mesh/docs/onlineboutique-install-kpt 129 | * https://github.com/GoogleCloudPlatform/microservices-demo 130 | 131 | Create a virtual environment and install Locust. Then, clone the above repo, 132 | install the application, setup Locust, and execute the load test. 133 | 134 | ``` 135 | bash install_boutique.sh 136 | ... 137 | ``` 138 | 139 | Open your browser and load the sites (Boutique and Locust) displayed. 140 | 141 | ## Enable eBPF 142 | * https://projectcalico.docs.tigera.io/maintenance/ebpf/enabling-bpf 143 | 144 | ``` 145 | bash ebpf/enable_ebpf.sh 146 | ``` 147 | 148 | ## Disable eBPF 149 | 150 | ``` 151 | bash ebpf/disable_ebpf.sh 152 | ``` 153 | 154 | ## Calico Enterprise 155 | 156 | For those with a valid Tigera Calico Enterprise license, please see the 157 | `calico_enterprise` folder for more information. 
One caveat, it is recommended 158 | to install Rancher and monitoring before Calico Enterprise if you prefer the 159 | full stack to be available. This is due to the Prometheus operator and pull 160 | secret visibility to the operator. 161 | -------------------------------------------------------------------------------- /monitoring/configure_prometheus.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 
22 | # 23 | # Create/patch services and add a ServiceMonitor for Prometheus to capture metrics 24 | # 25 | # References: 26 | # https://www.tigera.io/blog/how-to-monitor-calicos-ebpf-data-plane-for-proactive-cluster-management/ 27 | # https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/how-monitoring-works/ 28 | # https://support.coreos.com/hc/en-us/articles/360000155514-Prometheus-ServiceMonitor-troubleshooting 29 | # https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/guides/customize-grafana/ 30 | # https://kubernetes.github.io/ingress-nginx/user-guide/monitoring/ 31 | # 32 | # Author: Justin Cook 33 | 34 | # shellcheck source=/dev/null 35 | . env.sh 36 | 37 | # Create services for Prometheus discovery 38 | kubectl apply -f - </dev/null 2>&1 && \ 86 | kubectl delete APIServer default -n "${_NS_}" 87 | 88 | # Install the pull secret 89 | kubectl create secret generic tigera-pull-secret \ 90 | --type=kubernetes.io/dockerconfigjson -n "${_NS_}" \ 91 | --from-file=.dockerconfigjson=calico_enterprise/tigera-pull-secret.json \ 92 | --dry-run=client -o yaml | kubectl apply -f - 93 | 94 | # Create a pull secret for the Tigera Prometheus operator and patch deployment. 95 | # In order to be idempotent, we need to be pedantic checking prometheuses and 96 | # known namespaces as prometheus API not available will change at a later date 97 | # and assumptions will become invalid. 98 | if !
kubectl get prometheus >/dev/null 2>&1 99 | then 100 | PROMOPNS="tigera-prometheus" 101 | PROMOPDP="calico-prometheus-operator" 102 | kubectl apply -f https://docs.tigera.io/manifests/tigera-prometheus-operator.yaml 103 | else 104 | if kubectl get ns cattle-monitoring-system >/dev/null 2>&1 105 | then 106 | PROMOPNS="cattle-monitoring-system" 107 | PROMOPDP="rancher-monitoring-operator" 108 | else 109 | PROMOPNS="tigera-prometheus" 110 | PROMOPDP="calico-prometheus-operator" 111 | fi 112 | kubectl create ns tigera-prometheus --dry-run=client -o yaml | \ 113 | kubectl apply -f - 114 | fi 115 | 116 | kubectl create secret generic tigera-pull-secret \ 117 | --type=kubernetes.io/dockerconfigjson -n ${PROMOPNS} \ 118 | --from-file=.dockerconfigjson=calico_enterprise/tigera-pull-secret.json \ 119 | --dry-run=client -o yaml | kubectl apply -f - 120 | kubectl patch deployment -n ${PROMOPNS} ${PROMOPDP} \ 121 | -p '{"spec":{"template":{"spec":{"imagePullSecrets":[{"name": "tigera-pull-secret"}]}}}}' 122 | 123 | # Install Tigera custom resources 124 | kubectl apply -f https://docs.tigera.io/manifests/custom-resources.yaml 125 | 126 | # Helper function to verify tigerastatus is available. In order to circumvent 127 | # flapping, get three consecutive success to proceed. 128 | tigerastatus() { 129 | # Wait for tigerastatuses.operator.tigera.io API group and kind 130 | while : 131 | do 132 | kubectl get tigerastatus "$1" >/dev/null 2>&1 && break 133 | sleep 2 134 | done 135 | success_count=0 136 | until [ "$success_count" -gt 2 ] 137 | do 138 | status=$(kubectl get tigerastatus "$1" --no-headers) 139 | avail=$(echo "${status:${#1}+3:5}" | xargs) 140 | if [ "${avail:-False}" == "True" ] 141 | then 142 | ((success_count++)) 143 | else 144 | success_count=0 145 | fi 146 | sleep 2 147 | done 148 | } 149 | 150 | # Wait until apiserver and calico are Available. 
151 | for serv in calico apiserver 152 | do 153 | printf "Waiting on %s: " "${serv}" 154 | tigerastatus "${serv}" 155 | printf "Available\n" 156 | done 157 | 158 | # Install the Calico Enterprise license 159 | kubectl apply -f calico_enterprise/calico-enterprise-license.yaml 160 | 161 | # Wait for all components to become available 162 | printf "Waiting on all components: " 163 | 164 | for serv in monitor log-storage compliance intrusion-detection log-collector manager 165 | do 166 | tigerastatus "${serv}" 167 | done 168 | 169 | printf "Available\n" 170 | 171 | # Secure Calico Enterprise components with network policy 172 | kubectl apply -f https://docs.tigera.io/manifests/tigera-policies.yaml 173 | 174 | # Create an admin user 175 | kubectl create sa admin -n default --dry-run=client -o yaml | kubectl apply -f - 176 | kubectl create clusterrolebinding admin-access --clusterrole tigera-network-admin\ 177 | --serviceaccount default:admin --dry-run=client -o yaml | kubectl apply -f - 178 | 179 | # Output elastic and admin user's token 180 | printf "\nKibana \"elastic\" user token: " 181 | kubectl -n tigera-elasticsearch get secret tigera-secure-es-elastic-user \ 182 | -o go-template='{{.data.elastic | base64decode}}' && echo 183 | 184 | printf "\nCalico \"admin\" user token: " 185 | kubectl get secret "$(kubectl get serviceaccount admin -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep token)" \ 186 | -o go-template='{{.data.token | base64decode}}' && echo 187 | 188 | printf "\nVisit https://localhost:9443/ to login to the Calico Enterprise UI with token above.\n\n" 189 | 190 | kubectl port-forward -n tigera-manager svc/tigera-manager 9443 191 | -------------------------------------------------------------------------------- /kind/README.md: -------------------------------------------------------------------------------- 1 | # Kind 2 | 3 | Kind runs local Kubernetes clusters with container nodes. 
4 | 5 | https://kind.sigs.k8s.io/docs/user/quick-start/ 6 | 7 | ## Getting Started 8 | 9 | You will need Docker available. On macOS, [Colima](https://github.com/abiosoft/colima) can be used. 10 | 11 | ``` 12 | $ brew install colima 13 | ... 14 | $ colima start 15 | INFO[0000] starting colima 16 | INFO[0000] creating and starting ... context=vm 17 | INFO[0099] provisioning ... context=docker 18 | INFO[0099] restarting VM to complete setup ... context=docker 19 | INFO[0099] stopping ... context=vm 20 | INFO[0105] starting ... context=vm 21 | INFO[0125] starting ... context=docker 22 | INFO[0130] waiting for startup to complete ... context=docker 23 | INFO[0130] done 24 | ``` 25 | 26 | At this point, you can create a basic cluster. 27 | 28 | ``` 29 | $ brew install kind 30 | ... 31 | $ kind create cluster 32 | Creating cluster "kind" ... 33 | ✓ Ensuring node image (kindest/node:v1.23.4) 🖼 34 | ✓ Preparing nodes 📦 35 | ✓ Writing configuration 📜 36 | ✓ Starting control-plane 🕹️ 37 | ✓ Installing CNI 🔌 38 | ✓ Installing StorageClass 💾 39 | Set kubectl context to "kind-kind" 40 | You can now use your cluster with: 41 | 42 | kubectl cluster-info --context kind-kind 43 | 44 | Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂 45 | $ kubectl get nodes 46 | NAME STATUS ROLES AGE VERSION 47 | kind-control-plane Ready control-plane,master 4m58s v1.23.4 48 | $ kind delete cluster 49 | Deleting cluster "kind" ... 50 | $ colima delete 51 | are you sure you want to delete colima and all settings? [y/N] y 52 | INFO[0003] deleting colima 53 | INFO[0003] deleting ... context=docker 54 | INFO[0003] deleting ... context=vm 55 | INFO[0003] done 56 | ``` 57 | 58 | ## Create a Cluster for Calico 59 | 60 | A multi-node Calico Enterprise cluster requires more resources. As such, you 61 | will need to allocate more cpu, memory, and disk to the virtual machine when 62 | invoking Colima. 
Then, you can create the cluster using the manifest provided: 63 | 64 | ``` 65 | $ colima start --cpu 6 --memory 28 --disk 50 66 | INFO[0000] starting colima 67 | INFO[0000] creating and starting ... context=vm 68 | ... 69 | $ kind create cluster --config kind/calico_cluster.yaml 70 | Creating cluster "calico-cluster" ... 71 | ✓ Ensuring node image (kindest/node:v1.23.4) 🖼 72 | ✓ Preparing nodes 📦 📦 📦 73 | ✓ Writing configuration 📜 74 | ✓ Starting control-plane 🕹️ 75 | ✓ Installing StorageClass 💾 76 | ✓ Joining worker nodes 🚜 77 | Set kubectl context to "kind-calico-cluster" 78 | You can now use your cluster with: 79 | 80 | kubectl cluster-info --context kind-calico-cluster 81 | 82 | Thanks for using kind! 😊 83 | $ kubectl get nodes 84 | NAME STATUS ROLES AGE VERSION 85 | calico-cluster-control-plane NotReady control-plane,master 43s v1.23.4 86 | calico-cluster-worker NotReady 10s v1.23.4 87 | calico-cluster-worker2 NotReady 10s v1.23.4 88 | ``` 89 | 90 | ## Make all nodes schedulable 91 | 92 | If resources are constrained, remove the control-plane (master) node taint so that all nodes are schedulable and contention is reduced. 93 | 94 | ``` 95 | $ kubectl taint nodes --all node-role.kubernetes.io/master- || /usr/bin/true 96 | node/calico-cluster-control-plane untainted 97 | taint "node-role.kubernetes.io/master" not found 98 | taint "node-role.kubernetes.io/master" not found 99 | ``` 100 | 101 | ## Ingress 102 | 103 | Kind clusters do not have an Ingress controller configured. So, install one so 104 | that installed applications are easily accessible. The controller is configured with 105 | `kind` to listen on localhost:80 and localhost:443. Therefore, hostnames should 106 | resolve to 127.0.0.1. 
107 | 108 | ``` 109 | $ bash kind/install_ingress.sh 110 | namespace/ingress-nginx created 111 | serviceaccount/ingress-nginx created 112 | serviceaccount/ingress-nginx-admission created 113 | role.rbac.authorization.k8s.io/ingress-nginx created 114 | role.rbac.authorization.k8s.io/ingress-nginx-admission created 115 | clusterrole.rbac.authorization.k8s.io/ingress-nginx created 116 | clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created 117 | rolebinding.rbac.authorization.k8s.io/ingress-nginx created 118 | rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created 119 | clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created 120 | clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created 121 | configmap/ingress-nginx-controller created 122 | service/ingress-nginx-controller created 123 | service/ingress-nginx-controller-admission created 124 | deployment.apps/ingress-nginx-controller created 125 | job.batch/ingress-nginx-admission-create created 126 | job.batch/ingress-nginx-admission-patch created 127 | ingressclass.networking.k8s.io/nginx created 128 | validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created 129 | deployment.apps/ingress-nginx-controller patched (no change) 130 | pod/ingress-nginx-controller-55c69f5f55-8c8cq condition met 131 | ``` 132 | 133 | # Rancher 134 | 135 | Installation of Rancher on Kind can be performed as per below. 136 | 137 | ``` 138 | $ bash install_rancher.sh 139 | ... 140 | Happy Containering! 141 | Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... 142 | Waiting for deployment spec update to be observed... 143 | Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... 144 | Waiting for deployment "rancher" rollout to finish: 1 of 3 updated replicas are available... 145 | Waiting for deployment "rancher" rollout to finish: 2 of 3 updated replicas are available... 
146 | deployment "rancher" successfully rolled out 147 | ``` 148 | 149 | # Monitoring 150 | 151 | ``` 152 | $ bash install_monitoring.sh 153 | ... 154 | Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. 155 | deployment "rancher-monitoring-grafana" successfully rolled out 156 | deployment "rancher-monitoring-kube-state-metrics" successfully rolled out 157 | deployment "rancher-monitoring-operator" successfully rolled out 158 | deployment "rancher-monitoring-prometheus-adapter" successfully rolled out 159 | 160 | $ bash monitoring/configure_prometheus.sh 161 | service/typha-metrics created 162 | service/calico-controllers-metrics created 163 | felixconfiguration.projectcalico.org/default patched 164 | service/felix-metrics created 165 | service/ingress-nginx-controller patched 166 | service/ingress-nginx-controller patched 167 | service/ingress-nginx-controller patched 168 | service/ingress-nginx-controller patched 169 | deployment.apps/ingress-nginx-controller patched 170 | deployment.apps/ingress-nginx-controller patched 171 | deployment.apps/ingress-nginx-controller patched 172 | servicemonitor.monitoring.coreos.com/calico-typha-prometheus-config created 173 | servicemonitor.monitoring.coreos.com/calico-kube-controllers-prometheus-config created 174 | servicemonitor.monitoring.coreos.com/calico-svc-monitoring-prometheus-config created 175 | servicemonitor.monitoring.coreos.com/ingress-nginx-monitoring-config created 176 | 177 | $ bash monitoring/configure_grafana_dashboards.sh 178 | Applying Grafana dashboard: monitoring/dashboards/calico-grafana-dashboards.yaml 179 | configmap/calico-dashboards created 180 | Applying Grafana dashboard: monitoring/dashboards/nginx-grafana-dashboards.yaml 181 | configmap/nginx-dashboards created 182 | ``` 183 | 184 | # Calico Enterprise 185 | 186 | Installation of the Calico Enterprise suite can be performed on the 
Kind 187 | cluster as per below. The `calico-enterprise-license.yaml` and `tigera-pull-secret.json` 188 | need to be resident in `calico_enterprise` directory. 189 | 190 | ``` 191 | $ bash calico_enterprise/install_calico_enterprise.sh 192 | ... 193 | Visit https://localhost:9443/ to login to the Calico Enterprise UI with token above. 194 | ``` 195 | -------------------------------------------------------------------------------- /rke2/setup_rke2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy 4 | # of this software and associated documentation files (the "Software"), to 5 | # deal in the Software without restriction, including without limitation the 6 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 | # sell copies of the Software, and to permit persons to whom the Software is 8 | # furnished to do so, subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in 11 | # all copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | # THE SOFTWARE. 20 | # 21 | # Setup rke2 with multipass for use within this code base. 
22 | # 23 | # Requires: multipass 24 | # jq 25 | # 26 | # Author(s): Sebastiaan van Steenis 27 | # Justin Cook 28 | 29 | # shellcheck source=/dev/null 30 | source env.sh 31 | source "$(dirname "$0")/localenv.sh" 32 | 33 | # Exit on error and on use of unset variables. Each option needs its own -o; 34 | # a bare second word would be treated as a positional parameter, not an option. 35 | set -o errexit -o nounset 36 | 37 | if [ -z "${TOKEN}" ] 38 | then 39 | TOKEN=$(echo $RANDOM | md5sum | head -c20) 40 | echo "Generated agent token: ${TOKEN}" 41 | sed "s/TOKEN=\".*\"/TOKEN=\"${TOKEN}\"/" "$(dirname "$0")/localenv.sh" > \ 42 | "$(dirname "$0")/localenv.sh.$$" 43 | mv "$(dirname "$0")/localenv.sh.$$" "$(dirname "$0")/localenv.sh" 44 | fi 45 | 46 | # Check if name is given or create random name 47 | if [ -z "${NAME}" ] 48 | then 49 | NAME=$(grep -E '^[a-z]{5}$' /usr/share/dict/words | shuf -n1) 50 | echo "Selected name: ${NAME}" 51 | sed "s/NAME=\".*\"/NAME=\"${NAME}\"/" "$(dirname "$0")/localenv.sh" > \ 52 | "$(dirname "$0")/localenv.sh.$$" 53 | mv "$(dirname "$0")/localenv.sh.$$" "$(dirname "$0")/localenv.sh" 54 | fi 55 | 56 | cleanup() { 57 | for file in ${SUBDIR:-./}${NAME}-{pm,master,agent}-cloud-init.yaml \ 58 | "$(dirname "$0")/localenv.sh.$$" 59 | do 60 | rm -f "${file}" >/dev/null 2>&1 61 | done 62 | } 63 | trap cleanup EXIT 64 | 65 | # Prepare cloud-init template 66 | # Requires: INSTALL_RKE2_TYPE set which defaults to empty string 67 | # CONFIGYAML set which is a string of YAML 68 | # RKE2ROLE set which defaults to "server" 69 | create_cloudinit_template() { 70 | CLOUDINIT_TEMPLATE=$(cat - << EOM 71 | #cloud-config 72 | 73 | runcmd: 74 | - '\curl -sfL https://get.rke2.io | ${CLOUD_INIT_INSTALL} ${IRT:-} sh -' 75 | - '\mkdir -p /etc/rancher/rke2' 76 | - '\echo "${CONFIGYAML}" > /etc/rancher/rke2/config.yaml' 77 | - '\systemctl daemon-reload' 78 | - '\systemctl enable --now rke2-${RKE2ROLE:-server}' 79 | EOM 80 | ) 81 | } 82 | 83 | # A convenience function called throughout the code to check the status of an 84 | # instance passed to this function as "$1". 
If the instance is "Running", then 83 | # carry on silently, "Stopped" then start, and if nonexistent, create a 84 | # multipass instance. Wait on each node to register and become ready. 85 | # It requires arguments passed: 86 | # 1: instance name 87 | # 2: number of cpus 88 | # 3: disk size 89 | # 4: memory size 90 | # 5: image name 91 | # 6: cloud-init file name 92 | create_multipass_node() { 93 | local __state__ 94 | __state__="$(${MULTIPASSCMD} list --format=json | \ 95 | jq -r ".list[] | select(.name | contains(\"${1}\")) | .state")" 96 | if [ "${__state__}" = "Running" ] 97 | then 98 | : 99 | elif [ "${__state__}" = "Stopped" ] 100 | then 101 | ${MULTIPASSCMD} start "${1}" 102 | else 103 | echo "Creating ${1} node" 104 | ${MULTIPASSCMD} launch --cpus "${2}" --disk "${3}" --memory "${4}" "${5}" \ 105 | --name "${1}" --cloud-init "${SUBDIR:-./}${6}" --timeout=600 106 | fi 107 | wait_on_node "${1}" 108 | 109 | } 110 | 111 | # A convenience function called throughout the code to detect node registration 112 | # and wait until ready. The node name needs passed. 113 | wait_on_node() { 114 | echo "Confirming ${1} registration" 115 | ${MULTIPASSCMD} exec "${NAME}-rke2-master-1" -- bash -c "$(cat - <<__EOF__ 116 | until /var/lib/rancher/rke2/bin/kubectl \ 117 | --kubeconfig /etc/rancher/rke2/rke2.yaml get "node/${1}" 118 | do 119 | sleep 2 120 | done 121 | __EOF__ 122 | )" 123 | echo "Waiting for ${1} to become ready" 124 | ${MULTIPASSCMD} exec "${NAME}-rke2-master-1" -- /bin/bash -c "$(cat - <<__EOF__ 125 | /var/lib/rancher/rke2/bin/kubectl \ 126 | --kubeconfig /etc/rancher/rke2/rke2.yaml wait --for=condition=Ready \ 127 | "node/${1}" --timeout=600s 128 | __EOF__ 129 | )" 130 | } 131 | 132 | cat << __EOF__ 133 | Creating cluster ${NAME} with ${MASTER_NODE_COUNT} masters and \ 134 | ${AGENT_NODE_COUNT} nodes. 
135 | __EOF__ 136 | 137 | # Server specific cloud-init 138 | CONFIGYAML="token: ${TOKEN}\nwrite-kubeconfig-mode: 644\ntls-san: ${TLSSAN}" 139 | create_cloudinit_template 140 | echo "${CLOUDINIT_TEMPLATE}" > "${NAME}-pm-cloud-init.yaml" 141 | create_multipass_node "${NAME}-rke2-master-1" "${MASTER_NODE_CPU}" \ 142 | "${MASTER_DISK_SIZE}" "${MASTER_MEMORY_SIZE}" "${IMAGE}" \ 143 | "${NAME}-pm-cloud-init.yaml" 144 | 145 | # Retrieve info to join agent to cluster 146 | SERVER_IP=$($MULTIPASSCMD info "${NAME}-rke2-master-1" --format=json | \ 147 | jq -r ".info.\"${NAME}-rke2-master-1\".ipv4[0]") 148 | URL="https://${SERVER_IP}:9345" 149 | 150 | # Create additional masters 151 | CONFIGYAML="server: ${URL}\ntoken: ${TOKEN}\nwrite-kubeconfig-mode: 644\ntls-san: ${TLSSAN}" 152 | create_cloudinit_template 153 | echo "${CLOUDINIT_TEMPLATE}" > "${NAME}-master-cloud-init.yaml" 154 | for ((i=2; i<=MASTER_NODE_COUNT; i++)) 155 | do 156 | create_multipass_node "${NAME}-rke2-master-${i}" "${MASTER_NODE_CPU}" \ 157 | "${MASTER_DISK_SIZE}" "${MASTER_MEMORY_SIZE}" "${IMAGE}" \ 158 | "${NAME}-master-cloud-init.yaml" 159 | done 160 | 161 | # Prepare agent node cloud-init 162 | CONFIGYAML="server: ${URL}\ntoken: ${TOKEN}" 163 | IRT='INSTALL_RKE2_TYPE="agent"' 164 | RKE2ROLE="agent" 165 | create_cloudinit_template 166 | echo "${CLOUDINIT_TEMPLATE}" > "${NAME}-agent-cloud-init.yaml" 167 | for ((i=1; i<=AGENT_NODE_COUNT; i++)) 168 | do 169 | create_multipass_node "${NAME}-rke2-agent-${i}" "${AGENT_NODE_CPU}" \ 170 | "${AGENT_DISK_SIZE}" "${AGENT_MEMORY_SIZE}" "${IMAGE}" \ 171 | "${NAME}-agent-cloud-init.yaml" 172 | done 173 | 174 | # Check if `kubectl` exists in PATH. If so, merge KUBECONFIG and set as 175 | # default context. 176 | if command -v kubectl 177 | then 178 | # Retrieve the kubeconfig, edit server address, and merge it with the local 179 | # kubeconfig in order to use contexts. 180 | if [ ! 
-d "$(dirname "${LOCALKUBECONFIG}")" ] 181 | then 182 | mkdir "$(dirname "${LOCALKUBECONFIG}")" 183 | fi 184 | ${MULTIPASSCMD} copy-files "${NAME}-rke2-master-1:/etc/rancher/rke2/rke2.yaml" - | \ 185 | sed "/^[[:space:]]*server:/ s_:.*_: \"https://${SERVER_IP}:6443\"_" > \ 186 | "${LOCALKUBECONFIG}" 187 | chmod 0600 "${LOCALKUBECONFIG}" 188 | 189 | "${KUBECTLCMD}" config delete-context "${NAME}-rke2-cluster" || /usr/bin/true 190 | export KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/config}:${LOCALKUBECONFIG}" 191 | if [ ! -d "$(dirname "${KUBECONFIG%%:*}")" ] 192 | then 193 | mkdir "$(dirname "${KUBECONFIG%%:*}")" 194 | fi 195 | "${KUBECTLCMD}" config view --flatten > "${KUBECONFIG%%:*}" 196 | "${KUBECTLCMD}" config set-context "${NAME}-rke2-cluster" --namespace default 197 | else 198 | cat << __EOF__ 199 | 200 | kubectl not found in PATH 201 | Use the following alias for kubectl: 202 | alias kubectl="\${MULTIPASSCMD} exec \${NAME}-rke2-master-1 -- \ 203 | /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml" 204 | 205 | __EOF__ 206 | fi 207 | 208 | echo "rke2 setup complete" 209 | ${KUBECTLCMD} get nodes 210 | 211 | echo "Please configure ${TLSSAN} to resolve to ${SERVER_IP}" 212 | -------------------------------------------------------------------------------- /hpa/sidecar/tcp-exporter/tcp_exporter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Copyright 2022 Justin Cook 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to 7 | # deal in the Software without restriction, including without limitation the 8 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | # sell copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright 
notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | # THE SOFTWARE. 22 | # 23 | # This bit of code either uses conntrack and counts ESTABLISHED TCP connections 24 | # or failing that reads established connections and samples several times per 25 | # second maintaining count. It listens on the specified port for Prometheus 26 | # scrapes. 27 | # 28 | # References: 29 | # https://projectcalico.docs.tigera.io/reference/host-endpoints/conntrack 30 | # https://learnk8s.io/kubernetes-network-packets 31 | # https://stackabuse.com/serving-files-with-pythons-simplehttpserver-module/ 32 | # https://elixir.bootlin.com/linux/v5.17/source/include/net/tcp_states.h 33 | # 34 | # Author: Justin Cook 35 | 36 | import http.server 37 | from signal import signal, SIGINT 38 | from subprocess import Popen, PIPE, CalledProcessError 39 | from threading import Thread, Lock 40 | from itertools import count 41 | from socketserver import TCPServer 42 | from sys import argv, stderr, exit 43 | from queue import Queue, Empty 44 | from os import _exit 45 | from time import sleep 46 | 47 | class FastWriteCounter(object): 48 | def __init__(self): 49 | self._number_of_read = 0 50 | self._counter = count() 51 | self._read_lock = Lock() 52 | 53 | def inc(self): 54 | next(self._counter) 55 | 56 | def value(self): 57 | with self._read_lock: 58 | value = next(self._counter) - self._number_of_read 59 | self._number_of_read += 1 60 | return value 61 | 62 
| num_connections = FastWriteCounter() 63 | state = 'running' 64 | watch_port = 8080 65 | 66 | existing_conns = [] 67 | discover_conns = Queue() 68 | conns_lock = Lock() 69 | 70 | class PrometheusServiceExporter(http.server.SimpleHTTPRequestHandler): 71 | """A simple HTTP server that serves metrics for scraping by Prometheus.""" 72 | 73 | # HELP/TYPE must name the exact metric exposed below; the dashboards 74 | # query boutique_tcp_port_established_connections_total. 75 | metrics = ( 76 | "# HELP boutique_tcp_port_established_connections_total A count of " 77 | "ESTABLISHED TCP connections\n" 78 | "# TYPE boutique_tcp_port_established_connections_total counter\n" 79 | "boutique_tcp_port_established_connections_total {}\n" 80 | ) 81 | protocol_version = 'HTTP/1.1' 82 | 83 | def do_GET(self): 84 | if self.path == '/metrics': 85 | global num_connections 86 | payload = bytes(self.metrics.format(num_connections.value()), 87 | "utf8") 88 | self.send_response(200) 89 | self.send_header("Content-Length", len(payload)) 90 | self.send_header("Connection", "close") 91 | self.end_headers() 92 | self.wfile.write(payload) 93 | else: 94 | self.send_response(404) 95 | self.send_header("Connection", "close") 96 | self.end_headers() 97 | 98 | def conntrack_events(num_connections): 99 | """Use conntrack for watching ESTABLISHED TCP connections on watch_port. 100 | 101 | This requires root privileges and will raise CalledProcessError if the 102 | correct privileges are not available. 
101 | """ 102 | global watch_port, state 103 | try: 104 | subp = Popen(['conntrack', '-E', '-p', 'tcp', '--dport', 105 | str(watch_port), '--state', 'ESTABLISHED'], stdout=PIPE) 106 | sleep(.2) 107 | if subp.poll(): 108 | raise CalledProcessError(subp.returncode, "conntrack", None) 109 | while True: 110 | connection = subp.stdout.readline().decode('utf8') 111 | if connection: 112 | num_connections.inc() 113 | except (CalledProcessError, FileNotFoundError) as err: 114 | print(err, file=stderr) 115 | state = 'stopped' 116 | exit(1) 117 | 118 | def get_conns(discover_conns): 119 | """Open /proc/net/tcp, look for ESTABLISHED TCP connections, and inspect 120 | the local port of each. If it is the one we are looking for, add it to the 121 | queue for sorting and counting. 122 | """ 123 | global watch_port, state 124 | print("get_conns: starting", file=stderr) 125 | while True: 126 | try: 127 | if state != 'running': break 128 | with open('/proc/net/tcp', 'r') as f: 129 | conns_lock.acquire() 130 | while f: 131 | line = f.readline() 132 | if line == "": break 133 | chunkedline = line.split() 134 | # Element three is connection state and '01' is 135 | # ESTABLISHED. 136 | if chunkedline[3] == '01': 137 | # chunkedline[1] is the local port, and if it's the one 138 | # we are looking for it's ESTABLISHED and needs to be 139 | # counted if not already existing. 
140 | found_port = int(chunkedline[1].split(':')[1], 16) 141 | if found_port == watch_port: 142 | discover_conns.put_nowait(chunkedline[2]) 143 | conns_lock.release() 144 | sleep(.033) 145 | except FileNotFoundError as err: 146 | print(err, file=stderr) 147 | _exit(2) 148 | 149 | def count_conns(existing_conns, discover_conns, num_connections): 150 | global state 151 | print("count_conns: starting", file=stderr) 152 | while True: 153 | if state != 'running': break 154 | conns_lock.acquire() 155 | try: 156 | conns = [discover_conns.get() for i in range(discover_conns.qsize())] 157 | except Empty: 158 | conns_lock.release() 159 | continue 160 | for conn in existing_conns: 161 | if conn not in conns: 162 | existing_conns.remove(conn) 163 | for conn in conns: 164 | if conn not in existing_conns: 165 | num_connections.inc() 166 | existing_conns.append(conn) 167 | conns_lock.release() 168 | sleep(1) 169 | 170 | def cleanup(*args): 171 | """A simple cleanup function that changes global state and exits as 172 | necessary. 173 | """ 174 | global state 175 | state = 'shutdown' 176 | raise Exception("Exiting...") 177 | 178 | def usage(): 179 | return ( 180 | "usage: tcp_exporter.py \n\n" 181 | "example: tcp_exporter.py 9100 8080") 182 | 183 | def main(): 184 | global watch_port, state, num_connections 185 | threads = [] 186 | 187 | # Since we're looping and catching Exception, we need to handle SIGINT 188 | # as a special case. 
189 | signal(SIGINT, cleanup) 190 | try: 191 | listen_port = int(argv[1]) 192 | watch_port = int(argv[2]) 193 | except IndexError as err: 194 | print(usage(), file=stderr) 195 | exit(1) 196 | 197 | # Try and start conntrack, wait, and then check state 198 | port_discover = Thread(name='conntrack-events-daemon', 199 | target=conntrack_events, args=(num_connections, )) 200 | port_discover.daemon = True 201 | port_discover.start() 202 | sleep(.5) 203 | 204 | # If state is 'stopped' then back off and try sampling 205 | if state != 'running': 206 | print("Unable to track connections. Entering sampling mode.", 207 | file=stderr) 208 | port_discover.join() 209 | state = 'running' 210 | # Create a thread that will scrape existing tcp connections and place 211 | # matching connections on a queue for counting. 212 | port_discover = Thread(name='port-discover-daemon', 213 | target=get_conns, args=(discover_conns,)) 214 | port_discover.daemon = True 215 | port_discover.start() 216 | threads.append(port_discover) 217 | 218 | # Create a thread that will count the queue. 
219 | port_counter = Thread(name='port-counter-daemon', 220 | target=count_conns, args=(existing_conns, 221 | discover_conns, 222 | num_connections)) 223 | port_counter.daemon = True 224 | port_counter.start() 225 | threads.append(port_counter) 226 | 227 | # A mini HTTP server to make metrics available for scraping 228 | handler = PrometheusServiceExporter 229 | try: 230 | TCPServer.allow_reuse_address = True 231 | with TCPServer(("", listen_port), handler) as httpd: 232 | print("Server started at localhost: {}".format(listen_port), 233 | file=stderr) 234 | try: 235 | httpd.serve_forever() 236 | except Exception as err: 237 | print(err, file=stderr) 238 | finally: 239 | httpd.server_close() 240 | httpd.shutdown() 241 | except OSError as err: 242 | print(err, file=stderr) 243 | exit(4) 244 | 245 | [thread.join() for thread in threads] 246 | 247 | if __name__ == "__main__": 248 | main() -------------------------------------------------------------------------------- /monitoring/dashboards/boutique-grafana-dashboards.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: boutique-dashboards 5 | namespace: cattle-dashboards 6 | labels: 7 | grafana_dashboard: "1" 8 | data: 9 | boutique-dashboards.yaml: |- 10 | apiVersion: 1 11 | providers: 12 | - name: 'Prometheus' 13 | orgId: 1 14 | folder: '' 15 | folderUid: '' 16 | type: file 17 | disableDeletion: false 18 | editable: true 19 | updateIntervalSeconds: 15 20 | allowUiUpdates: false 21 | options: 22 | path: /etc/grafana/provisioning/dashboards 23 | boutique-dashboard.json: |- 24 | {"annotations":{"list":[{"builtIn":1,"datasource":"-- Grafana --","enable":true,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","name":"Annotations & 
Alerts","type":"dashboard"}]},"editable":true,"gnetId":null,"graphTooltip":0,"links":[],"panels":[{"collapsed":false,"datasource":null,"gridPos":{"h":1,"w":24,"x":0,"y":0},"id":26,"panels":[],"title":"NGINX Ingress 95th Percentile","type":"row"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":1},"hiddenSeries":false,"id":28,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"histogram_quantile(0.95, sum by (le)(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress =~ \"frontend-ingress\"}[10s])))\n","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Frontend Ingress","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"datasource":null,"gridPos":{"h":1,"w":24,"x":0,"y":9},"id":14,"title":"Service Connection 
Rate","type":"row"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":10},"hiddenSeries":false,"id":10,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"adservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Ad Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":12,"y":10},"hiddenSeries":false,"id":12,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ 
\"cartservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cart Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":18},"hiddenSeries":false,"id":4,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"checkoutservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Checkout 
Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":12,"y":18},"hiddenSeries":false,"id":16,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"currencyservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Currency 
Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":26},"hiddenSeries":false,"id":20,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"emailservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Email 
Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":12,"y":26},"hiddenSeries":false,"id":18,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"paymentservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Payment 
Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":34},"hiddenSeries":false,"id":2,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"productcatalogservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Product Catalog 
Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":12,"y":34},"hiddenSeries":false,"id":22,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"recommendationservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Recommendation 
Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":null,"fieldConfig":{"defaults":{},"overrides":[]},"fill":1,"fillGradient":0,"gridPos":{"h":8,"w":12,"x":0,"y":42},"hiddenSeries":false,"id":24,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"nullPointMode":"null","options":{"alertThreshold":true},"percentage":false,"pluginVersion":"7.5.11","pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"exemplar":true,"expr":"sum(rate(boutique_tcp_port_established_connections_total{job =~ \"shippingservice\"}[30s]))","interval":"","legendFormat":"","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Shipping Service","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"schemaVersion":27,"style":"dark","tags":[],"templating":{"list":[]},"time":{"from":"now-6h","to":"now"},"timepicker":{},"timezone":"","title":"Boutique","uid":"06W0nKzVk","version":1} --------------------------------------------------------------------------------