├── tests ├── requirements.txt ├── resources │ ├── sriov-device-plugin-test-resources-valid.json │ └── sriov-device-plugin-test-resources-invalid.json ├── templates │ ├── amd-values.yaml │ ├── nginx-kata.yaml │ ├── nginx-pod.yaml │ ├── bbox-local.yaml │ ├── cuda-add.yaml │ ├── bbox.yaml │ ├── wasm-job.yaml │ ├── knative-helloworld.yaml │ ├── inaccel.yaml │ ├── pvc.yaml │ ├── keda-scaledobject.yaml │ ├── openebs-test.yaml │ ├── pvc-nfs.yaml │ ├── ingress.yaml │ └── emojivoto.yaml ├── conftest.py ├── test_parking.py ├── test_stunner.py ├── test_microcks.py ├── test_shifu.py ├── test_openfaas.py ├── test_portainer.py ├── test_argocd.py ├── test_forgejo.py ├── test_gopaddle_lite.py ├── test_falco.py ├── test_trivy.py ├── test_easyhaproxy.py ├── test_cloudnative-pg.py ├── test_ngrok.py ├── test_kata.py ├── test_multus.py ├── test_keda.py ├── test_osm_edge.py ├── test_sosivio.py ├── test_istio.py ├── test_kwasm.py ├── test_kubearmor.py ├── test_knative.py ├── test_openebs.py ├── test_inaccel.py ├── test_linkerd.py ├── test_monitoring.py ├── test_amd.py ├── test_nfs.py ├── test_cilium.py └── test_sriov_device_plugin.py ├── addons ├── forgejo │ ├── values-mk8s.yaml │ ├── disable │ └── enable ├── jaeger │ ├── simplest.yaml │ ├── cert-tester.yaml │ ├── disable │ └── enable ├── kata │ ├── kata │ │ └── runtime.yaml │ ├── disable │ └── enable ├── cilium │ ├── cilium │ └── disable ├── kubearmor │ ├── karmor │ ├── disable │ └── enable ├── microcks │ ├── disable │ └── enable ├── inaccel │ ├── disable │ └── enable ├── nfs │ ├── nfs.yaml │ ├── disable │ ├── README.md │ └── enable ├── traefik │ ├── enable │ └── disable ├── kwasm │ ├── disable │ └── enable ├── shifu │ ├── disable │ └── enable ├── istio │ ├── disable │ └── enable ├── stunner │ ├── disable │ └── enable ├── fluentd │ ├── fluentd │ │ ├── kibana-service.yaml │ │ ├── es-service.yaml │ │ ├── kibana-deployment.yaml │ │ ├── fluentd-es-ds.yaml │ │ └── es-statefulset.yaml │ ├── enable │ └── disable ├── keda │ ├── disable │ └── 
enable ├── osm-edge │ ├── disable │ ├── osm.wrapper │ └── enable ├── parking │ ├── disable │ └── enable ├── portainer │ ├── disable │ └── enable ├── falco │ ├── disable │ └── enable ├── trivy │ ├── disable │ └── enable ├── easyhaproxy │ ├── disable │ └── enable ├── sosivio │ ├── disable │ └── enable ├── cloudnative-pg │ ├── disable │ └── enable ├── sriov-device-plugin │ ├── disable │ └── sriovdp.yaml ├── linkerd │ ├── disable │ └── enable ├── amd │ ├── disable │ └── enable ├── openfaas │ ├── disable │ └── enable ├── gopaddle │ ├── disable │ └── enable ├── ngrok │ ├── disable │ └── enable ├── knative │ ├── disable │ └── enable ├── dashboard-ingress │ └── disable ├── common │ └── utils.sh ├── argocd │ ├── disable │ └── enable ├── multus │ ├── enable │ ├── disable │ └── multus.yaml └── openebs │ ├── enable │ └── disable ├── .github ├── workflows │ ├── cla-check.yml │ ├── check-formatting.yml │ └── run-tests.yml ├── ISSUE_TEMPLATE │ ├── question.yml │ ├── feature_request.md │ └── bug_report.md ├── dependabot.yml ├── PULL_REQUEST_TEMPLATE.md ├── .jira_sync_config.yaml └── stale.yml ├── CONTRIBUTING.md ├── README.md └── .gitignore /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | sh 3 | PyYaml 4 | requests -------------------------------------------------------------------------------- /addons/forgejo/values-mk8s.yaml: -------------------------------------------------------------------------------- 1 | # Customize values 2 | test: 3 | enabled: false 4 | -------------------------------------------------------------------------------- /addons/jaeger/simplest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: jaegertracing.io/v1 2 | kind: Jaeger 3 | metadata: 4 | name: simplest 5 | -------------------------------------------------------------------------------- /addons/kata/kata/runtime.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: node.k8s.io/v1 2 | kind: RuntimeClass 3 | metadata: 4 | name: kata 5 | handler: kata 6 | -------------------------------------------------------------------------------- /tests/resources/sriov-device-plugin-test-resources-valid.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource_a": ["0000:00:06.0"], 3 | "resource_b": ["0000:00:07.0"] 4 | } 5 | -------------------------------------------------------------------------------- /addons/cilium/cilium: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | KUBECONFIG="$SNAP_DATA/credentials/client.config" ${SNAP_COMMON}/addons/community/addons/cilium/cli/cilium $* 4 | -------------------------------------------------------------------------------- /tests/resources/sriov-device-plugin-test-resources-invalid.json: -------------------------------------------------------------------------------- 1 | { 2 | "sriov_vfio_res_A": ["0000:00:06.0"], 3 | "sriov_vfio_res_B": ["0000:00:07.0"] 4 | } 5 | -------------------------------------------------------------------------------- /tests/templates/amd-values.yaml: -------------------------------------------------------------------------------- 1 | deviceConfig: 2 | spec: 3 | selector: 4 | unit-test-check: "true" 5 | metricsExporter: 6 | enable: false 7 | testRunner: 8 | enable: true -------------------------------------------------------------------------------- /tests/templates/nginx-kata.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | app: kata 6 | name: nginx-kata 7 | spec: 8 | runtimeClassName: kata 9 | containers: 10 | - name: nginx 11 | image: nginx 12 | -------------------------------------------------------------------------------- /.github/workflows/cla-check.yml: 
-------------------------------------------------------------------------------- 1 | name: cla-check 2 | on: [pull_request] 3 | 4 | jobs: 5 | cla-check: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Check if CLA signed 9 | uses: canonical/has-signed-canonical-cla@v2 10 | -------------------------------------------------------------------------------- /addons/kubearmor/karmor: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Elevated permissions are needed for this command. Please use sudo." 4 | exit 1 5 | fi 6 | export KUBECONFIG=$SNAP_DATA/credentials/client.config 7 | 8 | ${SNAP_COMMON}/bin/karmor $* 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.yml: -------------------------------------------------------------------------------- 1 | contact_links: 2 | - name: Ask a question 3 | url: https://kubernetes.slack.com/archives/CAUNWQ85V 4 | about: "For discussions and/or other questions related to MicroK8s, please use the #microk8s channel on the Kubernetes Slack" 5 | -------------------------------------------------------------------------------- /addons/microcks/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | HELM="${SNAP}/microk8s-helm3.wrapper" 6 | NAMESPACE="microcks" 7 | 8 | echo "Disabling Microcks" 9 | 10 | "${HELM}" uninstall microcks -n "${NAMESPACE}" 11 | 12 | echo "Microcks is disabled" 13 | -------------------------------------------------------------------------------- /tests/templates/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | app: nginx 6 | name: nginx 7 | namespace: default 8 | spec: 9 | containers: 10 | - name: nginx 11 | image: nginx:latest 12 | restartPolicy: Always 13 | 
-------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from utils import ( 4 | microk8s_reset, 5 | ) 6 | 7 | 8 | @pytest.fixture(scope="session", autouse=True) 9 | def clean_up(): 10 | """ 11 | Clean up after a test 12 | """ 13 | yield 14 | microk8s_reset() 15 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Set update schedule for GitHub Actions 2 | 3 | version: 2 4 | updates: 5 | - package-ecosystem: "github-actions" 6 | directory: "/" 7 | schedule: 8 | # Check for updates to GitHub Actions every weekday 9 | interval: "daily" 10 | -------------------------------------------------------------------------------- /addons/inaccel/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | echo "Disabling InAccel FPGA Operator" 8 | 9 | "$SNAP/microk8s-helm3.wrapper" uninstall inaccel \ 10 | --namespace kube-system \ 11 | $@ 12 | 13 | echo "InAccel is disabled" 14 | -------------------------------------------------------------------------------- /tests/templates/bbox-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: busybox 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: busybox 9 | image: localhost:32000/my-busybox 10 | command: 11 | - sleep 12 | - "3600" 13 | imagePullPolicy: IfNotPresent 14 | restartPolicy: Always 15 | -------------------------------------------------------------------------------- /addons/nfs/nfs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: 
PersistentVolume 4 | metadata: 5 | name: data-nfs-server-provisioner-0 6 | spec: 7 | capacity: 8 | storage: {{disk_size}} 9 | accessModes: 10 | - ReadWriteOnce 11 | hostPath: 12 | path: {{hostpath}} 13 | claimRef: 14 | namespace: nfs-server-provisioner 15 | name: {{claimref_name}} -------------------------------------------------------------------------------- /addons/traefik/enable: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | echo " 4 | Traefik Ingress controller is now a core addon. Enable ingress addon to deploy Traefik on your cluster: 5 | 6 | $ microk8s enable ingress 7 | 8 | If you have this addon already enabled, you can disable it and start using the ingress core addon. 9 | Note that the new ingress core addon installs Traefik in the 'ingress' namespace. 10 | " 11 | -------------------------------------------------------------------------------- /addons/kubearmor/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | "$SNAP/microk8s-helm.wrapper" uninstall kubearmor-operator -n kubearmor 8 | 9 | if [[ -f "$SNAP_COMMON/plugins/karmor" ]]; then 10 | sudo rm "$SNAP_COMMON/plugins/karmor" 11 | fi 12 | 13 | if [[ -f "$SNAP_COMMON/bin/karmor" ]]; then 14 | sudo rm "$SNAP_COMMON/bin/karmor" 15 | fi 16 | -------------------------------------------------------------------------------- /addons/kwasm/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | NAMESPACE_KWASM="kwasm-system" 6 | 7 | "$SNAP/microk8s-enable.wrapper" helm3 8 | HELM="$SNAP/microk8s-helm3.wrapper" 9 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 10 | 11 | echo "Disabling KWasm" 12 | 13 | $HELM delete -n $NAMESPACE_KWASM kwasm-operator 14 | $HELM repo remove kwasm 15 | $KUBECTL delete ns $NAMESPACE_KWASM 16 | 17 | echo "KWasm 
disabled" -------------------------------------------------------------------------------- /addons/shifu/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | 9 | if [ -f "$SNAP_DATA/shifu/shifu_install.yml" ] 10 | then 11 | echo "Disabling Shifu" 12 | $KUBECTL delete -f "$SNAP_DATA/shifu/shifu_install.yml" || true 13 | run_with_sudo rm -rf "$SNAP_DATA/shifu" 14 | fi 15 | 16 | echo "The Shifu addon is disabled." 17 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Thank you for making MicroK8s better 2 | 3 | Please reference the issue this PR is fixing, or provide a description of the problem addressed. 4 | 5 | *Also verify you have:* 6 | * [ ] Read the [contributions](https://github.com/ubuntu/microk8s/blob/master/CONTRIBUTING.md) page. 7 | * [ ] Submitted the [CLA form](https://ubuntu.com/legal/contributors/agreement), if you are a first time contributor. 
8 | -------------------------------------------------------------------------------- /tests/templates/cuda-add.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: cuda-vector-add 5 | spec: 6 | restartPolicy: OnFailure 7 | containers: 8 | - name: cuda-vector-add 9 | # https://github.com/kubernetes/kubernetes/blob/v1.7.11/test/images/nvidia-cuda/Dockerfile 10 | image: "k8s.gcr.io/cuda-vector-add:v0.1" 11 | resources: 12 | limits: 13 | nvidia.com/gpu: 1 # requesting 1 GPU 14 | -------------------------------------------------------------------------------- /tests/templates/bbox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: busybox 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: busybox 9 | # nslookup on latest busybox is broken: 10 | # https://github.com/docker-library/busybox/issues/48 11 | image: busybox:1.28.4 12 | command: 13 | - sleep 14 | - "3600" 15 | imagePullPolicy: IfNotPresent 16 | restartPolicy: Always 17 | -------------------------------------------------------------------------------- /addons/inaccel/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | "$SNAP/microk8s-enable.wrapper" dns helm3 8 | 9 | echo "Enabling InAccel FPGA Operator" 10 | 11 | "$SNAP/microk8s-helm3.wrapper" install inaccel fpga-operator \ 12 | --namespace kube-system \ 13 | --repo https://setup.inaccel.com/helm \ 14 | --set kubelet=$SNAP_COMMON/var/lib/kubelet \ 15 | $@ 16 | 17 | echo "InAccel is enabled" 18 | -------------------------------------------------------------------------------- /tests/templates/wasm-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: node.k8s.io/v1 2 | kind: RuntimeClass 3 | 
metadata: 4 | name: wasmedge 5 | handler: wasmedge 6 | --- 7 | apiVersion: batch/v1 8 | kind: Job 9 | metadata: 10 | name: wasm-test 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - image: wasmedge/example-wasi:latest 16 | name: wasm-test 17 | restartPolicy: Never 18 | runtimeClassName: wasmedge 19 | backoffLimit: 1 20 | -------------------------------------------------------------------------------- /addons/istio/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | echo "Disabling Istio" 8 | 9 | run_with_sudo "${SNAP_DATA}/bin/istioctl" -c "${SNAP_DATA}/credentials/client.config" x uninstall --purge -y 10 | run_with_sudo rm -rf "${SNAP_DATA}/bin/istioctl" 11 | run_with_sudo rm -rf "$SNAP_USER_COMMON/istio-auth.lock" 12 | run_with_sudo rm -rf "$SNAP_USER_COMMON/istio.lock" 13 | 14 | echo "Istio is terminating" 15 | -------------------------------------------------------------------------------- /addons/stunner/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | "$SNAP/microk8s-enable.wrapper" helm3 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | HELM="$SNAP/microk8s-helm3.wrapper" 9 | NAMESPACE_STUNNER_SYSTEM="stunner-system" 10 | 11 | echo "Disabling STUNner..." 12 | 13 | $HELM uninstall stunner-gateway-operator --namespace $NAMESPACE_STUNNER_SYSTEM 14 | $KUBECTL delete namespace $NAMESPACE_STUNNER_SYSTEM 15 | 16 | echo "STUNner is disabled." 
17 | -------------------------------------------------------------------------------- /addons/fluentd/fluentd/kibana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | kubernetes.io/name: "Kibana" 11 | spec: 12 | ports: 13 | - port: 5601 14 | protocol: TCP 15 | targetPort: ui 16 | selector: 17 | k8s-app: kibana-logging 18 | -------------------------------------------------------------------------------- /addons/keda/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | 9 | disable_keda() { 10 | 11 | if [ -f "${SNAP_DATA}/keda/keda.yaml" ] 12 | then 13 | echo "Disabling KEDA" 14 | $KUBECTL delete -f "${SNAP_DATA}/keda/keda.yaml" || true 15 | run_with_sudo rm -rf "${SNAP_DATA}/keda" 16 | fi 17 | } 18 | 19 | disable_keda 20 | 21 | echo "The KEDA addon is disabled." 22 | -------------------------------------------------------------------------------- /addons/osm-edge/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | echo "Disabling osm-edge" 8 | ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 9 | 10 | echo "Removing osm-edge control plane" 11 | KUBECONFIG=$SNAP_DATA/credentials/client.config $SNAP_DATA/bin/osm uninstall mesh -f 12 | echo "Deleting osm-edge binary." 
13 | run_with_sudo rm -f "${SNAP_COMMON}/plugins/osm" 14 | run_with_sudo rm -f "$SNAP_DATA/bin/osm" 15 | -------------------------------------------------------------------------------- /addons/parking/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | source $SNAP/actions/common/utils.sh 5 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 6 | 7 | 8 | NAMESPACE_PTR="parking" 9 | 10 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 11 | HELM="$SNAP/microk8s-helm3.wrapper" 12 | 13 | echo "Disabling Parking App" 14 | echo 15 | 16 | $HELM uninstall parking \ 17 | --namespace $NAMESPACE_PTR 18 | 19 | # $KUBECTL create namespace "$NAMESPACE_PTR" > /dev/null 2>&1 || true 20 | -------------------------------------------------------------------------------- /addons/portainer/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | NAMESPACE_PTR="portainer" 8 | 9 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 10 | 11 | KUBECTL_DELETE_ARGS="--wait=true --timeout=180s --ignore-not-found=true" 12 | 13 | echo "Disabling Portainer" 14 | 15 | # unload the the manifests 16 | $KUBECTL delete $KUBECTL_DELETE_ARGS -n $NAMESPACE_PTR deployment,service,pods --all > /dev/null 2>&1 17 | 18 | 19 | echo "Portainer deployment is disabled" 20 | -------------------------------------------------------------------------------- /addons/falco/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | NAMESPACE_FALCO="falco" 8 | 9 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 10 | HELM="$SNAP/microk8s-helm3.wrapper" 11 | KUBECTL_DELETE_ARGS="--wait=true --timeout=180s --ignore-not-found=true" 12 | 13 | echo "Disabling Falco" 14 | 15 | $HELM delete falco -n $NAMESPACE_FALCO 16 | 17 
| $KUBECTL delete $KUBECTL_DELETE_ARGS namespace "$NAMESPACE_FALCO" > /dev/null 2>&1 || true 18 | 19 | echo "Falco disabled" 20 | -------------------------------------------------------------------------------- /addons/trivy/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | NAMESPACE_TRIVY="trivy-system" 8 | 9 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 10 | HELM="$SNAP/microk8s-helm3.wrapper" 11 | KUBECTL_DELETE_ARGS="--wait=true --timeout=180s --ignore-not-found=true" 12 | 13 | echo "Disabling Trivy" 14 | 15 | $HELM delete trivy-operator -n $NAMESPACE_TRIVY 16 | 17 | $KUBECTL delete $KUBECTL_DELETE_ARGS namespace "$NAMESPACE_TRIVY" > /dev/null 2>&1 || true 18 | 19 | echo "Trivy disabled" 20 | -------------------------------------------------------------------------------- /tests/templates/knative-helloworld.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 # Current version of Knative 2 | kind: Service 3 | metadata: 4 | name: helloworld-go # The name of the app 5 | namespace: default # The namespace the app will use 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - image: gcr.io/knative-samples/helloworld-go # The URL to the image of the app 11 | env: 12 | - name: TARGET # The environment variable printed out by the sample app 13 | value: "Go Sample v1" 14 | -------------------------------------------------------------------------------- /addons/jaeger/cert-tester.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: cert-manager-test 5 | --- 6 | apiVersion: cert-manager.io/v1 7 | kind: Issuer 8 | metadata: 9 | name: test-selfsigned 10 | namespace: cert-manager-test 11 | spec: 12 | selfSigned: {} 13 | --- 14 | apiVersion: cert-manager.io/v1 15 | kind: 
Certificate 16 | metadata: 17 | name: selfsigned-cert 18 | namespace: cert-manager-test 19 | spec: 20 | dnsNames: 21 | - example.com 22 | secretName: selfsigned-cert-tls 23 | issuerRef: 24 | name: test-selfsigned 25 | -------------------------------------------------------------------------------- /tests/templates/inaccel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | inaccel/cli: | 6 | bitstream install https://store.inaccel.com/artifactory/bitstreams/xilinx/aws-vu9p-f1/dynamic-shell/aws/vector/1/1addition 7 | labels: 8 | inaccel/fpga: enabled 9 | name: inaccel-vadd 10 | spec: 11 | containers: 12 | - image: inaccel/vadd 13 | name: inaccel-vadd 14 | resources: 15 | limits: 16 | xilinx/aws-vu9p-f1: 1 17 | nodeSelector: 18 | xilinx/aws-vu9p-f1: dynamic-shell 19 | restartPolicy: Never 20 | -------------------------------------------------------------------------------- /addons/easyhaproxy/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | source $SNAP/actions/common/utils.sh 5 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 6 | 7 | 8 | NAMESPACE_PTR="easyhaproxy" 9 | 10 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 11 | HELM="$SNAP/microk8s-helm3.wrapper" 12 | 13 | echo "Disabling EasyHAProxy Ingress Controller" 14 | echo 15 | 16 | 17 | $KUBECTL label nodes $(hostname) "easyhaproxy/node-" 18 | 19 | $HELM uninstall ingress \ 20 | --namespace $NAMESPACE_PTR 21 | 22 | # $KUBECTL create namespace "$NAMESPACE_PTR" > /dev/null 2>&1 || true 23 | -------------------------------------------------------------------------------- /addons/traefik/disable: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | set -e 3 | 4 | HELM="${SNAP}/microk8s-helm.wrapper" 5 | NAMESPACE="traefik" 6 | 7 | echo "Disabling Traefik Ingress controller from 
${NAMESPACE} namespace" 8 | 9 | "${HELM}" uninstall traefik -n "${NAMESPACE}" 10 | 11 | echo "Disabled Traefik Ingress controller" 12 | 13 | echo " 14 | Traefik Ingress controller is now a core addon and this community addon is deprecated. 15 | Note that the new core addon installs Traefik ingress in the 'ingress' namespace. 16 | 17 | $ microk8s enable ingress 18 | $ microk8s disable ingress 19 | " 20 | 21 | -------------------------------------------------------------------------------- /addons/sosivio/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | HELM="$SNAP/microk8s-helm3.wrapper" 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | 9 | NAMESPACE_SOSIVIO="sosivio" 10 | 11 | RED='\033[0;31m' 12 | NC='\033[0m' # No Color 13 | 14 | echo -e "${RED}Disabling Sosivio${NC}" 15 | 16 | 17 | echo "It may take up to 2 minutes..." 18 | 19 | $HELM delete sosivio -n ${NAMESPACE_SOSIVIO} --wait > /dev/null 2>&1 || true 20 | $KUBECTL delete --grace-period=60 ns ${NAMESPACE_SOSIVIO} > /dev/null 2>&1 || true 21 | 22 | echo -e "${RED}Sosivio disabled${NC}" -------------------------------------------------------------------------------- /addons/cloudnative-pg/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | 5 | cnpg_delete_manifest() { 6 | kubectl="$SNAP/microk8s-kubectl.wrapper" 7 | 8 | "${SNAP_DATA}/bin/kubectl-cnpg" install generate | $kubectl delete -f - > /dev/null 9 | 10 | echo "$?" 
11 | } 12 | 13 | cnpg_disable() { 14 | echo "Uninstalling CloudNativePG" 15 | apply_result=$(cnpg_delete_manifest) 16 | 17 | if [[ $apply_result -ne 0 ]]; then 18 | echo "CloudNativePG wasn't uninstalled" 19 | exit 1 20 | fi 21 | 22 | rm -f "${SNAP_DATA}/bin/kubectl-cnpg" 23 | echo "CloudNativePG uninstalled" 24 | } 25 | 26 | cnpg_disable 27 | -------------------------------------------------------------------------------- /addons/stunner/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | "$SNAP/microk8s-enable.wrapper" dns 6 | "$SNAP/microk8s-enable.wrapper" helm3 7 | 8 | HELM="$SNAP/microk8s-helm3.wrapper" 9 | NAMESPACE_STUNNER_SYSTEM="stunner-system" 10 | VERSION_STUNNER_SYSTEM="0.18.0" 11 | 12 | echo "Enabling STUNner..." 13 | 14 | $HELM repo add stunner https://l7mp.io/stunner 15 | $HELM repo update 16 | 17 | $HELM upgrade --install stunner-gateway-operator stunner/stunner-gateway-operator \ 18 | --create-namespace --namespace $NAMESPACE_STUNNER_SYSTEM --version $VERSION_STUNNER_SYSTEM 19 | 20 | echo "STUNner is enabled." 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Suggest a new feature 4 | --- 5 | 6 | 10 | 11 | #### Summary 12 | 13 | 14 | #### Why is this important? 15 | 16 | 17 | #### Are you interested in contributing to this feature? 
18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /addons/sriov-device-plugin/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import json 4 | import os 5 | import subprocess 6 | import time 7 | import tempfile 8 | 9 | import click 10 | 11 | KUBECTL = os.path.expandvars("$SNAP/microk8s-kubectl.wrapper") 12 | SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__)) 13 | 14 | 15 | def main(): 16 | click.echo("Disabling SR-IOV Network Device Plugin...") 17 | 18 | subprocess.check_call([KUBECTL, "delete", "-f", os.path.join(SCRIPT_PATH, "sriovdp.yaml")]) 19 | 20 | click.echo("SR-IOV Network Device Plugin is now disabled.") 21 | 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /addons/linkerd/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | echo "Disabling Linkerd" 8 | ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 9 | 10 | echo "Removing linkerd control plane" 11 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 12 | "$SNAP_DATA/bin/linkerd" "--kubeconfig=${SNAP_DATA}/credentials/client.config" install "--ignore-cluster" | $KUBECTL delete -f - 13 | "$SNAP_DATA/bin/linkerd" "--kubeconfig=${SNAP_DATA}/credentials/client.config" viz uninstall | $KUBECTL delete -f - 14 | echo "Deleting linkerd binary." 
15 | run_with_sudo rm -f "$SNAP_DATA/bin/linkerd" 16 | -------------------------------------------------------------------------------- /addons/amd/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import pathlib 5 | import subprocess 6 | import click 7 | 8 | SNAP = pathlib.Path(os.getenv("SNAP") or "/snap/microk8s/current") 9 | HELM = SNAP / "microk8s-helm3.wrapper" 10 | 11 | @click.command() 12 | @click.option("--debug", is_flag=True) 13 | def main(debug: bool): 14 | click.echo("Disabling AMD GPU operator") 15 | uninstall_args = ["uninstall", "amd-gpu-operator", "-n", "kube-amd-gpu"] 16 | 17 | if debug: 18 | uninstall_args.append("--debug") 19 | 20 | subprocess.check_output([HELM, *uninstall_args]) 21 | 22 | if __name__ == "__main__": 23 | main() 24 | 25 | 26 | -------------------------------------------------------------------------------- /addons/fluentd/fluentd/es-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | kubernetes.io/name: "Elasticsearch" 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: db 15 | port: 9200 16 | protocol: TCP 17 | targetPort: 9200 18 | - name: transport 19 | port: 9300 20 | protocol: TCP 21 | targetPort: 9300 22 | publishNotReadyAddresses: true 23 | selector: 24 | k8s-app: elasticsearch-logging 25 | sessionAffinity: None 26 | type: ClusterIP 27 | -------------------------------------------------------------------------------- /tests/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: myclaim 5 | spec: 6 | accessModes: 7 | - 
ReadWriteOnce 8 | volumeMode: Filesystem 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | --- 13 | kind: Pod 14 | apiVersion: v1 15 | metadata: 16 | name: hostpath-test-pod 17 | spec: 18 | containers: 19 | - name: hostpath-test-container 20 | image: busybox 21 | command: 22 | ["/bin/sh", "-c", "while true; do date >> /mnt/dates; sleep 2; done"] 23 | volumeMounts: 24 | - name: hostpath-volume 25 | mountPath: "/mnt" 26 | restartPolicy: "Never" 27 | volumes: 28 | - name: hostpath-volume 29 | persistentVolumeClaim: 30 | claimName: myclaim 31 | -------------------------------------------------------------------------------- /addons/kubearmor/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | "$SNAP/microk8s-enable.wrapper" helm3 8 | 9 | 10 | ${SNAP}/microk8s-helm3.wrapper repo add kubearmor https://kubearmor.github.io/charts 11 | ${SNAP}/microk8s-helm3.wrapper repo update kubearmor 12 | ${SNAP}/microk8s-helm3.wrapper upgrade --install kubearmor-operator kubearmor/kubearmor-operator -n kubearmor --set autoDeploy=true --set kubearmorConfig.kubearmorImage.image=kubearmor/kubearmor:v1.2.0 --create-namespace 13 | 14 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 15 | curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b "$SNAP_COMMON/bin" 16 | cp "$CURRENT_DIR/karmor" "$SNAP_COMMON/plugins" 17 | chmod +x "$SNAP_COMMON/plugins/karmor" 18 | 19 | -------------------------------------------------------------------------------- /addons/openfaas/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | OF_NAMESPACE="openfaas" 8 | FN_NAMESPACE="openfaas-fn" 9 | 10 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 11 | 12 | KUBECTL_DELETE_ARGS="--wait=true --timeout=180s --ignore-not-found=true" 13 | 14 | echo "Disabling 
OpenFaaS" 15 | 16 | # unload the the crd 17 | 18 | 19 | # delete the namespaces 20 | $KUBECTL delete $KUBECTL_DELETE_ARGS namespace "$OF_NAMESPACE" > /dev/null 2>&1 || true 21 | $KUBECTL delete $KUBECTL_DELETE_ARGS namespace "$FN_NAMESPACE" > /dev/null 2>&1 || true 22 | $KUBECTL delete $KUBECTL_DELETE_ARGS crd \ 23 | functioningresses.openfaas.com \ 24 | profiles.openfaas.com \ 25 | functions.openfaas.com 26 | 27 | echo "OpenFaaS is disabled" 28 | -------------------------------------------------------------------------------- /tests/templates/keda-scaledobject.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: gonuts 5 | --- 6 | apiVersion: keda.sh/v1alpha1 7 | kind: ScaledObject 8 | metadata: 9 | name: stan-scaledobject 10 | namespace: gonuts 11 | spec: 12 | pollingInterval: 10 # Optional. Default: 30 seconds 13 | cooldownPeriod: 30 # Optional. Default: 300 seconds 14 | minReplicaCount: 0 # Optional. Default: 0 15 | maxReplicaCount: 30 # Optional. 
Default: 100 16 | scaleTargetRef: 17 | name: gonuts-sub 18 | triggers: 19 | - type: stan 20 | metadata: 21 | natsServerMonitoringEndpoint: "stan-nats-ss.stan.svc.cluster.local:8222" 22 | queueGroup: "grp1" 23 | durableName: "ImDurable" 24 | subject: "Test" 25 | lagThreshold: "10" 26 | -------------------------------------------------------------------------------- /addons/gopaddle/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | HELM="$SNAP/microk8s-helm3.wrapper" 9 | 10 | echo "Disabling gopaddle" 11 | 12 | #uninstall gopaddle 13 | $HELM delete gopaddle -n gopaddle 14 | 15 | # delete namespace 16 | $KUBECTL delete ns gopaddle 17 | 18 | #remove gopaddle helm folder 19 | rm -rf "$SNAP_DATA/tmp/gopaddle" 20 | 21 | if $KUBECTL get clusterrole gopaddle >/dev/null 2>&1 22 | then 23 | $KUBECTL delete clusterrole gopaddle 24 | fi 25 | 26 | if $KUBECTL get clusterrolebinding gopaddle >/dev/null 2>&1 27 | then 28 | $KUBECTL delete clusterrolebinding gopaddle 29 | fi 30 | 31 | # remove helm repo 32 | $HELM repo remove gopaddle 33 | 34 | echo "Disabled gopaddle" 35 | -------------------------------------------------------------------------------- /addons/shifu/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | 9 | do_prerequisites() { 10 | # enable dns service 11 | $SNAP/microk8s-enable.wrapper dns 12 | $SNAP/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 13 | run_with_sudo mkdir -p "$SNAP_DATA/shifu" 14 | } 15 | 16 | get_shifu() { 17 | SHIFU_VERSION="v0.11.0" 18 | echo "Fetching shifu version $SHIFU_VERSION." 
#!/usr/bin/env bash

set -e
source $SNAP/actions/common/utils.sh
CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
echo "Disabling Jaeger"
read -ra ARGUMENTS <<< "$1"

KUBECTL="$SNAP/microk8s-kubectl.wrapper"
MANIFESTS_PATH="${CURRENT_DIR}"
NAMESPACE="default"
if [ ! -z "${ARGUMENTS[@]}" ]
then
  NAMESPACE=${ARGUMENTS[0]}
  # Rewrite the default-namespace operator manifest for the namespace the
  # operator was enabled into, then delete the resulting resources.
  # Bug fix: the serving-cert substitution used "\j" in the sed replacement,
  # which emitted "<ns>jaeger-operator-serving-cert" with the "/" separator
  # missing; it must be an escaped slash "\/".
  sed "s/namespace: default/namespace: ${NAMESPACE}/g; \
      s/default\.svc/${NAMESPACE}\.svc/g; \
      s/default\/jaeger-operator-serving-cert/${NAMESPACE}\/jaeger-operator-serving-cert/g" "$MANIFESTS_PATH/operator.yaml" |\
  $KUBECTL delete -f -
else
  # No namespace argument: delete the stock manifests, tolerating absence.
  $KUBECTL delete -f "${MANIFESTS_PATH}/simplest.yaml" || true
  $KUBECTL delete -f "${MANIFESTS_PATH}/operator.yaml" || true
fi
echo "The Jaeger operator is disabled"
#!/usr/bin/env bash

set -e

source $SNAP/actions/common/utils.sh

KUBECTL="$SNAP/microk8s-kubectl.wrapper"

# Enable DNS and create the scratch directory the KEDA manifest is fetched into.
do_prerequisites() {
  # enable dns service
  "$SNAP/microk8s-enable.wrapper" dns
  ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null
  run_with_sudo mkdir -p "${SNAP_DATA}/keda"
}


# Download the KEDA release manifest. Release assets are named with the bare
# version number, while the git tag carries a leading "v".
# Fix: renamed the typo'd KEDA_ERSION variable and replaced the fragile
# `sed 's/v//g'` (which strips EVERY "v") with prefix removal of the leading
# "v" only.
get_keda () {
  KEDA_VERSION="v2.12.0"
  KEDA_BARE_VERSION="${KEDA_VERSION#v}"
  echo "Fetching keda version $KEDA_BARE_VERSION."
  fetch_as https://github.com/kedacore/keda/releases/download/${KEDA_VERSION}/keda-${KEDA_BARE_VERSION}.yaml "$SNAP_DATA/keda/keda.yaml"
}


# Apply server-side: the KEDA CRDs are too large for client-side apply's
# last-applied-configuration annotation.
enable_keda() {
  echo "Enabling KEDA"
  $KUBECTL apply --server-side -f "${SNAP_DATA}/keda/keda.yaml"
}

do_prerequisites
get_keda
enable_keda

echo "The KEDA is enabled."
35 | -------------------------------------------------------------------------------- /addons/kwasm/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | NAMESPACE_KWASM="kwasm-system" 6 | OPERATOR_VERSION="0.2.2" 7 | INSTALLER_VERSION="v0.3.0" 8 | 9 | "$SNAP/microk8s-enable.wrapper" helm3 10 | HELM="$SNAP/microk8s-helm3.wrapper" 11 | 12 | echo "Installing KWasm" 13 | 14 | $HELM repo add --force-update kwasm http://kwasm.sh/kwasm-operator/ 15 | $HELM upgrade -i -n $NAMESPACE_KWASM --create-namespace kwasm-operator kwasm/kwasm-operator \ 16 | --version $OPERATOR_VERSION \ 17 | --set kwasmOperator.installerImage="ghcr.io/kwasm/kwasm-node-installer:$INSTALLER_VERSION" \ 18 | --set kwasmOperator.autoProvision="true" 19 | 20 | echo "KWasm is installed with the following versions:" 21 | echo " kwasm-operator: $OPERATOR_VERSION" 22 | echo " kwasm-node-installer: $INSTALLER_VERSION" 23 | echo "" 24 | echo "If you need help to get started visit:" 25 | echo " https://kwasm.sh/?dist=microk8s#Quickstart" -------------------------------------------------------------------------------- /tests/test_parking.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import platform 3 | 4 | from utils import ( 5 | microk8s_disable, 6 | microk8s_enable, 7 | wait_for_pod_state, 8 | ) 9 | 10 | 11 | class TestParking(object): 12 | @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x") 13 | def test_parking(self): 14 | """ 15 | Sets up and validates parking. 
16 | """ 17 | print("Enabling parking") 18 | microk8s_enable("parking", optional_args="example.com") 19 | print("Validating parking") 20 | self.validate_parking() 21 | print("Disabling parking") 22 | microk8s_disable("parking") 23 | 24 | def validate_parking(self): 25 | """ 26 | Validate parking 27 | """ 28 | wait_for_pod_state( 29 | "", "parking", "running", label="app.kubernetes.io/name=static-httpserver" 30 | ) 31 | -------------------------------------------------------------------------------- /addons/fluentd/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export PATH="$SNAP/usr/sbin:$SNAP/usr/bin:$SNAP/sbin:$SNAP/bin:$PATH" 6 | 7 | source $SNAP/actions/common/utils.sh 8 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 9 | 10 | echo "Enabling Fluentd-Elasticsearch" 11 | 12 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 13 | echo "Labeling nodes" 14 | NODENAME="$($KUBECTL get no -o yaml | grep " name:"| awk '{print $2}')" 15 | for NODE in $NODENAME 16 | do 17 | $KUBECTL label nodes "$NODE" beta.kubernetes.io/fluentd-ds-ready=true || true 18 | done 19 | 20 | "$SNAP/microk8s-enable.wrapper" dns 21 | sleep 5 22 | 23 | if ! 
grep -e "\-\-allow-privileged" ${SNAP_DATA}/args/kube-apiserver 24 | then 25 | refresh_opt_in_config "allow-privileged" "true" kube-apiserver 26 | restart_service apiserver 27 | sleep 5 28 | fi 29 | 30 | $KUBECTL apply -f "${CURRENT_DIR}/fluentd" 31 | 32 | echo "Fluentd-Elasticsearch is enabled" 33 | -------------------------------------------------------------------------------- /addons/ngrok/disable: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | set -e 3 | 4 | HELM="${SNAP}/microk8s-helm.wrapper" 5 | 6 | POSITIONAL_ARGS=() 7 | 8 | while [[ $# -gt 0 ]]; do 9 | case $1 in 10 | --namespace) 11 | NAMESPACE="$2" 12 | shift # past argument 13 | shift # past value 14 | ;; 15 | -*|--*) 16 | echo "Unknown option $1" 17 | exit 1 18 | ;; 19 | *) 20 | POSITIONAL_ARGS+=("$1") # save positional arg 21 | shift # past argument 22 | ;; 23 | esac 24 | done 25 | 26 | set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters 27 | 28 | if [ -z "$NAMESPACE" ]; then 29 | echo "Namespace (--namespace) was not specified. Defaulting to ngrok-operator namespace." 30 | NAMESPACE="ngrok-operator" 31 | fi 32 | 33 | echo "Disabling ngrok Kubernetes operator in ${NAMESPACE} namespace" 34 | 35 | "${HELM}" uninstall ngrok-operator --namespace "${NAMESPACE}" 36 | 37 | echo "Disabled ngrok Kubernetes operator" 38 | -------------------------------------------------------------------------------- /tests/test_stunner.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import platform 3 | 4 | from utils import ( 5 | microk8s_disable, 6 | microk8s_enable, 7 | wait_for_pod_state, 8 | ) 9 | 10 | 11 | class TestSTUNner(object): 12 | @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x") 13 | def test_stunner(self): 14 | """ 15 | Sets up and validates STUNner. 
#!/usr/bin/env bash

set -e

source $SNAP/actions/common/utils.sh
CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)

echo "Disabling Fluentd-Elasticsearch"

KUBECTL="$SNAP/microk8s-kubectl.wrapper"

# This one deletes the old fluentd resources.
$KUBECTL -n kube-system delete cm fluentd-es-config-v0.1.5 > /dev/null 2>&1 || true
$KUBECTL -n kube-system delete daemonset fluentd-es-v2.2.0 > /dev/null 2>&1 || true
$KUBECTL -n kube-system delete daemonset fluentd-es-v3.0.2 > /dev/null 2>&1 || true

NODENAME="$($KUBECTL get no -o yaml | grep " name:"| awk '{print $2}')"

# Remove the fluentd-ds-ready label from every node, one node at a time.
# Bug fix: the loop previously passed "$NODENAME" (the quoted, newline-joined
# list of ALL node names) to kubectl instead of the current "$NODE", so the
# command failed on multi-node clusters.
for NODE in $NODENAME
do
  $KUBECTL label nodes "$NODE" beta.kubernetes.io/fluentd-ds-ready- || true
done


$KUBECTL delete -f "${CURRENT_DIR}/fluentd"
# Allow for a few seconds for the deletion to take place
sleep 10

echo "Fluentd-Elasticsearch is disabled"
os.path.expandvars("$SNAP/microk8s-kubectl.wrapper") 11 | 12 | def disable(self, ns, *args, **kwargs): 13 | """Remove addon from kubernetes""" 14 | cmd = [ 15 | "/bin/sh", 16 | "-c", 17 | f"{self.KUBECTL} get all -n {ns} -o yaml | {self.KUBECTL} delete -f -", 18 | ] 19 | subprocess.check_call(cmd) 20 | cmd = ["/bin/sh", "-c", f"{self.KUBECTL} delete namespace {ns}"] 21 | subprocess.check_call(cmd) 22 | 23 | 24 | @click.command() 25 | @click.option("--ns", default="forgejo", help="delete custom kubernetes namespace") 26 | def main(ns): 27 | """Disable Forgejo""" 28 | click.echo("Disabling Forgejo") 29 | H = Handler() 30 | H.disable(ns=ns) 31 | 32 | 33 | if __name__ == "__main__": 34 | main() 35 | -------------------------------------------------------------------------------- /tests/templates/openebs-test.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: local-hostpath-pvc 5 | spec: 6 | storageClassName: openebs-hostpath 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 5G 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | labels: 17 | app: openebs-test-busybox 18 | name: openebs-test-busybox 19 | spec: 20 | terminationGracePeriodSeconds: 5 21 | containers: 22 | - resources: 23 | limits: 24 | cpu: 0.5 25 | name: openebs-test-busybox 26 | image: busybox 27 | command: ["sh", "-c", "echo Container 1 is Running ; sleep 3600"] 28 | imagePullPolicy: IfNotPresent 29 | ports: 30 | - containerPort: 3306 31 | name: busybox 32 | volumeMounts: 33 | - mountPath: /my-data 34 | name: my-data-volume 35 | volumes: 36 | - name: my-data-volume 37 | persistentVolumeClaim: 38 | claimName: local-hostpath-pvc 39 | -------------------------------------------------------------------------------- /tests/test_microcks.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | 
import platform 4 | 5 | from utils import ( 6 | microk8s_disable, 7 | microk8s_enable, 8 | wait_for_pod_state, 9 | ) 10 | 11 | 12 | class TestMicrocks(object): 13 | @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x") 14 | @pytest.mark.skipif( 15 | os.environ.get("UNDER_TIME_PRESSURE") == None, 16 | reason="Skipping test, expected to be tested when under time pressure", 17 | ) 18 | def test_microcks(self): 19 | """ 20 | Sets up and validates microcks. 21 | """ 22 | print("Enabling microcks") 23 | microk8s_enable("microcks") 24 | print("Validating microcks") 25 | self.validate_microcks() 26 | print("Disabling microcks") 27 | microk8s_disable("microcks") 28 | 29 | def validate_microcks(self): 30 | """ 31 | Validate microcks 32 | """ 33 | wait_for_pod_state("", "microcks", "running", label="app=microcks") 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Something is not working 4 | --- 5 | 6 | 10 | 11 | #### Summary 12 | 13 | 14 | #### What Should Happen Instead? 15 | 16 | 17 | #### Reproduction Steps 18 | 19 | 20 | 1. ... 21 | 2. ... 22 | 23 | #### Introspection Report 24 | 25 | 26 | #### Can you suggest a fix? 27 | 28 | 29 | #### Are you interested in contributing with a fix? 
import pytest
import os
import platform

from utils import (
    microk8s_disable,
    microk8s_enable,
    wait_for_pod_state,
)


class TestOpenfaas(object):
    @pytest.mark.skipif(
        platform.machine() != "x86_64",
        reason="OpenFaaS tests are only relevant in x86 architectures",
    )
    @pytest.mark.skipif(
        os.environ.get("UNDER_TIME_PRESSURE") == "True",
        # Fix: the skip reason was copy-pasted from the multus test.
        reason="Skipping openfaas tests as we are under time pressure",
    )
    def test_openfaas(self):
        """
        Sets up and validates OpenFaaS.
        """
        print("Enabling openfaas")
        microk8s_enable("openfaas")
        print("Validating openfaas")
        self.validate_openfaas()
        print("Disabling openfaas")
        microk8s_disable("openfaas")

    def validate_openfaas(self):
        """
        Validate openfaas
        """
        # The gateway deployment is the last component to come up; its pod
        # reaching Running is the addon's readiness signal.
        wait_for_pod_state("", "openfaas", "running", label="app=gateway")
21 | """ 22 | print("Enabling Portainer") 23 | microk8s_enable("portainer") 24 | print("Validating Portainer") 25 | self.validate_portainer() 26 | print("Disabling Portainer") 27 | microk8s_disable("portainer") 28 | 29 | def validate_portainer(self): 30 | """ 31 | Validate portainer 32 | """ 33 | wait_for_pod_state( 34 | "", "portainer", "running", label="app.kubernetes.io/name=portainer" 35 | ) 36 | -------------------------------------------------------------------------------- /tests/test_argocd.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | 5 | from utils import ( 6 | microk8s_disable, 7 | microk8s_enable, 8 | wait_for_pod_state, 9 | ) 10 | 11 | 12 | class TestArgoCD(object): 13 | @pytest.mark.skipif( 14 | platform.machine() == "s390x", 15 | reason="ArgoCD tests are only relevant in x86 and arm64 architectures", 16 | ) 17 | @pytest.mark.skipif( 18 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 19 | reason="Skipping argocd tests as we are under time pressure", 20 | ) 21 | def test_argocd(self): 22 | """ 23 | Sets up and validates ArgoCD. 
import pytest
import os
import platform

from utils import (
    microk8s_disable,
    microk8s_enable,
    wait_for_pod_state,
    wait_for_installation,
)


class TestForgejo(object):
    @pytest.mark.skipif(
        platform.machine() == "s390x",
        reason="forgejo tests are only relevant in x86 and arm64 architectures",
    )
    @pytest.mark.skipif(
        os.environ.get("UNDER_TIME_PRESSURE") == "True",
        # Fix: the skip reason was copy-pasted from the argocd test.
        reason="Skipping forgejo tests as we are under time pressure",
    )
    def test_forgejo(self):
        """
        Sets up and validates forgejo.
        """
        print("Enabling forgejo")
        microk8s_enable("forgejo")
        print("Validating forgejo")
        self.validate_forgejo()
        print("Disabling forgejo")
        microk8s_disable("forgejo")

    def validate_forgejo(self):
        """
        Validate forgejo
        """
        # Wait for the cluster itself to settle before polling the pods.
        wait_for_installation()
        wait_for_pod_state("", "forgejo", "running")
import pytest
import os
import platform

from utils import (
    microk8s_disable,
    microk8s_enable,
    wait_for_pod_state,
)


# Smoke test for the Falco addon: enable it, wait for its pods, disable it.
class TestFalco(object):
    @pytest.mark.skipif(
        platform.machine() != "x86_64",
        reason="Falco tests are only relevant in x86 architectures",
    )
    @pytest.mark.skipif(
        os.environ.get("UNDER_TIME_PRESSURE") == "True",
        reason="Skipping falco tests as we are under time pressure",
    )
    def test_falco(self):
        """
        Sets up and validates Falco.
        """
        print("Enabling Falco")
        microk8s_enable("falco")
        print("Validating Falco")
        self.validate_falco()
        print("Disabling Falco")
        microk8s_disable("falco")

    def validate_falco(self):
        """
        Validate Falco
        """
        # Block until the falco release's pods report Running in the falco
        # namespace; wait_for_pod_state raises/fails the test on timeout.
        wait_for_pod_state(
            "",
            "falco",
            "running",
            label="app.kubernetes.io/instance=falco",
        )
import pytest
import os
import platform

from utils import (
    microk8s_disable,
    microk8s_enable,
    wait_for_pod_state,
)


class TestTrivy(object):
    @pytest.mark.skipif(
        platform.machine() != "x86_64",
        reason="Trivy tests are only relevant in x86 architectures",
    )
    @pytest.mark.skipif(
        os.environ.get("UNDER_TIME_PRESSURE") == "True",
        # Fix: the skip reason was copy-pasted from the multus test.
        reason="Skipping trivy tests as we are under time pressure",
    )
    def test_trivy(self):
        """
        Sets up and validates Trivy.
        """
        print("Enabling Trivy")
        microk8s_enable("trivy")
        print("Validating Trivy")
        self.validate_trivy()
        print("Disabling Trivy")
        microk8s_disable("trivy")

    def validate_trivy(self):
        """
        Validate Trivy
        """
        # The trivy-operator pod reaching Running is the addon's readiness
        # signal.
        wait_for_pod_state(
            "",
            "trivy-system",
            "running",
            label="app.kubernetes.io/instance=trivy-operator",
        )
27 | """ 28 | print("Enabling easyhaproxy") 29 | microk8s_enable("easyhaproxy") 30 | print("Validating easyhaproxy") 31 | self.validate_easyhaproxy() 32 | print("Disabling easyhaproxy") 33 | microk8s_disable("easyhaproxy") 34 | 35 | def validate_easyhaproxy(self): 36 | """ 37 | Validate easyhaproxy 38 | """ 39 | wait_for_pod_state( 40 | "", "easyhaproxy", "running", label="app.kubernetes.io/name=easyhaproxy" 41 | ) 42 | -------------------------------------------------------------------------------- /.github/.jira_sync_config.yaml: -------------------------------------------------------------------------------- 1 | settings: 2 | # Jira project key to create the issue in 3 | jira_project_key: "KU" 4 | 5 | # Dictionary mapping GitHub issue status to Jira issue status 6 | status_mapping: 7 | opened: Untriaged 8 | closed: done 9 | 10 | # (Optional) Jira project components that should be attached to the created issue 11 | # Component names are case-sensitive 12 | components: 13 | - Microk8s snap 14 | 15 | # (Optional) GitHub labels. Only issues with one of those labels will be synchronized. 16 | # If not specified, all issues will be synchronized 17 | # labels: [] 18 | 19 | # (Optional) (Default: false) Add a new comment in GitHub with a link to Jira created issue 20 | add_gh_comment: false 21 | 22 | # (Optional) (Default: true) Synchronize issue description from GitHub to Jira 23 | sync_description: true 24 | 25 | # (Optional) (Default: true) Synchronize comments from GitHub to Jira 26 | sync_comments: true 27 | 28 | # (Optional) (Default: None) Parent Epic key to link the issue to 29 | epic_key: "KU-925" 30 | 31 | # (Optional) Dictionary mapping GitHub issue labels to Jira issue types. 
32 | # If label on the issue is not in specified list, this issue will be created as a Bug 33 | label_mapping: 34 | enhancement: Story 35 | -------------------------------------------------------------------------------- /addons/nfs/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 7 | 8 | NAMESPACE="nfs-server-provisioner" 9 | 10 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 11 | HELM="$SNAP/microk8s-helm3.wrapper" 12 | KUBECTL_DELETE_ARGS="--wait=true --timeout=180s --ignore-not-found=true" 13 | 14 | echo "Disabling NFS Server Provisioner" 15 | 16 | echo "Checking NFS PVs in use..." 17 | echo "" 18 | readarray -t pv_array < <(${KUBECTL} get pv -o=jsonpath='{range .items[*]}pv={.metadata.name} uses storageClassName={.spec.storageClassName} => Claimed by: pvc={.spec.claimRef.name} in namespace={.spec.claimRef.namespace}{"\n"}{end}') 19 | 20 | NFS=false 21 | for i in "${pv_array[@]}"; do 22 | if [[ $i =~ .*"storageClassName=nfs".* ]]; then 23 | NFS=true 24 | echo "$i" 25 | fi 26 | done 27 | 28 | if $NFS; then 29 | echo "" 30 | echo "Please delete all PVs and PVCs using Storage Class \"nfs\" prior disabling the Addon." 
31 | exit 1 32 | fi 33 | 34 | $HELM delete nfs-server-provisioner -n $NAMESPACE 35 | $KUBECTL delete $KUBECTL_DELETE_ARGS namespace "$NAMESPACE" > /dev/null 2>&1 || true 36 | $KUBECTL delete pv data-nfs-server-provisioner-0 37 | 38 | echo "NFS Server Provisioner is disabled" -------------------------------------------------------------------------------- /addons/parking/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | source $SNAP/actions/common/utils.sh 5 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 6 | 7 | "$SNAP/microk8s-enable.wrapper" dns 8 | "$SNAP/microk8s-enable.wrapper" helm3 9 | 10 | 11 | NAMESPACE_PTR="parking" 12 | 13 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 14 | HELM="$SNAP/microk8s-helm3.wrapper" 15 | 16 | echo "+======================+" 17 | echo "| Enabling Parking App |" 18 | echo "+======================+" 19 | echo 20 | 21 | $KUBECTL create namespace "$NAMESPACE_PTR" > /dev/null 2>&1 || true 22 | 23 | $HELM repo add byjg https://opensource.byjg.com/helm > /dev/null 2>&1 24 | $HELM repo update > /dev/null 2>&1 25 | 26 | HELM_PACKAGE_VERSION="0.1.0" 27 | 28 | if [ -z "$1" ]; then 29 | 30 | echo "ERROR: Missing Domain names" 31 | echo 32 | echo "You need to pass the domain names using comma and no spaces." 33 | echo 34 | 35 | else 36 | 37 | $HELM upgrade --install parking byjg/static-httpserver \ 38 | --version $HELM_PACKAGE_VERSION \ 39 | --namespace parking \ 40 | --set "ingress.hosts={$1}" \ 41 | --set parameters.title="Domain Parked" \ 42 | --set parameters.htmlTitle="Domain Parked" 43 | 44 | echo 45 | echo "Installed." 
#!/usr/bin/env python3
"""Remove the ingress resources set up for the Kubernetes Dashboard."""

import os
import subprocess

import click


@click.command()
def dashboard_ingress():
    """Best-effort deletion of every dashboard-ingress resource."""
    click.echo("Disabling Ingress for Kubernetes Dashboard")

    # kubectl wrapper lives inside the snap; extend PATH so it resolves.
    env = os.environ.copy()
    env["PATH"] += ":%s" % os.environ["SNAP"]

    doomed = [
        "secret/kubernetes-dashboard-basic-auth",
        "ingress.networking.k8s.io/kubernetes-dashboard-ingress",
        "middleware/ipwhitelist",
        "middleware/basicauth",
        "serverstransport/skip-verify",
    ]

    for target in doomed:
        click.echo(f"Destroying {target}...")
        cmd = [
            "microk8s-kubectl.wrapper",
            "delete",
            "-n",
            "kubernetes-dashboard",
            target,
        ]
        try:
            subprocess.check_call(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
            )
        except subprocess.CalledProcessError:
            # The resource may already be absent; keep going regardless.
            pass

    click.echo("Destruction complete.")
    click.echo("Ingress for Kubernetes Dashboard is disabled")


if __name__ == "__main__":
    dashboard_ingress(prog_name="microk8s disable dashboard-ingress")
break;; 22 | esac 23 | done 24 | 25 | 26 | do_prerequisites() { 27 | # enable helm3 28 | "$SNAP/microk8s-enable.wrapper" helm3 29 | 30 | # enable dns 31 | "$SNAP/microk8s-enable.wrapper" dns 32 | } 33 | 34 | enable_gopaddle_lite() { 35 | 36 | echo "Enabling gopaddle" 37 | 38 | # add helm repo 39 | $HELM repo add gopaddle https://gopaddle-io.github.io/gopaddle-lite 40 | 41 | #list helm repo 42 | $HELM repo update 43 | 44 | if [ -z "$HELM_VERSION" ] 45 | then 46 | $HELM install gopaddle gopaddle/gopaddle --create-namespace -n gopaddle 47 | else 48 | $HELM install gopaddle gopaddle/gopaddle --create-namespace -n gopaddle --version $HELM_VERSION 49 | fi 50 | 51 | } 52 | 53 | 54 | do_prerequisites 55 | enable_gopaddle_lite 56 | -------------------------------------------------------------------------------- /addons/common/utils.sh: -------------------------------------------------------------------------------- 1 | use_addon_manifest() { 2 | # Perform an action (apply or delete) on a manifest. 3 | # Optionally replace strings in the manifest 4 | # 5 | # Parameters: 6 | # $1 the name of the manifest. Should be in the addons directory and should not 7 | # include the trailing .yaml eg ingress, dns 8 | # $2 the action to be performed on the manifest, eg apply, delete 9 | # $3 (optional) an associative array with keys the string to be replaced and value what to 10 | # replace with. The string $ARCH is always injected to this array. 11 | # 12 | local manifest="$1.yaml"; shift 13 | local action="$1"; shift 14 | if ! 
import pytest
import os
import platform

from utils import (
    is_container,
    microk8s_disable,
    microk8s_enable,
    wait_for_pod_state,
)


class TestCloudNativePG(object):
    @pytest.mark.skipif(
        # On Linux, platform.machine() reports "aarch64" (never "arm64"),
        # so the check must accept both spellings or arm hosts are
        # silently skipped.
        platform.machine() not in ["x86_64", "arm64", "aarch64"],
        reason="CloudNativePg tests are only relevant in x86_64/arm64 architectures",
    )
    @pytest.mark.skipif(
        os.environ.get("STRICT") == "yes",
        reason=(
            "Skipping CloudNativePg tests in strict confinement as they are expected to fail"
        ),
    )
    def test_cloudnative_pg(self):
        """
        Sets up and validates CloudNativePG.

        Enables the addon, waits for the operator pod, then disables it.
        """
        print("Enabling CloudNativePG")
        microk8s_enable("cloudnative-pg")
        print("Validating CloudNativePG")
        self.validate_cloudnative_pg()
        print("Disabling CloudNativePG")
        microk8s_disable("cloudnative-pg")

    def validate_cloudnative_pg(self):
        """
        Validate CloudNativePG by waiting for the operator pod to be running
        in the cnpg-system namespace.
        """
        wait_for_pod_state(
            "",
            "cnpg-system",
            "running",
            label="app.kubernetes.io/name=cloudnative-pg",
        )
25 | """ 26 | print("Enabling ngrok kubernetes operator") 27 | microk8s_enable( 28 | addon="ngrok", 29 | optional_args="--namespace ngrok-operator --secret-name test", 30 | ) 31 | print("Validating ngrok kubernetes operator") 32 | self.validate_ngrok() 33 | print("Disabling ngrok kubernetes operator") 34 | microk8s_disable("ngrok") 35 | 36 | def validate_ngrok(self): 37 | """ 38 | Validate ngrok kubernetes operator 39 | """ 40 | kubectl_get("deployment ngrok-operator-manager" " -n ngrok-operator") 41 | -------------------------------------------------------------------------------- /tests/templates/pvc-nfs.yaml: -------------------------------------------------------------------------------- 1 | 2 | # PVC for the customer 3 | --- 4 | kind: PersistentVolumeClaim 5 | apiVersion: v1 6 | metadata: 7 | name: pvc-nfs 8 | labels: 9 | vol: pvc-nfs 10 | namespace: default 11 | spec: 12 | storageClassName: "nfs" 13 | accessModes: 14 | - ReadWriteMany 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | 19 | --- 20 | kind: Pod 21 | apiVersion: v1 22 | metadata: 23 | name: busybox-pvc-nfs1 24 | labels: 25 | app: busybox-pvc-nfs 26 | namespace: default 27 | spec: 28 | containers: 29 | - name: busybox-pvc-nfs1 30 | image: busybox 31 | command: ["/bin/sh", "-c", "while true; do date >> /mount/dates1; sleep 2; done"] 32 | volumeMounts: 33 | - name: volume 34 | mountPath: /mount 35 | volumes: 36 | - name: volume 37 | persistentVolumeClaim: 38 | claimName: pvc-nfs 39 | 40 | --- 41 | kind: Pod 42 | apiVersion: v1 43 | metadata: 44 | name: busybox-pvc-nfs2 45 | labels: 46 | app: busybox-pvc-nfs 47 | namespace: default 48 | spec: 49 | containers: 50 | - name: busybox-pvc-nfs2 51 | image: busybox 52 | command: ["/bin/sh", "-c", "while true; do date >> /mount/dates2; sleep 2; done"] 53 | volumeMounts: 54 | - name: volume 55 | mountPath: /mount 56 | volumes: 57 | - name: volume 58 | persistentVolumeClaim: 59 | claimName: pvc-nfs 
#!/usr/bin/env bash
#
# Enable the Istio service mesh addon: fetch istioctl (once) and run an
# `istioctl install` with the demo profile against this cluster.

set -e

source $SNAP/actions/common/utils.sh

echo "Enabling Istio"

# Download istioctl only if a previous enable has not cached it already.
if [ ! -f "${SNAP_DATA}/bin/istioctl" ]
then
  ISTIO_VERSION="v1.18.2"
  echo "Fetching istioctl version $ISTIO_VERSION."
  # GitHub release tags and artifact names carry no leading "v".
  ISTIO_BARE_VERSION=$(echo $ISTIO_VERSION | sed 's/v//g')
  run_with_sudo mkdir -p "${SNAP_DATA}/tmp/istio"
  (cd "${SNAP_DATA}/tmp/istio"
  fetch_as https://github.com/istio/istio/releases/download/${ISTIO_BARE_VERSION}/istio-${ISTIO_BARE_VERSION}-linux-amd64.tar.gz "$SNAP_DATA/tmp/istio/istio.tar.gz"
  run_with_sudo gzip -q -d "$SNAP_DATA/tmp/istio/istio.tar.gz"
  run_with_sudo tar -xvf "$SNAP_DATA/tmp/istio/istio.tar"
  run_with_sudo chmod 777 "$SNAP_DATA/tmp/istio/istio-${ISTIO_BARE_VERSION}")
  run_with_sudo mkdir -p "$SNAP_DATA/bin/"
  run_with_sudo mv "$SNAP_DATA/tmp/istio/istio-${ISTIO_BARE_VERSION}/bin/istioctl" "$SNAP_DATA/bin/"
  # Fix: mark the istioctl binary itself executable; the previous code
  # ran `chmod +x` on the bin/ directory instead of the moved file.
  run_with_sudo chmod +x "$SNAP_DATA/bin/istioctl"

  run_with_sudo rm -rf "$SNAP_DATA/tmp/istio"
fi

# pod/servicegraph will start failing without dns
"$SNAP/microk8s-enable.wrapper" dns

run_with_sudo "$SNAP_DATA/bin/istioctl" -c "${SNAP_DATA}/credentials/client.config" install --set profile=demo -y

# Marker file recording that Istio was enabled on this node.
run_with_sudo touch "$SNAP_USER_COMMON/istio.lock"

echo "Istio is starting"
echo ""
echo "To configure mutual TLS authentication consult the Istio documentation."
import pytest
import os
import platform

from utils import (
    is_container,
    kubectl,
    microk8s_disable,
    microk8s_enable,
    wait_for_installation,
    wait_for_pod_state,
)


class TestKata(object):
    @pytest.mark.skipif(
        os.environ.get("STRICT") == "yes",
        reason="Skipping kata tests in strict confinement as they are expected to fail",
    )
    @pytest.mark.skipif(
        platform.machine() != "x86_64",
        reason="Kata tests are only relevant in x86 architectures",
    )
    @pytest.mark.skipif(
        is_container(), reason="Kata tests are only possible on real hardware"
    )
    def test_kata(self):
        """Enable the kata addon, check it works, then disable it again."""
        print("Enabling kata")
        microk8s_enable("kata")
        print("Validating Kata")
        self.validate_kata()
        print("Disabling kata")
        microk8s_disable("kata")

    def validate_kata(self):
        """Run an nginx pod on the kata runtime and wait for it to start."""
        wait_for_installation()
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        nginx_manifest = os.path.join(tests_dir, "templates", "nginx-kata.yaml")
        kubectl(f"apply -f {nginx_manifest}")
        wait_for_pod_state("", "default", "running", label="app=kata")
        kubectl(f"delete -f {nginx_manifest}")
import pytest
import os
import platform

from utils import (
    kubectl,
    microk8s_disable,
    microk8s_enable,
    wait_for_installation,
    wait_for_pod_state,
)


class TestKeda(object):
    @pytest.mark.skipif(
        platform.machine() != "x86_64",
        reason="KEDA tests are only relevant in x86 architectures",
    )
    @pytest.mark.skipif(
        os.environ.get("UNDER_TIME_PRESSURE") == "True",
        reason="Skipping KEDA tests as we are under time pressure",
    )
    def test_keda(self):
        """Enable the keda addon, check it works, then disable it again."""
        print("Enabling keda")
        microk8s_enable("keda")
        print("Validating keda")
        self.validate_keda()
        print("Disabling keda")
        microk8s_disable("keda")

    def validate_keda(self):
        """Wait for the keda operator and round-trip a ScaledObject."""
        wait_for_installation()
        wait_for_pod_state("", "keda", "running", label="app=keda-operator")
        print("KEDA operator up and running.")
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        scaled_object_manifest = os.path.join(
            tests_dir, "templates", "keda-scaledobject.yaml"
        )
        kubectl(f"apply -f {scaled_object_manifest}")
        listed = kubectl("-n gonuts get scaledobject.keda.sh")
        assert "stan-scaledobject" in listed
        kubectl(f"delete -f {scaled_object_manifest}")
import os
import pytest
import platform

from utils import (
    microk8s_disable,
    microk8s_enable,
    wait_for_installation,
    wait_for_pod_state,
)


class TestOsmEdge(object):
    @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x")
    @pytest.mark.skipif(
        # PEP 8: compare against None with identity (`is`), not equality.
        os.environ.get("UNDER_TIME_PRESSURE") is None,
        reason="Skipping test, expected to be tested when under time pressure",
    )
    def test_osm_edge(self):
        """
        Sets up and validates osm-edge.

        Enables the addon, waits for its control-plane pods, then disables it.
        """
        print("Enabling osm-edge")
        microk8s_enable("osm-edge")
        print("Validate osm-edge installation")
        self.validate_osm_edge()
        print("Disabling osm-edge")
        microk8s_disable("osm-edge")

    def validate_osm_edge(self):
        """
        Validate osm-edge by waiting for the controller and proxy injector
        pods to be running in the osm-system namespace.
        """
        wait_for_installation()
        wait_for_pod_state(
            "",
            "osm-system",
            "running",
            label="app=osm-controller",
            timeout_insec=300,
        )
        print("osm-edge controller up and running")
        wait_for_pod_state(
            "",
            "osm-system",
            "running",
            label="app=osm-injector",
            timeout_insec=300,
        )
        print("osm-edge proxy injector up and running.")
#!/usr/bin/env bash
#
# Enable the Linkerd2 service mesh addon.
# Optional arguments from "$1" are forwarded to `linkerd install` with a
# `--` prefix prepended to each token.

set -e

source $SNAP/actions/common/utils.sh

# Split "$1" on whitespace and turn each word into a --flag for linkerd.
read -ra ARGUMENTS <<< "$1"
argz=("${ARGUMENTS[@]/#/--}")

ARCH=$(arch)

# check if linkerd cli is already in the system. Download if it doesn't exist.
if [ ! -f "${SNAP_DATA}/bin/linkerd" ]; then
  # LINKERD_VERSION may be overridden from the environment; defaults to v2.14.3.
  LINKERD_VERSION="${LINKERD_VERSION:-v2.14.3}"
  echo "Fetching Linkerd2 version $LINKERD_VERSION."
  run_with_sudo mkdir -p "$SNAP_DATA/bin"
  # Release artifacts are tagged "stable-X.Y.Z" without the leading "v".
  LINKERD_VERSION=$(echo $LINKERD_VERSION | sed 's/v//g')
  echo "$LINKERD_VERSION"
  fetch_as https://github.com/linkerd/linkerd2/releases/download/stable-${LINKERD_VERSION}/linkerd2-cli-stable-${LINKERD_VERSION}-linux-${ARCH} "$SNAP_DATA/bin/linkerd"
  run_with_sudo chmod uo+x "$SNAP_DATA/bin/linkerd"
fi

echo "Enabling Linkerd2"
# enable dns service
KUBECTL="$SNAP/microk8s-kubectl.wrapper"
"$SNAP/microk8s-enable.wrapper" dns
# Allow some time for the apiserver to start
sleep 5
${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null

# Each linkerd step renders manifests to stdout which are piped into kubectl.
# Install CRDs
"$SNAP_DATA/bin/linkerd" "--kubeconfig=$SNAP_DATA/credentials/client.config" install --crds | $KUBECTL apply -f -
# Enable linkerd control plane
"$SNAP_DATA/bin/linkerd" "--kubeconfig=$SNAP_DATA/credentials/client.config" install "${argz[@]}" | $KUBECTL apply -f -
echo "Installing linkerd viz extension"
# Enable linkerd visualization extension
"$SNAP_DATA/bin/linkerd" "--kubeconfig=$SNAP_DATA/credentials/client.config" viz install | $KUBECTL apply -f -
echo "Linkerd is starting"
initialization, therefore burstable class 27 | limits: 28 | cpu: 1000m 29 | requests: 30 | cpu: 100m 31 | env: 32 | - name: ELASTICSEARCH_HOSTS 33 | value: http://elasticsearch-logging:9200 34 | - name: SERVER_NAME 35 | value: kibana-logging 36 | - name: SERVER_REWRITEBASEPATH 37 | value: "false" 38 | ports: 39 | - containerPort: 5601 40 | name: ui 41 | protocol: TCP 42 | livenessProbe: 43 | httpGet: 44 | path: /api/status 45 | port: ui 46 | initialDelaySeconds: 5 47 | timeoutSeconds: 10 48 | readinessProbe: 49 | httpGet: 50 | path: /api/status 51 | port: ui 52 | initialDelaySeconds: 5 53 | timeoutSeconds: 10 54 | -------------------------------------------------------------------------------- /tests/test_istio.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | 5 | from utils import ( 6 | microk8s_disable, 7 | microk8s_enable, 8 | run_until_success, 9 | wait_for_installation, 10 | wait_for_pod_state, 11 | ) 12 | 13 | 14 | class TestIstio(object): 15 | @pytest.mark.skipif( 16 | platform.machine() != "x86_64", 17 | reason="Istio tests are only relevant in x86 architectures", 18 | ) 19 | @pytest.mark.skipif( 20 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 21 | reason="Skipping istio and knative tests as we are under time pressure", 22 | ) 23 | def test_istio(self): 24 | """ 25 | Sets up and validate istio. 26 | """ 27 | print("Enabling Istio") 28 | microk8s_enable("istio") 29 | print("Validating Istio") 30 | self.validate_istio() 31 | print("Disabling Istio") 32 | microk8s_disable("istio") 33 | 34 | def validate_istio(self): 35 | """ 36 | Validate istio by deploying the bookinfo app. 
import pytest
import os
import platform

from utils import (
    kubectl,
    microk8s_disable,
    microk8s_enable,
    wait_for_pod_state,
)


class TestKwasm(object):
    @pytest.mark.skipif(
        os.environ.get("STRICT") == "yes",
        reason="Skipping kwasm tests in strict confinement as they are expected to fail",
    )
    @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x")
    @pytest.mark.skipif(
        # PEP 8: compare against None with identity (`is`), not equality.
        os.environ.get("UNDER_TIME_PRESSURE") is None,
        reason="Skipping test, expected to be tested when under time pressure",
    )
    def test_kwasm(self):
        """
        Sets up and validates kwasm.

        Enables the addon, runs a wasm job to completion, then disables it.
        """
        print("Enabling kwasm")
        microk8s_enable("kwasm")
        print("Validating kwasm")
        self.validate_kwasm()
        print("Disabling kwasm")
        microk8s_disable("kwasm")

    def validate_kwasm(self):
        """
        Validate kwasm by waiting for the operator pod and then running the
        wasm-test job from the templates directory until it completes.
        """
        wait_for_pod_state(
            "", "kwasm-system", "running", label="app.kubernetes.io/name=kwasm-operator"
        )

        here = os.path.dirname(os.path.abspath(__file__))
        manifest = os.path.join(here, "templates", "wasm-job.yaml")
        kubectl("apply -f {}".format(manifest))
        wait_for_pod_state(
            "", "default", "terminated", "Completed", label="job-name=wasm-test"
        )
        kubectl("delete -f {}".format(manifest))
        print("kwasm is up and running")
#!/usr/bin/env bash
#
# Enable the Multus meta-CNI addon by applying its daemonset manifest and
# waiting for the multus binary to appear in the node's CNI bin directory.

set -e

CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
source $CURRENT_DIR/../common/utils.sh

# NOTE(review): KUBECTL is defined but not referenced later in this script;
# use_addon_manifest performs the kubectl apply itself — confirm before removing.
KUBECTL="${SNAP}/kubectl --kubeconfig=${SNAP_DATA}/credentials/client.config"
# Stable "current" path next to the versioned $SNAP_DATA revision directory.
CUR_DATA=$(dirname "${SNAP_DATA}")/current

echo "Enabling Multus"

# The multus CNI conf file doubles as the "already installed" marker.
if [ -f "${SNAP_DATA}/args/cni-network/00-multus.conf" ]
then
  echo "Multus is already installed."
else
  echo "Waiting for microk8s to be ready."
  "${SNAP}/microk8s-status.wrapper" --wait-ready >/dev/null

  echo "Applying manifest for multus daemonset."
  # Substitution map passed to use_addon_manifest: replaces the literal
  # tokens $SNAP_DATA and $DS_NAME inside multus.yaml before applying it.
  declare -A map
  map[\$SNAP_DATA]="$CUR_DATA"
  map[\$DS_NAME]="kube-multus-ds"
  use_addon_manifest multus/multus apply "$(declare -p map)"

  # The daemonset copies the multus binary onto the host; poll for it.
  # NOTE(review): this loop has no timeout — it spins forever if the
  # daemonset never becomes healthy. Confirm whether a bound is wanted.
  echo -n "Waiting for multus daemonset to start."
  until [ -f "${SNAP_DATA}/opt/cni/bin/multus" ]; do
    sleep 1
    echo -n "."
  done
  echo
  echo "Multus is enabled"
fi

echo "Multus is enabled with version:"
"${SNAP_DATA}/opt/cni/bin/multus" -v

echo
echo "Currently installed CNI and IPAM plugins include:"
echo $(cd "${SNAP_DATA}/opt/cni/bin/"; ls)

echo
echo "New CNI plugins can be installed in ${CUR_DATA}/opt/cni/bin/"

echo
echo "For information on configuration please refer to the multus documentation."
echo " First you need to create network definitions:"
echo " https://github.com/k8snetworkplumbingwg/multus-cni/blob/v3.9/docs/how-to-use.md#create-network-attachment-definition"
echo " Then you need to tell your pods to use those networks via annotations"
echo " https://github.com/k8snetworkplumbingwg/multus-cni/blob/v3.9/docs/how-to-use.md#run-pod-with-network-annotation"
echo
23 | fi 24 | run_with_sudo rm -f "$SNAP_DATA/args/cni-network/05-cilium-cni.conf" 25 | run_with_sudo rm -f "$SNAP_DATA/opt/cni/bin/cilium-cni" 26 | run_with_sudo rm -rf $SNAP_DATA/bin/cilium* 27 | run_with_sudo rm -f "$SNAP_DATA/actions/cilium.yaml" 28 | run_with_sudo rm -rf "$SNAP_DATA/actions/cilium" 29 | run_with_sudo rm -rf "$SNAP_DATA/var/run/cilium" 30 | run_with_sudo rm -rf "$SNAP_DATA/sys/fs/bpf" 31 | run_with_sudo rm -rf "$SNAP_COMMON/plugins/cilium" 32 | 33 | if $SNAP/sbin/ip link show "cilium_vxlan" 34 | then 35 | echo "Deleting old cilium_vxlan link" 36 | run_with_sudo $SNAP/sbin/ip link delete "cilium_vxlan" 37 | fi 38 | 39 | if [ -e "$SNAP_DATA/args/cni-network/cni.yaml.disabled" ] 40 | then 41 | echo "Restarting default cni" 42 | run_with_sudo mv "$SNAP_DATA/args/cni-network/cni.yaml.disabled" "$SNAP_DATA/args/cni-network/cni.yaml" 43 | "$SNAP/microk8s-kubectl.wrapper" apply -f "$SNAP_DATA/args/cni-network/cni.yaml" 44 | elif [ ! -e "$SNAP_DATA/var/lock/ha-cluster" ] 45 | then 46 | echo "Restarting flanneld" 47 | set_service_expected_to_start flanneld 48 | 49 | run_with_sudo preserve_env snapctl start "${SNAP_NAME}.daemon-flanneld" 50 | fi 51 | echo "Cilium is terminating" 52 | -------------------------------------------------------------------------------- /addons/argocd/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | NAMESPACE_ARGOCD="argocd" 8 | 9 | ARGOCD_HELM_VERSION="5.34.3" 10 | 11 | "$SNAP/microk8s-enable.wrapper" helm3 12 | 13 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 14 | HELM="$SNAP/microk8s-helm3.wrapper" 15 | 16 | VALUES="" 17 | 18 | # get the options 19 | while getopts ":v:f:h:" flag; do 20 | case "${flag}" in 21 | v) ARGOCD_HELM_VERSION=${OPTARG} 22 | ;; 23 | f) VALUES=${OPTARG} 24 | ;; 25 | *) echo "Usage: microk8s enable argocd" 26 | echo "" 27 | echo "With overwriting default values: microk8s enable 
argocd -f values.yaml" 28 | echo "" 29 | echo "See https://artifacthub.io/packages/helm/argo/argo-cd for more information about the values" 30 | echo "You should enable the Ingress addon, if you want to use ArgoCD with an Ingress" 31 | echo "microk8s enable ingress" 32 | exit 0 33 | ;; 34 | esac 35 | done 36 | 37 | echo "Installing ArgoCD (Helm v${ARGOCD_HELM_VERSION})" 38 | 39 | if [ -n "$VALUES" ]; then 40 | echo "Using values file: $VALUES" 41 | fi 42 | 43 | 44 | # make sure the "argocd" namespace exists 45 | $KUBECTL create namespace "$NAMESPACE_ARGOCD" > /dev/null 2>&1 || true 46 | 47 | # add the ArgoCD chart repository 48 | $HELM repo add argo https://argoproj.github.io/argo-helm 49 | 50 | # install the helm chart 51 | if [ -z "$VALUES" ] 52 | then 53 | $HELM upgrade -i argo-cd argo/argo-cd \ 54 | --version $ARGOCD_HELM_VERSION \ 55 | --namespace "$NAMESPACE_ARGOCD" 56 | else 57 | $HELM upgrade -i argo-cd argo/argo-cd \ 58 | --version $ARGOCD_HELM_VERSION \ 59 | --namespace "$NAMESPACE_ARGOCD" \ 60 | -f $VALUES 61 | fi 62 | 63 | echo "ArgoCD is installed" 64 | -------------------------------------------------------------------------------- /tests/test_kubearmor.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import platform 3 | import os 4 | 5 | 6 | from utils import ( 7 | is_container, 8 | microk8s_enable, 9 | microk8s_disable, 10 | microk8s_reset, 11 | wait_for_installation, 12 | wait_for_pod_state, 13 | ) 14 | 15 | 16 | class TestKubearmor(object): 17 | @pytest.mark.skipif( 18 | os.environ.get("STRICT") == "yes", 19 | reason=( 20 | "Skipping kubearmor tests in strict confinement as they are expected to fail" 21 | ), 22 | ) 23 | @pytest.mark.skipif( 24 | is_container(), reason="Kubearmor tests are skipped in containers" 25 | ) 26 | @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x") 27 | @pytest.mark.skipif( 28 | os.environ.get("UNDER_TIME_PRESSURE") == None, 29 | 
reason="Skipping test, expected to be tested when under time pressure", 30 | ) 31 | def test_kubearmor(self): 32 | """ 33 | Sets up and validates kubearmor. 34 | """ 35 | print("Enabling Kubearmor") 36 | microk8s_enable("kubearmor") 37 | print("Validating Kubearmor") 38 | self.validate_kubearmor() 39 | print("Disabling Kubearmor") 40 | microk8s_disable("kubearmor") 41 | microk8s_reset() 42 | 43 | def validate_kubearmor(self): 44 | """ 45 | Validate kubearmor by applying policy to nginx container. 46 | """ 47 | 48 | wait_for_installation() 49 | kubearmor_pods = [ 50 | "kubearmor-controller", 51 | "kubearmor", 52 | "kubearmor-relay", 53 | ] 54 | for pod in kubearmor_pods: 55 | wait_for_pod_state( 56 | "", "kube-system", "running", label="kubearmor-app={}".format(pod) 57 | ) 58 | 59 | print("Kubearmor testing passed.") 60 | -------------------------------------------------------------------------------- /addons/jaeger/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | 9 | echo "Enabling Jaeger" 10 | 11 | if $KUBECTL get svc -n istio-system istio-ingressgateway > /dev/null 2>&1; then 12 | echo "Warning: Istio ingress is already installed." 13 | echo "Enabling the 'ingress' addon may cause conflicts with Istio ingress gateway." 14 | echo "Continue anyway? [y/N]" 15 | read -r CONT 16 | if [[ "$CONT" != "y" && "$CONT" != "Y" ]]; then 17 | echo "Aborting Jaeger enablement." 18 | exit 1 19 | fi 20 | fi 21 | 22 | "$SNAP/microk8s-enable.wrapper" dns ingress cert-manager 23 | 24 | echo "Waiting for cert-manager to be ready." 25 | while ! $KUBECTL apply -f ${CURRENT_DIR}/cert-tester.yaml > /dev/null 2>&1 26 | do 27 | echo -n "." 
28 | sleep 5 29 | done 30 | echo "ready" 31 | $KUBECTL delete -f ${CURRENT_DIR}/cert-tester.yaml > /dev/null 2>&1 || true 32 | 33 | read -ra ARGUMENTS <<< "$1" 34 | MANIFESTS_PATH="${CURRENT_DIR}" 35 | NAMESPACE="default" 36 | 37 | if [ ! -z "${ARGUMENTS[@]}" ] 38 | then 39 | NAMESPACE=${ARGUMENTS[0]} 40 | $KUBECTL create ns "${NAMESPACE}" || true 41 | sed "s/namespace: default/namespace: ${NAMESPACE}/g; \ 42 | s/default\.svc/${NAMESPACE}\.svc/g;\ 43 | s/default\/jaeger-operator-serving-cert/${NAMESPACE}\/jaeger-operator-serving-cert/g" $MANIFESTS_PATH/operator.yaml |\ 44 | $KUBECTL apply -f - 45 | else 46 | $KUBECTL apply -f "${MANIFESTS_PATH}/operator.yaml" 47 | fi 48 | 49 | echo "Waiting for Jaeger Operator to be ready" 50 | $KUBECTL wait pods -n "${NAMESPACE}" -l name=jaeger-operator --for condition=Ready --timeout=90s 51 | $KUBECTL apply -f "${MANIFESTS_PATH}/simplest.yaml" 52 | 53 | echo "Jaeger is enabled, please allow some time before the Jaeger UI is accessible." -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributor Guide 2 | 3 | MicroK8s is open source ([Apache License 2.0](./LICENSE)) and actively seeks any community contributions for code, add-ons, suggestions and documentation. Many of the features currently part of MicroK8s originated in the community, and we are very keen for that to continue. This page details a few notes, workflows and suggestions for how to make contributions most effective and help us all build a better MicroK8s for everyone - please give them a read before working on any contributions. 4 | 5 | ## Licensing 6 | 7 | MicroK8s has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may make to this project. Please familiarise yourself with the terms of the license. 8 | 9 | Additionally, MicroK8s uses the Harmony CLA agreement. 
It’s the easiest way for you to give us permission to use your contributions. In effect, you’re giving us a licence, but you still own the copyright — so you retain the right to modify your code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before making any contributions. 10 | 11 | ## Code of conduct 12 | 13 | MicroK8s has adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct). 14 | 15 | ## Contributing code 16 | 17 | For contributing code please look at [HACKING.md](./HACKING.md). 18 | 19 | ## Documentation 20 | 21 | Docs for MicroK8s are published online at [https://microk8s.io/docs](https://microk8s.io/docs). You can make suggestions and edit the pages themselves by joining the Kubernetes discourse at [discuss.kubernetes.io](https://discuss.kubernetes.io/t/introduction-to-microk8s/11243) or follow the link at the bottom of any of the pages published at [https://microk8s.io/docs](https://microk8s.io/docs). There is a documentation page which describes how to write and edit docs, [published as part of the documentation](https://microk8s.io/docs/docs).
22 | -------------------------------------------------------------------------------- /addons/multus/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source "${SNAP}/actions/common/utils.sh" 6 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 7 | source $CURRENT_DIR/../common/utils.sh 8 | 9 | KUBECTL="${SNAP}/kubectl --kubeconfig=${SNAP_DATA}/credentials/client.config" 10 | 11 | if [ -f "${SNAP_DATA}/args/cni-network/00-multus.conf" ] 12 | then 13 | echo "Disabling Multus" 14 | echo "Checking if multus daemonset is running" 15 | DS_CHECK=$(${KUBECTL} get pods -n kube-system --selector=app=multus -o name) 16 | if [ -n "${DS_CHECK}" ]; then 17 | echo "Found multus deamonset, removing it and cleaning up OS files" 18 | declare -A map 19 | map[\$SNAP_DATA]="$(dirname "${SNAP_DATA}")/current" 20 | # Temporary work-around to stay backward-compatible with the previous daemonset name 21 | map[\$DS_NAME]="$(microk8s.kubectl get daemonset -n kube-system --selector=app=multus -o name | sed 's:daemonset.apps/::')" 22 | use_addon_manifest multus/multus delete "$(declare -p map)" 23 | run_with_sudo rm -f "${SNAP_DATA}/args/cni-network/00-multus.conf" 24 | run_with_sudo rm -rf "${SNAP_DATA}/args/cni-network/multus.d" 25 | run_with_sudo rm -f "${SNAP_DATA}/opt/cni/bin/multus" 26 | 27 | echo -n "Waiting for multus daemonset to terminate." 28 | while [ -n "${DS_CHECK}" ]; do 29 | sleep 3 30 | DS_CHECK=$(${KUBECTL} get pods -n kube-system --selector=app=multus -o name) 31 | echo -n "." 32 | done 33 | echo 34 | echo "Multus daemonset terminated." 35 | echo "Initiating removal on all other nodes." 
36 | nodes_addon multus disable 37 | else 38 | echo "Daemonset not found so we are likely a node, cleaning up OS files" 39 | run_with_sudo rm -f "${SNAP_DATA}/args/cni-network/00-multus.conf" 40 | run_with_sudo rm -rf "${SNAP_DATA}/args/cni-network/multus.d" 41 | run_with_sudo rm -f "${SNAP_DATA}/opt/cni/bin/multus" 42 | fi 43 | echo "Multus is disabled" 44 | else 45 | echo "Multus is not installed." 46 | fi 47 | -------------------------------------------------------------------------------- /addons/microcks/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | HELM="${SNAP}/microk8s-helm3.wrapper" 8 | MICROK8S_CONFIG="${SNAP}/microk8s-config.wrapper" 9 | 10 | NAMESPACE="microcks" 11 | MICROCKS_CHART_VERSION="1.8.0" 12 | 13 | # Retrieve microk8s cluster IP to nip.io setup 14 | IP=`"${MICROK8S_CONFIG}" | grep "server: https:" | sed "s/\/\// /" | awk -F ':' '{print $3}' | sed "s/ //"` 15 | 16 | # Ensure all prerequisites services are enabled 17 | do_prerequisites() { 18 | # Enable dns service 19 | "$SNAP/microk8s-enable.wrapper" dns 20 | # Enable ingress service 21 | "$SNAP/microk8s-enable.wrapper" ingress 22 | # Enable hostpath-storage service (storage is deprecated) 23 | "$SNAP/microk8s-enable.wrapper" hostpath-storage 24 | # Allow some time for the apiserver to start and be responsive 25 | ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 26 | } 27 | 28 | get_microcks () { 29 | echo "Enabling Microcks" 30 | 31 | # Always update the chart repo first (do not miss minor versions...), add repo if it does not exists 32 | "${HELM}" repo update microcks || "${HELM}" repo add microcks https://microcks.io/helm 33 | # Install the helm chart 34 | "${HELM}" upgrade --install --create-namespace microcks microcks/microcks -n "${NAMESPACE}" --version "${MICROCKS_CHART_VERSION}" --set microcks.url=microcks."${IP}".nip.io --set 
keycloak.url=keycloak."${IP}".nip.io 35 | 36 | # Final comment and information 37 | echo "" 38 | echo "Microcks ${MICROCKS_CHART_VERSION} has been installed and enabled." 39 | echo "Happy API(s) Mocking and Testing -> https://microcks."${IP}".nip.io" 40 | echo "" 41 | echo "Getting started -> https://microcks.io/documentation/getting-started/" 42 | echo "" 43 | echo "Useful commands:" 44 | echo "" 45 | echo "- Retrieve all Microcks' URLs of the created ingress:" 46 | echo "" 47 | echo "$ microk8s kubectl get ingress -n microcks" 48 | echo "" 49 | } 50 | 51 | do_prerequisites 52 | get_microcks 53 | -------------------------------------------------------------------------------- /addons/openfaas/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | OF_NAMESPACE="openfaas" 8 | FN_NAMESPACE="openfaas-fn" 9 | 10 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 11 | HELM="$SNAP/microk8s-helm3.wrapper" 12 | 13 | "$SNAP/microk8s-enable.wrapper" dns 14 | "$SNAP/microk8s-enable.wrapper" helm3 15 | 16 | echo "" 17 | echo "Enabling OpenFaaS" 18 | 19 | OPERATOR=false 20 | AUTH=true 21 | VALUES="" 22 | 23 | for i in "$@" 24 | do 25 | case $i in 26 | --operator) 27 | OPERATOR=true 28 | shift # past argument 29 | ;; 30 | --no-auth) 31 | AUTH=false 32 | shift # past argument 33 | ;; 34 | -f=*|--values=*) 35 | VALUES="${i#*=}" 36 | shift # past argument=value 37 | ;; 38 | *) 39 | # unknown option 40 | ;; 41 | esac 42 | done 43 | 44 | echo "Operator: $OPERATOR" 45 | echo "Basic Auth enabled: $AUTH" 46 | if [ -n "$VALUES" ]; then 47 | echo "Overrides file: $VALUES" 48 | fi 49 | 50 | 51 | # make sure the "openfaas" and "openfaas-fn" namespaces exists 52 | $KUBECTL create namespace "$OF_NAMESPACE" > /dev/null 2>&1 || true 53 | $KUBECTL create namespace "$FN_NAMESPACE" > /dev/null 2>&1 || true 54 | 55 | $HELM repo add openfaas https://openfaas.github.io/faas-netes/ 56 
| 57 | if [ -z "$VALUES" ] 58 | then 59 | $HELM upgrade openfaas --install openfaas/openfaas \ 60 | --namespace openfaas \ 61 | --set functionNamespace=openfaas-fn \ 62 | --set createCRDs=true \ 63 | --set operator.create=$OPERATOR \ 64 | --set basic_auth=$AUTH \ 65 | --set generateBasicAuth=$AUTH 66 | else 67 | $HELM upgrade openfaas --install openfaas/openfaas \ 68 | --namespace openfaas \ 69 | --set functionNamespace=openfaas-fn \ 70 | --set createCRDs=true \ 71 | --set operator.create=$OPERATOR \ 72 | --set basic_auth=$AUTH \ 73 | --set generateBasicAuth=$AUTH \ 74 | -f "$VALUES" 75 | fi 76 | 77 | # print a final help message 78 | echo "OpenFaaS has been installed" 79 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # microk8s-addons 2 | 3 | This repository contains the community addons that can be enabled along with MicroK8s. 4 | 5 | ## Directory structure 6 | 7 | ``` 8 | addons.yaml Authoritative list of addons included in this repository. See format below. 9 | addons/ 10 | / 11 | enable Executable script that runs when enabling the addon 12 | disable Executable script that runs when disabling the addon 13 | / 14 | enable 15 | disable 16 | ... 17 | ``` 18 | 19 | ## `addons.yaml` format 20 | 21 | ```yaml 22 | microk8s-addons: 23 | # A short description for the addons in this repository. 24 | description: Core addons of the MicroK8s project 25 | 26 | # Revision number. Increment when there are important changes. 27 | revision: 1 28 | 29 | # List of addons. 30 | addons: 31 | - name: addon1 32 | description: My awesome addon 33 | 34 | # Addon version. 35 | version: "1.0.0" 36 | 37 | # Test to check that addon has been enabled. This may be: 38 | # - A path to a file. 
For example, "${SNAP_DATA}/var/lock/myaddon.enabled" 39 | # - A Kubernetes resource, in the form `resourceType/resourceName`, just 40 | # as it would appear in the output of the `kubectl get all -A` command. 41 | # For example, "deployment.apps/registry". 42 | # 43 | # The addon is assumed to be enabled when the specified file or Kubernetes 44 | # resource exists. 45 | check_status: "deployment.apps/addon1" 46 | 47 | # List of architectures supported by this addon. 48 | # MicroK8s supports "amd64", "arm64" and "s390x". 49 | supported_architectures: 50 | - amd64 51 | - arm64 52 | - s390x 53 | 54 | - name: addon2 55 | description: My second awesome addon, supported for amd64 only 56 | version: "1.0.0" 57 | check_status: "pod/addon2" 58 | supported_architectures: 59 | - amd64 60 | ``` 61 | 62 | ## Adding new addons 63 | 64 | See [`HACKING.md`](./HACKING.md) for instructions on how to develop custom MicroK8s addons. 65 | -------------------------------------------------------------------------------- /addons/trivy/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | NAMESPACE_TRIVY="trivy-system" 8 | 9 | TRIVY_HELM_VERSION="0.15.1" 10 | 11 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 12 | HELM="$SNAP/microk8s-helm3.wrapper" 13 | 14 | do_prerequisites() { 15 | "$SNAP/microk8s-enable.wrapper" helm3 16 | # enable dns service 17 | "$SNAP/microk8s-enable.wrapper" dns 18 | # Allow some time for the apiserver to start 19 | sleep 5 20 | ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 21 | } 22 | 23 | VALUES="" 24 | 25 | get_trivy() { 26 | # get the options 27 | while getopts ":f:h:" flag; do 28 | case "${flag}" in 29 | f) VALUES=${OPTARG} 30 | ;; 31 | *) echo "Usage: microk8s enable trivy" 32 | echo "" 33 | echo "With overwriting default values: microk8s enable trivy -f values.yaml" 34 | echo "" 35 | echo "See 
https://github.com/aquasecurity/trivy-operator/tree/main/deploy/helm for more information about the values" 36 | exit 0 37 | ;; 38 | esac 39 | done 40 | 41 | echo "Installing Trivy" 42 | 43 | if [ -n "$VALUES" ]; then 44 | echo "Using values file: $VALUES" 45 | fi 46 | 47 | # make sure the "trivy-system" namespace exists 48 | # $KUBECTL create namespace "$NAMESPACE_TRIVY" > /dev/null 2>&1 || true 49 | 50 | # add the trivy operator chart repository 51 | $HELM repo add aqua https://aquasecurity.github.io/helm-charts/ 52 | 53 | # install the helm chart 54 | if [ -z "$VALUES" ] 55 | then 56 | $HELM upgrade -i trivy-operator aqua/trivy-operator \ 57 | --namespace "$NAMESPACE_TRIVY" \ 58 | --create-namespace \ 59 | --set="trivy.ignoreUnfixed=true" \ 60 | --version $TRIVY_HELM_VERSION 61 | else 62 | $HELM upgrade -i trivy-operator aqua/trivy-operator \ 63 | --namespace "$NAMESPACE_TRIVY" \ 64 | --create-namespace \ 65 | --set="trivy.ignoreUnfixed=true" \ 66 | --version $TRIVY_HELM_VERSION \ 67 | -f $VALUES 68 | fi 69 | 70 | echo "Trivy is installed" 71 | } 72 | 73 | do_prerequisites 74 | get_trivy 75 | -------------------------------------------------------------------------------- /tests/test_knative.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | 5 | from utils import ( 6 | kubectl, 7 | microk8s_disable, 8 | microk8s_enable, 9 | wait_for_installation, 10 | wait_for_namespace_termination, 11 | wait_for_pod_state, 12 | ) 13 | 14 | 15 | class TestKnative(object): 16 | @pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x") 17 | @pytest.mark.skipif( 18 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 19 | reason="Skipping knative tests as we are under time pressure", 20 | ) 21 | @pytest.mark.skip(reason="Due to https://github.com/canonical/microk8s/issues/3597") 22 | def test_knative(self): 23 | """ 24 | Test knative 25 | """ 26 | 27 | print("Enabling 
Knative") 28 | microk8s_enable("knative") 29 | print("Validating Knative") 30 | self.validate_knative() 31 | print("Disabling Knative") 32 | microk8s_disable("knative") 33 | wait_for_namespace_termination("knative-serving", timeout_insec=600) 34 | 35 | def validate_knative(self): 36 | """ 37 | Validate Knative by deploying the helloworld-go app supports both amd64 & arm64 38 | """ 39 | 40 | wait_for_installation() 41 | knative_services = [ 42 | "activator", 43 | "autoscaler", 44 | "controller", 45 | "domain-mapping", 46 | "autoscaler-hpa", 47 | "domainmapping-webhook", 48 | "webhook", 49 | "net-kourier-controller", 50 | "3scale-kourier-gateway", 51 | ] 52 | for service in knative_services: 53 | wait_for_pod_state( 54 | "", "knative-serving", "running", label="app={}".format(service) 55 | ) 56 | 57 | here = os.path.dirname(os.path.abspath(__file__)) 58 | manifest = os.path.join(here, "templates", "knative-helloworld.yaml") 59 | kubectl("apply -f {}".format(manifest)) 60 | wait_for_pod_state( 61 | "", "default", "running", label="serving.knative.dev/service=helloworld-go" 62 | ) 63 | kubectl("delete -f {}".format(manifest)) 64 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Configuration for probot-stale - https://github.com/probot/stale 2 | 3 | # Number of days of inactivity before an Issue or Pull Request becomes stale 4 | daysUntilStale: 330 5 | 6 | # Number of days of inactivity before an Issue or Pull Request with the stale label is closed. 7 | # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. 8 | daysUntilClose: 30 9 | 10 | # Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) 11 | onlyLabels: [] 12 | 13 | # Issues or Pull Requests with these labels will never be considered stale. 
Set to `[]` to disable 14 | exemptLabels: 15 | - pinned 16 | - security 17 | - "[Status] Maybe Later" 18 | 19 | # Set to true to ignore issues in a project (defaults to false) 20 | exemptProjects: false 21 | 22 | # Set to true to ignore issues in a milestone (defaults to false) 23 | exemptMilestones: false 24 | 25 | # Set to true to ignore issues with an assignee (defaults to false) 26 | exemptAssignees: false 27 | 28 | # Label to use when marking as stale 29 | staleLabel: inactive 30 | 31 | # Comment to post when marking as stale. Set to `false` to disable 32 | markComment: > 33 | This issue has been automatically marked as stale because it has not had 34 | recent activity. It will be closed if no further activity occurs. Thank you 35 | for your contributions. 36 | 37 | # Comment to post when removing the stale label. 38 | # unmarkComment: > 39 | # Your comment here. 40 | 41 | # Comment to post when closing a stale Issue or Pull Request. 42 | # closeComment: > 43 | # Your comment here. 44 | 45 | # Limit the number of actions per hour, from 1-30. Default is 30 46 | limitPerRun: 30 47 | # Limit to only `issues` or `pulls` 48 | # only: issues 49 | 50 | # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': 51 | # pulls: 52 | # daysUntilStale: 30 53 | # markComment: > 54 | # This pull request has been automatically marked as stale because it has not had 55 | # recent activity. It will be closed if no further activity occurs. Thank you 56 | # for your contributions. 
57 | 58 | # issues: 59 | # exemptLabels: 60 | # - confirmed 61 | -------------------------------------------------------------------------------- /addons/osm-edge/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 7 | 8 | read -ra ARGUMENTS <<< "$@" 9 | 10 | ARCH=$(arch) 11 | 12 | # check if osm-edge cli is already in the system. Download if it doesn't exist. 13 | if [ ! -f "${SNAP_DATA}/bin/osm" ]; then 14 | OSM_EDGE_VERSION="${OSM_EDGE_VERSION:-v1.1.2}" 15 | echo "Fetching osm-edge version $OSM_EDGE_VERSION." 16 | 17 | run_with_sudo mkdir -p "${SNAP_DATA}/tmp/osm-edge" 18 | (cd "${SNAP_DATA}/tmp/osm-edge" 19 | fetch_as https://github.com/flomesh-io/osm-edge/releases/download/${OSM_EDGE_VERSION}/osm-edge-${OSM_EDGE_VERSION}-linux-${ARCH}.tar.gz "$SNAP_DATA/tmp/osm-edge/osm-edge.tar.gz" 20 | run_with_sudo gzip -q -d "$SNAP_DATA/tmp/osm-edge/osm-edge.tar.gz" 21 | run_with_sudo tar --no-overwrite-dir --no-same-owner -mxvf "$SNAP_DATA/tmp/osm-edge/osm-edge.tar") 22 | run_with_sudo mkdir -p "$SNAP_DATA/bin/" 23 | run_with_sudo mv "$SNAP_DATA/tmp/osm-edge/linux-${ARCH}/osm" "$SNAP_DATA/bin/" 24 | run_with_sudo chmod uo+x "$SNAP_DATA/bin/osm" 25 | 26 | run_with_sudo rm -rf "$SNAP_DATA/tmp/osm-edge" 27 | fi 28 | 29 | echo "Enabling osm-edge" 30 | # enable dns service 31 | "$SNAP/microk8s-enable.wrapper" dns 32 | # Allow some time for the apiserver to start 33 | ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 34 | 35 | OSM_MESH_NAME=${OSM_MESH_NAME:-"osm"} 36 | OSM_MESH_NS=${OSM_NAMESPACE:-"osm-system"} 37 | 38 | if [ "$#"
-ne 0 ]  # "$#" (arg count) is never an empty string, so the old test [ ! -z "$#" ] was always true and the default branch below was dead 39 | then 40 | echo "Installing osm-edge" 41 | KUBECONFIG=$SNAP_DATA/credentials/client.config $SNAP_DATA/bin/osm install ${ARGUMENTS[@]} 42 | else 43 | echo "Installing osm-edge with default settings" 44 | KUBECONFIG=$SNAP_DATA/credentials/client.config $SNAP_DATA/bin/osm install --mesh-name "$OSM_MESH_NAME" \ 45 | --osm-namespace "$OSM_MESH_NS" \ 46 | --set=osm.enablePermissiveTrafficPolicy=true 47 | fi 48 | 49 | # add custom command 50 | 51 | if [ ! -f "${SNAP_COMMON}/plugins/osm" ]; then 52 | run_with_sudo mkdir -p "${SNAP_COMMON}/plugins" 53 | run_with_sudo cp "$CURRENT_DIR/osm.wrapper" "${SNAP_COMMON}/plugins/osm" 54 | run_with_sudo chmod uo+x "${SNAP_COMMON}/plugins/osm" 55 | fi 56 | 57 | echo "osm-edge is enabled" 58 | -------------------------------------------------------------------------------- /tests/test_openebs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | 5 | from utils import ( 6 | kubectl, 7 | microk8s_disable, 8 | microk8s_enable, 9 | wait_for_installation, 10 | wait_for_pod_state, 11 | ) 12 | from subprocess import CalledProcessError, check_output 13 | 14 | 15 | class TestOpenebs(object): 16 | @pytest.mark.skipif( 17 | platform.machine() == "s390x", reason="OpenEBS is not available on s390x" 18 | ) 19 | @pytest.mark.skipif( 20 | os.environ.get("UNDER_TIME_PRESSURE") is None, 21 | reason="Skipping test, expected to be tested when under time pressure", 22 | ) 23 | def test_openebs(self): 24 | """ 25 | Sets up and validates openebs.
26 | """ 27 | print("Enabling OpenEBS") 28 | try: 29 | check_output("systemctl is-enabled iscsid".split()).strip().decode("utf8") 30 | microk8s_enable("openebs") 31 | print("Validating OpenEBS") 32 | self.validate_openebs() 33 | print("Disabling OpenEBS") 34 | microk8s_disable("openebs:force") 35 | except CalledProcessError: 36 | print("Nothing to do, since iscsid is not available") 37 | return 38 | 39 | def validate_openebs(self): 40 | """ 41 | Validate OpenEBS 42 | """ 43 | wait_for_installation() 44 | wait_for_pod_state( 45 | "", 46 | "openebs", 47 | "running", 48 | label="openebs.io/component-name=ndm", 49 | timeout_insec=900, 50 | ) 51 | print("OpenEBS is up and running.") 52 | here = os.path.dirname(os.path.abspath(__file__)) 53 | manifest = os.path.join(here, "templates", "openebs-test.yaml") 54 | kubectl("apply -f {}".format(manifest)) 55 | wait_for_pod_state( 56 | "", 57 | "default", 58 | "running", 59 | label="app=openebs-test-busybox", 60 | timeout_insec=900, 61 | ) 62 | output = kubectl( 63 | "exec openebs-test-busybox -- ls /", timeout_insec=900, err_out="no" 64 | ) 65 | assert "my-data" in output 66 | kubectl("delete -f {}".format(manifest)) 67 | -------------------------------------------------------------------------------- /tests/test_inaccel.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import os 4 | import platform 5 | 6 | from utils import ( 7 | kubectl, 8 | kubectl_get, 9 | microk8s_disable, 10 | microk8s_enable, 11 | wait_for_pod_state, 12 | ) 13 | from subprocess import CalledProcessError 14 | 15 | 16 | class TestInaccel(object): 17 | @pytest.mark.skipif( 18 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 19 | reason="Skipping FPGA tests as we are under time pressure", 20 | ) 21 | @pytest.mark.skipif( 22 | os.environ.get("TEST_FPGA") != "True", 23 | reason="Skipping FPGA because TEST_FPGA is not set", 24 | ) 25 | @pytest.mark.skipif( 26 | platform.machine() != 
"x86_64", 27 | reason="FPGA tests are only relevant in x86 architectures", 28 | ) 29 | def test_inaccel(self): 30 | """ 31 | Sets up inaccel. 32 | 33 | """ 34 | try: 35 | print("Enabling inaccel") 36 | microk8s_enable("inaccel") 37 | except CalledProcessError: 38 | # Failed to enable addon. Skip the test. 39 | print("Could not enable inaccel support") 40 | return 41 | self.validate_inaccel() 42 | print("Disable inaccel") 43 | microk8s_disable("inaccel") 44 | 45 | def validate_inaccel(self): 46 | """ 47 | Validate inaccel by trying a vadd. 48 | """ 49 | if platform.machine() != "x86_64": 50 | print("FPGA tests are only relevant in x86 architectures") 51 | return 52 | 53 | wait_for_pod_state( 54 | "", "kube-system", "running", label="app.kubernetes.io/name=fpga-operator" 55 | ) 56 | here = os.path.dirname(os.path.abspath(__file__)) 57 | manifest = os.path.join(here, "templates", "inaccel.yaml") 58 | 59 | get_pod = kubectl_get("po") 60 | if "inaccel-vadd" in str(get_pod): 61 | # Cleanup 62 | kubectl("delete -f {}".format(manifest)) 63 | time.sleep(10) 64 | 65 | kubectl("apply -f {}".format(manifest)) 66 | wait_for_pod_state("inaccel-vadd", "default", "terminated") 67 | result = kubectl("logs pod/inaccel-vadd") 68 | assert "PASSED" in result 69 | -------------------------------------------------------------------------------- /tests/test_linkerd.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | 5 | from utils import ( 6 | kubectl, 7 | microk8s_disable, 8 | microk8s_enable, 9 | run_until_success, 10 | wait_for_installation, 11 | wait_for_pod_state, 12 | ) 13 | 14 | 15 | class TestLinkerd(object): 16 | @pytest.mark.skipif( 17 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 18 | reason="Skipping Linkerd tests as we are under time pressure", 19 | ) 20 | @pytest.mark.skipif( 21 | platform.machine() != "x86_64", 22 | reason="Linkerd test is available for amd64", 23 | ) 24 | 
@pytest.mark.skipif(platform.machine() == "s390x", reason="Not available on s390x") 25 | def test_linkerd(self): 26 | """ 27 | Sets up and validate linkerd 28 | 29 | """ 30 | print("Enabling Linkerd") 31 | microk8s_enable("linkerd") 32 | print("Validating Linkerd") 33 | self.validate_linkerd() 34 | print("Disabling Linkerd") 35 | microk8s_disable("linkerd") 36 | 37 | def validate_linkerd(self): 38 | """ 39 | Validate Linkerd by deploying emojivoto. 40 | """ 41 | wait_for_installation() 42 | wait_for_pod_state( 43 | "", 44 | "linkerd", 45 | "running", 46 | label="linkerd.io/control-plane-component=destination", 47 | timeout_insec=300, 48 | ) 49 | print("Linkerd controller up and running.") 50 | wait_for_pod_state( 51 | "", 52 | "linkerd", 53 | "running", 54 | label="linkerd.io/control-plane-component=proxy-injector", 55 | timeout_insec=300, 56 | ) 57 | print("Linkerd proxy injector up and running.") 58 | here = os.path.dirname(os.path.abspath(__file__)) 59 | manifest = os.path.join(here, "templates", "emojivoto.yaml") 60 | kubectl("apply -f {}".format(manifest)) 61 | wait_for_pod_state( 62 | "", "emojivoto", "running", label="app=emoji-svc", timeout_insec=600 63 | ) 64 | print("Verify linkerd mesh is available in emojivoto pods") 65 | cmd = "/snap/bin/microk8s.linkerd viz list -n emojivoto" 66 | output = run_until_success(cmd, timeout_insec=900, err_out="no") 67 | assert "emojivoto" in output 68 | kubectl("delete -f {}".format(manifest)) 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | 
share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /tests/test_monitoring.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import os 4 | import platform 5 | import subprocess 6 | 7 | from utils import ( 8 | kubectl, 9 | microk8s_disable, 10 | microk8s_enable, 11 | wait_for_pod_state, 12 | ) 13 | 14 | 15 | class TestMonitoring(object): 16 | @pytest.mark.skipif( 17 | platform.machine() != "x86_64", 18 | reason=( 19 | "Fluentd, prometheus, jaeger tests are only relevant in x86 architectures" 20 | ), 21 | ) 22 | @pytest.mark.skipif( 23 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 24 | reason=( 25 | "Skipping jaeger, prometheus and fluentd tests as we are under time" 26 | " pressure" 27 | ), 28 | ) 29 | def test_monitoring_addons(self): 30 | """ 31 | Test jaeger, prometheus and fluentd. 
32 | 33 | """ 34 | print("Enabling fluentd") 35 | microk8s_enable("fluentd") 36 | print("Enabling jaeger") 37 | microk8s_enable("jaeger") 38 | print("Validating the Jaeger operator") 39 | self.validate_jaeger() 40 | print("Validating the Fluentd") 41 | self.validate_fluentd() 42 | print("Disabling jaeger") 43 | microk8s_disable("jaeger") 44 | print("Disabling fluentd") 45 | microk8s_disable("fluentd") 46 | 47 | def validate_fluentd(self): 48 | """ 49 | Validate fluentd 50 | """ 51 | if platform.machine() != "x86_64": 52 | print("Fluentd tests are only relevant in x86 architectures") 53 | return 54 | 55 | wait_for_pod_state("elasticsearch-logging-0", "kube-system", "running") 56 | wait_for_pod_state("", "kube-system", "running", label="k8s-app=fluentd-es") 57 | wait_for_pod_state("", "kube-system", "running", label="k8s-app=kibana-logging") 58 | 59 | def validate_jaeger(self): 60 | """ 61 | Validate the jaeger operator 62 | """ 63 | if platform.machine() != "x86_64": 64 | print("Jaeger tests are only relevant in x86 architectures") 65 | return 66 | 67 | wait_for_pod_state("", "default", "running", label="name=jaeger-operator") 68 | attempt = 30 69 | while attempt > 0: 70 | try: 71 | output = kubectl("get ingress") 72 | if "simplest-query" in output: 73 | break 74 | except subprocess.CalledProcessError: 75 | pass 76 | time.sleep(2) 77 | attempt -= 1 78 | 79 | assert attempt > 0 80 | -------------------------------------------------------------------------------- /addons/sriov-device-plugin/sriovdp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: sriov-device-plugin 6 | namespace: kube-system 7 | --- 8 | apiVersion: apps/v1 9 | kind: DaemonSet 10 | metadata: 11 | name: kube-sriov-device-plugin 12 | namespace: kube-system 13 | labels: 14 | tier: node 15 | app: sriovdp 16 | spec: 17 | selector: 18 | matchLabels: 19 | name: sriov-device-plugin 20 | template: 
21 | metadata: 22 | labels: 23 | name: sriov-device-plugin 24 | tier: node 25 | app: sriovdp 26 | spec: 27 | hostNetwork: true 28 | nodeSelector: 29 | kubernetes.io/arch: amd64 30 | tolerations: 31 | - key: node-role.kubernetes.io/master 32 | operator: Exists 33 | effect: NoSchedule 34 | serviceAccountName: sriov-device-plugin 35 | containers: 36 | - name: kube-sriovdp 37 | image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.7.0 38 | imagePullPolicy: IfNotPresent 39 | args: 40 | - --log-dir=sriovdp 41 | - --log-level=10 42 | securityContext: 43 | privileged: true 44 | resources: 45 | requests: 46 | cpu: "250m" 47 | memory: "40Mi" 48 | limits: 49 | cpu: 1 50 | memory: "200Mi" 51 | volumeMounts: 52 | - name: devicesock 53 | mountPath: /var/lib/kubelet/device-plugins 54 | readOnly: false 55 | - name: plugins-registry 56 | mountPath: /var/lib/kubelet/plugins_registry 57 | readOnly: false 58 | - name: log 59 | mountPath: /var/log 60 | - name: config-volume 61 | mountPath: /etc/pcidp 62 | - name: device-info 63 | mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp 64 | volumes: 65 | - name: devicesock 66 | hostPath: 67 | path: /var/lib/kubelet/device-plugins 68 | - name: plugins-registry 69 | hostPath: 70 | path: /var/lib/kubelet/plugins_registry 71 | - name: log 72 | hostPath: 73 | path: /var/log 74 | - name: device-info 75 | hostPath: 76 | path: /var/run/k8s.cni.cncf.io/devinfo/dp 77 | type: DirectoryOrCreate 78 | - name: config-volume 79 | configMap: 80 | name: sriovdp-config 81 | items: 82 | - key: config.json 83 | path: config.json 84 | -------------------------------------------------------------------------------- /tests/test_amd.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | import subprocess 5 | import yaml 6 | from pathlib import Path 7 | 8 | from utils import ( 9 | microk8s_enable, 10 | microk8s_disable, 11 | kubectl, 12 | ) 13 | from subprocess import 
CalledProcessError 14 | 15 | TEMPLATES = Path(__file__).absolute().parent / "templates" 16 | 17 | 18 | class TestAMD(object): 19 | @pytest.mark.skipif( 20 | os.environ.get("STRICT") == "yes", 21 | reason="Skipping AMD tests in strict confinement as they are expected to fail", 22 | ) 23 | @pytest.mark.skipif( 24 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 25 | reason="Skipping AMD tests as we are under time pressure", 26 | ) 27 | @pytest.mark.skipif( 28 | platform.machine() != "x86_64", 29 | reason="AMD tests are only relevant in x86 architectures", 30 | ) 31 | def test_amd(self): 32 | """ 33 | Sets up amd gpu operator in a gpu capable system. Skip otherwise. 34 | 35 | """ 36 | here = os.path.dirname(os.path.abspath(__file__)) 37 | values_template = os.path.join(here, "templates", "amd-values.yaml") 38 | try: 39 | print("Enabling amd") 40 | microk8s_enable("amd --gpu-operator-values {}".format(values_template)) 41 | print("Enabled") 42 | except CalledProcessError: 43 | print("Could not enable amd addon") 44 | return 45 | self.validate_amd() 46 | try: 47 | print("Disabling amd") 48 | microk8s_disable("amd") 49 | print("Disabled") 50 | except CalledProcessError: 51 | print("Could not disable amd addon") 52 | return 53 | 54 | def validate_amd(self): 55 | """ 56 | Validate AMD by checking deviceConfig. 
57 | """ 58 | 59 | if platform.machine() != "x86_64": 60 | print("GPU tests are only relevant on x86 architectures") 61 | return 62 | 63 | print("Checking deviceconfig") 64 | namespace = "kube-amd-gpu" 65 | device_config_string = kubectl( 66 | f"get deviceconfig default -n {namespace} -o yaml" 67 | ) 68 | device_config_spec = yaml.safe_load(device_config_string)["spec"] 69 | 70 | selector_passed = device_config_spec["selector"]["unit-test-check"] == "true" 71 | test_runner_passed = device_config_spec["testRunner"]["enable"] 72 | metrics_exporter_passed = not device_config_spec["metricsExporter"]["enable"] 73 | 74 | assert selector_passed and test_runner_passed and metrics_exporter_passed 75 | print("Check passed") 76 | -------------------------------------------------------------------------------- /tests/test_nfs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import platform 4 | import time 5 | 6 | from utils import ( 7 | kubectl, 8 | microk8s_disable, 9 | microk8s_enable, 10 | is_container, 11 | wait_for_pod_state, 12 | ) 13 | 14 | 15 | class TestNfs(object): 16 | @pytest.mark.skipif( 17 | os.environ.get("STRICT") == "yes", 18 | reason="Skipping nfs tests in strict confinement as they are expected to fail", 19 | ) 20 | @pytest.mark.skipif( 21 | platform.machine() != "x86_64", 22 | reason="NFS tests are only relevant in x86 architectures", 23 | ) 24 | @pytest.mark.skipif( 25 | os.environ.get("UNDER_TIME_PRESSURE") == "True", 26 | reason="Skipping NFS tests as we are under time pressure", 27 | ) 28 | # NFS addon requires elevated privileges, which fails in lxc due to seccomp. 29 | @pytest.mark.skipif(is_container(), reason="NFS tests are skipped in containers") 30 | def test_storage_nfs(self): 31 | """ 32 | Sets up and validates NFS Server Provisioner. 
33 | """ 34 | print("Enabling NFS") 35 | microk8s_enable("nfs") 36 | print("Validating NFS") 37 | self.validate_storage_nfs() 38 | print("Disabling NFS") 39 | microk8s_disable("nfs") 40 | 41 | def validate_storage_nfs(self): 42 | """ 43 | Validate NFS Storage by creating two Pods mounting the same PVC. 44 | (optimal test would be on multinode-cluster) 45 | """ 46 | wait_for_pod_state( 47 | "", "nfs-server-provisioner", "running", label="app=nfs-server-provisioner" 48 | ) 49 | 50 | here = os.path.dirname(os.path.abspath(__file__)) 51 | manifest = os.path.join(here, "templates", "pvc-nfs.yaml") 52 | kubectl("apply -f {}".format(manifest)) 53 | wait_for_pod_state("", "default", "running", label="app=busybox-pvc-nfs") 54 | 55 | attempt = 50 56 | while attempt >= 0: 57 | output = kubectl("get pvc -l vol=pvc-nfs") 58 | if "Bound" in output: 59 | break 60 | time.sleep(2) 61 | attempt -= 1 62 | 63 | # Make sure the test pod writes data to the storage 64 | for root, dirs, files in os.walk("/var/snap/microk8s/common/nfs-storage"): 65 | for file in files: 66 | if file == "dates1": 67 | found1 = True 68 | if file == "dates2": 69 | found2 = True 70 | assert found1 71 | assert found2 72 | assert "pvc-nfs" in output 73 | assert "Bound" in output 74 | kubectl("delete -f {}".format(manifest)) 75 | -------------------------------------------------------------------------------- /addons/kata/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import click 4 | import os 5 | import subprocess 6 | import sys 7 | from tempfile import mkstemp 8 | from shutil import move, copymode 9 | from os import fdopen, remove 10 | 11 | 12 | def mark_kata_disabled(): 13 | """ 14 | Mark the kata addon as enabled by removing the kata.enabled lock 15 | """ 16 | try: 17 | snapdata_path = os.environ.get("SNAP_DATA") 18 | lock_fname = "{}/var/lock/kata.enabled".format(snapdata_path) 19 | subprocess.call(['sudo', 'rm', lock_fname]) 20 | 
except (subprocess.CalledProcessError): 21 | print("Failed to mark the kata addon as disabled." ) 22 | sys.exit(4) 23 | 24 | def delete_runtime_manifest(): 25 | try: 26 | snap_path = os.environ.get("SNAP") 27 | current_path = os.path.dirname(os.path.realpath(__file__)) 28 | manifest = "{}/kata/runtime.yaml".format(current_path) 29 | subprocess.call(["{}/microk8s-kubectl.wrapper".format(snap_path), "delete", "-f", manifest]) 30 | except (subprocess.CalledProcessError): 31 | print("Failed to apply the runtime manifest." ) 32 | sys.exit(5) 33 | 34 | 35 | def restart_containerd(): 36 | try: 37 | print("Restarting containerd") 38 | subprocess.call(['sudo', 'systemctl', 'restart', 'snap.microk8s.daemon-containerd']) 39 | except (subprocess.CalledProcessError): 40 | print("Failed to restart containerd. Please, yry to 'microk8s stop' and 'microk8s start' manually." ) 41 | sys.exit(3) 42 | 43 | 44 | def configure_containerd(): 45 | """ 46 | Configure the containerd PATH by removing the kata runtime binary 47 | """ 48 | snapdata_path = os.environ.get("SNAP_DATA") 49 | containerd_env_file = "{}/args/containerd-env".format(snapdata_path) 50 | #Create temp file 51 | fh, abs_path = mkstemp() 52 | with fdopen(fh,'w') as tmp_file: 53 | with open(containerd_env_file) as conf_file: 54 | for line in conf_file: 55 | if "KATA_PATH=" in line: 56 | line = "KATA_PATH=\n" 57 | tmp_file.write(line) 58 | 59 | copymode(containerd_env_file, abs_path) 60 | remove(containerd_env_file) 61 | move(abs_path, containerd_env_file) 62 | 63 | 64 | @click.command() 65 | def kata(): 66 | """ 67 | Disable the kata runtime. Mark it as disabled, delete the runtimeClassName but do not remove the 68 | kata runtime because we do not know if it is used by any other application. 
69 | """ 70 | print("Configuring containerd") 71 | configure_containerd() 72 | restart_containerd() 73 | print("Deleting kata runtime manifest") 74 | delete_runtime_manifest() 75 | mark_kata_disabled() 76 | 77 | 78 | if __name__ == "__main__": 79 | kata(prog_name="microk8s disable kata") 80 | -------------------------------------------------------------------------------- /addons/ngrok/enable: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | HELM="${SNAP}/microk8s-helm.wrapper" 4 | VERSION=0.19.0 5 | 6 | POSITIONAL_ARGS=() 7 | 8 | while [[ $# -gt 0 ]]; do 9 | case $1 in 10 | --namespace) 11 | NAMESPACE="$2" 12 | shift # past argument 13 | shift # past value 14 | ;; 15 | --authtoken) 16 | NGROK_AUTHTOKEN="$2" 17 | shift # past argument 18 | shift # past value 19 | ;; 20 | --api-key) 21 | NGROK_API_KEY="$2" 22 | shift # past argument 23 | shift # past value 24 | ;; 25 | --secret-name) 26 | SECRET_NAME="$2" 27 | shift # past argument 28 | shift # past value 29 | ;; 30 | -*|--*) 31 | echo "Unknown option $1" 32 | exit 1 33 | ;; 34 | *) 35 | POSITIONAL_ARGS+=("$1") # save positional arg 36 | shift # past argument 37 | ;; 38 | esac 39 | done 40 | 41 | set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters 42 | 43 | if [ -z "$NAMESPACE" ]; then 44 | echo "Namespace (--namespace) was not specified. Defaulting to ngrok-operator namespace." 45 | NAMESPACE="ngrok-operator" 46 | fi 47 | 48 | if [ -z "$SECRET_NAME" ]; then 49 | if [ -z "$NGROK_AUTHTOKEN" ] || [ -z "$NGROK_API_KEY" ]; then 50 | echo "Either --secret-name or both --authtoken and --api-key must be specified. Please see https://ngrok.com/docs/getting-started/kubernetes/ingress/ for more information." 
51 | exit 1 52 | fi 53 | fi 54 | 55 | echo "Enabling ngrok Kubernetes operator in ${NAMESPACE} namespace" 56 | 57 | "${HELM}" repo update ngrok || "${HELM}" repo add ngrok https://charts.ngrok.com 58 | 59 | if [ -n "$SECRET_NAME" ]; then 60 | "${HELM}" upgrade --install --create-namespace ngrok-operator ngrok/ngrok-operator --namespace "${NAMESPACE}" --set credentials.secret.name="${SECRET_NAME}" --version=${VERSION} 61 | else 62 | "${HELM}" upgrade --install --create-namespace ngrok-operator ngrok/ngrok-operator --namespace "${NAMESPACE}" --set credentials.apiKey="${NGROK_API_KEY}" --set credentials.authtoken="${NGROK_AUTHTOKEN}" --version=${VERSION} 63 | fi 64 | 65 | echo "---" 66 | echo "" 67 | echo "The ngrok Kubernetes operator has been successfully installed in the ${NAMESPACE} namespace." 68 | echo "" 69 | 70 | if [ -n "$SECRET_NAME" ]; then 71 | echo "You specified a secret named ${SECRET_NAME}. Please ensure that secret exists in your cluster with the keys 'API_KEY' and 'AUTHTOKEN'." 72 | echo "For help creating the secret, see: https://ngrok.com/docs/getting-started/kubernetes/ingress/" 73 | echo "" 74 | fi 75 | 76 | echo "Now you can create Ingress resources with 'ingressClassName: ngrok' to expose your services through ngrok." 77 | echo "If you have the Gateway API CRDs installed, you can use those with ngrok too. See https://ngrok.com/docs/getting-started/kubernetes/gateway-api/ for examples." 
78 | echo "For the getting-started guide, see: https://ngrok.com/docs/getting-started/kubernetes/ingress/" 79 | -------------------------------------------------------------------------------- /addons/falco/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | NAMESPACE_FALCO="falco" 8 | 9 | FALCO_HELM_VERSION="4.5.1" 10 | 11 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 12 | HELM="$SNAP/microk8s-helm3.wrapper" 13 | 14 | do_prerequisites() { 15 | "$SNAP/microk8s-enable.wrapper" helm3 16 | # enable dns service 17 | "$SNAP/microk8s-enable.wrapper" dns 18 | # enable hostpath-storage 19 | "$SNAP/microk8s-enable.wrapper" hostpath-storage 20 | # Allow some time for the apiserver to start 21 | sleep 5 22 | ${SNAP}/microk8s-status.wrapper --wait-ready --timeout 30 >/dev/null 23 | } 24 | 25 | VALUES="" 26 | 27 | get_falco() { 28 | # get the options 29 | while getopts ":f:h:" flag; do 30 | case "${flag}" in 31 | f) VALUES=${OPTARG} 32 | ;; 33 | *) echo "Usage: microk8s enable falco" 34 | echo "" 35 | echo "With overwriting default values: microk8s enable falco -f values.yaml" 36 | echo "" 37 | echo "See https://github.com/falcosecurity/charts/tree/master/falco for more information about the values" 38 | exit 0 39 | ;; 40 | esac 41 | done 42 | 43 | echo "Installing Falco" 44 | 45 | if [ -n "$VALUES" ]; then 46 | echo "Using values file: $VALUES" 47 | fi 48 | 49 | # make sure the "falco" namespace exists 50 | # $KUBECTL create namespace "$NAMESPACE_FALCO" > /dev/null 2>&1 || true 51 | 52 | # add the falcosecurity chart repository 53 | $HELM repo add falcosecurity https://falcosecurity.github.io/charts 54 | 55 | # install the helm chart 56 | if [ -z "$VALUES" ] 57 | then 58 | $HELM upgrade -i falco falcosecurity/falco \ 59 | --namespace "$NAMESPACE_FALCO" \ 60 | --create-namespace \ 61 | --version $FALCO_HELM_VERSION \ 62 | --set driver.kind="modern_ebpf" 
\ 63 | --set collectors.containerd.socket="/var/snap/microk8s/common/run/containerd.sock" \ 64 | --set falcosidekick.enabled=true \ 65 | --set falcosidekick.replicaCount=1 \ 66 | --set falcosidekick.webui.enabled=true \ 67 | --set falcosidekick.webui.replicaCount=1 68 | else 69 | $HELM upgrade -i falco falcosecurity/falco \ 70 | --namespace "$NAMESPACE_FALCO" \ 71 | --create-namespace \ 72 | --version $FALCO_HELM_VERSION \ 73 | -f $VALUES 74 | fi 75 | 76 | echo "Falco is installed" 77 | echo "The default username/password for the Falcosidekick UI is admin/admin" 78 | echo "The Falcosidekick UI is exposed by the falco-falcosidekick-ui service in the falco Namespace" 79 | echo "This is a ClusterIP Service by default, so you can either access it via a kubectl port-forward," 80 | echo "or you can edit the service to be another type you can reach (e.g. NodePort or LoadBalancer)," 81 | echo "or (if you have enabled an Ingress controller) you can put an Ingress in front of this service." 82 | } 83 | 84 | do_prerequisites 85 | get_falco 86 | -------------------------------------------------------------------------------- /addons/nfs/README.md: -------------------------------------------------------------------------------- 1 | # Microk8s NFS Addon 2 | Addon deploys [nfs-server-provisioner](https://artifacthub.io/packages/helm/kvaps/nfs-server-provisioner) Helm Chart. 3 | 4 | The most of the benefits are manifested on multi-node Microk8s clusters. I.e. Pods running on different Microk8s nodes can share the storage in a RW manner. 5 | **WARNING: Underlying hostPath volume served by the NFS server is mounted to a single Node at the time, not ensuring HA on storage level.** 6 | 7 | 8 | # Usage 9 | 10 | ## Enable Addon 11 | Specify Microk8s node name acting as a storage node. 12 | ``` 13 | microk8s enable nfs -n NODE_NAME 14 | ``` 15 | 16 | Omitting `-n` flag results in random selection of the Microk8s node for NFS (fully ok when nodes have equal storage size). 
17 | Use **value** of the label key `kubernetes.io/hostname` as a node name (e.g. `master` or `worker`): 18 | ``` 19 | kubectl get node --show-labels 20 | or 21 | kubectl get node -o yaml | grep 'kubernetes.io/hostname' 22 | ``` 23 | 24 | ## Testing NFS 25 | ``` 26 | /data/manifests-samples ✘ INT 57s ⎈ microk8s-multipass 20:17:45 27 | ❯ cat busybox-daemonset-nfs.yaml 28 | --- 29 | kind: PersistentVolumeClaim 30 | apiVersion: v1 31 | metadata: 32 | name: pvc-nfs 33 | spec: 34 | storageClassName: "nfs" 35 | accessModes: 36 | - ReadWriteMany 37 | resources: 38 | requests: 39 | storage: 1Gi 40 | 41 | --- 42 | apiVersion: apps/v1 43 | kind: DaemonSet 44 | metadata: 45 | name: busybox-pvc-nfs 46 | labels: 47 | app: busybox 48 | spec: 49 | selector: 50 | matchLabels: 51 | name: busybox-pvc-nfs 52 | template: 53 | metadata: 54 | labels: 55 | name: busybox-pvc-nfs 56 | spec: 57 | containers: 58 | - name: busybox-pvc-nfs 59 | image: busybox 60 | imagePullPolicy: Always 61 | command: ["/bin/sh", "-c", "while true; do date >> /mount/$NODE_NAME-$POD_NAME; sleep 2; done"] 62 | env: 63 | - name: NODE_NAME 64 | valueFrom: 65 | fieldRef: 66 | fieldPath: spec.nodeName 67 | - name: POD_NAME 68 | valueFrom: 69 | fieldRef: 70 | fieldPath: metadata.name 71 | volumeMounts: 72 | - name: volume 73 | mountPath: /mount 74 | volumes: 75 | - name: volume 76 | persistentVolumeClaim: 77 | claimName: pvc-nfs 78 | 79 | kubectl apply -f busybox-daemonset-nfs.yaml 80 | ``` 81 | 82 | To check the shared data of Pods running on different nodes: 83 | - Exec to Pods 84 | - `cat /var/snap/microk8s/common/nfs-storage/pvc-XXXXXX/` on a Node hosting NFS Server Provisioner Pod. 85 | 86 | 87 | ## Disable Addon 88 | `microk8s disable nfs` 89 | 90 | ## Further considerations 91 | By default NFS consumes the whole storage of the underlying node regardless of NFS Server Provisioner PV size or client's PVC resource requests. 92 | Implementing LVM or similar on the host level can improve storage management. 
93 | 94 | -------------------------------------------------------------------------------- /tests/test_cilium.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import os 4 | import platform 5 | 6 | from utils import ( 7 | is_container, 8 | kubectl, 9 | microk8s_disable, 10 | microk8s_reset, 11 | run_until_success, 12 | wait_for_installation, 13 | wait_for_pod_state, 14 | ) 15 | from subprocess import PIPE, STDOUT, run 16 | 17 | 18 | class TestCilium(object): 19 | @pytest.mark.skipif( 20 | os.environ.get("STRICT") == "yes", 21 | reason=( 22 | "Skipping cilium tests in strict confinement as they are expected to fail" 23 | ), 24 | ) 25 | @pytest.mark.skipif( 26 | platform.machine() != "x86_64", 27 | reason="Cilium tests are only relevant in x86 architectures", 28 | ) 29 | @pytest.mark.skipif(is_container(), reason="Cilium tests are skipped in containers") 30 | def test_cilium(self): 31 | """ 32 | Sets up and validates Cilium. 33 | """ 34 | print("Enabling Cilium") 35 | run( 36 | "/snap/bin/microk8s.enable cilium".split(), 37 | stdout=PIPE, 38 | input=b"N\n", 39 | stderr=STDOUT, 40 | check=True, 41 | ) 42 | print("Validating Cilium") 43 | self.validate_cilium() 44 | print("Disabling Cilium") 45 | microk8s_disable("cilium") 46 | microk8s_reset() 47 | 48 | def cilium(self, cmd, timeout_insec=300, err_out=None): 49 | """ 50 | Do a cilium 51 | Args: 52 | cmd: left part of cilium command 53 | timeout_insec: timeout for this job 54 | err_out: If command fails and this is the output, return. 55 | 56 | Returns: the cilium response in a string 57 | """ 58 | cmd = "/snap/bin/microk8s.cilium " + cmd 59 | return run_until_success(cmd, timeout_insec, err_out) 60 | 61 | def validate_cilium(self): 62 | """ 63 | Validate cilium by deploying an nginx pod and checking it appears in the cilium endpoint list.
64 | """ 65 | if platform.machine() != "x86_64": 66 | print("Cilium tests are only relevant in x86 architectures") 67 | return 68 | 69 | wait_for_installation() 70 | wait_for_pod_state("", "kube-system", "running", label="k8s-app=cilium") 71 | 72 | here = os.path.dirname(os.path.abspath(__file__)) 73 | manifest = os.path.join(here, "templates", "nginx-pod.yaml") 74 | 75 | # Try up to ten times to get nginx under cilium 76 | for attempt in range(0, 10): 77 | kubectl("apply -f {}".format(manifest)) 78 | wait_for_pod_state("", "default", "running", label="app=nginx") 79 | output = self.cilium("endpoint list -o json", timeout_insec=20) 80 | if "nginx" in output: 81 | kubectl("delete -f {}".format(manifest)) 82 | break 83 | else: 84 | print("Cilium not ready will retry testing.") 85 | kubectl("delete -f {}".format(manifest)) 86 | time.sleep(20) 87 | else: 88 | print("Cilium testing failed.") 89 | assert False 90 | -------------------------------------------------------------------------------- /addons/easyhaproxy/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | source $SNAP/actions/common/utils.sh 5 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | HELM="$SNAP/microk8s-helm3.wrapper" 9 | 10 | echo "+=========================================+" 11 | echo "| Enabling EasyHAProxy Ingress Controller |" 12 | echo "+=========================================+" 13 | echo 14 | 15 | NODEPORTS=$($KUBECTL get svc --all-namespaces -o go-template='{{range .items}}{{range.spec.ports}}{{if .nodePort}}{{.nodePort}}{{"\n"}}{{end}}{{end}}{{end}}') 16 | HOSTPORTS=$($KUBECTL get ds --all-namespaces -o go-template='{{range .items}}{{range .spec.template.spec.containers}}{{range .ports}}{{if .hostPort}}{{.hostPort}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}') 17 | 18 | if [ -z "$1" ]; then 19 | 20 | for port in $(echo $NODEPORTS $HOSTPORTS); do 21 | if [[
"$port" == "80" || "$port" == "443" || "$port" == "1936" ]]; then 22 | echo "Port $port is already in use. Please disable the ingress controller or use the --nodeport option." 23 | exit 1 24 | fi 25 | done 26 | echo "Port 80 and 443 are available. Installing as a Daemonset" 27 | 28 | elif [ "$1" == "--nodeport" ]; then 29 | 30 | for port in $(echo $NODEPORTS $HOSTPORTS); do 31 | if [[ "$port" == "30080" || "$port" == "30443" || "$port" == "31936" ]]; then 32 | echo "Port $port is already in use. Please disable the component is using port $port." 33 | exit 1 34 | fi 35 | done 36 | echo "Port 30080 and 30443 are available. Installing as a Nodeport" 37 | 38 | else 39 | 40 | echo 41 | echo ERROR: Invalid parameter $1 42 | echo 43 | echo "You should pass 'empty' to install as a daemonset or '--nodeport' to install as nodeport" 44 | echo 45 | exit 1 46 | 47 | fi 48 | 49 | "$SNAP/microk8s-enable.wrapper" dns 50 | "$SNAP/microk8s-enable.wrapper" helm3 51 | 52 | NAMESPACE_PTR="easyhaproxy" 53 | 54 | $KUBECTL create namespace "$NAMESPACE_PTR" > /dev/null 2>&1 || true 55 | 56 | $HELM repo add byjg https://opensource.byjg.com/helm > /dev/null 2>&1 57 | $HELM repo update > /dev/null 2>&1 58 | 59 | $KUBECTL label nodes $(hostname) "easyhaproxy/node=master" --overwrite 60 | HELM_PACKAGE_VERSION="0.1.5" 61 | 62 | if [ -z "$1" ]; then 63 | 64 | $HELM upgrade --install ingress byjg/easyhaproxy \ 65 | --version $HELM_PACKAGE_VERSION \ 66 | --namespace $NAMESPACE_PTR \ 67 | --set resources.requests.cpu=100m \ 68 | --set resources.requests.memory=128Mi 69 | 70 | echo 71 | echo "Installed as a Daemonset at port 80 and 443" 72 | echo 73 | 74 | elif [ "$1" == "--nodeport" ]; then 75 | 76 | $HELM upgrade --install ingress byjg/easyhaproxy \ 77 | --version $HELM_PACKAGE_VERSION \ 78 | --namespace $NAMESPACE_PTR \ 79 | --set resources.requests.cpu=100m \ 80 | --set resources.requests.memory=128Mi \ 81 | --set service.create=true \ 82 | --set service.type=NodePort \ 83 | --set 
#!/usr/bin/env python3

import os
import pathlib
import re
import socket
import subprocess
import sys

import click

DIR = pathlib.Path(__file__).parent.absolute()

KUBECTL = os.path.expandvars("$SNAP/microk8s-kubectl.wrapper")
HELM = os.path.expandvars("$SNAP/microk8s-helm3.wrapper")
MICROK8S_STATUS = os.path.expandvars("$SNAP/microk8s-status.wrapper")
MICROK8S_ENABLE = os.path.expandvars("$SNAP/microk8s-enable.wrapper")


def ensure_addon(addon_name: str):
    """Enable *addon_name* through the microk8s wrapper unless it already is.

    Exits the process with status 1 when enabling the addon fails.
    """
    click.echo(f"Checking for addon {addon_name}...")
    status = subprocess.check_output([MICROK8S_STATUS, "-a", addon_name]).decode()
    if "enabled" in status:
        click.echo(f"Checking for addon {addon_name}... OK")
        return
    result = subprocess.run([MICROK8S_ENABLE, addon_name])
    if result.returncode != 0:
        click.echo(f"Failed to enable addon {addon_name}", err=True)
        sys.exit(1)
    click.echo(f"Checking for addon {addon_name}... OK")


@click.command()
@click.option("--enable-ee", is_flag=True, default=False)
@click.option("--helm3-addon", default="core/helm3")
@click.option("--dns-addon", default="core/dns")
@click.option("--rbac-addon", default="core/rbac")
@click.option("--ingress-addon", default="core/ingress")
@click.option("--metric-server-addon", default="core/metrics-server")
@click.option("--storage-addon", default="core/hostpath-storage")
@click.option("--storage-class", default=None)
def main(
    helm3_addon: str,
    dns_addon: str,
    rbac_addon: str,
    ingress_addon: str,
    metric_server_addon: str,
    storage_addon: str,
    enable_ee: bool,
    storage_class: str,
):
    """Install Portainer (CE or EE) from its Helm chart into the cluster."""

    # Prerequisite addons, checked in the same order as before.
    for prerequisite in (
        dns_addon,
        helm3_addon,
        rbac_addon,
        ingress_addon,
        metric_server_addon,
    ):
        if prerequisite:
            ensure_addon(prerequisite)

    subprocess.run([HELM, "repo", "add", "portainer",
                    "https://portainer.github.io/k8s/"])
    subprocess.run([HELM, "repo", "update"])
    # Create portainer namespace. Ignore failures (e.g. if namespace exists)
    subprocess.run([KUBECTL, "create", "namespace", "portainer"])

    extra_args = []
    if enable_ee:
        extra_args += ["--set", "enterpriseEdition.enabled=true"]

    if storage_class is None:
        # No explicit storage class: make sure the default storage addon is up.
        ensure_addon(storage_addon)
    else:
        extra_args += ["--set", f"persistence.storageClass={storage_class}"]

    subprocess.run(
        [HELM, "install", "-n", "portainer", "portainer", "portainer/portainer", *extra_args]
    )

    click.echo(
        """
=============================================================
Portainer has been installed and will be available shortly. Use the Nodeport or LB port to access.
"""
    )


if __name__ == "__main__":
    main()
#!/usr/bin/env bash
set -e

# Operator version: first positional argument, defaulting to a known-good one.
CNPG_VERSION="$1"
if [ -z "$1" ]; then
  CNPG_VERSION="1.23.3"
fi

KUBECTL="${SNAP}/microk8s-kubectl.wrapper"
# shellcheck source=/dev/null
source "${SNAP}"/actions/common/utils.sh

# Map the kernel architecture to the release-asset naming scheme.
if [[ "$(arch)" == "aarch64" ]]; then
  ARCH="arm64"
else
  ARCH=$(uname -m)
fi

# Print the version of an already-installed kubectl-cnpg plugin,
# or the empty string when the plugin is not present.
cnpg_check_local_plugin() {
  if [ -f "${SNAP_DATA}/bin/kubectl-cnpg" ]; then
    version=$("${SNAP_DATA}/bin/kubectl-cnpg" version | sed 's/.*:\(.*\).*Commit.*/\1/' | tr -d ' ')
    echo "${version}"
  else
    echo ""
  fi
}

# Download and install the kubectl-cnpg plugin into ${SNAP_DATA}/bin.
# $1: plugin version to fetch.
cnpg_install_plugin() {
  local cnpg_version=$1; shift
  echo "Installing kubectl cnpg version ${cnpg_version}"
  run_with_sudo mkdir "${SNAP_DATA}/tmp/cloudnative-pg"

  fetch_as "https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v${cnpg_version}/kubectl-cnpg_${cnpg_version}_linux_${ARCH}.tar.gz" "${SNAP_DATA}/tmp/cloudnative-pg/kubectl-cnpg.tar.gz"
  run_with_sudo tar -C "${SNAP_DATA}/tmp/cloudnative-pg/" -xvf "${SNAP_DATA}/tmp/cloudnative-pg/kubectl-cnpg.tar.gz" kubectl-cnpg
  run_with_sudo mkdir -p "${SNAP_DATA}/bin"
  run_with_sudo mv "${SNAP_DATA}/tmp/cloudnative-pg/kubectl-cnpg" "${SNAP_DATA}/bin/"
  run_with_sudo chmod +x "${SNAP_DATA}/bin/kubectl-cnpg"
  run_with_sudo rm -fr "${SNAP_DATA}/tmp/cloudnative-pg"
}

# Generate the operator manifest with the plugin, apply it, and wait for the
# operator Pod to become ready.
#
# BUG FIX: the original captured the (redirected, hence empty) stdout of the
# apply pipeline into a variable, ran a numeric [[ ... -ne 0 ]] test on that
# empty string, and then `echo "$?"`-ed instead of exiting.  Test the
# pipeline's exit status directly and abort on failure.
cnpg_apply_manifest() {
  if ! "${SNAP_DATA}"/bin/kubectl-cnpg install generate | $KUBECTL apply --server-side -f - > /dev/null; then
    echo "Failed to apply the CloudNativePG operator manifest." >&2
    exit 1
  fi

  # We wait for the Deployment to be ready
  echo "Waiting 120 seconds for CloudNativePG Pod to be ready..."
  cnpg_wait_for
}

# Block until the operator Pod reports Ready (up to 120 seconds).
# Relies on `set -e` to abort the script when the wait times out.
cnpg_wait_for() {
  $KUBECTL wait pods -n cnpg-system --for condition=Ready --timeout=120s -l app.kubernetes.io/name=cloudnative-pg
}
82 | -------------------------------------------------------------------------------- /addons/fluentd/fluentd/es-statefulset.yaml: -------------------------------------------------------------------------------- 1 | # RBAC authn and authz 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: elasticsearch-logging 6 | namespace: kube-system 7 | labels: 8 | k8s-app: elasticsearch-logging 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | --- 11 | kind: ClusterRole 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | metadata: 14 | name: elasticsearch-logging 15 | labels: 16 | k8s-app: elasticsearch-logging 17 | addonmanager.kubernetes.io/mode: Reconcile 18 | rules: 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - "services" 23 | - "namespaces" 24 | - "endpoints" 25 | verbs: 26 | - "get" 27 | --- 28 | kind: ClusterRoleBinding 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | metadata: 31 | namespace: kube-system 32 | name: elasticsearch-logging 33 | labels: 34 | k8s-app: elasticsearch-logging 35 | addonmanager.kubernetes.io/mode: Reconcile 36 | subjects: 37 | - kind: ServiceAccount 38 | name: elasticsearch-logging 39 | namespace: kube-system 40 | apiGroup: "" 41 | roleRef: 42 | kind: ClusterRole 43 | name: elasticsearch-logging 44 | apiGroup: "" 45 | --- 46 | # Elasticsearch deployment itself 47 | apiVersion: apps/v1 48 | kind: StatefulSet 49 | metadata: 50 | name: elasticsearch-logging 51 | namespace: kube-system 52 | labels: 53 | k8s-app: elasticsearch-logging 54 | version: v7.4.3 55 | addonmanager.kubernetes.io/mode: Reconcile 56 | spec: 57 | serviceName: elasticsearch-logging 58 | replicas: 1 59 | selector: 60 | matchLabels: 61 | k8s-app: elasticsearch-logging 62 | version: v7.4.3 63 | template: 64 | metadata: 65 | labels: 66 | k8s-app: elasticsearch-logging 67 | version: v7.4.3 68 | spec: 69 | serviceAccountName: elasticsearch-logging 70 | containers: 71 | - image: quay.io/fluentd_elasticsearch/elasticsearch:v7.10.2 72 | name: 
elasticsearch-logging 73 | imagePullPolicy: Always 74 | resources: 75 | # need more cpu upon initialization, therefore burstable class 76 | limits: 77 | cpu: 1000m 78 | memory: 3Gi 79 | requests: 80 | cpu: 100m 81 | memory: 3Gi 82 | ports: 83 | - containerPort: 9200 84 | name: db 85 | protocol: TCP 86 | - containerPort: 9300 87 | name: transport 88 | protocol: TCP 89 | livenessProbe: 90 | tcpSocket: 91 | port: transport 92 | initialDelaySeconds: 5 93 | timeoutSeconds: 20 94 | failureThreshold: 10 95 | readinessProbe: 96 | tcpSocket: 97 | port: transport 98 | initialDelaySeconds: 5 99 | timeoutSeconds: 20 100 | failureThreshold: 10 101 | volumeMounts: 102 | - name: elasticsearch-logging 103 | mountPath: /data 104 | env: 105 | - name: ES_JAVA_OPTS 106 | value: "-Dlog4j2.formatMsgNoLookups=true" 107 | - name: "NAMESPACE" 108 | valueFrom: 109 | fieldRef: 110 | fieldPath: metadata.namespace 111 | - name: "MINIMUM_MASTER_NODES" 112 | value: "1" 113 | volumes: 114 | - name: elasticsearch-logging 115 | emptyDir: {} 116 | # Elasticsearch requires vm.max_map_count to be at least 262144. 117 | # If your OS already sets up this number to a higher value, feel free 118 | # to remove this init container. 
#!/usr/bin/env python3
"""Enable the Forgejo addon by templating its Helm chart into the cluster."""
import click
import os
import subprocess
from pathlib import Path
import shutil


class Handler:
    def __init__(self):
        """Forgejo HELM REPO: https://code.forgejo.org/forgejo-helm/forgejo-helm"""

        self.REPOSITORY = "oci://code.forgejo.org/forgejo-helm/forgejo"
        self.BASE_DIR = Path(__file__).parent
        self.HELM = os.path.expandvars("$SNAP/microk8s-helm3.wrapper")
        self.KUBECTL = os.path.expandvars("$SNAP/microk8s-kubectl.wrapper")
        self.mk8s_enable = os.path.expandvars("$SNAP/microk8s-enable.wrapper")
        self.mk8s_disable = os.path.expandvars("$SNAP/microk8s-disable.wrapper")
        self.untarpath = os.path.join("/tmp/helm")
        self.helmfolder = os.path.join(self.untarpath, "forgejo")

    def create_namespace(self, ns):
        """Create namespace *ns*; report but tolerate failures (e.g. it exists)."""
        try:
            subprocess.check_call([self.KUBECTL, "create", "namespace", ns])
        except Exception as e:
            click.echo(e)

    def pull(self, repo=None, *args, **kwargs):
        """Pull the chart and untar it into ``self.untarpath``.

        BUG FIX: main() passes the ``--repo`` value positionally, but the
        original signature swallowed it into ``*args`` and always pulled
        ``self.REPOSITORY`` — i.e. ``--repo`` was silently ignored.  Accept it
        as an optional argument and fall back to the default repository.
        """
        subprocess.check_call(
            [
                self.HELM,
                "pull",
                repo or self.REPOSITORY,
                "--untar",
                "--untardir",
                self.untarpath,
            ]
        )

    def pull_update(self, repo=None, *args, **kwargs):
        """Remove any previously pulled chart and pull it again (see pull())."""
        shutil.rmtree(self.helmfolder)
        self.pull(repo)

    def enable(self, ns, values, admin, pw, email, *args, **kwargs):
        """Install addon to kubernetes"""
        # Forgejo needs a default storage class for its PVCs.
        cmd = ["/bin/sh", "-c", f"{self.mk8s_enable} hostpath-storage"]
        subprocess.check_call(cmd)
        # NOTE(review): chart values are interpolated into a shell string; the
        # click defaults are safe, but user-supplied values are not escaped.
        cmd = [
            "/bin/sh",
            "-c",
            f"""{self.HELM} template forgejo {self.helmfolder} \\
            -f {values} \\
            --namespace {ns} \\
            --set gitea.admin.username={admin} \\
            --set gitea.admin.password={pw} \\
            --set gitea.admin.email={email} | {self.KUBECTL} apply -n {ns} -f -
            """,
        ]
        subprocess.check_call(cmd)


H = Handler()


@click.command()
@click.option(
    "--ns", default="forgejo", help="set / create custom kubernetes namespace"
)
@click.option("--admin", default="forgejo", help="admin username")
@click.option("--email", default="forgejo@local.domain", help="admin email")
@click.option("--pw", default="admin1234", help="admin password")
@click.option(
    "--values",
    default=H.BASE_DIR / "values-mk8s.yaml",
    type=str,
    help="provide one values file path or url",
)
@click.option(
    "--repo",
    default=H.REPOSITORY,
    type=str,
    help="provide custom helm repo path, use it with --update to pull from repository",
)
@click.option(
    "--update",
    default=False,
    is_flag=True,
    help=f"pull helm if not exists in {H.helmfolder}",
)
def main(ns, admin, email, pw, values, repo, update):
    """CLI options available"""

    click.echo("#### Forgejo parameter:")
    click.echo(f"--ns {ns}")
    click.echo(f"--admin {admin}")
    click.echo("--pw ...")
    click.echo(f"--email {email}")
    click.echo(f"--values {values}")
    click.echo(f"--repo {repo}")
    click.echo(f"--update {update}")
    click.echo("#### enabling forgejo addon")

    if update:
        # Explicit refresh: drop the cached chart and pull from *repo*.
        H.pull_update(repo)
    elif not os.path.exists(os.path.join(H.helmfolder, "values.yaml")):
        # First run: the chart has not been pulled yet.
        H.pull(repo)

    H.create_namespace(ns)
    H.enable(repo=repo, ns=ns, values=values, admin=admin, pw=pw, email=email)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3

import click
import os
import subprocess
import sys
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove


def mark_kata_enabled():
    """
    Mark the kata addon as enabled by creating the kata.enabled lock file.

    Exits with status 4 when the lock file cannot be created.
    """
    try:
        snapdata_path = os.environ.get("SNAP_DATA")
        lock_fname = "{}/var/lock/kata.enabled".format(snapdata_path)
        # BUG FIX: subprocess.call() never raises CalledProcessError, so the
        # except branch below was unreachable.  check_call() raises on a
        # non-zero exit status, making the error handling effective.
        subprocess.check_call(["sudo", "touch", lock_fname])
    except subprocess.CalledProcessError:
        print("Failed to mark the kata addon as enabled.")
        sys.exit(4)


def apply_runtime_manifest():
    """
    Apply the manifest containing the definition of the kata runtimeClassName.

    Exits with status 5 when kubectl fails.
    """
    try:
        snap_path = os.environ.get("SNAP")
        current_path = os.path.dirname(os.path.realpath(__file__))
        manifest = "{}/kata/runtime.yaml".format(current_path)
        # BUG FIX: check_call() instead of call(), so a kubectl failure is
        # actually detected (call() never raises CalledProcessError).
        subprocess.check_call(
            ["{}/microk8s-kubectl.wrapper".format(snap_path), "apply", "-f", manifest]
        )
    except subprocess.CalledProcessError:
        print("Failed to apply the runtime manifest.")
        sys.exit(5)


def restart_containerd():
    """
    Restart the containerd service so it picks up the new KATA_PATH.

    Exits with status 3 when the restart fails.
    """
    try:
        print("Restarting containerd")
        # BUG FIX: check_call() instead of call(), so a failed restart is
        # actually detected (call() never raises CalledProcessError).
        subprocess.check_call(
            ["sudo", "systemctl", "restart", "snap.microk8s.daemon-containerd"]
        )
    except subprocess.CalledProcessError:
        print(
            # Typo fix: "yry" -> "try".
            "Failed to restart containerd. Please, try to 'microk8s stop' and 'microk8s start' manually."
        )
        sys.exit(3)


def configure_containerd(kata_path):
    """
    Configure the containerd PATH so it finds the kata runtime binary.

    Rewrites the KATA_PATH= line of the containerd-env args file in place
    (via a temporary file, preserving the original file's mode bits).
    """
    snapdata_path = os.environ.get("SNAP_DATA")
    containerd_env_file = "{}/args/containerd-env".format(snapdata_path)
    # Create temp file, copy the config across while substituting KATA_PATH.
    fh, abs_path = mkstemp()
    with fdopen(fh, "w") as tmp_file:
        with open(containerd_env_file) as conf_file:
            for line in conf_file:
                if "KATA_PATH=" in line:
                    line = 'KATA_PATH="{}"\n'.format(kata_path)
                tmp_file.write(line)

    copymode(containerd_env_file, abs_path)
    remove(containerd_env_file)
    move(abs_path, containerd_env_file)


def print_next_steps():
    """Print a usage example showing how to run a pod with the kata runtime."""
    print()
    print()
    print("To use the kata runtime set the 'kata' runtimeClassName, eg:")
    print()
    print("apiVersion: v1")
    print("kind: Pod")
    print("metadata:")
    print(" name: nginx-kata")
    print("spec:")
    print(" runtimeClassName: kata")
    print(" containers:")
    print(" - name: nginx")
    print(" image: nginx")
    print()


@click.command()
@click.option(
    "--runtime-path",
    default="/opt/kata/bin",
    help="The path to the kata container runtime binaries.",
)
def kata(runtime_path):
    """
    Enable the kata runtime. Either snap install the kata binaries or use a path to already deployed
    kata binaries. Note the kata binary must be called kata-runtime
    """
    if not os.path.exists("{}/kata-runtime".format(runtime_path)):
        print("Kata runtime binaries was not found under {}.".format(runtime_path))
        print("Use the --runtime-path argument to point to the right location.")
        sys.exit(2)

    configure_containerd(runtime_path)
    restart_containerd()
    apply_runtime_manifest()
    mark_kata_enabled()
    print_next_steps()


if __name__ == "__main__":
    kata(prog_name="microk8s enable kata")
To reduce the settings run the following" 49 | echo "microk8s enable openebs -j 2" 50 | echo "REPLICAS CURRENTLY SET TO: ${JIVA_REPLICAS}" 51 | echo "" 52 | echo "" 53 | } 54 | 55 | # Check if iscsid is installed 56 | if ! is_strict && ! systemctl is-enabled iscsid | grep enabled &> /dev/null 57 | then 58 | echo "iscsid is not available or enabled." 59 | print_iscsi_help 60 | exit 61 | fi 62 | 63 | "$SNAP/microk8s-enable.wrapper" dns 64 | "$SNAP/microk8s-enable.wrapper" helm3 65 | 66 | # make sure the "openebs" namespace exist 67 | $KUBECTL create namespace "$OPENEBS_NS" > /dev/null 2>&1 || true 68 | 69 | 70 | $HELM repo add openebs https://openebs.github.io/charts 71 | $HELM repo update 72 | $HELM -n openebs install openebs openebs/openebs \ 73 | --version ${OPENEBS_VERSION} \ 74 | --set cstor.enabled=true \ 75 | --set jiva.enabled=true \ 76 | --set jiva.defaultPolicy.replicas=${JIVA_REPLICAS} \ 77 | --set legacy.enabled=false \ 78 | --set cstor.cleanup.image.tag="latest" \ 79 | --set cstor.csiNode.kubeletDir="$SNAP_COMMON/var/lib/kubelet/" \ 80 | --set jiva.csiNode.kubeletDir="$SNAP_COMMON/var/lib/kubelet/" \ 81 | --set localprovisioner.basePath="$SNAP_COMMON/var/openebs/local" \ 82 | --set ndm.sparse.path="$SNAP_COMMON/var/openebs/sparse" \ 83 | --set varDirectoryPath.baseDir="$SNAP_COMMON/var/openebs" 84 | 85 | echo "OpenEBS is installed" 86 | 87 | # Help sections 88 | echo "" 89 | echo "" 90 | echo "-----------------------" 91 | echo "" 92 | echo "When using OpenEBS with a single node MicroK8s, it is recommended to use the openebs-hostpath StorageClass" 93 | echo "An example of creating a PersistentVolumeClaim utilizing the openebs-hostpath StorageClass" 94 | echo "" 95 | echo "" 96 | echo "kind: PersistentVolumeClaim 97 | apiVersion: v1 98 | metadata: 99 | name: local-hostpath-pvc 100 | spec: 101 | storageClassName: openebs-hostpath 102 | accessModes: 103 | - ReadWriteOnce 104 | resources: 105 | requests: 106 | storage: 5G 107 | " 108 | echo "" 109 | echo "" 
#!/usr/bin/env bash

set -e

source $SNAP/actions/common/utils.sh


KNATIVE_VERSION="1.8.2"
KNATIVE_FUNC_VERSION="1.8.2"
KNATIVE_SERVING=true
KNATIVE_EVENTING=false
ARCH=$(arch)
KUBECTL="$SNAP/microk8s-kubectl.wrapper"

echo "Enabling Knative"

# Flags: --no-serving skips Knative Serving, --eventing also installs Eventing.
for i in "$@"
do
case $i in
    --no-serving)
    KNATIVE_SERVING=false
    shift # past argument
    ;;
    --eventing)
    KNATIVE_EVENTING=true
    shift # past argument
    ;;
    *)
    # unknown option
    ;;
esac
done

#requirement
"$SNAP/microk8s-enable.wrapper" dns

echo "Enabling Knative ${KNATIVE_VERSION}"
$KUBECTL apply -f https://github.com/knative/operator/releases/download/knative-v$KNATIVE_VERSION/operator.yaml

if [ "$KNATIVE_SERVING" = "true" ]; then
echo "Installing Knative Serving ${KNATIVE_VERSION}"
$KUBECTL apply -f- << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: knative-serving
---
apiVersion: operator.knative.dev/v1beta1
kind: KnativeServing
metadata:
  name: knative-serving
  namespace: knative-serving
spec:
  ingress:
    kourier:
      enabled: true
  config:
    network:
      ingress-class: "kourier.ingress.networking.knative.dev"
EOF
fi

if [ "$KNATIVE_EVENTING" = "true" ]; then
echo "Installing Knative Eventing ${KNATIVE_VERSION}"
$KUBECTL apply -f- << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: knative-eventing
---
apiVersion: operator.knative.dev/v1beta1
kind: KnativeEventing
metadata:
  name: knative-eventing
  namespace: knative-eventing
EOF
fi

# Handle Knative CLIs: fetch each plugin once into ${SNAP_COMMON}/plugins.
if [ ! -f "${SNAP_COMMON}/plugins/kn" ]; then
  # BUG FIX: the message used the malformed expansion "$SNAP_COMMON}".
  echo "Fetching Knative CLI version ${KNATIVE_VERSION} in ${SNAP_COMMON}/plugins/kn"
  run_with_sudo mkdir -p "${SNAP_COMMON}/plugins"
  fetch_as https://github.com/knative/client/releases/download/knative-v${KNATIVE_VERSION}/kn-linux-${ARCH} "${SNAP_COMMON}/plugins/kn"
  run_with_sudo chmod uo+x "${SNAP_COMMON}/plugins/kn"
fi
if [ ! -f "${SNAP_COMMON}/plugins/kn-admin" ]; then
  echo "Fetching Knative CLI Admin plugin version ${KNATIVE_VERSION} in ${SNAP_COMMON}/plugins/kn-admin"
  run_with_sudo mkdir -p "${SNAP_COMMON}/plugins"
  fetch_as https://github.com/knative-sandbox/kn-plugin-admin/releases/download/knative-v${KNATIVE_VERSION}/kn-admin-linux-${ARCH} "${SNAP_COMMON}/plugins/kn-admin"
  run_with_sudo chmod uo+x "${SNAP_COMMON}/plugins/kn-admin"
fi
if [ ! -f "${SNAP_COMMON}/plugins/kn-event" ]; then
  echo "Fetching Knative CLI Event plugin version ${KNATIVE_VERSION} in ${SNAP_COMMON}/plugins/kn-event"
  run_with_sudo mkdir -p "${SNAP_COMMON}/plugins"
  fetch_as https://github.com/knative-sandbox/kn-plugin-event/releases/download/knative-v${KNATIVE_VERSION}/kn-event-linux-${ARCH} "${SNAP_COMMON}/plugins/kn-event"
  run_with_sudo chmod uo+x "${SNAP_COMMON}/plugins/kn-event"
fi
# The func plugin only publishes amd64 Linux binaries.
if [ ! -f "${SNAP_COMMON}/plugins/kn-func" ] && [ "${ARCH}" = "amd64" ]; then
  echo "Fetching Knative Functions plugin version ${KNATIVE_FUNC_VERSION} in ${SNAP_COMMON}/plugins/kn-func"
  run_with_sudo mkdir -p "${SNAP_COMMON}/plugins"
  fetch_as https://github.com/knative/func/releases/download/knative-v${KNATIVE_FUNC_VERSION}/func_linux_${ARCH} "${SNAP_COMMON}/plugins/kn-func"
  run_with_sudo chmod uo+x "${SNAP_COMMON}/plugins/kn-func"
fi

if [ "$KNATIVE_SERVING" = "true" ]; then
  echo ""
  echo ""
  # BUG FIX: "metalb" -> "metallb" (this is the addon name users must type).
  echo "Enable metallb addon to get a loadbalancer for Knative Serving, for example microk8s enable metallb:10.64.140.43-10.64.140.49"
  echo ""
  echo "Configure the domain for Knative services using:"
  echo "kubectl get svc -n knative-serving kourier"
  # BUG FIX: the inner double quotes were unescaped, so the printed patch
  # command lost its JSON quoting; escape them so users can copy it verbatim.
  echo "kubectl patch configmap -n knative-serving config-domain -p '{\"data\": {\"EXTERNAL-IP.sslip.io\": \"\"}}'"
  echo ""
fi
echo ""
echo ""
echo "To be able to use 'microk8s kn ' such as 'func' add to env PATH the value ${SNAP_COMMON}/plugins"
echo ""
echo "Visit https://knative.dev/docs/install/operator/knative-with-operators/ for more Knative customizations"
echo ""
selector: 33 | matchLabels: 34 | app: emoji-svc 35 | strategy: {} 36 | template: 37 | metadata: 38 | annotations: 39 | linkerd.io/inject: enabled 40 | labels: 41 | app: emoji-svc 42 | spec: 43 | serviceAccountName: emoji 44 | containers: 45 | - env: 46 | - name: GRPC_PORT 47 | value: "8080" 48 | image: buoyantio/emojivoto-emoji-svc:v8 49 | name: emoji-svc 50 | ports: 51 | - containerPort: 8080 52 | name: grpc 53 | resources: 54 | requests: 55 | cpu: 100m 56 | status: {} 57 | --- 58 | apiVersion: v1 59 | kind: Service 60 | metadata: 61 | name: emoji-svc 62 | namespace: emojivoto 63 | spec: 64 | selector: 65 | app: emoji-svc 66 | clusterIP: None 67 | ports: 68 | - name: grpc 69 | port: 8080 70 | targetPort: 8080 71 | --- 72 | apiVersion: apps/v1 73 | kind: Deployment 74 | metadata: 75 | name: voting 76 | namespace: emojivoto 77 | spec: 78 | replicas: 1 79 | selector: 80 | matchLabels: 81 | app: voting-svc 82 | strategy: {} 83 | template: 84 | metadata: 85 | annotations: 86 | linkerd.io/inject: enabled 87 | labels: 88 | app: voting-svc 89 | spec: 90 | serviceAccountName: voting 91 | containers: 92 | - env: 93 | - name: GRPC_PORT 94 | value: "8080" 95 | image: buoyantio/emojivoto-voting-svc:v8 96 | name: voting-svc 97 | ports: 98 | - containerPort: 8080 99 | name: grpc 100 | resources: 101 | requests: 102 | cpu: 100m 103 | status: {} 104 | --- 105 | apiVersion: v1 106 | kind: Service 107 | metadata: 108 | name: voting-svc 109 | namespace: emojivoto 110 | spec: 111 | selector: 112 | app: voting-svc 113 | clusterIP: None 114 | ports: 115 | - name: grpc 116 | port: 8080 117 | targetPort: 8080 118 | --- 119 | apiVersion: apps/v1 120 | kind: Deployment 121 | metadata: 122 | name: web 123 | namespace: emojivoto 124 | spec: 125 | replicas: 1 126 | selector: 127 | matchLabels: 128 | app: web-svc 129 | strategy: {} 130 | template: 131 | metadata: 132 | annotations: 133 | linkerd.io/inject: enabled 134 | labels: 135 | app: web-svc 136 | spec: 137 | serviceAccountName: web 138 | 
containers: 139 | - env: 140 | - name: WEB_PORT 141 | value: "80" 142 | - name: EMOJISVC_HOST 143 | value: emoji-svc.emojivoto:8080 144 | - name: VOTINGSVC_HOST 145 | value: voting-svc.emojivoto:8080 146 | - name: INDEX_BUNDLE 147 | value: dist/index_bundle.js 148 | image: buoyantio/emojivoto-web:v8 149 | name: web-svc 150 | ports: 151 | - containerPort: 80 152 | name: http 153 | resources: 154 | requests: 155 | cpu: 100m 156 | status: {} 157 | --- 158 | apiVersion: v1 159 | kind: Service 160 | metadata: 161 | name: web-svc 162 | namespace: emojivoto 163 | spec: 164 | type: LoadBalancer 165 | selector: 166 | app: web-svc 167 | ports: 168 | - name: http 169 | port: 80 170 | targetPort: 80 171 | --- 172 | apiVersion: apps/v1 173 | kind: Deployment 174 | metadata: 175 | name: vote-bot 176 | namespace: emojivoto 177 | spec: 178 | replicas: 1 179 | selector: 180 | matchLabels: 181 | app: vote-bot 182 | strategy: {} 183 | template: 184 | metadata: 185 | annotations: 186 | linkerd.io/inject: enabled 187 | labels: 188 | app: vote-bot 189 | spec: 190 | containers: 191 | - command: 192 | - emojivoto-vote-bot 193 | env: 194 | - name: WEB_HOST 195 | value: web-svc.emojivoto:80 196 | image: buoyantio/emojivoto-web:v8 197 | name: vote-bot 198 | resources: 199 | requests: 200 | cpu: 10m 201 | status: {} 202 | --- 203 | 204 | -------------------------------------------------------------------------------- /addons/multus/multus.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: network-attachment-definitions.k8s.cni.cncf.io 6 | spec: 7 | group: k8s.cni.cncf.io 8 | scope: Namespaced 9 | names: 10 | plural: network-attachment-definitions 11 | singular: network-attachment-definition 12 | kind: NetworkAttachmentDefinition 13 | shortNames: 14 | - net-attach-def 15 | versions: 16 | - name: v1 17 | served: true 18 | storage: true 19 | schema: 20 | 
openAPIV3Schema: 21 | description: | 22 | NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing 23 | Working Group to express the intent for attaching pods to one or more logical or physical 24 | networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec 25 | type: object 26 | properties: 27 | apiVersion: 28 | description: | 29 | APIVersion defines the versioned schema of this representation 30 | of an object. Servers should convert recognized schemas to the 31 | latest internal value, and may reject unrecognized values. More info: 32 | https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 33 | type: string 34 | kind: 35 | description: | 36 | Kind is a string value representing the REST resource this 37 | object represents. Servers may infer this from the endpoint the client 38 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 39 | type: string 40 | metadata: 41 | type: object 42 | spec: 43 | description: "NetworkAttachmentDefinition spec defines the desired state of a network attachment" 44 | type: object 45 | properties: 46 | config: 47 | description: "NetworkAttachmentDefinition config is a JSON-formatted CNI configuration" 48 | type: string 49 | --- 50 | kind: ClusterRole 51 | apiVersion: rbac.authorization.k8s.io/v1 52 | metadata: 53 | name: multus 54 | rules: 55 | - apiGroups: ["k8s.cni.cncf.io"] 56 | resources: 57 | - "*" 58 | verbs: 59 | - "*" 60 | - apiGroups: 61 | - "" 62 | resources: 63 | - pods 64 | - pods/status 65 | verbs: 66 | - get 67 | - update 68 | - apiGroups: 69 | - "" 70 | - events.k8s.io 71 | resources: 72 | - events 73 | verbs: 74 | - create 75 | - patch 76 | - update 77 | --- 78 | kind: ClusterRoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | metadata: 81 | name: multus 82 | roleRef: 83 | apiGroup: 
rbac.authorization.k8s.io 84 | kind: ClusterRole 85 | name: multus 86 | subjects: 87 | - kind: ServiceAccount 88 | name: multus 89 | namespace: kube-system 90 | --- 91 | apiVersion: v1 92 | kind: ServiceAccount 93 | metadata: 94 | name: multus 95 | namespace: kube-system 96 | --- 97 | apiVersion: apps/v1 98 | kind: DaemonSet 99 | metadata: 100 | name: $DS_NAME 101 | namespace: kube-system 102 | labels: 103 | tier: node 104 | app: multus 105 | name: multus 106 | spec: 107 | selector: 108 | matchLabels: 109 | name: multus 110 | updateStrategy: 111 | type: RollingUpdate 112 | template: 113 | metadata: 114 | labels: 115 | tier: node 116 | app: multus 117 | name: multus 118 | spec: 119 | hostNetwork: true 120 | tolerations: 121 | - operator: Exists 122 | effect: NoSchedule 123 | serviceAccountName: multus 124 | containers: 125 | - name: kube-multus 126 | image: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9 127 | command: ["/entrypoint.sh"] 128 | args: 129 | - "--multus-conf-file=auto" 130 | - "--multus-kubeconfig-file-host=$SNAP_DATA/args/cni-network/multus.d/multus.kubeconfig" 131 | - "--cni-version=0.3.1" 132 | resources: 133 | requests: 134 | cpu: "100m" 135 | memory: "50Mi" 136 | limits: 137 | cpu: "100m" 138 | memory: "50Mi" 139 | volumeMounts: 140 | - name: cni 141 | mountPath: /host/etc/cni/net.d 142 | - name: cnibin 143 | mountPath: /host/opt/cni/bin 144 | initContainers: 145 | - name: install-multus-binary 146 | image: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9 147 | command: 148 | - "cp" 149 | - "/usr/src/multus-cni/bin/multus" 150 | - "/host/opt/cni/bin/multus" 151 | resources: 152 | requests: 153 | cpu: "10m" 154 | memory: "15Mi" 155 | securityContext: 156 | privileged: true 157 | volumeMounts: 158 | - name: cnibin 159 | mountPath: /host/opt/cni/bin 160 | mountPropagation: Bidirectional 161 | terminationGracePeriodSeconds: 10 162 | volumes: 163 | - name: cni 164 | hostPath: 165 | path: "$SNAP_DATA/args/cni-network/" 166 | - name: cnibin 167 | hostPath: 168 
| path: "$SNAP_DATA/opt/cni/bin/" 169 | -------------------------------------------------------------------------------- /addons/sosivio/enable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | # microk8s vars 6 | source $SNAP/actions/common/utils.sh 7 | CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) 8 | 9 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 10 | HELM="$SNAP/microk8s-helm3.wrapper" 11 | 12 | 13 | 14 | # script vars 15 | BLUE='\033[0;34m' 16 | NC='\033[0m' 17 | BOLD='\033[1m' 18 | 19 | NAMESPACE_SOSIVIO="sosivio" 20 | DASHBOARD_SERVICE="dashboard-lb" 21 | 22 | HELM_REPO_URL="https://helm.sosiv.io" 23 | HELM_REPO_NAME="sosivio" 24 | HELM_VERSION="1.7.1" 25 | WAIT_SECONDS=600 26 | DEBUG=0 27 | 28 | usage () { 29 | echo ' 30 | Sosivio Plugin for Microk8s 31 | 32 | For Additional information about Sosivio please infer our website: 33 | https:/sosiv.io 34 | Or The official Documentation: 35 | https://docs.sosiv.io 36 | 37 | Usage: 38 | microk8s enable sosivio 39 | 40 | Flags: 41 | -d enable debug prints 42 | -v string install a specific helm chart version (default to $HELM_VERSION) 43 | -u string the URL of the helm repo to fetch the charts from (default "$HELM_REPO_URL") 44 | -n string the name of the helm repo name to add (default "$HELM_REPO_NAME") 45 | -w int amount of time in seconds to wait for sosivio to become active (default "$WAIT_SECONDS") 46 | ' 47 | } 48 | 49 | # opt_error will print $1 input, Help and will exit with code 1 50 | opt_error() { 51 | echo $1 52 | usage 53 | exit 1 54 | } 55 | 56 | 57 | print_debug() { 58 | if [ $DEBUG -eq 1 ]; then 59 | echo "DEBUG: $1" 60 | fi 61 | } 62 | 63 | # install microk8s addons that are a prerequisite to Sosivio addon 64 | install_addons_prereq () { 65 | addons=("dns" "helm3") 66 | print_debug "enabling prerequisites addons..." 
67 | for addon in ${addons[@]}; do 68 | print_debug " enabling $addon" 69 | "$SNAP/microk8s-enable.wrapper" $addon &> /dev/null 70 | done 71 | print_debug "all prerequisites addons installed successfully" 72 | # "$SNAP/microk8s-enable.wrapper" dns 73 | # "$SNAP/microk8s-enable.wrapper" helm3 74 | } 75 | 76 | add_helm_repo () { 77 | print_debug "adding repo $HELM_REPO_NAME $HELM_REPO_URL" 78 | $HELM repo add $HELM_REPO_NAME $HELM_REPO_URL > /dev/null 2>&1 79 | print_debug "updating repo..." 80 | $HELM repo update $HELM_REPO_NAME > /dev/null 2>&1 81 | } 82 | 83 | install_soivio () { 84 | 85 | echo "" 86 | echo -e "${BLUE}Enabling Sosivio${NC}" 87 | echo "" 88 | 89 | print_debug " creating $NAMESPACE_SOSIVIO namespace" 90 | $KUBECTL create ns $NAMESPACE_SOSIVIO > /dev/null 2>&1 || true 91 | 92 | HELM_CMD="$HELM upgrade --install -n $NAMESPACE_SOSIVIO \ 93 | --set expose=LoadBalancer --set adminPassword=microsivio --version=$HELM_VERSION" 94 | print_debug "applying helm chart" 95 | $HELM_CMD sosivio $HELM_REPO_NAME/sosivio &> /dev/null 96 | 97 | } 98 | 99 | wait_for_sosivio () { 100 | 101 | echo "" 102 | echo "Waiting for Sosivio to become active..." 103 | echo "It might take a while if your network connection is slow." 104 | 105 | WATCH="$KUBECTL rollout status --watch" 106 | start_time=$(date +'%s') 107 | until $WATCH deploy -n ${NAMESPACE_SOSIVIO} sosivio-dashboard --watch > /dev/null 2>&1 108 | do 109 | if [ $(( $start_time + $WAIT_SECONDS )) -lt $(date +'%s') ] ; 110 | then 111 | echo "sosivio wait time exceeded ($WAIT_SECONDS seconds)" 112 | echo "please check your internet connection or run again with '-w' flag." 
113 | echo "check 'microk8s enable sosivio -h' for more details" 114 | echo "" 115 | echo "note: sosivio is still getting deployed, you can check if all the pods are running using:" 116 | echo "microk8s kubectl get pods -n $NAMESPACE_SOSIVIO" 117 | exit 1 118 | fi 119 | sleep 1 120 | done 121 | 122 | } 123 | 124 | 125 | print_finished () { 126 | echo "" 127 | echo "" 128 | echo "Sosivio is reachable using the following local address:" 129 | echo -e "${BOLD}http://localhost:$(${KUBECTL} get svc ${DASHBOARD_SERVICE} -n ${NAMESPACE_SOSIVIO} -o jsonpath='{.spec.ports[0].nodePort}')${NC}" 130 | echo "" 131 | echo "Alternatively you can use port-forward:" 132 | echo "'microk8s kubectl port-forward -n ${NAMESPACE_SOSIVIO} svc/${DASHBOARD_SERVICE} 8088'" 133 | echo "" 134 | echo "Your first login credentials are:" 135 | echo "Username: admin" 136 | echo "Password: $(${KUBECTL} get secret -n ${NAMESPACE_SOSIVIO} sosivio-admin-otp -o jsonpath='{.data.password}' | base64 -d)" 137 | echo "" 138 | echo "You can get this password again using the following command:" 139 | echo "microk8s kubectl get secret -n ${NAMESPACE_SOSIVIO} sosivio-admin-otp -o jsonpath='{.data.password}' | base64 -d" 140 | echo "" 141 | echo -e "${BLUE}Sosivio Enabled${NC}" 142 | } 143 | 144 | 145 | 146 | ################# main ################# 147 | 148 | 149 | while getopts "dv:u:n:h" opt; do 150 | case $opt in 151 | d) 152 | DEBUG=1 153 | ;; 154 | v) 155 | if [ -z $OPTARG ] ; then 156 | opt_error "error: -v flag requires parameter." 157 | fi 158 | HELM_VERSION=$OPTARG 159 | ;; 160 | u) 161 | if [ -z $OPTARG ] ; then 162 | opt_error "error: -u flag requires parameter." 163 | fi 164 | HELM_REPO_URL=$OPTARG 165 | ;; 166 | n) 167 | if [ -z $OPTARG ] ; then 168 | opt_error "error: -n flag requires parameter." 
169 | fi 170 | HELM_REPO_NAME=$OPTARG 171 | ;; 172 | *) 173 | opt_error "Invalid option: -$OPTARG" 174 | ;; 175 | esac 176 | done 177 | echo "Sosivio Version $HELM_VERSION" 178 | echo "hint: use -v flag to set a different version. (ex - microk8s enable sosivio -v x.y.z)" 179 | sleep 3 180 | 181 | if [ $DEBUG -eq 1 ]; then 182 | echo "debug flag set" >&2 183 | echo "repo url is set to $HELM_REPO_URL" 184 | echo "repo name is set to $HELM_REPO_NAME" 185 | fi 186 | 187 | install_addons_prereq 188 | add_helm_repo 189 | install_soivio 190 | wait_for_sosivio 191 | print_finished 192 | -------------------------------------------------------------------------------- /tests/test_sriov_device_plugin.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | import json 4 | import pytest 5 | import platform 6 | from importlib.util import spec_from_loader, module_from_spec 7 | from importlib.machinery import SourceFileLoader 8 | import pathlib 9 | from unittest import mock 10 | 11 | from subprocess import CalledProcessError, run 12 | from utils import kubectl, microk8s_disable, wait_for_pod_state 13 | 14 | KUBECTL = os.path.expandvars("$SNAP/microk8s-kubectl.wrapper") 15 | sriov_addon_path = ( 16 | pathlib.Path(os.path.dirname(__file__)).parent.absolute() 17 | / "addons" 18 | / "sriov-device-plugin" 19 | ) 20 | spec = spec_from_loader( 21 | "enable", 22 | SourceFileLoader( 23 | "enable", 24 | str(sriov_addon_path / "enable"), 25 | ), 26 | ) 27 | enable = module_from_spec(spec) 28 | spec.loader.exec_module(enable) 29 | 30 | 31 | class TestSRIOVDevicePlugin(unittest.TestCase): 32 | """SR-IOV Network Device Plugin relies on availability of given PCI devices, so we can only 33 | test for the exception being raised. 
34 | """ 35 | 36 | script_path = os.path.abspath(os.path.dirname(__file__)) 37 | resources_file_name = "sriov-device-plugin-test-resources-valid.json" 38 | resource_file = os.path.join(script_path, "resources", resources_file_name) 39 | with open(resource_file, "r") as f: 40 | resources = json.load(f) 41 | test_args = enable._TestArgs(enabled=True, resources=resources) 42 | 43 | @pytest.mark.skipif( 44 | platform.machine() != "x86_64", 45 | reason="SR-IOV Network Device Plugin tests are only relevant in x86 architectures", 46 | ) 47 | @pytest.mark.skipif( 48 | os.environ.get("STRICT") == "yes", 49 | reason="Skipping sriov-device-plugin tests in strict confinement as they are expected to fail", # noqa: E501 50 | ) 51 | def test_sriov_device_plugin_correctly_raises_error(self): 52 | """ 53 | Make sure enabling plugin fails when we have invalid PCI addresses. 54 | """ 55 | script_path = os.path.abspath(os.path.dirname(__file__)) 56 | sriov_resources_mapping_file_name = ( 57 | "sriov-device-plugin-test-resources-invalid.json" 58 | ) 59 | sriov_resources_mapping_file = os.path.join( 60 | script_path, 61 | "resources", 62 | sriov_resources_mapping_file_name, 63 | ) 64 | enable_sriov_dp_cmd = [ 65 | "/snap/bin/microk8s.enable", 66 | "sriov-device-plugin", 67 | "--resources-file", 68 | sriov_resources_mapping_file, 69 | ] 70 | print("Enabling SR-IOV Network Device Plugin") 71 | with self.assertRaises(CalledProcessError): 72 | run( 73 | enable_sriov_dp_cmd, 74 | check=True, 75 | ) 76 | 77 | print("Disabling SR-IOV Network Device Plugin") 78 | microk8s_disable("sriov-device-plugin") 79 | 80 | def mock_check_output(self, command, text=True): 81 | if command == ["lspci", "-s", "0000:00:06.0"]: 82 | return "something" 83 | elif command == ["lspci", "-s", "0000:00:07.0"]: 84 | return "something" 85 | elif command[:3] == [ 86 | KUBECTL, 87 | "apply", 88 | "-f", 89 | ]: 90 | if command[3] == os.path.join(sriov_addon_path, "sriovdp.yaml"): 91 | cmd = " ".join(command[1:]) 92 | 
return kubectl(cmd) 93 | elif command[3].startswith("/tmp/tmp"): 94 | cmd = " ".join(command[1:]) 95 | return kubectl(cmd) 96 | elif command == [KUBECTL, "get", "node", "-o", "json"]: 97 | return """{ 98 | "items": [ 99 | { 100 | "status": { 101 | "allocatable": { 102 | "intel.com/resource_a": "1", 103 | "intel.com/resource_b": "1" 104 | } 105 | } 106 | } 107 | ] 108 | }""" 109 | raise ValueError(f"Unmocked command: {command}") 110 | 111 | @pytest.mark.skipif( 112 | platform.machine() != "x86_64", 113 | reason="SR-IOV Network Device Plugin tests are only relevant in x86 architectures", 114 | ) 115 | @pytest.mark.skipif( 116 | os.environ.get("STRICT") == "yes", 117 | reason="Skipping sriov-device-plugin tests in strict confinement as they are expected to fail", # noqa: E501 118 | ) 119 | def test_sriov_device_plugin_works_correctly(self): 120 | """ 121 | Make sure plugin enables and disables successfully. 122 | """ 123 | print("Enabling SR-IOV Network Device Plugin") 124 | 125 | with mock.patch("subprocess.check_output") as mocked_subprocess: 126 | mocked_subprocess.side_effect = self.mock_check_output 127 | enable.main(test_args=self.test_args) 128 | 129 | mocked_subprocess.assert_any_call( 130 | ["lspci", "-s", "0000:00:06.0"], text=True 131 | ) 132 | mocked_subprocess.assert_any_call( 133 | ["lspci", "-s", "0000:00:07.0"], text=True 134 | ) 135 | mocked_subprocess.assert_any_call( 136 | [ 137 | KUBECTL, 138 | "apply", 139 | "-f", 140 | os.path.join(sriov_addon_path, "sriovdp.yaml"), 141 | ], 142 | text=True, 143 | ) 144 | mocked_subprocess.assert_any_call( 145 | [KUBECTL, "get", "node", "-o", "json"], text=True 146 | ) 147 | assert ( 148 | len(mocked_subprocess.call_args_list) == 6 149 | ), f"wrong number of `subprocess.check_output` calls, expected {6}, got {len(mocked_subprocess.call_args_list)}" 150 | 151 | wait_for_pod_state( 152 | "", "kube-system", label="name=sriov-device-plugin", desired_state="running" 153 | ) 154 | 155 | print("Disabling SR-IOV Network 
Device Plugin") 156 | microk8s_disable("sriov-device-plugin") 157 | -------------------------------------------------------------------------------- /addons/openebs/disable: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $SNAP/actions/common/utils.sh 6 | 7 | KUBECTL="$SNAP/microk8s-kubectl.wrapper" 8 | HELM="$SNAP/microk8s-helm3.wrapper" 9 | 10 | OPENEBS_NS="openebs" 11 | 12 | echo "Disabling OpenEBS" 13 | 14 | forceful_bdc_delete() { 15 | 16 | MESSAGE="Deleting BDC forcefully" 17 | 18 | if [[ $1 == "spc" ]] 19 | then 20 | EXTRA_LABELS="-l openebs.io/storage-pool-claim" 21 | MESSAGE="Deleting BDCs from SPCs forcefully" 22 | elif [[ $1 == "cspc" ]] 23 | then 24 | EXTRA_LABELS="-l openebs.io/cstor-pool-cluster" 25 | MESSAGE="Deleting BDCs from CSPCs forcefully" 26 | fi 27 | 28 | echo $MESSAGE 29 | OBJ_LIST=`$KUBECTL -n $OPENEBS_NS get blockdeviceclaims.openebs.io $EXTRA_LABELS -o=jsonpath='{.items[*].metadata.name}'` || true 30 | 31 | if [ -n "$OBJ_LIST" ] 32 | then 33 | $KUBECTL -n $OPENEBS_NS patch blockdeviceclaims.openebs.io ${OBJ_LIST} --type=json -p='[{"op":"remove", "path":"/metadata/finalizers"}]' || true 34 | $KUBECTL -n $OPENEBS_NS delete blockdeviceclaims.openebs.io ${OBJ_LIST} --timeout=60s --ignore-not-found || true 35 | fi 36 | } 37 | 38 | bd_remove_finalizer() { 39 | 40 | #OBJ_LIST=`$KUBECTL -n $OPENEBS_NS get blockdevice.openebs.io -o=jsonpath='{.items[?(@.status.claimState=="Claimed")].metadata.name}'` 41 | #$KUBECTL -n $OPENEBS_NS patch blockdevice.openebs.io ${OBJ_LIST} --type=json -p='[{"op":"replace", "path":"/status/claimState", "value":"Released"}]' || true 42 | 43 | #echo "Waiting for BlockDevice cleanup... 
(30 seconds)" 44 | #sleep 30 45 | 46 | OBJ_LIST=`$KUBECTL -n $OPENEBS_NS get blockdevice.openebs.io -o=jsonpath='{.items[?(@.status.claimState!="Unclaimed")].metadata.name}'` || true 47 | $KUBECTL -n $OPENEBS_NS patch blockdevice.openebs.io ${OBJ_LIST} --type=json -p='[{"op":"remove", "path":"/metadata/finalizers"}]' || true 48 | } 49 | 50 | disable_legacy() { 51 | 52 | echo "Deleting validatingwebhookconfiguration" 53 | $KUBECTL delete validatingwebhookconfiguration openebs-validation-webhook-cfg --ignore-not-found || true 54 | 55 | forceful_bdc_delete "spc" 56 | 57 | $KUBECTL delete storageclass openebs-jiva-default \ 58 | openebs-snapshot-promoter \ 59 | --timeout=60s --ignore-not-found || true 60 | } 61 | 62 | disable_cstor() { 63 | 64 | echo "Deleting OpenEBS cStor resources" 65 | $KUBECTL -n $OPENEBS_NS delete --all cstorpoolclusters.cstor.openebs.io --timeout=60s || CSPC_DEL_FAILED=$? 66 | 67 | if [ -n "$CSPC_DEL_FAILED" ] 68 | then 69 | echo "Deleting OpenEBS cStor validatingwebhookconfiguration" 70 | $KUBECTL delete validatingwebhookconfiguration openebs-cstor-validation-webhook --timeout=60s --ignore-not-found || true 71 | 72 | # Resources with Finalizers 73 | # cvr, cvc, cspi, cspc, cva 74 | OBJ_LIST="cstorvolumereplicas.cstor.openebs.io,cstorvolumeconfigs.cstor.openebs.io,cstorpoolinstances.cstor.openebs.io,cstorpoolclusters.cstor.openebs.io,cstorvolumeattachments.cstor.openebs.io" 75 | OBJ_LIST_FOUND=`$KUBECTL -n $OPENEBS_NS get $OBJ_LIST -o name` || true 76 | $KUBECTL -n $OPENEBS_NS patch $OBJ_LIST_FOUND --type=json -p='[{"op":"remove", "path":"/metadata/finalizers"}]' || true 77 | 78 | 79 | # Resources without Finalizers 80 | # cbackup, ccompletedbackup, crestore, cvp, cv 81 | # [Now patched] cvr, cvc, cspi, cspc, cva 82 | 
OBJ_LIST="cstorvolumereplicas.cstor.openebs.io,cstorvolumeconfigs.cstor.openebs.io,cstorpoolinstances.cstor.openebs.io,cstorpoolclusters.cstor.openebs.io,cstorbackups.cstor.openebs.io,cstorcompletedbackups.cstor.openebs.io,cstorrestores.cstor.openebs.io,cstorvolumeattachments.cstor.openebs.io,cstorvolumepolicies.cstor.openebs.io,cstorvolumes.cstor.openebs.io" 83 | OBJ_LIST_FOUND=`$KUBECTL -n $OPENEBS_NS get $OBJ_LIST -o name` || true 84 | $KUBECTL -n $OPENEBS_NS delete $OBJ_LIST_FOUND --timeout=60s --ignore-not-found || true 85 | 86 | forceful_bdc_delete "cspc" 87 | # Forceful cleanup does not wait for BlockDevice cleanup 88 | else 89 | echo "Waiting for BlockDevice cleanup... (30 seconds)" 90 | sleep 30 91 | fi 92 | } 93 | 94 | disable_openebs() { 95 | 96 | # LEGACY 97 | disable_legacy 98 | 99 | # CSTOR-CSI 100 | disable_cstor 101 | 102 | # BLOCKDEVICES and BLOCKDEVICECLAIMS 103 | BD_WITH_FINALIZER=`$KUBECTL -n $OPENEBS_NS get blockdevice.openebs.io -o=jsonpath='{.items[?(@.status.claimState!="Unclaimed")].metadata.name}'` || true 104 | if [ -n "$BD_WITH_FINALIZER" ] 105 | then 106 | forceful_bdc_delete 107 | bd_remove_finalizer 108 | fi 109 | 110 | # Helm chart 111 | $HELM uninstall openebs -n $OPENEBS_NS || true 112 | 113 | # Default StorageClasses 114 | $KUBECTL delete storageclass openebs-hostpath \ 115 | openebs-device \ 116 | openebs-jiva-csi-default \ 117 | --timeout=60s --ignore-not-found|| true 118 | 119 | KUBECTL_DELETE_ARGS="--wait=true --timeout=180s --ignore-not-found=true" 120 | $KUBECTL delete $KUBECTL_DELETE_ARGS namespace $OPENEBS_NS || true 121 | 122 | # CRDs 123 | $KUBECTL delete customresourcedefinition blockdeviceclaims.openebs.io \ 124 | blockdevices.openebs.io \ 125 | cstorbackups.cstor.openebs.io \ 126 | cstorcompletedbackups.cstor.openebs.io \ 127 | cstorpoolclusters.cstor.openebs.io \ 128 | cstorpoolinstances.cstor.openebs.io \ 129 | cstorrestores.cstor.openebs.io \ 130 | cstorvolumeattachments.cstor.openebs.io \ 131 | 
cstorvolumeconfigs.cstor.openebs.io \ 132 | cstorvolumepolicies.cstor.openebs.io \ 133 | cstorvolumereplicas.cstor.openebs.io \ 134 | cstorvolumes.cstor.openebs.io \ 135 | jivavolumepolicies.openebs.io \ 136 | jivavolumes.openebs.io \ 137 | migrationtasks.openebs.io \ 138 | upgradetasks.openebs.io \ 139 | --timeout=60s --ignore-not-found || true 140 | 141 | echo "OpenEBS disabled" 142 | echo "Manually clean up the directory $SNAP_COMMON/var/openebs/" 143 | } 144 | 145 | 146 | disable_openebs 147 | --------------------------------------------------------------------------------