├── .gitignore
├── clr-k8s-examples
│   ├── 8-kata
│   │   └── overlays
│   │       ├── 2.3.3
│   │       │   └── .gitkeep
│   │       ├── 2.4.0
│   │       │   └── .gitkeep
│   │       ├── 1.8.0-kernel-config
│   │       │   └── kustomization.yaml
│   │       ├── 1.8.2-kernel-config
│   │       │   └── kustomization.yaml
│   │       └── 1.9.1-kernel-config
│   │           └── kustomization.yaml
│   ├── 0-canal
│   │   └── overlays
│   │       ├── v3.18
│   │       │   └── kustomization.yaml
│   │       ├── v3.10
│   │       │   └── kustomization.yaml
│   │       ├── v3.22
│   │       │   └── kustomization.yaml
│   │       ├── v3.24
│   │       │   └── kustomization.yaml
│   │       ├── v3.9
│   │       │   └── kustomization.yaml
│   │       └── v3.3
│   │           └── kustomization.yaml
│   ├── 0-cilium
│   │   └── overlays
│   │       ├── v1.6.4
│   │       │   └── kustomization.yaml
│   │       ├── v1.6
│   │       │   └── kustomization.yaml
│   │       ├── v1.9.13
│   │       │   └── kustomization.yaml
│   │       └── v1.9
│   │           └── values.yaml
│   ├── admit-kata
│   │   ├── versions
│   │   ├── deploy
│   │   │   ├── webhook-registration.yaml.tpl
│   │   │   └── webhook.yaml
│   │   ├── create-certs.sh
│   │   └── README.md
│   ├── 0-flannel
│   │   └── overlays
│   │       ├── v0.16.3
│   │       │   └── kustomization.yaml
│   │       ├── v0.14.0-rc1
│   │       │   └── kustomization.yaml
│   │       ├── 16b0fe66285d1ad1f42b154ab852682f6fafb1a7
│   │       │   └── kustomization.yaml
│   │       └── 960b3243b9a7faccdfe7b3c09097105e68030ea7
│   │           └── kustomization.yaml
│   ├── 2-dashboard
│   │   └── overlays
│   │       ├── v2.0.0-beta2
│   │       │   └── kustomization.yaml
│   │       ├── v1.10.1
│   │       │   └── kustomization.yaml
│   │       └── v2.6.1
│   │           ├── kustomization.yaml
│   │           └── dashboard-admin.yaml
│   ├── 5-ingres-lb
│   │   └── overlays
│   │       ├── controller-v1.3.0
│   │       │   └── kustomization.yaml
│   │       ├── nginx-0.26.1
│   │       │   └── kustomization.yaml
│   │       ├── nginx-0.25.1
│   │       │   └── kustomization.yaml
│   │       └── nginx-0.25.0
│   │           ├── patch_clusterrole.yaml
│   │           └── kustomization.yaml
│   ├── 1-core-metrics
│   │   └── overlays
│   │       ├── v0.6.1
│   │       │   ├── patch_metricstls.yaml
│   │       │   └── kustomization.yaml
│   │       ├── v0.3.3
│   │       │   └── kustomization.yaml
│   │       ├── v0.3.5
│   │       │   └── kustomization.yaml
│   │       └── v0.3.6
│   │           └── kustomization.yaml
│   ├── 7-rook
│   │   └── overlays
│   │       ├── v1.8.6
│   │       │   ├── multinode
│   │       │   │   └── kustomization.yaml
│   │       │   └── standalone
│   │       │       ├── kustomization.yaml
│   │       │       └── patch_cephcluster.yaml
│   │       ├── v1.8.10
│   │       │   ├── multinode
│   │       │   │   ├── kustomization.yaml
│   │       │   │   └── probe_timeout.yaml
│   │       │   └── standalone
│   │       │       ├── kustomization.yaml
│   │       │       ├── patch_cephcluster.yaml
│   │       │       └── probe_timeout.yaml
│   │       ├── v1.0.3
│   │       │   ├── multinode
│   │       │   │   ├── patch_cephcluster.yaml
│   │       │   │   ├── patch_operator.yaml
│   │       │   │   └── kustomization.yaml
│   │       │   └── standalone
│   │       │       ├── patch_cephcluster.yaml
│   │       │       ├── patch_operator.yaml
│   │       │       └── kustomization.yaml
│   │       ├── v1.1.0
│   │       │   ├── multinode
│   │       │   │   ├── patch_cephcluster.yaml
│   │       │   │   └── kustomization.yaml
│   │       │   └── standalone
│   │       │       ├── kustomization.yaml
│   │       │       └── patch_cephcluster.yaml
│   │       ├── v1.1.1
│   │       │   ├── multinode
│   │       │   │   ├── patch_cephcluster.yaml
│   │       │   │   └── kustomization.yaml
│   │       │   └── standalone
│   │       │       ├── kustomization.yaml
│   │       │       └── patch_cephcluster.yaml
│   │       ├── v1.1.7
│   │       │   ├── multinode
│   │       │   │   ├── patch_cephcluster.yaml
│   │       │   │   └── kustomization.yaml
│   │       │   └── standalone
│   │       │       ├── kustomization.yaml
│   │       │       └── patch_cephcluster.yaml
│   │       └── v1.2.6
│   │           ├── multinode
│   │           │   ├── patch_cephcluster.yaml
│   │           │   └── kustomization.yaml
│   │           └── standalone
│   │               ├── kustomization.yaml
│   │               └── patch_cephcluster.yaml
│   ├── tests
│   │   ├── autoscale
│   │   │   ├── test-autoscale.sh
│   │   │   ├── load-gen.yaml
│   │   │   └── php.yaml
│   │   ├── cpumanager
│   │   │   ├── test-cpumanager.sh
│   │   │   └── test-cpumanager.yaml.tmpl
│   │   ├── deploy-svc-ing
│   │   │   ├── test-ingress-kata.yaml
│   │   │   ├── test-ingress-runc.yaml
│   │   │   ├── test-deploy-runc.yaml
│   │   │   ├── test-deploy-kata-fc.yaml
│   │   │   └── test-deploy-kata-qemu.yaml
│   │   ├── e2e
│   │   │   └── run_e2e.sh
│   │   └── pvc
│   │       └── wordpress.yaml
│   ├── hack
│   │   ├── ceph_status.sh
│   │   └── update_checker.sh
│   ├── 9-multi-network
│   │   ├── test
│   │   │   ├── pod.yaml
│   │   │   ├── bridge
│   │   │   │   ├── 1-pod-bridge.yaml
│   │   │   │   └── 0-bridge-net.yaml
│   │   │   └── sriov
│   │   │       ├── Dockerfile
│   │   │       ├── 1-pod-sriov.yaml
│   │   │       ├── 0-sriov-net.yaml
│   │   │       └── 2-pod-dpdk-ver.yaml
│   │   ├── systemd
│   │   │   ├── sriov.service
│   │   │   └── sriov.sh
│   │   ├── sriov-conf.yaml
│   │   ├── Dockerfile
│   │   ├── cni
│   │   │   └── vfioveth
│   │   ├── README.md
│   │   └── multus-sriov-ds.yaml
│   ├── 6-metal-lb
│   │   └── overlays
│   │       ├── v0.8.1
│   │       │   ├── kustomization.yaml
│   │       │   └── patch_configmap.yaml
│   │       ├── v0.8.3
│   │       │   ├── kustomization.yaml
│   │       │   └── patch_configmap.yaml
│   │       └── v0.7.3
│   │           ├── kustomization.yaml
│   │           └── patch_configmap.yaml
│   ├── node-feature-discovery
│   │   └── overlays
│   │       └── v0.4.0
│   │           └── kustomization.yaml
│   ├── kubeadm.yaml
│   ├── 3-efk
│   │   └── overlays
│   │       ├── 193692c92eb4667b8f4fb7d4cdf0462e229b5f13
│   │       │   └── kustomization.yaml
│   │       └── v1.15.1
│   │           └── kustomization.yaml
│   ├── node-problem-detector
│   │   └── overlays
│   │       └── v0.6.6
│   │           ├── kustomization.yaml
│   │           ├── patch_configmap_mce-monitor.yaml
│   │           └── patch_configmap_rules.yaml
│   ├── haproxy.cfg.example
│   ├── setup_kata_firecracker.sh
│   ├── containerd_devmapper_setup.sh
│   ├── reset_stack.sh
│   ├── vagrant.md
│   ├── 4-kube-prometheus
│   │   └── overlays
│   │       ├── v0.2.0
│   │       │   └── kustomization.yaml
│   │       ├── v0.1.0
│   │       │   └── kustomization.yaml
│   │       ├── f458e85e5d7675f7bc253072e1b4c8892b51af0f
│   │       │   └── kustomization.yaml
│   │       └── v0.10.0
│   │           └── kustomization.yaml
│   ├── Vagrantfile
│   ├── DEVELOP.md
│   └── setup_system.sh
├── metrics
│   ├── collectd
│   │   ├── Dockerfile
│   │   ├── collectd.conf
│   │   ├── collectd.yaml
│   │   └── collectd.bash
│   ├── report
│   │   ├── report_dockerfile
│   │   │   ├── test.R
│   │   │   ├── elasticsearchr.R
│   │   │   ├── html.Rmd
│   │   │   ├── genreport.sh
│   │   │   ├── pdf.Rmd
│   │   │   ├── Dockerfile
│   │   │   ├── metrics_report.Rmd
│   │   │   ├── node-info.R
│   │   │   ├── dut-details.R
│   │   │   └── parallel.R
│   │   ├── grabdata.sh
│   │   ├── README.md
│   │   └── makereport.sh
│   ├── scaling
│   │   ├── net-serve.yaml.in
│   │   ├── stats.yaml
│   │   ├── common.bash
│   │   ├── bb.yaml.in
│   │   ├── bb.json.in
│   │   ├── net-serve.json.in
│   │   ├── README.md
│   │   ├── k8s_scale_rapid.sh
│   │   └── k8s_parallel.sh
│   ├── lib
│   │   ├── cpu_load_daemonset.yaml.in
│   │   ├── k8s-api.bash
│   │   ├── common.bash
│   │   ├── cpu-load.bash
│   │   └── cpu-load.md
│   └── README.md
├── Makefile
├── README.md
└── Jenkinsfile
/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | OVMF.fd
3 |
4 |
--------------------------------------------------------------------------------
/clr-k8s-examples/8-kata/overlays/2.3.3/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/clr-k8s-examples/8-kata/overlays/2.4.0/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-canal/overlays/v3.18/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - canal/canal.yaml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-canal/overlays/v3.10/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - canal/canal.yaml
3 |
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-canal/overlays/v3.22/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - canal/canal.yaml
3 |
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-canal/overlays/v3.24/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - canal/canal.yaml
3 |
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-canal/overlays/v3.9/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - canal/canal.yaml
3 |
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-cilium/overlays/v1.6.4/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 |   - cilium/cilium.yaml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-cilium/overlays/v1.6/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 |   - cilium/cilium.yaml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-cilium/overlays/v1.9.13/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 |   - cilium/cilium.yaml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-canal/overlays/v3.3/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - canal/canal.yaml
3 | - canal/rbac.yaml
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/admit-kata/versions:
--------------------------------------------------------------------------------
1 | https://github.com/kata-containers/tests/tree/master/kata-webhook
2 | Commit: 5ad2cec
3 |
--------------------------------------------------------------------------------
/metrics/collectd/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:latest
2 |
3 | RUN apk update && apk add collectd
4 |
5 | CMD [ "collectd", "-f" ]
6 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-flannel/overlays/v0.16.3/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - flannel/Documentation/kube-flannel.yml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-flannel/overlays/v0.14.0-rc1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - flannel/Documentation/kube-flannel.yml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/2-dashboard/overlays/v2.0.0-beta2/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - dashboard/aio/deploy/recommended.yaml
3 |
4 |
--------------------------------------------------------------------------------
/clr-k8s-examples/2-dashboard/overlays/v1.10.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - dashboard/src/deploy/recommended/kubernetes-dashboard.yaml
3 |
4 |
--------------------------------------------------------------------------------
/clr-k8s-examples/2-dashboard/overlays/v2.6.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - dashboard/aio/deploy/recommended.yaml
3 | - dashboard-admin.yaml
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/5-ingres-lb/overlays/controller-v1.3.0/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - ingress-nginx/deploy/static/provider/baremetal/deploy.yaml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-flannel/overlays/16b0fe66285d1ad1f42b154ab852682f6fafb1a7/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - flannel/Documentation/kube-flannel.yml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-flannel/overlays/960b3243b9a7faccdfe7b3c09097105e68030ea7/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - flannel/Documentation/kube-flannel.yml
3 |
--------------------------------------------------------------------------------
/clr-k8s-examples/1-core-metrics/overlays/v0.6.1/patch_metricstls.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: "/spec/template/spec/containers/0/args/-"
3 | value: --kubelet-insecure-tls
4 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.6/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/deploy/examples/cluster.yaml
3 | - rook/deploy/examples/csi/rbd/storageclass.yaml
4 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/autoscale/test-autoscale.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")"
4 | kubectl apply -f "$SCRIPT_DIR"
5 | watch kubectl describe hpa
6 |
--------------------------------------------------------------------------------
/clr-k8s-examples/0-cilium/overlays/v1.9/values.yaml:
--------------------------------------------------------------------------------
1 | ipam:
2 | mode: "cluster-pool"
3 | operator:
4 | clusterPoolIPv4PodCIDR: "10.244.0.0/16"
5 | clusterPoolIPv4MaskSize: 24
6 |
--------------------------------------------------------------------------------
/clr-k8s-examples/hack/ceph_status.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # find the rook-ceph-tools pod, stripping the "pod/" prefix from its name
3 | name=$(kubectl get po -o name -n rook-ceph | grep tools | cut -c 5-)
4 | kubectl exec -it "${name}" -n rook-ceph -- ceph status
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/5-ingres-lb/overlays/nginx-0.26.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - ingress-nginx/deploy/static/mandatory.yaml
3 | - ingress-nginx/deploy/static/provider/baremetal/service-nodeport.yaml
4 |
--------------------------------------------------------------------------------
/clr-k8s-examples/5-ingres-lb/overlays/nginx-0.25.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - ingress-nginx/deploy/static/mandatory.yaml
3 | - ingress-nginx/deploy/static/provider/baremetal/service-nodeport.yaml
4 |
5 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: test
6 | spec:
7 | containers:
8 | - name: busy
9 | image: busybox
10 | command: [ "top" ]
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/6-metal-lb/overlays/v0.8.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - metallb/manifests/example-layer2-config.yaml
3 | - metallb/manifests/metallb.yaml
4 |
5 | patchesStrategicMerge:
6 | - patch_configmap.yaml
7 |
--------------------------------------------------------------------------------
/clr-k8s-examples/6-metal-lb/overlays/v0.8.3/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - metallb/manifests/example-layer2-config.yaml
3 | - metallb/manifests/metallb.yaml
4 |
5 | patchesStrategicMerge:
6 | - patch_configmap.yaml
7 |
--------------------------------------------------------------------------------
/clr-k8s-examples/node-feature-discovery/overlays/v0.4.0/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - node-feature-discovery/nfd-daemonset-combined.yaml.template
3 | - node-feature-discovery/nfd-worker-daemonset.yaml.template
4 |
5 |
6 |
--------------------------------------------------------------------------------
/clr-k8s-examples/6-metal-lb/overlays/v0.7.3/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - metallb/manifests/example-layer2-config.yaml
3 | - metallb/manifests/metallb.yaml
4 |
5 | patchesStrategicMerge:
6 | - patch_configmap.yaml
7 |
8 |
9 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.10/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/deploy/examples/cluster.yaml
3 | - rook/deploy/examples/csi/rbd/storageclass.yaml
4 |
5 | patchesStrategicMerge:
6 | - probe_timeout.yaml
7 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | DESTDIR ?= /
2 | PREFIX ?= /usr
3 | TARGET ?= $(PREFIX)/share/
4 |
5 | all:
6 |
7 | install:
8 | install -m 0755 -d $(DESTDIR)/$(TARGET)/clr-k8s-examples
9 | cp -r clr-k8s-examples/* $(DESTDIR)/$(TARGET)/clr-k8s-examples/
10 |
--------------------------------------------------------------------------------
/clr-k8s-examples/5-ingres-lb/overlays/nginx-0.25.0/patch_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | # adds "networking.k8s.io" to apiGroups for the ingress rules; it is missing in 0.25.0
2 | - op: add
3 | path: /rules/3/apiGroups/-
4 | value: "networking.k8s.io"
5 |
6 |
--------------------------------------------------------------------------------
/clr-k8s-examples/1-core-metrics/overlays/v0.6.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - components.yaml
3 | patchesJson6902:
4 | - target:
5 |     group: apps
6 |     version: v1
7 |     kind: Deployment
8 |     name: metrics-server
9 |   path: patch_metricstls.yaml
10 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.0.3/multinode/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ceph.rook.io/v1
2 | kind: CephCluster
3 | metadata:
4 | name: rook-ceph
5 | namespace: rook-ceph
6 | spec:
7 | storage:
8 | directories:
9 | - path: /var/lib/rook
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.0.3/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ceph.rook.io/v1
2 | kind: CephCluster
3 | metadata:
4 | name: rook-ceph
5 | namespace: rook-ceph
6 | spec:
7 | storage:
8 | directories:
9 | - path: /var/lib/rook
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/test.R:
--------------------------------------------------------------------------------
1 |
2 | suppressMessages(library(jsonlite)) # to load the data.
3 |
4 | options(digits=22)
5 |
6 | x=fromJSON('{"ns": 1567002188374607769}')
7 |
8 | print(x)
9 | print(fromJSON('{"ns": 1567002188374607769}'), digits=22)
10 |
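11 | # Note: 1567002188374607769 exceeds 2^53, the largest integer a double can
12 | # represent exactly, so fromJSON() necessarily rounds it; options(digits=22)
13 | # only makes the rounding visible, it cannot recover the lost precision.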
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.0/multinode/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | storage:
9 | directories:
10 | - path: /var/lib/rook
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.1/multinode/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | storage:
9 | directories:
10 | - path: /var/lib/rook
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.7/multinode/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | storage:
9 | directories:
10 | - path: /var/lib/rook
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.2.6/multinode/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | storage:
9 | directories:
10 | - path: /var/lib/rook
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/6-metal-lb/overlays/v0.7.3/patch_configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: config
5 | data:
6 | config: |
7 | address-pools:
8 | - name: my-ip-space
9 | protocol: layer2
10 | addresses:
11 | - 10.0.0.240/28
12 |
--------------------------------------------------------------------------------
/clr-k8s-examples/6-metal-lb/overlays/v0.8.1/patch_configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: config
5 | data:
6 | config: |
7 | address-pools:
8 | - name: my-ip-space
9 | protocol: layer2
10 | addresses:
11 | - 10.0.0.240/28
12 |
--------------------------------------------------------------------------------
/clr-k8s-examples/6-metal-lb/overlays/v0.8.3/patch_configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: config
5 | data:
6 | config: |
7 | address-pools:
8 | - name: my-ip-space
9 | protocol: layer2
10 | addresses:
11 | - 10.0.0.240/28
12 |
--------------------------------------------------------------------------------
/clr-k8s-examples/8-kata/overlays/1.8.0-kernel-config/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - packaging/kata-deploy/kata-deploy.yaml
3 | - packaging/kata-deploy/kata-rbac.yaml
4 | - packaging/kata-deploy/k8s-1.14/kata-fc-runtimeClass.yaml
5 | - packaging/kata-deploy/k8s-1.14/kata-qemu-runtimeClass.yaml
6 |
7 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/systemd/sriov.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Create VFs on ens785f0 (netdev) ens785f1 (vfio) interfaces
3 |
4 | [Service]
5 | Type=oneshot
6 | ExecStart=/usr/bin/sriov.sh ens785f0
7 | ExecStart=/usr/bin/sriov.sh -b ens785f1
8 |
9 | [Install]
10 | WantedBy=default.target
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.6/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/deploy/examples/cluster.yaml
3 | - rook/deploy/examples/csi/rbd/storageclass.yaml
4 |
5 | patchesStrategicMerge:
6 | # patches rook to use 'directories' instead of partitions.
7 | # comment out to use partitions
8 | - patch_cephcluster.yaml
9 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/autoscale/load-gen.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: load-generator
5 | spec:
6 | containers:
7 | - command: ["/bin/sh", "-c"]
8 | args:
9 | - while true; do wget -q -O- http://php-apache-test; done;
10 | image: busybox
11 | imagePullPolicy: Always
12 | name: load-generator
13 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/bridge/1-pod-bridge.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: test-bridge
6 | annotations:
7 | k8s.v1.cni.cncf.io/networks: '[
8 | { "name": "mynet", "interface": "mynet" }
9 | ]'
10 | spec:
11 | containers:
12 | - name: busy
13 | image: busybox
14 | command: [ "top" ]
15 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.10/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/deploy/examples/cluster.yaml
3 | - rook/deploy/examples/csi/rbd/storageclass.yaml
4 |
5 | patchesStrategicMerge:
6 | # patches rook to use 'directories' instead of partitions.
7 | # comment out to use partitions
8 | - patch_cephcluster.yaml
9 | - probe_timeout.yaml
10 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.0.3/multinode/patch_operator.yaml:
--------------------------------------------------------------------------------
1 | # operator
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: rook-ceph-operator
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: rook-ceph-operator
11 | env:
12 | - name: FLEXVOLUME_DIR_PATH
13 | value: "/var/lib/kubelet/volume-plugins"
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.0.3/standalone/patch_operator.yaml:
--------------------------------------------------------------------------------
1 | # operator
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: rook-ceph-operator
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: rook-ceph-operator
11 | env:
12 | - name: FLEXVOLUME_DIR_PATH
13 | value: "/var/lib/kubelet/volume-plugins"
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/bridge/0-bridge-net.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: "k8s.cni.cncf.io/v1"
3 | kind: NetworkAttachmentDefinition
4 | metadata:
5 | name: mynet
6 | spec:
7 | config: '{
8 | "name": "mynet",
9 | "type": "bridge",
10 | "bridge": "mynet",
11 | "ipam": {
12 | "type": "host-local",
13 | "subnet": "198.18.0.0/24"
14 | }
15 | }'
16 |
17 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.10/multinode/probe_timeout.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | healthCheck:
9 | startupProbe:
10 | osd:
11 | probe:
12 | timeoutSeconds: 120
13 | initialDelaySeconds: 100
14 | periodSeconds: 10
15 | failureThreshold: 10
16 | successThreshold: 1
17 |
--------------------------------------------------------------------------------
/clr-k8s-examples/8-kata/overlays/1.8.2-kernel-config/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - packaging/kata-deploy/kata-deploy.yaml
3 | - packaging/kata-deploy/kata-rbac.yaml
4 | - packaging/kata-deploy/k8s-1.14/kata-fc-runtimeClass.yaml
5 | - packaging/kata-deploy/k8s-1.14/kata-qemu-runtimeClass.yaml
6 |
7 | images:
8 | # change 'latest' to specified version
9 | - name: katadocker/kata-deploy
10 | newName: katadocker/kata-deploy
11 | newTag: 1.8.2
--------------------------------------------------------------------------------
/clr-k8s-examples/8-kata/overlays/1.9.1-kernel-config/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - packaging/kata-deploy/kata-deploy.yaml
3 | - packaging/kata-deploy/kata-rbac.yaml
4 | - packaging/kata-deploy/k8s-1.14/kata-fc-runtimeClass.yaml
5 | - packaging/kata-deploy/k8s-1.14/kata-qemu-runtimeClass.yaml
6 |
7 | images:
8 | # change 'latest' to specified version
9 | - name: katadocker/kata-deploy
10 | newName: katadocker/kata-deploy
11 | newTag: 1.9.1
12 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.10/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | mon:
9 | allowMultiplePerNode: true
10 | ---
11 | apiVersion: ceph.rook.io/v1
12 | kind: CephBlockPool
13 | metadata:
14 | name: replicapool
15 | namespace: rook-ceph
16 | spec:
17 | replicated:
18 | requireSafeReplicaSize: false
19 | size: 1
20 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.6/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | mon:
9 | allowMultiplePerNode: true
10 | ---
11 | apiVersion: ceph.rook.io/v1
12 | kind: CephBlockPool
13 | metadata:
14 | name: replicapool
15 | namespace: rook-ceph
16 | spec:
17 | replicated:
18 | requireSafeReplicaSize: false
19 | size: 1
20 |
--------------------------------------------------------------------------------
/clr-k8s-examples/1-core-metrics/overlays/v0.3.3/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - metrics-server/deploy/1.8+/aggregated-metrics-reader.yaml
3 | - metrics-server/deploy/1.8+/auth-delegator.yaml
4 | - metrics-server/deploy/1.8+/auth-reader.yaml
5 | - metrics-server/deploy/1.8+/metrics-apiservice.yaml
6 | - metrics-server/deploy/1.8+/metrics-server-deployment.yaml
7 | - metrics-server/deploy/1.8+/metrics-server-service.yaml
8 | - metrics-server/deploy/1.8+/resource-reader.yaml
9 |
--------------------------------------------------------------------------------
/clr-k8s-examples/1-core-metrics/overlays/v0.3.5/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - metrics-server/deploy/1.8+/aggregated-metrics-reader.yaml
3 | - metrics-server/deploy/1.8+/auth-delegator.yaml
4 | - metrics-server/deploy/1.8+/auth-reader.yaml
5 | - metrics-server/deploy/1.8+/metrics-apiservice.yaml
6 | - metrics-server/deploy/1.8+/metrics-server-deployment.yaml
7 | - metrics-server/deploy/1.8+/metrics-server-service.yaml
8 | - metrics-server/deploy/1.8+/resource-reader.yaml
9 |
--------------------------------------------------------------------------------
/clr-k8s-examples/1-core-metrics/overlays/v0.3.6/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - metrics-server/deploy/1.8+/aggregated-metrics-reader.yaml
3 | - metrics-server/deploy/1.8+/auth-delegator.yaml
4 | - metrics-server/deploy/1.8+/auth-reader.yaml
5 | - metrics-server/deploy/1.8+/metrics-apiservice.yaml
6 | - metrics-server/deploy/1.8+/metrics-server-deployment.yaml
7 | - metrics-server/deploy/1.8+/metrics-server-service.yaml
8 | - metrics-server/deploy/1.8+/resource-reader.yaml
9 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.0/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.1/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.7/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.2.6/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
--------------------------------------------------------------------------------
/clr-k8s-examples/5-ingres-lb/overlays/nginx-0.25.0/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - ingress-nginx/deploy/static/mandatory.yaml
3 | - ingress-nginx/deploy/static/provider/baremetal/service-nodeport.yaml
4 |
5 | patchesJson6902:
6 | # adds "networking.k8s.io" to ClusterRole's apiGroups
7 | - target:
8 | group: rbac.authorization.k8s.io
9 | version: v1
10 | kind: ClusterRole
11 | name: nginx-ingress-clusterrole
12 | path: patch_clusterrole.yaml
13 |
14 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.0/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.1/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.7/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.2.6/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | # patches rook to use 'directories' instead of partitions.
9 | # comment out to use partitions
10 | - patch_cephcluster.yaml
11 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.0.3/multinode/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | - patch_operator.yaml
9 | # patches rook to use 'directories' instead of partitions.
10 | # comment out to use partitions
11 | - patch_cephcluster.yaml
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.0.3/standalone/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - rook/cluster/examples/kubernetes/ceph/common.yaml
3 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
4 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
5 | - rook/cluster/examples/kubernetes/ceph/storageclass.yaml
6 |
7 | patchesStrategicMerge:
8 | - patch_operator.yaml
9 | # patches rook to use 'directories' instead of partitions.
10 | # comment out to use partitions
11 | - patch_cephcluster.yaml
--------------------------------------------------------------------------------
/clr-k8s-examples/kubeadm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubeadm.k8s.io/v1beta3
2 | kind: InitConfiguration
3 | ---
4 | apiVersion: kubelet.config.k8s.io/v1beta1
5 | kind: KubeletConfiguration
6 | cgroupDriver: systemd
7 | systemReserved:
8 | cpu: 500m
9 | memory: 256M
10 | kubeReserved:
11 | cpu: 500m
12 | memory: 256M
13 | ---
14 | apiVersion: kubeadm.k8s.io/v1beta3
15 | kind: ClusterConfiguration
16 | networking:
17 | dnsDomain: cluster.local
18 | podSubnet: 10.244.0.0/16
19 | serviceSubnet: 10.96.0.0/12
20 |
21 |
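22 | # Typical usage (illustrative; environment-specific flags may be needed):
23 | #   sudo kubeadm init --config kubeadm.yaml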
--------------------------------------------------------------------------------
/clr-k8s-examples/2-dashboard/overlays/v2.6.1/dashboard-admin.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: admin-user
6 | namespace: kubernetes-dashboard
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRoleBinding
10 | metadata:
11 | name: admin-user
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: cluster-admin
16 | subjects:
17 | - kind: ServiceAccount
18 | name: admin-user
19 | namespace: kubernetes-dashboard
20 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.0/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | mon:
9 | allowMultiplePerNode: true
10 | storage:
11 | directories:
12 | - path: /var/lib/rook
13 | ---
14 | apiVersion: ceph.rook.io/v1
15 | kind: CephBlockPool
16 | metadata:
17 | name: replicapool
18 | namespace: rook-ceph
19 | spec:
20 | replicated:
21 | requireSafeReplicaSize: false
22 | size: 1
23 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.1/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | mon:
9 | allowMultiplePerNode: true
10 | storage:
11 | directories:
12 | - path: /var/lib/rook
13 | ---
14 | apiVersion: ceph.rook.io/v1
15 | kind: CephBlockPool
16 | metadata:
17 | name: replicapool
18 | namespace: rook-ceph
19 | spec:
20 | replicated:
21 | requireSafeReplicaSize: false
22 | size: 1
23 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.1.7/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | mon:
9 | allowMultiplePerNode: true
10 | storage:
11 | directories:
12 | - path: /var/lib/rook
13 | ---
14 | apiVersion: ceph.rook.io/v1
15 | kind: CephBlockPool
16 | metadata:
17 | name: replicapool
18 | namespace: rook-ceph
19 | spec:
20 | replicated:
21 | requireSafeReplicaSize: false
22 | size: 1
23 |
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.2.6/standalone/patch_cephcluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | mon:
9 | allowMultiplePerNode: true
10 | storage:
11 | directories:
12 | - path: /var/lib/rook
13 | ---
14 | apiVersion: ceph.rook.io/v1
15 | kind: CephBlockPool
16 | metadata:
17 | name: replicapool
18 | namespace: rook-ceph
19 | spec:
20 | replicated:
21 | requireSafeReplicaSize: false
22 | size: 1
23 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/cpumanager/test-cpumanager.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | input="test-cpumanager.yaml.tmpl"
4 |
5 | filename() {
6 | echo "test-cpumanager-$1.yaml"
7 | }
8 |
9 | for runtimeclass in runc kata-qemu kata-fc; do
10 | output=$(filename "$runtimeclass")
11 | cp "$input" "$output"
12 | sed -i "s/__runtimeclass__/$runtimeclass/g" "$output"
13 | if [ "$runtimeclass" == "runc" ]; then continue; fi
14 |
15 | # insert a two-space-indented runtimeClassName line after 'spec:'
16 | insertline="\ \ runtimeClassName: $runtimeclass"
17 | sed -i "/spec:/a $insertline" "$output"
18 | done
19 | kubectl apply -f .
20 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/sriov-conf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: sriov-config
6 | namespace: kube-system
7 | data:
8 | config.json: |
9 | {
10 | "resourceList":
11 | [
12 | {
13 | "resourceName": "sriov_netdevice",
14 | "selectors": {
15 | "drivers": ["i40evf", "iavf"]
16 | }
17 | },
18 | {
19 | "resourceName": "sriov_vfio",
20 | "selectors": {
21 | "drivers": ["vfio-pci"]
22 | }
23 | }
24 | ]
25 | }
26 |
--------------------------------------------------------------------------------
/clr-k8s-examples/3-efk/overlays/193692c92eb4667b8f4fb7d4cdf0462e229b5f13/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - instrumentation-addons/fluentd-elasticsearch/create-logging-namespace.yaml
3 | - instrumentation-addons/fluentd-elasticsearch/es-service.yaml
4 | - instrumentation-addons/fluentd-elasticsearch/es-statefulset.yaml
5 | - instrumentation-addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
6 | - instrumentation-addons/fluentd-elasticsearch/fluentd-es-ds.yaml
7 | - instrumentation-addons/fluentd-elasticsearch/kibana-deployment.yaml
8 | - instrumentation-addons/fluentd-elasticsearch/kibana-service.yaml
9 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/deploy-svc-ing/test-ingress-kata.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: php-apache-kata
5 | namespace: default
6 | annotations:
7 | nginx.ingress.kubernetes.io/rewrite-target: /
8 | nginx.ingress.kubernetes.io/affinity: "cookie"
9 | nginx.ingress.kubernetes.io/session-cookie-name: "route"
10 | nginx.ingress.kubernetes.io/session-cookie-hash: "sha1"
11 | spec:
12 | rules:
13 | - http:
14 | paths:
15 | - path: /php-apache-kata
16 | backend:
17 | serviceName: php-apache-kata
18 | servicePort: 80
19 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/deploy-svc-ing/test-ingress-runc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: php-apache-runc
5 | namespace: default
6 | annotations:
7 | nginx.ingress.kubernetes.io/rewrite-target: /
8 | nginx.ingress.kubernetes.io/affinity: "cookie"
9 | nginx.ingress.kubernetes.io/session-cookie-name: "route"
10 | nginx.ingress.kubernetes.io/session-cookie-hash: "sha1"
11 | spec:
12 | rules:
13 | - http:
14 | paths:
15 | - path: /php-apache-runc
16 | backend:
17 | serviceName: php-apache-runc
18 | servicePort: 80
19 |
--------------------------------------------------------------------------------
/clr-k8s-examples/node-problem-detector/overlays/v0.6.6/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - node-problem-detector/deployment/node-problem-detector-config.yaml
3 | - node-problem-detector/deployment/node-problem-detector.yaml
4 |
5 | patchesJson6902:
6 | # adds rule for mce
7 | - target:
8 | version: v1
9 | kind: ConfigMap
10 | name: node-problem-detector-config
11 | path: patch_configmap_rules.yaml
12 | # adds mce-monitor.json to configmap
13 | - target:
14 | version: v1
15 | kind: ConfigMap
16 | name: node-problem-detector-config
17 | path: patch_configmap_mce-monitor.yaml
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/elasticsearchr.R:
--------------------------------------------------------------------------------
1 |
2 | library('elasticsearchr')
3 |
4 | for_scaling <- query('{
5 | "bool": {
6 | "must": [
7 | { "match":
8 | {
9 | "test.testname": "k8s scaling"
10 | }
11 | }
12 | ]
13 | }
14 | }')
15 |
16 | these_fields <- select_fields('{
17 | "includes": [
18 | "date.Date",
19 | "k8s-scaling.BootResults.launch_time.Result",
20 | "k8s-scaling.BootResults.n_pods.Result"
21 | ]
22 | }')
23 |
24 | sort_by_date <- sort_on('[{"date.Date": {"order": "asc"}}]')
25 |
26 | x=elastic("http://192.168.0.111:9200", "logtest") %search% (for_scaling + sort_by_date + these_fields)
27 |
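28 | # %search% returns the matching documents as a data.frame; the endpoint and
29 | # index above ("http://192.168.0.111:9200", "logtest") are site-specific examples.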
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/html.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | # Copyright (c) 2018-2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | title: "Kubernetes metrics report"
7 | author: "Auto generated"
8 | date: "`r format(Sys.time(), '%d %B, %Y')`"
9 | output:
10 | html_document:
11 | urlcolor: blue
12 | ---
13 |
14 | ```{r setup, include=FALSE}
15 | # Use png images for the html rendering of the report
16 | opts_chunk$set(dev = 'png')
17 | # Pick up any env set by the invoking script, such as the root dir of the
18 | # results data tree
19 | source("/inputdir/Env.R")
20 | ```
21 |
22 | ```{r child = 'metrics_report.Rmd'}
23 | ```
24 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/sriov/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:bionic as ubuntu-build
2 | RUN apt-get update && \
3 | apt-get -y install \
4 | build-essential \
5 | git \
6 | libnuma-dev
7 |
8 | ARG DPDK_VER='master'
9 | ENV DPDK_DIR='/dpdk'
10 | ENV RTE_TARGET='x86_64-native-linuxapp-gcc'
11 | RUN git clone -b $DPDK_VER -q --depth 1 http://dpdk.org/git/dpdk-stable $DPDK_DIR 2>&1
12 | RUN cd ${DPDK_DIR} && \
13 | sed -ri 's,(IGB_UIO=).*,\1n,' config/common_linux* && \
14 | sed -ri 's,(KNI_KMOD=).*,\1n,' config/common_linux* && \
15 | make config T=x86_64-native-linuxapp-gcc && \
16 |     make -j "$(nproc)"
17 | ENV PATH="$PATH:$DPDK_DIR/build/app/"
18 |
--------------------------------------------------------------------------------
/clr-k8s-examples/node-problem-detector/overlays/v0.6.6/patch_configmap_mce-monitor.yaml:
--------------------------------------------------------------------------------
1 | # adds "mce" rule
2 | - op: add
3 | path: /data/mce-monitor.json
4 | value: |
5 | {
6 | "plugin": "journald",
7 | "pluginConfig": {
8 | "source": "mcelog"
9 | },
10 | "logPath": "/var/log/journal",
11 | "lookback": "5m",
12 | "bufferSize": 10,
13 | "source": "mce-monitor",
14 | "conditions": [],
15 | "rules": [
16 | {
17 | "type": "temporary",
18 | "reason": "Hardware Error",
19 | "pattern": "Hardware event.*"
20 | }
21 | ]
22 | }
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/sriov/1-pod-sriov.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: test-sriov
6 | annotations:
7 | k8s.v1.cni.cncf.io/networks: sriov-net
8 | spec:
9 | containers:
10 | - name: busy
11 | image: busybox
12 | command: [ "top" ]
13 | resources:
14 | limits:
15 | intel.com/sriov_netdevice: '1'
16 | ---
17 | apiVersion: v1
18 | kind: Pod
19 | metadata:
20 | name: test-sriov-dpdk
21 | annotations:
22 | k8s.v1.cni.cncf.io/networks: sriov-net-dpdk
23 | spec:
24 | containers:
25 | - name: busy
26 | image: busybox
27 | command: [ "top" ]
28 | resources:
29 | limits:
30 | intel.com/sriov_vfio: '1'
31 |
32 |
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/genreport.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2018-2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | REPORTNAME="metrics_report.pdf"
7 |
8 | cd scripts || exit 1
9 |
10 | Rscript --slave -e "library(knitr);knit('pdf.Rmd')"
11 | Rscript --slave -e "library(knitr);pandoc('pdf.md', format='latex')"
12 |
13 | Rscript --slave -e "library(knitr);knit('html.Rmd')"
14 | Rscript --slave -e "library(knitr);pandoc('html.md', format='html')"
15 |
16 | cp /scripts/pdf.pdf /outputdir/${REPORTNAME}
17 | cp /scripts/figure/*.png /outputdir/
18 | echo "PNGs of graphs and tables can be found in the output directory."
19 | echo "The report, named ${REPORTNAME}, can be found in the output directory"
20 |
--------------------------------------------------------------------------------
/clr-k8s-examples/admit-kata/deploy/webhook-registration.yaml.tpl:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | apiVersion: admissionregistration.k8s.io/v1
6 | kind: MutatingWebhookConfiguration
7 | metadata:
8 | name: pod-annotate-webhook
9 | labels:
10 | app: pod-annotate-webhook
11 | kind: mutator
12 | webhooks:
13 | - name: pod-annotate-webhook.kata.xyz
14 | clientConfig:
15 | service:
16 | name: pod-annotate-webhook
17 | namespace: default
18 | path: "/mutate"
19 | caBundle: CA_BUNDLE
20 | rules:
21 | - operations: [ "CREATE" ]
22 | apiGroups: [""]
23 | apiVersions: ["v1"]
24 |   resources: ["pods"]
25 |   # sideEffects and admissionReviewVersions are required by the v1 API
26 |   sideEffects: None
27 |   admissionReviewVersions: ["v1"]
28 |
--------------------------------------------------------------------------------
/metrics/scaling/net-serve.yaml.in:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: net-serve
6 | name: @DEPLOYMENT@
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | run: net-serve
12 | template:
13 | metadata:
14 | labels:
15 | run: net-serve
16 | @LABEL@: @LABELVALUE@
17 | spec:
18 | terminationGracePeriodSeconds: @GRACE@
19 | runtimeClassName: @RUNTIMECLASS@
20 | automountServiceAccountToken: false
21 | containers:
22 | - name: net-serve
23 | image: gcr.io/kubernetes-e2e-test-images/agnhost:2.8
24 | imagePullPolicy: IfNotPresent
25 | args:
26 | - netexec
27 | restartPolicy: Always
28 |
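29 | # Note: the @TOKEN@ placeholders above (DEPLOYMENT, LABEL, LABELVALUE, GRACE,
30 | # RUNTIMECLASS) are expected to be substituted by the scaling scripts before
31 | # this manifest is applied; the file is not valid YAML until then.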
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## DISCONTINUATION OF PROJECT.
2 |
3 | This project will no longer be maintained by Intel.
4 |
5 | Intel will not provide or guarantee development of or support for this project, including but not limited to, maintenance, bug fixes, new releases or updates. Patches to this project are no longer accepted by Intel. If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the community, please create your own fork of the project.
6 |
7 | Contact: webadmin@linux.intel.com
8 |
9 | ## Cloud Native Setup
10 |
11 | Automation around setting up the cloud-native content (Kubernetes) on Clear Linux.
12 |
13 | ### Folder Structure
14 | * **clr-k8s-examples**: script tools to deploy a Kubernetes cluster
15 | * **metrics**: tools to aid in measuring the scaling capabilities of Kubernetes clusters.
16 |
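17 | For example, once the component sources referenced by each overlay are
18 | checked out, a pinned version can be applied with kustomize (a minimal
19 | sketch, assuming `kubectl` >= 1.14):
20 |
21 | ```bash
22 | kubectl apply -k clr-k8s-examples/0-canal/overlays/v3.18
23 | ```
24 |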
--------------------------------------------------------------------------------
/clr-k8s-examples/7-rook/overlays/v1.8.10/standalone/probe_timeout.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: ceph.rook.io/v1
3 | kind: CephCluster
4 | metadata:
5 | name: rook-ceph
6 | namespace: rook-ceph
7 | spec:
8 | healthCheck:
9 | startupProbe:
10 | mon:
11 | probe:
12 | timeoutSeconds: 10
13 | initialDelaySeconds: 100
14 | periodSeconds: 10
15 | failureThreshold: 12
16 | successThreshold: 1
17 | mgr:
18 | probe:
19 | timeoutSeconds: 10
20 | initialDelaySeconds: 100
21 | periodSeconds: 10
22 | failureThreshold: 12
23 | successThreshold: 1
24 | osd:
25 | probe:
26 | timeoutSeconds: 10
27 | initialDelaySeconds: 100
28 | periodSeconds: 10
29 | failureThreshold: 12
30 | successThreshold: 1
31 |
--------------------------------------------------------------------------------
/metrics/scaling/stats.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: stats
5 | spec:
6 | selector:
7 | matchLabels:
8 | name: stats-pods
9 | template:
10 | metadata:
11 | labels:
12 | name: stats-pods
13 | spec:
14 | hostNetwork: true
15 | tolerations:
16 | - key: node-role.kubernetes.io/master
17 | operator: Exists
18 | effect: NoSchedule
19 | terminationGracePeriodSeconds: 0
20 | containers:
21 | - name: stats
22 | image: busybox
23 | securityContext:
24 | # Run a priv container so we really do measure what is happening on the
25 | # host (node) system
26 | privileged: true
27 | command:
28 | - "tail"
29 | - "-f"
30 | - "/dev/null"
31 | stdin: true
32 | tty: true
33 |
--------------------------------------------------------------------------------
/metrics/lib/cpu_load_daemonset.yaml.in:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: cpu-load
5 | spec:
6 | selector:
7 | matchLabels:
8 | name: cpu-load-pods
9 | template:
10 | metadata:
11 | labels:
12 | name: cpu-load-pods
13 | spec:
14 | hostNetwork: true
15 | terminationGracePeriodSeconds: 0
16 | containers:
17 | - name: cpu-load
18 | imagePullPolicy: IfNotPresent
19 | image: polinux/stress-ng
20 | command: ["stress-ng"]
21 | args: # comment fields here so we can *delete* sections on demand
22 | - "--cpu"
23 | - "@CPU_NCPU@"
24 | - "-l" #CPU_PERCENT
25 | - "@CPU_PERCENT@" #CPU_PERCENT
26 | resources:
27 | limits:
28 | cpu: @CPU_LIMIT@
29 | requests:
30 | cpu: @CPU_REQUEST@
31 |
--------------------------------------------------------------------------------
/clr-k8s-examples/3-efk/overlays/v1.15.1/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - kubernetes/cluster/addons/fluentd-elasticsearch/es-service.yaml
3 | - kubernetes/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
4 | - kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
5 | - kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
6 | - kubernetes/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
7 | - kubernetes/cluster/addons/fluentd-elasticsearch/kibana-service.yaml
8 |
9 | images:
10 | - name: gcr.io/fluentd-elasticsearch/fluentd
11 | newName: quay.io/fluentd_elasticsearch/fluentd
12 | newTag: v2.6.0
13 | - name: gcr.io/fluentd-elasticsearch/elasticsearch
14 | newName: quay.io/fluentd_elasticsearch/elasticsearch
15 | newTag: v7.1.1
16 | - name: docker.elastic.co/kibana/kibana-oss
17 | newName: docker.elastic.co/kibana/kibana-oss
18 | newTag: 7.1.1
19 |
--------------------------------------------------------------------------------
/metrics/scaling/common.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 |
7 | input_yaml="${SCRIPT_PATH}/bb.yaml.in"
8 | input_json="${SCRIPT_PATH}/bb.json.in"
9 | generated_yaml="${SCRIPT_PATH}/generated.yaml"
10 | generated_json="${SCRIPT_PATH}/generated.json"
11 | deployment="busybox"
12 |
13 | stats_pod="stats"
14 |
15 | NUM_PODS=${NUM_PODS:-20}
16 | NUM_DEPLOYMENTS=${NUM_DEPLOYMENTS:-20}
17 | STEP=${STEP:-1}
18 |
19 | LABEL=${LABEL:-magiclabel}
20 | LABELVALUE=${LABELVALUE:-scaling_common}
21 |
22 | # sleep and timeout times for k8s actions, in seconds
23 | wait_time=${wait_time:-30}
24 | delete_wait_time=${delete_wait_time:-600}
25 | settle_time=${settle_time:-5}
26 | use_api=${use_api:-yes}
27 | grace=${grace:-30}
28 | proc_wait_time=${proc_wait_time:-20}
29 | proc_sleep_time=2
30 |
31 | declare -a new_pods
32 | declare -A node_basemem
33 | declare -A node_baseinode
34 |
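35 | # All of the settings above can be overridden from the environment when
36 | # invoking a scaling test, e.g. (values are illustrative):
37 | #   NUM_PODS=100 STEP=5 wait_time=60 ./k8s_scale.sh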
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/pdf.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | # Copyright (c) 2018-2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | title: "Kubernetes metrics report"
7 | author: "Auto generated"
8 | date: "`r format(Sys.time(), '%d %B, %Y')`"
9 | output:
10 | pdf_document:
11 | # Shrink the page margins so we get bigger/better resolution on the graphs
12 | # Keep the top and bottom margins reasonable, as we are really interested in
13 | # gaining 'width', and if we trim the bottom too much, we lose the page numbers.
14 | geometry: "left=1cm, right=1cm, top=2cm, bottom=2cm"
15 | urlcolor: blue
16 | ---
17 |
18 | ```{r setup, include=FALSE}
19 | #Set these opts to get pdf images which fit into beamer slides better
20 | opts_chunk$set(dev = 'pdf')
21 | # Pick up any env set by the invoking script, such as the root dir of the
22 | # results data tree
23 | source("/inputdir/Env.R")
24 | ```
25 |
26 | ```{r child = 'metrics_report.Rmd'}
27 | ```
28 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/deploy-svc-ing/test-deploy-runc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: php-apache-runc
6 | name: php-apache-runc
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | run: php-apache-runc
12 | template:
13 | metadata:
14 | labels:
15 | run: php-apache-runc
16 | spec:
17 | containers:
18 | - image: k8s.gcr.io/hpa-example
19 | imagePullPolicy: Always
20 | name: php-apache
21 | ports:
22 | - containerPort: 80
23 | protocol: TCP
24 | resources:
25 | requests:
26 | cpu: 200m
27 | restartPolicy: Always
28 | ---
29 | apiVersion: v1
30 | kind: Service
31 | metadata:
32 | name: php-apache-runc
33 | spec:
34 | ports:
35 | - port: 80
36 | protocol: TCP
37 | targetPort: 80
38 | selector:
39 | run: php-apache-runc
40 | sessionAffinity: None
41 | type: ClusterIP
42 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/deploy-svc-ing/test-deploy-kata-fc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: php-apache-kata-fc
6 | name: php-apache-kata-fc
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | run: php-apache-kata-fc
12 | template:
13 | metadata:
14 | labels:
15 | run: php-apache-kata-fc
16 | spec:
17 | runtimeClassName: kata-fc
18 | containers:
19 | - image: k8s.gcr.io/hpa-example
20 | imagePullPolicy: Always
21 | name: php-apache
22 | ports:
23 | - containerPort: 80
24 | protocol: TCP
25 | resources:
26 | requests:
27 | cpu: 200m
28 | restartPolicy: Always
29 | ---
30 | apiVersion: v1
31 | kind: Service
32 | metadata:
33 | name: php-apache-kata-fc
34 | spec:
35 | ports:
36 | - port: 80
37 | protocol: TCP
38 | targetPort: 80
39 | selector:
40 | run: php-apache-kata-fc
41 | sessionAffinity: None
42 | type: ClusterIP
43 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/deploy-svc-ing/test-deploy-kata-qemu.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: php-apache-kata-qemu
6 | name: php-apache-kata-qemu
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | run: php-apache-kata-qemu
12 | template:
13 | metadata:
14 | labels:
15 | run: php-apache-kata-qemu
16 | spec:
17 | runtimeClassName: kata-qemu
18 | containers:
19 | - image: k8s.gcr.io/hpa-example
20 | imagePullPolicy: Always
21 | name: php-apache
22 | ports:
23 | - containerPort: 80
24 | protocol: TCP
25 | resources:
26 | requests:
27 | cpu: 200m
28 | restartPolicy: Always
29 | ---
30 | apiVersion: v1
31 | kind: Service
32 | metadata:
33 | name: php-apache-kata-qemu
34 | spec:
35 | ports:
36 | - port: 80
37 | protocol: TCP
38 | targetPort: 80
39 | selector:
40 | run: php-apache-kata-qemu
41 | sessionAffinity: None
42 | type: ClusterIP
43 |
--------------------------------------------------------------------------------
/clr-k8s-examples/admit-kata/create-certs.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # Copyright (c) 2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 |
7 | WEBHOOK_NS=${1:-"default"}
8 | WEBHOOK_NAME=${2:-"pod-annotate"}
9 | WEBHOOK_SVC="${WEBHOOK_NAME}-webhook"
10 |
11 | # Create certs for our webhook
12 | openssl genrsa -out webhookCA.key 2048
13 | openssl req -new -key ./webhookCA.key -subj "/CN=${WEBHOOK_SVC}.${WEBHOOK_NS}.svc" -out ./webhookCA.csr
14 | openssl x509 -req -days 365 -in webhookCA.csr -signkey webhookCA.key -out webhook.crt
15 |
16 | # Create certs secrets for k8s
17 | kubectl create secret generic \
18 | ${WEBHOOK_SVC}-certs \
19 | --from-file=key.pem=./webhookCA.key \
20 | --from-file=cert.pem=./webhook.crt \
21 | --dry-run -o yaml > ./deploy/webhook-certs.yaml
22 |
23 | # Set the CABundle on the webhook registration
24 | CA_BUNDLE=$(cat ./webhook.crt | base64 -w0)
25 | sed "s/CA_BUNDLE/${CA_BUNDLE}/" ./deploy/webhook-registration.yaml.tpl > ./deploy/webhook-registration.yaml
26 |
27 | # Clean
28 | rm ./webhookCA* && rm ./webhook.crt
29 |
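30 | # Example usage (arguments are the webhook namespace and name; these are the
31 | # defaults, so this is equivalent to running with no arguments):
32 | #   ./create-certs.sh default pod-annotate
33 | #   kubectl apply -f ./deploy/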
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/sriov/0-sriov-net.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: "k8s.cni.cncf.io/v1"
3 | kind: NetworkAttachmentDefinition
4 | metadata:
5 | name: sriov-net
6 | annotations:
7 | k8s.v1.cni.cncf.io/resourceName: intel.com/sriov_netdevice
8 | spec:
9 | config: '{
10 | "type": "sriov",
11 | "name": "sriov-net",
12 | "ipam": {
13 | "type": "host-local",
14 | "subnet": "198.19.0.0/24",
15 | "rangeStart": "198.19.0.100",
16 | "rangeEnd": "198.19.0.200",
17 | "gateway": "198.19.0.1"
18 | }
19 | }'
20 | ---
21 | apiVersion: "k8s.cni.cncf.io/v1"
22 | kind: NetworkAttachmentDefinition
23 | metadata:
24 | name: sriov-net-dpdk
25 | annotations:
26 | k8s.v1.cni.cncf.io/resourceName: intel.com/sriov_vfio
27 | spec:
28 | config: '{
29 | "type": "vfioveth",
30 | "name": "sriov-net",
31 | "ipam": {
32 | "type": "host-local",
33 | "subnet": "198.19.0.0/24",
34 | "rangeStart": "198.19.0.100",
35 | "rangeEnd": "198.19.0.200",
36 | "gateway": "198.19.0.1"
37 | }
38 | }'
39 |
40 |
--------------------------------------------------------------------------------
/metrics/collectd/collectd.conf:
--------------------------------------------------------------------------------
1 | Interval 5
2 |
3 | LoadPlugin aggregation
4 | LoadPlugin cpu
5 | LoadPlugin csv
6 | LoadPlugin interface
7 | LoadPlugin ipc
8 | LoadPlugin memory
9 | LoadPlugin cpufreq
10 | LoadPlugin df
11 |
12 | Hostname localhost
13 |
14 | <Plugin cpu>
15 |   ReportByCpu true
16 |   ReportByState true
17 |   ValuesPercentage true
18 | </Plugin>
19 | <Plugin csv>
20 |   DataDir "/mnt/opt/collectd/run"
21 |   StoreRates true
22 | </Plugin>
23 | <Plugin interface>
24 |   Interface "/^eno/"
25 |   Interface "/^ens/"
26 |   Interface "/^enp/"
27 |   Interface "/^em/"
28 |   Interface "/^eth/"
29 |   IgnoreSelected false
30 | </Plugin>
31 | <Plugin "aggregation">
32 |   <Aggregation>
33 |     Plugin "cpu"
34 |     Type "percent"
35 |
36 |     GroupBy "Host"
37 |     GroupBy "TypeInstance"
38 |
39 |     CalculateSum true
40 |     CalculateAverage true
41 |   </Aggregation>
42 | </Plugin>
43 | <Plugin df>
44 |   Device "overlay"
45 |   MountPoint "/"
46 |   FSType "overlay"
47 |   ReportInodes true
48 |   IgnoreSelected false
49 | </Plugin>
50 |
--------------------------------------------------------------------------------
/clr-k8s-examples/haproxy.cfg.example:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | chroot /var/lib/haproxy
4 | stats socket /run/haproxy-master.sock mode 660 level admin
5 | stats timeout 30s
6 | user haproxy
7 | group haproxy
8 | daemon
9 | # Default SSL material locations
10 | ca-base /etc/ssl/certs
11 | ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
12 | ssl-default-bind-options no-sslv3
13 | defaults
14 | log global
15 | mode http
16 | option httplog
17 | option dontlognull
18 | timeout connect 5000
19 | timeout client 50000
20 | timeout server 50000
21 | timeout tunnel 4h
22 | frontend kubernetes
23 | bind 10.0.0.100:6444
24 | option tcplog
25 | mode tcp
26 | default_backend kubernetes-master-nodes
27 |
28 | backend kubernetes-master-nodes
29 | mode tcp
30 | balance source
31 | option tcp-check
32 | server master-1 10.0.0.100:6443 check fall 3 rise 2
33 | server master-2 10.0.0.101:6443 check fall 3 rise 2
34 | server master-3 10.0.0.102:6443 check fall 3 rise 2
35 |
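36 | # Note (assumed usage): the frontend listens on 6444 so it can co-exist with a
37 | # local kube-apiserver on 6443 on the same host; clients (e.g. kubeadm's
38 | # controlPlaneEndpoint) would then point at 10.0.0.100:6444.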
--------------------------------------------------------------------------------
/metrics/scaling/bb.yaml.in:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017,2018,2019 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | apiVersion: apps/v1
6 | # We use a deployment rather than a pod directly, specifically so we can use it to
7 | # generate more replicas by re-deploying it.
8 | # The only downside is that if containers go wrong or die, the deployment
9 | # will try to re-deploy them, and we may not notice, or may get stuck waiting for
10 | # them, so we want to be careful in the test code to time out in such cases.
11 | kind: Deployment
12 | metadata:
13 | labels:
14 | run: busybox
15 | name: @DEPLOYMENT@
16 | spec:
17 | replicas: @REPLICAS@
18 | selector:
19 | matchLabels:
20 | run: busybox
21 | template:
22 | metadata:
23 | labels:
24 | run: busybox
25 | @LABEL@: @LABELVALUE@
26 | spec:
27 | terminationGracePeriodSeconds: @GRACE@
28 | runtimeClassName: @RUNTIMECLASS@
29 | automountServiceAccountToken: false
30 | containers:
31 | - name: bb
32 | image: busybox
33 | command: @PODCOMMAND@
34 | stdin: true
35 | tty: true
36 | restartPolicy: Always
37 |
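38 | # The @TOKENS@ above are placeholders substituted by the scaling scripts before
39 | # this file is applied; e.g. @DEPLOYMENT@ becomes the deployment name ("busybox"
40 | # in scaling/common.bash) and @RUNTIMECLASS@ selects the pod runtime class
41 | # (illustrative value: kata-qemu).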
--------------------------------------------------------------------------------
/metrics/scaling/bb.json.in:
--------------------------------------------------------------------------------
1 | {
2 | "apiVersion": "apps/v1",
3 | "kind": "Deployment",
4 | "metadata": {
5 | "labels": {
6 | "run": "busybox"
7 | },
8 | "name": "@DEPLOYMENT@"
9 | },
10 | "spec": {
11 | "replicas": @REPLICAS@,
12 | "selector": {
13 | "matchLabels": {
14 | "run": "busybox"
15 | }
16 | },
17 | "template": {
18 | "metadata": {
19 | "labels": {
20 | "run": "busybox",
21 | "@LABEL@": "@LABELVALUE@"
22 | }
23 | },
24 | "spec": {
25 | "terminationGracePeriodSeconds": @GRACE@,
26 | "runtimeClassName": "@RUNTIMECLASS@",
27 | "automountServiceAccountToken": false,
28 | "containers": [{
29 | "name": "bb",
30 | "image": "busybox",
31 | "command": @PODCOMMAND@,
32 | "stdin": true,
33 | "tty": true
34 | }],
35 | "restartPolicy": "Always"
36 | }
37 | }
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/autoscale/php.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | run: php-apache-test
7 | name: php-apache-test
8 | spec:
9 | selector:
10 | matchLabels:
11 | run: php-apache-test
12 | template:
13 | metadata:
14 | labels:
15 | run: php-apache-test
16 | spec:
17 | containers:
18 | - image: k8s.gcr.io/hpa-example
19 | name: php-apache-test
20 | ports:
21 | - containerPort: 80
22 | protocol: TCP
23 | resources:
24 | requests:
25 | cpu: 200m
26 | restartPolicy: Always
27 | ---
28 | apiVersion: v1
29 | kind: Service
30 | metadata:
31 | name: php-apache-test
32 | spec:
33 | ports:
34 | - port: 80
35 | protocol: TCP
36 | targetPort: 80
37 | selector:
38 | run: php-apache-test
39 | sessionAffinity: None
40 | type: ClusterIP
41 | ---
42 | apiVersion: autoscaling/v1
43 | kind: HorizontalPodAutoscaler
44 | metadata:
45 | name: php-apache-test
46 | spec:
47 | maxReplicas: 10
48 | minReplicas: 1
49 | scaleTargetRef:
50 | apiVersion: apps/v1
51 | kind: Deployment
52 | name: php-apache-test
53 | targetCPUUtilizationPercentage: 50
54 |
55 |
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent {
3 | label 'clearlinux'
4 | }
5 | options {
6 | timeout(time: 1, unit: "HOURS")
7 | }
8 | triggers {
9 | cron('H */12 * * *')
10 | }
11 | environment {
12 | CLR_K8S_PATH="${env.WORKSPACE}/clr-k8s-examples"
13 | }
14 | stages {
15 | stage('Setup system') {
16 | steps {
17 | dir(path: "$CLR_K8S_PATH") {
18 | sh './setup_system.sh'
19 | }
20 | }
21 | }
22 | stage('Init') {
23 | steps {
24 | dir(path: "$CLR_K8S_PATH") {
25 | sh './create_stack.sh init'
26 | sh 'mkdir -p $HOME/.kube'
27 | sh 'sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config'
28 | sh 'sudo chown $(id -u):$(id -g) $HOME/.kube/config'
29 | sh 'kubectl version'
30 | }
31 | }
32 | }
33 | stage('CNI') {
34 | steps {
35 | dir(path: "$CLR_K8S_PATH") {
36 | sh './create_stack.sh cni'
37 | sh 'kubectl rollout status deployment/coredns -n kube-system --timeout=5m'
38 | sh 'kubectl get pods -n kube-system'
39 | }
40 | }
41 | }
42 | stage('Reset Stack') {
43 | steps {
44 | dir(path: "$CLR_K8S_PATH") {
45 | sh './reset_stack.sh'
46 | }
47 | }
48 | }
49 | }
50 | post {
51 | always {
52 | sh 'uname -a'
53 | sh 'swupd info'
54 | }
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/metrics/scaling/net-serve.json.in:
--------------------------------------------------------------------------------
1 | {
2 | "apiVersion": "apps/v1",
3 | "kind": "Deployment",
4 | "metadata": {
5 | "labels": {
6 | "run": "net-serve"
7 | },
8 | "name": "@DEPLOYMENT@"
9 | },
10 | "spec": {
11 | "replicas": 1,
12 | "selector": {
13 | "matchLabels": {
14 | "run": "net-serve"
15 | }
16 | },
17 | "template": {
18 | "metadata": {
19 | "labels": {
20 | "run": "net-serve",
21 | "@LABEL@": "@LABELVALUE@"
22 | }
23 | },
24 | "spec": {
25 | "terminationGracePeriodSeconds": @GRACE@,
26 | "runtimeClassName": "@RUNTIMECLASS@",
27 | "automountServiceAccountToken": false,
28 | "containers": [{
29 | "name": "net-serve",
30 | "image": "gcr.io/kubernetes-e2e-test-images/agnhost:2.8",
31 | "imagePullPolicy": "IfNotPresent",
32 | "args": [
33 | "netexec"
34 | ]
35 | }],
36 | "restartPolicy": "Always"
37 | }
38 | }
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/clr-k8s-examples/admit-kata/deploy/webhook.yaml:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | apiVersion: extensions/v1beta1
6 | kind: Deployment
7 | metadata:
8 | name: pod-annotate-webhook
9 | labels:
10 | app: pod-annotate-webhook
11 | spec:
12 | replicas: 1
13 | template:
14 | metadata:
15 | labels:
16 | app: pod-annotate-webhook
17 | spec:
18 | containers:
19 | - name: pod-annotate-webhook
20 | image: katadocker/kata-webhook-example:latest
21 | imagePullPolicy: Always
22 | args:
23 | - -tls-cert-file=/etc/webhook/certs/cert.pem
24 | - -tls-key-file=/etc/webhook/certs/key.pem
25 | - -exclude-namespaces=rook-ceph-system,rook-ceph
26 | volumeMounts:
27 | - name: webhook-certs
28 | mountPath: /etc/webhook/certs
29 | readOnly: true
30 | volumes:
31 | - name: webhook-certs
32 | secret:
33 | secretName: pod-annotate-webhook-certs
34 | ---
35 | apiVersion: v1
36 | kind: Service
37 | metadata:
38 | name: pod-annotate-webhook
39 | labels:
40 | app: pod-annotate-webhook
41 | spec:
42 | ports:
43 | - port: 443
44 | targetPort: 8080
45 | selector:
46 | app: pod-annotate-webhook
47 |
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-2019 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # Set up an Ubuntu image with the components needed to generate a
6 | # metrics report. That includes:
7 | # - R
8 | # - The R 'tidyverse'
9 | # - pandoc
10 | # - The report generation R files and helper scripts
11 |
12 | # Start with the base rocker tidyverse.
13 | # We would have used the 'verse' base, which already has some of the docs processing
14 | # tools installed, but I could not figure out how to add in the extra bits we needed to
15 | # the lite tex version it uses.
16 | FROM rocker/tidyverse:3.6.0
17 |
18 | # Version of the Dockerfile
19 | LABEL DOCKERFILE_VERSION="1.1"
20 |
21 | # Without this, some of the package installs stop to ask questions...
22 | ENV DEBIAN_FRONTEND=noninteractive
23 |
24 | # Install the extra doc processing parts we need for our Rmarkdown PDF flow.
25 | RUN apt-get update -qq && \
26 | apt-get install -y \
27 | texlive-latex-base \
28 | texlive-fonts-recommended \
29 | latex-xcolor
30 |
31 | # Install the extra R packages we need.
32 | RUN install2.r --error --deps TRUE \
33 | gridExtra \
34 | ggpubr
35 |
36 | # Pull in our actual worker scripts
37 | COPY . /scripts
38 |
39 | # By default generate the report
40 | CMD ["/scripts/genreport.sh"]
41 |
--------------------------------------------------------------------------------
/clr-k8s-examples/setup_kata_firecracker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -o errexit
4 | set -o pipefail
5 | set -o nounset
6 |
7 | # Firecracker can only work with devicemapper
8 | # Setup a sparse disk to be used for devicemapper
9 | sudo rm -f /var/lib/crio/devicemapper/disk.img
10 | sudo mkdir -p /var/lib/crio/devicemapper
11 | sudo truncate /var/lib/crio/devicemapper/disk.img --size 10G
12 |
13 | # Ensure that this disk is loop mounted at each boot
14 | sudo mkdir -p /etc/systemd/system
15 |
16 | cat <

--------------------------------------------------------------------------------
/clr-k8s-examples/admit-kata/README.md:
--------------------------------------------------------------------------------
17 | > **Note:**
18 | > The image needs to be published for the webhook to work. Alternatively,
19 | > on a single machine cluster, change the `imagePullPolicy` to use the locally
20 | > built image.
21 |
22 | ## Making Kata the default runtime using an admission controller
23 |
24 | Today in `crio.conf` `runc` is the default runtime when a user does not specify
25 | `runtimeClass` in the pod spec. If you want to run a cluster where Kata is used
26 | by default, except for workloads we know for sure will not work with Kata, use
27 | the [admission webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
28 | and sample admission controller we created by running
29 |
30 | ```bash
31 | $ kubectl apply -f deploy/
32 | ```
33 |
34 | The webhook mutates pods to use the kata runtime class for all pods except
35 | those with
36 |
37 | * `hostNetwork: true`
38 | * namespace: `rook-ceph` and `rook-ceph-system`
39 |
40 |
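41 | A minimal way to sanity-check the webhook once it is deployed (the exact
42 | runtime class value reported depends on the webhook configuration):
43 |
44 | ```bash
45 | kubectl run hook-test --image=busybox --restart=Never -- sleep 600
46 | kubectl get pod hook-test -o jsonpath='{.spec.runtimeClassName}'
47 | kubectl delete pod hook-test
48 | ```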
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build multus plugin
2 | FROM busybox AS multus
3 | ARG MULTUS_VER=3.4.2
4 | RUN wget -O multus.tgz https://github.com/intel/multus-cni/releases/download/v${MULTUS_VER}/multus-cni_${MULTUS_VER}_linux_amd64.tar.gz
5 | RUN tar xvzf multus.tgz --strip-components=1 -C /bin
6 |
7 | # Build sriov plugin
8 | FROM golang AS sriov-cni
9 | ARG SRIOV_CNI_VER=2.3
10 | RUN wget -qO sriov-cni.tgz https://github.com/intel/sriov-cni/archive/v${SRIOV_CNI_VER}.tar.gz
11 | RUN mkdir -p sriov-cni && \
12 | tar xzf sriov-cni.tgz --strip-components=1 -C sriov-cni && \
13 | cd sriov-cni && \
14 | make && \
15 | cp build/sriov /bin
16 |
17 | # Build sriov device plugin
18 | FROM golang AS sriov-dp
19 | ARG SRIOV_DP_VER=3.2
20 | RUN wget -qO sriov-dp.tgz https://github.com/intel/sriov-network-device-plugin/archive/v${SRIOV_DP_VER}.tar.gz
21 | RUN mkdir -p sriov-dp && \
22 | tar xzf sriov-dp.tgz --strip-components=1 -C sriov-dp && \
23 | cd sriov-dp && \
24 | make && \
25 | cp build/sriovdp /bin
26 |
27 | # Build vfioveth plugin
28 | FROM busybox as vfioveth
29 | RUN wget -O /bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
30 | COPY cni/vfioveth /bin/vfioveth
31 | RUN chmod +x /bin/vfioveth /bin/jq
32 |
33 | # Final image
34 | FROM centos/systemd
35 | WORKDIR /tmp/cni/bin
36 | COPY --from=multus /bin/multus-cni .
37 | COPY --from=sriov-cni /bin/sriov .
38 | COPY --from=vfioveth /bin/vfioveth .
39 | COPY --from=vfioveth /bin/jq .
40 | WORKDIR /usr/bin
41 | COPY --from=sriov-dp /bin/sriovdp .
42 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/systemd/sriov.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -o errexit
4 | set -o pipefail
5 | set -o nounset
6 |
7 | OPTIND=1
8 | bind="false"
9 |
10 | while getopts ":b" opt; do
11 | case ${opt} in
12 | b)
13 | bind="true"
14 | ;;
15 | \?)
16 | echo "Usage: sriov.sh [-b] ens785f0 ens785f1 ..."
17 | echo "-b Bind to vfio-pci"
18 | exit
19 | ;;
20 | esac
21 | done
22 | shift $((OPTIND - 1))
23 |
24 | setup_pf() {
25 | local pf=$1
26 | echo "Resetting PF $pf"
27 | echo 0 | tee /sys/class/net/$pf/device/sriov_numvfs
28 | local NUM_VFS=$(cat /sys/class/net/$pf/device/sriov_totalvfs)
29 | echo "Enabling $NUM_VFS VFs for $pf"
30 | echo $NUM_VFS | tee /sys/class/net/$pf/device/sriov_numvfs
31 | ip link set $pf up
32 | sleep 1
33 | }
34 |
35 | setup_vfs() {
36 | local pf=$1
37 | local pfpci=$(readlink /sys/devices/pci*/*/*/net/$pf/device | awk '{print substr($1,10)}')
38 | local NUM_VFS=$(cat /sys/class/net/$pf/device/sriov_numvfs)
39 | for ((idx = 0; idx < NUM_VFS; idx++)); do
40 | ip link set dev $pf vf $idx state enable
41 | if [ $bind != "true" ]; then continue; fi
42 |
43 | local vfn="virtfn$idx"
44 | local vfpci=$(ls -l /sys/devices/pci*/*/$pfpci | awk -v vfn=$vfn 'vfn==$9 {print substr($11,4)}')
45 | # Capture and set MAC of the VF before unbinding from linux, for later use in CNI
46 | local mac=$(cat /sys/bus/pci*/*/$vfpci/net/*/address)
47 | ip link set dev $pf vf $idx mac $mac
48 | # Bind VF to vfio-pci
49 | echo $vfpci >/sys/bus/pci*/*/$vfpci/driver/unbind
50 | echo "vfio-pci" >/sys/devices/pci*/*/$vfpci/driver_override
51 | echo $vfpci >/sys/bus/pci/drivers/vfio-pci/bind
52 | done
53 | }
54 |
55 | for pf in "$@"; do
56 | setup_pf $pf
57 | setup_vfs $pf
58 | done
59 |
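60 | # Example: enable all VFs on two PFs and bind them to vfio-pci
61 | #   ./sriov.sh -b ens785f0 ens785f1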
--------------------------------------------------------------------------------
/metrics/collectd/collectd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: collectd
5 | spec:
6 | selector:
7 | matchLabels:
8 | name: collectd-pods
9 | template:
10 | metadata:
11 | labels:
12 | name: collectd-pods
13 | spec:
14 | hostNetwork: true
15 | tolerations:
16 | - key: node-role.kubernetes.io/master
17 | operator: Exists
18 | effect: NoSchedule
19 | terminationGracePeriodSeconds: 0
20 | containers:
21 | - name: collectd
22 | image: dklyle/alpine-collectd:1.0
23 | imagePullPolicy: IfNotPresent
24 | securityContext:
25 | # Run a priv container so we really do measure what is happening on the
26 | # host (node) system
27 | privileged: true
28 | command: ["/bin/sh", "-c"]
29 | args:
30 | - collectd -f;
31 | volumeMounts:
32 | - name: collectd-config-volume
33 | mountPath: /etc/collectd
34 | - name: proc
35 | mountPath: /mnt/proc
36 | readOnly: true
37 | - name: root
38 | mountPath: /hostfs
39 | readOnly: true
40 | - name: etc
41 | mountPath: /mnt/etc
42 | readOnly: true
43 | - name: opt
44 | mountPath: /mnt/opt
45 | volumes:
46 | - name: collectd-config-volume
47 | configMap:
48 | name: collectd-config
49 | items:
50 | - key: collectd.conf
51 | path: collectd.conf
52 | - name: proc
53 | hostPath:
54 | path: /proc
55 | - name: root
56 | hostPath:
57 | path: /
58 | - name: etc
59 | hostPath:
60 | path: /etc
61 | - name: opt
62 | hostPath:
63 | path: /opt
64 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/cni/vfioveth:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | set -o errexit
4 | set -o pipefail
5 | set -o nounset
6 |
7 | exec 3>&1
8 | exec &>>/var/log/$(basename $0).log
9 |
10 | PATH="$CNI_PATH:$(dirname "${BASH_SOURCE[0]}"):$PATH"
11 | CNI_CONF=$(cat /dev/stdin)
12 |
13 | get_peer_name() {
14 | echo "$1-vdev"
15 | }
16 |
17 | get_mac_with_vfpci() {
18 | local pf=$(readlink /sys/devices/pci*/*/$1/physfn | awk '{print substr($1,4)}')
19 | local pfName=$(ls /sys/devices/pci*/*/$pf/net/ | head -1)
20 | local idx=$(ls -l /sys/devices/pci*/*/$pf | awk -v vf=$1 'substr($11,4)==vf {print substr($9,7)}')
21 | local mac=$(ip link show dev $pfName | awk -v idx="$idx" '$1=="vf" && $2==idx {print substr($4,1,17)}')
22 | echo $mac
23 | }
24 |
25 | ipam() {
26 | local plugin=$(echo $CNI_CONF | jq -r '.ipam.type')
27 | local res=$(echo $"$CNI_CONF" | "$plugin" | jq -c '.')
28 | echo $res
29 | }
30 |
31 | add_pair_ns() {
32 | vfpci=$(echo $CNI_CONF | jq -r '.deviceID')
33 | mac=$(get_mac_with_vfpci $vfpci)
34 | peer=$(get_peer_name $CNI_IFNAME)
35 | ip=$1
36 |
37 | mkdir -p /var/run/netns/
38 | ln -sfT $CNI_NETNS /var/run/netns/$CNI_CONTAINERID
39 |
40 | ip netns exec $CNI_CONTAINERID ip link add $CNI_IFNAME type veth peer name $peer
41 | ip netns exec $CNI_CONTAINERID ip link set $CNI_IFNAME addr $mac up alias $vfpci
42 | ip netns exec $CNI_CONTAINERID ip link set $peer up
43 | ip netns exec $CNI_CONTAINERID ip addr add $ip dev $CNI_IFNAME
44 | }
45 |
46 | delete_pair_ns() {
47 | ip netns exec $CNI_CONTAINERID ip link del $CNI_IFNAME
48 | }
49 |
50 | case $CNI_COMMAND in
51 | ADD)
52 | res=$(ipam)
53 | ip=$(echo $res | jq -r '.ips[0].address')
54 | add_pair_ns $ip
55 | echo '{"cniVersion":"0.3.1"}' | jq -c --arg ip $ip '.ips[0].address = $ip' >&3
56 | ;;
57 | DEL)
58 | set +o errexit
59 | ipam
60 | delete_pair_ns
61 | set -o errexit
62 | ;;
63 | *)
64 | echo "CNI_COMMAND=[ADD|DEL] only supported"
65 | exit 1
66 | ;;
67 | esac
68 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/cpumanager/test-cpumanager.yaml.tmpl:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: test-cpumanager-guaranteed-__runtimeclass__
6 | spec:
7 | restartPolicy: Never
8 | containers:
9 | - name: busy
10 | image: busybox
11 | command: [ "top" ]
12 | resources:
13 | limits:
14 | cpu: 1
15 | memory: 500Mi # For kata to run
16 | ---
17 | apiVersion: v1
18 | kind: Pod
19 | metadata:
20 | name: test-cpumanager-burstable-integer-limit-__runtimeclass__
21 | spec:
22 | restartPolicy: Never
23 | containers:
24 | - name: busy
25 | image: busybox
26 | command: [ "top" ]
27 | resources:
28 | requests:
29 | cpu: 1
30 | memory: 100Mi
31 | limits:
32 | cpu: 2
33 | memory: 500Mi
34 | ---
35 | apiVersion: v1
36 | kind: Pod
37 | metadata:
38 | name: test-cpumanager-burstable-float-limit-__runtimeclass__
39 | spec:
40 | restartPolicy: Never
41 | containers:
42 | - name: busy
43 | image: busybox
44 | command: [ "top" ]
45 | resources:
46 | requests:
47 | cpu: 500m
48 | memory: 100Mi
49 | limits:
50 | cpu: 1
51 | memory: 500Mi
52 | ---
53 | apiVersion: v1
54 | kind: Pod
55 | metadata:
56 | name: test-cpumanager-burstable-integer-__runtimeclass__
57 | spec:
58 | restartPolicy: Never
59 | containers:
60 | - name: busy
61 | image: busybox
62 | command: [ "top" ]
63 | resources:
64 | requests:
65 | cpu: 1
66 | memory: 100Mi
67 | ---
68 | apiVersion: v1
69 | kind: Pod
70 | metadata:
71 | name: test-cpumanager-burstable-float-__runtimeclass__
72 | spec:
73 | restartPolicy: Never
74 | containers:
75 | - name: busy
76 | image: busybox
77 | command: [ "top" ]
78 | resources:
79 | requests:
80 | cpu: 500m
81 | memory: 100Mi
82 | ---
83 | apiVersion: v1
84 | kind: Pod
85 | metadata:
86 | name: test-cpumanager-besteffort-__runtimeclass__
87 | spec:
88 | restartPolicy: Never
89 | containers:
90 | - name: busy
91 | image: busybox
92 | command: [ "top" ]
93 |
--------------------------------------------------------------------------------
/metrics/collectd/collectd.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 |
7 | THIS_FILE=$(readlink -f ${BASH_SOURCE[0]})
8 | COLLECTD_DIR=${THIS_FILE%/*}
9 |
10 | collectd_pod="collectd"
11 |
12 | init_stats() {
13 | local wait_time=$1
14 |
15 | # create collectd-config configmap, delete old if there is one
16 | kubectl get configmap collectd-config >/dev/null 2>&1 && kubectl delete configmap collectd-config
17 | kubectl create configmap collectd-config --from-file=${COLLECTD_DIR}/collectd.conf
18 |
19 | # if there is collectd daemonset already running, delete it
20 | # to make sure that the latest configmap will be used.
21 | kubectl get daemonset collectd >/dev/null 2>&1 && kubectl delete daemonset --wait=true --timeout=${delete_wait_time}s "${collectd_pod}"
22 |
23 | # Launch our stats gathering pod
24 | kubectl apply -f ${COLLECTD_DIR}/${collectd_pod}.yaml
25 | kubectl rollout status --timeout=${wait_time}s daemonset/${collectd_pod}
26 |
27 | # clear existing collectd output
28 | while read -u 3 name node; do
29 | kubectl exec -ti $name -- sh -c "rm -rf /mnt/opt/collectd/run/localhost/*"
30 | done 3< <(kubectl get pods --selector name=collectd-pods -o json | jq -r '.items[] | "\(.metadata.name) \(.spec.nodeName)"')
31 |
32 | # attempting to provide buffer for collectd to be installed and running,
33 | # and CPU collection to build adequate history
34 | sleep 12
35 | }
36 |
37 | cleanup_stats() {
38 | # attempting to provide buffer for collectd CPU collection to record adequate history
39 | sleep 6
40 |
41 | # get logs before shutting down stats daemonset
42 | while read -u 3 name node; do
43 | kubectl exec -ti $name -- sh -c "cd /mnt/opt/collectd/run; rm -f ../localhost.tar.gz; tar -czvf ../localhost.tar.gz localhost"
44 | kubectl cp $name:/mnt/opt/collectd/localhost.tar.gz ${RESULT_DIR}/${node}.tar.gz
45 | kubectl exec -ti $name -- sh -c "rm -rf /mnt/opt/collectd/run"
46 | done 3< <(kubectl get pods --selector name=collectd-pods -o json | jq -r '.items[] | "\(.metadata.name) \(.spec.nodeName)"')
47 |
48 | kubectl delete daemonset --wait=true --timeout=${delete_wait_time}s "${collectd_pod}" || true
49 |
50 | # remove configmap
51 | kubectl delete configmap collectd-config
52 | }
53 |
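54 | # Sketch of typical usage from a test script; init_stats and cleanup_stats
55 | # expect delete_wait_time and RESULT_DIR to be set by the caller:
56 | #   source "${COLLECTD_DIR}/collectd.bash"
57 | #   init_stats "${wait_time}"
58 | #   ... run the workload being measured ...
59 | #   cleanup_stats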
--------------------------------------------------------------------------------
/clr-k8s-examples/containerd_devmapper_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | sudo rm -rf /var/lib/containerd/devmapper/data-disk.img
8 | sudo rm -rf /var/lib/containerd/devmapper/meta-disk.img
9 | sudo mkdir -p /var/lib/containerd/devmapper
10 | sudo truncate --size 10G /var/lib/containerd/devmapper/data-disk.img
11 | sudo truncate --size 10G /var/lib/containerd/devmapper/meta-disk.img
12 |
13 | sudo mkdir -p /etc/systemd/system
14 |
15 | cat<

--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/README.md:
--------------------------------------------------------------------------------
8 | > NOTE: SR-IOV devices are not necessary to test multi-network capability
9 |
10 | ### Customization
11 |
12 | The device plugin registers the SR-IOV enabled devices on the host, as specified by the
13 | `selectors` in [sriov-conf.yaml](sriov-conf.yaml). A helper [systemd unit](systemd/sriov.service)
14 | file is provided, which enables SR-IOV for those devices. More config options
15 | are listed [here](https://github.com/intel/sriov-network-device-plugin#configurations).
16 |
17 | ### Pre-req (SR-IOV only)
18 |
19 | On each SR-IOV node, make sure `VT-d` is enabled in the BIOS and `intel_iommu=on` is on the kernel command line.
20 | Set up systemd to bring up VFs on designated interfaces, bound to the network driver or `vfio-pci`
21 |
22 | ```bash
23 | # Make sure vfio-pci is loaded on boot
24 | echo 'vfio-pci' | sudo tee /etc/modules-load.d/sriov.conf
25 | sudo systemctl restart systemd-modules-load.service
26 |
27 | sudo cp systemd/sriov.sh /usr/bin/sriov.sh
28 | sudo cp systemd/sriov.service /etc/systemd/system/
29 | sudo systemctl daemon-reload
30 | sudo systemctl enable --now sriov.service
31 | ```
32 |
33 | ### Install
34 |
35 | To install and configure `multus-cni` on all nodes, along with
36 | `sriov-cni`, `vfioveth-cni` and `sriov-network-device-plugin`
37 |
38 | ```bash
39 | kubectl apply -f .
40 | kubectl get nodes -o json | jq '.items[].status.allocatable' # should list "intel.com/sriov_*"
41 | ```
42 |
43 | ## Tests
44 |
45 | ### Default only
46 |
47 | To test if default connectivity is working
48 |
49 | ```bash
50 | kubectl apply -f test/pod.yaml
51 | kubectl exec test -- ip a # should see one interface only
52 | ```
53 |
54 | ### Bridge
55 |
56 | To test multus with second interface created by `bridge` plugin
57 |
58 | ```bash
59 | kubectl apply -f test/bridge
60 | kubectl exec test-bridge -- ip a # should see two interfaces
61 | ip a show mynet # bridge created on host if it doesn't exist already
62 | ```
63 |
64 | ### SR-IOV
65 |
66 | To test multus with second interface created by `sriov` plugin
67 |
68 | ```bash
69 | kubectl apply -f test/sriov
70 |
71 | kubectl exec test-sriov -- ip a # second interface is a VF
72 |
73 | kubectl exec test-sriov-dpdk -- ip a # veth pair with details of VF
74 | kubectl exec test-sriov-dpdk -- ls -l /dev/vfio
75 | ```
76 |
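77 | To confirm on the host that the VFs were created, and (when `sriov.sh` was run
78 | with `-b`) that they are bound to `vfio-pci` (the interface name below is an
79 | example):
80 |
81 | ```bash
82 | cat /sys/class/net/ens785f0/device/sriov_numvfs
83 | ls /sys/bus/pci/drivers/vfio-pci
84 | ```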
--------------------------------------------------------------------------------
/clr-k8s-examples/vagrant.md:
--------------------------------------------------------------------------------
1 | # Detailed Vagrant installation steps for different distros
2 |
3 | * [Ubuntu](#install-vagrant-on-ubuntu)
4 | * [Clear Linux](#install-vagrant-on-clear-linux)
5 |
6 | ## Install vagrant on Ubuntu
7 |
8 | On Ubuntu Bionic, run the following commands.
9 | Install dependencies and prepare the system
10 | ```bash
11 | sudo apt-get update
12 | sudo apt-get install gcc make
13 | sudo apt-get install qemu qemu-kvm libvirt-bin ebtables dnsmasq-base virt-top libguestfs-tools virtinst bridge-utils
14 | sudo apt-get install libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
15 | sudo modprobe vhost_net
16 | sudo lsmod | grep vhost
17 | echo "vhost_net" | sudo tee -a /etc/modules
18 | ```
19 | Download the latest Debian package from https://www.vagrantup.com/downloads.html and install it, followed by the vagrant-libvirt plugin
20 | ```bash
21 | sudo dpkg -i vagrant_${VER}_x86_64.deb
22 | vagrant plugin install vagrant-libvirt
23 | ```
24 | Make sure to add your user to the libvirt group
25 | ```bash
26 | sudo usermod --append --groups libvirt $(whoami)
27 | ```
28 |
29 | Run vagrant
30 | ```bash
31 | vagrant up --provider=libvirt
32 | ```
33 |
34 | Note, in order to spin up vagrant with different CPUS and MEMORY values for individual VMs:
35 | ```bash
36 | CPUS=4 MEMORY=8096 vagrant up clr-01 --provider=libvirt
37 | ```
38 |
39 | Note, vagrant installation steps were derived from:
40 | * https://computingforgeeks.com/install-kvm-centos-rhel-ubuntu-debian-sles-arch/
41 | * https://computingforgeeks.com/using-vagrant-with-libvirt-on-linux/
42 | * https://computingforgeeks.com/install-latest-vagrant-on-ubuntu-18-04-debian-9-kali-linux/
43 | * https://github.com/vagrant-libvirt/vagrant-libvirt/blob/master/README.md
44 |
45 | ## Install vagrant on Clear Linux
46 |
47 | On Clear Linux, run these commands
48 | ```bash
49 | sudo swupd update
50 | ```
51 | Make sure all the prerequisite packages are installed
52 | ```bash
53 | sudo swupd bundle-add unzip rsync wget kvm-host
54 | ```
55 | Now, run the following scripts
56 | ```bash
57 | wget https://raw.githubusercontent.com/AntonioMeireles/ClearLinux-packer/master/extras/clearlinux/setup/libvirtd.sh
58 | chmod +x libvirtd.sh
59 | ./libvirtd.sh
60 | wget https://raw.githubusercontent.com/AntonioMeireles/ClearLinux-packer/master/extras/clearlinux/setup/vagrant.sh
61 | chmod +x vagrant.sh
62 | ./vagrant.sh
63 | ```
64 | Check if vagrant is installed successfully
65 | ```bash
66 | vagrant --version
67 | ```
68 | Run vagrant
69 | ```bash
70 | vagrant up --provider=libvirt
71 | ```
72 | You can check the vagrant status using the following command
73 | ```bash
74 | vagrant status
75 | ```
76 |
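77 | To log into a node (named as in the earlier example) and tear the VMs down when finished
78 | ```bash
79 | vagrant ssh clr-01
80 | vagrant destroy -f
81 | ```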
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/e2e/run_e2e.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Runs upstream k8s e2e tests against existing cloud native basic cluster
4 | # Requires cluster to already be up
5 | # To specify a parameter for --ginkgo.focus as described below, provide a focus as the first argument to this script
6 | # https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md#building-kubernetes-and-running-the-tests
7 | # One example would be Feature:Performance. The script will add square brackets for you
8 | # For other examples of values, see
9 | # https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md#kinds-of-tests
10 |
11 | set -o errexit
12 | set -o pipefail
13 |
14 | if [ ! -z "${1:-}" ]
15 | then
16 | FOCUS=$1
17 | echo Running e2e tests where spec matches $1
18 | else
19 | echo Running all e2e tests, this will take a long time
20 | fi
21 |
22 | GO_INSTALLED=$(sudo swupd bundle-list | grep go-basic || true)
23 | if [ -z "$GO_INSTALLED" ]
24 | then
25 | echo Installing go-basic bundle
26 | sudo swupd bundle-add go-basic
27 | else
28 | echo Skipping go-basic bundle installation
29 | fi
30 |
31 | if [ -z "${GOPATH:-}" ] ; then GOPATH=$HOME/go; fi
32 | if [ -z "${GOBIN:-}" ] ; then GOBIN=$HOME/go/bin; fi
33 |
34 | echo Getting kubetest
35 | go get -u k8s.io/test-infra/kubetest
36 |
37 | cd $GOPATH/src/k8s.io
38 |
39 | if [ -d kubernetes ]
40 | then
41 | cd kubernetes
42 | echo Checking status of existing Kubernetes repo clone
43 | git status
44 | else
45 | echo Cloning upstream Kubernetes repo
46 | git clone https://github.com/kubernetes/kubernetes.git
47 | cd kubernetes
48 | fi
49 |
50 | PATH=$PATH:$GOBIN
51 |
52 | API_SERVER=$(kubectl config view -o jsonpath="{.clusters[?(@.name==\"kubernetes\")].cluster.server}")
53 | CLIENT_VERSION=$(kubectl version --short | grep -E 'Client' | sed 's/Client Version: //')
54 |
55 | echo Running kubetest
56 |
57 | if [ -z "${FOCUS:-}" ]
58 | then
59 | echo sudo -E kubetest --test --test_args="--kubeconfig=${HOME}/.kube/config --host=$API_SERVER" --extract=$CLIENT_VERSION --provider=local
60 | sudo -E kubetest --test --test_args="--kubeconfig=${HOME}/.kube/config --host=$API_SERVER" --extract=$CLIENT_VERSION --provider=local
61 | else
62 | echo sudo -E kubetest --test --test_args="--kubeconfig=${HOME}/.kube/config --host=$API_SERVER --ginkgo.focus=\[$FOCUS\]" --extract=$CLIENT_VERSION --provider=local
63 | sudo -E kubetest --test --test_args="--kubeconfig=${HOME}/.kube/config --host=$API_SERVER --ginkgo.focus=\[$FOCUS\]" --extract=$CLIENT_VERSION --provider=local
64 | fi
65 |
66 |
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/metrics_report.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | # Copyright (c) 2018-2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | ---
7 | \pagebreak
8 |
9 | # Introduction
10 | This report compares the metrics between multiple sets of data generated from
11 | the [cloud-native-setup report generation scripts](https://github.com/clearlinux/cloud-native-setup/metrics/report/README.md).
12 |
13 | This report was generated using the data from the **`r resultdirs`** results directories.
14 |
15 | \pagebreak
16 |
17 | # Runtime scaling
18 | This [test](https://github.com/clearlinux/cloud-native-setup/metrics/scaling/k8s_scale.sh)
19 | measures the system memory 'free' reduction, CPU idle %, free inodes, and pod boot time as
20 | it launches more and more idle `busybox` pods on a Kubernetes cluster.
21 |
22 | > Note: CPU % is measured as a system whole - 100% represents *all* CPUs on the node.
23 |
24 | ```{r scaling, echo=FALSE, fig.cap="K8S scaling", results='asis'}
25 | source('tidy_scaling.R')
26 | ```
27 |
28 | \pagebreak
29 |
30 | # Runtime parallel scaling
31 | This [test](https://github.com/clearlinux/cloud-native-setup/metrics/scaling/k8s_parallel.sh)
32 | measures the time taken to launch and delete pods in parallel using a deployment. The times
33 | are how long it takes for the whole deployment operation to complete.
34 |
35 | ```{r parallel, echo=FALSE, fig.cap="K8S parallel pods", results='asis'}
36 | source('parallel.R')
37 | ```
38 |
39 | \pagebreak
40 |
41 | # Runtime scaling rapid
42 | This [test](https://github.com/clearlinux/cloud-native-setup/metrics/scaling/k8s_scale_fast.sh)
43 | uses collectd to asynchronously measure CPU idle %, free memory, pod boot time, free inodes,
44 | and interface stats as it launches more and more idle `busybox` pods on a Kubernetes cluster.
45 |
46 | > Note: CPU % is measured as a system whole - 100% represents *all* CPUs on the node.
47 |
48 | ```{r collectd, echo=FALSE, fig.cap="K8S scaling collectd", results='asis'}
49 | source('collectd_scaling.R')
50 | ```
51 |
52 | \pagebreak
53 |
54 | # Test setup details
55 |
56 | This table describes the test system details, as derived from the information contained
57 | in the test results files.
58 |
59 |
60 | ```{r dut, echo=FALSE, fig.cap="System configuration details", results='asis'}
61 | source('dut-details.R')
62 | ```
63 |
64 | \pagebreak
65 |
66 | # Test setup node details
67 |
68 | This table describes details of the nodes within the Kubernetes cluster that were used for the test.
69 |
70 | ```{r node, echo=FALSE, fig.cap="Node information within Kubernetes cluster", results='asis'}
71 | source('node-info.R')
72 | ```
73 |
--------------------------------------------------------------------------------
/metrics/report/grabdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Run a set of the metrics tests to gather data to be used with the report
7 | # generator. The general idea is to have the tests configured to generate
8 | # useful, meaningful and repeatable (stable, with minimised variance) results.
9 | # If the tests have to be run more or longer to achieve that, then generally
10 | # that is fine - this test is not intended to be quick, it is intended to
11 | # be repeatable.
12 |
13 | # Note - no 'set -e' in this file - if one of the metrics tests fails
14 | # then we wish to continue to try the rest.
15 | # Finally at the end, in some situations, we explicitly exit with a
16 | # failure code if necessary.
17 |
18 | SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
19 | source "${SCRIPT_DIR}/../lib/common.bash"
20 | RESULTS_DIR=${SCRIPT_DIR}/../results
21 |
22 | # By default we run all the tests
23 | RUN_ALL=1
24 |
25 | help() {
26 | usage=$(cat << EOF
27 | Usage: $0 [-h] [options]
28 | Description:
29 | This script gathers a number of metrics for use in the
30 | report generation script. Which tests are run can be
31 | configured on the commandline. Specifically enabling
32 | individual tests will disable the 'all' option, unless
33 | 'all' is also specified last.
34 | Options:
35 | -a, Run all tests (default).
36 | -h, Print this help.
37 | -s, Run the scaling tests.
38 | EOF
39 | )
40 | echo "$usage"
41 | }
42 |
43 | # Set up the initial state
44 | init() {
45 | metrics_onetime_init
46 |
47 | local OPTIND
48 | while getopts "ahs" opt;do
49 | case ${opt} in
50 | a)
51 | RUN_ALL=1
52 | ;;
53 | h)
54 | help
55 | exit 0;
56 | ;;
57 | s)
58 | RUN_SCALING=1
59 | RUN_ALL=
60 | ;;
61 | ?)
62 | # parse failure
63 | help
64 | die "Failed to parse arguments"
65 | ;;
66 | esac
67 | done
68 | shift $((OPTIND-1))
69 | }
70 |
71 | run_scaling() {
72 | echo "Running scaling tests"
73 |
74 | (cd scaling; ./k8s_scale.sh)
75 | (cd scaling; ./k8s_parallel.sh)
76 | }
77 |
78 | # Execute metrics scripts
79 | run() {
80 | pushd "$SCRIPT_DIR/.."
81 |
82 | if [ -n "$RUN_ALL" ] || [ -n "$RUN_SCALING" ]; then
83 | run_scaling
84 | fi
85 |
86 | popd
87 | }
88 |
89 | finish() {
90 | echo "Now please create a suitably descriptively named subdirectory in"
91 | echo "$RESULTS_DIR and copy the .json results files into it before running"
92 | echo "this script again."
93 | }
94 |
95 | init "$@"
96 | run
97 | finish
98 |
99 |
--------------------------------------------------------------------------------
/clr-k8s-examples/node-problem-detector/overlays/v0.6.6/patch_configmap_rules.yaml:
--------------------------------------------------------------------------------
1 | # adds "mce" rule
2 | - op: add
3 | path: /data/kernel-monitor.json
4 | value: |
5 | {
6 | "plugin": "kmsg",
7 | "logPath": "/dev/kmsg",
8 | "lookback": "5m",
9 | "bufferSize": 10,
10 | "source": "kernel-monitor",
11 | "conditions": [
12 | {
13 | "type": "KernelDeadlock",
14 | "reason": "KernelHasNoDeadlock",
15 | "message": "kernel has no deadlock"
16 | },
17 | {
18 | "type": "ReadonlyFilesystem",
19 | "reason": "FilesystemIsReadOnly",
20 | "message": "Filesystem is read-only"
21 | }
22 | ],
23 | "rules": [
24 | {
25 | "type": "temporary",
26 | "reason": "Hardware Error",
27 | "pattern": "mce:.*"
28 | },
29 | {
30 | "type": "temporary",
31 | "reason": "OOMKilling",
32 | "pattern": "Kill process \\d+ (.+) score \\d+ or sacrifice child\\nKilled process \\d+ (.+) total-vm:\\d+kB, anon-rss:\\d+kB, file-rss:\\d+kB.*"
33 | },
34 | {
35 | "type": "temporary",
36 | "reason": "TaskHung",
37 | "pattern": "task \\S+:\\w+ blocked for more than \\w+ seconds\\."
38 | },
39 | {
40 | "type": "temporary",
41 | "reason": "UnregisterNetDevice",
42 | "pattern": "unregister_netdevice: waiting for \\w+ to become free. Usage count = \\d+"
43 | },
44 | {
45 | "type": "temporary",
46 | "reason": "KernelOops",
47 | "pattern": "BUG: unable to handle kernel NULL pointer dereference at .*"
48 | },
49 | {
50 | "type": "temporary",
51 | "reason": "KernelOops",
52 | "pattern": "divide error: 0000 \\[#\\d+\\] SMP"
53 | },
54 | {
55 | "type": "permanent",
56 | "condition": "KernelDeadlock",
57 | "reason": "AUFSUmountHung",
58 | "pattern": "task umount\\.aufs:\\w+ blocked for more than \\w+ seconds\\."
59 | },
60 | {
61 | "type": "permanent",
62 | "condition": "KernelDeadlock",
63 | "reason": "DockerHung",
64 | "pattern": "task docker:\\w+ blocked for more than \\w+ seconds\\."
65 | },
66 | {
67 | "type": "permanent",
68 | "condition": "ReadonlyFilesystem",
69 | "reason": "FilesystemIsReadOnly",
70 | "pattern": "Remounting filesystem read-only"
71 | }
72 | ]
73 | }
74 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/test/sriov/2-pod-dpdk-ver.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: dpdk-1711
6 | annotations:
7 | k8s.v1.cni.cncf.io/networks: sriov-net-dpdk
8 | spec:
9 | restartPolicy: Never
10 | containers:
11 | - name: dpdk
12 | image: krsna1729/dpdk:17.11
13 | stdin: true
14 | tty: true
15 | command: [ "/bin/bash", "-c"]
16 | args:
17 | - ls -l /dev/vfio;
18 | testpmd --no-huge -m 2048 -- --stats-period=10 --nb-port=1 --port-topology=chained --auto-start --total-num-mbufs=2048 --forward-mode=macswap;
19 | securityContext:
20 | capabilities:
21 | add:
22 | - IPC_LOCK
23 | resources:
24 | limits:
25 | intel.com/sriov_vfio: '1'
26 | ---
27 | apiVersion: v1
28 | kind: Pod
29 | metadata:
30 | name: dpdk-1811
31 | annotations:
32 | k8s.v1.cni.cncf.io/networks: sriov-net-dpdk
33 | spec:
34 | restartPolicy: Never
35 | containers:
36 | - name: dpdk
37 | image: krsna1729/dpdk:18.11
38 | stdin: true
39 | tty: true
40 | command: [ "/bin/bash", "-c"]
41 | args:
42 | - ls -l /dev/vfio;
43 | testpmd --no-huge -m 2048 -- --stats-period=10 --nb-port=1 --port-topology=chained --auto-start --total-num-mbufs=2048 --forward-mode=macswap;
44 | securityContext:
45 | capabilities:
46 | add:
47 | - IPC_LOCK
48 | resources:
49 | limits:
50 | intel.com/sriov_vfio: '1'
51 | ---
52 | apiVersion: v1
53 | kind: Pod
54 | metadata:
55 | name: dpdk-1911
56 | annotations:
57 | k8s.v1.cni.cncf.io/networks: sriov-net-dpdk
58 | spec:
59 | restartPolicy: Never
60 | containers:
61 | - name: dpdk
62 | image: krsna1729/dpdk:19.11
63 | stdin: true
64 | tty: true
65 | command: [ "/bin/bash", "-c"]
66 | args:
67 | - ls -l /dev/vfio;
68 | testpmd --no-huge -m 2048 -- --stats-period=10 --nb-port=1 --port-topology=chained --auto-start --total-num-mbufs=2048 --forward-mode=macswap;
69 | securityContext:
70 | capabilities:
71 | add:
72 | - IPC_LOCK
73 | resources:
74 | limits:
75 | intel.com/sriov_vfio: '1'
76 | ---
77 | apiVersion: v1
78 | kind: Pod
79 | metadata:
80 | name: dpdk-2002
81 | annotations:
82 | k8s.v1.cni.cncf.io/networks: sriov-net-dpdk
83 | spec:
84 | restartPolicy: Never
85 | containers:
86 | - name: dpdk
87 | image: krsna1729/dpdk:20.02
88 | stdin: true
89 | tty: true
90 | command: [ "/bin/bash", "-c"]
91 | args:
92 | - ls -l /dev/vfio;
93 | testpmd --no-huge -m 2048 -- --stats-period=10 --nb-port=1 --port-topology=chained --auto-start --total-num-mbufs=2048 --forward-mode=macswap;
94 | securityContext:
95 | capabilities:
96 | add:
97 | - IPC_LOCK
98 | resources:
99 | limits:
100 | intel.com/sriov_vfio: '1'
101 |
--------------------------------------------------------------------------------
/clr-k8s-examples/tests/pvc/wordpress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress-mysql
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 3306
10 | selector:
11 | app: wordpress
12 | tier: mysql
13 | clusterIP: None
14 | ---
15 | apiVersion: v1
16 | kind: PersistentVolumeClaim
17 | metadata:
18 | name: mysql-pv-claim
19 | labels:
20 | app: wordpress
21 | spec:
22 | accessModes:
23 | - ReadWriteOnce
24 | resources:
25 | requests:
26 | storage: 1Gi
27 | ---
28 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
29 | kind: Deployment
30 | metadata:
31 | name: wordpress-mysql
32 | labels:
33 | app: wordpress
34 | spec:
35 | selector:
36 | matchLabels:
37 | app: wordpress
38 | tier: mysql
39 | strategy:
40 | type: Recreate
41 | template:
42 | metadata:
43 | labels:
44 | app: wordpress
45 | tier: mysql
46 | spec:
47 | containers:
48 | - image: mysql:5.6
49 | name: mysql
50 | env:
51 | - name: MYSQL_ROOT_PASSWORD
52 | value: password
53 | ports:
54 | - containerPort: 3306
55 | name: mysql
56 | volumeMounts:
57 | - name: mysql-persistent-storage
58 | mountPath: /var/lib/mysql
59 | volumes:
60 | - name: mysql-persistent-storage
61 | persistentVolumeClaim:
62 | claimName: mysql-pv-claim
63 | ---
64 | apiVersion: v1
65 | kind: Service
66 | metadata:
67 | name: wordpress
68 | labels:
69 | app: wordpress
70 | spec:
71 | ports:
72 | - port: 80
73 | selector:
74 | app: wordpress
75 | tier: frontend
76 | type: LoadBalancer
77 | ---
78 | apiVersion: v1
79 | kind: PersistentVolumeClaim
80 | metadata:
81 | name: wp-pv-claim
82 | labels:
83 | app: wordpress
84 | spec:
85 | accessModes:
86 | - ReadWriteOnce
87 | resources:
88 | requests:
89 | storage: 20Gi
90 | ---
91 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
92 | kind: Deployment
93 | metadata:
94 | name: wordpress
95 | labels:
96 | app: wordpress
97 | spec:
98 | selector:
99 | matchLabels:
100 | app: wordpress
101 | tier: frontend
102 | strategy:
103 | type: Recreate
104 | template:
105 | metadata:
106 | labels:
107 | app: wordpress
108 | tier: frontend
109 | spec:
110 | containers:
111 | - image: wordpress:4.8-apache
112 | name: wordpress
113 | env:
114 | - name: WORDPRESS_DB_HOST
115 | value: wordpress-mysql
116 | - name: WORDPRESS_DB_PASSWORD
117 | value: password
118 | ports:
119 | - containerPort: 80
120 | name: wordpress
121 | volumeMounts:
122 | - name: wordpress-persistent-storage
123 | mountPath: /var/www/html
124 | volumes:
125 | - name: wordpress-persistent-storage
126 | persistentVolumeClaim:
127 | claimName: wp-pv-claim
128 |
--------------------------------------------------------------------------------
/metrics/report/README.md:
--------------------------------------------------------------------------------
1 | * [cloud-native-setup metrics report generator](#cloud-native-setup-metrics-report-generator)
2 | * [Data gathering](#data-gathering)
3 | * [Report generation](#report-generation)
4 | * [Debugging and development](#debugging-and-development)
5 |
6 | # cloud-native-setup metrics report generator
7 |
8 | The files within this directory can be used to generate a 'metrics report'
9 | for Kubernetes.
10 |
11 | The primary workflow consists of two stages:
12 |
13 | 1) Run the provided report metrics data gathering scripts on the system(s) you wish
14 | to analyze.
15 | 2) Run the provided report generation script to analyze the data and generate a
16 | report file.
17 |
18 | ## Data gathering
19 |
20 | Data gathering is provided by the `grabdata.sh` script. When run, this script
21 | executes a set of tests from the `cloud-native-setup/metrics` directory. The JSON results files
22 | will be placed into the `cloud-native-setup/metrics/results` directory.
23 |
24 | Once the results are generated, create a suitably named subdirectory of
25 | `cloud-native-setup/metrics/results`, and move the JSON files into it.
26 |
27 | Repeat this process if you want to compare multiple sets of results. Note that the
28 | report generation scripts process all subfolders of `cloud-native-setup/metrics/results`
29 | when generating the report.
30 |
31 | You can restrict the subset of tests run by `grabdata.sh` via its command line parameters:
32 |
33 | | Option | Description |
34 | | ------ | ----------- |
35 | | -a | Run all tests (default) |
36 | | -s | Run the scaling tests |
37 | | -h | Print this help |
38 |
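For example, to run only the scaling tests:

```sh
$ ./grabdata.sh -s
```
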
39 | ## Report generation
40 |
41 | Report generation is provided by the `makereport.sh` script. By default this script
42 | processes all subdirectories of the `cloud-native-setup/metrics/results` directory to generate the report.
43 | To run in the default mode, execute the following:
44 |
45 | ```sh
46 | $ ./makereport.sh
47 | ```
48 |
49 | The report generation tool uses [Rmarkdown](https://github.com/rstudio/rmarkdown),
50 | [R](https://www.r-project.org/about.html) and [pandoc](https://pandoc.org/) to produce
51 | a PDF report. To avoid the need for all users to set up a working environment
52 | with all the necessary tooling, the `makereport.sh` script uses a Docker image (built
53 | from the provided `Dockerfile`) with the environment pre-defined. Thus, you need to
54 | have Docker installed on your system in order to run the report generation.
55 |
56 | The resulting `metrics_report.pdf` is generated in the `output` subdirectory of the
57 | `report` directory.
58 |
59 | ## Debugging and development
60 |
61 | To aid in script development and debugging, the `makereport.sh` script offers a debug
62 | facility via the `-d` command line option. Using this option places you into a `bash`
63 | shell within the running container used to generate the report, whilst also
64 | mapping your host-side `R` scripts from the `report_dockerfile` subdirectory into the
65 | container, thus facilitating a 'live' edit/reload/run development cycle.
66 | From there you can examine the container environment and execute the generation scripts.
67 |
68 | E.g., to test the `tidy_scaling.R` script, you can execute:
69 |
70 | ```bash
71 | $ ./makereport.sh -d
72 | ...
73 | Successfully built eea7d6ac6fa7
74 | Successfully tagged metrics-report:latest
75 | root@:/# R
76 | > source('/inputdir/Env.R')
77 | > source('/scripts/tidy_scaling.R')
78 | ## Edit script on host, and re-load/run...
79 | > source('/scripts/tidy_scaling.R')
80 | ```
81 |
82 |
--------------------------------------------------------------------------------
/metrics/lib/common.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2017,2018,2019 Intel Corporation
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 |
7 | THIS_FILE=$(readlink -f ${BASH_SOURCE[0]})
8 | LIB_DIR=${THIS_FILE%/*}
9 | RESULT_DIR="${LIB_DIR}/../results"
10 |
11 | source ${LIB_DIR}/json.bash
12 | source ${LIB_DIR}/k8s-api.bash
13 | source ${LIB_DIR}/cpu-load.bash
14 | source /etc/os-release || source /usr/lib/os-release
15 |
16 | die() {
17 | local msg="$*"
18 | echo "ERROR: $msg" >&2
19 | exit 1
20 | }
21 |
22 | warn() {
23 | local msg="$*"
24 | echo "WARNING: $msg"
25 | }
26 |
27 | info() {
28 | local msg="$*"
29 | echo "INFO: $msg"
30 | }
31 |
32 | # This function checks existence of commands.
33 | # They can be received standalone or as an array, e.g.
34 | #
35 | # cmds=("cmd1" "cmd2")
36 | # check_cmds "${cmds[@]}"
37 | check_cmds()
38 | {
39 | local cmd req_cmds=( "$@" )
40 | for cmd in "${req_cmds[@]}"; do
41 | if ! command -v "$cmd" > /dev/null 2>&1; then
42 | die "command $cmd not available"
43 | fi
44 | echo "command: $cmd: yes"
45 | done
46 | }
47 |
48 | # Print a banner to the logs noting clearly which test
49 | # we are about to run
50 | test_banner()
51 | {
52 | echo -e "\n===== starting test [$1] ====="
53 | }
54 |
55 | # Initialise/verify the environment. This function performs the
56 | # minimal steps needed for metrics/test execution.
57 | init_env()
58 | {
59 | test_banner "${TEST_NAME}"
60 |
61 | cmd=("kubectl")
62 |
63 | # check dependencies
64 | check_cmds "${cmd[@]}"
65 |
66 | # We could try to clean the k8s cluster here... but that
67 | # might remove some pre-installed soak tests etc. that have
68 | # been deliberately injected into the cluster under test.
69 | }
70 |
71 | framework_init() {
72 | info "Initialising"
73 |
74 | check_cmds "${cmds[@]}"
75 |
76 | info "Checking k8s accessible"
77 | 	kubectl get nodes > /dev/null 2>&1
78 | 	if [ "$?" != 0 ]; then
79 | 		die "kubectl failed to get nodes"
80 | 	fi
81 |
82 | info $(get_num_nodes) "k8s nodes in 'Ready' state found"
83 |
84 | k8s_api_init
85 |
86 | # Launch our stats gathering pod
87 | if [ -n "$SMF_USE_COLLECTD" ]; then
88 | info "Setting up collectd"
89 | init_stats $wait_time
90 | fi
91 |
92 | # And now we can set up our results storage then...
93 | metrics_json_init "k8s"
94 | save_config
95 |
96 | # Initialise the cpu load generators now - after json init, as they may
97 | # produce some json results (config) data.
98 | cpu_load_init
99 |
100 | }
101 |
102 | framework_shutdown() {
103 | metrics_json_save
104 | k8s_api_shutdown
105 | cpu_load_shutdown
106 |
107 | if [ -n "$SMF_USE_COLLECTD" ]; then
108 | cleanup_stats
109 | fi
110 |
111 | }
112 |
113 | # finds elements in $1 that are not in $2
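# The result is returned via the global new_pods array, e.g. (with
# hypothetical pod names):
#   find_unique_pods "pod-a pod-b pod-c" "pod-b"
#   # -> new_pods=(pod-a pod-c)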
114 | find_unique_pods() {
115 | local list_a=$1
116 | local list_b=$2
117 |
118 | new_pods=()
119 | for a in $list_a; do
120 | local in_b=false
121 | for b in $list_b; do
122 | if [[ $a == $b ]]; then
123 | in_b=true
124 | break
125 | fi
126 | done
127 | if [[ $in_b == false ]]; then
128 | new_pods[${#new_pods[@]}]=$a
129 | fi
130 | done
131 | }
132 |
133 | # waits for process to complete within a given time range
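# Usage: waitForProcess <max wait secs> <poll secs> <command> <progress msg>
# e.g. (an illustrative check; any command that eventually succeeds works):
#   waitForProcess 60 5 "kubectl get pods | grep -q Running" "waiting for a Running pod"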
134 | waitForProcess(){
135 | wait_time="$1"
136 | sleep_time="$2"
137 | cmd="$3"
138 | proc_info_msg="$4"
139 |
140 | while [ "$wait_time" -gt 0 ]; do
141 | if eval "$cmd"; then
142 | return 0
143 | else
144 | info "$proc_info_msg"
145 | sleep "$sleep_time"
146 | wait_time=$((wait_time-sleep_time))
147 | fi
148 | done
149 | return 1
150 | }
151 |
--------------------------------------------------------------------------------
/metrics/lib/cpu-load.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 |
7 | # Helper routines for setting up a constant CPU load on the cluster/nodes
8 |
9 | CPULOAD_DIR=${THIS_FILE%/*}
10 |
11 | # Default to testing all cores
12 | SMF_CPU_LOAD_NODES_NCPU=${SMF_CPU_LOAD_NODES_NCPU:-0}
13 | # Default to 100% load (yes, this might kill your node)
14 | SMF_CPU_LOAD_NODES_PERCENT=${SMF_CPU_LOAD_NODES_PERCENT:-}
15 | # Default to not setting any limits or requests, so no cpuset limiting and
16 | # no cpu core pinning
17 | SMF_CPU_LOAD_NODES_LIMIT=${SMF_CPU_LOAD_NODES_LIMIT:-}
18 | SMF_CPU_LOAD_NODES_REQUEST=${SMF_CPU_LOAD_NODES_REQUEST:-}
19 |
20 | cpu_load_post_deploy_sleep=${cpu_load_post_deploy_sleep:-30}
21 |
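# Example configuration (see cpu-load.md for more examples):
#   export SMF_CPU_LOAD_NODES=true
#   export SMF_CPU_LOAD_NODES_PERCENT=50
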
22 | cpu_per_node_daemonset=cpu-load
23 | clean_up_cpu_per_node=false
24 |
25 | # Use a DaemonSet to place one cpu stressor on each node.
26 | cpu_per_node_init() {
27 | info "Generating per-node CPU load daemonset"
28 |
29 | local ds_template=${CPULOAD_DIR}/cpu_load_daemonset.yaml.in
30 | local ds_yaml=${ds_template%\.in}
31 |
32 | # Grab a copy of the template
33 | cp -f ${ds_template} ${ds_yaml}
34 |
35 | 	# If a setting is not defined, delete its relevant lines from
36 | 	# the YAML. Note, the template is laid out with the placeholder
37 | 	# tokens on the correct lines to ensure all related lines are
38 | 	# deleted together.
39 | if [ -z "$SMF_CPU_LOAD_NODES_NCPU" ]; then
40 | sed -i '/CPU_NCPU/d' ${ds_yaml}
41 | fi
42 |
43 | if [ -z "${SMF_CPU_LOAD_NODES_PERCENT}" ]; then
44 | sed -i '/CPU_PERCENT/d' ${ds_yaml}
45 | fi
46 |
47 | if [ -z "${SMF_CPU_LOAD_NODES_LIMIT}" ]; then
48 | sed -i '/CPU_LIMIT/d' ${ds_yaml}
49 | fi
50 |
51 | if [ -z "${SMF_CPU_LOAD_NODES_REQUEST}" ]; then
52 | sed -i '/CPU_REQUEST/d' ${ds_yaml}
53 | fi
54 |
55 | # And then finally replace all the remaining defined parts with the
56 | # real values.
57 | sed -i \
58 | -e "s|@CPU_NCPU@|${SMF_CPU_LOAD_NODES_NCPU}|g" \
59 | -e "s|@CPU_PERCENT@|${SMF_CPU_LOAD_NODES_PERCENT}|g" \
60 | -e "s|@CPU_LIMIT@|${SMF_CPU_LOAD_NODES_LIMIT}|g" \
61 | -e "s|@CPU_REQUEST@|${SMF_CPU_LOAD_NODES_REQUEST}|g" \
62 | ${ds_yaml}
63 |
64 | # Launch the daemonset...
65 | info "Deploying cpu-load-per-node daemonset"
66 | kubectl apply -f ${ds_yaml}
67 | kubectl rollout status --timeout=${wait_time}s daemonset/${cpu_per_node_daemonset}
68 | clean_up_cpu_per_node=yes
69 | info "cpu-load-per-node daemonset Deployed"
70 | if [ -n "$cpu_load_post_deploy_sleep" ]; then
71 | info "Sleeping ${cpu_load_post_deploy_sleep}s for cpu-load to settle"
72 | sleep ${cpu_load_post_deploy_sleep}
73 | fi
74 |
75 | # And store off our config into the JSON results
76 | metrics_json_start_array
77 | local json="$(cat << EOF
78 | {
79 | "LOAD_NODES_NCPU": "${SMF_CPU_LOAD_NODES_NCPU}",
80 | "LOAD_NODES_PERCENT": "${SMF_CPU_LOAD_NODES_PERCENT}",
81 | "LOAD_NODES_LIMIT": "${SMF_CPU_LOAD_NODES_LIMIT}",
82 | "LOAD_NODES_REQUEST": "${SMF_CPU_LOAD_NODES_REQUEST}"
83 | }
84 | EOF
85 | )"
86 | metrics_json_add_array_element "$json"
87 | metrics_json_end_array "cpu-load"
88 | }
89 |
90 | cpu_load_init() {
91 | info "Check if we need CPU load generators..."
92 | 	# This defaults to off (not defined), unless the higher-level test requests it.
93 | if [ -n "$SMF_CPU_LOAD_NODES" ]; then
94 | info "Initialising per-node CPU load"
95 | cpu_per_node_init
96 | fi
97 | }
98 |
99 | cpu_load_shutdown() {
100 | if [ "$clean_up_cpu_per_node" = "yes" ]; then
101 | info "Cleaning up cpu per node load daemonset"
102 | kubectl delete daemonset --wait=true --timeout=${delete_wait_time}s "${cpu_per_node_daemonset}" || true
103 | fi
104 | }
105 |
--------------------------------------------------------------------------------
/metrics/lib/cpu-load.md:
--------------------------------------------------------------------------------
1 | # `cpu-load` stack stresser
2 |
3 | The `cpu-load` stress functionality of the scaling framework allows you to optionally add a constant CPU stress
4 | load to the cluster under test whilst the tests are running. This aids analysis of the impact of CPU load.
5 |
6 | The `cpu-load` functionality utilises the [`stress-ng`](https://kernel.ubuntu.com/git/cking/stress-ng.git/) tool
7 | to generate the CPU load. Some of the configuration parameters are taken directly from the `stress-ng` command line.
8 |
9 | ## Configuration
10 |
11 | `cpu-load` is configured via a number of environment variables:
12 |
20 | | Variable | Description | Default |
21 | | -------- | ----------- | ------- |
22 | | `SMF_CPU_LOAD_NODES` | Set to non-empty to deploy `cpu-load` stressor | unset (off) |
23 | | `SMF_CPU_LOAD_NODES_NCPU` | Number of stressor threads to launch per node | 0 (one per cpu) |
24 | | `SMF_CPU_LOAD_NODES_PERCENT` | Percentage of CPU to load | unset (100%) |
25 | | `SMF_CPU_LOAD_NODES_LIMIT` | k8s cpu resource limit to set | unset (none) |
26 | | `SMF_CPU_LOAD_NODES_REQUEST` | k8s cpu resource request to set | unset (none) |
27 | | `cpu_load_post_deploy_sleep` | Seconds to sleep for `cpu-load` deployment to settle | 30 |
28 |
29 | `SMF_CPU_LOAD_NODES` must be set to a non-empty string to enable the `cpu-load` functionality. `cpu-load` uses
30 | a DaemonSet to deploy one single-container `stress-ng` pod to each active node in the cluster.
31 |
32 |
33 | Any of the `SMF_CPU_LOAD_NODES_*` variables can be set, or unset, and the DaemonSet pods will be configured
34 | accordingly.
35 |
36 | ## Examples
37 |
38 | The combinations of settings available allow a lot of flexibility. Below are some common example setups:
39 |
40 | ### 50% CPU load on all cores of all nodes (`stress-ng`)
41 |
42 | Here we allow `stress-ng` to spawn workers to cover all the CPUs on each node, but ask it to restrict its
43 | bandwidth use to 50% of the CPU. We do not use the k8s limits.
44 |
45 | ```bash
46 | export SMF_CPU_LOAD_NODES=true
47 | #export SMF_CPU_LOAD_NODES_NCPU=
48 | export SMF_CPU_LOAD_NODES_PERCENT=50
49 | #export SMF_CPU_LOAD_NODES_LIMIT=999m
50 | #export SMF_CPU_LOAD_NODES_REQUEST=999m
51 | ```
52 |
53 | ### 50% CPU load on 1 un-pinned core of all nodes (k8s `limits`)
54 |
55 | Here we set `stress-ng` to run a single worker thread at 100% CPU, but use the k8s resource limits to restrict
56 | actual CPU usage to 50%. Because the k8s limit and request are not whole integer units, if the static policy is
57 | in place on the k8s cluster, the pods will be classified as Guaranteed QoS, but will *not* get pinned to a specific
58 | cpuset.
59 |
60 | ```bash
61 | export SMF_CPU_LOAD_NODES=true
62 | export SMF_CPU_LOAD_NODES_NCPU=1
63 | export SMF_CPU_LOAD_NODES_PERCENT=100
64 | export SMF_CPU_LOAD_NODES_LIMIT=500m
65 | export SMF_CPU_LOAD_NODES_REQUEST=500m
66 | ```
67 |
68 | ### 50% CPU load pinned to 1 core, on all nodes
69 |
70 | Here we set `stress-ng` to run a single worker thread at 50% CPU, and use the k8s resource limits to classify the
71 | pod as Guaranteed. As we are using whole integer units of CPU resource requests, if the static policy manager is
72 | in effect, the thread will be pinned to a single-cpu cpuset.
73 |
74 | ```bash
75 | export SMF_CPU_LOAD_NODES=true
76 | export SMF_CPU_LOAD_NODES_NCPU=1
77 | export SMF_CPU_LOAD_NODES_PERCENT=50
78 | export SMF_CPU_LOAD_NODES_LIMIT=1
79 | export SMF_CPU_LOAD_NODES_REQUEST=1
80 | ```
81 |
82 |
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/node-info.R:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env Rscript
2 | # Copyright (c) 2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Display details for the 'Nodes within Kubernetes cluster'.
7 |
8 | suppressMessages(suppressWarnings(library(tidyr))) # for gather().
9 | library(tibble)
10 | suppressMessages(suppressWarnings(library(plyr))) # rbind.fill
11 | # So we can plot multiple graphs
12 | library(gridExtra) # together.
13 | suppressMessages(suppressWarnings(library(ggpubr))) # for ggtexttable.
14 | suppressMessages(library(jsonlite)) # to load the data.
15 |
16 | render_node_info <- function()
17 | {
18 | # A list of all the known results files we might find the information inside.
19 | resultsfiles=c(
20 | "k8s-scaling.json"
21 | )
22 |
23 | stats=c()
24 | stats_names=c()
25 | datasetname=c()
26 | complete_data=c()
27 | max_char_name_node=18
28 |
29 | 	# list collecting the per-node dirstats rows
30 | dirstats_list=list()
31 | j=1
32 |
33 | # For each set of results
34 | for (currentdir in resultdirs) {
35 | dirstats=c()
36 | for (resultsfile in resultsfiles) {
37 | fname=paste(inputdir, currentdir, resultsfile, sep="/")
38 | if ( !file.exists(fname)) {
39 | next
40 | }
41 |
42 | # Derive the name from the test result dirname
43 | datasetname=basename(currentdir)
44 |
45 | # Import the data
46 | fdata=fromJSON(fname)
47 |
48 | if (length(fdata$'kubectl-version') != 0 ) {
49 | numnodes= nrow(fdata$'kubectl-get-nodes'$items)
50 | for (i in 1:numnodes) {
51 | node_i=fdata$'kubectl-get-nodes'$items[i,]
52 | node_info=fdata$'socketsPerNode'[i,]
53 |
54 | # Substring node name so it fits properly into final table
55 | node_name=node_i$metadata$name
56 | if ( nchar(node_name) >= max_char_name_node) {
57 | dirstats=tibble("Node \nname"=as.character(substring(node_name, 1, max_char_name_node)))
58 | } else {
59 | dirstats=tibble("Node \nname"=as.character(node_name))
60 | }
61 |
62 | dirstats=cbind(dirstats, "CPUs"=as.character(node_i$status$capacity$cpu))
63 | dirstats=cbind(dirstats, "Memory"=as.character(node_i$status$capacity$memory))
64 | dirstats=cbind(dirstats, "Max \nPods"=as.character(node_i$status$capacity$pods))
65 | dirstats=cbind(dirstats, "Count \nsockets"=as.character(node_info$num_sockets))
66 | dirstats=cbind(dirstats, "Have \nhypervisor"=as.character(node_info$hypervisor))
67 |
68 | dirstats=cbind(dirstats, "kernel"=as.character(node_i$status$nodeInfo$kernelVersion))
69 | dirstats=cbind(dirstats, "OS"=as.character(node_i$status$nodeInfo$osImage))
70 | dirstats=cbind(dirstats, "Test"=as.character(datasetname))
71 |
72 | dirstats_list[[j]]=dirstats
73 | j=j+1
74 | }
75 | complete_data = do.call(rbind, dirstats_list)
76 | }
77 | }
78 |
79 | if ( length(complete_data) == 0 ) {
80 | cat(paste("No valid data found for directory ", currentdir, "\n\n"))
81 | }
82 |
83 | # use plyr rbind.fill so we can combine disparate version info frames
84 | stats=rbind.fill(stats, complete_data)
85 | stats_names=rbind(stats_names, datasetname)
86 | }
87 |
88 | if ( length(stats_names) == 0 ) {
89 | cat("No node stats found\n\n");
90 | return()
91 | }
92 |
93 | # Build us a text table of numerical results
94 | # Set up as left hand justify, so the node data indent renders.
95 | tablefontsize=8
96 | tbody.style = tbody_style(hjust=0, x=0.1, size=tablefontsize)
97 | stats_plot = suppressWarnings(ggtexttable(data.frame(complete_data, check.names=FALSE),
98 | theme=ttheme(base_size=tablefontsize, tbody.style=tbody.style),
99 | rows=NULL))
100 |
101 | # It may seem odd doing a grid of 1x1, but it should ensure we get a uniform format and
102 | # layout to match the other charts and tables in the report.
103 | master_plot = grid.arrange(stats_plot,
104 | nrow=1,
105 | ncol=1 )
106 | }
107 |
108 | render_node_info()
109 |
--------------------------------------------------------------------------------
/metrics/report/makereport.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Take the data found in subdirectories of the metrics 'results' directory,
7 | # and turn them into a PDF report. Use a Dockerfile containing all the tooling
8 | # and scripts we need to do that.
9 |
10 | set -e
11 |
12 | SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
13 | source "${SCRIPT_PATH}/../lib/common.bash"
14 |
15 | IMAGE="${IMAGE:-metrics-report}"
16 | DOCKERFILE="${SCRIPT_PATH}/report_dockerfile/Dockerfile"
17 |
18 | HOSTINPUTDIR="${SCRIPT_PATH}/../results"
19 | RENVFILE="${HOSTINPUTDIR}/Env.R"
20 | HOSTOUTPUTDIR="${SCRIPT_PATH}/output"
21 |
22 | GUESTINPUTDIR="/inputdir/"
23 | GUESTOUTPUTDIR="/outputdir/"
24 |
25 | # If in debugging mode, we also map in the scripts dir so you can
26 | # dynamically edit and re-load them at the R prompt
27 | HOSTSCRIPTDIR="${SCRIPT_PATH}/report_dockerfile"
28 | GUESTSCRIPTDIR="/scripts/"
29 |
30 | # This function performs a docker build on the image names
31 | # passed in, to ensure that we have the latest changes from
32 | # the dockerfiles
33 | build_dockerfile_image()
34 | {
35 | local image="$1"
36 | local dockerfile_path="$2"
37 | local dockerfile_dir=${2%/*}
38 |
39 | echo "docker building $image"
40 | if ! docker build --label "$image" --tag "${image}" -f "$dockerfile_path" "$dockerfile_dir"; then
41 | die "Failed to docker build image $image"
42 | fi
43 | }
44 |
45 | # This function checks whether the existing image version label
46 | # matches the version recorded in the Dockerfile, and rebuilds the
47 | # image if it is missing or out of date
48 | check_dockerfiles_images()
49 | {
50 | local image="$1"
51 | local dockerfile_path="$2"
52 |
53 | if [ -z "$image" ] || [ -z "$dockerfile_path" ]; then
54 | die "Missing image or dockerfile path variable"
55 | fi
56 |
57 | 	# Rebuild if the image is missing, or its version label does not match the Dockerfile
58 | check_image=$(docker images "$image" -q)
59 | if [ -n "$check_image" ]; then
60 | # Check image label
61 | check_image_version=$(docker image inspect $image | grep -w DOCKERFILE_VERSION | head -1 | cut -d '"' -f4)
62 | 		if [ -z "$check_image_version" ]; then
63 | 			echo "$image has no DOCKERFILE_VERSION label, rebuilding"
64 | build_dockerfile_image "$image" "$dockerfile_path"
65 | else
66 | # Check dockerfile label
67 | dockerfile_version=$(grep DOCKERFILE_VERSION $dockerfile_path | cut -d '"' -f2)
68 | if [ "$dockerfile_version" != "$check_image_version" ]; then
69 | echo "$dockerfile_version is not equal to $check_image_version"
70 | build_dockerfile_image "$image" "$dockerfile_path"
71 | fi
72 | fi
73 | else
74 | build_dockerfile_image "$image" "$dockerfile_path"
75 | fi
76 | }
77 |
78 | setup() {
79 | echo "Checking subdirectories"
80 | check_subdir="$(ls -dx ${HOSTINPUTDIR}/*/ 2> /dev/null | wc -l)"
81 | if [ $check_subdir -eq 0 ]; then
82 | die "No subdirs in [${HOSTINPUTDIR}] to read results from."
83 | fi
84 |
85 | echo "Checking Dockerfile"
86 | check_dockerfiles_images "$IMAGE" "$DOCKERFILE"
87 |
88 | 	mkdir -p "$HOSTOUTPUTDIR"
89 |
90 | echo "inputdir=\"${GUESTINPUTDIR}\"" > ${RENVFILE}
91 | echo "outputdir=\"${GUESTOUTPUTDIR}\"" >> ${RENVFILE}
92 |
93 | 	# A bit of a hack to get an R-syntax list of dirs to process.
94 | 	# Use short relative names rather than host-side dir paths.
95 | resultdirs="$(cd ${HOSTINPUTDIR}; ls -dx */)"
96 | resultdirslist=$(echo ${resultdirs} | sed 's/ \+/", "/g')
97 | echo "resultdirs=c(" >> ${RENVFILE}
98 | echo " \"${resultdirslist}\"" >> ${RENVFILE}
99 | echo ")" >> ${RENVFILE}
100 | }
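
# As an illustration, for two results subdirectories named run1/ and run2/,
# setup() generates an Env.R of the form:
#   inputdir="/inputdir/"
#   outputdir="/outputdir/"
#   resultdirs=c(
#    "run1/", "run2/"
#   )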
101 |
102 | run() {
103 | docker run ${extra_opts} --rm -v ${HOSTINPUTDIR}:${GUESTINPUTDIR} -v ${HOSTOUTPUTDIR}:${GUESTOUTPUTDIR} ${extra_volumes} ${IMAGE} ${extra_command}
104 | ls -la ${HOSTOUTPUTDIR}/*
105 | }
106 |
107 | main() {
108 |
109 | local OPTIND
110 | while getopts "d" opt;do
111 | case ${opt} in
112 | d)
113 | # In debug mode, run a shell instead of the default report generation
114 | extra_command="bash"
115 | extra_volumes="-v ${HOSTSCRIPTDIR}:${GUESTSCRIPTDIR}"
116 | extra_opts="-ti"
117 | ;;
118 | esac
119 | done
120 | shift $((OPTIND-1))
121 |
122 | setup
123 | run
124 | }
125 |
126 | main "$@"
127 |
128 |
--------------------------------------------------------------------------------
/metrics/README.md:
--------------------------------------------------------------------------------
1 | * [Metric testing for scaling on Kubernetes.](#metric-testing-for-scaling-on-kubernetes)
2 | * [Results storage and analysis](#results-storage-and-analysis)
* [Prerequisites](#prerequisites)
3 | * [Developers](#developers)
4 | * [Metrics gathering](#metrics-gathering)
5 | * [`collectd` statistics](#collectd-statistics)
6 | * [privileged statistics pods](#privileged-statistics-pods)
7 | * [Configuring constant 'loads'](#configuring-constant-loads)
8 |
9 | # Metric testing for scaling on Kubernetes.
10 |
11 | This folder contains tools to aid in measuring the scaling capabilities of
12 | Kubernetes clusters.
13 |
14 | Primarily these tools were designed to measure the scaling of a large number of pods on a single node, but
15 | the code is structured to handle multiple nodes, and may also be useful in that scenario.
16 |
17 | The tools tend to take one of two forms:
18 |
19 | - Tools to launch jobs and take measurements
20 | - Tools to analyse results
21 |
22 | For more details, see individual sub-folders. A brief summary of available tools
23 | is below:
24 |
25 | | Folder | Description |
26 | | ---- | ----------- |
27 | | collectd | `collectd` based statistics/metrics gathering daemonset code |
28 | | lib | General library helper functions for forming and launching workloads, and storing results in a uniform manner to aid later analysis |
29 | | lib/[cpu-load*](lib/cpu-load.md) | Helper functions to enable CPU load generation on a cluster whilst under test |
30 | | [report](report/README.md) | Rmarkdown based report generator, used to produce a PDF comparison report of one or more sets of results |
31 | | [scaling](scaling/README.md) | Tests to measure scaling, such as linear or parallel launching of pods |
32 |
33 | ## Results storage and analysis
34 |
35 | The tools generate JSON formatted results files via the [`lib/json.bash`](lib/json.bash) functions. The `metrics_json_save()`
36 | function has the ability to also `curl` or `socat` the JSON results to a database defined
37 | by environment variables (see the file source for details). This method has been used to store results in
38 | Elasticsearch and InfluxDB databases for instance, but should be adaptable to use with any REST API that accepts
39 | JSON input.
40 |
41 | ## Prerequisites
42 |
43 | There are some basic prerequisites required in order to run the tests and process the results:
44 |
45 | * A Kubernetes cluster up and running (tested on v1.15.3).
46 | * `bc` and `jq` packages.
47 | * Docker (only for report generation).
48 |
49 | # Developers
50 |
51 | Below are some architecture and internal details of how the code is structured and configured. This will be
52 | helpful for improving, modifying or submitting fixes to the code base.
53 |
54 | ## Metrics gathering
55 |
56 | Metrics can be gathered using either a daemonset deployment of privileged pods used to gather statistics
57 | directly from the nodes using a combination of `mpstat`, `free` and `df`, or a daemonset deployment based
58 | around `collectd`. The general recommendation is to use the `collectd` based collection if possible: it
59 | is more efficient, as the system does not have to poll and wait for results, and thus executes the test
60 | cycle faster. The `collectd` results are collected asynchronously, and the report generator code later
61 | aligns the results with the pod execution in the timeline.
62 |
63 | ### `collectd` statistics
64 |
65 | The `collectd` based code can be found in the `collectd` subdirectory. It uses the `collectd` configuration
66 | found in the `collectd.conf` file to gather statistics, and store the results on the nodes themselves whilst
67 | tests are running. At the end of the test, the results are copied from the nodes and stored in the results
68 | directory for later processing.
69 |
70 | The `collectd` statistics are only configured and gathered if the environment variable `SMF_USE_COLLECTD`
71 | is set to non-empty by the test code (that is, it is only enabled upon request).
72 |
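As a sketch, a test script opts in before calling `framework_init()` from
`lib/common.bash`, which checks the variable and deploys the stats gathering:

```sh
SMF_USE_COLLECTD=true
framework_init   # sets up collectd when SMF_USE_COLLECTD is non-empty
```
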
73 | ### privileged statistics pods
74 |
75 | The privileged statistics pods `YAML` can be found in the [`scaling/stats.yaml`](scaling/stats.yaml) file.
76 | An example of how to invoke and use this daemonset to extract statistics can be found in the
77 | [`scaling/k8s_scale.sh`](scaling/k8s_scale.sh) file.
78 |
79 | ## Configuring constant 'loads'
80 |
81 | The framework includes some tooling to assist in setting up constant pre-defined 'loads' across the cluster
82 | to aid evaluation of their impacts on the scaling metrics. See the [cpu-load documentation](lib/cpu-load.md)
83 | for more information.
84 |
--------------------------------------------------------------------------------
/clr-k8s-examples/4-kube-prometheus/overlays/v0.2.0/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - kube-prometheus/manifests/00namespace-namespace.yaml
3 | - kube-prometheus/manifests/0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
4 | - kube-prometheus/manifests/0prometheus-operator-0prometheusCustomResourceDefinition.yaml
5 | - kube-prometheus/manifests/0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
6 | - kube-prometheus/manifests/0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
7 | - kube-prometheus/manifests/0prometheus-operator-clusterRoleBinding.yaml
8 | - kube-prometheus/manifests/0prometheus-operator-clusterRole.yaml
9 | - kube-prometheus/manifests/0prometheus-operator-deployment.yaml
10 | - kube-prometheus/manifests/0prometheus-operator-serviceAccount.yaml
11 | - kube-prometheus/manifests/0prometheus-operator-serviceMonitor.yaml
12 | - kube-prometheus/manifests/0prometheus-operator-service.yaml
13 | - kube-prometheus/manifests/alertmanager-alertmanager.yaml
14 | - kube-prometheus/manifests/alertmanager-secret.yaml
15 | - kube-prometheus/manifests/alertmanager-serviceAccount.yaml
16 | - kube-prometheus/manifests/alertmanager-serviceMonitor.yaml
17 | - kube-prometheus/manifests/alertmanager-service.yaml
18 | - kube-prometheus/manifests/grafana-dashboardDatasources.yaml
19 | - kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
20 | - kube-prometheus/manifests/grafana-dashboardSources.yaml
21 | - kube-prometheus/manifests/grafana-deployment.yaml
22 | - kube-prometheus/manifests/grafana-serviceAccount.yaml
23 | - kube-prometheus/manifests/grafana-serviceMonitor.yaml
24 | - kube-prometheus/manifests/grafana-service.yaml
25 | - kube-prometheus/manifests/kube-state-metrics-clusterRoleBinding.yaml
26 | - kube-prometheus/manifests/kube-state-metrics-clusterRole.yaml
27 | - kube-prometheus/manifests/kube-state-metrics-deployment.yaml
28 | - kube-prometheus/manifests/kube-state-metrics-roleBinding.yaml
29 | - kube-prometheus/manifests/kube-state-metrics-role.yaml
30 | - kube-prometheus/manifests/kube-state-metrics-serviceAccount.yaml
31 | - kube-prometheus/manifests/kube-state-metrics-serviceMonitor.yaml
32 | - kube-prometheus/manifests/kube-state-metrics-service.yaml
33 | - kube-prometheus/manifests/node-exporter-clusterRoleBinding.yaml
34 | - kube-prometheus/manifests/node-exporter-clusterRole.yaml
35 | - kube-prometheus/manifests/node-exporter-daemonset.yaml
36 | - kube-prometheus/manifests/node-exporter-serviceAccount.yaml
37 | - kube-prometheus/manifests/node-exporter-serviceMonitor.yaml
38 | - kube-prometheus/manifests/node-exporter-service.yaml
39 | - kube-prometheus/manifests/prometheus-adapter-apiService.yaml
40 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
41 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml
42 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleBinding.yaml
43 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleServerResources.yaml
44 | - kube-prometheus/manifests/prometheus-adapter-clusterRole.yaml
45 | - kube-prometheus/manifests/prometheus-adapter-configMap.yaml
46 | - kube-prometheus/manifests/prometheus-adapter-deployment.yaml
47 | - kube-prometheus/manifests/prometheus-adapter-roleBindingAuthReader.yaml
48 | - kube-prometheus/manifests/prometheus-adapter-serviceAccount.yaml
49 | - kube-prometheus/manifests/prometheus-adapter-service.yaml
50 | - kube-prometheus/manifests/prometheus-clusterRoleBinding.yaml
51 | - kube-prometheus/manifests/prometheus-clusterRole.yaml
52 | - kube-prometheus/manifests/prometheus-prometheus.yaml
53 | - kube-prometheus/manifests/prometheus-roleBindingConfig.yaml
54 | - kube-prometheus/manifests/prometheus-roleBindingSpecificNamespaces.yaml
55 | - kube-prometheus/manifests/prometheus-roleConfig.yaml
56 | - kube-prometheus/manifests/prometheus-roleSpecificNamespaces.yaml
57 | - kube-prometheus/manifests/prometheus-rules.yaml
58 | - kube-prometheus/manifests/prometheus-serviceAccount.yaml
59 | - kube-prometheus/manifests/prometheus-serviceMonitorApiserver.yaml
60 | - kube-prometheus/manifests/prometheus-serviceMonitorCoreDNS.yaml
61 | - kube-prometheus/manifests/prometheus-serviceMonitorKubeControllerManager.yaml
62 | - kube-prometheus/manifests/prometheus-serviceMonitorKubelet.yaml
63 | - kube-prometheus/manifests/prometheus-serviceMonitorKubeScheduler.yaml
64 | - kube-prometheus/manifests/prometheus-serviceMonitor.yaml
65 | - kube-prometheus/manifests/prometheus-service.yaml
66 |
--------------------------------------------------------------------------------
/clr-k8s-examples/4-kube-prometheus/overlays/v0.1.0/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - kube-prometheus/manifests/00namespace-namespace.yaml
3 | - kube-prometheus/manifests/0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
4 | - kube-prometheus/manifests/0prometheus-operator-0prometheusCustomResourceDefinition.yaml
5 | - kube-prometheus/manifests/0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
6 | - kube-prometheus/manifests/0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
7 | - kube-prometheus/manifests/0prometheus-operator-clusterRoleBinding.yaml
8 | - kube-prometheus/manifests/0prometheus-operator-clusterRole.yaml
9 | - kube-prometheus/manifests/0prometheus-operator-deployment.yaml
10 | - kube-prometheus/manifests/0prometheus-operator-serviceAccount.yaml
11 | - kube-prometheus/manifests/0prometheus-operator-serviceMonitor.yaml
12 | - kube-prometheus/manifests/0prometheus-operator-service.yaml
13 | - kube-prometheus/manifests/alertmanager-alertmanager.yaml
14 | - kube-prometheus/manifests/alertmanager-secret.yaml
15 | - kube-prometheus/manifests/alertmanager-serviceAccount.yaml
16 | - kube-prometheus/manifests/alertmanager-serviceMonitor.yaml
17 | - kube-prometheus/manifests/alertmanager-service.yaml
18 | - kube-prometheus/manifests/grafana-dashboardDatasources.yaml
19 | - kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
20 | - kube-prometheus/manifests/grafana-dashboardSources.yaml
21 | - kube-prometheus/manifests/grafana-deployment.yaml
22 | - kube-prometheus/manifests/grafana-serviceAccount.yaml
23 | - kube-prometheus/manifests/grafana-serviceMonitor.yaml
24 | - kube-prometheus/manifests/grafana-service.yaml
25 | - kube-prometheus/manifests/kube-state-metrics-clusterRoleBinding.yaml
26 | - kube-prometheus/manifests/kube-state-metrics-clusterRole.yaml
27 | - kube-prometheus/manifests/kube-state-metrics-deployment.yaml
28 | - kube-prometheus/manifests/kube-state-metrics-roleBinding.yaml
29 | - kube-prometheus/manifests/kube-state-metrics-role.yaml
30 | - kube-prometheus/manifests/kube-state-metrics-serviceAccount.yaml
31 | - kube-prometheus/manifests/kube-state-metrics-serviceMonitor.yaml
32 | - kube-prometheus/manifests/kube-state-metrics-service.yaml
33 | - kube-prometheus/manifests/node-exporter-clusterRoleBinding.yaml
34 | - kube-prometheus/manifests/node-exporter-clusterRole.yaml
35 | - kube-prometheus/manifests/node-exporter-daemonset.yaml
36 | - kube-prometheus/manifests/node-exporter-serviceAccount.yaml
37 | - kube-prometheus/manifests/node-exporter-serviceMonitor.yaml
38 | - kube-prometheus/manifests/node-exporter-service.yaml
39 | - kube-prometheus/manifests/prometheus-adapter-apiService.yaml
40 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
41 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml
42 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleBinding.yaml
43 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleServerResources.yaml
44 | - kube-prometheus/manifests/prometheus-adapter-clusterRole.yaml
45 | - kube-prometheus/manifests/prometheus-adapter-configMap.yaml
46 | - kube-prometheus/manifests/prometheus-adapter-deployment.yaml
47 | - kube-prometheus/manifests/prometheus-adapter-roleBindingAuthReader.yaml
48 | - kube-prometheus/manifests/prometheus-adapter-serviceAccount.yaml
49 | - kube-prometheus/manifests/prometheus-adapter-service.yaml
50 | - kube-prometheus/manifests/prometheus-clusterRoleBinding.yaml
51 | - kube-prometheus/manifests/prometheus-clusterRole.yaml
52 | - kube-prometheus/manifests/prometheus-prometheus.yaml
53 | - kube-prometheus/manifests/prometheus-roleBindingConfig.yaml
54 | - kube-prometheus/manifests/prometheus-roleBindingSpecificNamespaces.yaml
55 | - kube-prometheus/manifests/prometheus-roleConfig.yaml
56 | - kube-prometheus/manifests/prometheus-roleSpecificNamespaces.yaml
57 | - kube-prometheus/manifests/prometheus-rules.yaml
58 | - kube-prometheus/manifests/prometheus-serviceAccount.yaml
59 | - kube-prometheus/manifests/prometheus-serviceMonitorApiserver.yaml
60 | - kube-prometheus/manifests/prometheus-serviceMonitorCoreDNS.yaml
61 | - kube-prometheus/manifests/prometheus-serviceMonitorKubeControllerManager.yaml
62 | - kube-prometheus/manifests/prometheus-serviceMonitorKubelet.yaml
63 | - kube-prometheus/manifests/prometheus-serviceMonitorKubeScheduler.yaml
64 | - kube-prometheus/manifests/prometheus-serviceMonitor.yaml
65 | - kube-prometheus/manifests/prometheus-service.yaml
66 |
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/dut-details.R:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env Rscript
2 | # Copyright (c) 2018 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Display details for the 'Device Under Test', for all data sets being processed.
7 |
8 | suppressMessages(suppressWarnings(library(tidyr))) # for gather().
9 | library(tibble)
10 | suppressMessages(suppressWarnings(library(plyr))) # rbind.fill
11 | # So we can plot multiple graphs
12 | library(gridExtra) # together.
13 | suppressMessages(suppressWarnings(library(ggpubr))) # for ggtexttable.
14 | suppressMessages(library(jsonlite)) # to load the data.
15 |
16 | render_dut_details <- function()
17 | {
18 | # A list of all the known results files we might find the information inside.
19 | resultsfiles=c(
20 | "k8s-parallel.json",
21 | "k8s-scaling.json",
22 | "k8s-rapid.json"
23 | )
24 |
25 | data=c()
26 | stats=c()
27 | stats_names=c()
28 |
29 | # For each set of results
30 | for (currentdir in resultdirs) {
31 | count=1
32 | dirstats=c()
33 | datasetname=c()
34 | for (resultsfile in resultsfiles) {
35 | fname=paste(inputdir, currentdir, resultsfile, sep="/")
36 | if ( !file.exists(fname)) {
37 | #warning(paste("Skipping non-existent file: ", fname))
38 | next
39 | }
40 |
41 | # Derive the name from the test result dirname
42 | datasetname=basename(currentdir)
43 |
44 | # Import the data
45 | fdata=fromJSON(fname)
46 |
47 | if (length(fdata$'kubectl-version') != 0 ) {
48 | 			# We have kubectl version data
49 | dirstats=tibble("Client Ver"=as.character(fdata$'kubectl-version'$clientVersion$gitVersion))
50 | dirstats=cbind(dirstats, "Server Ver"=as.character(fdata$'kubectl-version'$serverVersion$gitVersion))
51 | numnodes= nrow(fdata$'kubectl-get-nodes'$items)
52 | dirstats=cbind(dirstats, "No. nodes"=as.character(numnodes))
53 |
54 | if (numnodes != 0) {
55 | first_node=fdata$'kubectl-get-nodes'$items[1,]
56 | dirstats=cbind(dirstats, "- Node0 name"=as.character(first_node$metadata$name))
57 |
58 | havekata=first_node$metadata$labels$'katacontainers.io/kata-runtime'
59 | if ( is.null(havekata) ) {
60 | dirstats=cbind(dirstats, " Have Kata"=as.character('false'))
61 | } else {
62 | dirstats=cbind(dirstats, " Have Kata"=as.character(havekata))
63 | }
64 |
65 | dirstats=cbind(dirstats, " CPUs"=as.character(first_node$status$capacity$cpu))
66 | dirstats=cbind(dirstats, " Memory"=as.character(first_node$status$capacity$memory))
67 | dirstats=cbind(dirstats, " MaxPods"=as.character(first_node$status$capacity$pods))
68 | dirstats=cbind(dirstats, " PodCIDR"=as.character(first_node$spec$podCIDR))
69 |
70 | dirstats=cbind(dirstats, " runtime"=as.character(first_node$status$nodeInfo$containerRuntimeVersion))
71 | dirstats=cbind(dirstats, " kernel"=as.character(first_node$status$nodeInfo$kernelVersion))
72 | dirstats=cbind(dirstats, " kubeProxy"=as.character(first_node$status$nodeInfo$kubeProxyVersion))
73 | dirstats=cbind(dirstats, " Kubelet"=as.character(first_node$status$nodeInfo$kubeletVersion))
74 | dirstats=cbind(dirstats, " OS"=as.character(first_node$status$nodeInfo$osImage))
75 | }
76 |
77 | break
78 | }
79 | }
80 |
81 | if ( length(dirstats) == 0 ) {
82 | cat(paste("No valid data found for directory ", currentdir, "\n\n"))
83 | }
84 |
85 | # use plyr rbind.fill so we can combine disparate version info frames
86 | stats=rbind.fill(stats, dirstats)
87 | stats_names=rbind(stats_names, datasetname)
88 | }
89 |
90 | if ( length(stats_names) == 0 ) {
91 | cat("No system details found\n\n")
92 | return()
93 | }
94 |
95 | rownames(stats) = stats_names
96 |
97 | # Rotate the tibble so we get data dirs as the columns
98 | spun_stats = as_tibble(cbind(What=names(stats), t(stats)))
99 |
100 | # Build us a text table of numerical results
101 | # Set up as left hand justify, so the node data indent renders.
102 | tablefontsize=8
103 | tbody.style = tbody_style(hjust=0, x=0.1, size=tablefontsize)
104 | stats_plot = suppressWarnings(ggtexttable(data.frame(spun_stats, check.names=FALSE),
105 | theme=ttheme(base_size=tablefontsize, tbody.style=tbody.style),
106 | rows=NULL
107 | ))
108 |
109 | # It may seem odd doing a grid of 1x1, but it should ensure we get a uniform format and
110 | # layout to match the other charts and tables in the report.
111 | master_plot = grid.arrange(
112 | stats_plot,
113 | nrow=1,
114 | ncol=1 )
115 | }
116 |
117 | render_dut_details()
118 |
--------------------------------------------------------------------------------
/clr-k8s-examples/hack/update_checker.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ###
4 | # update_checker.sh
5 | # Parses create_stack.sh for urls and versions
6 | # Curls urls for latest version and reports the comparison
7 | ##
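#
# Example (assuming create_stack.sh lives one directory up; adjust the
# path as needed):
#   CLRK8S_ALL=1 ./update_checker.sh ../create_stack.sh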
8 |
9 | # first argument is path to create_stack.sh
10 | COMPONENT_FILE="${1:-./create_stack.sh}"
11 | # set CLRK8S_DEBUG=1 for debug output
12 | DEBUG=${CLRK8S_DEBUG:-0}
13 | # set CLRK8S_NO_COLOR=1 for no colors
14 | NO_COLOR=${CLRK8S_NO_COLOR:-""}
15 | # set CLRK8S_ALL=1 for all results, not just changed
16 | ALL=${CLRK8S_ALL:-""}
17 | # add components to skip (not check)
18 | # - canal doesn't use git repo tags for revisions
19 | declare -a COMPONENT_SKIP=( CANAL )
20 |
21 | # internal vars
22 | declare -A COMPONENT_VER
23 | declare -A COMPONENT_URL
24 | LATEST_URL=""
25 |
26 | # usage prints help and exit
27 | function usage(){
28 | echo "Compare default component versions to latest release"
29 | 	echo "usage: update_checker.sh <path-to-create_stack.sh>"
30 | exit 0
31 | }
32 | # log echoes to stdout
33 | function log(){
34 | echo "$1"
35 | }
36 | # debug echoes to stdout if debug is enabled
37 | function debug(){
38 | if [[ "${DEBUG}" -ne 0 ]]; then
39 | echo "$1"
40 | fi
41 | }
42 | # extract_component_data scans file for component versions and urls and add them to maps
43 | function extract_component_data(){
44 | file=${1:-$COMPONENT_FILE}
45 | name=""
46 | version=""
47 | url=""
48 | while read -r line
49 | do
50 | # versions
51 | if [[ $line =~ "_VER=" ]]; then
52 | debug "Found component version $line"
53 | name=${line%%_*}
54 | 		if [[ "${COMPONENT_SKIP[*]}" =~ (^|[[:space:]])"$name"($|[[:space:]]) ]]; then
55 | debug "Skipping component $name"
56 | continue
57 | fi
58 | versions=${line#*=}
59 | if [[ $versions =~ ":-" ]]; then
60 | version=${line#*:-}
61 | fi
62 | # cleanup value
63 | version=${version%\}}
64 | version=${version%\}\"}
65 | if [[ -n "$name" && ${COMPONENT_VER[$name]} == "" ]]; then
66 | debug "Adding component $name=$version to COMPONENT_VER"
67 | COMPONENT_VER[$name]=$version
68 | fi
69 | fi
70 |
71 | # urls
72 | if [[ $line =~ "_URL=" ]]; then
73 | debug "Found component URL $line"
74 | name=${line%%_*}
75 | 			if [[ "${COMPONENT_SKIP[*]}" =~ (^|[[:space:]])"$name"($|[[:space:]]) ]]; then
76 | debug "Skipping component $name"
77 | continue
78 | fi
79 | urls=${line#*=}
80 |
81 | if [[ $urls =~ ":-" ]]; then
82 | urls=${line#*:-}
83 | fi
84 | # cleanup value
85 | url=${urls%\"}
86 | url=${url#\"}
87 | if [[ -n "$name" && ${COMPONENT_URL[$name]} == "" ]]; then
88 | debug "Adding component $name=$url to COMPONENT_URL"
89 | COMPONENT_URL[$name]=$url
90 | fi
91 | fi
92 |
93 | done < $file
94 |
95 | }
96 | # resolve_latest_url extracts the real release/latest url from a repo url
97 | function resolve_latest_url(){
98 | repo=$1
99 | url=${repo%.git*}/releases/latest
100 | LATEST_URL=$(curl -Ls -o /dev/null -w %{url_effective} $url)
101 | if [[ "$?" -gt 0 ]]; then
102 | echo "curl error, exiting."
103 | exit 1
104 | fi
105 | }
106 | # function_exists checks if a function exists
107 | function function_exists() {
108 | declare -f -F "$1" > /dev/null
109 | return $?
110 | }
111 | function report(){
112 | if [[ -z $NO_COLOR ]]; then
113 | BOLD="\e[1m\e[33m"
114 | BOLD_OFF="\e[0m"
115 | fi
116 | mode="changed"
117 | out=""
118 | if [[ -n "$1" ]]; then
119 | mode="all"
120 | fi
121 | out+="\n"
122 | out+="Components ($mode)\n"
123 | out+="--------------------------\n"
124 | echo -e $out
125 | out="NAME CURRENT LATEST\n"
126 | # loop thru each url, get latest version and report
127 | for k in "${!COMPONENT_URL[@]}";
128 | do
129 | name=$k
130 | resolve_latest_url "${COMPONENT_URL[$k]}"
131 | latest_url="$LATEST_URL"
132 | latest_ver="${latest_url#*tag/}"
133 | current_ver="${COMPONENT_VER[$name]}"
134 | if [[ "${current_ver}" != "${latest_ver}" ]]; then
135 | out+="$name $current_ver $BOLD $latest_ver $BOLD_OFF\n";
136 | fi
137 | if [[ "${current_ver}" == "${latest_ver}" && -n $ALL ]]; then
138 | out+="$name $current_ver $latest_ver\n";
139 | fi
140 | done;
141 | echo -e "${out}" | column -t
142 | if [[ "${#COMPONENT_SKIP[@]}" -gt 0 ]]; then
143 | echo "---"
144 | 		echo "WARNING: Skipped comparisons for the following components:"
145 | for s in "${!COMPONENT_SKIP[@]}"
146 | do
147 | echo "${COMPONENT_SKIP[$s]}"
148 | done
149 | fi
150 | }
151 | ###
152 | # Main
153 | ##
154 |
155 | # print help if no args
156 | if [[ "$#" -eq 0 ]]; then
157 | usage
158 | fi
159 |
160 | # get the versions
161 | extract_component_data "$1"
162 | # output
163 | report "${ALL}"
164 |
--------------------------------------------------------------------------------
/clr-k8s-examples/4-kube-prometheus/overlays/f458e85e5d7675f7bc253072e1b4c8892b51af0f/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This is a temporary reference commit until a working prometheus for 1.16 is released
2 | # (dashboard-deployment api version not updated in v0.2.0, breaking k8s 1.16)
3 | # f458e85e5d7675f7bc253072e1b4c8892b51af0f
4 | resources:
5 | - kube-prometheus/manifests/00namespace-namespace.yaml
6 | - kube-prometheus/manifests/0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
7 | - kube-prometheus/manifests/0prometheus-operator-0prometheusCustomResourceDefinition.yaml
8 | - kube-prometheus/manifests/0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
9 | - kube-prometheus/manifests/0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
10 | - kube-prometheus/manifests/0prometheus-operator-clusterRoleBinding.yaml
11 | - kube-prometheus/manifests/0prometheus-operator-clusterRole.yaml
12 | - kube-prometheus/manifests/0prometheus-operator-deployment.yaml
13 | - kube-prometheus/manifests/0prometheus-operator-serviceAccount.yaml
14 | - kube-prometheus/manifests/0prometheus-operator-serviceMonitor.yaml
15 | - kube-prometheus/manifests/0prometheus-operator-service.yaml
16 | - kube-prometheus/manifests/alertmanager-alertmanager.yaml
17 | - kube-prometheus/manifests/alertmanager-secret.yaml
18 | - kube-prometheus/manifests/alertmanager-serviceAccount.yaml
19 | - kube-prometheus/manifests/alertmanager-serviceMonitor.yaml
20 | - kube-prometheus/manifests/alertmanager-service.yaml
21 | - kube-prometheus/manifests/grafana-dashboardDatasources.yaml
22 | - kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
23 | - kube-prometheus/manifests/grafana-dashboardSources.yaml
24 | - kube-prometheus/manifests/grafana-deployment.yaml
25 | - kube-prometheus/manifests/grafana-serviceAccount.yaml
26 | - kube-prometheus/manifests/grafana-serviceMonitor.yaml
27 | - kube-prometheus/manifests/grafana-service.yaml
28 | - kube-prometheus/manifests/kube-state-metrics-clusterRoleBinding.yaml
29 | - kube-prometheus/manifests/kube-state-metrics-clusterRole.yaml
30 | - kube-prometheus/manifests/kube-state-metrics-deployment.yaml
31 | - kube-prometheus/manifests/kube-state-metrics-roleBinding.yaml
32 | - kube-prometheus/manifests/kube-state-metrics-role.yaml
33 | - kube-prometheus/manifests/kube-state-metrics-serviceAccount.yaml
34 | - kube-prometheus/manifests/kube-state-metrics-serviceMonitor.yaml
35 | - kube-prometheus/manifests/kube-state-metrics-service.yaml
36 | - kube-prometheus/manifests/node-exporter-clusterRoleBinding.yaml
37 | - kube-prometheus/manifests/node-exporter-clusterRole.yaml
38 | - kube-prometheus/manifests/node-exporter-daemonset.yaml
39 | - kube-prometheus/manifests/node-exporter-serviceAccount.yaml
40 | - kube-prometheus/manifests/node-exporter-serviceMonitor.yaml
41 | - kube-prometheus/manifests/node-exporter-service.yaml
42 | - kube-prometheus/manifests/prometheus-adapter-apiService.yaml
43 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
44 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml
45 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleBinding.yaml
46 | - kube-prometheus/manifests/prometheus-adapter-clusterRoleServerResources.yaml
47 | - kube-prometheus/manifests/prometheus-adapter-clusterRole.yaml
48 | - kube-prometheus/manifests/prometheus-adapter-configMap.yaml
49 | - kube-prometheus/manifests/prometheus-adapter-deployment.yaml
50 | - kube-prometheus/manifests/prometheus-adapter-roleBindingAuthReader.yaml
51 | - kube-prometheus/manifests/prometheus-adapter-serviceAccount.yaml
52 | - kube-prometheus/manifests/prometheus-adapter-service.yaml
53 | - kube-prometheus/manifests/prometheus-clusterRoleBinding.yaml
54 | - kube-prometheus/manifests/prometheus-clusterRole.yaml
55 | - kube-prometheus/manifests/prometheus-prometheus.yaml
56 | - kube-prometheus/manifests/prometheus-roleBindingConfig.yaml
57 | - kube-prometheus/manifests/prometheus-roleBindingSpecificNamespaces.yaml
58 | - kube-prometheus/manifests/prometheus-roleConfig.yaml
59 | - kube-prometheus/manifests/prometheus-roleSpecificNamespaces.yaml
60 | - kube-prometheus/manifests/prometheus-rules.yaml
61 | - kube-prometheus/manifests/prometheus-serviceAccount.yaml
62 | - kube-prometheus/manifests/prometheus-serviceMonitorApiserver.yaml
63 | - kube-prometheus/manifests/prometheus-serviceMonitorCoreDNS.yaml
64 | - kube-prometheus/manifests/prometheus-serviceMonitorKubeControllerManager.yaml
65 | - kube-prometheus/manifests/prometheus-serviceMonitorKubelet.yaml
66 | - kube-prometheus/manifests/prometheus-serviceMonitorKubeScheduler.yaml
67 | - kube-prometheus/manifests/prometheus-serviceMonitor.yaml
68 | - kube-prometheus/manifests/prometheus-service.yaml
69 |
--------------------------------------------------------------------------------
/metrics/report/report_dockerfile/parallel.R:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env Rscript
2 | # Copyright (c) 2018-2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Show effects of parallel container launch on boot and deletion times by
7 | # launching and killing off a deployment whilst ramping the number of pods requested.
8 |
9 | suppressMessages(suppressWarnings(library(ggplot2))) # ability to plot nicely.
10 | # So we can plot multiple graphs
11 | library(gridExtra) # together.
12 | suppressMessages(suppressWarnings(library(ggpubr))) # for ggtexttable.
13 | suppressMessages(library(jsonlite)) # to load the data.
14 | suppressMessages(library(scales)) # To disable scientific notation on the axes.
15 |
16 | render_parallel <- function()
17 | {
18 | testnames=c(
19 | "k8s-parallel*"
20 | )
21 |
22 | data=c()
23 | stats=c()
24 | rstats=c()
25 | rstats_names=c()
26 | cstats=c()
27 | cstats_names=c()
28 |
29 | skip_points_enable_smooth=0 # Should we draw the points as well as lines on the graphs.
30 |
31 | for (currentdir in resultdirs) {
32 | dirstats=c()
33 | for (testname in testnames) {
34 | matchdir=paste(inputdir, currentdir, sep="")
35 | matchfile=paste(testname, '\\.json', sep="")
36 | files=list.files(matchdir, pattern=matchfile)
37 | if ( length(files) == 0 ) {
38 | #warning(paste("Pattern [", matchdir, "/", matchfile, "] matched nothing"))
39 | }
40 | for (ffound in files) {
41 | fname=paste(inputdir, currentdir, ffound, sep="")
42 | if ( !file.exists(fname)) {
43 | warning(paste("Skipping non-existent file: ", fname))
44 | next
45 | }
46 |
47 | # Derive the name from the test result dirname
48 | datasetname=basename(currentdir)
49 |
50 | # Import the data
51 | fdata=fromJSON(fname)
52 | # De-nest the test name specific data
53 | shortname=substr(ffound, 1, nchar(ffound)-nchar(".json"))
54 | fdata=fdata[[shortname]]
55 |
56 | testname=datasetname
57 |
58 | # convert ms to seconds
59 | cdata=data.frame(boot_time=as.numeric(fdata$BootResults$launch_time$Result)/1000)
60 | cdata=cbind(cdata, delete_time=as.numeric(fdata$BootResults$delete_time$Result)/1000)
61 | cdata=cbind(cdata, npod=as.numeric(fdata$BootResults$n_pods$Result))
62 |
63 | # If we have more than 20 items to draw, then do not draw the points on
64 | # the graphs, as they are then too noisy to read.
65 | # But, do draw the smoothed lines to help read the now dense and potentially
66 | # noisy graphs.
67 | if (length(cdata[, "boot_time"]) > 20) {
68 | skip_points_enable_smooth=1
69 | }
70 |
71 | cdata=cbind(cdata, testname=rep(testname, length(cdata[, "boot_time"]) ))
72 | cdata=cbind(cdata, dataset=rep(datasetname, length(cdata[, "boot_time"]) ))
73 |
74 | # Store away as a single set
75 | data=rbind(data, cdata)
76 | }
77 | }
78 | }
79 |
80 | # If we found nothing to process, quit early and nicely
81 | if ( length(data) == 0 ) {
82 | cat("No results files found for parallel tests\n\n")
83 | return()
84 | }
85 |
86 | # Show how boot time changed
87 | boot_line_plot <- ggplot( data=data, aes(npod, boot_time, colour=testname, group=dataset)) +
88 | geom_line( alpha=0.2) +
89 | xlab("parallel pods") +
90 | ylab("Boot time (s)") +
91 | ggtitle("Deployment boot time (detail)") +
92 | #ylim(0, NA) + # For big machines, better to not 0-index
93 | theme(axis.text.x=element_text(angle=90))
94 |
95 | if ( skip_points_enable_smooth == 0 ) {
96 | boot_line_plot = boot_line_plot + geom_point(alpha=0.3)
97 | } else {
98 | 		boot_line_plot = boot_line_plot + geom_smooth(se=FALSE, method="loess", size=0.3)
99 | }
100 |
101 | # And get a zero Y index plot.
102 | boot_line_plot_zero = boot_line_plot + ylim(0, NA) +
103 | ggtitle("Deployment boot time (0 index)")
104 |
105 | 	# Show how deletion time changed
106 | delete_line_plot <- ggplot( data=data, aes(npod, delete_time, colour=testname, group=dataset)) +
107 | geom_line(alpha=0.2) +
108 | xlab("parallel pods") +
109 | ylab("Delete time (s)") +
110 | ggtitle("Deployment deletion time (detail)") +
111 | #ylim(0, NA) + # For big machines, better to not 0-index
112 | theme(axis.text.x=element_text(angle=90))
113 |
114 | if ( skip_points_enable_smooth == 0 ) {
115 | delete_line_plot = delete_line_plot + geom_point(alpha=0.3)
116 | } else {
117 | delete_line_plot = delete_line_plot + geom_smooth(se=FALSE, method="loess", size=0.3)
118 | }
119 |
120 | # And get a 0 indexed Y axis plot
121 | delete_line_plot_zero = delete_line_plot + ylim(0, NA) +
122 | ggtitle("Deployment deletion time (0 index)")
123 |
124 | # See https://www.r-bloggers.com/ggplot2-easy-way-to-mix-multiple-graphs-on-the-same-page/ for
125 | # excellent examples
126 | master_plot = grid.arrange(
127 | boot_line_plot_zero,
128 | delete_line_plot_zero,
129 | boot_line_plot,
130 | delete_line_plot,
131 | nrow=2,
132 | ncol=2 )
133 | }
134 |
135 | render_parallel()
136 |
--------------------------------------------------------------------------------
/metrics/scaling/README.md:
--------------------------------------------------------------------------------
1 | # Scaling metrics tests
2 |
3 | This directory contains a number of scripts to perform a variety of system scaling tests.
4 |
5 | The tests are described in their individual sections below.
6 |
7 | Each test has a number of configurable options. Many of those options are common across all tests.
8 | Those options are detailed in their own section below.
9 |
10 | > **Note:** `k8s_scale_rapid.sh` is the most complete and up-to-date test. It is the only test that
11 | > currently uses the `collectd` data collection method. Other tests use a privileged container to
12 | > gather statistics.
13 | >
14 | > If you find one of the other tests useful, please consider updating it and the corresponding report
15 | > generation code to use the `collectd` method and send a Pull Request with your updates to this codebase.
16 |
17 | ## Global test configuration options
18 |
19 | The following variables are settable for many of the tests. Check each individual test's help
20 | for specifics and its default values.
21 |
22 | | Variable | Default Value | Description |
23 | | -------- | ------------- | ----------- |
24 | | TEST_NAME | test dependent | Can be set to override the default JSON results filename |
25 | | NUM_PODS | 20 | Number of pods to launch |
26 | | STEP | 1 | Number of pods to launch per cycle |
27 | | wait_time | 30 | Seconds to wait for pods to become ready |
28 | | delete_wait_time | 600 | Seconds to wait for all pods to be deleted |
29 | | settle_time | 5 | Seconds to wait after pods ready before taking measurements |
30 | | use_api | yes | Specify `yes` or `no` to use the JSON API to launch pods (otherwise, launch via YAML) |
31 | | grace | 30 | Specify the grace period in seconds for workload pod termination |
32 | | RUNTIME | unset | Specify the `RuntimeClass` to use to launch the pods |
33 |
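These options are set as plain environment variables when invoking a test script. A purely illustrative invocation, assuming a `kata-qemu` `RuntimeClass` exists on the cluster, might look like:

```sh
$ NUM_PODS=100 STEP=5 RUNTIME=kata-qemu ./scaling/k8s_scale_rapid.sh
```
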
34 | ## k8s_parallel.sh
35 |
36 | Measures pod create and delete times whilst increasing the number of pods launched in parallel.
37 |
38 | The test works by creating and destroying deployments with the required number of replicas being scaled.
39 |
40 | ## k8s_scale_nc.sh
41 |
42 | Measures pod response time using `nc` to test network connection response, storing the results as percentile
43 | values. It is used to see whether response time latency and jitter are affected by scaling the number of pods.
44 |
45 | ## k8s_scale_net.sh
46 |
47 | Measures pod response time to a `curl` HTTP GET request against the K8S e2e `agnhost` image.
48 | It is used to measure whether the 'ready to respond' time scales with the number of service ports in use.
49 |
50 | ## k8s_scale_rapid.sh
51 |
52 | Measures how pod launch times and the k8s system scale whilst launching more and more pods.
53 |
54 | Uses the `collectd` method to gather a number of statistics, including:
55 |
56 | - cpu usage
57 | - memory usage
58 | - network connections
59 | - disk usage
60 | - ipc stats
61 |
62 | ## k8s_scale.sh
63 |
64 | The forerunner of `k8s_scale_rapid.sh`, using the privileged pod method to gather statistics. It is recommended
65 | to use `k8s_scale_rapid.sh` in preference where possible.
66 |
67 | # Example
68 |
69 | Below is a brief example of running the `k8s_scale_rapid.sh` test and generating a report from the results.
70 |
71 | 1. Run the test
72 |
73 | The test will run against the default `kubectl` configured cluster.
74 | ```sh
75 | $ ./scaling/k8s_scale_rapid.sh
76 | ```
77 |
78 | Results are stored in the `results` directory. The results comprise one `JSON` file for the test, and
79 | one `.tar.gz` file for each node found in the cluster.
80 |
81 | > **Note:** Only the `collectd` based tests generate `.tar.gz` files. All other tests only generate a single
82 | > `JSON` file for each run.
83 |
84 | 1. Move the results files
85 |
86 | To generate the report, the results files should be moved into an appropriately named sub-directory.
87 | The report generator can process and compare multiple sets of results, each of which should be placed
88 | in its own sub-directory. The example below uses the name `run1`:
89 |
90 | ```sh
91 | $ cd results
92 | $ mkdir run1
93 | $ mv *.json run1
94 | $ mv *.tar.gz run1
95 | ```
96 |
97 | This sequence can be repeated to gather multiple test data sets. Place each data set in its own subdirectory.
98 | The report generator will process and compare all data set subdirectories found in the `results` directory.
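
For example, after two runs the `results` directory might look like this (the file names below are illustrative; actual names depend on the test and its `TEST_NAME`):

```sh
$ tree results
results/
├── run1
│   ├── k8s-rapid.json
│   └── node-1.tar.gz
└── run2
    ├── k8s-rapid.json
    └── node-1.tar.gz
```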
99 |
100 | 1. Generate the report
101 |
102 | The report generator in the `report` subdirectory processes the sub-directories of the `results` directory
103 | to produce a `PDF` report and individual `PNG` graphs. The report generator utilises `docker` to build an
104 | image containing all the necessary tooling.
105 |
106 | ```sh
107 | $ cd report
108 | $ ./makereport.sh
109 | ...
110 | $ tree output
111 | output/
112 | ├── dut-1.png
113 | ├── metrics_report.pdf
114 | ├── scaling-1.png
115 | ├── scaling-2.png
116 | ├── scaling-3.png
117 | └── scaling-4.png
118 | ```
119 |
--------------------------------------------------------------------------------
/clr-k8s-examples/4-kube-prometheus/overlays/v0.10.0/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - kube-prometheus/manifests/kubeStateMetrics-serviceAccount.yaml
3 | - kube-prometheus/manifests/blackboxExporter-clusterRole.yaml
4 | - kube-prometheus/manifests/nodeExporter-serviceAccount.yaml
5 | - kube-prometheus/manifests/prometheus-prometheusRule.yaml
6 | - kube-prometheus/manifests/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml
7 | - kube-prometheus/manifests/prometheus-serviceMonitor.yaml
8 | - kube-prometheus/manifests/grafana-dashboardSources.yaml
9 | - kube-prometheus/manifests/kubeStateMetrics-prometheusRule.yaml
10 | - kube-prometheus/manifests/kubePrometheus-prometheusRule.yaml
11 | - kube-prometheus/manifests/prometheus-clusterRole.yaml
12 | - kube-prometheus/manifests/blackboxExporter-serviceMonitor.yaml
13 | - kube-prometheus/manifests/prometheus-roleSpecificNamespaces.yaml
14 | - kube-prometheus/manifests/alertmanager-service.yaml
15 | - kube-prometheus/manifests/prometheusAdapter-serviceMonitor.yaml
16 | - kube-prometheus/manifests/nodeExporter-prometheusRule.yaml
17 | - kube-prometheus/manifests/nodeExporter-service.yaml
18 | - kube-prometheus/manifests/prometheus-roleConfig.yaml
19 | - kube-prometheus/manifests/kubeStateMetrics-clusterRole.yaml
20 | - kube-prometheus/manifests/prometheusOperator-deployment.yaml
21 | - kube-prometheus/manifests/prometheusOperator-serviceMonitor.yaml
22 | - kube-prometheus/manifests/prometheusAdapter-deployment.yaml
23 | - kube-prometheus/manifests/kubernetesControlPlane-serviceMonitorApiserver.yaml
24 | - kube-prometheus/manifests/prometheusAdapter-configMap.yaml
25 | - kube-prometheus/manifests/kubernetesControlPlane-prometheusRule.yaml
26 | - kube-prometheus/manifests/kubeStateMetrics-deployment.yaml
27 | - kube-prometheus/manifests/blackboxExporter-configuration.yaml
28 | - kube-prometheus/manifests/kubeStateMetrics-clusterRoleBinding.yaml
29 | - kube-prometheus/manifests/blackboxExporter-serviceAccount.yaml
30 | - kube-prometheus/manifests/grafana-dashboardDefinitions.yaml
31 | - kube-prometheus/manifests/prometheusOperator-service.yaml
32 | - kube-prometheus/manifests/grafana-service.yaml
33 | - kube-prometheus/manifests/prometheus-prometheus.yaml
34 | - kube-prometheus/manifests/kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml
35 | - kube-prometheus/manifests/alertmanager-alertmanager.yaml
36 | - kube-prometheus/manifests/kubernetesControlPlane-serviceMonitorKubelet.yaml
37 | - kube-prometheus/manifests/grafana-dashboardDatasources.yaml
38 | - kube-prometheus/manifests/kubernetesControlPlane-serviceMonitorCoreDNS.yaml
39 | - kube-prometheus/manifests/alertmanager-serviceMonitor.yaml
40 | - kube-prometheus/manifests/grafana-deployment.yaml
41 | - kube-prometheus/manifests/grafana-serviceAccount.yaml
42 | - kube-prometheus/manifests/alertmanager-serviceAccount.yaml
43 | - kube-prometheus/manifests/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml
44 | - kube-prometheus/manifests/prometheusOperator-prometheusRule.yaml
45 | - kube-prometheus/manifests/alertmanager-podDisruptionBudget.yaml
46 | - kube-prometheus/manifests/prometheus-serviceAccount.yaml
47 | - kube-prometheus/manifests/prometheus-service.yaml
48 | - kube-prometheus/manifests/prometheusAdapter-clusterRoleServerResources.yaml
49 | - kube-prometheus/manifests/prometheusAdapter-clusterRoleBinding.yaml
50 | - kube-prometheus/manifests/prometheus-roleBindingConfig.yaml
51 | - kube-prometheus/manifests/nodeExporter-daemonset.yaml
52 | - kube-prometheus/manifests/prometheus-roleBindingSpecificNamespaces.yaml
53 | - kube-prometheus/manifests/nodeExporter-clusterRoleBinding.yaml
54 | - kube-prometheus/manifests/prometheusOperator-serviceAccount.yaml
55 | - kube-prometheus/manifests/prometheusOperator-clusterRoleBinding.yaml
56 | - kube-prometheus/manifests/kubeStateMetrics-serviceMonitor.yaml
57 | - kube-prometheus/manifests/prometheusAdapter-roleBindingAuthReader.yaml
58 | - kube-prometheus/manifests/prometheusAdapter-clusterRoleBindingDelegator.yaml
59 | - kube-prometheus/manifests/prometheusAdapter-serviceAccount.yaml
60 | - kube-prometheus/manifests/blackboxExporter-deployment.yaml
61 | - kube-prometheus/manifests/alertmanager-prometheusRule.yaml
62 | - kube-prometheus/manifests/prometheus-clusterRoleBinding.yaml
63 | - kube-prometheus/manifests/prometheusAdapter-clusterRole.yaml
64 | - kube-prometheus/manifests/grafana-serviceMonitor.yaml
65 | - kube-prometheus/manifests/nodeExporter-clusterRole.yaml
66 | - kube-prometheus/manifests/prometheusAdapter-service.yaml
67 | - kube-prometheus/manifests/prometheus-podDisruptionBudget.yaml
68 | - kube-prometheus/manifests/blackboxExporter-service.yaml
69 | - kube-prometheus/manifests/nodeExporter-serviceMonitor.yaml
70 | - kube-prometheus/manifests/blackboxExporter-clusterRoleBinding.yaml
71 | - kube-prometheus/manifests/alertmanager-secret.yaml
72 | - kube-prometheus/manifests/prometheusAdapter-apiService.yaml
73 | - kube-prometheus/manifests/prometheusOperator-clusterRole.yaml
74 | - kube-prometheus/manifests/grafana-config.yaml
75 | - kube-prometheus/manifests/prometheusAdapter-podDisruptionBudget.yaml
76 | - kube-prometheus/manifests/kubeStateMetrics-service.yaml
77 |
--------------------------------------------------------------------------------
/clr-k8s-examples/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | require 'fileutils'
5 | require 'ipaddr'
6 | require 'securerandom'
7 |
8 | $num_instances = (ENV['NODES'] || 3).to_i
9 | $cpus = (ENV['CPUS'] || 4).to_i
10 | $memory = (ENV['MEMORY'] || 8192).to_i
11 | $disks = 2
12 | # Using folder prefix instead of uuid until vagrant-libvirt fixes disk cleanup
13 | $disk_prefix = File.basename(File.dirname(__FILE__), "/")
14 | $disk_size = "10G"
15 | $box = "AntonioMeireles/ClearLinux"
16 | $box_ver = (ENV['CLEAR_VBOX_VER'])
17 | $loader = File.exists?("/usr/share/qemu/OVMF.fd") ? "/usr/share/qemu/OVMF.fd" : File.join(File.dirname(__FILE__), "OVMF.fd")
18 | $vm_name_prefix = "clr"
19 | $base_ip = IPAddr.new("10.10.100.10")
20 | $hosts = {}
21 | $proxy_ip_list = "192.168.121.0/24"
22 | $driveletters = ('a'..'z').to_a
23 | $setup_fc = ['true', '1'].include? ENV['SETUP_FC'].to_s
24 | $runner = ENV.has_key?('RUNNER') ? ENV['RUNNER'].to_s : "crio".to_s
25 | $high_pod_count = ENV.has_key?('HIGH_POD_COUNT') ? ENV['HIGH_POD_COUNT'].to_s : ""
26 | $CLRK8S_CLR_VER = ENV.has_key?('CLRK8S_CLR_VER') ? ENV['CLRK8S_CLR_VER'].to_s : ""
27 | if !(["crio","containerd"].include? $runner)
28 | abort("it's either crio or containerd. Cannot do anything else")
29 | end
30 |
31 | if not File.exists?($loader)
32 | system('curl -O https://download.clearlinux.org/image/OVMF.fd')
33 | end
34 |
35 | # We need v 1.0.14 or above for this vagrantfile to work.
36 | unless Vagrant.has_plugin?("vagrant-guests-clearlinux")
37 | system "vagrant plugin install vagrant-guests-clearlinux"
38 | end
39 |
40 | # Install plugins that you might need.
41 | if ENV['http_proxy'] || ENV['HTTP_PROXY']
42 | system "vagrant plugin install vagrant-proxyconf" unless Vagrant.has_plugin?("vagrant-proxyconf")
43 | end
44 |
45 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
46 | # configures the configuration version (we support older styles for
47 | # backwards compatibility). Please don't change it unless you know what
48 | # you're doing.
49 | Vagrant.configure("2") do |config|
50 | # Every Vagrant development environment requires a box. You can search for
51 | # boxes at https://vagrantcloud.com/search.
52 | config.vm.box = $box
53 | config.vm.box_version = $box_ver
54 |
55 | # Mount the current dir at home folder instead of default
56 | config.vm.synced_folder './', '/vagrant', disabled: true
57 | config.vm.synced_folder './', '/home/clear/' + File.basename(Dir.getwd), type: 'rsync',
58 | rsync__args: ["--verbose", "--archive", "--delete", "-zz", "--copy-links"]
59 | #Setup proxies for all machines
60 | (1..$num_instances).each do |i|
61 | $base_ip = $base_ip.succ
62 | $hosts["clr-%02d" % i] = $base_ip.to_s
63 | end
64 |
65 | $hosts.each do |vm_name, ip|
66 | $proxy_ip_list = ("#{$proxy_ip_list},#{vm_name},#{ip}")
67 | end
68 |
69 | $hosts.each do |vm_name, ip|
70 | config.vm.define vm_name do |c|
71 | c.vm.hostname = vm_name
72 | c.vm.network :private_network, ip: ip, autostart: true
73 | c.vm.provider :libvirt do |lv|
74 | lv.cpu_mode = "host-passthrough"
75 | lv.nested = true
76 | lv.loader = $loader
77 | lv.cpus = $cpus
78 | lv.memory = $memory
79 | lv.machine_virtual_size = 40
80 | (1..$disks).each do |d|
81 | lv.storage :file, :device => "hd#{$driveletters[d]}", :path => "disk-#{$disk_prefix}-#{vm_name}-#{d}.disk", :size => $disk_size, :type => "raw"
82 | end
83 | end
84 | if ENV['http_proxy'] || ENV['HTTP_PROXY']
85 | if Vagrant.has_plugin?("vagrant-proxyconf")
86 | c.proxy.http = (ENV['http_proxy']||ENV['HTTP_PROXY'])
87 | c.proxy.https = (ENV['https_proxy']||ENV['HTTPS_PROXY'])
88 | if ENV['no_proxy'] || ENV['NO_PROXY']
89 | if ENV['no_proxy']
90 | c.proxy.no_proxy = (ENV['no_proxy']+","+"#{$proxy_ip_list}")
91 | else
92 | c.proxy.no_proxy = (ENV['NO_PROXY']+","+"#{$proxy_ip_list}")
93 | end
94 | else
95 | c.proxy.no_proxy = "localhost,127.0.0.1,172.16.10.10,#{$proxy_ip_list}"
96 | end
97 | end
98 | end
99 | c.vm.provider :virtualbox do |_, override|
100 | override.vm.provision "shell", privileged: true, inline: "sudo mkdir -p /etc/profile.d; echo export MASTER_IP=#{$hosts["clr-01"]} > /etc/profile.d/cnsetup.sh"
101 | end
102 | c.vm.provision "shell", privileged: false, path: "setup_system.sh", env: {"RUNNER" => $runner, "HIGH_POD_COUNT" => $high_pod_count, "CLRK8S_CLR_VER" => $CLRK8S_CLR_VER}
103 | if $setup_fc
104 | if $runner == "crio".to_s
105 | c.vm.provision "shell", privileged: false, path: "setup_kata_firecracker.sh"
106 | else
107 | # Wish we could use device mapper snapshotter with containerd, but it
108 | # does not exist on any released containerd version. Failing for now
109 | # when we use FC with containerd
110 | abort("Cannot use containerd with FC for now.")
111 | #c.vm.provision "shell", privileged: false, path: "containerd_devmapper_setup.sh"
112 | end
113 | end
114 | # Include shells bundle to get bash completion and add kubectl's commands to vagrant's shell
115 | c.vm.provision "shell", privileged: false, inline: 'sudo -E swupd bundle-add shells; echo "source <(kubectl completion bash)" >> $HOME/.bashrc'
116 | end
117 | end
118 | end
119 |
--------------------------------------------------------------------------------
/clr-k8s-examples/DEVELOP.md:
--------------------------------------------------------------------------------
1 | # Developer
2 |
3 | This document describes the key concepts and technologies used in the project, and lists the ways to contribute to the
4 | project.
5 |
6 | ## Code Conventions
7 |
8 | ### Shell Scripts
9 |
10 | Shell scripts should adhere to the [Google Shell Style Guide](https://google.github.io/styleguide/shell.xml) as much as
11 | possible.
12 |
13 | #### Formatting with `shfmt`
14 |
15 | The [shfmt](https://github.com/mvdan/sh#shfmt) tool should be used to format shell scripts with 2-space
16 | indentation, using the following parameters:
17 |
18 | ```shell script
19 | shfmt -i 2 -ci
20 | ```
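
For example, to list the scripts that deviate from the expected formatting and rewrite them in place (`-l` lists the files, `-w` writes the changes; the path is illustrative):

```shell script
shfmt -i 2 -ci -l -w ./clr-k8s-examples/*.sh
```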
21 |
22 | #### Linting with shellcheck
23 |
24 | The [shellcheck](https://github.com/koalaman/shellcheck) tool should be used to identify issues with the scripts
25 | themselves. The config file for shellcheck is typically found in `~/.shellcheckrc` and should include rules that
26 | are [ignored](https://github.com/koalaman/shellcheck/wiki/Ignore) project wide.
27 |
28 | ```shell script
29 | # ~/.shellcheckrc
30 | # disabled rules here
31 | ```
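
As an illustration, each ignored rule is a `disable=` directive on its own line; the rule number below is only an example:

```shell script
# ~/.shellcheckrc
# Example: ignore SC2034 ("variable appears unused") project wide
disable=SC2034
```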
32 |
33 |
34 |
35 | ## Kustomize Usage
36 |
37 | [Kustomize](https://kustomize.io/) is used to offer multiple versions of components simultaneously and helps us be
38 | explicit in patching. The main functionality of the tool is now built into `kubectl`. The following sections provide an
39 | overview of how we use Kustomize.
40 |
41 | ### Multiple Versions of Components
42 |
43 | We maintain multiple versions of a component by creating a directory for each version (e.g. `v0.8.3` and `v1.0.3`) and
44 | using a `kustomization.yaml` file to specify the required files and patches.
45 |
46 | ```bash
47 | 7-rook
48 | ├── overlays
49 | │ ├── v0.8.3
50 | │ │ ├── kustomization.yaml
51 | │ │ └── operator_patch.yaml
52 | │ └── v1.0.3
53 | │ ├── kustomization.yaml
54 | │ ├── patch_operator.yaml
55 | │ └── rook
56 |
57 | ```
58 |
59 | For each component to be installed, the `create_stack.sh` script will clone the relevant repo into the specified
60 | version dir (e.g. `7-rook/overlays/v1.0.3/rook`) and switch to the specified release branch. The script will then
61 | install the specified version via `kubectl` (e.g. `kubectl apply -k 7-rook/overlays/v1.0.3`), which applies the
62 | required files and patches.
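
The apply step is the kustomize support built into `kubectl`; for example, installing the `v1.0.3` overlay shown above:

```bash
# Apply the overlay's resources plus its patches in one step
kubectl apply -k 7-rook/overlays/v1.0.3
```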
63 |
64 | ### Specific files
65 |
66 | The `kustomization.yaml` allows us to specify which manifests to load under the `resources:` element and makes it easy
67 | to see any customizations via patch files.
68 |
69 | ```yaml
70 | # 7-rook/overlays/v1.0.3/kustomization.yaml
71 | resources:
72 | - rook/cluster/examples/kubernetes/ceph/common.yaml
73 | - rook/cluster/examples/kubernetes/ceph/operator.yaml
74 | - rook/cluster/examples/kubernetes/ceph/cluster.yaml
75 | - rook/cluster/examples/kubernetes/ceph/storageclass.yaml
76 |
77 | patchesStrategicMerge:
78 | - patch_operator.yaml
79 | ```
80 |
81 | ### Patches
82 |
83 | There are two types of patches in Kustomize, `patchesStrategicMerge` for simple YAML fragments and
84 | `patchesJson6902` for more advanced use cases.
85 |
86 | #### patchesStrategicMerge
87 |
88 | The `patchesStrategicMerge` patch is just a fragment of YAML that will be merged into the final manifest. Note that the
89 | metadata is required so the tool can locate the target manifest.
90 |
91 | ```yaml
92 | # 7-rook/overlays/v1.0.3/patch_operator.yaml
93 | apiVersion: apps/v1
94 | kind: Deployment
95 | metadata:
96 | name: rook-ceph-operator
97 | spec:
98 | template:
99 | spec:
100 | containers:
101 | - name: rook-ceph-operator
102 | env:
103 | - name: FLEXVOLUME_DIR_PATH
104 | value: "/var/lib/kubelet/volume-plugins"
105 | ```
106 | The above example adds the `FLEXVOLUME_DIR_PATH` environment variable and value to the `rook-ceph-operator` manifest.
107 |
108 | #### patchesJson6902
109 |
110 | In the following example we demonstrate the more advanced JSON patching format.
111 |
112 | ```yaml
113 | # 5-ingres-lb/overlays/nginx-0.25.0/kustomization.yaml
114 | resources:
115 | - ingress-nginx/deploy/static/mandatory.yaml
116 | - ingress-nginx/deploy/static/provider/baremetal/service-nodeport.yaml
117 |
118 | patchesJson6902:
119 | # adds "networking.k8s.io" to ClusterRole's apiGroups
120 | - target:
121 | group: rbac.authorization.k8s.io
122 | version: v1
123 | kind: ClusterRole
124 | name: nginx-ingress-clusterrole
125 | path: patch_clusterrole.yaml
126 | ```
127 | ```yaml
128 | # 5-ingres-lb/overlays/nginx-0.25.0/patch_clusterrole.yaml
129 |
130 | # adds "networking.k8s.io" to apiGroups for ingress rules which is missing in 0.25.0
131 | - op: add
132 | path: /rules/3/apiGroups/-
133 | value: "networking.k8s.io"
134 | ```
135 | In the above example, the metadata for the target manifest is specified in the `kustomization.yaml` and the patch file
136 | itself contains the operation to perform, target path and value. The `rules/3/apiGroups/-` path indicates to perform the
137 | operation (in this case "add") at the `apiGroups:` list found under the 4th list item of `rules:`.
138 |
139 | ```yaml
140 | apiVersion: rbac.authorization.k8s.io/v1
141 | kind: ClusterRole
142 | metadata:
143 | name: nginx-ingress-clusterrole
144 | ...
145 | rules:
146 | - apiGroups:
147 | ...
148 | - apiGroups:
149 | ...
150 | - apiGroups:
151 | ...
152 | - apiGroups:
153 | ...
154 | - apiGroups:
155 | - "extensions"
156 | - "networking.k8s.io" # <- The patch adds the value to the list here
157 | ```
158 |
159 | The `value:` property specifies the data being operated on (added); in this case it is a simple string,
160 | "networking.k8s.io". The `value:` can also be more complex and specified as JSON or YAML. For more information, see
161 | [jsonpatch.md](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/jsonpatch.md).
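
For instance, a hypothetical patch operation whose `value:` is a mapping rather than a string could append a whole new rule (this fragment is illustrative and is not part of the overlay above):

```yaml
# Append a complete rule object to the end of the rules: list
- op: add
  path: /rules/-
  value:
    apiGroups: ["networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "list", "watch"]
```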
162 |
163 | ### Kustomize Resources
164 |
165 | * `kustomization.yaml` [fields](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/fields.md)
166 |
167 |
--------------------------------------------------------------------------------
/metrics/scaling/k8s_scale_rapid.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2019 Intel Corporation
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 |
7 | set -e
8 |
9 | # Pull in some common, useful, items
10 | SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
11 | source "${SCRIPT_PATH}/../lib/common.bash"
12 | source "${SCRIPT_PATH}/common.bash"
13 | source "${SCRIPT_PATH}/../collectd/collectd.bash"
14 |
15 | NUM_PODS=${NUM_PODS:-20}
16 | STEP=${STEP:-1}
17 |
18 | SMF_USE_COLLECTD=true
19 |
20 | LABELVALUE=${LABELVALUE:-gandalf}
21 |
22 | pod_command="[\"tail\", \"-f\", \"/dev/null\"]"
23 |
24 | # Set some default metrics env vars
25 | TEST_ARGS="runtime=${RUNTIME}"
26 | TEST_NAME="k8s rapid"
27 |
28 | # $1 is the launch time in milliseconds this pod/container took to start up.
29 | # $2 is the number of pods/containers under test
30 | grab_stats() {
31 | local launch_time_ms=$1
32 | local n_pods=$2
33 |
34 | info "And grab some stats"
35 |
36 | local date_json="$(cat << EOF
37 | "date": {
38 | "ns": $(date +%s%N),
39 | "Date": "$(date -u +"%Y-%m-%dT%T.%3N")"
40 | }
41 | EOF
42 | )"
43 | metrics_json_add_array_fragment "$date_json"
44 |
45 | local pods_json="$(cat << EOF
46 | "n_pods": {
47 | "Result": ${n_pods},
48 | "Units" : "int"
49 | }
50 | EOF
51 | )"
52 | metrics_json_add_array_fragment "$pods_json"
53 |
54 | local launch_json="$(cat << EOF
55 | "launch_time": {
56 | "Result": $launch_time_ms,
57 | "Units" : "ms"
58 | }
59 | EOF
60 | )"
61 | metrics_json_add_array_fragment "$launch_json"
62 |
63 | info "launch [$launch_time_ms]"
64 |
65 | metrics_json_close_array_element
66 | }
67 |
68 | init() {
69 | framework_init
70 | }
71 |
72 | save_config(){
73 | metrics_json_start_array
74 |
75 | local json="$(cat << EOF
76 | {
77 | "testname": "${TEST_NAME}",
78 | "NUM_PODS": ${NUM_PODS},
79 | "STEP": ${STEP},
80 | "wait_time": ${wait_time},
81 | "delete_wait_time": ${delete_wait_time},
82 | "settle_time": ${settle_time}
83 | }
84 | EOF
85 | )"
86 | metrics_json_add_array_element "$json"
87 | metrics_json_end_array "Config"
88 | }
89 |
90 | run() {
91 | info "Running test"
92 |
93 | trap cleanup EXIT QUIT KILL
94 |
95 | metrics_json_start_array
96 |
97 | for reqs in $(seq ${STEP} ${STEP} ${NUM_PODS}); do
98 | info "Testing replicas ${reqs} of ${NUM_PODS}"
99 | # Generate the next yaml file
100 |
101 | local runtime_command
102 | if [ -n "$RUNTIME" ]; then
103 | runtime_command="s|@RUNTIMECLASS@|${RUNTIME}|g"
104 | else
105 | runtime_command="/@RUNTIMECLASS@/d"
106 | fi
107 |
108 | local input_template
109 | local generated_file
110 | if [ "$use_api" != "no" ]; then
111 | input_template=$input_json
112 | generated_file=$generated_json
113 | else
114 | input_template=$input_yaml
115 | generated_file=$generated_yaml
116 | fi
117 |
118 | sed -e "s|@REPLICAS@|${reqs}|g" \
119 | -e $runtime_command \
120 | -e "s|@DEPLOYMENT@|${deployment}|g" \
121 | -e "s|@LABEL@|${LABEL}|g" \
122 | -e "s|@LABELVALUE@|${LABELVALUE}|g" \
123 | -e "s|@GRACE@|${grace}|g" \
124 | -e "s#@PODCOMMAND@#${pod_command}#g" \
125 | < ${input_template} > ${generated_file}
126 |
127 | info "Applying changes"
128 | local start_time=$(date +%s%N)
129 | if [ "$use_api" != "no" ]; then
130 | # If this is the first launch of the deploy, we need to use a different URL form.
131 | if [ $reqs == ${STEP} ]; then
132 | curl -s ${API_ADDRESS}:${API_PORT}/apis/apps/v1/namespaces/default/deployments -XPOST -H 'Content-Type: application/json' -d@${generated_file} > /dev/null
133 | else
134 | curl -s ${API_ADDRESS}:${API_PORT}/apis/apps/v1/namespaces/default/deployments/${deployment} -XPATCH -H 'Content-Type:application/strategic-merge-patch+json' -d@${generated_file} > /dev/null
135 | fi
136 | else
137 | kubectl apply -f ${generated_file}
138 | fi
139 |
140 | kubectl rollout status --timeout=${wait_time}s deployment/${deployment}
141 | local end_time=$(date +%s%N)
142 | local total_milliseconds=$(( (end_time - start_time) / 1000000 ))
143 | info "Took $total_milliseconds ms ($end_time - $start_time)"
144 |
145 | sleep ${settle_time}
146 | grab_stats $total_milliseconds $reqs
147 | done
148 | }
149 |
150 | cleanup() {
151 | info "Cleaning up"
152 |
153 | # First try to save any results we got
154 | metrics_json_end_array "BootResults"
155 |
156 | local start_time=$(date +%s%N)
157 | kubectl delete deployment --wait=true --timeout=${delete_wait_time}s ${deployment} || true
158 | for x in $(seq 1 ${delete_wait_time}); do
159 | local npods=$(kubectl get pods -l=${LABEL}=${LABELVALUE} -o=name | wc -l)
160 | if [ $npods -eq 0 ]; then
161 | echo "All pods have terminated at cycle $x"
162 | local alldied=true
163 | break;
164 | fi
165 | sleep 1
166 | done
167 | local end_time=$(date +%s%N)
168 | local total_milliseconds=$(( (end_time - start_time) / 1000000 ))
169 | if [ -z "$alldied" ]; then
170 | echo "ERROR: Not all pods died!"
171 | fi
172 | info "Delete Took $total_milliseconds ms ($end_time - $start_time)"
173 |
174 | local json="$(cat << EOF
175 | "Delete": {
176 | "Result": ${total_milliseconds},
177 | "Units" : "ms"
178 | }
179 | EOF
180 | )"
181 |
182 | metrics_json_add_fragment "$json"
183 | framework_shutdown
184 | }
185 |
186 | show_vars()
187 | {
188 | echo -e "\nEnvironment variables:"
189 | echo -e "\tName (default)"
190 | echo -e "\t\tDescription"
191 | echo -e "\tTEST_NAME (${TEST_NAME})"
192 | echo -e "\t\tCan be set to override the default JSON results filename"
193 | echo -e "\tNUM_PODS (${NUM_PODS})"
194 | echo -e "\t\tNumber of pods to launch"
195 | echo -e "\tSTEP (${STEP})"
196 | echo -e "\t\tNumber of pods to launch per cycle"
197 | echo -e "\twait_time (${wait_time})"
198 | echo -e "\t\tSeconds to wait for pods to become ready"
199 | echo -e "\tdelete_wait_time (${delete_wait_time})"
200 | echo -e "\t\tSeconds to wait for all pods to be deleted"
201 | echo -e "\tsettle_time (${settle_time})"
202 | echo -e "\t\tSeconds to wait after pods ready before taking measurements"
203 | echo -e "\tuse_api (${use_api})"
204 | echo -e "\t\tspecify yes or no to use the API to launch pods"
205 | echo -e "\tgrace (${grace})"
206 | echo -e "\t\tspecify the grace period in seconds for workload pod termination"
207 | }
208 |
209 | help()
210 | {
211 | usage=$(cat << EOF
212 | Usage: $0 [-h] [options]
213 | Description:
214 | Launch a series of workloads and take memory metric measurements after
215 | each launch.
216 | Options:
217 | -h, Help page.
218 | EOF
219 | )
220 | echo "$usage"
221 | show_vars
222 | }
223 |
224 | main() {
225 |
226 | local OPTIND
227 | while getopts "h" opt;do
228 | case ${opt} in
229 | h)
230 | help
231 | exit 0;
232 | ;;
233 | esac
234 | done
235 | shift $((OPTIND-1))
236 | init
237 | run
238 | # cleanup will happen at exit due to the shell 'trap' we registered
239 | # cleanup
240 | }
241 |
242 | main "$@"
243 |
--------------------------------------------------------------------------------
/clr-k8s-examples/setup_system.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -o errexit
3 | set -o nounset
4 |
5 | # global vars
6 | CLRK8S_OS=${CLRK8S_OS:-""}
7 | CLR_VER=${CLRK8S_CLR_VER:-""}
8 | HIGH_POD_COUNT=${HIGH_POD_COUNT:-""}
9 |
10 | # set no proxy
11 | NO_PROXY_ARRAY=(.svc 10.0.0.0/8 )
12 | NO_PROXY_ARRAY+=( $(hostname -I | awk -F. '{print $1"."$2"."$3".0/24"}'))
13 |
14 | if [[ -z "${RUNNER+x}" ]]; then RUNNER="${CLRK8S_RUNNER:-crio}"; fi
15 |
16 | # update os version
17 | function update_os_version() {
18 | if [[ -n "${CLR_VER}" ]]; then
19 | sudo swupd repair -m "${CLR_VER}" --picky --force
20 | return
21 | fi
22 | sudo swupd update
23 | }
24 |
25 | # add dependencies such as k8s and crio
26 | function add_os_deps() {
27 | sudo -E swupd bundle-add --quiet cloud-native-basic storage-utils
28 | }
29 |
30 | # permanently disable swap
31 | function disable_swap() {
32 | # disable current swap
33 | sudo swapoff -a
34 | # permanently disable swap
35 | sudo systemctl mask swap.target
36 | }
37 |
38 | # enable ip forwarding
39 | function enable_ip_forwarding() {
40 | #Ensure 'default' and 'all' rp_filter setting of strict mode (1)
41 | #Individual interfaces can still be configured to loose mode (2)
42 | #However, loose mode is not supported by Project Calico/felix, per
43 | #https://github.com/projectcalico/felix/issues/2082
44 | #Alternative is to set loose mode on and set Calico to run anyway as
45 | #described in the issue above. However, loose mode is less secure
46 | #than strict. (See: https://github.com/dcos/dcos/pull/454#issuecomment-238408590)
47 | #This workaround can be removed when and if systemd reverts their
48 | #rp_filter settings back to 1 for 'default' and 'all'.
49 | sudo mkdir -p /etc/sysctl.d/
50 | cat <<EOT | sudo bash -c "cat > /etc/sysctl.d/60-k8s.conf"
51 | net.ipv4.ip_forward=1
52 | net.ipv4.conf.default.rp_filter=1
53 | net.ipv4.conf.all.rp_filter=1
54 | EOT
55 | sudo systemctl restart systemd-sysctl
56 |
57 | }
58 |
59 | # ensure the modules we need are preloaded
60 | function setup_modules_load() {
61 | sudo mkdir -p /etc/modules-load.d/
62 | cat <<EOT | sudo bash -c "cat > /etc/modules-load.d/k8s.conf"
63 | br_netfilter
64 | vhost_vsock
65 | overlay
66 | EOT
67 | }
68 |
69 | # ensure hosts file setup
70 | function setup_hosts() {
71 | # Make sure /etc/hosts file exists
72 | if [ ! -f /etc/hosts ]; then
73 | sudo touch /etc/hosts
74 | fi
75 | # add localhost to /etc/hosts file
76 | # shellcheck disable=SC2126
77 | hostcount=$(grep '127.0.0.1 localhost' /etc/hosts | wc -l)
78 | if [ "$hostcount" == "0" ]; then
79 | echo "127.0.0.1 localhost $(hostname)" | sudo bash -c "cat >> /etc/hosts"
80 | else
81 | echo "/etc/hosts already configured"
82 | fi
83 | }
84 |
85 | # write increased limits to specified file
86 | function write_limits_conf() {
87 | cat <<EOT | sudo bash -c "cat > $1"
88 | [Service]
89 | LimitNOFILE=1048576
90 | LimitNPROC=1048576
91 | LimitCORE=1048576
92 | TimeoutStartSec=0
93 | MemoryLimit=infinity
94 | EOT
95 | }
96 |
97 | # update configuration to enable high pod counts
98 | function config_high_pod_count() {
99 | # install bundle dependencies
100 | sudo -E swupd bundle-add --quiet jq bc
101 |
102 | # increase max inotify watchers
103 | cat <<EOT | sudo bash -c "cat >> /etc/sysctl.conf"
104 | fs.inotify.max_queued_events=1048576
105 | fs.inotify.max_user_watches=1048576
106 | fs.inotify.max_user_instances=1048576
107 | EOT
108 | sudo sysctl -q -p
109 |
110 | # write configuration files
111 | sudo mkdir -p /etc/systemd/system/kubelet.service.d
112 | write_limits_conf "/etc/systemd/system/kubelet.service.d/limits.conf"
113 | if [ "$RUNNER" == "containerd" ]; then
114 | sudo mkdir -p /etc/systemd/system/containerd.service.d
115 | write_limits_conf "/etc/systemd/system/containerd.service.d/limits.conf"
116 | fi
117 | if [ "$RUNNER" == "crio" ]; then
118 | sudo mkdir -p /etc/systemd/system/crio.service.d
119 | write_limits_conf "/etc/systemd/system/crio.service.d/limits.conf"
120 | fi
121 | }
122 |
123 | # daemon reload
124 | function daemon_reload() {
125 | sudo systemctl daemon-reload
126 | }
127 |
128 | # enable kubelet for $RUNNER
129 | function enable_kubelet_runner() {
130 | # This will fail at this point, but puts it into a retry loop that
131 | # will therefore startup later once we have configured with kubeadm.
132 | sudo systemctl enable kubelet $RUNNER || true
133 | }
134 |
135 | # ensure that the system is ready without requiring a reboot
136 | function ensure_system_ready() {
137 | sudo systemctl restart systemd-modules-load.service
138 | }
139 |
140 | # add proxy if found
141 | function setup_proxy() {
142 | set +o nounset
143 | set +o errexit
144 | if [[ ${http_proxy} ]] || [[ ${HTTP_PROXY} ]]; then
145 | echo "Setting up proxy stuff...."
146 | # Setup IP for users too
147 | for ip in "${NO_PROXY_ARRAY[@]}"
148 | do
149 | result=`grep no_proxy /etc/profile.d/proxy.sh | grep $ip`
150 | [ -z "$result" ] && ADD_NO_PROXY+="$ip,"
151 | done
152 | sed_val=${ADD_NO_PROXY//\//\\/}
153 | [ -f /etc/environment ] && sudo sed -i "/no_proxy/I s/$/,${sed_val}/g" /etc/environment
154 | if [ -f /etc/profile.d/proxy.sh ]; then
155 | sudo sed -i "/no_proxy/I s/$/,${sed_val}/g" /etc/profile.d/proxy.sh
156 | else
157 | echo "Warning, failed to find /etc/profile.d/proxy.sh to edit no_proxy line"
158 | fi
159 |
160 | sudo mkdir -p /etc/systemd/system.conf.d
161 | cat <<EOF | sudo bash -c "cat > /etc/systemd/system.conf.d/proxy.conf"
162 | [Manager]
163 | DefaultEnvironment="HTTP_PROXY=${http_proxy}"
164 | DefaultEnvironment="HTTPS_PROXY=${https_proxy}"
165 | DefaultEnvironment="SOCKS_PROXY=${socks_proxy}"
166 | DefaultEnvironment="NO_PROXY=${no_proxy},${ADD_NO_PROXY}"
167 | EOF
168 |
169 | sudo systemctl daemon-reexec
170 | fi
171 | set -o nounset
172 | set -o errexit
173 | }
174 |
175 | # init for performing any pre tasks
176 | function init() {
177 | echo ""
178 | }
179 |
180 | ###
181 | # main
182 | ##
183 |
184 | if [[ -n "${CLRK8S_OS}" ]]; then
185 | # shellcheck disable=SC1090
186 | source "$(dirname "$0")/setup_system_${CLRK8S_OS}.sh"
187 | fi
188 |
189 | echo "Init..."
190 | init
191 | echo "Disabling swap..."
192 | disable_swap
193 | echo "Updating OS version..."
194 | update_os_version
195 | echo "Adding OS Dependencies..."
196 | add_os_deps
197 | echo "Enabling IP Forwarding..."
198 | enable_ip_forwarding
199 | echo "Setting up modules to load..."
200 | setup_modules_load
201 | echo "Setting up /etc/hosts..."
202 | setup_hosts
203 | if [[ -n "${HIGH_POD_COUNT}" ]]; then
204 | echo "Configure high pod count scaling..."
205 | config_high_pod_count
206 | fi
207 | echo "Reloading daemons..."
208 | daemon_reload
209 | echo "Enabling kubelet runner..."
210 | enable_kubelet_runner
211 | echo "Ensuring system is ready..."
212 | ensure_system_ready
213 | echo "Detecting and setting up proxy..."
214 | setup_proxy
215 |
216 | # We have potentially modified their env files, we need to restart the services.
217 | # daemon reload
218 | sudo systemctl daemon-reload
219 | # restart runner
220 | sudo systemctl restart $RUNNER || true
221 |
--------------------------------------------------------------------------------
/clr-k8s-examples/9-multi-network/multus-sriov-ds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: network-attachment-definitions.k8s.cni.cncf.io
6 | spec:
7 | group: k8s.cni.cncf.io
8 | scope: Namespaced
9 | names:
10 | plural: network-attachment-definitions
11 | singular: network-attachment-definition
12 | kind: NetworkAttachmentDefinition
13 | shortNames:
14 | - net-attach-def
15 | versions:
16 | - name: v1
17 | served: true
18 | storage: true
19 | schema:
20 | openAPIV3Schema:
21 | description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
22 | Working Group to express the intent for attaching pods to one or more logical or physical
23 | networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
24 | type: object
25 | properties:
26 | spec:
27 | description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
28 | type: object
29 | properties:
30 | config:
31 | description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
32 | type: string
33 | ---
34 | apiVersion: v1
35 | kind: ServiceAccount
36 | metadata:
37 | name: multus-sa
38 | namespace: kube-system
39 | ---
40 | apiVersion: v1
41 | kind: Secret
42 | metadata:
43 | name: multus-sa-secret
44 | namespace: kube-system
45 | annotations:
46 | kubernetes.io/service-account.name: multus-sa
47 | type: kubernetes.io/service-account-token
48 | ---
49 | kind: ClusterRole
50 | apiVersion: rbac.authorization.k8s.io/v1
51 | metadata:
52 | name: multus-pod-updater
53 | rules:
54 | - apiGroups: ["k8s.cni.cncf.io"]
55 | resources:
56 | - '*'
57 | verbs:
58 | - '*'
59 | - apiGroups:
60 | - ""
61 | resources:
62 | - pods
63 | - pods/status
64 | verbs:
65 | - get
66 | - update
67 | - apiGroups:
68 | - ""
69 | - events.k8s.io
70 | resources:
71 | - events
72 | verbs:
73 | - create
74 | - patch
75 | - update
76 | ---
77 | kind: ClusterRoleBinding
78 | apiVersion: rbac.authorization.k8s.io/v1
79 | metadata:
80 | name: multus-rb
81 | roleRef:
82 | apiGroup: rbac.authorization.k8s.io
83 | kind: ClusterRole
84 | name: multus-pod-updater
85 | subjects:
86 | - kind: ServiceAccount
87 | name: multus-sa
88 | namespace: kube-system
89 | ---
90 | kind: ConfigMap
91 | apiVersion: v1
92 | metadata:
93 | name: multus-scripts
94 | namespace: kube-system
95 | data:
96 | install-certs.sh: |
97 | # Copied from Calico
98 | # https://github.com/projectcalico/cni-plugin/blob/master/k8s-install/scripts/install-cni.sh
99 | touch /host/etc/cni/net.d/multus-kubeconfig
100 | chmod 600 /host/etc/cni/net.d/multus-kubeconfig
101 | SERVICE_ACCOUNT_PATH=/var/run/secrets/multus/serviceaccount
102 | KUBE_CA_FILE=$SERVICE_ACCOUNT_PATH/ca.crt
103 | TLS_CFG="certificate-authority-data: $(cat $KUBE_CA_FILE | base64 | tr -d '\n')"
104 | SERVICEACCOUNT_TOKEN=$(cat $SERVICE_ACCOUNT_PATH/token)
106 | cat > /host/etc/cni/net.d/multus-kubeconfig <<EOF
...
cat > /host/etc/cni/net.d/00-multus.conf <<EOF
...

--------------------------------------------------------------------------------
/metrics/scaling/k8s_parallel.sh:
--------------------------------------------------------------------------------
...
71 |     < ${input_yaml} > ${generated_yaml}
72 |
73 | info "Applying warmup pod"
74 | kubectl apply -f ${generated_yaml}
75 | info "Waiting for warmup"
76 | kubectl rollout status --timeout=${wait_time}s deployment/${deployment}
77 |
78 | info "Killing warmup pod"
79 | kill_deployment "${deployment}" "${LABEL}" "${LABELVALUE}" ${delete_wait_time}
80 |
81 | }
82 |
83 | # $1 is the launch time in milliseconds this pod/container took to start up.
84 | # $2 is the delete time in milliseconds this pod/container took to be deleted.
85 | # $3 is the number of pods/containers under test
86 | save_stats() {
87 | local launch_time_ms=$1
88 | local delete_time_ms=$2
89 | local n_pods=$3
90 |
91 | local json="$(cat << EOF
92 | {
93 | "date": {
94 | "ns": $(date +%s%N),
95 | "Date": "$(date -u +"%Y-%m-%dT%T.%3N")"
96 | },
97 | "n_pods": {
98 | "Result": ${n_pods},
99 | "Units" : "int"
100 | },
101 | "launch_time": {
102 | "Result": $launch_time_ms,
103 | "Units" : "ms"
104 | },
105 | "delete_time": {
106 | "Result": $delete_time_ms,
107 | "Units" : "ms"
108 | }
109 | }
110 | EOF
111 | )"
112 | metrics_json_add_array_element "$json"
113 | }
114 |
115 | init() {
116 | info "Initialising"
117 | info "Checking Kubernetes accessible"
118 | local worked=$( kubectl get nodes > /dev/null 2>&1 && echo $? || echo $? )
119 | if [ "$worked" != 0 ]; then
120 | die "kubectl failed to get nodes"
121 | fi
122 |
123 | info $(get_num_nodes) "Kubernetes nodes found"
124 | # We could check we have just the one node here - right now this is a single node
125 | # test!! - because our stats gathering is rudimentary, as k8s does not provide
126 | # a nice way to do it (unless you want to parse 'describe nodes')
127 | # Have a read of https://github.com/kubernetes/kubernetes/issues/25353
128 |
129 | framework_init
130 |
131 | # Ensure we pre-cache the container image etc.
132 | warmup
133 | }
134 |
135 | save_config(){
136 | metrics_json_start_array
137 |
138 | local json="$(cat << EOF
139 | {
140 | "testname": "${TEST_NAME}",
141 | "NUM_PODS": ${NUM_PODS},
142 | "STEP": ${STEP},
143 | "wait_time": ${wait_time},
144 | "delete_wait_time": ${delete_wait_time}
145 | }
146 | EOF
147 | )"
148 | metrics_json_add_array_element "$json"
149 | metrics_json_end_array "Config"
150 | }
151 |
152 | run() {
153 | info "Running test"
154 |
155 | trap cleanup EXIT QUIT KILL
156 |
157 | metrics_json_start_array
158 | for reqs in $(seq ${STEP} ${STEP} ${NUM_PODS}); do
159 | info "Testing parallel replicas ${reqs} of ${NUM_PODS}"
160 | # Generate the next yaml file
161 |
162 | local runtime_command
163 | if [ -n "$RUNTIME" ]; then
164 | runtime_command="s|@RUNTIMECLASS@|${RUNTIME}|g"
165 | else
166 | runtime_command="/@RUNTIMECLASS@/d"
167 | fi
168 |
169 | local input_template
170 | local generated_file
171 | if [ "$use_api" != "no" ]; then
172 | input_template=$input_json
173 | generated_file=$generated_json
174 | else
175 | input_template=$input_yaml
176 | generated_file=$generated_yaml
177 | fi
178 |
179 | sed -e "s|@REPLICAS@|${reqs}|g" \
180 | -e $runtime_command \
181 | -e "s|@DEPLOYMENT@|${deployment}|g" \
182 | -e "s|@LABEL@|${LABEL}|g" \
183 | -e "s|@LABELVALUE@|${LABELVALUE}|g" \
184 | -e "s|@GRACE@|${grace}|g" \
185 | -e "s#@PODCOMMAND@#${pod_command}#g" \
186 | < ${input_template} > ${generated_file}
187 |
188 | info "Applying changes"
189 | local start_time=$(date +%s%N)
190 |
191 | if [ "$use_api" != "no" ]; then
192 | curl -s ${API_ADDRESS}:${API_PORT}/apis/apps/v1/namespaces/default/deployments -XPOST -H 'Content-Type: application/json' -d@${generated_file} > /dev/null
193 | else
194 | kubectl apply -f ${generated_file}
195 | fi
196 |
197 | kubectl rollout status --timeout=${wait_time}s deployment/${deployment}
198 | local end_time=$(date +%s%N)
199 | local total_milliseconds=$(( (end_time - start_time) / 1000000 ))
200 | info "Took $total_milliseconds ms ($end_time - $start_time)"
201 |
202 | # And now remove that deployment, ready to launch the next one
203 | local delete_start_time=$(date +%s%N)
204 | kill_deployment "${deployment}" "${LABEL}" "${LABELVALUE}" ${delete_wait_time}
205 | local delete_end_time=$(date +%s%N)
206 | local delete_total_milliseconds=$(( (delete_end_time - delete_start_time) / 1000000 ))
207 | info "Delete took $delete_total_milliseconds ms ($delete_end_time - $delete_start_time)"
208 | save_stats $total_milliseconds $delete_total_milliseconds $reqs
209 | done
210 | }
211 |
212 | cleanup() {
213 | info "Cleaning up"
214 |
215 | # First try to save any results we got
216 | metrics_json_end_array "BootResults"
217 | kill_deployment "${deployment}" "${LABEL}" "${LABELVALUE}" ${delete_wait_time}
218 | framework_shutdown
219 | }
220 |
221 | show_vars()
222 | {
223 | echo -e "\nEnvironment variables:"
224 | echo -e "\tName (default)"
225 | echo -e "\t\tDescription"
226 | echo -e "\tTEST_NAME (${TEST_NAME})"
227 | echo -e "\t\tCan be set to override the default JSON results filename"
228 | echo -e "\tNUM_PODS (${NUM_PODS})"
229 | echo -e "\t\tNumber of pods to launch"
230 | echo -e "\tSTEP (${STEP})"
231 | echo -e "\t\tNumber of pods to launch per cycle"
232 | echo -e "\twait_time (${wait_time})"
233 | echo -e "\t\tSeconds to wait for pods to become ready"
234 | echo -e "\tdelete_wait_time (${delete_wait_time})"
235 | echo -e "\t\tSeconds to wait for all pods to be deleted"
236 | echo -e "\tsettle_time (${settle_time})"
237 | echo -e "\t\tSeconds to wait after pods ready before taking measurements"
238 | echo -e "\tuse_api (${use_api})"
239 | echo -e "\t\tspecify yes or no to use the API to launch pods"
240 | echo -e "\tgrace (${grace})"
241 | echo -e "\t\tspecify the grace period in seconds for workload pod termination"
242 | }
243 |
244 | help()
245 | {
246 | usage=$(cat << EOF
247 | Usage: $0 [-h] [options]
248 | Description:
249 | Launch a series of workloads in a parallel manner and take memory metric measurements
250 | after each launch.
251 | Options:
252 | -h, Help page.
253 | EOF
254 | )
255 | echo "$usage"
256 | show_vars
257 | }
258 |
259 | main() {
260 |
261 | local OPTIND
262 | while getopts "h" opt;do
263 | case ${opt} in
264 | h)
265 | help
266 | exit 0;
267 | ;;
268 | esac
269 | done
270 | shift $((OPTIND-1))
271 |
272 | init
273 | run
274 | # cleanup will happen at exit due to the shell 'trap' we registered
275 | # cleanup
276 | }
277 |
278 | main "$@"
279 |
--------------------------------------------------------------------------------