├── .github
└── workflows
│ ├── build-docker-images.yml
│ ├── microk8s-kata.yml
│ └── v2-kata-microk8s.yml
├── .gitignore
├── .project
├── LICENSE
├── README.md
├── README.template.md
├── data
├── containerd.toml
└── containerd.toml.bak
├── docker
├── Dockerfile-autoscale
└── Dockerfile-helloworld
├── img
├── containerd-logo.png
├── kata-logo.png
├── kata-vs-docker.jpg
├── microk8s-kata-launch-button.jpg
├── microk8s-logo.png
├── oci-logo.png
└── podman-logo.jpg
├── kubernetes
├── autoscale-kata.yaml
├── autoscale-runc.yaml
├── helloworld-kata.yaml
├── helloworld-runc.yaml
├── kata-runtime-class.yaml
├── nginx-kata.yaml
├── nginx-runc.yaml
└── nginx-untrusted.yaml
├── report.md
├── sh
├── microk8s-kata.sh
├── test-sed.sh
└── v2-kata-microk8s.sh
└── src
└── go
├── autoscale
└── autoscale.go
└── helloworld
└── helloworld.go
/.github/workflows/build-docker-images.yml:
--------------------------------------------------------------------------------
1 | name: MicroK8s Services Images
2 |
3 | on:
4 |
5 | #push:
6 |
7 | #schedule:
8 | #- cron: '0 1 * * TUE,FRI'
9 |
10 | workflow_dispatch:
11 | inputs:
12 | workflowAction:
13 | description: 'Action'
14 | default: 'Build helloworld-go and autoscale-go images'
15 | required: true
16 |
17 | jobs:
18 | microk8s-kata-containers-docker-build:
19 |
20 | runs-on: ubuntu-20.04
21 |
22 | steps:
23 |
24 | - name: Check environment
25 | run: |-
26 | lsb_release -a
27 |
28 | - name: Checkout
29 | uses: actions/checkout@v2
30 |
31 | - name: Build helloworld-go & autoscale container images
32 | run: |-
33 | echo ${{ secrets.DOCKER_PASSWORD }} | docker login --username ${{ secrets.DOCKER_USERID }} --password-stdin
34 | echo "build and push helloworld-go:"
35 | docker build --file docker/Dockerfile-helloworld --tag didierdurand/helloworld-go .
36 | docker push didierdurand/helloworld-go
37 | echo "build and push autoscale-go:"
38 | docker build --file docker/Dockerfile-autoscale --tag didierdurand/autoscale-go .
39 | docker push didierdurand/autoscale-go
40 | rm /home/runner/.docker/config.json
41 |
--------------------------------------------------------------------------------
/.github/workflows/microk8s-kata.yml:
--------------------------------------------------------------------------------
1 | name: Kata Containers on MicroK8s
2 |
3 | on:
4 |
5 | #push:
6 |
7 | #schedule:
8 | # - cron: '0 1 * * MON,THU'
9 |
10 | workflow_dispatch:
11 | inputs:
12 | workflowAction:
13 | description: 'Action'
14 | default: 'Run MicroK8s + Kata Containers'
15 | required: true
16 |
17 | jobs:
18 | microk8s-kata-containers:
19 |
20 | runs-on: ubuntu-20.04
21 |
22 | steps:
23 |
24 | - name: Check environment
25 | run: |-
26 | lsb_release -a
27 |
28 | - name: Checkout
29 | uses: actions/checkout@v2
30 |
31 | - name: Setup gcloud CLI
32 | uses: google-github-actions/setup-gcloud@master
33 | with:
34 | project_id: ${{ secrets.GCP_PROJECT }}
35 | service_account_key: ${{ secrets.GCP_SA_KEY }}
36 |
37 | - name: Get gcloud version & info
38 | run: |-
39 | echo '--- gcloud components update ---'
40 | gcloud components update
41 | echo '--- gcloud version ---'
42 | gcloud version
43 |
44 | - name: install MicroK8s + Kata Containers
45 | env:
46 | KATA_GCE_DELETE: true
47 | GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
48 | run: |-
49 | bash sh/microk8s-kata.sh
50 |
51 | - name: commit back changed files
52 | run: |
53 | git add -A
54 | git config --local user.name "Github Action from $GITHUB_ACTOR"
55 | git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com"
56 | git diff --quiet && git diff --staged --quiet || git commit -m "changes committed back by actor $GITHUB_ACTOR on $HELMALYZER_TIMESTAMP (workflow: $GITHUB_WORKFLOW - job: $GITHUB_JOB - sha: $GITHUB_SHA - ref: $GITHUB_REF)"
57 | git config pull.rebase false
58 | git diff --quiet && git diff --staged --quiet || git pull
59 | git push https://$GITHUB_ACTOR:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY
--------------------------------------------------------------------------------
/.github/workflows/v2-kata-microk8s.yml:
--------------------------------------------------------------------------------
1 | name: (V2) Kata Containers on MicroK8s
2 |
3 | on:
4 |
5 | #push:
6 |
7 | #schedule:
8 | # - cron: '0 1 * * MON,THU'
9 |
10 | workflow_dispatch:
11 | inputs:
12 | workflowAction:
13 | description: 'Action'
14 | default: 'Run MicroK8s + Kata Containers V2'
15 | required: true
16 |
17 | jobs:
18 | v2-microk8s-kata-containers:
19 |
20 | runs-on: ubuntu-20.04
21 |
22 | steps:
23 |
24 | - name: Check environment
25 | run: |-
26 | lsb_release -a
27 |
28 | - name: Checkout
29 | uses: actions/checkout@v2
30 |
31 | - name: Setup gcloud CLI
32 | uses: google-github-actions/setup-gcloud@master
33 | with:
34 | project_id: ${{ secrets.GCP_PROJECT }}
35 | service_account_key: ${{ secrets.GCP_SA_KEY }}
36 |
37 | - name: Get gcloud version & info
38 | run: |-
39 | echo '--- gcloud components update ---'
40 | gcloud components update
41 | echo '--- gcloud version ---'
42 | gcloud version
43 |
44 | - name: install MicroK8s + Kata Containers V2
45 | env:
46 | KATA_GCE_DELETE: true
47 | KATA_VERSION: 2.x
48 | GCP_PROJECT: ${{ secrets.GCP_PROJECT }}
49 | run: |-
50 | bash sh/microk8s-kata.sh
51 |
52 | #- name: commit back changed files
53 | # run: |
54 | # git add -A
55 | # git config --local user.name "Github Action from $GITHUB_ACTOR"
56 | # git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com"
57 | # git diff --quiet && git diff --staged --quiet || git commit -m "changes committed back by actor $GITHUB_ACTOR on $HELMALYZER_TIMESTAMP (workflow: $GITHUB_WORKFLOW - job: $GITHUB_JOB - sha: $GITHUB_SHA - ref: $GITHUB_REF)"
58 | # git config pull.rebase false
59 | # git diff --quiet && git diff --staged --quiet || git pull
60 | # git push https://$GITHUB_ACTOR:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Created by https://www.gitignore.io/api/macos
3 | # Edit at https://www.gitignore.io/?templates=macos
4 |
5 | ### macOS ###
6 | # General
7 | .DS_Store
8 | .AppleDouble
9 | .LSOverride
10 |
11 | # Icon must end with two \r
12 | Icon
13 |
14 | # Thumbnails
15 | ._*
16 |
17 | # Files that might appear in the root of a volume
18 | .DocumentRevisions-V100
19 | .fseventsd
20 | .Spotlight-V100
21 | .TemporaryItems
22 | .Trashes
23 | .VolumeIcon.icns
24 | .com.apple.timemachine.donotpresent
25 |
26 | # Directories potentially created on remote AFP share
27 | .AppleDB
28 | .AppleDesktop
29 | Network Trash Folder
30 | Temporary Items
31 | .apdisk
32 |
33 | # End of https://www.gitignore.io/api/macos
34 |
35 | # Created by https://www.gitignore.io/api/windows
36 | # Edit at https://www.gitignore.io/?templates=windows
37 |
38 | ### Windows ###
39 | # Windows thumbnail cache files
40 | Thumbs.db
41 | Thumbs.db:encryptable
42 | ehthumbs.db
43 | ehthumbs_vista.db
44 |
45 | # Dump file
46 | *.stackdump
47 |
48 | # Folder config file
49 | [Dd]esktop.ini
50 |
51 | # Recycle Bin used on file shares
52 | $RECYCLE.BIN/
53 |
54 | # Windows Installer files
55 | *.cab
56 | *.msi
57 | *.msix
58 | *.msm
59 | *.msp
60 |
61 | # Windows shortcuts
62 | *.lnk
63 |
64 | # End of https://www.gitignore.io/api/windows
65 |
66 | # Created by https://www.gitignore.io/api/linux
67 | # Edit at https://www.gitignore.io/?templates=linux
68 |
69 | ### Linux ###
70 | *~
71 |
72 | # temporary files which can be created if a process still has a handle open of a deleted file
73 | .fuse_hidden*
74 |
75 | # KDE directory preferences
76 | .directory
77 |
78 | # Linux trash folder which might appear on any partition or disk
79 | .Trash-*
80 |
81 | # .nfs files are created when an open file is removed but is still being accessed
82 | .nfs*
83 |
84 | # End of https://www.gitignore.io/api/linux
--------------------------------------------------------------------------------
/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | microk8s-kata-containers
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | 


3 |
4 | # Kata Containers on MicroK8s
5 |
6 | 
7 | 
8 |
9 | [](https://opensource.org/licenses/Apache-2.0)
10 |
11 | * [Goal](README.md#goal)
12 | * [Kata Containers - Rationale](README.md#kata-containers---rationale)
13 | * [Specific Setup](README.md#specific-setup)
14 | * [Workflow Steps](README.md#workflow-steps)
15 | * [How to Fork & Run](README.md#how-to-fork--run)
16 | * [Execution Report](README.md#execution-report)
17 |
18 |
19 | ## Goal
20 |
21 | [**Nota Bene:** This repository is **Work In Progress (WIP)**: currently, we abruptly replace the *"[runc](https://github.com/opencontainers/runc)"* binary, initially packaged with MicroK8s, with a symbolic link (symlink) to the *"[kata-runtime](https://github.com/kata-containers/runtime)"* binary, installed on the Ubuntu instance from the project's GitHub repository and added to the MicroK8s [snap](https://en.wikipedia.org/wiki/Snap_(package_manager)) in the early steps of this workflow. This initial (very) direct shortcut is possible because both binaries fully respect the [OCI runtime specification](https://opencontainers.org/). The next version of this repo will properly adapt the configuration of [containerd](https://containerd.io/) (via changes in containerd.toml) and implement the K8s [RuntimeClass](https://kubernetes.io/docs/concepts/containers/runtime-class/) to be able to dynamically choose the runtime on a per-container basis: proper directives in Deployment yaml manifests will allow simultaneous use of *"runc"* and *"kata-runtime"* in parallel by different containers having different execution requirements.]
22 |
23 | This repository encompasses a fully scripted Github workflow (via [microk8s-kata.yml](.github/workflows/microk8s-kata.yml) calling [microk8s-kata.sh](sh/microk8s-kata.sh)) to test the transparent use of the runtime for Kata Containers (Katas) on MicroK8s. It must run on a quite specific Google Cloud Engine (GCE) instance since so-called *"[nested virtualization](https://pve.proxmox.com/wiki/Nested_Virtualization)"* is required by Katas when running on the cloud due to its embedded virtual machine coming on top of the cloud hypervisor managing the Linux host. Some sample containerized services (see [helloworld.go](src/go/helloworld/helloworld.go) and [autoscale.go](src/go/autoscale/autoscale.go) built automatically with this [side job](.github/workflows/build-docker-images.yml)) are deployed from Docker Hub and executed as Kubernetes services on MicroK8s.
24 |
25 | The workflow tests the proper execution of sample containers with 'kata-runtime' after running them initially on standard 'runc' to validate global setup: beyond run of traditional helloworld-go, autoscale-go is called with parameters ensuring that thorough computations and resource allocation are properly executed by the replacing runtime.
26 |
27 | [MicroK8s](https://microk8s.io/) by Canonical was chosen on purpose for this project: its source code is extremely close to the upstream version of Kubernetes. Consequently, it makes it possible to build a fully-featured production-grade Kubernetes cluster that can be run autonomously - on a single Linux instance - with very sensible default configuration allowing a quick setup, quite representative of a productive system.
28 |
29 | To automatically confirm the validity of this workflow over time when new versions of the various components (Kata Containers, MicroK8s, Podman, Ubuntu, etc.) get published, cron schedules it on a recurring basis: execution logs can be seen in the [Actions tab](https://github.com/didier-durand/microk8s-kata-containers/actions). Excerpts of the last execution are gathered [further down in this page](README.md#execution-report).
30 |
31 | **Forking and re-using on your own is strongly encouraged!** All comments for improvements and extensions will be welcome. Finally, if you like this repo, please give a Github star so that it gets more easily found by others.
32 |
33 | ## Kata Containers - Rationale
34 |
35 | As per [Katas' website](https://katacontainers.io/): *"Kata Containers is an open source community working to build a secure container runtime with lightweight virtual machines that feel and perform like containers, but provide stronger workload isolation using hardware virtualization technology as a second layer of defense."*
36 |
37 | This added lightweight virtual machine comes with a dedicated Linux kernel, providing isolation of network, I/O and memory and utilizes hardware-enforced isolation through Intel's [VT-x features](https://en.wikipedia.org/wiki/X86_virtualization#Intel_virtualization_(VT-x)) for virtualization.
38 |
39 |
40 |
41 | The use of a per-container dedicated kernel and lightweight virtual machines, provided by either [Qemu](https://www.qemu.org/) or [Amazon's Firecracker](https://firecracker-microvm.github.io/), creates a much stronger isolation between the containers themselves and with the host. For example, if a container misbehaves and messes with the kernel resources by overconsuming or corrupting them, it's only **ITS** dedicated kernel that gets damaged, not the unique kernel shared between all containers and host, as when you're using regular containers. The picture above shows the clear differences between the two architectures. So, Kata Containers are probably the best option currently available for additional security and reliability with untrusted workloads of all kinds (recent versions, external source code, etc.).
42 |
43 | As you would expect, this further level of isolation through additional virtualization comes with a performance / cost penalty but this [comparative study](https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-assets-prod/presentation-media/kata-containers-and-gvisor-a-quantitave-comparison.pdf) between the performances of raw host performances, *"runc"*, [Google's gVisor](https://gvisor.dev/) containers and Kata Containers demonstrates that the overhead remains quite acceptable in many situations for the additional security that is delivered. Look at slides 19 to 26 of the linked pdf to get the exact numbers.
44 |
45 | ## Specific Setup
46 |
47 | Various specific points have to be part of this workflow:
48 |
49 | 1. [Katas on GCE](https://github.com/kata-containers/documentation/blob/master/install/gce-installation-guide.md) implies use of [nested virtualization](https://en.wikipedia.org/wiki/Virtualization#Nested_virtualization): this requires to create a [specific GCE image](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances) to activate the [Intel VT-x instruction set](https://en.wikipedia.org/wiki/X86_virtualization#Intel_virtualization_(VT-x)). This is obtained by the addition of a specific option *"--licenses="* to the command *"gcloud compute images create"*. See [microk8s-kata.sh](sh/microk8s-kata.sh) for details.
50 |
51 | 2. The underlying hardware must minimally be of the Intel's [Broadwell architecture generation](https://en.wikipedia.org/wiki/Broadwell_(microarchitecture)) to provide the VT-x instructions. This is guaranteed by adding *"--min-cpu-platform 'Intel Broadwell'"* to the command *"gcloud compute instances create"*. See [microk8s-kata.sh](sh/microk8s-kata.sh) for details.
52 |
53 | 3. [Podman CLI](https://podman.io/) is used instead of Docker CLI because Docker is not compatible with Kata Containers runtime 2.0. As [this article](https://developers.redhat.com/blog/2019/02/21/podman-and-buildah-for-docker-users/) explains it, the transition from Docker to Podman is very easy: command syntax and results are extremely close and even identical in most cases.
54 |
55 | ## Workflow Steps
56 |
57 | The major steps in this workflow are:
58 |
59 | 1. Check that GCE instance is proper ('GenuineIntel') - according to the above requirement for Broadwell - via lscpu after it has been created.
60 | 2. Install Kata Containers runtime directly from the Github repository of the project.
61 | 3. Check that this added runtime can run on the instance: command *"kata-runtime kata-check"* MUST produce output *"System is capable of running Kata Containers"*
62 | 4. Install Podman and check via *"podman info"* that it sees both its standard runtime *"runc"* and the newly added *"kata-runtime"*
63 | 5. Run the latest version of [Alpine Linux](https://en.wikipedia.org/wiki/Alpine_Linux) image with selection of kata-runtime (*"--runtime='kata-runtime"*) and verify through *"podman inspect"* that the running Alpine is effectively using kata-runtime.
64 | 6. Install MicroK8s via snap and check that it works properly via the deployment of [helloworld-go.yml](kubernetes/helloworld-go.yml) and [autoscale-go.yml](kubernetes/autoscale-go.yml) service manifests, built from GoLang source code in [src/go directory](src/go). Stop MicroK8s when validation is successful.
65 | 7. Open the MicroK8s .snap file to add kata-runtime and repackage a new version (now unsigned) of the .snap file. Please, note use of *"unsquashfs"* and *"mksquashfs"* to achieve this refurbishing since the [snap archive format](https://en.wikipedia.org/wiki/Snap_(package_manager)) is based on read-only and compressed [SquashFS](https://en.wikipedia.org/wiki/SquashFS) Linux file system.
66 | 8. Remove the old MicroK8s installation and re-install a fresh instance based on the newly created snap version: the *"--dangerous"* option is now required since the tweaked .snap is no longer signed by its official provider, Canonical.
67 | 9. Deploy helloworld-go and autoscale-go again on the fresh MicroK8s to validate that they work fine with kata-runtime: the autoscale-go request is parametrized to make sure that some amount of computing resources is consumed to achieve a better validation.
68 |
69 | ## How to Fork & Run
70 |
71 | To start with, you need a Google Cloud account including a project where the GCE APIs have been enabled. Obtain the id of your project from
72 | the GCP dashboard. Additionally, you need to create in this project a service account (SA) and give it proper GCE credentials: right to create, administer and delete GCE images & instances (if you cannot make the SA a "Project Owner" to simplify the security aspects...). Save the private key of the SA in json format.
73 |
74 | Then, fork our repository and define the required [Github Secrets](https://docs.github.com/en/actions/reference/encrypted-secrets) in your fork:
75 |
76 | 1. Your GCP project id will be ${{ secrets.GCP_PROJECT }}
77 | 2. The private key of your service account in json format will be ${{ secrets.GCP_SA_KEY }}
78 |
79 | To easily use the workflow from Github, you can launch it with the [manual dispatch feature of Github](https://github.blog/changelog/2020-07-06-github-actions-manual-triggers-with-workflow_dispatch/) that you can see as a launch button (the green one in the picture below) in the Action tab of your fork.
80 |
81 |
82 |
83 | The workflow will execute all the steps described above and terminate gracefully after all validation tests described are completed: it will then delete the GCE instance and the associated image triggering the nested virtualization.
84 |
85 | If you also want to make use of the [side workflow](.github/workflows/build-docker-images.yml) allowing to build the test container images from their GoLang sources, you'll need to add 2 additional secrets : {{ secrets.DOCKER_USERID }} & {{ secrets.DOCKER_PASSWORD }} corresponding to the login parameters of your [Docker Hub account](https://hub.docker.com/).
86 |
87 | ## Execution Report
88 |
89 | Below are some relevant excerpts of the last execution log:
90 |
91 |
92 |
93 | ```
94 | ### execution date: Sat Nov 28 09:49:46 UTC 2020
95 |
96 | ### microk8s snap version:
97 | microk8s v1.19.3 1791 1.19/stable canonical* classic
98 |
99 | ### ubuntu version:
100 | Distributor ID: Ubuntu
101 | Description: Ubuntu 20.04.1 LTS
102 | Release: 20.04
103 | Codename: focal
104 |
105 | ### docker version:
106 | Client: Docker Engine - Community
107 | Version: 19.03.13
108 | API version: 1.40
109 | Go version: go1.13.15
110 | Git commit: 4484c46d9d
111 | Built: Wed Sep 16 17:02:52 2020
112 | OS/Arch: linux/amd64
113 | Experimental: false
114 |
115 | ### kata-runtime version:
116 | kata-runtime : 1.12.0-rc0
117 | commit : <>
118 | OCI specs: 1.0.1-dev
119 |
120 | ### kata-runtime check:
121 | System is capable of running Kata Containers
122 |
123 |
124 | ### check existing container runtimes on Ubuntu host:
125 | -rwxr-xr-x 1 root root 9.7M Sep 9 15:40 /bin/runc
126 | -rwxr-xr-x 1 root root 31M Oct 22 16:51 /bin/kata-runtime
127 |
128 | ### check active OCI runtime:
129 |
130 | ### test use of kata-runtime with alpine:
131 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
132 | 7d90229b5af6 docker.io/library/alpine:latest sh 2 seconds ago Up Less than a second ago kata-alpine
133 | "Name": "kata-alpine",
134 | "Id": "7d90229b5af691afd78a472d10e7948b9d5a6d9756065cdf7be88463b3f9733b",
135 | "OCIRuntime": "kata-runtime",
136 |
137 | ### install microk8s:
138 | microk8s is running
139 | high-availability: no
140 | datastore master nodes: 127.0.0.1:19001
141 | datastore standby nodes: none
142 | addons:
143 | enabled:
144 | ha-cluster # Configure high availability on the current node
145 | disabled:
146 | ambassador # Ambassador API Gateway and Ingress
147 | cilium # SDN, fast with full network policy
148 | dashboard # The Kubernetes dashboard
149 | dns # CoreDNS
150 | fluentd # Elasticsearch-Fluentd-Kibana logging and monitoring
151 | gpu # Automatic enablement of Nvidia CUDA
152 | helm # Helm 2 - the package manager for Kubernetes
153 | helm3 # Helm 3 - Kubernetes package manager
154 | host-access # Allow Pods connecting to Host services smoothly
155 | ingress # Ingress controller for external access
156 | istio # Core Istio service mesh services
157 | jaeger # Kubernetes Jaeger operator with its simple config
158 | knative # The Knative framework on Kubernetes.
159 | kubeflow # Kubeflow for easy ML deployments
160 | linkerd # Linkerd is a service mesh for Kubernetes and other frameworks
161 | metallb # Loadbalancer for your Kubernetes cluster
162 | metrics-server # K8s Metrics Server for API access to service metrics
163 | multus # Multus CNI enables attaching multiple network interfaces to pods
164 | prometheus # Prometheus operator for monitoring and logging
165 | rbac # Role-Based Access Control for authorisation
166 | registry # Private image registry exposed on localhost:32000
167 | storage # Storage class; allocates storage from host directory
168 |
169 | ### check container runtime on microk8s snap:
170 | -rwxr-xr-x 1 root root 15M Nov 6 12:06 /snap/microk8s/current/bin/runc
171 |
172 | ### TEST WITH RUNC
173 |
174 |
175 | ### test microk8s with helloworld-go & autoscale-go:
176 | service/helloworld-go created
177 | deployment.apps/helloworld-go-deployment created
178 | service/autoscale-go created
179 | deployment.apps/autoscale-go-deployment created
180 | NAME READY STATUS RESTARTS AGE
181 | nginx-test 0/1 ContainerCreating 0 1s
182 | helloworld-go-deployment-86f5466d4-dc5d6 0/1 ContainerCreating 0 0s
183 | helloworld-go-deployment-86f5466d4-5wfd9 0/1 ContainerCreating 0 0s
184 | autoscale-go-deployment-5894658957-4vl42 0/1 Pending 0 0s
185 | autoscale-go-deployment-5894658957-6747m 0/1 ContainerCreating 0 0s
186 |
187 | waiting for ready pods...
188 |
189 | NAME READY STATUS RESTARTS AGE
190 | nginx-test 1/1 Running 0 2m2s
191 | autoscale-go-deployment-5894658957-4vl42 1/1 Running 0 2m1s
192 | helloworld-go-deployment-86f5466d4-5wfd9 1/1 Running 0 2m1s
193 | helloworld-go-deployment-86f5466d4-dc5d6 1/1 Running 0 2m1s
194 | autoscale-go-deployment-5894658957-6747m 1/1 Running 0 2m1s
195 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
196 | kubernetes ClusterIP 10.152.183.1 443/TCP 2m34s
197 | helloworld-go NodePort 10.152.183.42 80:31982/TCP 2m2s
198 | autoscale-go NodePort 10.152.183.245 80:31469/TCP 2m1s
199 |
200 | calling helloworld-go...
201 |
202 | Hello World: Kata Containers!
203 |
204 | calling autoscale-go with request for biggest prime under 10 000 and 5 MB memory...
205 |
206 | Allocated 5 Mb of memory.
207 | The largest prime less than 10000 is 9973.
208 | Slept for 100.18 milliseconds.
209 |
210 | ### re-install microk8s incl kata-runtime:
211 | microk8s v1.19.3 installed
212 | microk8s is running
213 | high-availability: no
214 | datastore master nodes: 127.0.0.1:19001
215 | datastore standby nodes: none
216 | addons:
217 | enabled:
218 | ha-cluster # Configure high availability on the current node
219 | disabled:
220 | ambassador # Ambassador API Gateway and Ingress
221 | cilium # SDN, fast with full network policy
222 | dashboard # The Kubernetes dashboard
223 | dns # CoreDNS
224 | fluentd # Elasticsearch-Fluentd-Kibana logging and monitoring
225 | gpu # Automatic enablement of Nvidia CUDA
226 | helm # Helm 2 - the package manager for Kubernetes
227 | helm3 # Helm 3 - Kubernetes package manager
228 | host-access # Allow Pods connecting to Host services smoothly
229 | ingress # Ingress controller for external access
230 | istio # Core Istio service mesh services
231 | jaeger # Kubernetes Jaeger operator with its simple config
232 | knative # The Knative framework on Kubernetes.
233 | kubeflow # Kubeflow for easy ML deployments
234 | linkerd # Linkerd is a service mesh for Kubernetes and other frameworks
235 | metallb # Loadbalancer for your Kubernetes cluster
236 | metrics-server # K8s Metrics Server for API access to service metrics
237 | multus # Multus CNI enables attaching multiple network interfaces to pods
238 | prometheus # Prometheus operator for monitoring and logging
239 | rbac # Role-Based Access Control for authorisation
240 | registry # Private image registry exposed on localhost:32000
241 | storage # Storage class; allocates storage from host directory
242 |
243 | ### TEST WITH KATA-RUNTIME
244 |
245 |
246 | ### test microk8s with helloworld-go & autoscale-go:
247 | service/helloworld-go created
248 | deployment.apps/helloworld-go-deployment created
249 | service/autoscale-go created
250 | deployment.apps/autoscale-go-deployment created
251 | NAME READY STATUS RESTARTS AGE
252 | nginx-test 0/1 ContainerCreating 0 2s
253 | helloworld-go-deployment-86f5466d4-hzmv8 0/1 ContainerCreating 0 1s
254 | autoscale-go-deployment-5894658957-m5qff 0/1 ContainerCreating 0 0s
255 | helloworld-go-deployment-86f5466d4-vntqb 0/1 ContainerCreating 0 1s
256 | autoscale-go-deployment-5894658957-ckc2z 0/1 ContainerCreating 0 0s
257 |
258 | waiting for ready pods...
259 |
260 | NAME READY STATUS RESTARTS AGE
261 | nginx-test 1/1 Running 0 2m3s
262 | autoscale-go-deployment-5894658957-m5qff 1/1 Running 0 2m1s
263 | helloworld-go-deployment-86f5466d4-hzmv8 1/1 Running 0 2m2s
264 | helloworld-go-deployment-86f5466d4-vntqb 1/1 Running 0 2m2s
265 | autoscale-go-deployment-5894658957-ckc2z 1/1 Running 0 2m1s
266 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
267 | kubernetes ClusterIP 10.152.183.1 443/TCP 2m26s
268 | helloworld-go NodePort 10.152.183.67 80:32503/TCP 2m2s
269 | autoscale-go NodePort 10.152.183.112 80:30065/TCP 2m1s
270 |
271 | calling helloworld-go...
272 |
273 | Hello World: Kata Containers!
274 |
275 | calling autoscale-go with request for biggest prime under 10 000 and 5 MB memory...
276 |
277 | Allocated 5 Mb of memory.
278 | The largest prime less than 10000 is 9973.
279 | Slept for 100.65 milliseconds.
280 |
281 | ### check proper symlink from microk8s runc:
282 | lrwxrwxrwx 1 root root 30 Nov 28 09:43 /snap/microk8s/current/bin/runc -> squashfs-root/bin/kata-runtime
283 | -rwxr-xr-x 1 root root 31560112 Oct 22 16:51 /bin/kata-runtime
284 | -rwxr-xr-x 1 root root 31560112 Nov 28 09:43 /snap/microk8s/current/bin/kata-runtime
285 | ```
286 |
--------------------------------------------------------------------------------
/README.template.md:
--------------------------------------------------------------------------------
1 |
2 | 


3 |
4 | # Kata Containers on MicroK8s
5 |
6 | 
7 | 
8 |
9 | [](https://opensource.org/licenses/Apache-2.0)
10 |
11 | * [Goal](README.md#goal)
12 | * [Kata Containers - Rationale](README.md#kata-containers---rationale)
13 | * [Specific Setup](README.md#specific-setup)
14 | * [Workflow Steps](README.md#workflow-steps)
15 | * [How to Fork & Run](README.md#how-to-fork--run)
16 | * [Execution Report](README.md#execution-report)
17 |
18 |
19 | ## Goal
20 |
21 | [**Nota Bene:** This repository is **Work In Progress (WIP)**: currently, we abruptly replace *"[runc](https://github.com/opencontainers/runc)"* binary, initially packaged with MicroK8s, with a symbolic link (symlink) to *"[kata-runtime](https://github.com/kata-containers/runtime)"* binary, installed on the Ubuntu instance from project's GitHub repository and added to the MicroK8s [snap](https://en.wikipedia.org/wiki/Snap_(package_manager)) in early steps of this workflow. This initial (very) direct shortcut is possible because both binaries fully respect the [OCI runtime specification](https://opencontainers.org/). Next version of this repo will properly adapt the configuration of [containerd](https://containerd.io/) (via changes in containerd.toml) and implement the K8s [RuntimeClass](https://kubernetes.io/docs/concepts/containers/runtime-class/) to be able to dynamically choose the runtime on a per-container basis: proper directives in Deployment yaml manifests will allow simultaneous use of *"runc"* and *"kata-runtime"* in parallel by different containers having different execution requirements.]
22 |
23 | This repository encompasses a fully scripted Github workflow (via [microk8s-kata.yml](.github/workflows/microk8s-kata.yml) calling [microk8s-kata.sh](sh/microk8s-kata.sh)) to test the transparent use of the runtime for Kata Containers (Katas) on MicroK8s. It must run on a quite specific Google Cloud Engine (GCE) instance since so-called *"[nested virtualization](https://pve.proxmox.com/wiki/Nested_Virtualization)"* is required by Katas when running on the cloud due to its embedded virtual machine coming on top of the cloud hypervisor managing the Linux host. Some sample containerized services (see [helloworld.go](src/go/helloworld/helloworld.go) and [autoscale.go](src/go/autoscale/autoscale.go) built automatically with this [side job](.github/workflows/build-docker-images.yml)) are deployed from Docker Hub and executed as Kubernetes services on MicroK8s.
24 |
25 | The workflow tests the proper execution of sample containers with 'kata-runtime' after running them initially on standard 'runc' to validate global setup: beyond run of traditional helloworld-go, autoscale-go is called with parameters ensuring that thorough computations and resource allocation are properly executed by the replacing runtime.
26 |
27 | [MicroK8s](https://microk8s.io/) by Canonical was chosen on purpose for this project: its source code is extremely close to the upstream version of Kubernetes. Consequently, it allows building a fully-featured production-grade Kubernetes cluster that can be run autonomously - on a single Linux instance - with very sensible default configuration allowing a quick setup, quite representative of a productive system.
28 |
29 | To automatically confirm the validity of this workflow over time when new versions of the various components (Kata Containers, MicroK8s, Podman, Ubuntu, etc.) get published, cron schedules it on a recurring basis: execution logs can be seen in [Actions tab](https://github.com/didier-durand/microk8s-kata-containers/actions). Excerpts of last execution are gathered [further down in this page](README.md#execution-report).
30 |
31 | **Forking and re-using on your own is strongly encouraged!** All comments for improvements and extensions will be welcome. Finally, if you like this repo, please give a Github star so that it gets more easily found by others.
32 |
33 | ## Kata Containers - Rationale
34 |
35 | As per [Katas' website](https://katacontainers.io/): *"Kata Containers is an open source community working to build a secure container runtime with lightweight virtual machines that feel and perform like containers, but provide stronger workload isolation using hardware virtualization technology as a second layer of defense."*
36 |
37 | This added lightweight virtual machine comes with a dedicated Linux kernel, providing isolation of network, I/O and memory and utilizes hardware-enforced isolation through Intel's [VT-x features](https://en.wikipedia.org/wiki/X86_virtualization#Intel_virtualization_(VT-x)) for virtualization.
38 |
39 |
40 |
41 | The use of a per-container dedicated kernel and lightweight virtual machines, provided by either [Qemu](https://www.qemu.org/) or [Amazon's Firecracker](https://firecracker-microvm.github.io/), creates a much stronger isolation between the containers themselves and with the host. For example, if a container misbehaves and messes up with the kernel resources by overconsuming or corrupting them, it's only **ITS** dedicated kernel that gets damaged, not the unique kernel shared between all containers and host, as when you're using regular containers. The picture above shows the clear differences between the two architectures. So, Kata Containers are probably the best option currently available for additional security and reliability with untrusted workloads of all kinds (recent versions, external source code, etc.).
42 |
43 | As you would expect, this further level of isolation through additional virtualization comes with a performance / cost penalty but this [comparative study](https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-assets-prod/presentation-media/kata-containers-and-gvisor-a-quantitave-comparison.pdf) between the performances of raw host performances, *"runc"*, [Google's gVisor](https://gvisor.dev/) containers and Kata Containers demonstrates that the overhead remains quite acceptable in many situations for the additional security that is delivered. Look at slides 19 to 26 of the linked pdf to get the exact numbers.
44 |
45 | ## Specific Setup
46 |
47 | Various specific points have to be part of this workflow:
48 |
49 | 1. [Katas on GCE](https://github.com/kata-containers/documentation/blob/master/install/gce-installation-guide.md) implies use of [nested virtualization](https://en.wikipedia.org/wiki/Virtualization#Nested_virtualization): this requires to create a [specific GCE image](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances) to activate the [Intel VT-x instruction set](https://en.wikipedia.org/wiki/X86_virtualization#Intel_virtualization_(VT-x)). This is obtained by the addition of a specific option *"--licenses="* to the command *"gcloud compute images create"*. See [microk8s-kata.sh](sh/microk8s-kata.sh) for details.
50 |
51 | 2. The underlying hardware must minimally be of the Intel's [Broadwell architecture generation](https://en.wikipedia.org/wiki/Broadwell_(microarchitecture)) to provide the VT-x instructions. This is guaranteed by adding *"--min-cpu-platform 'Intel Broadwell'"* to the command *"gcloud compute instances create"*. See [microk8s-kata.sh](sh/microk8s-kata.sh) for details.
52 |
53 | 3. [Podman CLI](https://podman.io/) is used instead of Docker CLI because Docker is not compatible with Kata Containers runtime 2.0. As [this article](https://developers.redhat.com/blog/2019/02/21/podman-and-buildah-for-docker-users/) explains it, the transition from Docker to Podman is very easy: command syntax and results are extremely close and even identical in most cases.
54 |
55 | ## Workflow Steps
56 |
57 | The major steps in this workflow are:
58 |
59 | 1. Check that GCE instance is proper ('GenuineIntel') - according to the above requirement for Broadwell - via lscpu after it has been created.
60 | 2. Install Kata Containers runtime directly from the Github repository of the project.
61 | 3. Check that this added runtime can run on the instance: command *"kata-runtime kata-check"* MUST produce output *"System is capable of running Kata Containers"*
62 | 4. Install Podman and check via *"podman info"* that it sees both its standard runtime *"runc"* and the newly added *"kata-runtime"*
63 | 5. Run the latest version of [Alpine Linux](https://en.wikipedia.org/wiki/Alpine_Linux) image with selection of kata-runtime (*"--runtime='kata-runtime"*) and verify through *"podman inspect"* that the running Alpine is effectively using kata-runtime.
64 | 6. Install MicroK8s via snap and check that it works properly via the deployment of [helloworld-go.yml](kubernetes/helloworld-go.yml) and [autoscale-go.yml](kubernetes/autoscale-go.yml) service manifests, built from GoLang source code in [src/go directory](src/go). Stop MicroK8s when validation is successful.
65 | 7. Open the MicroK8s .snap file to add kata-runtime and repackage a new version (now unsigned) of the .snap file. Please, note use of *"unsquashfs"* and *"mksquashfs"* to achieve this refurbishing since the [snap archive format](https://en.wikipedia.org/wiki/Snap_(package_manager)) is based on read-only and compressed [SquashFS](https://en.wikipedia.org/wiki/SquashFS) Linux file system.
66 | 8. Remove old MicroK8s installation and re-install a fresh instance based on the newly created snap version: *"--dangerous"* option is now required since the tweaked .snap is no longer signed by its official provider, Canonical.
67 | 9. Deploy again helloworld-go and autoscale-go on fresh MicroK8s to validate that they work fine with kata-runtime: autoscale-go request is parametrized to make sure that some amount of computing resources is consumed to achieve a better validation.
68 |
69 | ## How to Fork & Run
70 |
71 | To start with, you need a Google Cloud account including a project where the GCE APIs have been enabled. Obtain the id of your project from
72 | GCP dashboard. Additionally, you need to create in this project a service account (SA) and give it proper GCE credentials: right to create, administer and delete GCE images & instances (if you cannot make the SA a "Project Owner" to simplify the security aspects...). Save the private key of the SA in json format.
73 |
74 | Then, fork our repository and define the required [Github Secrets](https://docs.github.com/en/actions/reference/encrypted-secrets) in your fork:
75 |
76 | 1. Your GCP project id will be ${{ secrets.GCP_PROJECT }}
77 | 2. The private key of your service account in json format will be ${{ secrets.GCP_SA_KEY }}
78 |
79 | To easily use the workflow from Github, you can launch it with the [manual dispatch feature of Github](https://github.blog/changelog/2020-07-06-github-actions-manual-triggers-with-workflow_dispatch/) that you can see as a launch button (the green one in the picture below) in the Action tab of your fork.
80 |
81 |
82 |
83 | The workflow will execute all the steps described above and terminate gracefully after all validation tests described are completed: it will then delete the GCE instance and the associated image triggering the nested virtualization.
84 |
85 | If you also want to make use of the [side workflow](.github/workflows/build-docker-images.yml) allowing to build the test container images from their GoLang sources, you'll need to add 2 additional secrets : ${{ secrets.DOCKER_USERID }} & ${{ secrets.DOCKER_PASSWORD }} corresponding to the login parameters of your [Docker Hub account](https://hub.docker.com/).
86 |
87 | ## Execution Report
88 |
89 | Below are some relevant excerpts of the last execution log:
90 |
91 |
92 |
93 |
--------------------------------------------------------------------------------
/data/containerd.toml:
--------------------------------------------------------------------------------
1 | # Use config version 2 to enable new configuration fields.
2 | version = 2
3 | oom_score = 0
4 |
5 | [grpc]
6 | uid = 0
7 | gid = 0
8 | max_recv_message_size = 16777216
9 | max_send_message_size = 16777216
10 |
11 | [debug]
12 | address = ""
13 | uid = 0
14 | gid = 0
15 |
16 | [metrics]
17 | address = "127.0.0.1:1338"
18 | grpc_histogram = false
19 |
20 | [cgroup]
21 | path = ""
22 |
23 |
24 | # The 'plugins."io.containerd.grpc.v1.cri"' table contains all of the server options.
25 | [plugins."io.containerd.grpc.v1.cri"]
26 |
27 | stream_server_address = "127.0.0.1"
28 | stream_server_port = "0"
29 | enable_selinux = false
30 | sandbox_image = "k8s.gcr.io/pause:3.1"
31 | stats_collect_period = 10
32 | enable_tls_streaming = false
33 | max_container_log_line_size = 16384
34 |
35 | # 'plugins."io.containerd.grpc.v1.cri".containerd' contains config related to containerd
36 | [plugins."io.containerd.grpc.v1.cri".containerd]
37 |
38 | # snapshotter is the snapshotter used by containerd.
39 | snapshotter = "overlayfs"
40 |
41 | # no_pivot disables pivot-root (linux only), required when running a container in a RamDisk with runc.
42 | # This only works for runtime type "io.containerd.runtime.v1.linux".
43 | no_pivot = false
44 |
45 | # default_runtime_name is the default runtime name to use.
46 | default_runtime_name = "runc"
47 |
48 | # 'plugins."io.containerd.grpc.v1.cri".containerd.runtimes' is a map from CRI RuntimeHandler strings, which specify types
49 | # of runtime configurations, to the matching configurations.
50 | # In this example, 'runc' is the RuntimeHandler string to match.
51 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
52 | # runtime_type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux
53 | runtime_type = "io.containerd.runc.v1"
54 |
55 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-runtime]
56 | runtime_type = "io.containerd.runc.v1"
57 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-runtime.options]
58 | BinaryName = "kata-runtime"
59 |
60 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime]
61 | runtime_type = "io.containerd.runc.v1"
62 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime.options]
63 | BinaryName = "nvidia-container-runtime"
64 |
65 | # 'plugins."io.containerd.grpc.v1.cri".cni' contains config related to cni
66 | [plugins."io.containerd.grpc.v1.cri".cni]
67 | # bin_dir is the directory in which the binaries for the plugin is kept.
68 | bin_dir = "/var/snap/microk8s/x1/opt/cni/bin"
69 |
70 | # conf_dir is the directory in which the admin places a CNI conf.
71 | conf_dir = "/var/snap/microk8s/x1/args/cni-network"
72 |
73 | # 'plugins."io.containerd.grpc.v1.cri".registry' contains config related to the registry
74 | [plugins."io.containerd.grpc.v1.cri".registry]
75 |
76 | # 'plugins."io.containerd.grpc.v1.cri".registry.mirrors' are namespace to mirror mapping for all namespaces.
77 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
78 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
79 | endpoint = ["https://registry-1.docker.io", ]
80 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:32000"]
81 | endpoint = ["http://localhost:32000"]
--------------------------------------------------------------------------------
/data/containerd.toml.bak:
--------------------------------------------------------------------------------
1 | # Use config version 2 to enable new configuration fields.
2 | version = 2
3 | oom_score = 0
4 |
5 | [grpc]
6 | uid = 0
7 | gid = 0
8 | max_recv_message_size = 16777216
9 | max_send_message_size = 16777216
10 |
11 | [debug]
12 | address = ""
13 | uid = 0
14 | gid = 0
15 |
16 | [metrics]
17 | address = "127.0.0.1:1338"
18 | grpc_histogram = false
19 |
20 | [cgroup]
21 | path = ""
22 |
23 |
24 | # The 'plugins."io.containerd.grpc.v1.cri"' table contains all of the server options.
25 | [plugins."io.containerd.grpc.v1.cri"]
26 |
27 | stream_server_address = "127.0.0.1"
28 | stream_server_port = "0"
29 | enable_selinux = false
30 | sandbox_image = "k8s.gcr.io/pause:3.1"
31 | stats_collect_period = 10
32 | enable_tls_streaming = false
33 | max_container_log_line_size = 16384
34 |
35 | # 'plugins."io.containerd.grpc.v1.cri".containerd' contains config related to containerd
36 | [plugins."io.containerd.grpc.v1.cri".containerd]
37 |
38 | # snapshotter is the snapshotter used by containerd.
39 | snapshotter = "overlayfs"
40 |
41 | # no_pivot disables pivot-root (linux only), required when running a container in a RamDisk with runc.
42 | # This only works for runtime type "io.containerd.runtime.v1.linux".
43 | no_pivot = false
44 |
45 | # default_runtime_name is the default runtime name to use.
46 | default_runtime_name = "runc"
47 |
48 | # 'plugins."io.containerd.grpc.v1.cri".containerd.runtimes' is a map from CRI RuntimeHandler strings, which specify types
49 | # of runtime configurations, to the matching configurations.
50 | # In this example, 'runc' is the RuntimeHandler string to match.
51 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
52 | # runtime_type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux
53 | runtime_type = "io.containerd.runc.v1"
54 |
55 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime]
56 | # runtime_type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux
57 | runtime_type = "io.containerd.runc.v1"
58 |
59 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime.options]
60 | BinaryName = "nvidia-container-runtime"
61 |
62 | # 'plugins."io.containerd.grpc.v1.cri".cni' contains config related to cni
63 | [plugins."io.containerd.grpc.v1.cri".cni]
64 | # bin_dir is the directory in which the binaries for the plugin is kept.
65 | bin_dir = "/var/snap/microk8s/x1/opt/cni/bin"
66 |
67 | # conf_dir is the directory in which the admin places a CNI conf.
68 | conf_dir = "/var/snap/microk8s/x1/args/cni-network"
69 |
70 | # 'plugins."io.containerd.grpc.v1.cri".registry' contains config related to the registry
71 | [plugins."io.containerd.grpc.v1.cri".registry]
72 |
73 | # 'plugins."io.containerd.grpc.v1.cri".registry.mirrors' are namespace to mirror mapping for all namespaces.
74 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
75 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
76 | endpoint = ["https://registry-1.docker.io", ]
77 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:32000"]
78 | endpoint = ["http://localhost:32000"]
--------------------------------------------------------------------------------
/docker/Dockerfile-autoscale:
--------------------------------------------------------------------------------
1 | # Start from a Debian image with the latest version of Go installed
2 | # and a workspace (GOPATH) configured at /go.
3 | FROM golang
4 |
5 | # Copy the local package files to the container's workspace.
6 | ADD src src
7 |
8 | # Build sample.
9 | RUN go install ./src/go/autoscale
10 |
11 | # Run the command by default when the container starts.
12 | ENTRYPOINT /go/bin/autoscale
13 |
14 | # Document that the service listens on port 8080.
15 | EXPOSE 8080
--------------------------------------------------------------------------------
/docker/Dockerfile-helloworld:
--------------------------------------------------------------------------------
1 | # source: https://github.com/peter-evans/knative-docs/blob/master/serving/samples/helloworld-go/Dockerfile
2 | # Start from a Debian image with the latest version of Go installed
3 | # and a workspace (GOPATH) configured at /go.
4 | FROM golang
5 |
6 | # Copy the local package files to the container's workspace.
7 | ADD src src
8 |
9 | # Build sample.
10 | RUN go install ./src/go/helloworld
11 |
12 | # Run the command by default when the container starts.
13 | ENTRYPOINT /go/bin/helloworld
14 |
15 | # Document that the service listens on port 8080.
16 | EXPOSE 8080
17 |
--------------------------------------------------------------------------------
/img/containerd-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/containerd-logo.png
--------------------------------------------------------------------------------
/img/kata-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/kata-logo.png
--------------------------------------------------------------------------------
/img/kata-vs-docker.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/kata-vs-docker.jpg
--------------------------------------------------------------------------------
/img/microk8s-kata-launch-button.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/microk8s-kata-launch-button.jpg
--------------------------------------------------------------------------------
/img/microk8s-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/microk8s-logo.png
--------------------------------------------------------------------------------
/img/oci-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/oci-logo.png
--------------------------------------------------------------------------------
/img/podman-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/bd5e9b191887783222816afd338dffdea3f5d35d/img/podman-logo.jpg
--------------------------------------------------------------------------------
/kubernetes/autoscale-kata.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: autoscale-kata
5 | spec:
6 | selector:
7 | app: autoscale-kata
8 | ports:
9 | - protocol: TCP
10 | port: 80
11 | targetPort: 8080
12 | type: NodePort
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 | name: autoscale-kata-deployment
18 | labels:
19 | app: autoscale-kata
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: autoscale-kata
24 | replicas: 1
25 | template:
26 | metadata:
27 | labels:
28 | app: autoscale-kata
29 | spec:
30 | containers:
31 | - name: autoscale-kata
32 | image: didierdurand/autoscale-go
33 | ports:
34 | - containerPort: 8080
--------------------------------------------------------------------------------
/kubernetes/autoscale-runc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: autoscale-runc
5 | spec:
6 | selector:
7 | app: autoscale-runc
8 | ports:
9 | - protocol: TCP
10 | port: 80
11 | targetPort: 8080
12 | type: NodePort
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 | name: autoscale-runc-deployment
18 | labels:
19 | app: autoscale-runc
20 | spec:
21 | selector:
22 | matchLabels:
23 | app: autoscale-runc
24 | replicas: 1
25 | template:
26 | metadata:
27 | labels:
28 | app: autoscale-runc
29 | spec:
30 | containers:
31 | - name: autoscale-runc
32 | image: didierdurand/autoscale-go
33 | ports:
34 | - containerPort: 8080
--------------------------------------------------------------------------------
/kubernetes/helloworld-kata.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: helloworld-kata
5 | spec:
6 |   selector:
7 |     app: helloworld-kata
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 8080  # helloworld-go listens on 8080 inside the container
12 |   type: NodePort  # NodePort so the test script can curl the service from the host
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 |   name: helloworld-kata-deployment
18 |   labels:
19 |     app: helloworld-kata
20 | spec:
21 |   selector:
22 |     matchLabels:
23 |       app: helloworld-kata
24 |   replicas: 1
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: helloworld-kata
29 |     spec:
30 |       # run this pod sandboxed by Kata Containers via the RuntimeClass
31 |       # declared in kata-runtime-class.yaml
32 |       runtimeClassName: kata-runtime
33 |       containers:
34 |       - name: helloworld-kata
35 |         image: didierdurand/helloworld-go
36 |         ports:
37 |         - containerPort: 8080
38 |         env:
39 |         - name: TARGET
40 |           value: "Kata Containers"
--------------------------------------------------------------------------------
/kubernetes/helloworld-runc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: helloworld-runc
5 | spec:
6 |   selector:
7 |     app: helloworld-runc
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 8080  # helloworld-go listens on 8080 inside the container
12 |   type: NodePort  # NodePort so the test script can curl the service from the host
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 |   name: helloworld-runc-deployment
18 |   labels:
19 |     app: helloworld-runc
20 | spec:
21 |   selector:
22 |     matchLabels:
23 |       app: helloworld-runc
24 |   replicas: 1
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: helloworld-runc
29 |     spec:  # no runtimeClassName: pod runs with the cluster's default runtime (runc)
30 |       containers:
31 |       - name: helloworld-runc
32 |         image: didierdurand/helloworld-go
33 |         ports:
34 |         - containerPort: 8080
35 |         env:
36 |         - name: TARGET  # echoed by the app as "Hello World: <TARGET>!" (checked by the test script)
37 |           value: "Runc Containers"
--------------------------------------------------------------------------------
/kubernetes/kata-runtime-class.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: node.k8s.io/v1beta1  # v1beta1 matches K8s 1.19 used here; node.k8s.io/v1 exists since 1.20 (v1beta1 removed in 1.25)
2 | kind: RuntimeClass
3 | metadata:
4 |   name: kata-runtime  # the name pods reference via spec.runtimeClassName
5 | handler: kata-handler  # NOTE(review): must equal the containerd runtime handler name; the script's containerd snippet declares "kata-runtime" — confirm data/containerd.toml actually defines "kata-handler"
--------------------------------------------------------------------------------
/kubernetes/nginx-kata.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: nginx-kata
5 | spec:
6 |   selector:
7 |     app: nginx-kata
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 80  # nginx serves on its default port 80
12 |   type: NodePort  # NodePort so the test script can reach it from the host
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 |   name: nginx-kata-deployment
18 |   labels:
19 |     app: nginx-kata
20 | spec:
21 |   selector:
22 |     matchLabels:
23 |       app: nginx-kata
24 |   replicas: 1
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: nginx-kata
29 |     spec:
30 |       # run this pod sandboxed by Kata Containers via the RuntimeClass
31 |       # declared in kata-runtime-class.yaml
32 |       runtimeClassName: kata-runtime
33 |       containers:
34 |       - name: nginx-kata
35 |         image: nginx
36 |         ports:
37 |         - containerPort: 80
--------------------------------------------------------------------------------
/kubernetes/nginx-runc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: nginx-runc
5 | spec:
6 |   selector:
7 |     app: nginx-runc
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 80  # nginx serves on its default port 80
12 |   type: NodePort  # NodePort so the test script can reach it from the host
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 |   name: nginx-runc-deployment
18 |   labels:
19 |     app: nginx-runc
20 | spec:
21 |   selector:
22 |     matchLabels:
23 |       app: nginx-runc
24 |   replicas: 1
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: nginx-runc
29 |     spec:  # no runtimeClassName: pod runs with the cluster's default runtime (runc)
30 |       containers:
31 |       - name: nginx-runc
32 |         image: nginx
33 |         ports:
34 |         - containerPort: 80
--------------------------------------------------------------------------------
/kubernetes/nginx-untrusted.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: nginx-untrusted
5 | spec:
6 |   selector:
7 |     app: nginx-untrusted
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 80  # nginx serves on its default port 80
12 |   type: NodePort  # NodePort so the test script can reach it from the host
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 |   name: nginx-untrusted-deployment
18 |   labels:
19 |     app: nginx-untrusted
20 | spec:
21 |   selector:
22 |     matchLabels:
23 |       app: nginx-untrusted
24 |   replicas: 1
25 |   template:
26 |     metadata:
27 |       labels:
28 |         app: nginx-untrusted
29 |     spec:  # NOTE(review): "untrusted" naming suggests a Kata sandbox was intended, but no runtimeClassName (or untrusted-workload annotation) is set — confirm which runtime this should use
30 |       containers:
31 |       - name: nginx-untrusted
32 |         image: nginx
33 |         ports:
34 |         - containerPort: 80
--------------------------------------------------------------------------------
/report.md:
--------------------------------------------------------------------------------
1 | ### execution date: Sat Nov 28 09:49:46 UTC 2020
2 |
3 | ### microk8s snap version:
4 | microk8s v1.19.3 1791 1.19/stable canonical* classic
5 |
6 | ### ubuntu version:
7 | Distributor ID: Ubuntu
8 | Description: Ubuntu 20.04.1 LTS
9 | Release: 20.04
10 | Codename: focal
11 |
12 | ### docker version:
13 | Client: Docker Engine - Community
14 | Version: 19.03.13
15 | API version: 1.40
16 | Go version: go1.13.15
17 | Git commit: 4484c46d9d
18 | Built: Wed Sep 16 17:02:52 2020
19 | OS/Arch: linux/amd64
20 | Experimental: false
21 |
22 | ### kata-runtime version:
23 | kata-runtime : 1.12.0-rc0
24 | commit : <>
25 | OCI specs: 1.0.1-dev
26 |
27 | ### kata-runtime check:
28 | System is capable of running Kata Containers
29 |
30 |
31 | ### check existing container runtimes on Ubuntu host:
32 | -rwxr-xr-x 1 root root 9.7M Sep 9 15:40 /bin/runc
33 | -rwxr-xr-x 1 root root 31M Oct 22 16:51 /bin/kata-runtime
34 |
35 | ### check active OCI runtime:
36 |
37 | ### test use of kata-runtime with alpine:
38 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
39 | 7d90229b5af6 docker.io/library/alpine:latest sh 2 seconds ago Up Less than a second ago kata-alpine
40 | "Name": "kata-alpine",
41 | "Id": "7d90229b5af691afd78a472d10e7948b9d5a6d9756065cdf7be88463b3f9733b",
42 | "OCIRuntime": "kata-runtime",
43 |
44 | ### install microk8s:
45 | microk8s is running
46 | high-availability: no
47 | datastore master nodes: 127.0.0.1:19001
48 | datastore standby nodes: none
49 | addons:
50 | enabled:
51 | ha-cluster # Configure high availability on the current node
52 | disabled:
53 | ambassador # Ambassador API Gateway and Ingress
54 | cilium # SDN, fast with full network policy
55 | dashboard # The Kubernetes dashboard
56 | dns # CoreDNS
57 | fluentd # Elasticsearch-Fluentd-Kibana logging and monitoring
58 | gpu # Automatic enablement of Nvidia CUDA
59 | helm # Helm 2 - the package manager for Kubernetes
60 | helm3 # Helm 3 - Kubernetes package manager
61 | host-access # Allow Pods connecting to Host services smoothly
62 | ingress # Ingress controller for external access
63 | istio # Core Istio service mesh services
64 | jaeger # Kubernetes Jaeger operator with its simple config
65 | knative # The Knative framework on Kubernetes.
66 | kubeflow # Kubeflow for easy ML deployments
67 | linkerd # Linkerd is a service mesh for Kubernetes and other frameworks
68 | metallb # Loadbalancer for your Kubernetes cluster
69 | metrics-server # K8s Metrics Server for API access to service metrics
70 | multus # Multus CNI enables attaching multiple network interfaces to pods
71 | prometheus # Prometheus operator for monitoring and logging
72 | rbac # Role-Based Access Control for authorisation
73 | registry # Private image registry exposed on localhost:32000
74 | storage # Storage class; allocates storage from host directory
75 |
76 | ### check container runtime on microk8s snap:
77 | -rwxr-xr-x 1 root root 15M Nov 6 12:06 /snap/microk8s/current/bin/runc
78 |
79 | ### TEST WITH RUNC
80 |
81 |
82 | ### test microk8s with helloworld-go & autoscale-go:
83 | service/helloworld-go created
84 | deployment.apps/helloworld-go-deployment created
85 | service/autoscale-go created
86 | deployment.apps/autoscale-go-deployment created
87 | NAME READY STATUS RESTARTS AGE
88 | nginx-test 0/1 ContainerCreating 0 1s
89 | helloworld-go-deployment-86f5466d4-dc5d6 0/1 ContainerCreating 0 0s
90 | helloworld-go-deployment-86f5466d4-5wfd9 0/1 ContainerCreating 0 0s
91 | autoscale-go-deployment-5894658957-4vl42 0/1 Pending 0 0s
92 | autoscale-go-deployment-5894658957-6747m 0/1 ContainerCreating 0 0s
93 |
94 | waiting for ready pods...
95 |
96 | NAME READY STATUS RESTARTS AGE
97 | nginx-test 1/1 Running 0 2m2s
98 | autoscale-go-deployment-5894658957-4vl42 1/1 Running 0 2m1s
99 | helloworld-go-deployment-86f5466d4-5wfd9 1/1 Running 0 2m1s
100 | helloworld-go-deployment-86f5466d4-dc5d6 1/1 Running 0 2m1s
101 | autoscale-go-deployment-5894658957-6747m 1/1 Running 0 2m1s
102 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
103 | kubernetes ClusterIP 10.152.183.1 443/TCP 2m34s
104 | helloworld-go NodePort 10.152.183.42 80:31982/TCP 2m2s
105 | autoscale-go NodePort 10.152.183.245 80:31469/TCP 2m1s
106 |
107 | calling helloworld-go...
108 |
109 | Hello World: Kata Containers!
110 |
111 | calling autoscale-go with request for biggest prime under 10 000 and 5 MB memory...
112 |
113 | Allocated 5 Mb of memory.
114 | The largest prime less than 10000 is 9973.
115 | Slept for 100.18 milliseconds.
116 |
117 | ### re-install microk8s incl kata-runtime:
118 | microk8s v1.19.3 installed
119 | microk8s is running
120 | high-availability: no
121 | datastore master nodes: 127.0.0.1:19001
122 | datastore standby nodes: none
123 | addons:
124 | enabled:
125 | ha-cluster # Configure high availability on the current node
126 | disabled:
127 | ambassador # Ambassador API Gateway and Ingress
128 | cilium # SDN, fast with full network policy
129 | dashboard # The Kubernetes dashboard
130 | dns # CoreDNS
131 | fluentd # Elasticsearch-Fluentd-Kibana logging and monitoring
132 | gpu # Automatic enablement of Nvidia CUDA
133 | helm # Helm 2 - the package manager for Kubernetes
134 | helm3 # Helm 3 - Kubernetes package manager
135 | host-access # Allow Pods connecting to Host services smoothly
136 | ingress # Ingress controller for external access
137 | istio # Core Istio service mesh services
138 | jaeger # Kubernetes Jaeger operator with its simple config
139 | knative # The Knative framework on Kubernetes.
140 | kubeflow # Kubeflow for easy ML deployments
141 | linkerd # Linkerd is a service mesh for Kubernetes and other frameworks
142 | metallb # Loadbalancer for your Kubernetes cluster
143 | metrics-server # K8s Metrics Server for API access to service metrics
144 | multus # Multus CNI enables attaching multiple network interfaces to pods
145 | prometheus # Prometheus operator for monitoring and logging
146 | rbac # Role-Based Access Control for authorisation
147 | registry # Private image registry exposed on localhost:32000
148 | storage # Storage class; allocates storage from host directory
149 |
150 | ### TEST WITH KATA-RUNTIME
151 |
152 |
153 | ### test microk8s with helloworld-go & autoscale-go:
154 | service/helloworld-go created
155 | deployment.apps/helloworld-go-deployment created
156 | service/autoscale-go created
157 | deployment.apps/autoscale-go-deployment created
158 | NAME READY STATUS RESTARTS AGE
159 | nginx-test 0/1 ContainerCreating 0 2s
160 | helloworld-go-deployment-86f5466d4-hzmv8 0/1 ContainerCreating 0 1s
161 | autoscale-go-deployment-5894658957-m5qff 0/1 ContainerCreating 0 0s
162 | helloworld-go-deployment-86f5466d4-vntqb 0/1 ContainerCreating 0 1s
163 | autoscale-go-deployment-5894658957-ckc2z 0/1 ContainerCreating 0 0s
164 |
165 | waiting for ready pods...
166 |
167 | NAME READY STATUS RESTARTS AGE
168 | nginx-test 1/1 Running 0 2m3s
169 | autoscale-go-deployment-5894658957-m5qff 1/1 Running 0 2m1s
170 | helloworld-go-deployment-86f5466d4-hzmv8 1/1 Running 0 2m2s
171 | helloworld-go-deployment-86f5466d4-vntqb 1/1 Running 0 2m2s
172 | autoscale-go-deployment-5894658957-ckc2z 1/1 Running 0 2m1s
173 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
174 | kubernetes ClusterIP 10.152.183.1 443/TCP 2m26s
175 | helloworld-go NodePort 10.152.183.67 80:32503/TCP 2m2s
176 | autoscale-go NodePort 10.152.183.112 80:30065/TCP 2m1s
177 |
178 | calling helloworld-go...
179 |
180 | Hello World: Kata Containers!
181 |
182 | calling autoscale-go with request for biggest prime under 10 000 and 5 MB memory...
183 |
184 | Allocated 5 Mb of memory.
185 | The largest prime less than 10000 is 9973.
186 | Slept for 100.65 milliseconds.
187 |
188 | ### check proper symlink from microk8s runc:
189 | lrwxrwxrwx 1 root root 30 Nov 28 09:43 /snap/microk8s/current/bin/runc -> squashfs-root/bin/kata-runtime
190 | -rwxr-xr-x 1 root root 31560112 Oct 22 16:51 /bin/kata-runtime
191 | -rwxr-xr-x 1 root root 31560112 Nov 28 09:43 /snap/microk8s/current/bin/kata-runtime
192 |
--------------------------------------------------------------------------------
/sh/microk8s-kata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # setup guide this script follows: https://github.com/kata-containers/documentation/blob/master/how-to/containerd-kata.md
4 |
5 | set -e
6 | trap 'catch $? $LINENO' EXIT  # on any exit, report the exit code and the failing line
7 | catch() {
8 |   if [ "$1" != "0" ]; then
9 |     echo "Error $1 occurred on line $2"
10 |     if [[ ! -z "$GITHUB_WORKFLOW" ]]
11 |     then
12 |       # delete cloud instance in case of failure when run scheduled on GitHub (to save costs...)
13 |       delete_gce_instance $KATA_INSTANCE $KATA_IMAGE || true
14 |       true
15 |     fi
16 |   fi
17 | }
18 |
19 | REPORT='report.md'  # accumulated test output, later embedded into README.md
20 |
21 | OS=$(uname -a)
22 | if [[ "$OS" == 'Linux'* ]]
23 | then
24 |   lsb_release -a
25 | fi
26 |
27 | ON_GCE=$((curl -s -i metadata.google.internal | grep 'Google') || true)  # GCE detection via metadata server; NOTE(review): prefer "$( (cmd) || true)" spacing to avoid $((...)) arithmetic ambiguity
28 |
29 | # variables below can be inherited from environment; each falls back to a default and is echoed
30 | if [[ -z ${GCP_PROJECT+x} && ! "$ON_GCE" == *'Google'* ]] ; then echo "ERROR: gcp project not set" && false ; fi ; echo "gcp project: $GCP_PROJECT"
31 | if [[ -z ${GCP_ZONE+x} ]] ; then GCP_ZONE='us-central1-c' ; fi ; echo "gcp zone: $GCP_ZONE"
32 |
33 | if [[ -z ${KATA_GCE_CREATE+x} ]] ; then KATA_GCE_CREATE='true' ; fi ; echo "kata gce create: $KATA_GCE_CREATE"
34 | if [[ -z ${KATA_GCE_DELETE+x} ]] ; then KATA_GCE_DELETE='false' ; fi ; echo "kata gce delete: $KATA_GCE_DELETE"
35 |
36 | if [[ -z ${KATA_INSTALL+x} ]] ; then KATA_INSTALL='true' ; fi ; echo "kata install: $KATA_INSTALL"
37 | if [[ -z ${KATA_IMAGE_FAMILY+x} ]] ; then KATA_IMAGE_FAMILY='ubuntu-2004-lts' ; fi ; echo "kata image family: $KATA_IMAGE_FAMILY"
38 | if [[ -z ${KATA_INSTANCE+x} ]] ; then KATA_INSTANCE='microk8s-kata' ; fi ; echo "kata host instance: $KATA_INSTANCE"
39 |
40 | #if [[ -z ${KATA_VERSION+x} ]] ; then export KATA_VERSION='2.x' ; fi ; echo "mk8s version: $KATA_VERSION"
41 |
42 | if [[ -z ${MK8S_VERSION+x} ]] ; then export MK8S_VERSION='1.19' ; fi ; echo "mk8s version: $MK8S_VERSION"  # microk8s snap channel
43 |
44 | create_gce_instance()  # $1 = instance name, $2 = source image; creates the VM (if absent) and waits for ssh
45 | {
46 |   local GCE_INSTANCE="$1"
47 |   local GCE_IMAGE="$2"
48 |   echo -e "\n### setup instance: $GCE_INSTANCE - image: $GCE_IMAGE"
49 |   gcloud compute instances list \
50 |     --project=$GCP_PROJECT
51 |   if [[ ! $(gcloud compute instances list --project=$GCP_PROJECT) == *"$GCE_INSTANCE"* ]]  # idempotent: skip create if already listed
52 |   then
53 |     gcloud compute instances create \
54 |       --min-cpu-platform 'Intel Broadwell' \
55 |       --machine-type 'n1-standard-4' \
56 |       --image $GCE_IMAGE \
57 |       --zone $GCP_ZONE \
58 |       --project=$GCP_PROJECT \
59 |       --quiet \
60 |       $GCE_INSTANCE
61 |   fi
62 |   echo -e "\n### started instance:" | tee -a "$REPORT"
63 |   gcloud compute instances list --project=$GCP_PROJECT | tee -a "$REPORT"
64 |   while [[ ! $(gcloud compute ssh $GCE_INSTANCE --command='uname -a' --zone $GCP_ZONE --project=$GCP_PROJECT) == *'Linux'* ]]  # poll until sshd answers (fresh VMs refuse ssh briefly)
65 |   do
66 |     echo -e "instance not ready for ssh..."
67 |     sleep 5
68 |   done
69 |   gcloud compute ssh $GCE_INSTANCE \
70 |     --command='uname -a' \
71 |     --zone $GCP_ZONE \
72 |     --project=$GCP_PROJECT
73 | }
74 |
75 | delete_gce_instance()  # $1 = instance name, $2 = image name; removes both without prompting (--quiet)
76 | {
77 |   local GCE_INSTANCE="$1"
78 |   local GCE_IMAGE="$2"
79 |   echo -e "\n### delete gce instance: $GCE_INSTANCE"
80 |   gcloud compute instances delete \
81 |     --zone $GCP_ZONE \
82 |     --project=$GCP_PROJECT \
83 |     --quiet \
84 |     $GCE_INSTANCE
85 |
86 |   echo -e "\n### delete gce image: $GCE_IMAGE"
87 |   gcloud compute images delete \
88 |     --project=$GCP_PROJECT \
89 |     --quiet \
90 |     $GCE_IMAGE
91 | }
92 |
93 | KATA_IMAGE="$KATA_IMAGE_FAMILY-kata"  # custom image name: base family + "-kata" suffix
94 |
95 | if [[ $KATA_GCE_CREATE == 'true' ]]
96 | then
97 |   if [[ "$ON_GCE" == *'Google'* ]]
98 |   then
99 |     echo -e '\n### running on GCE'  # FIX: -e was missing, so "\n" printed literally
100 |   else
101 |     echo -e '\n### not on GCE'
102 |
103 |     if [[ ! $(gcloud compute instances list --project=$GCP_PROJECT) == *"$KATA_INSTANCE"* ]]  # only (re)build image when instance doesn't exist yet
104 |     then
105 |       echo -e "\n### cleanup previous image: $KATA_IMAGE"
106 |       if [[ -n $(gcloud compute images describe --project=$GCP_PROJECT $KATA_IMAGE) ]]
107 |       then
108 |         gcloud compute images delete \
109 |           --project=$GCP_PROJECT \
110 |           --quiet \
111 |           $KATA_IMAGE
112 |       fi
113 |
114 |       echo -e "\n### image: $(gcloud compute images list | grep $KATA_IMAGE_FAMILY)"
115 |       IMAGE_PROJECT=$(gcloud compute images list | grep $KATA_IMAGE_FAMILY | awk '{ print $2 }')
116 |
117 |       echo -e "\n### create image: $KATA_IMAGE"
118 |       gcloud compute images create \
119 |         --source-image-project $IMAGE_PROJECT \
120 |         --source-image-family $KATA_IMAGE_FAMILY \
121 |         --licenses=https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx \
122 |         --project=$GCP_PROJECT \
123 |         $KATA_IMAGE
124 |
125 |       echo -e "\n### describe image: $KATA_IMAGE"
126 |       gcloud compute images describe --project=$GCP_PROJECT $KATA_IMAGE
127 |     fi
128 |
129 |     create_gce_instance "$KATA_INSTANCE" "$KATA_IMAGE"
130 |
131 |     gcloud compute ssh $KATA_INSTANCE --command='sudo rm -rf /var/lib/apt/lists/* && sudo apt update -y && (sudo apt upgrade -y && sudo apt upgrade -y) && sudo apt autoremove -y' --zone $GCP_ZONE --project=$GCP_PROJECT
132 |     gcloud compute scp $0 "$KATA_INSTANCE:$(basename $0)" --zone $GCP_ZONE --project=$GCP_PROJECT  # copy this script to the instance
133 |     gcloud compute scp 'data/containerd.toml' "$KATA_INSTANCE:containerd.toml" --zone $GCP_ZONE --project=$GCP_PROJECT
134 |     gcloud compute scp 'data/containerd.toml.bak' "$KATA_INSTANCE:containerd.toml.bak" --zone $GCP_ZONE --project=$GCP_PROJECT
135 |     gcloud compute ssh $KATA_INSTANCE --command="sudo chmod ugo+x ./$(basename $0)" --zone $GCP_ZONE --project=$GCP_PROJECT
136 |     gcloud compute ssh $KATA_INSTANCE --command="bash ./$(basename $0)" --zone $GCP_ZONE --project=$GCP_PROJECT  # re-run this script remotely; the ON_GCE branch below executes there
137 |
138 |     if [[ ! -z "$GITHUB_WORKFLOW" ]]
139 |     then
140 |       gcloud compute scp $KATA_INSTANCE:$REPORT $REPORT --zone $GCP_ZONE --project=$GCP_PROJECT  # fetch remote report and embed it into README.md
141 |       cat README.template.md > README.md
142 |       echo '```' >> README.md
143 |       cat $REPORT >> README.md || true
144 |       echo '```' >> README.md
145 |     fi
146 |
147 |     if [[ $KATA_GCE_DELETE == 'true' ]]
148 |     then
149 |       delete_gce_instance $KATA_INSTANCE $KATA_IMAGE
150 |     fi
151 |   fi
152 | fi
153 |
154 | #gcloud compute ssh microk8s-kata --zone 'us-central1-c' --project=$GCP_PROJECT
155 |
156 | if [[ ! "$ON_GCE" == *'Google'* ]]  # local run ends here; everything below executes on the GCE instance (via the remote ssh invocation above)
157 | then
158 |   exit 0
159 | fi
160 |
161 | #now running on GCE....
162 |
163 | echo -e "\n### check gce instance:"
164 | lscpu
165 | lscpu | grep 'GenuineIntel'  # under set -e this aborts unless the CPU is Intel
166 |
167 | if [[ -z $(which jq) ]]
168 | then
169 |   echo -e "\n### install jq:"
170 |   sudo snap install jq
171 |   snap list | grep 'jq'
172 | fi
173 |
174 | # due to https://github.com/containers/podman/pull/7126 and https://github.com/containers/podman/pull/7077
175 | # some podman commands fail if --runtime= is not specified. So, we currently add it to all commands until 7126 gets published in upcoming official release
176 | if [[ -z "$KATA_VERSION" ]]  # KATA_VERSION unset (its export is commented out above) -> kata 1.x paths
177 | then
178 |   KATA_PATH='/bin/kata-runtime'
179 | else
180 |   KATA_PATH='/snap/kata-containers/current/usr/bin/kata-runtime'
181 | fi
182 |
183 | if [[ ! -f $KATA_PATH ]]
184 | then
185 |   if [[ -z "$KATA_VERSION" ]]
186 |   then
187 |     echo -e "\n### install kata containers: v1.x"
188 |     bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/cmd/kata-manager/kata-manager.sh) install-docker-system"
189 |   else
190 |     echo -e "\n### install kata containers: v2.x"
191 |     sudo snap install --edge --classic kata-containers
192 |     sudo snap list | grep 'kata-containers' | grep ' 2.'
193 |   fi
194 | fi
195 |
196 | echo -e "\n### kata-runtime env:"
197 | $KATA_PATH kata-env
198 |
199 | echo -e "\n### kata-runtime version: $($KATA_PATH --version)"
200 |
201 | #kata-check fails since Nov 12th 2020 due to publication of version 1.12. See https://github.com/kata-containers/runtime/issues/3069
202 | $KATA_PATH kata-check -n || true
203 | $KATA_PATH kata-check -n | grep 'System is capable of running Kata Containers' || true
204 |
205 | if [[ -z $(which podman) ]]
206 | then
207 |   echo -e "\n### install podman: "
208 |   source /etc/os-release
209 |   sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
210 |   wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_${VERSION_ID}/Release.key -O- | sudo apt-key add -
211 |   sudo apt update -y && sudo apt upgrade -y && sudo apt install -y podman
212 | fi
213 |
214 | KATA_PARAMS='
215 | #microk8s-kata
216 | kata = [
217 |   "/usr/bin/kata-runtime",
218 |   "/usr/sbin/kata-runtime",
219 |   "/usr/local/bin/kata-runtime",
220 |   "/usr/local/sbin/kata-runtime",
221 |   "/sbin/kata-runtime",
222 |   "/bin/kata-runtime",
223 |   "/usr/bin/kata-qemu",
224 |   "/usr/bin/kata-fc",
225 | ]'
226 | #echo "kata params: $KATA_PARAMS"
227 |
228 | #cat /etc/containers/containers.conf | grep '#microk8s-kata' || echo "$KATA_PARAMS" | sudo tee -a /etc/containers/containers.conf
229 | #cat /etc/containers/containers.conf
230 |
231 |
232 | echo -e "\n### podman version: "
233 | podman version
234 |
235 | echo -e "\n### check existing container runtimes on Ubuntu host:" | tee -a "$REPORT"
236 | ls -lh /bin/runc | tee -a "$REPORT"
237 | ls -lh "$KATA_PATH" | tee -a "$REPORT"
238 |
239 | echo -e "\n### check active OCI runtime: " | tee -a "$REPORT"
240 | podman info --runtime="$KATA_PATH"
241 | podman info --runtime="$KATA_PATH" --format=json | jq '.host.ociRuntime.name' | grep 'runc' | tee -a "$REPORT"  # default runtime should still be runc
242 |
243 | echo -e "\n### test use of kata-runtime with alpine: " | tee -a "$REPORT"
244 |
245 | echo -e "\n### podman runc tests: runc"
246 | podman run --rm --runtime='/bin/runc' alpine ls -l | grep 'etc' | grep 'root'
247 | podman run --rm --runtime='/bin/runc' alpine cat /etc/hosts | grep 'localhost'
248 |
249 | echo -e "\n### podman tests: kata-runtime"
250 | ls -l "$KATA_PATH"
251 | #to debug issue with podman on v2.0
252 | if [[ -n "$KATA_VERSION" ]]
253 | then
254 |   set -x
255 | fi
256 | sudo -E podman run --rm --runtime="$KATA_PATH" alpine grep -m 1 kataShared /etc/mtab && echo 'kata-runtime successfully detected!'  # kataShared mount is visible only inside a Kata sandbox
257 | sudo -E podman run --rm --runtime="$KATA_PATH" alpine ls -l | grep 'etc' | grep 'root'
258 | sudo -E podman run --rm --runtime="$KATA_PATH" alpine cat /etc/hosts | grep 'localhost'
259 |
260 | # stop and rm old container(s) if any (for script idempotence)
261 | sudo podman stop 'kata-alpine' --runtime="$KATA_PATH" > /dev/null 2>&1 || true
262 | sudo podman rm --force --runtime="$KATA_PATH" 'kata-alpine' > /dev/null 2>&1 || true
263 |
264 | KATA_ALPINE_ID=$(sudo -E podman run -itd --rm --runtime="$KATA_PATH" --name='kata-alpine' alpine sh)
265 | echo -e "\n### started kata-alpine container: $KATA_ALPINE_ID"
266 |
267 | echo -e "\n### list running containers: "
268 | sudo podman ps -a --runtime="$KATA_PATH" | tee -a "$REPORT"
269 | sudo podman ps -a --runtime="$KATA_PATH" | grep 'kata-alpine' > /dev/null
270 |
271 | echo -e "\n### inspect kata-alpine container: "
272 | sudo podman inspect --runtime="$KATA_PATH" "$KATA_ALPINE_ID"
273 | sudo podman inspect --runtime="$KATA_PATH" "$KATA_ALPINE_ID" | grep 'Name' | grep 'kata-alpine' | tee -a "$REPORT"
274 | sudo podman inspect --runtime="$KATA_PATH" "$KATA_ALPINE_ID" | grep 'Id' | tee -a "$REPORT"
275 | sudo podman inspect --runtime="$KATA_PATH" "$KATA_ALPINE_ID" | grep 'OCIRuntime' | grep 'kata-runtime' | tee -a "$REPORT"
276 |
277 | KATA_ALPINE_ID2=$(sudo podman stop 'kata-alpine' --runtime="$KATA_PATH")  # FIX: duplicated "sudo sudo" removed
278 | echo -e "\n### stopped kata-alpine: $KATA_ALPINE_ID2 "
279 | [[ "$KATA_ALPINE_ID2" == "$KATA_ALPINE_ID" ]]  # podman stop echoes the container id; set -e fails the script on mismatch
280 |
281 | if [[ -z $(which microk8s) ]]
282 | then
283 |   echo -e "\n### install microk8s:" | tee -a "$REPORT"
284 |   sudo snap install microk8s --classic --channel="$MK8S_VERSION"
285 |   SNAP_VERSION=$(sudo snap list | grep 'microk8s')  # NOTE(review): SNAP_VERSION is never used afterwards — confirm or drop
286 |   sudo microk8s status --wait-ready | tee -a "$REPORT"
287 | fi
288 |
289 | echo -e "\n### check container runtime on microk8s snap:" | tee -a "$REPORT"
290 | ls -lh /snap/microk8s/current/bin/runc | tee -a "$REPORT"
291 |
292 | echo -e "\n### TEST WITH INITIAL RUNC\n" | tee -a "$REPORT"
293 |
294 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/nginx-runc.yaml"
295 |
296 | echo -e "\n### test microk8s with helloworld-runc & autoscale-runc: " | tee -a "$REPORT"
297 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/helloworld-runc.yaml" | tee -a "$REPORT"
298 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/autoscale-runc.yaml" | tee -a "$REPORT"
299 |
300 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
301 |
302 | echo -e "\nwaiting for ready pods...\n" >> "$REPORT"
303 | sleep 120s
304 | # wait --for=condition=available : currently unstable with MicroK8s
305 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/helloworld-runc-deployment -n default | tee -a "$REPORT" || true
306 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/autoscale-runc-deployment -n default | tee -a "$REPORT" || true
307 |
308 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
309 | sudo microk8s kubectl get services -n default | tee -a "$REPORT"
310 |
311 | #echo -e "\n### lscpu:" | tee -a "$REPORT"
312 | #sudo microk8s kubectl exec --stdin --tty nginx-runc -- lscpu
313 | #sudo microk8s kubectl exec --stdin --tty nginx-runc -- lscpu | grep 'Vendor' | tee -a "$REPORT" || true
314 | #sudo microk8s kubectl exec --stdin --tty nginx-runc -- lscpu | grep 'Model name' | tee -a "$REPORT" || true
315 | #sudo microk8s kubectl exec --stdin --tty nginx-runc -- lscpu | grep 'Virtualization' | tee -a "$REPORT" || true
316 | #sudo microk8s kubectl exec --stdin --tty nginx-runc -- lscpu | grep 'Hypervisor vendor' | tee -a "$REPORT" || true
317 | #sudo microk8s kubectl exec --stdin --tty nginx-runc -- lscpu | grep 'Virtualization type' | tee -a "$REPORT" || true
318 |
319 | echo -e "\ncalling helloworld-runc...\n" >> "$REPORT"
320 | curl -v "http://$(sudo microk8s kubectl get service helloworld-runc -n default --no-headers | awk '{print $3}')" | tee -a "$REPORT"
321 | curl -s "http://$(sudo microk8s kubectl get service helloworld-runc -n default --no-headers | awk '{print $3}')" | grep -m 1 'Hello World: Runc Containers!'
322 |
323 | #source: https://knative.dev/docs/serving/autoscaling/autoscale-go/
324 | #curl "http://autoscale-runc.default.1.2.3.4.xip.io?sleep=100&prime=10000&bloat=5"
325 | echo -e "\ncalling autoscale-runc with request for biggest prime under 10 000 and 5 MB memory...\n" >> "$REPORT"
326 | curl -v "http://$(sudo microk8s kubectl get service autoscale-runc -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | tee -a "$REPORT"
327 | curl -s "http://$(sudo microk8s kubectl get service autoscale-runc -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | grep 'The largest prime less than 10000 is 9973'
328 |
329 | echo -e "\n### extend microk8s snap with kata-runtime:"
330 | sudo microk8s stop
331 | if [[ -d microk8s-squash ]]
332 | then
333 |   sudo rm -rf microk8s-squash
334 | fi
335 | mkdir microk8s-squash
336 | cd microk8s-squash
337 | MK8S_SNAP=$(mount | grep 'var/lib/snapd/snaps/microk8s' | awk '{printf $1}')  # path of the mounted microk8s .snap file
338 | ls -l "$MK8S_SNAP"
339 | sudo unsquashfs "$MK8S_SNAP"
340 | sudo mv squashfs-root/bin/runc squashfs-root/bin/runc.bak
341 | sudo cp /bin/runc squashfs-root/bin/runc  # replace the snap's runc with the host's, keep the original as .bak
342 | sudo cp "$KATA_PATH" squashfs-root/bin/kata-runtime  # add the kata binary alongside runc inside the snap
343 | echo -e "\ncontainers runtimes in new snap: " | tee -a "$REPORT"
344 | ls -l squashfs-root/bin/runc.bak
345 | ls -l squashfs-root/bin/runc
346 | ls -l squashfs-root/bin/kata-runtime
347 | #sudo ln -s squashfs-root/bin/kata-runtime squashfs-root/bin/runc
348 | sudo mksquashfs squashfs-root/ "$(basename $MK8S_SNAP)" -noappend -always-use-fragments | tee -a "$REPORT"  # repack the modified tree into a new .snap
349 | cd
350 | ls -lh "microk8s-squash/$(basename $MK8S_SNAP)"
351 |
352 | export CONTAINERD_TOML='/var/snap/microk8s/current/args/containerd.toml'
353 | export KATA_HANDLER_BEFORE='[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime]'
354 | #https://github.com/kata-containers/documentation/blob/master/how-to/containerd-kata.md
355 | export KATA_HANDLER_AFTER='
356 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-runtime]
357 |   runtime_type = "io.containerd.kata-runtime.v1"
358 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-runtime.options]
359 |   BinaryName = "kata-runtime"
360 |
361 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime]'
362 |
363 | if [[ ! -f "$CONTAINERD_TOML.bak" ]]
364 | then
365 |   echo -e "\n### backup containerd config: "
366 |   sudo cp "$CONTAINERD_TOML" "$CONTAINERD_TOML.bak"
367 | fi
368 |
369 | #if [[ -z $(sudo cat $CONTAINERD_TOML | grep 'kata-runtime') ]]
370 | #then
371 | #  echo -e "\n### extend containerd config: " | tee -a "$REPORT"
372 | #  sudo cat "$CONTAINERD_TOML" | sed "s!$KATA_HANDLER_BEFORE!$KATA_HANDLER_AFTER!" | sudo tee "$CONTAINERD_TOML" || true
373 | #fi
374 |
375 | if [[ -z $(sudo cat $CONTAINERD_TOML | grep 'kata-runtime') ]]  # FIX: was grep 'kata-runtme' (typo), so the idempotence guard never matched and the config was always re-copied
376 | then
377 |   echo -e "\n### extend containerd config: " | tee -a "$REPORT"
378 |   yes | sudo cp 'containerd.toml' "$CONTAINERD_TOML"
379 | fi
380 |
381 | echo -e "\n### re-install microk8s including kata-runtime: " | tee -a "$REPORT"
382 | set -x
383 | sudo microk8s start
384 | sudo microk8s status --wait-ready
385 | sudo snap remove microk8s
386 | sudo snap install --classic --dangerous "microk8s-squash/$(basename $MK8S_SNAP)" | tee -a "$REPORT"  # install the rebuilt snap (unsigned, hence --dangerous)
387 |
388 | if [[ -z $(sudo cat $CONTAINERD_TOML | grep 'kata-runtime') ]]  # FIX: same 'kata-runtme' typo; re-check after reinstall regenerated the config
389 | then
390 |   echo -e "\n### extend containerd config: " | tee -a "$REPORT"
391 |   yes | sudo cp 'containerd.toml' "$CONTAINERD_TOML"
392 | fi
393 |
394 |
395 | echo -e "\n### restart microk8s: "
396 | sudo microk8s start
397 | sudo microk8s status --wait-ready | tee -a "$REPORT"
398 |
399 |
400 |
401 | echo -e "\n### TEST WITH KATA-RUNTIME AND UPDATED RUNC\n" | tee -a "$REPORT"
402 |
403 | echo -e "\n### deploy K8s runtime class for kata: " | tee -a "$REPORT"
404 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/kata-runtime-class.yaml" | tee -a "$REPORT"
405 | sudo microk8s kubectl get runtimeclass -o wide
406 | sudo microk8s kubectl get runtimeclass | grep 'kata-runtime' && echo 'kata-runtime detected as K8s runtime class'  # FIX: message said 'kara-runtime'
407 |
408 |
409 | echo -e "\n### deploy nginx servers: " | tee -a "$REPORT"
410 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/nginx-runc.yaml" | tee -a "$REPORT"
411 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/nginx-kata.yaml" | tee -a "$REPORT"
412 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/nginx-untrusted.yaml" | tee -a "$REPORT"
413 |
414 | echo -e "\n### test microk8s with helloworld-runc & autoscale-runc: " | tee -a "$REPORT"
415 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/helloworld-runc.yaml" | tee -a "$REPORT"
416 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/autoscale-runc.yaml" | tee -a "$REPORT"
417 |
418 | echo -e "\n### test microk8s with helloworld-kata & autoscale-kata: " | tee -a "$REPORT"
419 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/helloworld-kata.yaml" | tee -a "$REPORT"
420 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/autoscale-kata.yaml" | tee -a "$REPORT"
421 |
422 |
423 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
424 |
425 | echo -e "\nwaiting for ready pods...\n" >> "$REPORT"
426 | sleep 120s
427 | # wait --for=condition=available : currently unstable with MicroK8s
428 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/helloworld-runc-deployment -n default | tee -a "$REPORT" || true
429 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/autoscale-runc-deployment -n default | tee -a "$REPORT" || true
430 |
431 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
432 | sudo microk8s kubectl get services -n default | tee -a "$REPORT"
433 |
434 | #sudo microk8s kubectl exec --stdin --tty shell-demo -- /bin/bash
435 | #sudo microk8s kubectl exec nginx-runc-deployment-d9fff6df7-9hcbb -- uname -a
436 | #sudo microk8s kubectl exec cat /etc/mtab
437 | #sudo microk8s kubectl exec grep -m 1 kataShared /etc/mtab
438 |
439 | #echo -e "\n### lscpu:" | tee -a "$REPORT"
440 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu
441 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Vendor' | tee -a "$REPORT" || true
442 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Model name' | tee -a "$REPORT" || true
443 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Virtualization' | tee -a "$REPORT" || true
444 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Hypervisor vendor' | tee -a "$REPORT" || true
445 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Virtualization type' | tee -a "$REPORT" || true
446 |
447 | echo -e "\ncalling helloworld-runc...\n" >> "$REPORT"
448 | curl -v "http://$(sudo microk8s kubectl get service helloworld-runc -n default --no-headers | awk '{print $3}')" | tee -a "$REPORT"
449 | curl -s "http://$(sudo microk8s kubectl get service helloworld-runc -n default --no-headers | awk '{print $3}')" | grep -m 1 'Hello World: Runc Containers!'
450 |
451 | echo -e "\ncalling helloworld-kata...\n" >> "$REPORT"
452 | curl -v "http://$(sudo microk8s kubectl get service helloworld-kata -n default --no-headers | awk '{print $3}')" | tee -a "$REPORT"
453 | curl -s "http://$(sudo microk8s kubectl get service helloworld-kata -n default --no-headers | awk '{print $3}')" | grep -m 1 'Hello World: Kata Containers!'
454 |
455 | #source: https://knative.dev/docs/serving/autoscaling/autoscale-go/
456 | #curl "http://autoscale-go.default.1.2.3.4.xip.io?sleep=100&prime=10000&bloat=5"
457 | echo -e "\ncalling autoscale-runc with request for biggest prime under 10 000 and 5 MB memory...\n" >> "$REPORT"
458 | curl -v "http://$(sudo microk8s kubectl get service autoscale-runc -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | tee -a "$REPORT"
459 | curl -s "http://$(sudo microk8s kubectl get service autoscale-runc -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | grep 'The largest prime less than 10000 is 9973'
460 |
461 | echo -e "\ncalling autoscale-kata with request for biggest prime under 10 000 and 5 MB memory...\n" >> "$REPORT"
462 | curl -v "http://$(sudo microk8s kubectl get service autoscale-kata -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | tee -a "$REPORT"
463 | curl -s "http://$(sudo microk8s kubectl get service autoscale-kata -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | grep 'The largest prime less than 10000 is 9973'
464 |
465 | echo -e "\n### check microk8s runtimes:" | tee -a "$REPORT"
466 | #[[ -L /snap/microk8s/current/bin/runc ]]
467 | ls -l /snap/microk8s/current/bin/runc | tee -a "$REPORT"
468 | ls -l /snap/microk8s/current/bin/kata-runtime | tee -a "$REPORT"
469 | cmp /bin/runc /snap/microk8s/current/bin/runc && echo 'runc binary identical: microk8s <> host' | tee -a "$REPORT"
470 | cmp "$KATA_PATH" /snap/microk8s/current/bin/kata-runtime && echo 'kata-runtime binary identical: microk8s <> host' | tee -a "$REPORT"
471 |
472 | echo -e "\n### prepare execution report:"
473 |
474 | echo -e "### execution date: $(date --utc)" >> "$REPORT.tmp"
475 | echo " " >> "$REPORT.tmp"
476 |
477 | echo -e "### microk8s snap version:" >> "$REPORT.tmp"
478 | echo -e "$SNAP_VERSION" >> "$REPORT.tmp"
479 | echo " " >> "$REPORT.tmp"
480 |
481 | echo "### ubuntu version:" >> "$REPORT.tmp"
482 | echo "$(lsb_release -a)" >> "$REPORT.tmp"
483 | echo " " >> "$REPORT.tmp"
484 |
485 | echo "### podman version:" >> "$REPORT.tmp"
486 | echo "$(podman version)" >> "$REPORT.tmp"
487 | echo " " >> "$REPORT.tmp"
488 |
489 | echo "### containerd version:" >> "$REPORT.tmp"
490 | echo "$(containerd --version)" >> "$REPORT.tmp"
491 | echo " " >> "$REPORT.tmp"
492 |
493 | echo "### kata-runtime version:" >> "$REPORT.tmp"
494 | "$KATA_PATH" --version >> "$REPORT.tmp"
495 | echo " " >> "$REPORT.tmp"
496 |
497 | echo "### kata-runtime check:" >> "$REPORT.tmp"
498 | "$KATA_PATH" kata-check -n >> "$REPORT.tmp"
499 | echo " " >> "$REPORT.tmp"
500 |
501 | cat $REPORT >> "$REPORT.tmp"
502 | rm "$REPORT"
503 | mv "$REPORT.tmp" $REPORT
504 |
505 | echo "### execution report:"
506 | cat $REPORT
--------------------------------------------------------------------------------
/sh/test-sed.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Scratch script: check that the sed anchor used to splice the kata-runtime
# handler into containerd.toml actually matches its target line.

CONTAINERD_TOML='data/containerd.toml'
KATA_HANDLER_BEFORE='[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime]'
#https://github.com/kata-containers/documentation/blob/master/how-to/containerd-kata.md
KATA_HANDLER_AFTER='
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-runtime]
runtime_type = "io.containerd.kata-runtime.v1"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-runtime.options]
BinaryName = "kata-runtime"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-container-runtime]'

#cat "$CONTAINERD_TOML"
# '!' as the sed delimiter keeps the bracket-heavy TOML section name intact
sed -e "s!$KATA_HANDLER_BEFORE!foo-foo-foo!" "$CONTAINERD_TOML"
--------------------------------------------------------------------------------
/sh/v2-kata-microk8s.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | trap 'catch $? $LINENO' EXIT
# EXIT-trap handler: report any non-zero exit code and the line it came from.
# On scheduled GitHub runs, a hook is kept (currently disabled) to delete the
# cloud instance on failure to save costs.
catch() {
  local code="$1" line="$2"
  if [ "$code" != "0" ]; then
    echo "Error $code occurred on $line"
    if [[ -n "$GITHUB_WORKFLOW" ]]
    then
      # delete cloud instance in case of failure when run scheduled on GitHub (to save costs...)
      #delete_gce_instance $KATA_INSTANCE $KATA_IMAGE || true
      true
    fi
  fi
}
16 |
17 | REPORT='report.md'
18 |
19 | OS=$(uname -a)
20 | if [[ "$OS" == 'Linux'* ]]
21 | then
22 | lsb_release -a
23 | fi
24 |
25 | ON_GCE=$((curl -s -i metadata.google.internal | grep 'Google') || true)
26 |
27 | # variables below can be inherited from environment
28 | if [[ -z ${GCP_PROJECT+x} && ! "$ON_GCE" == *'Google'* ]] ; then echo "ERROR: gcp project not set" && false ; fi ; echo "gcp project: $GCP_PROJECT"
29 | if [[ -z ${GCP_ZONE+x} ]] ; then GCP_ZONE='us-central1-c' ; fi ; echo "gcp zone: $GCP_ZONE"
30 |
31 | if [[ -z ${KATA_GCE_CREATE+x} ]] ; then KATA_GCE_CREATE='true' ; fi ; echo "kata gce create: $KATA_GCE_CREATE"
32 | if [[ -z ${KATA_GCE_DELETE+x} ]] ; then KATA_GCE_DELETE='false' ; fi ; echo "kata gce delete: $KATA_GCE_DELETE"
33 | if [[ -z ${KATA_INSTALL+x} ]] ; then KATA_INSTALL='true' ; fi ; echo "kata install: $KATA_INSTALL"
34 | if [[ -z ${KATA_HOST+x} ]] ; then KATA_HOST='ubuntu-2004-lts' ; fi ; echo "kata host os: $KATA_HOST"
35 | if [[ -z ${KATA_INSTANCE+x} ]] ; then KATA_INSTANCE='microk8s-kata' ; fi ; echo "kata host instance: $KATA_INSTANCE"
36 |
37 | if [[ -z ${MK8S_VERSION+x} ]] ; then export MK8S_VERSION='1.19' ; fi ; echo "mk8s version: $MK8S_VERSION"
38 |
# create_gce_instance NAME IMAGE
# Creates GCE instance NAME from IMAGE unless it already exists, then polls
# until the instance answers ssh.
# Fix: all variable expansions are now quoted, so values containing spaces
# (or empty values) cannot word-split into broken gcloud invocations.
create_gce_instance()
{
  local GCE_INSTANCE="$1"
  local GCE_IMAGE="$2"
  echo -e "\n### setup instance: $GCE_INSTANCE - image: $GCE_IMAGE"
  gcloud compute instances list \
    --project="$GCP_PROJECT"
  if [[ ! $(gcloud compute instances list --project="$GCP_PROJECT") == *"$GCE_INSTANCE"* ]]
  then
    # 'Intel Broadwell' minimum CPU platform is required for nested virtualization
    gcloud compute instances create \
      --min-cpu-platform 'Intel Broadwell' \
      --machine-type 'n1-standard-4' \
      --image "$GCE_IMAGE" \
      --zone "$GCP_ZONE" \
      --project="$GCP_PROJECT" \
      --quiet \
      "$GCE_INSTANCE"
  fi
  echo -e "\n### started instance:" | tee -a "$REPORT"
  gcloud compute instances list --project="$GCP_PROJECT" | tee -a "$REPORT"
  # sshd takes a while to come up on a fresh instance: poll until it responds
  while [[ ! $(gcloud compute ssh "$GCE_INSTANCE" --command='uname -a' --zone "$GCP_ZONE" --project="$GCP_PROJECT") == *'Linux'* ]]
  do
    echo -e "instance not ready for ssh..."
    sleep 5
  done
  gcloud compute ssh "$GCE_INSTANCE" \
    --command='uname -a' \
    --zone "$GCP_ZONE" \
    --project="$GCP_PROJECT"
}
69 |
# delete_gce_instance NAME IMAGE
# Tears down GCE instance NAME and deletes image IMAGE.
# Fix: expansions quoted to avoid word splitting on unusual values.
delete_gce_instance()
{
  local GCE_INSTANCE="$1"
  local GCE_IMAGE="$2"
  echo -e "\n### delete gce instance: $GCE_INSTANCE"
  gcloud compute instances delete \
    --zone "$GCP_ZONE" \
    --project="$GCP_PROJECT" \
    --quiet \
    "$GCE_INSTANCE"

  echo -e "\n### delete gce image: $GCE_IMAGE"
  gcloud compute images delete \
    --project="$GCP_PROJECT" \
    --quiet \
    "$GCE_IMAGE"
}
87 |
88 | KATA_IMAGE="$KATA_HOST-kata"
89 |
# When not already on GCE: build (or reuse) the nested-virtualization image,
# create the instance, copy this script onto it and re-run it there, then
# optionally collect the report and tear the instance down.
if [[ $KATA_GCE_CREATE == 'true' ]]
then
  if [[ "$ON_GCE" == *'Google'* ]]
  then
    # fix: echo needs -e for the \n escape — the original printed a literal '\n'
    echo -e '\n### running on GCE'
  else
    echo -e '\n### not on GCE'

    if [[ ! $(gcloud compute instances list --project=$GCP_PROJECT) == *"$KATA_INSTANCE"* ]]
    then
      echo -e "\n### cleanup previous image: $KATA_IMAGE"
      if [[ -n $(gcloud compute images describe --project=$GCP_PROJECT $KATA_IMAGE) ]]
      then
        gcloud compute images delete \
          --project=$GCP_PROJECT \
          --quiet \
          $KATA_IMAGE
      fi

      echo -e "\n### image: $(gcloud compute images list | grep $KATA_HOST)"
      IMAGE_PROJECT=$(gcloud compute images list | grep $KATA_HOST | awk '{ print $2 }')

      echo -e "\n### create image: $KATA_IMAGE"
      # the enable-vmx license turns on nested virtualization (needed for Kata VMs)
      gcloud compute images create \
        --source-image-project $IMAGE_PROJECT \
        --source-image-family $KATA_HOST \
        --licenses=https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx \
        --project=$GCP_PROJECT \
        $KATA_IMAGE

      echo -e "\n### describe image: $KATA_IMAGE"
      gcloud compute images describe --project=$GCP_PROJECT $KATA_IMAGE
    fi

    create_gce_instance "$KATA_INSTANCE" "$KATA_IMAGE"

    gcloud compute ssh $KATA_INSTANCE --command="sudo apt update -y && (sudo apt upgrade -y || sudo apt upgrade -y) && sudo apt autoremove -y" --zone $GCP_ZONE --project=$GCP_PROJECT
    gcloud compute ssh $KATA_INSTANCE --command='(sudo groupadd docker || true) && sudo usermod -a -G docker ${USER}' --zone $GCP_ZONE --project=$GCP_PROJECT
    #gcloud compute ssh $KATA_INSTANCE --command='sudo groupadd docker && sudo usermod -a -G docker ${USER} && sudo groupadd microk8s && sudo usermod -a -G microk8s ${USER}' --zone $GCP_ZONE --project=$GCP_PROJECT
    gcloud compute scp $0 $KATA_INSTANCE:$(basename $0) --zone $GCP_ZONE --project=$GCP_PROJECT
    gcloud compute ssh $KATA_INSTANCE --command="sudo chmod ugo+x ./$(basename $0)" --zone $GCP_ZONE --project=$GCP_PROJECT
    gcloud compute ssh $KATA_INSTANCE --command="bash ./$(basename $0)" --zone $GCP_ZONE --project=$GCP_PROJECT

    if [[ ! -z "$GITHUB_WORKFLOW" ]]
    then
      # embed the remote execution report into README.md for the repo front page
      gcloud compute scp $KATA_INSTANCE:$REPORT $REPORT --zone $GCP_ZONE --project=$GCP_PROJECT
      cat README.template.md > README.md
      echo '```' >> README.md
      cat $REPORT >> README.md || true
      echo '```' >> README.md
    fi

    if [[ $KATA_GCE_DELETE == 'true' ]]
    then
      delete_gce_instance $KATA_INSTANCE $KATA_IMAGE
    fi
  fi
fi
148 |
149 | #gcloud compute ssh microk8s-kata --zone 'us-central1-c' --project=$GCP_PROJECT
150 |
151 | if [[ ! "$ON_GCE" == *'Google'* ]]
152 | then
153 | exit 0
154 | fi
155 |
156 | #now running on GCE....
157 |
158 | echo -e "\n### check gce instance:"
159 | lscpu
160 | lscpu | grep 'GenuineIntel'
161 |
162 | if [[ -z $(which /snap/kata-containers/current/usr/bin/kata-runtime) ]]
163 | then
164 | echo -e "\n### install kata containers:"
165 | #bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/cmd/kata-manager/kata-manager.sh) install-docker-system"
166 | #https://github.com/kata-containers/kata-containers/blob/2.0-dev/utils/README.md
167 | #bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/utils/kata-manager.sh)"
168 | sudo snap install --edge --classic kata-containers
169 | sudo snap list | grep 'kata-containers'
170 | fi
171 |
172 | echo -e "\n### check install:"
173 |
174 | echo -e "\n### kata-runtime version: $(kata-containers.runtime --version)"
175 |
176 | echo -e "### kata-runtime kata-env:"
177 | kata-containers.runtime kata-env || true
178 |
179 | #kata-check fail since Nov, 12th 2020 due to publication on version 1.12. See https://github.com/kata-containers/runtime/issues/3069
180 | echo -e "sudo kata-runtime kata-check -n: " || true
181 | kata-containers.runtime kata-check -n || true
182 |
183 | kata-containers.runtime kata-check -n | grep 'System is capable of running Kata Containers' || true
184 |
185 | if [[ -z $(which podman) ]]
186 | then
187 | echo -e "\n### install podman: "
188 | sudo snap install podman --edge
189 | fi
190 |
191 | echo -e "\n### podman version: "
192 | podman --version
193 |
194 | echo -e "\n### check existing container runtimes on Ubuntu host:" | tee -a "$REPORT"
195 | ls -lh /bin/runc | tee -a "$REPORT"
196 | ls -lh /snap/kata-containers/current/usr/bin/kata-runtime | tee -a "$REPORT"
197 |
198 | echo -e "\n### check available docker runtimes: " | tee -a "$REPORT"
199 | podman info
200 | docker info | grep 'Runtimes' | grep 'kata-runtime' | grep 'runc' | tee -a "$REPORT"
201 |
202 | #echo -e "\n### test use of kata-runtime with alpine: " | tee -a "$REPORT"
203 |
204 | #docker run --rm --runtime='kata-runtime' alpine ls -l | grep 'etc' | grep 'root'
205 | #docker run --rm --runtime='kata-runtime' alpine cat /etc/hosts | grep 'localhost'
206 |
207 | #docker run -itd --rm --runtime='kata-runtime' --name='kata-alpine' alpine sh
208 |
209 | ##docker ps -a | tee -a "$REPORT"
210 | #docker inspect $(sudo docker ps -a | grep 'kata-alpine' | awk '{print $1}')
211 | #docker inspect $(sudo docker ps -a | grep 'kata-alpine' | awk '{print $1}') | grep 'Name' | grep 'kata-alpine' | tee -a "$REPORT"
212 | #docker inspect $(sudo docker ps -a | grep 'kata-alpine' | awk '{print $1}') | grep 'Id' | tee -a "$REPORT"
213 | #docker inspect $(sudo docker ps -a | grep 'kata-alpine' | awk '{print $1}') | grep 'Runtime' | grep 'kata-runtime' | tee -a "$REPORT"
214 |
215 | #docker stop 'kata-alpine'
216 |
217 | if [[ -z $(which microk8s) ]]
218 | then
219 | echo -e "\n### install microk8s:" | tee -a "$REPORT"
220 | sudo snap install 'microk8s' --classic --channel='1.19/stable'
221 | SNAP_VERSION=$(sudo snap list | grep 'microk8s')
222 | sudo microk8s status --wait-ready | tee -a "$REPORT"
223 | fi
224 |
225 | echo -e "\n### check container runtime on microk8s snap:" | tee -a "$REPORT"
226 | ls -lh /snap/microk8s/current/bin/runc | tee -a "$REPORT"
227 |
228 | echo -e "\n### TEST WITH RUNC\n" | tee -a "$REPORT"
229 |
230 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/nginx-test.yaml"
231 |
232 | echo -e "\n### test microk8s with helloworld-go & autoscale-go: " | tee -a "$REPORT"
233 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/helloworld-go.yaml" | tee -a "$REPORT"
234 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/autoscale-go.yaml" | tee -a "$REPORT"
235 |
236 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
237 |
238 | echo -e "\nwaiting for ready pods...\n" >> "$REPORT"
239 | sleep 120s
240 | # wait --for=condition=available : currently unstable with MicroK8s
241 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/helloworld-go-deployment -n default | tee -a "$REPORT" || true
242 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/autoscale-go-deployment -n default | tee -a "$REPORT" || true
243 |
244 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
245 | sudo microk8s kubectl get services -n default | tee -a "$REPORT"
246 |
247 | #echo -e "\n### lscpu:" | tee -a "$REPORT"
248 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu
249 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Vendor' | tee -a "$REPORT" || true
250 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Model name' | tee -a "$REPORT" || true
251 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Virtualization' | tee -a "$REPORT" || true
252 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Hypervisor vendor' | tee -a "$REPORT" || true
253 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Virtualization type' | tee -a "$REPORT" || true
254 |
255 | echo -e "\ncalling helloworld-go...\n" >> "$REPORT"
256 | curl -v "http://$(sudo microk8s kubectl get service helloworld-go -n default --no-headers | awk '{print $3}')" | tee -a "$REPORT"
257 | curl -s "http://$(sudo microk8s kubectl get service helloworld-go -n default --no-headers | awk '{print $3}')" | grep -m 1 'Hello World: Kata Containers!'
258 |
259 | #source: https://knative.dev/docs/serving/autoscaling/autoscale-go/
260 | #curl "http://autoscale-go.default.1.2.3.4.xip.io?sleep=100&prime=10000&bloat=5"
261 | echo -e "\ncalling autoscale-go with request for biggest prime under 10 000 and 5 MB memory...\n" >> "$REPORT"
262 | curl -v "http://$(sudo microk8s kubectl get service autoscale-go -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | tee -a "$REPORT"
263 | curl -s "http://$(sudo microk8s kubectl get service autoscale-go -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | grep 'The largest prime less than 10000 is 9973'
264 |
echo -e "\n### extend microk8s snap with kata-runtime:"
sudo microk8s stop

# Unpack the microk8s snap, drop the kata-runtime binary in, point runc at it,
# and repack the squashfs.
if [[ -d microk8s-squash ]]
then
  sudo rm -rf microk8s-squash
fi
mkdir microk8s-squash
cd microk8s-squash
MK8S_SNAP=$(mount | grep 'var/lib/snapd/snaps/microk8s' | awk '{printf $1}')
ls -l "$MK8S_SNAP"
sudo unsquashfs "$MK8S_SNAP"
sudo cp /snap/kata-containers/current/usr/bin/kata-runtime squashfs-root/bin/kata-runtime
sudo mv squashfs-root/bin/runc squashfs-root/bin/runc.bak
# Fix: a symlink target is resolved relative to the link's own directory.
# The original 'ln -s squashfs-root/bin/kata-runtime squashfs-root/bin/runc'
# produced a dangling link (.../bin/squashfs-root/bin/kata-runtime) inside
# the repacked snap; the target must be just 'kata-runtime'.
sudo ln -s kata-runtime squashfs-root/bin/runc
sudo mksquashfs squashfs-root/ "$(basename $MK8S_SNAP)" -noappend -always-use-fragments | tee -a "$REPORT"
cd
ls -lh "microk8s-squash/$(basename $MK8S_SNAP)"

echo -e "\n### re-install microk8s incl kata-runtime: " | tee -a "$REPORT"
sudo microk8s start
sudo microk8s status --wait-ready
sudo snap remove microk8s
sudo snap install --classic --dangerous "microk8s-squash/$(basename $MK8S_SNAP)" | tee -a "$REPORT"
289 |
290 | set -x
291 | echo -e "\n### restart microk8s: "
292 | sudo microk8s start
293 | sudo microk8s status --wait-ready | tee -a "$REPORT"
294 |
295 | echo -e "\n### TEST WITH KATA-RUNTIME\n" | tee -a "$REPORT"
296 |
297 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/nginx-test.yaml"
298 |
299 | echo -e "\n### test microk8s with helloworld-go & autoscale-go: " | tee -a "$REPORT"
300 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/helloworld-go.yaml" | tee -a "$REPORT"
301 | sudo microk8s kubectl apply -f "https://raw.githubusercontent.com/didier-durand/microk8s-kata-containers/main/kubernetes/autoscale-go.yaml" | tee -a "$REPORT"
302 |
303 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
304 |
305 | echo -e "\nwaiting for ready pods...\n" >> "$REPORT"
306 | sleep 120s
307 | # wait --for=condition=available : currently unstable with MicroK8s
308 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/helloworld-go-deployment -n default | tee -a "$REPORT" || true
309 | #sudo microk8s kubectl wait --for=condition=available --timeout=1000s deployment.apps/autoscale-go-deployment -n default | tee -a "$REPORT" || true
310 |
311 | sudo microk8s kubectl get pods -n default | tee -a "$REPORT"
312 | sudo microk8s kubectl get services -n default | tee -a "$REPORT"
313 |
314 | #echo -e "\n### lscpu:" | tee -a "$REPORT"
315 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu
316 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Vendor' | tee -a "$REPORT" || true
317 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Model name' | tee -a "$REPORT" || true
318 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Virtualization' | tee -a "$REPORT" || true
319 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Hypervisor vendor' | tee -a "$REPORT" || true
320 | #sudo microk8s kubectl exec --stdin --tty nginx-test -- lscpu | grep 'Virtualization type' | tee -a "$REPORT" || true
321 |
322 | echo -e "\ncalling helloworld-go...\n" >> "$REPORT"
323 | curl -v "http://$(sudo microk8s kubectl get service helloworld-go -n default --no-headers | awk '{print $3}')" | tee -a "$REPORT"
324 | curl -s "http://$(sudo microk8s kubectl get service helloworld-go -n default --no-headers | awk '{print $3}')" | grep -m 1 'Hello World: Kata Containers!'
325 |
326 | #source: https://knative.dev/docs/serving/autoscaling/autoscale-go/
327 | #curl "http://autoscale-go.default.1.2.3.4.xip.io?sleep=100&prime=10000&bloat=5"
328 | echo -e "\ncalling autoscale-go with request for biggest prime under 10 000 and 5 MB memory...\n" >> "$REPORT"
329 | curl -v "http://$(sudo microk8s kubectl get service autoscale-go -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | tee -a "$REPORT"
330 | curl -s "http://$(sudo microk8s kubectl get service autoscale-go -n default --no-headers | awk '{print $3}')?sleep=100&prime=10000&bloat=5" | grep 'The largest prime less than 10000 is 9973'
331 |
332 | echo -e "\n### check proper symlink from microk8s runc:" | tee -a "$REPORT"
333 | ls -l /snap/microk8s/current/bin/runc | tee -a "$REPORT"
334 | [[ -L /snap/microk8s/current/bin/runc ]]
335 | ls -l /snap/kata-containers/current/usr/bin/kata-runtime | tee -a "$REPORT"
336 | ls -l /snap/microk8s/current/bin/kata-runtime | tee -a "$REPORT"
337 | cmp /snap/kata-containers/current/usr/bin/kata-runtime /snap/microk8s/current/bin/kata-runtime
338 |
339 | echo -e "\n### prepare execution report:"
340 |
341 | echo -e "### execution date: $(date --utc)" >> "$REPORT.tmp"
342 | echo " " >> "$REPORT.tmp"
343 |
344 | echo -e "### microk8s snap version:" >> "$REPORT.tmp"
345 | echo -e "$SNAP_VERSION" >> "$REPORT.tmp"
346 | echo " " >> "$REPORT.tmp"
347 |
348 | echo "### ubuntu version:" >> "$REPORT.tmp"
349 | echo "$(lsb_release -a)" >> "$REPORT.tmp"
350 | echo " " >> "$REPORT.tmp"
351 |
352 | echo "### docker version:" >> "$REPORT.tmp"
353 | echo "$(docker version)" >> "$REPORT.tmp"
354 | echo " " >> "$REPORT.tmp"
355 |
356 | echo "### kata-runtime version:" >> "$REPORT.tmp"
357 | kata-containers.runtime --version >> "$REPORT.tmp"
358 | echo " " >> "$REPORT.tmp"
359 |
360 | echo "### kata-runtime check:" >> "$REPORT.tmp"
361 | kata-containers.runtime kata-check -n >> "$REPORT.tmp"
362 | echo " " >> "$REPORT.tmp"
363 |
364 | cat $REPORT >> "$REPORT.tmp"
365 | rm "$REPORT"
366 | mv "$REPORT.tmp" $REPORT
367 |
368 | echo "### execution report:"
369 | cat $REPORT
--------------------------------------------------------------------------------
/src/go/autoscale/autoscale.go:
--------------------------------------------------------------------------------
1 | //source: https://github.com/peter-evans/knative-docs/blob/master/serving/samples/autoscale-go/autoscale.go
2 | /*
3 | Copyright 2017 The Knative Authors
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package main
18 |
import (
	"fmt"
	"log"
	"math"
	"net/http"
	"strconv"
	"sync"
	"time"
)
27 |
28 | // Algorithm from https://stackoverflow.com/a/21854246
29 |
// allPrimes returns, in increasing order, every prime strictly less than N,
// using the sieve of Atkin (algorithm from https://stackoverflow.com/a/21854246).
//
// Fixes over the original:
//   - the candidate loops tested n <= N while indexing a slice of length N,
//     which panicked for inputs such as N=5 or N=13;
//   - the collection loop stopped at len(is_prime)-1, silently dropping a
//     prime equal to N-1 (e.g. allPrimes(12) missed 11);
//   - is_prime[2] / is_prime[3] were set unconditionally, panicking for N <= 3;
//   - the magic pre-allocation capacity 1270606 is replaced by a cheap bound.
//
// The strictly-less-than semantics match the caller prime(), which reports
// "the largest prime less than" its argument.
func allPrimes(N int) []int {
	if N <= 2 {
		return nil
	}

	isPrime := make([]bool, N)
	limit := math.Sqrt(float64(N))

	// Toggle candidates according to the three Atkin quadratic forms.
	for x := 1; float64(x) <= limit; x++ {
		for y := 1; float64(y) <= limit; y++ {
			n := 4*x*x + y*y
			if n < N && (n%12 == 1 || n%12 == 5) {
				isPrime[n] = !isPrime[n]
			}
			n = 3*x*x + y*y
			if n < N && n%12 == 7 {
				isPrime[n] = !isPrime[n]
			}
			n = 3*x*x - y*y
			if x > y && n < N && n%12 == 11 {
				isPrime[n] = !isPrime[n]
			}
		}
	}

	// Eliminate composites by sieving out multiples of squares of primes.
	for n := 5; float64(n) <= limit; n++ {
		if isPrime[n] {
			for k := n * n; k < N; k += n * n {
				isPrime[k] = false
			}
		}
	}

	// 2 and 3 are not produced by the quadratic forms.
	isPrime[2] = true
	if N > 3 {
		isPrime[3] = true
	}

	// N/2 is a cheap upper bound on the number of primes below N (for N > 2).
	primes := make([]int, 0, N/2)
	for n := 2; n < N; n++ {
		if isPrime[n] {
			primes = append(primes, n)
		}
	}
	return primes
}
76 |
// bloat allocates mb megabytes of heap memory, touching the first and last
// byte so the pages are really committed, and reports the allocation.
//
// Fix: the original did b[0] = 1 unconditionally, which panics for mb <= 0.
// Negative values pass the handler's zero-only filter, so a request such as
// ?bloat=-1 crashed the whole server. Non-positive sizes are now reported
// without allocating.
func bloat(mb int) string {
	if mb > 0 {
		b := make([]byte, mb*1024*1024)
		b[0] = 1
		b[len(b)-1] = 1
	}
	return fmt.Sprintf("Allocated %v Mb of memory.\n", mb)
}
83 |
84 | func prime(max int) string {
85 | p := allPrimes(max)
86 | if len(p) > 0 {
87 | return fmt.Sprintf("The largest prime less than %v is %v.\n", max, p[len(p)-1])
88 | } else {
89 | return fmt.Sprintf("There are no primes smaller than %v.\n", max)
90 | }
91 | }
92 |
// sleep pauses for ms milliseconds and reports the measured wall-clock
// duration of the pause.
func sleep(ms int) string {
	began := time.Now()
	time.Sleep(time.Duration(ms) * time.Millisecond)
	elapsed := time.Since(began)
	return fmt.Sprintf("Slept for %.2f milliseconds.\n", float64(elapsed.Nanoseconds())/1e6)
}
99 |
100 | func parseIntParam(r *http.Request, param string) (int, bool, error) {
101 | if value := r.URL.Query().Get(param); value != "" {
102 | i, err := strconv.Atoi(value)
103 | if err != nil {
104 | return 0, false, err
105 | }
106 | if i == 0 {
107 | return i, false, nil
108 | }
109 | return i, true, nil
110 | }
111 | return 0, false, nil
112 | }
113 |
114 | func handler(w http.ResponseWriter, r *http.Request) {
115 | // Validate inputs.
116 | ms, hasMs, err := parseIntParam(r, "sleep")
117 | if err != nil {
118 | http.Error(w, err.Error(), http.StatusBadRequest)
119 | return
120 | }
121 | max, hasMax, err := parseIntParam(r, "prime")
122 | if err != nil {
123 | http.Error(w, err.Error(), http.StatusBadRequest)
124 | return
125 | }
126 | mb, hasMb, err := parseIntParam(r, "bloat")
127 | if err != nil {
128 | http.Error(w, err.Error(), http.StatusBadRequest)
129 | return
130 | }
131 | // Consume time, cpu and memory in parallel.
132 | var wg sync.WaitGroup
133 | defer wg.Wait()
134 | if hasMs {
135 | wg.Add(1)
136 | go func() {
137 | defer wg.Done()
138 | fmt.Fprint(w, sleep(ms))
139 | }()
140 | }
141 | if hasMax {
142 | wg.Add(1)
143 | go func() {
144 | defer wg.Done()
145 | fmt.Fprint(w, prime(max))
146 | }()
147 | }
148 | if hasMb {
149 | wg.Add(1)
150 | go func() {
151 | defer wg.Done()
152 | fmt.Fprint(w, bloat(mb))
153 | }()
154 | }
155 | }
156 |
157 | func main() {
158 | http.HandleFunc("/", handler)
159 | http.ListenAndServe(":8080", nil)
160 | }
161 |
--------------------------------------------------------------------------------
/src/go/helloworld/helloworld.go:
--------------------------------------------------------------------------------
1 |
2 | //Source: https://github.com/peter-evans/knative-docs/blob/master/serving/samples/helloworld-go/helloworld.go
3 |
4 | /*
5 | Copyright 2018 The Knative Authors
6 |
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at
10 |
11 | https://www.apache.org/licenses/LICENSE-2.0
12 |
13 | Unless required by applicable law or agreed to in writing, software
14 | distributed under the License is distributed on an "AS IS" BASIS,
15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | See the License for the specific language governing permissions and
17 | limitations under the License.
18 | */
19 |
20 | package main
21 |
22 | import (
23 | "flag"
24 | "fmt"
25 | "log"
26 | "net/http"
27 | "os"
28 | )
29 |
30 | func handler(w http.ResponseWriter, r *http.Request) {
31 | log.Print("Hello world received a request.")
32 | target := os.Getenv("TARGET")
33 | if target == "" {
34 | target = "NOT SPECIFIED"
35 | }
36 | fmt.Fprintf(w, "Hello World: %s!\n", target)
37 | }
38 |
39 | func main() {
40 | flag.Parse()
41 | log.Print("Hello world sample started.")
42 |
43 | http.HandleFunc("/", handler)
44 | http.ListenAndServe(":8080", nil)
45 | }
46 |
--------------------------------------------------------------------------------