├── .circleci ├── config.yml ├── content-trust.crt └── content-trust.key ├── .github ├── ISSUE_TEMPLATE.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .mailmap ├── AUTHORS ├── CONTRIBUTING.md ├── LICENSE ├── MAINTAINERS ├── Makefile ├── NOTICE ├── README.md ├── boot.sh ├── pkg ├── cri-containerd │ ├── Dockerfile │ └── build.yml ├── kube-e2e-test │ ├── Dockerfile │ ├── README.md │ ├── build.yml │ ├── e2e.sh │ └── in-cluster-config.yaml ├── kubelet │ ├── Dockerfile │ ├── build.yml │ ├── kubeadm-init.sh │ └── kubelet.sh ├── kubernetes-docker-image-cache-common │ ├── .gitignore │ ├── Dockerfile │ ├── build.yml │ └── images.lst └── kubernetes-docker-image-cache-control-plane │ ├── .gitignore │ ├── Dockerfile │ ├── build.yml │ └── images.lst ├── poule.yml ├── scripts ├── generate-authors.sh ├── mk-image-cache-lst ├── run-e2e-test.sh └── update-linuxkit-hashes.sh ├── ssh_into_kubelet.sh ├── test ├── .gitignore └── cases │ ├── 000_smoke │ ├── 001_cri-bridge │ │ └── test.sh │ ├── 002_cri-weave │ │ └── test.sh │ ├── 003_docker-bridge │ │ └── test.sh │ ├── 004_docker-weave │ │ └── test.sh │ ├── common.sh │ ├── group.sh │ ├── test.exp │ └── test.yml │ └── group.sh └── yml ├── bridge.yml ├── cri-containerd-master.yml ├── cri-containerd.yml ├── docker-master.yml ├── docker.yml ├── kube.yml └── weave.yml /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | linuxkit_pkg_build: &linuxkit_pkg_build 2 | docker: 3 | - image: debian:stretch 4 | steps: 5 | - run: 6 | name: Configure $PATH 7 | command: echo 'export PATH=/workspace/bin:$PATH' >> $BASH_ENV 8 | - run: 9 | name: Install packages 10 | # ca-certificates are needed for attach_workspace (and git over https) 11 | command: apt-get update && apt-get install -y ca-certificates git openssh-client 12 | - attach_workspace: 13 | at: /workspace 14 | - checkout 15 | - setup_remote_docker: 16 | version: 17.06.1-ce 17 | - run: 18 | name: Docker version 19 | command: | 20 | docker version 21 | - run: 22 | name: Building package 23 | command: | 24 | PKG=${CIRCLE_JOB#pkg-} 25 | mkdir /workspace/packages 26 | linuxkit pkg build pkg/$PKG 27 | linuxkit pkg show-tag pkg/$PKG > /workspace/packages/$PKG.tag 28 | echo 29 | docker image ls --all 30 | docker image save -o /workspace/packages/$PKG.tar linuxkit/$PKG 31 | - persist_to_workspace: 32 | root: /workspace 33 | paths: packages 34 | 35 | image_build: &image_build 36 | docker: 37 | - image: debian:stretch 38 | steps: 39 | - run: 40 | name: Configure $PATH 41 | command: echo 'export PATH=/workspace/bin:$PATH' >> $BASH_ENV 42 | - run: 43 | name: Install packages 44 | # ca-certificates are needed for attach_workspace (and git over https) 45 | command: apt-get update && apt-get install -y ca-certificates curl git make openssh-client 46 | - attach_workspace: 47 | at: /workspace 48 | - checkout 49 | - setup_remote_docker: 50 | version: 17.06.1-ce 51 | - run: 52 | name: Importing packages from workspace 53 | command: | 54 | load() { 55 | local pkg=$1 56 | docker image load --input /workspace/packages/${pkg}.tar 57 | 58 | # Retag to avoid content trust for unpushed images 59 | local tag=$(cat /workspace/packages/${pkg}.tag) 60 | docker image tag ${tag} linuxkitcircleci/${pkg}:ci 61 | sed -i -e "s,image: ${tag}$,image: linuxkitcircleci/${pkg}:ci,g" yml/*.yml 62 | } 63 | 64 | load kubelet 65 | case "$KUBE_RUNTIME" in 66 | docker) 67 | load kubernetes-docker-image-cache-common 68 | load kubernetes-docker-image-cache-control-plane 69 | ;; 70 | cri-containerd) 71 | load 
cri-containerd 72 | ;; 73 | *) 74 | echo "Unknown $KUBE_RUNTIME" 75 | exit 1 76 | ;; 77 | esac 78 | echo 79 | docker image ls --all 80 | echo 81 | git --no-pager diff 82 | - run: 83 | name: Build images 84 | command: | 85 | mkdir -p /workspace/images/kube-$KUBE_RUNTIME-$KUBE_NETWORK 86 | # KUBE_FORMATS="iso-efi iso-bios" are much slower (especially for RUNTIME=docker) to build than tar. 87 | # So for now just build tar files. 88 | make KUBE_FORMATS="tar" master node 89 | 90 | version: 2 91 | jobs: 92 | dependencies: 93 | docker: 94 | - image: debian:stretch 95 | steps: 96 | - run: 97 | name: Create workspace 98 | command: mkdir -p /workspace/bin 99 | - run: 100 | name: Install packages 101 | command: apt-get update && apt-get install -y ca-certificates curl 102 | - run: 103 | name: Fetch binaries 104 | command: | 105 | curl -fsSL -o /tmp/docker.tgz https://download.docker.com/linux/static/stable/x86_64/docker-18.03.0-ce.tgz 106 | tar xfO /tmp/docker.tgz docker/docker > /workspace/bin/docker 107 | # To update find the most recent successful build at https://circleci.com/gh/linuxkit/linuxkit/tree/master 108 | # and find the link + SHA256 in the `Artifacts` tab 109 | curl -fsSL -o /workspace/bin/linuxkit https://github.com/linuxkit/linuxkit/releases/download/v0.4/linuxkit-linux-amd64 110 | curl -fsSL -o /workspace/bin/manifest-tool https://github.com/estesp/manifest-tool/releases/download/v0.7.0/manifest-tool-linux-amd64 111 | curl -fsSL -o /workspace/bin/notary https://github.com/theupdateframework/notary/releases/download/v0.6.0/notary-Linux-amd64 112 | 113 | echo "Downloaded:" 114 | sha256sum /workspace/bin/* 115 | echo 116 | 117 | echo "Checking checksums" 118 | sha256sum -c <> $BASH_ENV 142 | - run: 143 | name: Install packages 144 | # ca-certificates are needed for attach_workspace (and git over https) 145 | command: apt-get update && apt-get install -y ca-certificates git make openssh-client 146 | - attach_workspace: 147 | at: /workspace 148 | - setup_remote_docker: 149 | version: 17.06.1-ce 150 | - run: 151 | name: Docker version 152 | command: | 153 | docker version 154 | - checkout 155 | - run: 156 | name: Check YML hashes 157 | command: | 158 | # Check yml hashes are up to date. 159 | make update-hashes 160 | if ! git diff --exit-code ; then 161 | echo "" 162 | echo "*** Hashes are not up to date." 163 | exit 1 164 | fi 165 | - run: 166 | name: Check image-cache similarity 167 | command: | 168 | kdicc=pkg/kubernetes-docker-image-cache-common 169 | kdiccp=pkg/kubernetes-docker-image-cache-control-plane 170 | # Check that the image cache build files have not drifted apart 171 | if ! diff $kdicc/Dockerfile $kdiccp/Dockerfile ; then 172 | echo "" 173 | echo "*** image-cache Dockerfiles do not match." 174 | exit 1 175 | fi 176 | # These differ in one expected way, the name 177 | sed -e 's/^image: kubernetes-docker-image-cache-common$/image: kubernetes-docker-image-cache/' <$kdicc/build.yml >$kdicc/build.yml.for-lint 178 | sed -e 's/^image: kubernetes-docker-image-cache-control-plane$/image: kubernetes-docker-image-cache/' <$kdiccp/build.yml >$kdiccp/build.yml.for-lint 179 | if ! diff $kdicc/build.yml.for-lint $kdiccp/build.yml.for-lint ; then 180 | echo "" 181 | echo "*** image-cache build.yml files do not match." 182 | exit 1 183 | fi 184 | rm -f $kdicc/build.yml.for-lint $kdiccp/build.yml.for-lint 185 | - run: 186 | # This one potentially does a lot of docker pull, leave it until last. 
187 | name: Check image cache images 188 | command: | 189 | # Check pkg/kubernetes-docker-image-cache-*/image.lst are up to date 190 | make refresh-image-caches 191 | if ! git diff --exit-code ; then 192 | echo "" 193 | echo "*** image cache lists are not up to date." 194 | exit 1 195 | fi 196 | 197 | pkg-kubelet: 198 | <<: *linuxkit_pkg_build 199 | pkg-cri-containerd: 200 | <<: *linuxkit_pkg_build 201 | pkg-kube-e2e-test: 202 | <<: *linuxkit_pkg_build 203 | 204 | pkg-kubernetes-docker-image-cache-common: 205 | <<: *linuxkit_pkg_build 206 | pkg-kubernetes-docker-image-cache-control-plane: 207 | <<: *linuxkit_pkg_build 208 | 209 | image-docker-weave: 210 | <<: *image_build 211 | # Needs to be configured/enabled by CircleCI person 212 | #resource_class: large 213 | environment: 214 | - KUBE_RUNTIME: docker 215 | - KUBE_NETWORK: weave 216 | image-docker-bridge: 217 | <<: *image_build 218 | # Needs to be configured/enabled by CircleCI person 219 | #resource_class: large 220 | environment: 221 | - KUBE_RUNTIME: docker 222 | - KUBE_NETWORK: bridge 223 | 224 | image-cri-containerd-weave: 225 | <<: *image_build 226 | environment: 227 | - KUBE_RUNTIME: cri-containerd 228 | - KUBE_NETWORK: weave 229 | image-cri-containerd-bridge: 230 | <<: *image_build 231 | environment: 232 | - KUBE_RUNTIME: cri-containerd 233 | - KUBE_NETWORK: bridge 234 | 235 | push-pkgs-to-hub: 236 | docker: 237 | - image: debian:stretch 238 | steps: 239 | - run: 240 | name: Configure $PATH 241 | command: echo 'export PATH=/workspace/bin:$PATH' >> $BASH_ENV 242 | - run: 243 | name: Install packages 244 | # ca-certificates are needed for attach_workspace (and git over https) 245 | command: apt-get update && apt-get install -y ca-certificates expect git jq openssh-client 246 | - attach_workspace: 247 | at: /workspace 248 | - checkout 249 | - setup_remote_docker: 250 | version: 17.06.1-ce 251 | - run: 252 | name: Docker version 253 | command: | 254 | docker version 255 | - run: 256 | name: Import packages from workspace 257 | command: | 258 | for pkg in /workspace/packages/*.tar ; do 259 | docker image load --input $pkg 260 | done 261 | echo 262 | docker image ls --all 263 | - run: 264 | name: Push packages 265 | command: | 266 | # PRs from forks do not have access to the necessary secrets to do the push. 267 | if [ -z "$DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE" ] ; then 268 | echo "DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE not set (likely this is a PR from a fork)." 269 | echo "No credentials available, not pushing to hub." 
270 | exit 0 271 | fi 272 | 273 | docker login -u $DOCKER_USER -p $DOCKER_PASS 274 | mkdir -p ~/.docker/trust/private 275 | cp .circleci/content-trust.key ~/.docker/trust/private/b056f84873aa0be205dfe826afa6e7458120c9569dd19a2a84154498fb1165d5.key 276 | 277 | linuxkit pkg push --nobuild pkg/kubelet 278 | linuxkit pkg push --nobuild pkg/cri-containerd 279 | linuxkit pkg push --nobuild pkg/kube-e2e-test 280 | linuxkit pkg push --nobuild pkg/kubernetes-docker-image-cache-common 281 | linuxkit pkg push --nobuild pkg/kubernetes-docker-image-cache-control-plane 282 | 283 | workflows: 284 | version: 2 285 | build: 286 | jobs: 287 | - dependencies 288 | - lint: 289 | requires: 290 | - dependencies 291 | 292 | - pkg-kubelet: 293 | requires: 294 | - dependencies 295 | - pkg-cri-containerd: 296 | requires: 297 | - dependencies 298 | - pkg-kube-e2e-test: 299 | requires: 300 | - dependencies 301 | - pkg-kubernetes-docker-image-cache-common: 302 | requires: 303 | - dependencies 304 | - pkg-kubernetes-docker-image-cache-control-plane: 305 | requires: 306 | - dependencies 307 | 308 | - image-docker-weave: 309 | requires: 310 | - dependencies 311 | - pkg-kubelet 312 | - pkg-kubernetes-docker-image-cache-common 313 | - pkg-kubernetes-docker-image-cache-control-plane 314 | - image-docker-bridge: 315 | requires: 316 | - dependencies 317 | - pkg-kubelet 318 | - pkg-kubernetes-docker-image-cache-common 319 | - pkg-kubernetes-docker-image-cache-control-plane 320 | - image-cri-containerd-weave: 321 | requires: 322 | - dependencies 323 | - pkg-kubelet 324 | - pkg-cri-containerd 325 | - image-cri-containerd-bridge: 326 | requires: 327 | - dependencies 328 | - pkg-kubelet 329 | - pkg-cri-containerd 330 | 331 | - push-pkgs-to-hub: 332 | # We want everything to have passed, which is a bit 333 | # tedious. Some of these are already covered transitively, 334 | # but be more explicit. 
335 | requires: 336 | - lint 337 | - pkg-kubelet 338 | - pkg-cri-containerd 339 | - pkg-kube-e2e-test 340 | - pkg-kubernetes-docker-image-cache-common 341 | - pkg-kubernetes-docker-image-cache-control-plane 342 | - image-docker-weave 343 | - image-docker-bridge 344 | - image-cri-containerd-weave 345 | - image-cri-containerd-bridge 346 | -------------------------------------------------------------------------------- /.circleci/content-trust.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFxDCCA6wCCQDB3AGNjPBlEjANBgkqhkiG9w0BAQsFADCBozELMAkGA1UEBhMC 3 | VUsxFzAVBgNVBAgMDkNhbWJyaWRnZXNoaXJlMRIwEAYDVQQHDAlDYW1icmlkZ2Ux 4 | ETAPBgNVBAoMCExpbnV4S2l0MSswKQYDVQQLDCJMaW51eEtpdC9LdWJlcm5ldGVz 5 | IENJIHNpZ25pbmcga2V5MScwJQYDVQQDDB5naXRodWIuY29tL2xpbnV4a2l0L2t1 6 | YmVybmV0ZXMwHhcNMTcxMTIxMTY1ODQ0WhcNMTgxMTIxMTY1ODQ0WjCBozELMAkG 7 | A1UEBhMCVUsxFzAVBgNVBAgMDkNhbWJyaWRnZXNoaXJlMRIwEAYDVQQHDAlDYW1i 8 | cmlkZ2UxETAPBgNVBAoMCExpbnV4S2l0MSswKQYDVQQLDCJMaW51eEtpdC9LdWJl 9 | cm5ldGVzIENJIHNpZ25pbmcga2V5MScwJQYDVQQDDB5naXRodWIuY29tL2xpbnV4 10 | a2l0L2t1YmVybmV0ZXMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDF 11 | lc4gUNtWX8OUxCOD65NHnnIQNjbfF28IA4L20lfhfrnzhGY4a6eCN2IwTDtD1huJ 12 | 6/2fg4QcUHiZdWj9PmnsGsblNrxUAjuJNSypyBhV24C9kk7se8pSt0N7/HlHAiIV 13 | whaRMqiiR5+SZKI5IMZFltcu3YuPzGWy+RbS/3o4f4WArfQVGXUPAxovfxqxOFhp 14 | onGi2zTm8bg307i3zzQ+3YkVeEoG7MyVrGHLSAmoCMnTP8NU5WFVJrlPe/Hea3+0 15 | 3IdNp/+V4SVdM+ET/+mqf5fuRdJibT2R64u69t8rAOh5iaUGzPORoJmcXbG3fBQW 16 | uYe1MEezj953Xny85AGhRinAsJok6VHA/nYkTBpZQtRv82CVSj+rg7oyfvy9f3SG 17 | mLYw4JpMOMAGq0ywLvt4p/DNIey7RFurLhp5bnNh1Dprbc3I2dgb3TlPRZZ32OV8 18 | 66JKxQg5aweS8efech7PqbBpuf4OHrprjhXZWgH9hHgfONNT3qnlqgigd5SI2S23 19 | 1tIXdzcM4kjMQ+18KBvYOVwhmjJV5LQUfbE/+R+NN47gVIKm/NwPW891UXRxczKW 20 | uMHccyox5Xqhf/nW9wEEKQvq+JSlgtXczLFgMFCYrNUwALP6BX2Y/7Kfd5VrnTrD 21 | GPASh/AWn/RK/nxCbNcdWgIYU2ABoPs4MTCsO5GapQIDAQABMA0GCSqGSIb3DQEB 22 | CwUAA4ICAQBk+UAEtgFMnbTobO0zH3pxUrAAsqRT2GWeWNdVc8NOjj7zzkJyl6G1 23 | epUT1KyjyrfSNWF0l/cMgosTIJKQA7+gQDRivS/pp+4vWLq0/pjkCtGlJO9batuf 24 | ELALlWHeXKj4C7Adx5QAyDuvGqnH2JrCLX++GyZvcU76gv0Y/KRr+Ttj057ILenL 25 | /2xNyFuwXgb1/243m2DRWs8mILI0a731X+l2W/sp8LLnArKhiCcSZXaJNVHTrDqg 26 | a2YKR/edjRDG/GL2F/5F/8s27EgKhZfiEKtuCQz+Cl3yIMuE+lM/ObTHvYpU+qhO 27 | ypwka6Yfri2fwYIaPAHUHBGiyeVzJMANluifZd6mMzd8o8FWhFhKrUUOrhhuEAh6 28 | 3fagymS2vqf4LvUqtvIp0BPwQ4L+RmiucHXSTprDIbTOHHidHJ2DeHGDM5XTfE/J 29 | U72Btj9a+VX3M1XY3IhOCUVUOTET92Ey4n1GJ497f4vOKZ34d3A79fDhXGr2cSAK 30 | 1CW0M0v8obFrpMu6vd6NqeSazz5vthYMqJOTKhtpAFvt8lROqQLYNwZ5uBzEDphz 31 | Dv8kLuTHHjspl5Q2wQRt7iqB+cn+cH1BU/pOkNVa+MCZiwB9HnB/YyMspKf0S1QS 32 | wFMq67DIzyxhSEo9IorVGsg0LRByVjEY6uK8aBERFARLELug8k+xVg== 33 | -----END CERTIFICATE----- 34 | -------------------------------------------------------------------------------- /.circleci/content-trust.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: AES-256-CBC,054D79A3BFFB818D6EA3FF54A0457C31 4 | 5 | XbdXsNS3FCb67KWRHukUHpkX3Do1D2Sittt+xnUcNWPFzoN9LEyjIDzcWLlGpmkv 6 | OmvPKDwtleHzk4TH3lkpEIq7AhzC9MCfHnfVD6/4iN13Aqg5B130lEBJCSqwiykK 7 | 1AvlK4MiarIcV4G3iG40s9J7TpuADPy3WJeXVu7X7E3SFddyNEiEW4N97GUy0RdG 8 | 32g6B7/MD2p+oGvbzyO74KegKjMdTptDjwIWgDIPWgBfmllBOWiHsG82B9khE1WS 9 | F+yuY3eeNY7cPESgvwd2EAc3+mybTEKFzC3RVdFHeHaafMjNSS2NM6FAncuqECmP 10 | zcztFQlyOjKrDESeCiQYged9fl7sXoIZFj0qPWhQBIlSmTAuEEwam6RT0b+J6S4o 11 | 
4OBJWsIYG+SWyHKFUkqPdkx3zh/ncs4LefDDWQ9domKMVieoKhTDdvdhYqmbiLiQ 12 | D2A4QqbDUfmZBbksfZqKj1SePm9t27sdApEactaD0uyF7x+OTcwJKdO1KxAwL01W 13 | n5RHxkWfOOaGTx/n+cuIPYKzajMZnBciU9JPavd+qRTmO9abVD81MmbrmBO7weVq 14 | g8fhhWL+ybI0QNon4DrnSpJRysmqIoi2MBzG/TYoO5AY40UiXgjvSLFHuxqyDlXg 15 | XjtM4+E2jUnMq6DAIqOhq1V9086ryzb3qiKGJWVoWhSogyzUabe+Emt4hUrM476B 16 | vukUiSW8Wd5y+feGbe1KTWfYq/kiLdr+B6hrA5YsCW5yMYjTL1uof/hjoBOcyoku 17 | CcBeBfmQOjurH4yylBdi1WOKWoxoS07LTYp4uZFemJXlff1nQ/5ofmav3BCa5A0A 18 | 9gAXhU0JK8Qc3rhvfip7hkV5GjOuTQ79qMNYrbkN4brlvG9dTHrgwz6x0OfVhiAi 19 | xXO+A/J9spfaK5nWP9BB+EgzFXOQndcdlSZmePH58jMeUrCjm9yT/t3GOyJ1inpO 20 | aR1/t9ZIruouqI7/Brfxc03zwBUh1fqnqRUM893opq892Fxvr7Uw/QwbkoEMhWiI 21 | UC8d4D5KcA9yKp7Spx9x2FP1EAIY3QcIRgh3UaPLCv4lI5wwDU5QoB1aHGrKHxMD 22 | kok383wNVh1pB+wHjMLbZkzIK83eW6QYSaTlNn1thpoITi+WI8nHBA8QFGTt/jpd 23 | /rRr6XtStMlaFsEj6T41wia6p+pVzKdWnu5bndWa/9BsNQWpgtncMqnlxGiS2ufD 24 | zpPbspQ5LFSn22KL5qWHeBXawU+M3L6230LFpzdP0vDESSB12YOiLDnAMElVliTh 25 | +95vJGtaVfGPo91jrsQ7JaTjwk1sgj0O/EDSvHbuf8HBrSB9ScAVO4i+bEH5t/aX 26 | potS8Gk+SFVtCnR2GYrawj+OqnG6N5SsrfaTOvP8rF7kNKz7v3P2ikC9jgTBihcF 27 | 5wW3rU9jpOmGM8/Riv1E/567x23HAxPYR4az4HYncEOOHDVcWVPVkz2zjNK3EOHF 28 | vRZ4kaig6XurOW/SVWQxHPTF1BX/0CFLPT/tr1z5jwIeplnVd/Errwry2tNxoWfu 29 | c5OX+Iq/dcu//X1Ty+IjamqfejmK1RxLZOftO+Lmd2tHTohGy5jZiu+iyvUXXYtV 30 | YRnTdlbxNUU/DukuDbmiiB92bAiXlkho2w383+2ERlfCS3p9OeP2Mnvs034NE0Sg 31 | CG4UUA1bhn3rwgdf9S6aYqLsIlmuZL0TB5hns4B6d3sIpKlRwWbAth1zFXCajldc 32 | Suw0Dq+1kG3N6gCaXEk+imyRAOx3SxJGJ9bJGcBia2vMmqs+UbX9zdbuBSz/6vI3 33 | F00XqKTQojXQfX3JnWusf98VgtF9N/33YQa5Y1rXRlBra+UBG8rmpeDvasCaIyef 34 | 5LuDKqVZrDAt2QQRu6+R/r73x6Dv40E1wfYN2GggZVg+aCukam84V6NJkCVRYiUo 35 | oaX+SsYmoDnnzZRJMs60GoluhkNciqtrLRTSQeFqXMb/CRoe418zQQy099A/0lwh 36 | Y5yBKvZ4U5TFloEdpo4COM+UkP+XrmGPzln0XKr72fwJJU6vu2e1yuPId8bDX9Sf 37 | Y5Sn3ALW6vgKJwkvXfNpo4g1qdTFuWTGFLhDjURmz/bOg2Y9F0MdiDC3j5Qm859U 38 | ZATychGpuwsorNWFeCIEUYl4ApiqQgf/zES8CXk6p3xOxwE35Onlk5y1OrRfNJlI 39 | Zuc6QTORjRo+R7BGipSWHBV75BOfW/D3lR9kan6Qo9dCOGhDlFHddSzSDaJnICjq 40 | AITx0oKaXJNQILIohfzmupl+ZlX1eg3wIS0659YL5IELSqhkQ9xig613Nf5kL9tB 41 | o7CC+NiZGEvA6EVH5cUB4+94AZTIFNpIQh34kkUPCrDs8oz/0zFoGLAbS/ObbX4o 42 | Zd5g41WoiDSDNwKGcP3ZCKrvUWahiEuKJxHXas6H4YHsMpOPmXnsvtpIUWu7cPqn 43 | YALZCzy8ytFw/AFTPnw7ryj5urtDrf8qpNtPKo2MqftJBflLj2UoIdSWaU+vFS6p 44 | 3/snFB4WaTFuK05DwNQoJ/IYmSJKAOz1kyjq3hq9fBXAFuM4wXcbLnIcK3zN6kaV 45 | ZgEGVh6H1O9Oq4ievJLousYa7A8AGlD8fCU+HfY0yzXFCXSPT8k9OZH07qJkbrSp 46 | mQ/vOgLB2tfzEx0f+7K8VWAZGQswo/OCp4v5uNT8d1DgGr0/VdIJzs+j+aORbFxE 47 | w0MNU/Bs3CgqrrBzmqIXMvBF4g+dO6Y5pmXfvbEDvzxZ8tSszXHau7g6uxNFYZ4J 48 | nqJT6yKKRVAhXyA5BwfZgUndCphGjAVMc57YhB9FyshU2Z0TWtWxqKEf6klpt2O7 49 | 7Wb9Jl/a/subGmsBaKIgot8CpZYQDBpQDXonTuAQLFIKRRe9aHNnGkbeSTLvBCqO 50 | JZJtMrGLNSpnamywi4cDRMFWshkjYmExvkPmhLGkzBGeWMKAz7LIZ7mZUvm4DiU2 51 | m+FLsGi1d81J3b+1/Oe2zDlcl3i2wAfbZCuNKERZX3039yrEYVQnGm+Hjlom99Ia 52 | QmSSJfDA1pyygeo//wvNTtc6zx2cUYFTgRTOZByASPsQuxqoNF7wKSPqK8bzOKVy 53 | b392fHNptt9H2+9VUXV8dqvWKsYTRQn8LvsHsXFPDkN31IEkTYtD0pun83hHEg7q 54 | -----END RSA PRIVATE KEY----- 55 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 21 | 22 | **Description** 23 | 24 | 27 | 28 | **Steps to reproduce the issue:** 29 | 30 | 31 | **Describe the results you received:** 32 | 33 | 34 | **Describe the results you expected:** 35 | 36 | 37 | **Additional information you deem important (e.g. 
issue happens only occasionally):** 38 | 39 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 15 | 16 | **- What I did** 17 | 18 | **- How I did it** 19 | 20 | **- How to verify it** 21 | 22 | **- Description for the changelog** 23 | 27 | 28 | 29 | **- A picture of a cute animal (not mandatory but encouraged)** 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.iso 2 | kube-*-kernel 3 | kube-*-cmdline 4 | kube-*-initrd.img 5 | kube-*-state 6 | kube-weave.yaml 7 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | # Generate AUTHORS: scripts/generate-authors.sh 2 | 3 | # Tip for finding duplicates (besides scanning the output of AUTHORS for name 4 | # duplicates that aren't also email duplicates): scan the output of: 5 | # git log --format='%aE - %aN' | sort -uf 6 | # 7 | # For explanation on this file format: man git-shortlog 8 | 9 | Amir Chaudhry 10 | Anil Madhavapeddy 11 | Dan Finneran 12 | Dan Finneran 13 | Dan Finneran 14 | David Protasowski 15 | David Scott 16 | David Scott 17 | David Scott 18 | David Scott 19 | Dave Tucker 20 | David Gageot 21 | David Sheets 22 | David Sheets 23 | Ian Campbell 24 | Ian Campbell 25 | Ian Campbell 26 | Isaac Rodman 27 | Istvan Szukacs 28 | Jeff Wu 29 | Jeremy Yallop 30 | Justin Cormack 31 | Justin Cormack 32 | Ken Cochrane 33 | Magnus Skjegstad 34 | Marten Cassel 35 | Mindy Preston 36 | Nathan Dautenhahn 37 | Nathan LeClaire 38 | Nathan LeClaire 39 | Niclas Mietz 40 | Pierre Gayvallet 41 | Radu Matei 42 | Riyaz Faizullabhoy 43 | Riyaz Faizullabhoy 44 | Rolf Neugebauer 45 | Rolf Neugebauer 46 | Rolf Neugebauer 47 | Sebastiaan van Stijn 48 | Simon Ferquel 49 | Thomas Gazagnaire 50 | Thomas Gazagnaire 51 | Vincent Demeester 52 | Vincent Demeester 53 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # This file lists all individuals having contributed content to the repository. 2 | # For how it is generated, see `scripts/generate-authors.sh`. 3 | 4 | Avi Deitcher 5 | Dave Tucker 6 | David Scott 7 | Ian Campbell 8 | Ilya Dmitrichenko 9 | Isaac Rodman 10 | Jesse Adametz 11 | Justin Cormack 12 | Magnus Skjegstad 13 | Matt Bajor 14 | Nick Jones 15 | Pierre Gayvallet 16 | Riyaz Faizullabhoy 17 | Robin Winkelewski 18 | Rolf Neugebauer 19 | Sebastiaan van Stijn 20 | Tiago Pires 21 | Tim Potter 22 | w9n 23 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to LinuxKit/Kubernetes 2 | 3 | Want to hack on this project? Awesome! Here are instructions to get you started. 4 | 5 | Additional information can be found in the docs of the main LinuxKit project: 6 | [issue triage](https://github.com/linuxkit/linuxkit/blob/master/docs/issue-triage.md), 7 | and [review process](https://github.com/linuxkit/linuxkit/blob/master/docs/reviewing.md). 8 | 9 | ## Reporting security issues 10 | 11 | The LinuxKit maintainers take security seriously. 
If you discover a security 12 | issue, please bring it to their attention right away! 13 | 14 | Please **DO NOT** file a public issue, instead send your report privately to 15 | [security@docker.com](mailto:security@docker.com). 16 | 17 | Security reports are greatly appreciated and we will publicly thank you for it. 18 | We also like to send gifts—if you're into Docker schwag, make sure to let 19 | us know. We currently do not offer a paid security bounty program, but are not 20 | ruling it out in the future. 21 | 22 | ## Reporting other issues 23 | 24 | A great way to contribute to the project is to send a detailed report when you 25 | encounter an issue. We always appreciate a well-written, thorough bug report, 26 | and will thank you for it! 27 | 28 | Check that [our issue database](https://github.com/linuxkit/linuxkit/issues) 29 | doesn't already include that problem or suggestion before submitting an issue. 30 | If you find a match, you can use the "subscribe" button to get notified on 31 | updates. Do *not* leave random "+1" or "I have this too" comments, as they 32 | only clutter the discussion, and don't help resolving it. However, if you 33 | have ways to reproduce the issue or have additional information that may help 34 | resolving the issue, please leave a comment. 35 | 36 | Also include the steps required to reproduce the problem if possible and 37 | applicable. This information will help us review and fix your issue faster. 38 | When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). 39 | Don't forget to remove sensitive data from your logfiles before posting (you can 40 | replace those parts with "REDACTED"). 41 | 42 | ## Quick contribution tips and guidelines 43 | 44 | This section gives the experienced contributor some tips and guidelines. 45 | 46 | ### Pull requests are always welcome 47 | 48 | Not sure if that typo is worth a pull request? Found a bug and know how to fix 49 | it? Do it! We will appreciate it. Any significant improvement should be 50 | documented as [a GitHub issue](https://github.com/linuxkit/linuxkit/issues) before 51 | anybody starts working on it. 52 | 53 | We are always thrilled to receive pull requests. We do our best to process them 54 | quickly. If your pull request is not accepted on the first try, don't get 55 | discouraged! Our contributor's guide explains [the review process we 56 | use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). 57 | 58 | ### Design and cleanup proposals 59 | 60 | You can propose new designs for existing features. You can also design 61 | entirely new features. We really appreciate contributors who want to refactor or 62 | otherwise cleanup our project. For information on making these types of 63 | contributions, see [the advanced contribution 64 | section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in 65 | the contributors guide. 66 | 67 | We try hard to keep LinuxKit lean and focused. LinuxKit can't do everything for 68 | everybody. This means that we might decide against incorporating a new feature. 69 | However, there might be a way to implement that feature *on top of* LinuxKit. 70 | 71 | ### Commit Messages 72 | 73 | Commit messages must start with a capitalized and short summary (max. 50 chars) 74 | written in the imperative, followed by an optional, more detailed explanatory 75 | text which is separated from the summary by an empty line. 
76 | 77 | Commit messages should follow best practices, including explaining the context 78 | of the problem and how it was solved, including any caveats or follow-up changes 79 | required. They should tell the story of the change and provide readers with an 80 | understanding of what led to it. 81 | 82 | If you're lost about what this even means, please see [How to Write a Git 83 | Commit Message](http://chris.beams.io/posts/git-commit/) for a start. 84 | 85 | In practice, the best approach to maintaining a nice commit message is to 86 | leverage `git add -p` and `git commit --amend` to formulate a solid 87 | changeset. This allows one to piece together a change, as information becomes 88 | available. 89 | 90 | If you squash a series of commits, don't just submit that. Re-write the commit 91 | message, as if the series of commits was a single stroke of brilliance. 92 | 93 | That said, there is no requirement to have a single commit for a PR, as long as 94 | each commit tells the story. For example, if there is a feature that requires a 95 | package, it might make sense to have the package in a separate commit and then have 96 | a subsequent commit that uses it. 97 | 98 | Remember, you're telling part of the story with the commit message. Don't make 99 | your chapter weird. 100 | 101 | 102 | ### Review 103 | 104 | Code review comments may be added to your pull request. Discuss, then make the 105 | suggested modifications and push additional commits to your feature branch. Post 106 | a comment after pushing. New commits show up in the pull request automatically, 107 | but the reviewers are notified only when you comment. 108 | 109 | Pull requests must be cleanly rebased on top of master without multiple branches 110 | mixed into the PR. 111 | 112 | **Git tip**: If your PR no longer merges cleanly, use `rebase master` in your 113 | feature branch to update your pull request rather than `merge master`. 114 | 115 | Before you make a pull request, squash your commits into logical units of work 116 | using `git rebase -i` and `git push -f`. A logical unit of work is a consistent 117 | set of patches that should be reviewed together: for example, upgrading the 118 | version of a vendored dependency and taking advantage of its now available new 119 | feature constitute two separate units of work. Implementing a new function and 120 | calling it in another file constitute a single logical unit of work. The vast 121 | majority of submissions should have a single commit, so if in doubt: squash 122 | down to one. 123 | 124 | After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). 125 | Include documentation 126 | changes in the same pull request so that a revert would remove all traces of 127 | the feature or fix. 128 | 129 | Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that 130 | close an issue. Including references automatically closes the issue on a merge. 131 | 132 | ### Merge approval 133 | 134 | Docker maintainers use LGTM (Looks Good To Me) in comments or GitHub approval 135 | on the code review to indicate acceptance. 136 | 137 | A change requires at least one LGTM from a maintainer of each component 138 | affected. A list of maintainers can be found in the [MAINTAINERS](MAINTAINERS) 139 | file.
140 | 141 | ### Community Guidelines and Code of Conduct 142 | 143 | When contributing to this project, we expect you to respect our community 144 | guidelines and [code of conduct](https://github.com/docker/code-of-conduct) 145 | 146 | We want to keep the community awesome, growing and collaborative. We need your 147 | help to keep it that way. To help with this we've come up with some general 148 | guidelines for the community as a whole: 149 | 150 | * Be nice: Be courteous, respectful and polite to fellow community members: 151 | no regional, racial, gender, or other abuse will be tolerated. We like 152 | nice people way better than mean ones! 153 | 154 | * Encourage diversity and participation: Make everyone in our community feel 155 | welcome, regardless of their background and the extent of their 156 | contributions, and do everything possible to encourage participation in 157 | our community. 158 | 159 | * Keep it legal: Basically, don't get us in trouble. Share only content that 160 | you own, do not share private or sensitive information, and don't break 161 | the law. 162 | 163 | * Stay on topic: Make sure that you are posting to the correct channel and 164 | avoid off-topic discussions. Remember when you update an issue or respond 165 | to an email you are potentially sending to a large number of people. Please 166 | consider this before you update. Also remember that nobody likes spam. 167 | 168 | * Don't send email to the maintainers: There's no need to send email to the 169 | maintainers to ask them to investigate an issue or to take a look at a 170 | pull request. Instead of sending an email, GitHub mentions should be 171 | used to ping maintainers to review a pull request, a proposal or an 172 | issue. 173 | 174 | ### Guideline violations — 3 strikes method 175 | 176 | The point of this section is not to find opportunities to punish people, but we 177 | do need a fair way to deal with people who are making our community suck. 178 | 179 | 1. First occurrence: We'll give you a friendly, but public reminder that the 180 | behavior is inappropriate according to our guidelines. 181 | 182 | 2. Second occurrence: We will send you a private message with a warning that 183 | any additional violations will result in removal from the community. 184 | 185 | 3. Third occurrence: Depending on the violation, we may need to delete or ban 186 | your account. 187 | 188 | **Notes:** 189 | 190 | * Obvious spammers are banned on first occurrence. If we don't do this, we'll 191 | have spam all over the place. 192 | 193 | * Violations are forgiven after 6 months of good behavior, and we won't hold a 194 | grudge. 195 | 196 | * People who commit minor infractions will get some education, rather than 197 | hammering them in the 3 strikes process. 198 | 199 | * The rules apply equally to everyone in the community, no matter how much 200 | you've contributed. 201 | 202 | * Extreme violations of a threatening, abusive, destructive or illegal nature 203 | will be addressed immediately and are not subject to 3 strikes or forgiveness. 204 | 205 | * Contact abuse@docker.com to report abuse or appeal violations. In the case of 206 | appeals, we know that mistakes happen, and we'll work with you to come up with a 207 | fair solution if there has been a misunderstanding. 208 | 209 | ### Sign your work 210 | 211 | The sign-off is a simple line at the end of the explanation for the patch. 
Your 212 | signature certifies that you wrote the patch or otherwise have the right to pass 213 | it on as an open-source patch. The rules are pretty simple: if you can certify 214 | the below (from [developercertificate.org](http://developercertificate.org/)): 215 | 216 | ``` 217 | Developer Certificate of Origin 218 | Version 1.1 219 | 220 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 221 | 1 Letterman Drive 222 | Suite D4700 223 | San Francisco, CA, 94129 224 | 225 | Everyone is permitted to copy and distribute verbatim copies of this 226 | license document, but changing it is not allowed. 227 | 228 | 229 | Developer's Certificate of Origin 1.1 230 | 231 | By making a contribution to this project, I certify that: 232 | 233 | (a) The contribution was created in whole or in part by me and I 234 | have the right to submit it under the open source license 235 | indicated in the file; or 236 | 237 | (b) The contribution is based upon previous work that, to the best 238 | of my knowledge, is covered under an appropriate open source 239 | license and I have the right under that license to submit that 240 | work with modifications, whether created in whole or in part 241 | by me, under the same open source license (unless I am 242 | permitted to submit under a different license), as indicated 243 | in the file; or 244 | 245 | (c) The contribution was provided directly to me by some other 246 | person who certified (a), (b) or (c) and I have not modified 247 | it. 248 | 249 | (d) I understand and agree that this project and the contribution 250 | are public and that a record of the contribution (including all 251 | personal information I submit with it, including my sign-off) is 252 | maintained indefinitely and may be redistributed consistent with 253 | this project or the open source license(s) involved. 254 | ``` 255 | 256 | Then you just add a line to every git commit message: 257 | 258 | Signed-off-by: Joe Smith 259 | 260 | Use your real name (sorry, no pseudonyms or anonymous contributions.) 261 | 262 | If you set your `user.name` and `user.email` git configs, you can sign your 263 | commit automatically with `git commit -s`. 264 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | https://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2015-2017 Docker, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 183 | You may obtain a copy of the License at 184 | 185 | https://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | # Project maintainers file 2 | # 3 | # This file describes who runs the linuxkit/linuxkit project and how. 4 | # This is a living document - if you see something out of date or missing, speak up! 5 | # 6 | # It is structured to be consumable by both humans and programs. 7 | # To extract its contents programmatically, use any TOML-compliant parser. 8 | # 9 | # This file is compiled into the MAINTAINERS file in docker/opensource. 
10 | # 11 | [Org] 12 | [Org."Core maintainers"] 13 | people = [ 14 | "deitch", 15 | "ijc", 16 | "justincormack", 17 | "riyazdf", 18 | "rn", 19 | ] 20 | 21 | [people] 22 | 23 | # A reference list of all people associated with the project. 24 | # All other sections should refer to people by their canonical key 25 | # in the people section. 26 | 27 | # ADD YOURSELF HERE IN ALPHABETICAL ORDER 28 | [People.deitch] 29 | Name = "Avi Deitcher" 30 | Email = "avi@atomicinc.com" 31 | GitHub = "deitch" 32 | 33 | [People.ijc] 34 | Name = "Ian Campbell" 35 | Email = "ian.campbell@docker.com" 36 | GitHub = "ijc" 37 | 38 | [people.justincormack] 39 | Name = "Justin Cormack" 40 | Email = "justin.cormack@docker.com" 41 | GitHub = "justincormack" 42 | 43 | [people.riyazdf] 44 | Name = "Riyaz Faizullabhoy" 45 | Email = "riyaz@docker.com" 46 | GitHub = "riyazdf" 47 | 48 | [people.rn] 49 | Name = "Rolf Neugebauer" 50 | Email = "rolf.neugebauer@docker.com" 51 | GitHub = "rn" 52 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | KUBE_RUNTIME ?= docker 2 | KUBE_NETWORK ?= weave 3 | 4 | KUBE_NETWORK_WEAVE ?= v2.2.1 5 | 6 | ifeq ($(shell uname -s),Darwin) 7 | KUBE_FORMATS ?= iso-efi 8 | else 9 | KUBE_FORMATS ?= iso-bios 10 | endif 11 | 12 | KUBE_FORMAT_ARGS := $(patsubst %,-format %,$(KUBE_FORMATS)) 13 | 14 | KUBE_BASENAME ?= kube- 15 | 16 | .PHONY: all master node 17 | all: master node 18 | 19 | master: yml/kube.yml yml/$(KUBE_RUNTIME).yml yml/$(KUBE_RUNTIME)-master.yml yml/$(KUBE_NETWORK).yml $(KUBE_EXTRA_YML) 20 | linuxkit $(LINUXKIT_ARGS) build $(LINUXKIT_BUILD_ARGS) -name $(KUBE_BASENAME)master $(KUBE_FORMAT_ARGS) $^ 21 | 22 | node: yml/kube.yml yml/$(KUBE_RUNTIME).yml yml/$(KUBE_NETWORK).yml $(KUBE_EXTRA_YML) 23 | linuxkit $(LINUXKIT_ARGS) build $(LINUXKIT_BUILD_ARGS) -name $(KUBE_BASENAME)node $(KUBE_FORMAT_ARGS) $^ 24 | 25 | yml/weave.yml: kube-weave.yaml 26 | 27 | kube-weave.yaml: 28 | curl -L -o $@ https://cloud.weave.works/k8s/v1.8/net?v=$(KUBE_NETWORK_WEAVE) 29 | 30 | .PHONY: update-hashes 31 | update-hashes: 32 | set -e ; for tag in $$(linuxkit pkg show-tag pkg/kubelet) \ 33 | $$(linuxkit pkg show-tag pkg/cri-containerd) \ 34 | $$(linuxkit pkg show-tag pkg/kubernetes-docker-image-cache-common) \ 35 | $$(linuxkit pkg show-tag pkg/kubernetes-docker-image-cache-control-plane) ; do \ 36 | image=$${tag%:*} ; \ 37 | sed -E -i.bak -e "s,$$image:[[:xdigit:]]{40}(-dirty)?,$$tag,g" yml/*.yml ; \ 38 | done 39 | 40 | .PHONY: clean 41 | clean: 42 | rm -f -r \ 43 | kube-*-kernel kube-*-cmdline kube-*-state kube-*-initrd.img *.iso \ 44 | kube-weave.yaml 45 | 46 | .PHONY: refresh-image-caches 47 | refresh-image-caches: 48 | ./scripts/mk-image-cache-lst common > pkg/kubernetes-docker-image-cache-common/images.lst 49 | ./scripts/mk-image-cache-lst control-plane > pkg/kubernetes-docker-image-cache-control-plane/images.lst 50 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2015-2017 Docker, Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | https://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes and LinuxKit 2 | 3 | [![CircleCI](https://circleci.com/gh/linuxkit/kubernetes.svg?style=svg)](https://circleci.com/gh/linuxkit/kubernetes) 4 | 5 | This project aims to demonstrate how one can create minimal and immutable Kubernetes OS images with LinuxKit. 6 | 7 | ## Build requirements 8 | 9 | To build images and to rebuild the individual packages you will need the [LinuxKit tool](https://github.com/linuxkit/linuxkit/tree/master/src/cmd/linuxkit) 10 | 11 | If you already have `go` installed you can use `go get -u github.com/linuxkit/linuxkit/src/cmd/linuxkit` to install the tool. 12 | 13 | On MacOS there is a `brew tap` available. Detailed instructions are at [linuxkit/homebrew-linuxkit](https://github.com/linuxkit/homebrew-linuxkit), the short summary is 14 | ``` 15 | brew tap linuxkit/linuxkit 16 | brew install --HEAD linuxkit 17 | ``` 18 | 19 | Build requirements from source: 20 | - GNU `make` 21 | - Docker 22 | - optionally `qemu` 23 | 24 | ## Building OS images 25 | 26 | To build the default OS images: 27 | ``` 28 | make all 29 | ``` 30 | 31 | By default this will build images using Docker Engine for execution. To instead use cri-containerd use: 32 | ``` 33 | make all KUBE_RUNTIME=cri-containerd 34 | ``` 35 | 36 | ## Booting and initialising OS images 37 | 38 | Boot Kubernetes master OS image using `hyperkit` on macOS: or `qemu` on Linux: 39 | ``` 40 | ./boot.sh 41 | ``` 42 | or, to automatically initialise the cluster upon boot with no additional options 43 | ``` 44 | KUBE_MASTER_AUTOINIT="" ./boot.sh 45 | ``` 46 | 47 | Get IP address of the master: 48 | ``` 49 | ip addr show dev eth0 50 | ``` 51 | 52 | Login to the kubelet container: 53 | ``` 54 | ./ssh_into_kubelet.sh 55 | ``` 56 | 57 | Manually initialise master with `kubeadm` if booted without `KUBE_MASTER_AUTOINIT`: 58 | ``` 59 | kubeadm-init.sh 60 | ``` 61 | 62 | Once `kubeadm` exits, make sure to copy the `kubeadm join` arguments, 63 | and try `kubectl get nodes` from within the master. 64 | 65 | If you just want to run a single node cluster with jobs running on the master, you can use: 66 | ``` 67 | kubectl taint nodes --all node-role.kubernetes.io/master- --kubeconfig /etc/kubernetes/admin.conf 68 | ``` 69 | 70 | To boot a node use: 71 | ``` 72 | ./boot.sh [ ...] 73 | ``` 74 | 75 | More specifically, to start 3 nodes use 3 separate shells and run this: 76 | ``` 77 | shell1> ./boot.sh 1 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443 --discovery-token-unsafe-skip-ca-verification 78 | shell2> ./boot.sh 2 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443 --discovery-token-unsafe-skip-ca-verification 79 | shell3> ./boot.sh 3 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443 --discovery-token-unsafe-skip-ca-verification 80 | ``` 81 | 82 | ## Platform specific information 83 | 84 | ### MacOS 85 | 86 | The above instructions should work as is. 
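For example, a minimal end-to-end session on macOS might look like the sketch below. It simply chains together the steps above; the token and API server address are the illustrative values used elsewhere in this README (yours are printed by `kubeadm-init.sh`), and it assumes `ssh_into_kubelet.sh` is given the master's IP address as reported by `ip addr show dev eth0`.
```
# Build the default images (Docker runtime, weave networking)
make all

# Boot the master and let it initialise itself
KUBE_MASTER_AUTOINIT="" ./boot.sh

# In another shell: log in to the kubelet container on the master
./ssh_into_kubelet.sh 192.168.65.22

# Inside the master, check that the node has registered
kubectl get nodes --kubeconfig /etc/kubernetes/admin.conf

# In a further shell, boot a worker node with the join arguments printed by kubeadm
./boot.sh 1 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443 --discovery-token-unsafe-skip-ca-verification
```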
87 | 88 | ### Linux 89 | 90 | By default `linuxkit run` uses user mode networking which does not 91 | support access from the host. To work around this you can use port 92 | forwarding e.g. 93 | 94 | KUBE_RUN_ARGS="-publish 2222:22" ./boot.sh 95 | 96 | ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2222 root@localhost 97 | 98 | However, you will not be able to run worker nodes since individual 99 | instances cannot see each other. 100 | 101 | Enabling networking between instances unfortunately requires `root` 102 | privileges to configure a bridge and set up the bridge-mode privileged 103 | helper. 104 | 105 | See http://wiki.qemu.org/Features/HelperNetworking for details; in 106 | brief you will need: 107 | 108 | - To set up and configure a bridge (including e.g. DHCP) on the 109 | host. (You can reuse a bridge created by e.g. `virt-manager`.) 110 | - To set the `qemu-bridge-helper` setuid root. The location differs by 111 | distro, it could be `/usr/lib/qemu/qemu-bridge-helper` or 112 | `/usr/local/libexec/qemu-bridge-helper` or elsewhere. You need to 113 | `chmod u+s «PATH»`. 114 | - To list the bridge created in the first step in `/etc/qemu/bridge.conf` 115 | with a line like `allow br0` (if your bridge is called `br0`). 116 | - To set `KUBE_NETWORKING=bridge,«name»` e.g. 117 | 118 | KUBE_NETWORKING="bridge,br0" ./boot.sh 119 | KUBE_NETWORKING="bridge,br0" ./boot.sh 1 «options» 120 | 121 | ## Configuration 122 | 123 | The `boot.sh` script has various configuration variables at the top 124 | which can be overridden via the environment, e.g. 125 | 126 | KUBE_VCPUS=4 ./boot.sh 127 | -------------------------------------------------------------------------------- /boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | : ${KUBE_MASTER_VCPUS:=2} 6 | : ${KUBE_MASTER_MEM:=1024} 7 | : ${KUBE_MASTER_DISK:=4G} 8 | : ${KUBE_MASTER_UNTAINT:=n} 9 | 10 | : ${KUBE_NODE_VCPUS:=2} 11 | : ${KUBE_NODE_MEM:=4096} 12 | : ${KUBE_NODE_DISK:=8G} 13 | 14 | : ${KUBE_NETWORKING:=default} 15 | : ${KUBE_RUN_ARGS:=} 16 | : ${KUBE_EFI:=} 17 | : ${KUBE_MAC:=} 18 | : ${KUBE_CLEAR_STATE:=} 19 | 20 | : ${KUBE_METADATA:=} # Without the outermost braces {}. 21 | 22 | [ "$(uname -s)" = "Darwin" ] && KUBE_EFI=1 23 | 24 | suffix=".iso" 25 | [ -n "${KUBE_EFI}" ] && suffix="-efi.iso" && uefi="--uefi" 26 | 27 | if [ $# -eq 0 ] ; then 28 | img="kube-master" 29 | # If $KUBE_MASTER_AUTOINIT is set, including if it is set to "" 30 | # then we configure for auto init. If it is completely unset then 31 | # we do not.
32 | if [ -n "${KUBE_MASTER_AUTOINIT+x}" ] ; then 33 | kubeadm_data="${kubeadm_data:+$kubeadm_data, }\"init\": { \"content\": \"${KUBE_MASTER_AUTOINIT}\" }" 34 | fi 35 | if [ "${KUBE_MASTER_UNTAINT}" = "y" ] ; then 36 | kubeadm_data="${kubeadm_data:+$kubeadm_data, }\"untaint-master\": { \"content\": \"\" }" 37 | fi 38 | 39 | state="kube-master-state" 40 | 41 | : ${KUBE_VCPUS:=$KUBE_MASTER_VCPUS} 42 | : ${KUBE_MEM:=$KUBE_MASTER_MEM} 43 | : ${KUBE_DISK:=$KUBE_MASTER_DISK} 44 | elif [ $# -ge 1 ] ; then 45 | case $1 in 46 | ''|*[!0-9]*) 47 | echo "Node number must be a number" 48 | exit 1 49 | ;; 50 | 0) 51 | echo "Node number must be greater than 0" 52 | exit 1 53 | ;; 54 | *) ;; 55 | esac 56 | img="kube-node" 57 | name="node-${1}" 58 | shift 59 | 60 | if [ $# -ge 1 ] ; then 61 | kubeadm_data="\"join\": { \"content\": \"${*}\" }" 62 | fi 63 | 64 | state="kube-${name}-state" 65 | 66 | : ${KUBE_VCPUS:=$KUBE_NODE_VCPUS} 67 | : ${KUBE_MEM:=$KUBE_NODE_MEM} 68 | : ${KUBE_DISK:=$KUBE_NODE_DISK} 69 | else 70 | echo "Usage:" 71 | echo " - Boot master:" 72 | echo " ${0}" 73 | echo " - Boot node:" 74 | echo " ${0} " 75 | exit 1 76 | fi 77 | 78 | set -x 79 | if [ -n "${KUBE_CLEAR_STATE}" ] ; then 80 | rm -rf "${state}" 81 | mkdir "${state}" 82 | if [ -n "${KUBE_MAC}" ] ; then 83 | echo -n "${KUBE_MAC}" > "${state}"/mac-addr 84 | fi 85 | fi 86 | 87 | mkdir -p "${state}" 88 | touch $state/metadata.json 89 | if [ -n "${KUBE_METADATA}" ] ; then 90 | metadata="${metadata:+$metadata, }${KUBE_METADATA}" 91 | fi 92 | if [ -n "${kubeadm_data}" ] ; then 93 | metadata="${metadata:+$metadata, }\"kubeadm\": { \"entries\": { ${kubeadm_data} } }" 94 | fi 95 | if [ -n "${metadata}" ] ; then 96 | metadata="{ ${metadata} }" 97 | echo "${metadata}" > $state/metadata.json 98 | fi 99 | 100 | exec linuxkit run ${KUBE_RUN_ARGS} -networking ${KUBE_NETWORKING} -cpus ${KUBE_VCPUS} -mem ${KUBE_MEM} -state "${state}" -disk size=${KUBE_DISK} -data-file $state/metadata.json -iso ${uefi} "${img}${suffix}" 101 | -------------------------------------------------------------------------------- /pkg/cri-containerd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM linuxkit/alpine:1b05307ae8152e3d38f79e297b0632697a30c65c AS build 2 | 3 | RUN \ 4 | apk add \ 5 | bash \ 6 | gcc \ 7 | git \ 8 | go \ 9 | libc-dev \ 10 | libseccomp-dev \ 11 | linux-headers \ 12 | make \ 13 | && true 14 | ENV GOPATH=/go PATH=$PATH:/go/bin 15 | 16 | ENV CRI_CONTAINERD_URL https://github.com/containerd/cri-containerd.git 17 | #ENV CRI_CONTAINERD_BRANCH pull/NNN/head 18 | # This is the `standalone-cri-containerd` branch, which is at the point right before standalone mode was deleted. 19 | ENV CRI_CONTAINERD_COMMIT 64b098a293831f742aeb3dd3e48a5405990c14c5 20 | RUN mkdir -p $GOPATH/src/github.com/containerd && \ 21 | cd $GOPATH/src/github.com/containerd && \ 22 | git clone $CRI_CONTAINERD_URL cri-containerd 23 | WORKDIR $GOPATH/src/github.com/containerd/cri-containerd 24 | RUN set -e; \ 25 | if [ -n "$CRI_CONTAINERD_BRANCH" ] ; then \ 26 | git fetch origin "$CRI_CONTAINERD_BRANCH"; \ 27 | fi; \ 28 | git checkout -q $CRI_CONTAINERD_COMMIT 29 | RUN make static-binaries BUILD_TAGS="seccomp" 30 | 31 | RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/ 32 | # util-linux because a full ns-enter is required. 
33 | # example commands: /usr/bin/nsenter --net= -F -- 34 | # /usr/bin/nsenter --net=/var/run/netns/cni-5e8acebe-810d-c1b9-ced0-47be2f312fa8 -F -- 35 | # NB the first ("--net=") is actually not valid -- see https://github.com/containerd/cri/issues/245 36 | RUN apk add --no-cache --initdb -p /out \ 37 | alpine-baselayout \ 38 | busybox \ 39 | ca-certificates \ 40 | iptables \ 41 | util-linux \ 42 | socat \ 43 | && true 44 | # Remove apk residuals. We have a read-only rootfs, so apk is of no use. 45 | RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache 46 | 47 | RUN make DESTDIR=/out install 48 | 49 | FROM scratch 50 | WORKDIR / 51 | ENTRYPOINT ["cri-containerd", "--log-level", "info", "--network-bin-dir", "/opt/cni/bin", "--network-conf-dir", "/etc/cni/net.d"] 52 | COPY --from=build /out / 53 | -------------------------------------------------------------------------------- /pkg/cri-containerd/build.yml: -------------------------------------------------------------------------------- 1 | org: linuxkit 2 | image: cri-containerd 3 | network: true 4 | arches: 5 | - amd64 6 | config: 7 | binds: 8 | - /etc/resolv.conf:/etc/resolv.conf 9 | - /run:/run:rshared,rbind 10 | - /dev:/dev 11 | - /tmp:/tmp 12 | - /var:/var:rshared,rbind 13 | - /var/lib/kubeadm:/etc/kubernetes 14 | - /var/lib/cni/conf:/etc/cni/net.d:rshared,rbind 15 | - /var/lib/cni/bin:/opt/cni/bin:rshared,rbind 16 | - /run/containerd/containerd.sock:/run/containerd/containerd.sock 17 | - /var/lib/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:rshared,rbind 18 | mounts: 19 | - type: bind 20 | source: /sys/fs/cgroup 21 | destination: /sys/fs/cgroup 22 | options: ["rw","rbind","rshared","nosuid","noexec","nodev","relatime"] 23 | capabilities: 24 | - all 25 | rootfsPropagation: shared 26 | pid: host 27 | runtime: 28 | mkdir: 29 | - /var/lib/kubeadm 30 | - /var/lib/cni/conf 31 | - /var/lib/cni/bin 32 | - /var/lib/kubelet-plugins 33 | -------------------------------------------------------------------------------- /pkg/kube-e2e-test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM linuxkit/alpine:1b05307ae8152e3d38f79e297b0632697a30c65c AS build 2 | 3 | # When changing kubernetes_version remember to also update: 4 | # - scripts/mk-image-cache-lst and run `make refresh-image-caches` from top-level 5 | # - pkg/kubelet/Dockerfile 6 | ENV kubernetes_version v1.10.3 7 | 8 | RUN apk add -U --no-cache \ 9 | bash \ 10 | coreutils \ 11 | curl \ 12 | findutils \ 13 | git \ 14 | go \ 15 | grep \ 16 | libc-dev \ 17 | linux-headers \ 18 | make \ 19 | rsync \ 20 | && true 21 | 22 | ENV GOPATH=/go PATH=$PATH:/go/bin 23 | 24 | ENV KUBERNETES_URL https://github.com/kubernetes/kubernetes.git 25 | #ENV KUBERNETES_BRANCH pull/NNN/head 26 | ENV KUBERNETES_COMMIT ${kubernetes_version} 27 | RUN mkdir -p $GOPATH/src/github.com/kubernetes && \ 28 | cd $GOPATH/src/github.com/kubernetes && \ 29 | git clone $KUBERNETES_URL kubernetes 30 | WORKDIR $GOPATH/src/github.com/kubernetes/kubernetes 31 | RUN set -e; \ 32 | if [ -n "$KUBERNETES_BRANCH" ] ; then \ 33 | git fetch origin "$KUBERNETES_BRANCH"; \ 34 | fi; \ 35 | git checkout -q $KUBERNETES_COMMIT 36 | 37 | RUN make WHAT="cmd/kubectl test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo" 38 | 39 | RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/ 40 | RUN apk add --no-cache --initdb -p /out \ 41 | alpine-baselayout \ 42 | bash \ 43 | busybox \ 44 | ca-certificates \ 45 | curl \ 46 | musl \ 47 | socat \ 48 | util-linux \ 49 | && true 50 | 51 | RUN 
cp _output/bin/kubectl /out/usr/bin/kubectl 52 | RUN cp _output/bin/ginkgo /out/usr/bin/ginkgo 53 | RUN cp _output/bin/e2e.test /out/usr/bin/e2e.test 54 | 55 | # Remove apk residuals. We have a read-only rootfs, so apk is of no use. 56 | RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache 57 | 58 | ADD in-cluster-config.yaml /out/etc/in-cluster-config.yaml 59 | ADD e2e.sh /out/usr/bin/e2e.sh 60 | 61 | FROM scratch 62 | WORKDIR / 63 | ENV KUBECONFIG /etc/in-cluster-config.yaml 64 | ENTRYPOINT ["/usr/bin/e2e.sh"] 65 | COPY --from=build /out / 66 | -------------------------------------------------------------------------------- /pkg/kube-e2e-test/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes end-to-end test suite (e2e) 2 | 3 | This package provides the Kubernetes e2e test suite; it is simple and convenient to use within LinuxKit CI. 4 | 5 | > There are other ways to run e2e tests, however there are downsides to each of those approaches 6 | > and maintaining this package is not seen as a major downside at present (however, things may 7 | > change in the future). 8 | > For example, [kubetest][] attempts to set up a cluster by itself, and also has various modes which make 9 | > it more complex to use; additionally it downloads release tarballs each time you run it. 10 | > And [sonobuoy][] appears to have dependencies on a service, which is not desirable for the LinuxKit use case. 11 | 12 | [kubetest]: https://github.com/kubernetes/test-infra/tree/master/kubetest 13 | [sonobuoy]: https://github.com/heptio/sonobuoy 14 | 15 | ## Building the package 16 | 17 | ``` 18 | linuxkit pkg build pkg/kube-e2e-test 19 | ``` 20 | 21 | This will result in a `linuxkit/kube-e2e-test:` image that can be used with `scripts/run-e2e-test.sh`. 22 | 23 | ## Running as a Job on any cluster 24 | 25 | Start the test suite: 26 | ``` 27 | scripts/run-e2e-test.sh 28 | ``` 29 | 30 | After the script exits, you can find `e2e.log` in the current directory. 31 | 32 | Please consult the [Kubernetes documentation for more information][e2e-docs].
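While the suite is running, the Job created by `scripts/run-e2e-test.sh` can be watched directly. A minimal sketch, assuming the defaults hard-coded in that script (namespace `kube-system`, job and selector name `kube-e2e-test`):

```
# watch the pod(s) backing the Job
kubectl --namespace kube-system get pods --selector job-name=kube-e2e-test --watch

# stream the suite output before the script collects it into e2e.log
kubectl --namespace kube-system logs --follow Job/kube-e2e-test
```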
33 | 34 | [e2e-docs]: https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-tests.md 35 | -------------------------------------------------------------------------------- /pkg/kube-e2e-test/build.yml: -------------------------------------------------------------------------------- 1 | org: linuxkit 2 | image: kube-e2e-test 3 | network: true 4 | arches: 5 | - amd64 6 | -------------------------------------------------------------------------------- /pkg/kube-e2e-test/e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eu 4 | 5 | # cleanup resources created by previous runs 6 | kubectl get namespaces \ 7 | --output="jsonpath={range .items[?(.status.phase == \"Active\")]}{.metadata.name}{\"\n\"}{end}" \ 8 | | grep '^e2e-.*' \ 9 | | xargs -r kubectl delete namespaces 10 | 11 | # execute the test suite 12 | exec /usr/bin/ginkgo \ 13 | -progress \ 14 | -nodes="${E2E_PARALLEL}" \ 15 | -flakeAttempts="${E2E_FLAKE_ATTEMPTS}" \ 16 | -skip="${E2E_SKIP}" \ 17 | -focus="${E2E_FOCUS}" \ 18 | /usr/bin/e2e.test -- \ 19 | -provider="${E2E_CLOUD_PROVIDER}" \ 20 | -host="https://kubernetes.default.svc:443" \ 21 | -kubeconfig="${KUBECONFIG}" \ 22 | -test.short \ 23 | -test.v 24 | -------------------------------------------------------------------------------- /pkg/kube-e2e-test/in-cluster-config.yaml: -------------------------------------------------------------------------------- 1 | # most tests generate `kubeconfig` on the fly, but some depend on `-host`, 2 | # so we must provide `-kubeconfig` and `-host` flags until that changes 3 | apiVersion: v1 4 | kind: Config 5 | clusters: 6 | - name: kube-e2e-test-cluster 7 | cluster: 8 | server: https://kubernetes.default.svc:443 9 | certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 10 | users: 11 | - name: kube-e2e-test-user 12 | user: 13 | tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 14 | contexts: 15 | - name: kube-e2e-test-context 16 | context: 17 | cluster: kube-e2e-test-cluster 18 | user: kube-e2e-test-user 19 | current-context: kube-e2e-test-context 20 | -------------------------------------------------------------------------------- /pkg/kubelet/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM linuxkit/alpine:1b05307ae8152e3d38f79e297b0632697a30c65c AS build 2 | 3 | # When changing kubernetes_version remember to also update: 4 | # - scripts/mk-image-cache-lst and run `make refresh-image-caches` from top-level 5 | # - pkg/e2e-test/Dockerfile 6 | ENV kubernetes_version v1.10.3 7 | ENV cni_version v0.7.1 8 | ENV critools_version v1.0.0-alpha.0 9 | 10 | RUN apk add -U --no-cache \ 11 | bash \ 12 | coreutils \ 13 | curl \ 14 | findutils \ 15 | git \ 16 | go \ 17 | grep \ 18 | libc-dev \ 19 | linux-headers \ 20 | make \ 21 | rsync \ 22 | && true 23 | 24 | ENV GOPATH=/go PATH=$PATH:/go/bin 25 | 26 | ### Kubernetes (incl Kubelet) 27 | 28 | ENV KUBERNETES_URL https://github.com/kubernetes/kubernetes.git 29 | #ENV KUBERNETES_BRANCH pull/NNN/head 30 | ENV KUBERNETES_COMMIT ${kubernetes_version} 31 | RUN mkdir -p $GOPATH/src/github.com/kubernetes && \ 32 | cd $GOPATH/src/github.com/kubernetes && \ 33 | git clone $KUBERNETES_URL kubernetes 34 | WORKDIR $GOPATH/src/github.com/kubernetes/kubernetes 35 | RUN set -e; \ 36 | if [ -n "$KUBERNETES_BRANCH" ] ; then \ 37 | git fetch origin "$KUBERNETES_BRANCH"; \ 38 | fi; \ 39 | git checkout -q $KUBERNETES_COMMIT 40 | 41 | RUN make WHAT="cmd/kubelet 
cmd/kubectl cmd/kubeadm" 42 | 43 | ### CNI plugins 44 | 45 | ENV CNI_URL https://github.com/containernetworking/plugins 46 | #ENV CNI_BRANCH pull/NNN/head 47 | ENV CNI_COMMIT ${cni_version} 48 | RUN mkdir -p $GOPATH/github.com/containernetworking/ && \ 49 | cd $GOPATH/github.com/containernetworking/ && \ 50 | git clone $CNI_URL plugins 51 | WORKDIR $GOPATH/github.com/containernetworking/plugins 52 | RUN set -e; \ 53 | if [ -n "$CNI_BRANCH" ] ; then \ 54 | git fetch origin "$CNI_BRANCH"; \ 55 | fi; \ 56 | git checkout -q $CNI_COMMIT 57 | RUN ./build.sh 58 | 59 | ### critools 60 | 61 | ENV CRITOOLS_URL https://github.com/kubernetes-incubator/cri-tools 62 | #ENV CRITOOLS_BRANCH pull/NNN/head 63 | ENV CRITOOLS_COMMIT ${critools_version} 64 | RUN mkdir -p $GOPATH/github.com/kubernetes-incubator/ && \ 65 | cd $GOPATH/github.com/kubernetes-incubator/ && \ 66 | git clone $CRITOOLS_URL cri-tools 67 | WORKDIR $GOPATH/github.com/kubernetes-incubator/cri-tools 68 | RUN set -e; \ 69 | if [ -n "$CRITOOLS_BRANCH" ] ; then \ 70 | git fetch origin "$CRITOOLS_BRANCH"; \ 71 | fi; \ 72 | git checkout -q $CRITOOLS_COMMIT 73 | RUN make binaries 74 | 75 | ## Construct final image 76 | 77 | RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/ 78 | # coreutils needed for du -B for disk image checks made by kubelet 79 | # example: $ du -s -B 1 /var/lib/kubelet/pods/... 80 | # du: unrecognized option: B 81 | RUN apk add --no-cache --initdb -p /out \ 82 | alpine-baselayout \ 83 | busybox \ 84 | ca-certificates \ 85 | coreutils \ 86 | curl \ 87 | ebtables \ 88 | ethtool \ 89 | findutils \ 90 | iproute2 \ 91 | iptables \ 92 | musl \ 93 | openssl \ 94 | socat \ 95 | util-linux \ 96 | nfs-utils \ 97 | && true 98 | 99 | RUN cp $GOPATH/src/github.com/kubernetes/kubernetes/_output/bin/kubelet /out/usr/bin/kubelet 100 | RUN cp $GOPATH/src/github.com/kubernetes/kubernetes/_output/bin/kubeadm /out/usr/bin/kubeadm 101 | RUN cp $GOPATH/src/github.com/kubernetes/kubernetes/_output/bin/kubectl /out/usr/bin/kubectl 102 | 103 | RUN tar -czf /out/root/cni.tgz -C $GOPATH/github.com/containernetworking/plugins/bin . 104 | 105 | RUN cp $GOPATH/bin/crictl /out/usr/bin/crictl 106 | RUN cp $GOPATH/bin/critest /out/usr/bin/critest 107 | 108 | # Remove apk residuals. We have a read-only rootfs, so apk is of no use.
109 | RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache 110 | 111 | ADD kubelet.sh /out/usr/bin/kubelet.sh 112 | ADD kubeadm-init.sh /kubeadm-init.sh 113 | RUN sed -e "s/@KUBERNETES_VERSION@/${kubernetes_version}/g" </kubeadm-init.sh >/out/usr/bin/kubeadm-init.sh && chmod +x /out/usr/bin/kubeadm-init.sh 114 | 115 | FROM scratch 116 | WORKDIR / 117 | ENTRYPOINT ["/usr/bin/kubelet.sh"] 118 | COPY --from=build /out / 119 | ENV KUBECONFIG "/etc/kubernetes/admin.conf" 120 | -------------------------------------------------------------------------------- /pkg/kubelet/build.yml: -------------------------------------------------------------------------------- 1 | org: linuxkit 2 | image: kubelet 3 | network: true 4 | arches: 5 | - amd64 6 | config: 7 | binds: 8 | - /dev:/dev 9 | - /etc/resolv.conf:/etc/resolv.conf 10 | - /etc/os-release:/etc/os-release 11 | - /run:/run:rshared,rbind 12 | - /var:/var:rshared,rbind 13 | - /var/lib/kubeadm:/etc/kubernetes 14 | - /etc/kubelet.sh.conf:/etc/kubelet.sh.conf 15 | - /etc/kubeadm:/etc/kubeadm 16 | - /var/lib/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:rshared,rbind 17 | - /var/lib/cni/conf:/etc/cni/net.d:rshared,rbind 18 | - /var/lib/cni/bin:/opt/cni/bin:rshared,rbind 19 | mounts: 20 | - type: bind 21 | source: /sys/fs/cgroup 22 | destination: /sys/fs/cgroup 23 | options: ["rw","rbind","rshared","nosuid","noexec","nodev","relatime"] 24 | capabilities: 25 | - all 26 | rootfsPropagation: shared 27 | pid: host 28 | runtime: 29 | cgroups: 30 | - systemreserved 31 | - podruntime 32 | - kubepods 33 | mkdir: 34 | - /var/lib/kubeadm 35 | - /var/lib/cni/conf 36 | - /var/lib/cni/bin 37 | - /var/lib/kubelet-plugins 38 | - /var/lib/nfs/statd/sm 39 | mounts: 40 | - type: bind 41 | source: /var/lib/cni/bin 42 | destination: /opt/cni/bin 43 | options: ["rw","bind"] 44 | - type: bind 45 | source: /var/lib/cni/conf 46 | destination: /etc/cni/net.d 47 | options: ["rw","bind"] 48 | -------------------------------------------------------------------------------- /pkg/kubelet/kubeadm-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | touch /var/lib/kubeadm/.kubeadm-init.sh-started 4 | if [ -f /etc/kubeadm/kubeadm.yaml ]; then 5 | echo Using the configuration from /etc/kubeadm/kubeadm.yaml 6 | if [ $# -ne 0 ] ; then 7 | echo WARNING: Ignoring command line options: $@ 8 | fi 9 | kubeadm init --ignore-preflight-errors=all --config /etc/kubeadm/kubeadm.yaml 10 | else 11 | kubeadm init --ignore-preflight-errors=all --kubernetes-version @KUBERNETES_VERSION@ $@ 12 | fi 13 | 14 | # sorting by basename relies on the dirnames having the same number of directories 15 | YAML=$(ls -1 /run/config/kube-system.init/*.yaml /etc/kubeadm/kube-system.init/*.yaml 2>/dev/null | sort --field-separator=/ --key=5) 16 | for i in ${YAML}; do 17 | n=$(basename "$i") 18 | if [ -e "$i" ] ; then 19 | if [ ! -s "$i" ] ; then # ignore zero sized files 20 | echo "Ignoring zero size file $n" 21 | continue 22 | fi 23 | echo "Applying $n" 24 | if !
kubectl create -n kube-system -f "$i" ; then 25 | touch /var/lib/kubeadm/.kubeadm-init.sh-kube-system.init-failed 26 | touch /var/lib/kubeadm/.kubeadm-init.sh-kube-system.init-"$n"-failed 27 | echo "Failed to apply $n" 28 | continue 29 | fi 30 | fi 31 | done 32 | if [ -f /run/config/kubeadm/untaint-master ] ; then 33 | echo "Removing \"node-role.kubernetes.io/master\" taint from all nodes" 34 | kubectl taint nodes --all node-role.kubernetes.io/master- 35 | fi 36 | touch /var/lib/kubeadm/.kubeadm-init.sh-finished 37 | -------------------------------------------------------------------------------- /pkg/kubelet/kubelet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Kubelet outputs only to stderr, so arrange for everything we do to go there too 3 | exec 1>&2 4 | 5 | if [ -e /etc/kubelet.sh.conf ] ; then 6 | . /etc/kubelet.sh.conf 7 | fi 8 | 9 | if [ -f /run/config/kubelet/disabled ] ; then 10 | echo "kubelet.sh: /run/config/kubelet/disabled file is present, exiting" 11 | exit 0 12 | fi 13 | if [ -n "$KUBELET_DISABLED" ] ; then 14 | echo "kubelet.sh: KUBELET_DISABLED environ variable is set, exiting" 15 | exit 0 16 | fi 17 | 18 | if [ ! -e /var/lib/cni/.opt.defaults-extracted ] ; then 19 | mkdir -p /var/lib/cni/bin 20 | tar -xzf /root/cni.tgz -C /var/lib/cni/bin 21 | touch /var/lib/cni/.opt.defaults-extracted 22 | fi 23 | 24 | if [ ! -e /var/lib/cni/.cni.conf-extracted ] && [ -d /run/config/cni ] ; then 25 | mkdir -p /var/lib/cni/conf 26 | cp /run/config/cni/* /var/lib/cni/conf/ 27 | touch /var/lib/cni/.cni.configs-extracted 28 | fi 29 | 30 | await=/etc/kubernetes/kubelet.conf 31 | 32 | if [ -f "/etc/kubernetes/kubelet.conf" ] ; then 33 | echo "kubelet.sh: kubelet already configured" 34 | elif [ -d /run/config/kubeadm ] ; then 35 | if [ -f /run/config/kubeadm/init ] ; then 36 | echo "kubelet.sh: init cluster with metadata \"$(cat /run/config/kubeadm/init)\"" 37 | # This needs to be in the background since it waits for kubelet to start. 38 | # We skip printing the token so it is not persisted in the log. 39 | kubeadm-init.sh --skip-token-print $(cat /run/config/kubeadm/init) & 40 | elif [ -e /run/config/kubeadm/join ] ; then 41 | echo "kubelet.sh: joining cluster with metadata \"$(cat /run/config/kubeadm/join)\"" 42 | kubeadm join --ignore-preflight-errors=all $(cat /run/config/kubeadm/join) 43 | await=/etc/kubernetes/bootstrap-kubelet.conf 44 | fi 45 | elif [ -e /run/config/userdata ] ; then 46 | echo "kubelet.sh: joining cluster with metadata \"$(cat /run/config/userdata)\"" 47 | kubeadm join --ignore-preflight-errors=all $(cat /run/config/userdata) 48 | await=/etc/kubernetes/bootstrap-kubelet.conf 49 | fi 50 | 51 | echo "kubelet.sh: waiting for ${await}" 52 | # TODO(ijc) is there a race between kubeadm creating this file and 53 | # finishing the write where we might be able to fall through and 54 | # start kubelet with an incomplete configuration file? I've tried 55 | # to provoke such a race without success. An explicit 56 | # synchronisation barrier or changing kubeadm to write 57 | # kubelet.conf atomically might be good in any case. 
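# A possible (untested) tightening of the wait below would be to also require
# the file to be non-empty, e.g.:
#   until [ -s "${await}" ] ; do sleep 1 ; done
# That would not eliminate the race described above, but it would at least
# avoid proceeding on a just-created, still-empty file.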
58 | until [ -f "${await}" ] ; do 59 | sleep 1 60 | done 61 | 62 | echo "kubelet.sh: ${await} has arrived" 2>&1 63 | 64 | if [ -f "/run/config/kubelet-config.json" ]; then 65 | echo "Found kubelet configuration from /run/config/kubelet-config.json" 66 | else 67 | echo "Generate kubelet configuration to /run/config/kubelet-config.json" 68 | : ${KUBE_CLUSTER_DNS:='"10.96.0.10"'} 69 | cat > /run/config/kubelet-config.json << EOF 70 | { 71 | "kind": "KubeletConfiguration", 72 | "apiVersion": "kubelet.config.k8s.io/v1beta1", 73 | "staticPodPath": "/etc/kubernetes/manifests", 74 | "clusterDNS": [ 75 | ${KUBE_CLUSTER_DNS} 76 | ], 77 | "clusterDomain": "cluster.local", 78 | "cgroupsPerQOS": false, 79 | "enforceNodeAllocatable": [], 80 | "kubeReservedCgroup": "podruntime", 81 | "systemReservedCgroup": "systemreserved", 82 | "cgroupRoot": "kubepods" 83 | } 84 | EOF 85 | fi 86 | 87 | mkdir -p /etc/kubernetes/manifests 88 | 89 | # If using --cgroups-per-qos then need to use --cgroup-root=/ and not 90 | # the --cgroup-root=kubepods from below. This can be done at image 91 | # build time by adding to the service definition: 92 | # 93 | # command: 94 | # - /usr/bin/kubelet.sh 95 | # - --cgroup-root=/ 96 | # - --cgroups-per-qos 97 | exec kubelet \ 98 | --config=/run/config/kubelet-config.json \ 99 | --kubeconfig=/etc/kubernetes/kubelet.conf \ 100 | --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ 101 | --allow-privileged=true \ 102 | --network-plugin=cni \ 103 | --cni-conf-dir=/etc/cni/net.d \ 104 | --cni-bin-dir=/opt/cni/bin \ 105 | --cadvisor-port=0 \ 106 | $KUBELET_ARGS $@ 107 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-common/.gitignore: -------------------------------------------------------------------------------- 1 | dl/*.tar 2 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-common/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM linuxkit/alpine:1b05307ae8152e3d38f79e297b0632697a30c65c AS build 2 | 3 | RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/ 4 | RUN apk add --no-cache --initdb -p /out \ 5 | alpine-baselayout \ 6 | busybox 7 | 8 | # Remove apk residuals. We have a read-only rootfs, so apk is of no use. 
9 | RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache 10 | 11 | FROM scratch 12 | WORKDIR / 13 | COPY --from=build /out / 14 | COPY --from=docker:17.06.0-ce /usr/local/bin/docker /usr/local/bin/docker 15 | COPY dl/*.tar /images/ 16 | ENTRYPOINT [ "/bin/sh", "-c" ] 17 | CMD [ "for image in /images/*.tar ; do docker image load -i $image && rm -f $image ; done" ] 18 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-common/build.yml: -------------------------------------------------------------------------------- 1 | org: linuxkit 2 | image: kubernetes-docker-image-cache-common 3 | arches: 4 | - amd64 5 | config: 6 | binds: 7 | - /var/run:/var/run 8 | depends: 9 | docker-images: 10 | target-dir: dl 11 | from-file: images.lst 12 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-common/images.lst: -------------------------------------------------------------------------------- 1 | # autogenerated by: 2 | # ./scripts/mk-image-cache-lst common 3 | gcr.io/google_containers/kube-proxy-amd64:v1.10.3@sha256:568df575bb2e630abfd4a4754a23a8af7b13c3f4a526796af01021eda3ff7a30 4 | gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8@sha256:23df717980b4aa08d2da6c4cfa327f1b730d92ec9cf740959d2d5911830d82fb 5 | gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8@sha256:6d8e0da4fb46e9ea2034a3f4cab0e095618a2ead78720c12e791342738e5f85d 6 | gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8@sha256:93c827f018cf3322f1ff2aa80324a0306048b0a69bc274e423071fb0d2d29d8b 7 | gcr.io/google_containers/pause-amd64:3.1@sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610 8 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-control-plane/.gitignore: -------------------------------------------------------------------------------- 1 | dl/*.tar 2 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-control-plane/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM linuxkit/alpine:1b05307ae8152e3d38f79e297b0632697a30c65c AS build 2 | 3 | RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/ 4 | RUN apk add --no-cache --initdb -p /out \ 5 | alpine-baselayout \ 6 | busybox 7 | 8 | # Remove apk residuals. We have a read-only rootfs, so apk is of no use. 
9 | RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache 10 | 11 | FROM scratch 12 | WORKDIR / 13 | COPY --from=build /out / 14 | COPY --from=docker:17.06.0-ce /usr/local/bin/docker /usr/local/bin/docker 15 | COPY dl/*.tar /images/ 16 | ENTRYPOINT [ "/bin/sh", "-c" ] 17 | CMD [ "for image in /images/*.tar ; do docker image load -i $image && rm -f $image ; done" ] 18 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-control-plane/build.yml: -------------------------------------------------------------------------------- 1 | org: linuxkit 2 | image: kubernetes-docker-image-cache-control-plane 3 | arches: 4 | - amd64 5 | config: 6 | binds: 7 | - /var/run:/var/run 8 | depends: 9 | docker-images: 10 | target-dir: dl 11 | from-file: images.lst 12 | -------------------------------------------------------------------------------- /pkg/kubernetes-docker-image-cache-control-plane/images.lst: -------------------------------------------------------------------------------- 1 | # autogenerated by: 2 | # ./scripts/mk-image-cache-lst control-plane 3 | gcr.io/google_containers/kube-apiserver-amd64:v1.10.3@sha256:a6c4b6b2429d0a15d30a546226e01b1164118e022ad40f3ece2f95126f1580f5 4 | gcr.io/google_containers/kube-controller-manager-amd64:v1.10.3@sha256:98a3a7dc4c6c60dbeb0273302d697edaa89bd10fceed87ad5144c0b0acc5cced 5 | gcr.io/google_containers/kube-scheduler-amd64:v1.10.3@sha256:4770e1f1eef2229138e45a2b813c927e971da9c40256a7e2321ccf825af56916 6 | gcr.io/google_containers/etcd-amd64:3.1.12@sha256:68235934469f3bc58917bcf7018bf0d3b72129e6303b0bef28186d96b2259317 7 | -------------------------------------------------------------------------------- /poule.yml: -------------------------------------------------------------------------------- 1 | # Add a "status/0-triage" to every newly opened pull request. 2 | - triggers: 3 | pull_request: [ opened ] 4 | operations: 5 | - type: label 6 | filters: { 7 | ~labels: [ "status/0-triage", "status/1-design-review", "status/2-code-review", "status/3-docs-review", "status/4-merge" ], 8 | } 9 | settings: { 10 | patterns: { 11 | status/0-triage: [ ".*" ], 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /scripts/generate-authors.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." 5 | 6 | # see also ".mailmap" for how email addresses and names are deduplicated 7 | 8 | { 9 | cat <<-'EOH' 10 | # This file lists all individuals having contributed content to the repository. 11 | # For how it is generated, see `scripts/generate-authors.sh`. 
12 | EOH 13 | echo 14 | git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf 15 | } > AUTHORS 16 | -------------------------------------------------------------------------------- /scripts/mk-image-cache-lst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | repo=gcr.io/google_containers 3 | # When changing kubernetes_version remember to also update: 4 | # - pkg/kubelet/Dockerfile 5 | # - pkg/e2e-test/Dockerfile 6 | kubernetes_version=v1.10.3 7 | kube_dns_version=1.14.8 8 | pause_version=3.1 9 | etcd_version=3.1.12 10 | 11 | common=" 12 | kube-proxy-amd64:$kubernetes_version 13 | k8s-dns-sidecar-amd64:$kube_dns_version 14 | k8s-dns-kube-dns-amd64:$kube_dns_version 15 | k8s-dns-dnsmasq-nanny-amd64:$kube_dns_version 16 | pause-amd64:$pause_version" 17 | 18 | control=" 19 | kube-apiserver-amd64:$kubernetes_version 20 | kube-controller-manager-amd64:$kubernetes_version 21 | kube-scheduler-amd64:$kubernetes_version 22 | etcd-amd64:$etcd_version" 23 | 24 | oi() { 25 | local i="$1" 26 | digest=$(docker image inspect --format '{{index .RepoDigests 0}}' "$repo/$i") 27 | echo "$repo/${i}@${digest#*@}" 28 | } 29 | 30 | if [ $# -ne 1 ] ; then 31 | echo >&2 "Need exactly one of \`control-plane' or \`common'" 32 | exit 1 33 | fi 34 | 35 | case $1 in 36 | common) pkgs="$common";; 37 | control-plane) pkgs="$control" ;; 38 | esac 39 | 40 | for i in $pkgs ; do 41 | docker image pull "$repo/$i" 1>&2 42 | done 43 | 44 | echo "# autogenerated by:" 45 | echo "# $0 $@" 46 | for i in $pkgs ; do 47 | oi "$i" 48 | done 49 | -------------------------------------------------------------------------------- /scripts/run-e2e-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eu 4 | 5 | E2E_CLOUD_PROVIDER="local" 6 | E2E_PARALLEL="4" 7 | E2E_FLAKE_ATTEMPTS="2" 8 | 9 | E2E_FOCUS='' 10 | E2E_SKIP='\\[Slow\\]|\\[Serial\\]|\\[Disruptive\\]|\\[Flaky\\]|\\[Feature:.+\\]|\\[HPA\\]|Dashboard|Services.*functioning.*NodePort|.*NFS.*|.*Volume.*|\\[sig-storage\\]|.*StatefulSet.*|should\\ proxy\\ to\\ cadvisor\\ using\\ proxy\\ subresource' 11 | 12 | ## To see this fail quickly try: 13 | #E2E_FOCUS='should\ handle\ in-cluster\ config' 14 | #E2E_SKIP='' 15 | ## To see this pass quickly try: 16 | #E2E_FOCUS='Simple\ pod' 17 | #E2E_SKIP='should\ handle\ in-cluster\ config' 18 | 19 | namespace="kube-system" 20 | name="kube-e2e-test" 21 | 22 | cleanup() { 23 | ## we only cleanup control resources, the resources created by the 24 | ## test suite itself are cleaned up by pkg/kube-e2e-test/e2e.sh, as 25 | ## those can be useful for investigation of why something fails 26 | kubectl delete --namespace "${namespace}" \ 27 | "Job/${name}" \ 28 | "ServiceAccount/${name}" \ 29 | "ClusterRole/${name}" \ 30 | "ClusterRoleBinding/${name}" 31 | } 32 | 33 | get_pods() { 34 | kubectl get pods --namespace "${namespace}" --selector job-name="${name}" "$@" 35 | } 36 | 37 | one_pod_running() { 38 | test "$(get_pods --output "jsonpath={range .items[?(.status.phase == \"Running\")]}{.metadata.name}{\"\n\"}{end}" | wc -l)" -eq 1 39 | } 40 | 41 | all_pods_absent() { 42 | test "$(get_pods --output "jsonpath={range .items[*]}{.metadata.name}{\"\n\"}{end}" | wc -l)" -eq 0 43 | } 44 | 45 | get_logs() { 46 | kubectl logs --namespace "${namespace}" "Job/${name}" "$@" || true 47 | } 48 | 49 | echo "$0: deleting any old resources left over from the previous run..." 50 | cleanup 2> /dev/null || true 51 | echo "$0: waiting until old pods are absent..." 
52 | until all_pods_absent ; do sleep 0.5 ; done 53 | 54 | echo "$0: creating resources to run the suite..." 55 | kubectl create --namespace "${namespace}" --filename - < e2e.log 138 | echo "$0: log saved in ${PWD}/e2e.log, cleaning up the resources..." 139 | cleanup 2> /dev/null || true 140 | if grep -q '^Test Suite Passed$' e2e.log ; then 141 | echo "$0: test suite passed, exiting" 142 | exit 0 143 | else 144 | echo "$0: test suite failed, exiting" 145 | exit 1 146 | fi 147 | -------------------------------------------------------------------------------- /scripts/update-linuxkit-hashes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | lkurl="https://github.com/linuxkit/linuxkit" 5 | 6 | tdir=$(mktemp -d) 7 | trap 'if [ -d "$tdir" ] ; then rm -rf $tdir; fi' EXIT 8 | 9 | git clone $lkurl $tdir/lk 10 | 11 | case $# in 12 | 0) ;; 13 | 1) 14 | git -C $tdir/lk reset --hard $1 15 | ;; 16 | *) 17 | echo "Invalid arguments" >&2 18 | exit 1 19 | ;; 20 | esac 21 | 22 | lkrev=$(git -C $tdir/lk show --oneline -s HEAD) 23 | lktag=$(git -C $tdir/lk tag -l --points-at HEAD) 24 | 25 | update_hash() { 26 | local tag=$1; shift 27 | echo "Updating to $tag" 28 | 29 | image=${tag%:*} 30 | sed -i -e "s,$image:[[:xdigit:]]\{40\}\(-dirty\)\?,$tag,g" $@ 31 | } 32 | 33 | # First update alpine build bases 34 | 35 | # Alpine base image does not use linuxkit pkg, grab the tag from versions.x86_64 instead. 36 | if [ ! -f "$tdir/lk/tools/alpine/versions.x86_64" ] ; then 37 | echo "tools/alpine lacks versions.x86_64 file" >&2 38 | exit 1 39 | fi 40 | tag=$(sed -n -e '1s,^\# \(linuxkit/alpine:[[:xdigit:]]\{40\}\)-amd64$,\1,p' $tdir/lk/tools/alpine/versions.x86_64) 41 | if [ ! -n "$tag" ] ; then 42 | echo "Failed to extract tools/alpine tag" >&2 43 | exit 1 44 | fi 45 | update_hash $tag pkg/*/Dockerfile 46 | 47 | for i in $tdir/lk/pkg/* ; do 48 | if [ ! -d "$i" ] ; then 49 | continue 50 | fi 51 | 52 | if [ ! -f "$i/build.yml" ] ; then 53 | echo "$i does not contain a build.yml" >&2 54 | continue 55 | fi 56 | 57 | tag=$(linuxkit pkg show-tag "$i") 58 | update_hash "$tag" yml/*.yml 59 | done 60 | 61 | # Kernel doesn't use `linuxkit pkg` and uses a different 62 | # tagging strategy, so we do it manually by extracting the 63 | # "recommended" one from the toplevel linuxkit.yml 64 | # example. 65 | # TODO: add a target to kernel/Makefile which will show 66 | # the recommended kernel. 67 | tag=$(sed -n -e 's,^\s*image: \(linuxkit/kernel:.\+\)\s*,\1,p' $tdir/lk/linuxkit.yml) 68 | if [ ! -n "$tag" ] ; then 69 | echo "Failed to extract kernel tag" >&2 70 | exit 1 71 | fi 72 | tagmsg="" 73 | if [ -n "$lktag" ] ; then 74 | tagmsg=$(printf "\nTag: %s" $lktag) 75 | fi 76 | # Not update_hash since the tag is not a hash in this case 77 | 78 | echo "Updating to $tag" 79 | sed -i -e "s,linuxkit/kernel:.\+,$tag,g" yml/*.yml 80 | 81 | # We manually construct the S-o-b because -F strips the trailing blank 82 | # lines, meaning that with -s there is no blank between the "Commit: 83 | # ..." and the S-o-b. 84 | uname=$(git config --get user.name) 85 | email=$(git config --get user.email) 86 | 87 | cat >$tdir/commit-msg < 93 | EOF 94 | 95 | git commit --only -F $tdir/commit-msg yml/*.yml pkg/*/Dockerfile 96 | 97 | # Now update for the result of changes to pkg/*/Dockerfile, this is 98 | # defered until now so we get the new hash instead of the old hash 99 | # with a -dirty suffix. 
100 | make update-hashes 101 | git commit --amend --only --no-edit yml/*.yml 102 | -------------------------------------------------------------------------------- /ssh_into_kubelet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | sshopts="-o LogLevel=FATAL \ 4 | -o StrictHostKeyChecking=no \ 5 | -o UserKnownHostsFile=/dev/null \ 6 | -o IdentitiesOnly=yes \ 7 | ${SSHOPTS:-}" 8 | 9 | case $(uname -s) in 10 | Linux) 11 | ssh=ssh 12 | ;; 13 | *) 14 | ssh="docker run --rm -ti \ 15 | -v $HOME/.ssh/:/root/.ssh \ 16 | ijc25/alpine-ssh" 17 | ;; 18 | esac 19 | exec $ssh $sshopts -t root@"$1" ctr --namespace services.linuxkit tasks exec --tty --exec-id ssh-$(hostname)-$$ kubelet ash -l 20 | -------------------------------------------------------------------------------- /test/.gitignore: -------------------------------------------------------------------------------- 1 | _results 2 | cases/_tmp 3 | -------------------------------------------------------------------------------- /test/cases/000_smoke/001_cri-bridge/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # SUMMARY: build and boot using cri-containerd runtime and Bridged networking 3 | # LABELS: 4 | 5 | runtime=cri-containerd 6 | network=bridge 7 | 8 | # Doesn't return 9 | . ../common.sh 10 | -------------------------------------------------------------------------------- /test/cases/000_smoke/002_cri-weave/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # SUMMARY: build and boot using cri-containerd runtime and Weave networking 3 | # LABELS: 4 | 5 | runtime=cri-containerd 6 | network=weave 7 | 8 | # Doesn't return 9 | . ../common.sh 10 | -------------------------------------------------------------------------------- /test/cases/000_smoke/003_docker-bridge/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # SUMMARY: build and boot using Docker runtime and Bridged networking 3 | # LABELS: 4 | 5 | runtime=docker 6 | network=bridge 7 | 8 | # Doesn't return 9 | . ../common.sh 10 | -------------------------------------------------------------------------------- /test/cases/000_smoke/004_docker-weave/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # SUMMARY: build and boot using Docker runtime and Weave networking 3 | # LABELS: 4 | 5 | runtime=docker 6 | network=weave 7 | 8 | # Doesn't return 9 | . ../common.sh 10 | -------------------------------------------------------------------------------- /test/cases/000_smoke/common.sh: -------------------------------------------------------------------------------- 1 | # To be sourced by */test.sh 2 | 3 | set -e 4 | 5 | # Source libraries. Uncomment if needed/defined 6 | #. "${RT_LIB}" 7 | #. 
"${RT_PROJECT_ROOT}/_lib/lib.sh" 8 | 9 | if [ "x$runtime" = "x" ] || [ "x$network" = "x" ] ; then 10 | echo "common.sh requires \$runtime and \$network" >&2 11 | exit 1 12 | fi 13 | 14 | clean_up() { 15 | rm -f kube-master.iso 16 | rm -rf kube-master-state 17 | } 18 | 19 | trap clean_up EXIT 20 | 21 | export KUBE_RUNTIME=$runtime 22 | export KUBE_NETWORK=$network 23 | export LINUXKIT_BUILD_ARGS="--disable-content-trust" 24 | export KUBE_BASENAME="`pwd`/kube-" 25 | export KUBE_EXTRA_YML="`pwd`/../test.yml" 26 | make -C ${RT_PROJECT_ROOT}/../../ master 27 | 28 | ../test.exp ${RT_PROJECT_ROOT}/../../boot.sh ${RT_PROJECT_ROOT}/../../ssh_into_kubelet.sh 29 | 30 | exit 0 31 | -------------------------------------------------------------------------------- /test/cases/000_smoke/group.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # SUMMARY: Basic build and run smoke tests 3 | # LABELS: 4 | 5 | group_init() { 6 | # Group initialisation code goes here 7 | return 0 8 | } 9 | 10 | group_deinit() { 11 | # Group de-initialisation code goes here 12 | return 0 13 | } 14 | 15 | CMD=$1 16 | case $CMD in 17 | init) 18 | group_init 19 | res=$? 20 | ;; 21 | deinit) 22 | group_deinit 23 | res=$? 24 | ;; 25 | *) 26 | res=1 27 | ;; 28 | esac 29 | 30 | exit $res 31 | 32 | -------------------------------------------------------------------------------- /test/cases/000_smoke/test.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect 2 | 3 | set con_prompt "(ns: getty) linuxkit-*:*# " 4 | set ssh_prompt "linuxkit-*:*# " 5 | set timeout 120 6 | 7 | set bootexec [lindex $argv 0] 8 | set sshexec [lindex $argv 1] 9 | 10 | proc kill args { 11 | foreach what $args { 12 | global $what 13 | if [info exists $what] { 14 | upvar #0 $what sid 15 | set pid [exp_pid -i $sid] 16 | puts "killing $what ($sid) = $pid" 17 | exec kill $pid 18 | close $sid 19 | } else { 20 | puts "not killing $what (not started)" 21 | } 22 | } 23 | } 24 | 25 | proc boot_linuxkit {} { 26 | global lk_sid bootexec 27 | spawn env {KUBE_RUN_ARGS=-publish 2222:22 -publish 30002:30002} KUBE_MEM=3584 KUBE_VCPUS=2 KUBE_DISK=8G KUBE_CLEAR_STATE=y KUBE_MASTER_UNTAINT=y $bootexec 28 | set lk_sid $spawn_id 29 | puts "INFO lk ($lk_sid) is pid [exp_pid -i $lk_sid]" 30 | } 31 | 32 | proc ssh_into_kubelet {} { 33 | global ssh_sid sshexec 34 | 35 | set sshopts {-p 2222 -o ConnectTimeout=5 -o LogLevel=DEBUG} 36 | if [info exists ::env(SSHOPTS)] { 37 | set sshopts [concat $::env(SSHOPTS) $sshopts] 38 | } 39 | spawn env SSHOPTS=$sshopts $sshexec localhost 40 | set ssh_sid $spawn_id 41 | puts "INFO ssh ($ssh_sid) is pid [exp_pid -i $ssh_sid]" 42 | } 43 | 44 | proc await_prompt {sidvar promptvar step} { 45 | upvar #0 $sidvar sid $promptvar prompt 46 | expect -i $sid \ 47 | $prompt { 48 | puts "SUCCESS $step" 49 | } timeout { 50 | puts "FAIL $step (timeout)" 51 | kill ssh_sid lk_sid 52 | exit 1 53 | } eof { 54 | puts "FAIL $step (eof)" 55 | kill ssh_sid lk_sid 56 | exit 1 57 | } 58 | } 59 | 60 | proc send_concmd {s} { 61 | global lk_sid 62 | send -i $lk_sid "$s\n" 63 | } 64 | 65 | proc await_con_prompt {step} { 66 | global lk_sid con_prompt 67 | await_prompt lk_sid con_prompt $step 68 | } 69 | 70 | proc concmd {step cmd} { 71 | send_concmd $cmd 72 | await_con_prompt $step 73 | } 74 | 75 | proc send_sshcmd {s} { 76 | global ssh_sid 77 | send -i $ssh_sid "$s\n" 78 | } 79 | 80 | proc await_ssh_prompt {step} { 81 | global ssh_sid ssh_prompt 82 | await_prompt ssh_sid 
ssh_prompt $step 83 | } 84 | 85 | proc sshcmd {step cmd} { 86 | send_sshcmd $cmd 87 | await_ssh_prompt $step 88 | } 89 | 90 | # Run $cmd at 1s intervals until we get $want or timeout 91 | # $sidvar names a variable containing sid to send/receive on 92 | # $promptvar names a variable containing the expected prompt 93 | # $step is used for logging 94 | proc await_cmd_output {sidvar promptvar step cmd want} { 95 | upvar #0 $sidvar sid $promptvar prompt 96 | send -i $sid "$cmd\n" 97 | set retries 0 98 | expect -i $sid -timeout 300 \ 99 | $want { 100 | puts "SUCCESS $step: $cmd => $want" 101 | } $prompt { 102 | set retries [expr $retries + 1] 103 | sleep 1 104 | send -i $sid "$cmd\n" 105 | exp_continue -continue_timer 106 | } timeout { 107 | puts "FAIL $step (timeout)" 108 | kill ssh_sid lk_sid 109 | exit 1 110 | } 111 | await_prompt $sidvar $promptvar "$step (after $retries attempts)" 112 | } 113 | 114 | proc await_sshcmd_output {step cmd want} { 115 | global ssh_sid ssh_prompt 116 | await_cmd_output ssh_sid ssh_prompt $step $cmd $want 117 | } 118 | 119 | boot_linuxkit 120 | 121 | await_con_prompt "boot" 122 | 123 | concmd "ifconfig" "ifconfig eth0" 124 | concmd "list containers" "ctr -n services.linuxkit container ls" 125 | 126 | set retries 0 127 | ssh_into_kubelet 128 | # provide ssh_sid as an indirect, allowing ssh to be respawned, which 129 | # changes the id, we need this in case ssh cannot immediately connect. 130 | expect -i ssh_sid \ 131 | $ssh_prompt { 132 | puts "SUCCESS connected to ssh (after $retries attempts)" 133 | } "read: Connection reset by peer" { 134 | # ssh happened too soon, wait a bit. 135 | set retries [expr $retries + 1] 136 | #puts "RETRY:$retries ssh (conn reset)" 137 | wait -i $ssh_sid 138 | sleep 1 139 | ssh_into_kubelet 140 | exp_continue -continue_timer 141 | } eof { 142 | set retries [expr $retries + 1] 143 | #puts "RETRY:$retries ssh (eof)" 144 | wait -i $ssh_sid 145 | sleep 1 146 | ssh_into_kubelet 147 | exp_continue -continue_timer 148 | } timeout { 149 | puts "FAIL ssh (timeout)" 150 | kill ssh_sid lk_sid 151 | exit 1 152 | } 153 | 154 | puts "RUN kubeadm-init.sh" 155 | send_sshcmd "kubeadm-init.sh" 156 | 157 | # Written as N*5m with logging to avoid "10 mins with no output" from CI 158 | set retries 0 159 | set maxretries 10 160 | expect -i $ssh_sid -timeout 300 \ 161 | "Your Kubernetes master has initialized successfully!" { 162 | puts "SUCCESS cluster initialised!" 
163 | } $ssh_prompt { 164 | puts "FAIL kubeadm-init.sh (returned to prompt)" 165 | kill ssh_sid lk_sid 166 | exit 1 167 | } timeout { 168 | set retries [expr $retries + 1] 169 | if [expr $retries < $maxretries] { 170 | #puts "RETRY:$retries kubeadm-init.sh (timeout)" 171 | exp_continue 172 | } 173 | puts "FAIL kubeadm-init.sh (timeout)" 174 | kill ssh_sid lk_sid 175 | exit 1 176 | } eof { 177 | puts "FAIL kubeadm-init.sh (eof)" 178 | kill ssh_sid lk_sid 179 | exit 1 180 | } 181 | await_ssh_prompt "kubeadm-init.sh" 182 | 183 | sshcmd "status" "kubectl get -o wide nodes ; echo ; kubectl --namespace=kube-system get -o wide pods" 184 | 185 | await_sshcmd_output "await node ready" \ 186 | {kubectl get nodes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' ; echo} \ 187 | "Ready=True" 188 | 189 | await_sshcmd_output "await kube-dns ready" \ 190 | {kubectl --namespace=kube-system get --selector='k8s-app'='kube-dns' -o jsonpath='{.items[*].status.phase}' pods ; echo} \ 191 | "Running" 192 | await_sshcmd_output "await etcd ready" \ 193 | {kubectl --namespace=kube-system get --selector='component'='etcd' -o jsonpath='{.items[*].status.phase}' pods ; echo} \ 194 | "Running" 195 | 196 | sshcmd "status" "kubectl get -o wide nodes ; echo ; kubectl --namespace=kube-system get -o wide pods" 197 | 198 | sshcmd "apply app.yaml" "kubectl apply -f /root/app.yaml" 199 | 200 | await_sshcmd_output "await alpine pod ready" \ 201 | {kubectl get pods --selector=name=alpine -o jsonpath='{.items[*].status.phase}' ; echo} \ 202 | "Running" 203 | await_sshcmd_output "await nginx pod ready" \ 204 | {kubectl get pods --selector=name=nginx -o jsonpath='{.items[*].status.phase}' ; echo} \ 205 | "Running" 206 | puts "SUCCESS application pods ready" 207 | 208 | sshcmd "status" "kubectl get -o wide nodes ; echo ; kubectl --namespace=kube-system get -o wide pods ; echo ; kubectl --namespace=default get -o wide pods" 209 | 210 | set curl [exec curl -sS http://localhost:30002] 211 | if [string match "Welcome to nginx!" $curl] { 212 | puts "FAIL nginx returned:" 213 | puts $curl 214 | kill ssh_sid lk_sid 215 | exit 1 216 | } 217 | puts "SUCCESS nginx responded well" 218 | 219 | sshcmd "cat cni config" {grep . /var/lib/cni/conf/*.conf /var/lib/cni/conf/*.conflist} 220 | sshcmd "host ifconfig" {ifconfig -a && route -n && echo && grep . /etc/resolv.conf} 221 | sshcmd "nginx ifconfig" {kubectl exec $(kubectl get pods -l name==nginx -o=jsonpath='{.items[*].metadata.name}') -- sh -c 'ifconfig -a && route -n && echo && grep . /etc/resolv.conf'} 222 | sshcmd "alpine ifconfig" {kubectl exec $(kubectl get pods -l name==alpine -o=jsonpath='{.items[*].metadata.name}') -- sh -c 'ifconfig -a && route -n && echo && grep . /etc/resolv.conf'} 223 | 224 | # This also happens to test external connectivity... 225 | sshcmd "alpine install curl" {kubectl exec $(kubectl get pods -l name==alpine -o=jsonpath='{.items[*].metadata.name}') -- apk add --update curl} 226 | 227 | # We rely on the expect -timeout to kill the infinite curl loop. The 228 | # loop is needed because it seems it sometimes takes the internal DNS 229 | # a while to settle, resulting in spurious `curl: (6) Could not 230 | # resolve host: nginx` failures. 231 | send_sshcmd {while ! kubectl exec $(kubectl get pods -l name==alpine -o=jsonpath='{.items[*].metadata.name}') -- curl -sS http://nginx/ ; do sleep 1s ; done} 232 | expect -i $ssh_sid -timeout 60 \ 233 | "Welcome to nginx!" 
{ 234 | puts "SUCCESS intra-pod networking ok" 235 | } $ssh_prompt { 236 | puts "FAIL intra-pod networking (returned to prompt)" 237 | kill ssh_sid lk_sid 238 | exit 1 239 | } timeout { 240 | puts "FAIL intra-pod networking (timeout)" 241 | kill ssh_sid lk_sid 242 | exit 1 243 | } eof { 244 | puts "FAIL intra-pod networking (eof)" 245 | kill ssh_sid lk_sid 246 | exit 1 247 | } 248 | await_ssh_prompt "intra-pod networking" 249 | 250 | kill ssh_sid 251 | 252 | puts "RUN poweroff -f" 253 | send_concmd "poweroff -f" 254 | 255 | expect -i $lk_sid \ 256 | "Power down" { 257 | puts "SUCCESS poweroff" 258 | } eof { 259 | puts "SUCCESS poweroff" 260 | } timeout { 261 | puts "FAILED poweroff (timeout)" 262 | exit 1 263 | } 264 | -------------------------------------------------------------------------------- /test/cases/000_smoke/test.yml: -------------------------------------------------------------------------------- 1 | files: 2 | - path: containers/services/kubelet/lower/root/app.yaml 3 | contents: | 4 | apiVersion: extensions/v1beta1 5 | kind: Deployment 6 | metadata: 7 | name: nginx 8 | labels: 9 | name: nginx 10 | namespace: default 11 | spec: 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | name: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginx:alpine 21 | ports: 22 | - name: www 23 | containerPort: 80 24 | volumeMounts: 25 | - mountPath: /tmp 26 | name: tmp-volume 27 | volumes: 28 | - name: tmp-volume 29 | emptyDir: 30 | medium: Memory 31 | nodeSelector: 32 | beta.kubernetes.io/os: linux 33 | --- 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | name: nginx 38 | labels: 39 | name: nginx 40 | namespace: default 41 | spec: 42 | type: NodePort 43 | ports: 44 | # the port that this service should serve on 45 | - port: 80 46 | targetPort: 80 47 | nodePort: 30002 48 | selector: 49 | name: nginx 50 | --- 51 | apiVersion: extensions/v1beta1 52 | kind: Deployment 53 | metadata: 54 | name: alpine 55 | labels: 56 | name: alpine 57 | namespace: default 58 | spec: 59 | replicas: 1 60 | template: 61 | metadata: 62 | labels: 63 | name: alpine 64 | spec: 65 | containers: 66 | - name: alpine 67 | image: alpine:3.7 68 | command: ["/bin/sh", "-c", "while : ; do sleep 1h ; done"] 69 | nodeSelector: 70 | beta.kubernetes.io/os: linux 71 | -------------------------------------------------------------------------------- /test/cases/group.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # NAME: kubernetes 3 | # SUMMARY: LinuxKit/Kubernetes regression tests 4 | 5 | # Source libraries. Uncomment if needed/defined 6 | # . "${RT_LIB}" 7 | #. "${RT_PROJECT_ROOT}/_lib/lib.sh" 8 | 9 | group_init() { 10 | # Group initialisation code goes here 11 | return 0 12 | } 13 | 14 | group_deinit() { 15 | # Group de-initialisation code goes here 16 | return 0 17 | } 18 | 19 | CMD=$1 20 | case $CMD in 21 | init) 22 | group_init 23 | res=$? 24 | ;; 25 | deinit) 26 | group_deinit 27 | res=$? 
28 | ;; 29 | *) 30 | res=1 31 | ;; 32 | esac 33 | 34 | exit $res 35 | -------------------------------------------------------------------------------- /yml/bridge.yml: -------------------------------------------------------------------------------- 1 | onboot: 2 | - name: bridge 3 | image: busybox:latest 4 | command: 5 | - "/bin/sh" 6 | - "-c" 7 | - | 8 | set -e 9 | field() { 10 | local f=$1 11 | local d=$2 12 | if [ -r "/run/config/cni.bridge/$f" ] ; then 13 | cat "/run/config/cni.bridge/$f" 14 | else 15 | echo -e "\"$d\"\\n" 16 | fi 17 | } 18 | subnet="$(field subnet '10.1.0.0/16')" 19 | gateway="$(field gateway '10.1.0.1')" 20 | ns="$(field ns '10.1.0.1')" 21 | cat </var/lib/cni/conf/10-default.conflist 22 | { 23 | "cniVersion": "0.3.1", 24 | "name": "default", 25 | "plugins": [ 26 | { 27 | "type": "bridge", 28 | "bridge": "cni0", 29 | "isDefaultGateway": true, 30 | "ipMasq": true, 31 | "hairpinMode": true, 32 | "ipam": { 33 | "type": "host-local", 34 | "subnet": $subnet, 35 | "gateway": $gateway 36 | }, 37 | "dns": { 38 | "nameservers": [$ns] 39 | } 40 | }, 41 | { 42 | "type": "portmap", 43 | "capabilities": { 44 | "portMappings": true 45 | }, 46 | "snat": true 47 | } 48 | ] 49 | } 50 | EOF 51 | cat </var/lib/cni/conf/99-loopback.conf 52 | { 53 | "cniVersion": "0.2.0", 54 | "type": "loopback" 55 | } 56 | EOF 57 | if [ -r "/run/config/cni.bridge/debug-show-configs" ] ; then 58 | grep . /var/lib/cni/conf/*.conf /var/lib/cni/conf/*.conflist 59 | fi 60 | runtime: 61 | mkdir: ["/var/lib/cni/conf"] 62 | binds: 63 | - /var/lib:/var/lib 64 | - /run/config:/run/config 65 | -------------------------------------------------------------------------------- /yml/cri-containerd-master.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxkit/kubernetes/7622bd4525d195de84e6fb75b0e9ca365e65c780/yml/cri-containerd-master.yml -------------------------------------------------------------------------------- /yml/cri-containerd.yml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: cri-containerd 3 | image: linuxkit/cri-containerd:74cb328b786d5cada9ddfca0097675b51c7e7d93 4 | cgroupsPath: podruntime/cri-containerd 5 | files: 6 | - path: /etc/kubelet.sh.conf 7 | contents: | 8 | KUBELET_ARGS="--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-containerd.sock" 9 | -------------------------------------------------------------------------------- /yml/docker-master.yml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: kubernetes-docker-image-cache-control-plane 3 | image: linuxkit/kubernetes-docker-image-cache-control-plane:698faae3de953d7fc0f009360bcfce98497afe76 4 | cgroupsPath: podruntime/control-cache 5 | -------------------------------------------------------------------------------- /yml/docker.yml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: docker 3 | image: docker:18.03.0-ce-dind 4 | capabilities: 5 | - all 6 | pid: host 7 | mounts: 8 | - type: bind 9 | source: /sys/fs/cgroup 10 | destination: /sys/fs/cgroup 11 | options: ["rw","rbind","rshared","nosuid","noexec","nodev","relatime"] 12 | binds: 13 | - /dev:/dev 14 | - /etc/resolv.conf:/etc/resolv.conf 15 | - /etc/os-release:/etc/os-release 16 | - /lib/modules:/lib/modules 17 | - /run:/run 18 | - /var:/var:rshared,rbind 19 | - /var/lib/kubeadm:/etc/kubernetes 20 | - 
/var/lib/cni/conf:/etc/cni/net.d:rshared,rbind 21 | - /var/lib/cni/bin:/opt/cni/bin:rshared,rbind 22 | - /var/lib/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:rshared,rbind 23 | rootfsPropagation: shared 24 | command: ["/usr/local/bin/docker-init", "/usr/local/bin/dockerd"] 25 | runtime: 26 | mkdir: ["/var/lib/kubeadm", "/var/lib/cni/conf", "/var/lib/cni/bin", "/var/lib/kubelet-plugins"] 27 | cgroupsPath: podruntime/docker 28 | - name: kubernetes-docker-image-cache-common 29 | image: linuxkit/kubernetes-docker-image-cache-common:2da947148638cbbef869215cdb0e572c0402833c 30 | cgroupsPath: podruntime/common-cache 31 | files: 32 | - path: /etc/kubelet.sh.conf 33 | contents: "" 34 | -------------------------------------------------------------------------------- /yml/kube.yml: -------------------------------------------------------------------------------- 1 | kernel: 2 | image: linuxkit/kernel:4.14.40 3 | cmdline: "console=tty0 console=ttyS0" 4 | init: 5 | - linuxkit/init:c79d7587fcd0a195b8a3ecafe428a30e735cf2b4 6 | - linuxkit/runc:d659de11767a419319b175700a7c6f64b8704f8c 7 | - linuxkit/containerd:018fc633223d8a49f650da365603a5abccc6a423 8 | - linuxkit/ca-certificates:f882e9be933fac737bf1f4d303a4bb49a12f302f 9 | onboot: 10 | - name: sysctl 11 | image: linuxkit/sysctl:2a98cb7a116d4d8a71498cea0e0ad8116a9b5a3b 12 | binds: 13 | - /etc/sysctl.d/01-kubernetes.conf:/etc/sysctl.d/01-kubernetes.conf 14 | readonly: false 15 | - name: sysfs 16 | image: linuxkit/sysfs:dc7b876f395fa44c2b93bad6b987e418497c5b34 17 | - name: dhcpcd 18 | image: linuxkit/dhcpcd:193a81bd4a93779c8a048d66e0cb1d201d0ae102 19 | command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"] 20 | - name: metadata 21 | image: linuxkit/metadata:6962be42ec291db67ac9bb4267d8fd06fd464f48 22 | - name: format 23 | image: linuxkit/format:9a537dd3eaefd02dbc01c598b843fba33da8c1a5 24 | - name: mounts 25 | image: linuxkit/mount:a718496fa5ea2a7a9f7552eac64c7f3699fb6a86 26 | command: ["/usr/bin/mountie", "/var/lib/"] 27 | services: 28 | - name: getty 29 | image: linuxkit/getty:3fa8e2240c1392ba4af508d3e6be8548443b12cc 30 | env: 31 | - INSECURE=true 32 | cgroupsPath: systemreserved/getty 33 | - name: rngd 34 | image: linuxkit/rngd:aaa9a63cbc9c04421b160b85aef4df5fa5d0f5f0 35 | cgroupsPath: systemreserved/rngd 36 | - name: ntpd 37 | image: linuxkit/openntpd:413ee972bc71a66030c50bc8daf7385e5c8ea269 38 | cgroupsPath: systemreserved/ntpd 39 | - name: sshd 40 | image: linuxkit/sshd:5544de2376475f6685e12bdc10bfe49f4695873a 41 | cgroupsPath: systemreserved/sshd 42 | - name: kubelet 43 | image: linuxkit/kubelet:9aed4553dba72f8424da7b3b3029e3974a5bea7b 44 | cgroupsPath: podruntime/kubelet 45 | files: 46 | - path: etc/linuxkit.yml 47 | metadata: yaml 48 | - path: /etc/kubernetes 49 | symlink: "/var/lib/kubeadm" 50 | - path: /etc/os-release 51 | contents: | 52 | PRETTY_NAME="LinuxKit Kubernetes Project" 53 | - path: /usr/libexec/kubernetes/kubelet-plugins 54 | symlink: "/var/lib/kubelet-plugins" 55 | - path: /etc/kubeadm/ 56 | directory: true 57 | - path: /etc/sysctl.d/01-kubernetes.conf 58 | contents: 'net.ipv4.ip_forward = 1' 59 | - path: /etc/cni/net.d 60 | directory: true 61 | - path: /opt/cni/bin 62 | directory: true 63 | - path: root/.ssh/authorized_keys 64 | source: ~/.ssh/id_rsa.pub 65 | mode: "0600" 66 | optional: true 67 | trust: 68 | org: 69 | - linuxkit 70 | - library 71 | -------------------------------------------------------------------------------- /yml/weave.yml: 
-------------------------------------------------------------------------------- 1 | files: 2 | - path: /etc/kubeadm/kube-system.init/50-weave.yaml 3 | source: kube-weave.yaml 4 | --------------------------------------------------------------------------------