├── .gitignore
├── Dockerfile
├── INSTALL.md
├── LICENSE
├── Makefile
├── Makefile.e2e
├── README.md
├── chart
└── knoc
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── templates
│ ├── _helpers.tpl
│ ├── pod.yaml
│ ├── remote-secret.yaml
│ ├── serviceaccount.yaml
│ └── setup_kubeconfig.yaml
│ └── values.yaml
├── cmd
└── virtual-kubelet
│ ├── main.go
│ └── ocagent.go
├── common
├── common.go
└── utils.go
├── deploy
├── argo-install.yaml
├── base.yml
├── knoc-cfg.json
├── knoc-crt.pem
├── knoc-key.pem
├── pod.yml
├── setup_kubeconfig.yaml
└── skaffold.yml
├── door
├── door.go
└── types.go
├── examples
├── argo-dag-coinflip.yaml
├── argo-dag-diamond.yaml
├── argo-dag-intentionalFail.yaml
├── argo-dag-nested.yaml
├── argo-npb-mpi-test.yaml
├── argo-test_bp.yaml
├── argo-workflow-sample.yaml
├── busyecho.yaml
├── busyecho_k8s.yaml
├── busyecho_with_cm.yaml
└── configMap.yaml
├── go.mod
├── go.sum
├── internal
├── README.md
├── expansion
│ ├── LICENSE
│ ├── README.md
│ ├── expand.go
│ └── expand_test.go
├── manager
│ ├── doc.go
│ ├── resource.go
│ └── resource_test.go
├── podutils
│ └── env.go
└── test
│ ├── e2e
│ ├── framework
│ │ ├── framework.go
│ │ ├── node.go
│ │ ├── pod.go
│ │ └── stats.go
│ └── main_test.go
│ ├── suite
│ ├── suite.go
│ └── suite_test.go
│ └── util
│ ├── kubernetes.go
│ └── provider.go
├── knoc.go
├── media
├── Dark.png
├── Light.png
├── darkcrop.png
├── knoc-env.png
└── lightcrop.png
├── remote.go
└── test
├── README.md
└── e2e
├── README.md
├── basic.go
├── knoc_test.go
└── suite.go
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.dll
4 | *.so
5 | *.dylib
6 | bin/
7 |
8 | # Certificates
9 | # *.pem
10 | deploy/remote-secret.yml
11 | deploy/sealed-remote-secret-crd.yaml
12 |
13 | doodling.sh
14 |
15 | # Test binary, build with `go test -c`
16 | *.test
17 |
18 | # Output of the go coverage tool, specifically when used with LiteIDE
19 | *.out
20 |
21 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
22 | .glide/
23 |
24 | /bin
25 | /dist
26 | /build
27 | /cover
28 |
29 | # Test credentials file
30 | credentials.json
31 |
32 | # Test loganalytics file
33 | loganalytics.json
34 |
35 | # VS Code files
36 | .vscode/
37 |
38 | # IntelliJ Goland files
39 | .idea
40 |
41 | # Terraform ignores
42 | **/.terraform/**
43 | **/terraform-provider-kubernetes
44 | **/*.tfstate*
45 | debug
46 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:latest as builder
2 | RUN apt-get update && apt-get install -y golang-go build-essential git
3 | WORKDIR /build
4 | COPY . .
5 | RUN go mod tidy && go get
6 | RUN make build
7 |
8 | FROM ubuntu:latest
9 | RUN apt-get update && apt-get install -y openssh-server sudo curl
10 | RUN useradd --create-home --shell /bin/bash user0 && echo "user0:user0" | chpasswd && adduser user0 sudo && mkdir -p /home/user0/.ssh
11 |
12 | WORKDIR /home/user0
13 |
14 | ENV APISERVER_CERT_LOCATION /home/user0/knoc-crt.pem
15 | ENV APISERVER_KEY_LOCATION /home/user0/knoc-key.pem
16 | ENV KUBELET_PORT 10250
17 |
18 | # Copy the configuration file for the knoc provider.
19 | COPY --from=builder /build/deploy/knoc-cfg.json /home/user0/knoc-cfg.json
20 | # Copy the certificate for the HTTPS server.
21 | COPY --from=builder /build/deploy/knoc-crt.pem /home/user0/knoc-crt.pem
22 | # Copy the private key for the HTTPS server.
23 | COPY --from=builder /build/deploy/knoc-key.pem /home/user0/knoc-key.pem
24 |
25 | COPY --from=builder /build/bin/virtual-kubelet /usr/local/bin/virtual-kubelet
26 | COPY --from=builder /build/bin/door /usr/local/bin/door
27 |
28 | RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
29 | RUN chmod +x kubectl && \
30 | mv ./kubectl /usr/local/bin/kubectl
31 |
32 | USER user0
33 | CMD ["/usr/local/bin/virtual-kubelet"]
34 |
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
1 | 1. [Install Dependencies](#install-dependencies)
2 | - [Docker](#docker)
3 | - [Minikube](#minikube)
4 | - [Helm](#helm)
5 | 2. [Deployment](#deployment)
6 | 1. [Create a single-node Kubernetes cluster with Minikube](#start-minikube)
7 | 2. [Deploy KNoC as a Kubernetes node](#deploy-knoc)
8 | 3. [Testing](#testing-our-deployment)
9 | 4. [Uninstall KNoC and Minikube](#tear-down)
10 |
11 | # Install Dependencies
12 |
13 | ## Docker
14 | Install docker following the guide [here](https://docs.docker.com/engine/install/).
15 |
16 | Next start docker service and enable the permissions needed for the user
17 | ```bash
18 | systemctl start docker
19 | sudo groupadd docker
20 | sudo usermod -aG docker $(whoami)
21 | ```
22 |
23 | Log out and log back in so that your group membership is re-evaluated.
24 | OR
25 | (if in linux) run ```newgrp docker```
26 |
27 | - you can check that docker is installed and running by running ```docker ps```
28 | - you should see NO container running and NO message related to "permission denied"
29 |
30 | ## Minikube
31 | ```bash
32 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
33 | sudo install minikube-linux-amd64 /usr/local/bin/minikube
34 | ```
35 |
42 |
43 | ## Helm
44 | In case you have curl installed...
45 | ```bash
46 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
47 | ```
48 | or alternatively..
49 | ```bash
50 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
51 | chmod 700 get_helm.sh
52 | ./get_helm.sh
53 | ```
54 |
55 | # Deployment
56 | ## Start Minikube
57 | In order to create a minikube single-node-cluster,
58 | you need to specify the minikube profile and the ip where the minikube's Kubernetes will listen to, for external requests.
59 |
60 | ### First we need to set the environment:
61 |
62 | ```bash
63 | export MINIKUBE_PROFILE=knoc
64 | export ADVERTISED_HOST=139.91.92.71
65 | export API_SERVER_PORT=8443
66 | export PROXY_API_SERVER_PORT=38080
67 | export KUBE_PROXY=${ADVERTISED_HOST}:${PROXY_API_SERVER_PORT}
68 | ```
69 |
70 | - optionally, do the following steps in case this is not your first Kubernetes configuration:
71 | ```bash
72 | # save previous kubeconfig and prepare the path for the new kubeconfig generated by minikube
73 | export LAST_KUBECONFIG="${KUBECONFIG:=/home/$(whoami)/.kube/config}"
74 | export KUBECONFIG="${HOME}/.kube/configs/${MINIKUBE_PROFILE}.yaml"
75 | ```
76 | ### Now we are ready to start minikube:
77 | ```bash
78 | minikube start -p ${MINIKUBE_PROFILE} --kubernetes-version=v1.19.13 --apiserver-ips=${ADVERTISED_HOST}
79 | # To point your shell to minikube's docker-daemon, run
80 | eval $(minikube -p $MINIKUBE_PROFILE docker-env)
81 |
82 | # Expose Kubernetes API server from minikube, using socat.
83 | # This is required for the argo executor that needs connection to the K8s Api server
84 | # socat forwards traffic arriving on the advertised host's proxy port
85 | # to the IP of the minikube VM's Kubernetes API server
86 | socat TCP-LISTEN:${PROXY_API_SERVER_PORT},fork TCP:$(minikube -p $MINIKUBE_PROFILE ip):${API_SERVER_PORT} &
87 | ```
88 | We can use the Kubernetes CLI that minikube provides:
89 | ```bash
90 | alias kubectl='minikube -p knoc kubectl --'
91 | ```
92 |
93 | if you want to have kubectl pinned to minikube's specific version, you can save the command below to your ```~/.bashrc```
94 | ```bash
95 |
96 | echo "alias kubectl='minikube -p knoc kubectl --'" >> /home/$(whoami)/.bashrc
97 |
98 | #refresh your terminal session
99 | # i.e. in bash
100 | # . ~/.bashrc
101 | ```
102 |
103 | ## Deploy KNOC
104 |
105 | Before using kubectl or docker commands, you have to first configure the terminal session you are in, with the command below:
106 | ```bash
107 | export HELM_RELEASE=knoc
108 | export SLURM_CLUSTER_IP=139.91.92.100
109 | export SLURM_CLUSTER_USER=$(whoami)
110 | export SLURM_CLUSTER_SSH_PRIV=/home/${SLURM_CLUSTER_USER}/.ssh/id_rsa
111 | ```
112 | #################################################################
113 | #### Make sure that you can login on the remote side using your private ssh key
114 | #################################################################
115 | ```bash
116 | # Download the source code
117 | git clone git@github.com:CARV-ICS-FORTH/KNoC.git
118 | cd KNoC
119 |
120 | #build the container
121 | docker build -t malvag/knoc:latest .
122 |
123 | # setup vanilla argo in our cluster (Argo 3.0.2) that uses a slightly modified version of k8sapi-executor
124 | kubectl create ns argo
125 | kubectl apply -n argo -f deploy/argo-install.yaml
126 |
127 |
128 | # And now you can run this
129 |
130 | helm upgrade --install --debug --wait $HELM_RELEASE chart/knoc --namespace default \
131 | --set knoc.k8sApiServer=https://${KUBE_PROXY} \
132 | --set knoc.remoteSecret.address=${SLURM_CLUSTER_IP} \
133 | --set knoc.remoteSecret.user=${SLURM_CLUSTER_USER} \
134 | --set knoc.remoteSecret.kubeContext=$(kubectl config current-context) \
135 | --set knoc.remoteSecret.privkey="$(cat ${SLURM_CLUSTER_SSH_PRIV})"
136 |
137 | ```
138 |
139 | ## Testing our deployment
140 | You can test that the deployment is working by submitting a sample workflow to Argo:
141 | ```bash
142 | kubectl create -f examples/argo-workflow-sample.yaml
143 |
144 | -- example output:
145 | workflow.argoproj.io/steps-pzvmd created
146 | ```
147 | You can check that everything works fine by executing the following command after a minute or two:
148 | ```bash
149 | kubectl get pods # List all pods in the namespace
150 |
151 | NAME READY STATUS RESTARTS AGE
152 | sample-workflow-bws9f 0/2 Completed 0 6m
153 | ```
154 | You can expect the final state to be "Completed".
155 |
156 | ### Delete the sample workflow
157 | ```
158 | kubectl delete workflow $(kubectl get workflow --no-headers | cut -f1 -d' ')
159 | ```
160 |
161 | # Tear down
162 |
163 | ## Remove knoc
164 | ```bash
165 | helm uninstall --wait $HELM_RELEASE
166 | ```
167 | In case you want to clean everything from the remote side:
168 |
169 | ```bash
170 | # Clean slurm outputs and door executable
171 | rm -f slurm-*.out door
172 | # now let's clean door logs, kubernetes associated files and generated scripts
173 | rm -rf .knoc .tmp
174 | ```
175 |
176 | ## Delete minikube's profile
177 | This command deletes the whole minikube-vm that includes the Kubernetes and the Docker deployments inside the vm.
178 |
179 | ```bash
180 | minikube stop -p $MINIKUBE_PROFILE
181 | minikube delete -p $MINIKUBE_PROFILE
182 | # revert back to the old kubeconfig if you need to..
183 | export KUBECONFIG=${LAST_KUBECONFIG}
184 | ```
185 |
186 | ## Remove minikube and its data
187 | ```bash
188 | minikube stop -p $MINIKUBE_PROFILE; minikube delete -p $MINIKUBE_PROFILE
189 |
190 | # ++ optionally: if you run minikube on docker
191 | unset DOCKER_HOST
192 | unset DOCKER_TLS_VERIFY
193 | unset DOCKER_CERT_PATH
194 | docker stop $(docker ps -aq)
195 | # ++
196 |
197 | rm -r ~/.kube ~/.minikube
198 | sudo rm /usr/local/bin/minikube
199 | sudo rm /usr/local/bin/helm
200 |
201 | systemctl stop '*kubelet*.mount'
202 | sudo rm -rf /etc/kubernetes/
203 |
204 | # ++ optionally: if you run minikube on docker
205 | docker system prune -af --volumes
206 | systemctl stop docker
207 | # ++
208 | ```
209 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | LINTER_BIN ?= golangci-lint
2 |
3 | GO111MODULE := on
4 | export GO111MODULE
5 | # include Makefile.e2e
6 |
7 | .PHONY: build
8 | build: clean bin/virtual-kubelet bin/door
9 |
10 | .PHONY: door_clean clean
11 |
12 | door_clean:
13 | @${RM} bin/door
14 |
15 | bin/door: BUILD_VERSION ?= $(shell git describe --tags --always --dirty="-dev")
16 | bin/door: BUILD_DATE ?= $(shell date -u '+%Y-%m-%d-%H:%M UTC')
17 | bin/door: VERSION_FLAGS := -ldflags='-X "main.buildVersion=$(BUILD_VERSION)" -X "main.buildTime=$(BUILD_DATE)"'
18 | bin/door: door/door.go door/types.go
19 | CGO_ENABLED=0 go build -ldflags '-extldflags "-static"' -o bin/door $(VERSION_FLAGS) door/door.go door/types.go
20 |
21 | .PHONY: clean
22 | clean: files := bin/virtual-kubelet
23 | clean: door_clean
24 | @${RM} $(files) &>/dev/null || exit 0
25 |
26 | .PHONY: test
27 | test:
28 | @echo running tests
29 | go test -v ./...
30 |
31 | .PHONY: vet
32 | vet:
33 | @go vet ./... #$(packages)
34 |
35 | .PHONY: lint
36 | lint:
37 | @$(LINTER_BIN) run --new-from-rev "HEAD~$(git rev-list master.. --count)" ./...
38 |
39 | .PHONY: check-mod
40 | check-mod: # verifies that module changes for go.mod and go.sum are checked in
41 | @hack/ci/check_mods.sh
42 |
43 | .PHONY: mod
44 | mod:
45 | @go mod tidy
46 |
47 | bin/virtual-kubelet: BUILD_VERSION ?= $(shell git describe --tags --always --dirty="-dev")
48 | bin/virtual-kubelet: BUILD_DATE ?= $(shell date -u '+%Y-%m-%d-%H:%M UTC')
49 | bin/virtual-kubelet: VERSION_FLAGS := -ldflags='-X "main.buildVersion=$(BUILD_VERSION)" -X "main.buildTime=$(BUILD_DATE)"'
50 |
51 | bin/%:
52 | CGO_ENABLED=0 go build -ldflags '-extldflags "-static"' -o bin/$(*) $(VERSION_FLAGS) ./cmd/$(*)
53 |
54 |
55 | # # skaffold deploys the virtual-kubelet to the Kubernetes cluster targeted by the current kubeconfig using skaffold.
56 | # # The current context (as indicated by "kubectl config current-context") must be one of "minikube" or "docker-for-desktop".
57 | # # MODE must be set to one of "dev" (default), "delete" or "run", and is used as the skaffold command to be run.
58 | # .PHONY: skaffold
59 | # skaffold: MODE ?= dev
60 | # .SECONDEXPANSION:
61 | # skaffold: skaffold/$$(MODE)
62 |
63 | # .PHONY: skaffold/%
64 | # skaffold/%: PROFILE := local
65 | # skaffold/%: skaffold.validate
66 | # skaffold $(*) \
67 | # -f $(PWD)/deploy/skaffold.yml \
68 | # -p $(PROFILE)
69 |
70 | # skaffold/run skaffold/dev: bin/virtual-kubelet
71 |
72 | # container: PROFILE := local
73 | # container: skaffold.validate
74 | # skaffold build --platform=linux/amd64 -f $(PWD)/deploy/skaffold.yml \
75 | # -p $(PROFILE)
--------------------------------------------------------------------------------
/Makefile.e2e:
--------------------------------------------------------------------------------
1 | .PHONY: skaffold.validate
2 | skaffold.validate: kubectl_context := $(shell kubectl config current-context)
3 | skaffold.validate:
4 | @if [[ ! "minikube,docker-for-desktop,docker-desktop,knoc-playground" =~ .*"$(kubectl_context)".* ]]; then \
5 | 		echo current-context is [$(kubectl_context)]. Must be one of [minikube,docker-for-desktop,docker-desktop,knoc-playground]; \
6 | false; \
7 | fi
8 |
9 | # skaffold deploys the virtual-kubelet to the Kubernetes cluster targeted by the current kubeconfig using skaffold.
10 | # The current context (as indicated by "kubectl config current-context") must be one of "minikube" or "docker-for-desktop".
11 | # MODE must be set to one of "dev" (default), "delete" or "run", and is used as the skaffold command to be run.
12 | .PHONY: skaffold
13 | skaffold: MODE ?= dev
14 | .SECONDEXPANSION:
15 | skaffold: skaffold/$$(MODE)
16 |
17 | .PHONY: skaffold/%
18 | skaffold/%: PROFILE := local
19 | skaffold/%: skaffold.validate
20 | skaffold $(*) \
21 | -f $(PWD)/virtual-kubelet/skaffold.yml \
22 | -p $(PROFILE)
23 |
24 | skaffold/run skaffold/dev: bin/e2e/virtual-kubelet
25 |
26 | bin/e2e:
27 | @mkdir -p bin/e2e
28 |
29 | bin/e2e/virtual-kubelet: bin/e2e
30 | GOOS=linux GOARCH=amd64 $(MAKE) OUTPUT_DIR=$(@D) build
31 |
32 | # e2e runs the end-to-end test suite against the Kubernetes cluster targeted by the current kubeconfig.
33 | # It automatically deploys the virtual-kubelet with the knoc provider by running "make skaffold MODE=run".
34 | # It is the caller's responsibility to cleanup the deployment after running this target (e.g. by running "make skaffold MODE=delete").
35 | .PHONY: e2e
36 | e2e: KUBECONFIG ?= $(HOME)/.kube/config
37 | e2e: NAMESPACE := default
38 | e2e: NODE_NAME := vkubelet-knoc-0
39 | e2e: export VK_BUILD_TAGS += knoc_provider
40 | e2e: e2e.clean bin/e2e/virtual-kubelet skaffold/run
41 | @echo Running tests...
42 | cd $(PWD)/internal/test/e2e && go test -v -timeout 5m -tags e2e ./... \
43 | -kubeconfig=$(KUBECONFIG) \
44 | -namespace=$(NAMESPACE) \
45 | -node-name=$(NODE_NAME)
46 | @$(MAKE) e2e.clean
47 |
48 | .PHONY: e2e.clean
49 | e2e.clean: NODE_NAME ?= vkubelet-knoc-0
50 | e2e.clean: skaffold/delete
51 | kubectl delete --ignore-not-found node $(NODE_NAME); \
52 | if [ -f bin/e2e/virtual-kubelet ]; then rm bin/e2e/virtual-kubelet; fi
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # KNoC - A Kubernetes Node to manage container lifecycle on HPC clusters
2 |
8 |
9 |
10 | KNoC is a [Virtual Kubelet](https://github.com/virtual-kubelet/virtual-kubelet) Provider implementation that manages real pods and containers in a remote container runtime by supporting the lifecycle management of pods, containers and other resources in the context of Kubernetes.
11 |
12 | [Virtual Kubelet](https://github.com/virtual-kubelet/virtual-kubelet) is an open source [Kubernetes](https://kubernetes.io/) kubelet implementation that masquerades as a kubelet for the purposes of connecting Kubernetes to other APIs.
13 |
14 | Remote environments include [Singularity](https://sylabs.io/singularity/) container runtime utilizing [Slurm's](https://slurm.schedmd.com/) resource management and job scheduling
15 |
16 | ## Features
17 | - Create, delete and update pods
18 | - Container logs and exec
19 | - Get pod, pods and pod status
20 | - Support for EmptyDirs, Secrets and ConfigMaps
21 |
22 | 
23 |
24 | ## Installation
25 | You can find all relative information in [INSTALL](https://github.com/CARV-ICS-FORTH/KNoC/blob/master/INSTALL.md).
26 |
27 | ## Acknowledgements
28 |
29 | We thankfully acknowledge the support of the European Commission and the Greek General Secretariat for Research and Innovation under the European High-Performance Computing Joint Undertaking (JU) through projects EUROCC (GA-951732), DEEP-SEA (GA-955606), and EUPEX (GA-101033975). The JU receives support from the European Union's Horizon 2020 research and innovation programme and France, Germany, Italy, Greece, United Kingdom, the Czech Republic, and Croatia. National contributions from the involved state members (including the Greek General Secretariat for Research and Innovation) match the JU's funding.
30 |
--------------------------------------------------------------------------------
/chart/knoc/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/chart/knoc/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: KNoC
3 | description: A KNoC chart for Kubernetes
4 |
5 | type: application
6 |
7 | # This is the chart version. This version number should be incremented each time you make changes
8 | # to the chart and its templates, including the app version.
9 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
10 | version: 0.1.0
11 |
12 | # This is the version number of the application being deployed. This version number should be
13 | # incremented each time you make changes to the application. Versions are not expected to
14 | # follow Semantic Versioning. They should reflect the version the application is using.
15 | # It is recommended to use it with quotes.
16 | appVersion: "0.9.4"
17 |
--------------------------------------------------------------------------------
/chart/knoc/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "chart.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "chart.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "chart.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "chart.labels" -}}
37 | helm.sh/chart: {{ include "chart.chart" . }}
38 | {{ include "chart.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "chart.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "chart.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "chart.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "chart.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
--------------------------------------------------------------------------------
/chart/knoc/templates/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: {{ .Release.Name }}-vkubelet
5 | spec:
6 | containers:
7 | - name: vk-{{ .Release.Name }}
8 | image: {{.Values.image.repository }}
9 | imagePullPolicy: {{.Values.image.pullPolicy}}
10 | args:
11 | - virtual-kubelet
12 | - --nodename
13 | - vk-knoc
14 | - --provider
15 | - knoc
16 | - --provider-config
17 | - /home/user0/knoc-cfg.json
18 | - --startup-timeout
19 | - 10s
20 | - --klog.v
21 | - "2"
22 | - --klog.logtostderr
23 | - --log-level
24 | - {{ .Values.knoc.logLevel }}
25 | volumeMounts:
26 | - name: kubeconfig-setup
27 | mountPath: /home/user0/scripts/
28 | env:
29 | - name: KUBELET_PORT
30 | value: "10250"
31 | - name: VKUBELET_POD_IP
32 | valueFrom:
33 | fieldRef:
34 | fieldPath: status.podIP
35 | - name: REMOTE_USER
36 | valueFrom:
37 | secretKeyRef:
38 | name: {{ .Release.Name }}-remote-secret
39 | key: remote_user
40 | - name: REMOTE_KEY
41 | valueFrom:
42 | secretKeyRef:
43 | name: {{ .Release.Name }}-remote-secret
44 | key: ssh-privatekey
45 | - name: REMOTE_HOST
46 | valueFrom:
47 | secretKeyRef:
48 | name: {{ .Release.Name }}-remote-secret
49 | key: host
50 | - name: REMOTE_PORT
51 | valueFrom:
52 | secretKeyRef:
53 | name: {{ .Release.Name }}-remote-secret
54 | key: port
55 | - name: KUBE_CURRENT_CONTEXT
56 | valueFrom:
57 | secretKeyRef:
58 | name: {{ .Release.Name }}-remote-secret
59 | key: kube_context
60 | {{- if .Values.knoc.enabledMetrics }}
61 | ports:
62 | - name: metrics
63 | containerPort: {{- if .Values.metrics_port }} {{ .Values.metrics_port }} {{- else }} 10255 {{- end }}
64 | readinessProbe:
65 | httpGet:
66 | path: /stats/summary
67 | port: metrics
68 | {{- end }}
69 | serviceAccountName: knoc
70 | volumes:
71 | - name: kubeconfig-setup
72 | configMap:
73 | name: setup-kubeconfig
74 | defaultMode: 0777
75 |
--------------------------------------------------------------------------------
/chart/knoc/templates/remote-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: {{ .Release.Name }}-remote-secret
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | release: {{ .Release.Name }}
8 | type: "Opaque"
9 | immutable: true
10 | data:
11 | host: {{ .Values.knoc.remoteSecret.address | b64enc }}
12 | port: {{ .Values.knoc.remoteSecret.port | b64enc }}
13 | remote_user: {{ .Values.knoc.remoteSecret.user | b64enc }}
14 | ssh-privatekey: {{ .Values.knoc.remoteSecret.privkey | b64enc }}
15 | kube_context: {{ .Values.knoc.remoteSecret.kubeContext | b64enc }}
--------------------------------------------------------------------------------
/chart/knoc/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: {{ .Values.serviceAccount.name }}
5 | labels:
6 | {{- include "chart.labels" . | nindent 4 }}
7 | {{- with .Values.serviceAccount.annotations }}
8 | annotations:
9 | {{- toYaml . | nindent 4 }}
10 | {{- end }}
11 | ---
12 | apiVersion: rbac.authorization.k8s.io/v1
13 | {{- if .Values.singleNamespace }}
14 | kind: Role
15 | {{ else }}
16 | kind: ClusterRole
17 | {{- end }}
18 | metadata:
19 | name: {{ .Release.Name }}-role
20 | rules:
21 | - apiGroups:
22 | - ""
23 | resources:
24 | - configmaps
25 | - secrets
26 | - services
27 | - serviceaccounts
28 | verbs:
29 | - get
30 | - list
31 | - watch
32 | - apiGroups:
33 | - ""
34 | resources:
35 | - pods
36 | verbs:
37 | - delete
38 | - get
39 | - list
40 | - watch
41 | - patch
42 | - apiGroups:
43 | - ""
44 | resources:
45 | - nodes
46 | verbs:
47 | - create
48 | - get
49 | - apiGroups:
50 | - ""
51 | resources:
52 | - nodes/status
53 | verbs:
54 | - update
55 | - patch
56 | - apiGroups:
57 | - ""
58 | resources:
59 | - pods/status
60 | verbs:
61 | - update
62 | - patch
63 | - apiGroups:
64 | - ""
65 | resources:
66 | - events
67 | verbs:
68 | - create
69 | - patch
70 | ---
71 | apiVersion: rbac.authorization.k8s.io/v1
72 | {{- if .Values.singleNamespace }}
73 | kind: RoleBinding
74 | {{ else }}
75 | kind: ClusterRoleBinding
76 | {{- end }}
77 | metadata:
78 | name: {{ .Release.Name }}-rolebinding
79 | subjects:
80 | - kind: ServiceAccount
81 | name: {{ .Values.serviceAccount.name }}
82 | namespace: {{ .Release.Namespace }}
83 | roleRef:
84 | apiGroup: rbac.authorization.k8s.io
85 | {{- if .Values.singleNamespace }}
86 | kind: Role
87 | {{ else }}
88 | kind: ClusterRole
89 | {{- end }}
90 | name: {{ .Release.Name }}-role
91 |
--------------------------------------------------------------------------------
/chart/knoc/templates/setup_kubeconfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: setup-kubeconfig
5 | data:
6 | prepare_kubeconfig.sh: |
7 | #!/bin/bash
8 | SERVICE_ACCOUNT={{ .Values.serviceAccount.name }}
9 | USER_TOKEN_NAME=$(kubectl -n {{ .Release.Namespace }} get serviceaccount ${SERVICE_ACCOUNT} -o=jsonpath='{.secrets[0].name}')
10 | USER_TOKEN_VALUE=$(kubectl -n {{ .Release.Namespace }} get secret/${USER_TOKEN_NAME} -o=go-template='{{ "{{.data.token}}" }}' | base64 --decode)
11 | # CURRENT_CONTEXT=$(kubectl config current-context)
12 | CURRENT_CONTEXT=$KUBE_CURRENT_CONTEXT
13 | CLUSTER_CA=`kubectl get secret/$USER_TOKEN_NAME -n {{ .Release.Namespace }} -o jsonpath='{.data.ca\.crt}'`
14 | CLUSTER_SERVER={{ .Values.knoc.k8sApiServer }}
15 | echo "
16 | apiVersion: v1
17 | kind: Config
18 | current-context: ${CURRENT_CONTEXT}
19 | contexts:
20 | - name: ${CURRENT_CONTEXT}
21 | context:
22 | cluster: ${CURRENT_CONTEXT}
23 | user: default
24 | namespace: {{ .Release.Namespace }}
25 | clusters:
26 | - name: ${CURRENT_CONTEXT}
27 | cluster:
28 | certificate-authority-data: ${CLUSTER_CA}
29 | server: ${CLUSTER_SERVER}
30 | users:
31 | - name: default
32 | user:
33 | token: ${USER_TOKEN_VALUE}
34 | "
--------------------------------------------------------------------------------
/chart/knoc/values.yaml:
--------------------------------------------------------------------------------
1 | replicaCount: 1
2 |
3 | image:
4 | repository: malvag/knoc
5 | pullPolicy: IfNotPresent
6 | tag: ""
7 |
8 |
9 | knoc: {
10 | enabledMetrics: false,
11 | k8sApiServer: "",
12 | logLevel: debug,
13 | remoteSecret: {
14 | address: "",
15 | port: "22",
16 | user: "",
17 | privkey: "",
18 | kubeContext: ""
19 | }
20 | }
21 |
22 | imagePullSecrets: []
23 | nameOverride: ""
24 | fullnameOverride: ""
25 |
26 | singleNamespace: false
27 |
28 | serviceAccount:
29 | create: true
30 | annotations: {}
31 | name: "knoc"
32 |
33 | # podAnnotations: {}
34 |
35 | # podSecurityContext: {}
36 | # # fsGroup: 2000
37 |
38 |
39 | # nodeSelector: {}
40 |
41 | # tolerations: []
42 |
43 | # affinity: {}
44 |
--------------------------------------------------------------------------------
/cmd/virtual-kubelet/main.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | // Copyright © 2017 The virtual-kubelet authors
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 |
16 | package main
17 |
18 | import (
19 | "context"
20 | "strings"
21 |
22 | "github.com/CARV-ICS-FORTH/knoc"
23 | "github.com/sirupsen/logrus"
24 | cli "github.com/virtual-kubelet/node-cli"
25 | logruscli "github.com/virtual-kubelet/node-cli/logrus"
26 | opencensuscli "github.com/virtual-kubelet/node-cli/opencensus"
27 | "github.com/virtual-kubelet/node-cli/opts"
28 | "github.com/virtual-kubelet/node-cli/provider"
29 | "github.com/virtual-kubelet/virtual-kubelet/log"
30 | logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
31 | "github.com/virtual-kubelet/virtual-kubelet/trace"
32 | "github.com/virtual-kubelet/virtual-kubelet/trace/opencensus"
33 | )
34 |
// Build metadata. These are intended to be overridden at link time
// (e.g. -ldflags "-X main.buildVersion=...") — NOTE(review): presumably
// set by the Makefile; confirm there.
var (
	buildVersion = "N/A"
	buildTime    = "N/A"
	k8sVersion   = "v1.15.2" // This should follow the version of k8s.io/kubernetes we are importing
)
40 |
41 | func main() {
42 | ctx, cancel := context.WithCancel(context.Background())
43 | defer cancel()
44 | ctx = cli.ContextWithCancelOnSignal(ctx)
45 |
46 | logger := logrus.StandardLogger()
47 | log.L = logruslogger.FromLogrus(logrus.NewEntry(logger))
48 | logConfig := &logruscli.Config{LogLevel: "info"}
49 |
50 | trace.T = opencensus.Adapter{}
51 | traceConfig := opencensuscli.Config{
52 | AvailableExporters: map[string]opencensuscli.ExporterInitFunc{
53 | "ocagent": initOCAgent,
54 | },
55 | }
56 |
57 | o := opts.New()
58 | o.Provider = "knoc"
59 | o.Version = strings.Join([]string{k8sVersion, "vk-knoc", buildVersion}, "-")
60 | node, err := cli.New(ctx,
61 | cli.WithBaseOpts(o),
62 | cli.WithCLIVersion(buildVersion, buildTime),
63 | cli.WithProvider("knoc", func(cfg provider.InitConfig) (provider.Provider, error) {
64 | return knoc.NewProvider(cfg.ConfigPath, cfg.NodeName, cfg.OperatingSystem, cfg.InternalIP, cfg.ResourceManager, cfg.DaemonPort)
65 | }),
66 | cli.WithPersistentFlags(logConfig.FlagSet()),
67 | cli.WithPersistentPreRunCallback(func() error {
68 | return logruscli.Configure(logConfig, logger)
69 | }),
70 | cli.WithPersistentFlags(traceConfig.FlagSet()),
71 | cli.WithPersistentPreRunCallback(func() error {
72 | return opencensuscli.Configure(ctx, &traceConfig, o)
73 | }),
74 | )
75 | if err != nil {
76 | log.G(ctx).Fatal(err)
77 | }
78 | if err := node.Run(); err != nil {
79 | log.G(ctx).Fatal(err)
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/cmd/virtual-kubelet/ocagent.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | // Copyright © 2017 The virtual-kubelet authors
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 |
16 | package main
17 |
18 | import (
19 | "os"
20 |
21 | "contrib.go.opencensus.io/exporter/ocagent"
22 | opencensuscli "github.com/virtual-kubelet/node-cli/opencensus"
23 | "github.com/virtual-kubelet/virtual-kubelet/errdefs"
24 | "go.opencensus.io/trace"
25 | )
26 |
27 | func initOCAgent(c *opencensuscli.Config) (trace.Exporter, error) {
28 | agentOpts := append([]ocagent.ExporterOption{}, ocagent.WithServiceName(c.ServiceName))
29 |
30 | if endpoint := os.Getenv("OCAGENT_ENDPOINT"); endpoint != "" {
31 | agentOpts = append(agentOpts, ocagent.WithAddress(endpoint))
32 | } else {
33 | return nil, errdefs.InvalidInput("must set endpoint address in OCAGENT_ENDPOINT")
34 | }
35 |
36 | switch os.Getenv("OCAGENT_INSECURE") {
37 | case "0", "no", "n", "off", "":
38 | case "1", "yes", "y", "on":
39 | agentOpts = append(agentOpts, ocagent.WithInsecure())
40 | default:
41 | return nil, errdefs.InvalidInput("invalid value for OCAGENT_INSECURE")
42 | }
43 |
44 | return ocagent.NewExporter(agentOpts...)
45 | }
46 |
--------------------------------------------------------------------------------
/common/common.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package common
16 |
17 | import (
18 | "time"
19 |
20 | "github.com/CARV-ICS-FORTH/knoc/internal/manager"
21 | v1 "k8s.io/api/core/v1"
22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
23 | )
24 |
// Shared constants: provider capacity defaults, tracing attribute keys,
// the on-remote layout for pod volume data, and pod lifecycle operation
// codes.
const (
	// Provider configuration defaults (used when the provider config
	// file does not specify capacities).
	DefaultCPUCapacity    = "20"
	DefaultMemoryCapacity = "100Gi"
	DefaultPodCapacity    = "20"

	// Values used in tracing as attribute keys.
	NamespaceKey     = "namespace"
	NameKey          = "name"
	ContainerNameKey = "containerName"

	// Remote layout for pod volumes: everything is placed under
	// PodVolRoot, with one subdirectory per volume kind and Unix
	// permission bits for the directories and the files inside them.
	PodVolRoot              = ".knoc/"
	PodSecretVolPerms       = 0755
	PodSecretVolDir         = "/secrets"
	PodSecretFilePerms      = 0644
	PodConfigMapVolPerms    = 0755
	PodConfigMapVolDir      = "/configmaps"
	PodConfigMapFilePerms   = 0644
	PodDownwardApiVolPerms  = 0755
	PodDownwardApiVolDir    = "/downwardapis"
	PodDownwardApiFilePerms = 0644

	// Pod lifecycle operation codes.
	CREATE = 0
	DELETE = 1
)

// KNOCProvider implements the virtual-kubelet provider state for KNoC:
// node identity, advertised configuration, and the set of pods the
// provider currently manages.
type KNOCProvider struct { // nolint:golint
	NodeName           string
	OperatingSystem    string
	InternalIP         string
	DaemonEndpointPort int32
	Pods               map[string]*v1.Pod // keyed by BuildKey: "<namespace>-<name>"
	Config             KNOCConfig
	StartTime          time.Time
	ResourceManager    *manager.ResourceManager
	Notifier           func(*v1.Pod) // NOTE(review): presumably pushes pod status updates to the node controller — confirm in knoc.go
}

// KNOCConfig holds the node capacities advertised by the provider,
// loaded from the JSON provider config file.
type KNOCConfig struct { // nolint:golint
	CPU    string `json:"cpu,omitempty"`
	Memory string `json:"memory,omitempty"`
	Pods   string `json:"pods,omitempty"`
}

// DoorContainer is a serializable description of one container — a
// subset of v1.Container's fields plus the owning pod's ObjectMeta.
// NOTE(review): presumably this is the payload sent to the remote "door"
// executor (see door/); verify against remote.go. Also note the protobuf
// field number 1 is used by both Name and Metadata — harmless for JSON,
// but it would clash if this were ever actually proto-encoded.
type DoorContainer struct {
	Name          string                  `json:"name" protobuf:"bytes,1,opt,name=name"`
	Image         string                  `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	Command       []string                `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	Args          []string                `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	WorkingDir    string                  `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	Ports         []v1.ContainerPort      `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	EnvFrom       []v1.EnvFromSource      `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	Env           []v1.EnvVar             `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	Resources     v1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	VolumeMounts  []v1.VolumeMount        `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	VolumeDevices []v1.VolumeDevice       `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	Metadata      metav1.ObjectMeta       `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
}
80 |
--------------------------------------------------------------------------------
/common/utils.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package common
16 |
17 | import (
18 | "fmt"
19 | "io"
20 | "io/fs"
21 | "os"
22 | "strings"
23 |
24 | "github.com/pkg/sftp"
25 | "github.com/sfreiberg/simplessh"
26 | v1 "k8s.io/api/core/v1"
27 | )
28 |
29 | func UploadData(client *simplessh.Client, data []byte, remote string, mode fs.FileMode) error {
30 | c, err := sftp.NewClient(client.SSHClient)
31 | if err != nil {
32 | fmt.Println("Could not connect over sftp on the remote system ")
33 | return err
34 | }
35 | defer c.Close()
36 |
37 | remoteFile, err := c.Create(remote)
38 | if err != nil {
39 | fmt.Println("Could not create file over sftp on the remote system ")
40 | return err
41 | }
42 |
43 | _, err = remoteFile.Write(data)
44 |
45 | if err != nil {
46 | fmt.Println("Could not write content on the remote system ")
47 | return err
48 | }
49 | err = c.Chmod(remote, mode)
50 | if err != nil {
51 | return err
52 | }
53 | return nil
54 | }
55 |
56 | func UploadFile(client *simplessh.Client, local string, remote string, mode fs.FileMode) error {
57 | c, err := sftp.NewClient(client.SSHClient)
58 | if err != nil {
59 | fmt.Println("Could not connect over sftp on the remote system ")
60 | return err
61 | }
62 | defer c.Close()
63 |
64 | localFile, err := os.Open(local)
65 | if err != nil {
66 | fmt.Println("Could not open local file in path: " + local)
67 | return err
68 | }
69 | defer localFile.Close()
70 |
71 | remoteFile, err := c.Create(remote)
72 | if err != nil {
73 | fmt.Println("Could not create file over sftp on the remote system ")
74 | return err
75 | }
76 |
77 | _, err = io.Copy(remoteFile, localFile)
78 | if err != nil {
79 | fmt.Println("Could not copy file on the remote system: ")
80 | return err
81 | }
82 | err = c.Chmod(remote, mode)
83 | if err != nil {
84 | return err
85 | }
86 | return nil
87 | }
88 |
// NormalizeImageName flattens a container image reference into a plain
// name: path separators ("/") become "-", and everything from the first
// ":" onward (the tag/version stamp) is dropped.
//
// Example: "docker.io/library/nginx:1.21" -> "docker.io-library-nginx".
//
// The original hand-rolled split/join loop also shadowed the strings
// package with its loop variable; this uses the stdlib directly.
func NormalizeImageName(instance_name string) string {
	// Joining the "/"-separated components with "-" is equivalent to a
	// straight replacement.
	flattened := strings.ReplaceAll(instance_name, "/", "-")
	// Keep only what precedes the first ":" (if any).
	return strings.SplitN(flattened, ":", 2)[0]
}
104 |
// BuildKeyFromNames composes the provider pod-store key
// "<namespace>-<name>". The error result is always nil; it exists so the
// signature lines up with BuildKey.
func BuildKeyFromNames(namespace string, name string) (string, error) {
	key := fmt.Sprintf("%s-%s", namespace, name)
	return key, nil
}
108 |
109 | // buildKey is a helper for building the "key" for the providers pod store.
110 | func BuildKey(pod *v1.Pod) (string, error) {
111 | if pod.ObjectMeta.Namespace == "" {
112 | return "", fmt.Errorf("pod namespace not found")
113 | }
114 |
115 | if pod.ObjectMeta.Name == "" {
116 | return "", fmt.Errorf("pod name not found")
117 | }
118 |
119 | return BuildKeyFromNames(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
120 | }
121 |
--------------------------------------------------------------------------------
/deploy/argo-install.yaml:
--------------------------------------------------------------------------------
1 | # This is an auto-generated file. DO NOT EDIT
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: clusterworkflowtemplates.argoproj.io
6 | spec:
7 | group: argoproj.io
8 | names:
9 | kind: ClusterWorkflowTemplate
10 | listKind: ClusterWorkflowTemplateList
11 | plural: clusterworkflowtemplates
12 | shortNames:
13 | - clusterwftmpl
14 | - cwft
15 | singular: clusterworkflowtemplate
16 | scope: Cluster
17 | versions:
18 | - name: v1alpha1
19 | schema:
20 | openAPIV3Schema:
21 | properties:
22 | apiVersion:
23 | type: string
24 | kind:
25 | type: string
26 | metadata:
27 | type: object
28 | spec:
29 | type: object
30 | x-kubernetes-preserve-unknown-fields: true
31 | required:
32 | - metadata
33 | - spec
34 | type: object
35 | served: true
36 | storage: true
37 | ---
38 | apiVersion: apiextensions.k8s.io/v1
39 | kind: CustomResourceDefinition
40 | metadata:
41 | name: cronworkflows.argoproj.io
42 | spec:
43 | group: argoproj.io
44 | names:
45 | kind: CronWorkflow
46 | listKind: CronWorkflowList
47 | plural: cronworkflows
48 | shortNames:
49 | - cwf
50 | - cronwf
51 | singular: cronworkflow
52 | scope: Namespaced
53 | versions:
54 | - name: v1alpha1
55 | schema:
56 | openAPIV3Schema:
57 | properties:
58 | apiVersion:
59 | type: string
60 | kind:
61 | type: string
62 | metadata:
63 | type: object
64 | spec:
65 | type: object
66 | x-kubernetes-preserve-unknown-fields: true
67 | status:
68 | type: object
69 | x-kubernetes-preserve-unknown-fields: true
70 | required:
71 | - metadata
72 | - spec
73 | type: object
74 | served: true
75 | storage: true
76 | ---
77 | apiVersion: apiextensions.k8s.io/v1
78 | kind: CustomResourceDefinition
79 | metadata:
80 | name: workfloweventbindings.argoproj.io
81 | spec:
82 | group: argoproj.io
83 | names:
84 | kind: WorkflowEventBinding
85 | listKind: WorkflowEventBindingList
86 | plural: workfloweventbindings
87 | shortNames:
88 | - wfeb
89 | singular: workfloweventbinding
90 | scope: Namespaced
91 | versions:
92 | - name: v1alpha1
93 | schema:
94 | openAPIV3Schema:
95 | properties:
96 | apiVersion:
97 | type: string
98 | kind:
99 | type: string
100 | metadata:
101 | type: object
102 | spec:
103 | type: object
104 | x-kubernetes-preserve-unknown-fields: true
105 | required:
106 | - metadata
107 | - spec
108 | type: object
109 | served: true
110 | storage: true
111 | ---
112 | apiVersion: apiextensions.k8s.io/v1
113 | kind: CustomResourceDefinition
114 | metadata:
115 | name: workflows.argoproj.io
116 | spec:
117 | group: argoproj.io
118 | names:
119 | kind: Workflow
120 | listKind: WorkflowList
121 | plural: workflows
122 | shortNames:
123 | - wf
124 | singular: workflow
125 | scope: Namespaced
126 | versions:
127 | - additionalPrinterColumns:
128 | - description: Status of the workflow
129 | jsonPath: .status.phase
130 | name: Status
131 | type: string
132 | - description: When the workflow was started
133 | format: date-time
134 | jsonPath: .status.startedAt
135 | name: Age
136 | type: date
137 | name: v1alpha1
138 | schema:
139 | openAPIV3Schema:
140 | properties:
141 | apiVersion:
142 | type: string
143 | kind:
144 | type: string
145 | metadata:
146 | type: object
147 | spec:
148 | type: object
149 | x-kubernetes-preserve-unknown-fields: true
150 | status:
151 | type: object
152 | x-kubernetes-preserve-unknown-fields: true
153 | required:
154 | - metadata
155 | - spec
156 | type: object
157 | served: true
158 | storage: true
159 | subresources: {}
160 | ---
161 | apiVersion: apiextensions.k8s.io/v1
162 | kind: CustomResourceDefinition
163 | metadata:
164 | name: workflowtemplates.argoproj.io
165 | spec:
166 | group: argoproj.io
167 | names:
168 | kind: WorkflowTemplate
169 | listKind: WorkflowTemplateList
170 | plural: workflowtemplates
171 | shortNames:
172 | - wftmpl
173 | singular: workflowtemplate
174 | scope: Namespaced
175 | versions:
176 | - name: v1alpha1
177 | schema:
178 | openAPIV3Schema:
179 | properties:
180 | apiVersion:
181 | type: string
182 | kind:
183 | type: string
184 | metadata:
185 | type: object
186 | spec:
187 | type: object
188 | x-kubernetes-preserve-unknown-fields: true
189 | required:
190 | - metadata
191 | - spec
192 | type: object
193 | served: true
194 | storage: true
195 | ---
196 | apiVersion: v1
197 | kind: ServiceAccount
198 | metadata:
199 | name: argo
200 | ---
201 | apiVersion: v1
202 | kind: ServiceAccount
203 | metadata:
204 | name: argo-server
205 | ---
206 | apiVersion: rbac.authorization.k8s.io/v1
207 | kind: Role
208 | metadata:
209 | name: argo-role
210 | rules:
211 | - apiGroups:
212 | - coordination.k8s.io
213 | resources:
214 | - leases
215 | verbs:
216 | - create
217 | - get
218 | - update
219 | - apiGroups:
220 | - ""
221 | resources:
222 | - secrets
223 | verbs:
224 | - get
225 | ---
226 | apiVersion: rbac.authorization.k8s.io/v1
227 | kind: ClusterRole
228 | metadata:
229 | labels:
230 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
231 | name: argo-aggregate-to-admin
232 | rules:
233 | - apiGroups:
234 | - argoproj.io
235 | resources:
236 | - workflows
237 | - workflows/finalizers
238 | - workfloweventbindings
239 | - workfloweventbindings/finalizers
240 | - workflowtemplates
241 | - workflowtemplates/finalizers
242 | - cronworkflows
243 | - cronworkflows/finalizers
244 | - clusterworkflowtemplates
245 | - clusterworkflowtemplates/finalizers
246 | verbs:
247 | - create
248 | - delete
249 | - deletecollection
250 | - get
251 | - list
252 | - patch
253 | - update
254 | - watch
255 | ---
256 | apiVersion: rbac.authorization.k8s.io/v1
257 | kind: ClusterRole
258 | metadata:
259 | labels:
260 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
261 | name: argo-aggregate-to-edit
262 | rules:
263 | - apiGroups:
264 | - argoproj.io
265 | resources:
266 | - workflows
267 | - workflows/finalizers
268 | - workfloweventbindings
269 | - workfloweventbindings/finalizers
270 | - workflowtemplates
271 | - workflowtemplates/finalizers
272 | - cronworkflows
273 | - cronworkflows/finalizers
274 | - clusterworkflowtemplates
275 | - clusterworkflowtemplates/finalizers
276 | verbs:
277 | - create
278 | - delete
279 | - deletecollection
280 | - get
281 | - list
282 | - patch
283 | - update
284 | - watch
285 | ---
286 | apiVersion: rbac.authorization.k8s.io/v1
287 | kind: ClusterRole
288 | metadata:
289 | labels:
290 | rbac.authorization.k8s.io/aggregate-to-view: "true"
291 | name: argo-aggregate-to-view
292 | rules:
293 | - apiGroups:
294 | - argoproj.io
295 | resources:
296 | - workflows
297 | - workflows/finalizers
298 | - workfloweventbindings
299 | - workfloweventbindings/finalizers
300 | - workflowtemplates
301 | - workflowtemplates/finalizers
302 | - cronworkflows
303 | - cronworkflows/finalizers
304 | - clusterworkflowtemplates
305 | - clusterworkflowtemplates/finalizers
306 | verbs:
307 | - get
308 | - list
309 | - watch
310 | ---
311 | apiVersion: rbac.authorization.k8s.io/v1
312 | kind: ClusterRole
313 | metadata:
314 | name: argo-cluster-role
315 | rules:
316 | - apiGroups:
317 | - ""
318 | resources:
319 | - pods
320 | - pods/exec
321 | verbs:
322 | - create
323 | - get
324 | - list
325 | - watch
326 | - update
327 | - patch
328 | - delete
329 | - apiGroups:
330 | - ""
331 | resources:
332 | - configmaps
333 | verbs:
334 | - get
335 | - watch
336 | - list
337 | - apiGroups:
338 | - ""
339 | resources:
340 | - persistentvolumeclaims
341 | verbs:
342 | - create
343 | - delete
344 | - get
345 | - apiGroups:
346 | - argoproj.io
347 | resources:
348 | - workflows
349 | - workflows/finalizers
350 | verbs:
351 | - get
352 | - list
353 | - watch
354 | - update
355 | - patch
356 | - delete
357 | - create
358 | - apiGroups:
359 | - argoproj.io
360 | resources:
361 | - workflowtemplates
362 | - workflowtemplates/finalizers
363 | - clusterworkflowtemplates
364 | - clusterworkflowtemplates/finalizers
365 | verbs:
366 | - get
367 | - list
368 | - watch
369 | - apiGroups:
370 | - ""
371 | resources:
372 | - serviceaccounts
373 | verbs:
374 | - get
375 | - list
376 | - apiGroups:
377 | - argoproj.io
378 | resources:
379 | - cronworkflows
380 | - cronworkflows/finalizers
381 | verbs:
382 | - get
383 | - list
384 | - watch
385 | - update
386 | - patch
387 | - delete
388 | - apiGroups:
389 | - ""
390 | resources:
391 | - events
392 | verbs:
393 | - create
394 | - patch
395 | - apiGroups:
396 | - policy
397 | resources:
398 | - poddisruptionbudgets
399 | verbs:
400 | - create
401 | - get
402 | - delete
403 | ---
404 | apiVersion: rbac.authorization.k8s.io/v1
405 | kind: ClusterRole
406 | metadata:
407 | name: argo-server-cluster-role
408 | rules:
409 | - apiGroups:
410 | - ""
411 | resources:
412 | - configmaps
413 | verbs:
414 | - get
415 | - watch
416 | - list
417 | - apiGroups:
418 | - ""
419 | resources:
420 | - secrets
421 | verbs:
422 | - get
423 | - create
424 | - apiGroups:
425 | - ""
426 | resources:
427 | - pods
428 | - pods/exec
429 | - pods/log
430 | verbs:
431 | - get
432 | - list
433 | - watch
434 | - delete
435 | - apiGroups:
436 | - ""
437 | resources:
438 | - events
439 | verbs:
440 | - watch
441 | - create
442 | - patch
443 | - apiGroups:
444 | - ""
445 | resources:
446 | - serviceaccounts
447 | verbs:
448 | - get
449 | - list
450 | - apiGroups:
451 | - argoproj.io
452 | resources:
453 | - eventsources
454 | - sensors
455 | - workflows
456 | - workfloweventbindings
457 | - workflowtemplates
458 | - cronworkflows
459 | - clusterworkflowtemplates
460 | verbs:
461 | - create
462 | - get
463 | - list
464 | - watch
465 | - update
466 | - patch
467 | - delete
468 | ---
469 | apiVersion: rbac.authorization.k8s.io/v1
470 | kind: RoleBinding
471 | metadata:
472 | name: argo-binding
473 | roleRef:
474 | apiGroup: rbac.authorization.k8s.io
475 | kind: Role
476 | name: argo-role
477 | subjects:
478 | - kind: ServiceAccount
479 | name: argo
480 | ---
481 | apiVersion: rbac.authorization.k8s.io/v1
482 | kind: ClusterRoleBinding
483 | metadata:
484 | name: argo-binding
485 | roleRef:
486 | apiGroup: rbac.authorization.k8s.io
487 | kind: ClusterRole
488 | name: argo-cluster-role
489 | subjects:
490 | - kind: ServiceAccount
491 | name: argo
492 | namespace: argo
493 | ---
494 | apiVersion: rbac.authorization.k8s.io/v1
495 | kind: ClusterRoleBinding
496 | metadata:
497 | name: argo-server-binding
498 | roleRef:
499 | apiGroup: rbac.authorization.k8s.io
500 | kind: ClusterRole
501 | name: argo-server-cluster-role
502 | subjects:
503 | - kind: ServiceAccount
504 | name: argo-server
505 | namespace: argo
506 | ---
507 | apiVersion: v1
508 | kind: ConfigMap
509 | metadata:
510 | name: workflow-controller-configmap
511 | data:
512 | containerRuntimeExecutor: k8sapi
513 | downwardAPIUnavailable: 'true'
514 | ---
515 | apiVersion: v1
516 | kind: Service
517 | metadata:
518 | name: argo-server
519 | spec:
520 | type: NodePort
521 | ports:
522 | - name: web
523 | port: 2746
524 | nodePort: 32746
525 | targetPort: 2746
526 | selector:
527 | app: argo-server
528 | ---
529 | apiVersion: v1
530 | kind: Service
531 | metadata:
532 | name: workflow-controller-metrics
533 | spec:
534 | ports:
535 | - name: metrics
536 | port: 9090
537 | protocol: TCP
538 | targetPort: 9090
539 | selector:
540 | app: workflow-controller
541 | ---
542 | apiVersion: apps/v1
543 | kind: Deployment
544 | metadata:
545 | name: argo-server
546 | spec:
547 | selector:
548 | matchLabels:
549 | app: argo-server
550 | template:
551 | metadata:
552 | labels:
553 | app: argo-server
554 | spec:
555 | containers:
556 | - args:
557 | - server
558 | - --auth-mode=server
559 | image: quay.io/argoproj/argocli:v3.0.4
560 | name: argo-server
561 | ports:
562 | - containerPort: 2746
563 | name: web
564 | readinessProbe:
565 | httpGet:
566 | path: /
567 | port: 2746
568 | scheme: HTTPS
569 | initialDelaySeconds: 10
570 | periodSeconds: 20
571 | securityContext:
572 | capabilities:
573 | drop:
574 | - ALL
575 | volumeMounts:
576 | - mountPath: /tmp
577 | name: tmp
578 | nodeSelector:
579 | kubernetes.io/os: linux
580 | securityContext:
581 | runAsNonRoot: true
582 | serviceAccountName: argo-server
583 | volumes:
584 | - emptyDir: {}
585 | name: tmp
586 | ---
587 | apiVersion: apps/v1
588 | kind: Deployment
589 | metadata:
590 | name: workflow-controller
591 | spec:
592 | selector:
593 | matchLabels:
594 | app: workflow-controller
595 | template:
596 | metadata:
597 | labels:
598 | app: workflow-controller
599 | spec:
600 | containers:
601 | - args:
602 | - --configmap
603 | - workflow-controller-configmap
604 | - --executor-image
605 | - carvicsforth/argoexec:v3.0.4-custom
606 | command:
607 | - workflow-controller
608 | env:
609 | - name: ARGO_DOWNWARD_API_UNAVAILABLE
610 | - name: LEADER_ELECTION_IDENTITY
611 | valueFrom:
612 | fieldRef:
613 | apiVersion: v1
614 | fieldPath: metadata.name
615 | image: carvicsforth/workflow-controller:v3.0.4-custom
616 | livenessProbe:
617 | httpGet:
618 | path: /metrics
619 | port: metrics
620 | initialDelaySeconds: 30
621 | periodSeconds: 30
622 | name: workflow-controller
623 | ports:
624 | - containerPort: 9090
625 | name: metrics
626 | securityContext:
627 | capabilities:
628 | drop:
629 | - ALL
630 | nodeSelector:
631 | kubernetes.io/os: linux
632 | securityContext:
633 | runAsNonRoot: true
634 | serviceAccountName: argo
635 |
--------------------------------------------------------------------------------
/deploy/base.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: knoc
5 | ---
6 | apiVersion: rbac.authorization.k8s.io/v1
7 | kind: ClusterRole
8 | metadata:
9 | name: knoc
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - configmaps
15 | - secrets
16 | - services
17 | - serviceaccounts
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | - apiGroups:
23 | - ""
24 | resources:
25 | - pods
26 | verbs:
27 | - delete
28 | - get
29 | - list
30 | - watch
31 | - patch
32 | - apiGroups:
33 | - ""
34 | resources:
35 | - nodes
36 | verbs:
37 | - create
38 | - get
39 | - apiGroups:
40 | - ""
41 | resources:
42 | - nodes/status
43 | verbs:
44 | - update
45 | - patch
46 | - apiGroups:
47 | - ""
48 | resources:
49 | - pods/status
50 | verbs:
51 | - update
52 | - patch
53 | - apiGroups:
54 | - ""
55 | resources:
56 | - events
57 | verbs:
58 | - create
59 | - patch
60 | ---
61 | apiVersion: rbac.authorization.k8s.io/v1
62 | kind: ClusterRoleBinding
63 | metadata:
64 | name: knoc
65 | subjects:
66 | - kind: ServiceAccount
67 | name: knoc
68 | namespace: default
69 | roleRef:
70 | apiGroup: rbac.authorization.k8s.io
71 | kind: ClusterRole
72 | name: knoc
73 |
--------------------------------------------------------------------------------
/deploy/knoc-cfg.json:
--------------------------------------------------------------------------------
1 | {
2 | "vk-knoc": {
3 | "cpu": "2",
4 | "memory": "32Gi",
5 | "pods": "128"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/deploy/knoc-crt.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIID3jCCAsagAwIBAgIIUa45eqfb4sEwDQYJKoZIhvcNAQELBQAwfzELMAkGA1UE
3 | BhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9ydGxhbmQxGDAWBgNV
4 | BAoTD3ZrdWJlbGV0LW1vY2stMDEYMBYGA1UECxMPdmt1YmVsZXQtbW9jay0wMRgw
5 | FgYDVQQDEw92a3ViZWxldC1tb2NrLTAwHhcNMTgxMTI2MTIwMzIzWhcNMTkwMjI1
6 | MTgwODIzWjB/MQswCQYDVQQGEwJVUzEPMA0GA1UECBMGT3JlZ29uMREwDwYDVQQH
7 | EwhQb3J0bGFuZDEYMBYGA1UEChMPdmt1YmVsZXQtbW9jay0wMRgwFgYDVQQLEw92
8 | a3ViZWxldC1tb2NrLTAxGDAWBgNVBAMTD3ZrdWJlbGV0LW1vY2stMDCCASIwDQYJ
9 | KoZIhvcNAQEBBQADggEPADCCAQoCggEBALryHvK3UBBBqGV2Fpwymf0p/YKGQA9r
10 | Nu0N6f2+RkUXLuQXG+WdFQl3ZQybPLfCE2hwFcl3IF+3hCzY3/2UIyGBloBIft7K
11 | YFLM3YWJDy5ElKDg1bNDSLzF6tkpNLDnVlgkPPITzpEHIAu+BT5DZGWhYAWO/Dir
12 | XdxoJBOhPZZCcBCV+kwQQPbsXzZy+q7Qhx270CRMIXso9C5LJhGYL9fwsxmukAOR
13 | 56SmfsAaml7UOlzHITRDwD5AQ1BkTSEFy08dk6JAYL8LDLhgaLoWoV0Ge2gOIepR
14 | jpl87dGbSVGyBHmTXv4o6utqT6S6nU76Ln9NSi7YhMqj8uWv0pTDlYcCAwEAAaNe
15 | MFwwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD
16 | AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBQVHwU1sy7Qnw1WvVvFLcZrhoT40DAN
17 | BgkqhkiG9w0BAQsFAAOCAQEAsNGNKz1Jwfwg7rYaO7VF/zan01XXZFP1bnFYnXJu
18 | 15RzhOBMsp3KvWCVhwUfxNe8GhUDSx2tmS5EA/8oaEngLFl3jtR3pnUNOwDVlzly
19 | QOCN3rlOi4+p26LvMiAFp5hxXAv3LORs6Dzr6h3/QTtlV5jDShUOXZdFdOPJdZ2n
20 | g4birrG7MO6vwvR8CiNcQ26b+b8p9BGXbE8bsJoHmcsqya8fbVs2n6CdEJeI+4hD
21 | N6xlo5SvhjH5tFII7eCVedyZGl0BKvkocOigLgq8X+JzFxj1wtdmtXv7sjdKcB9r
22 | 6TWGJRrZVxoxUOzZhpxUj3j/pLaRcDmttSJCuDu3NAtkgQ==
23 | -----END CERTIFICATE-----
24 |
--------------------------------------------------------------------------------
/deploy/knoc-key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEowIBAAKCAQEAuvIe8rdQEEGoZXYWnDKZ/Sn9goZAD2s27Q3p/b5GRRcu5Bcb
3 | 5Z0VCXdlDJs8t8ITaHAVyXcgX7eELNjf/ZQjIYGWgEh+3spgUszdhYkPLkSUoODV
4 | s0NIvMXq2Sk0sOdWWCQ88hPOkQcgC74FPkNkZaFgBY78OKtd3GgkE6E9lkJwEJX6
5 | TBBA9uxfNnL6rtCHHbvQJEwheyj0LksmEZgv1/CzGa6QA5HnpKZ+wBqaXtQ6XMch
6 | NEPAPkBDUGRNIQXLTx2TokBgvwsMuGBouhahXQZ7aA4h6lGOmXzt0ZtJUbIEeZNe
7 | /ijq62pPpLqdTvouf01KLtiEyqPy5a/SlMOVhwIDAQABAoIBAEN84tVGfh3QRiWS
8 | sujij5rITN+Q7ZFjaCm96yoSRbXtf50SBp0mzxBizNT3Ob0wz+bVB9h6K/LCAnJa
9 | PMqDbwdKi/V1tm9hadKaaKIrb5KJaYqGgD893AViAb0x1fbDHPWm52WQ5vKOOvBi
10 | QexPUfAqiMqY6s7ednz6D4QSonQamxCUPBPYvudmayHtPlc8Qb6eY0V+pcdFnW08
11 | SDZXYOxey3/IAjZydcA7XgvNSc+6XOwmhKsGAW71uFTTagJvzX3ePCY14rkGJmDG
12 | m/10hoW6NMKGeV/RyX3dX0jJmDk1VfxAQW3xpOipZfgfvgavCOqHnKA6I8dK3zhg
13 | vE9BleECgYEA87X/ztQZDI4qOTA9CW/nMXfwAy9QO1K6bGhBHUu7Js4pqgxuH8Fk
14 | hQgQK7V8iasny/dCyj6Cu3QJNofxudAvLLQKkquyQOa+zqFCUpVid7JVRMcRLJlt
15 | 3HlyCNvVlhfjDT0cI2RdU45q8MnZoy1f3DPZB16cHb3HL9z1gQZTiXECgYEAxF9a
16 | 68SbxmWFBK7PaobI8wVfDoTirHmoAvnypYK0oQkAX8VmEmtEEs2+N1moKjSTPr+t
17 | us4JKguA8z2tuLk5j+eF+zDl/2U+7djTF8FCNprwz3sXr427GCIGL5YvpIBZ+TL8
18 | Bji2uyoo8k9SAWMb4ObOzfGm4teCvciS99gw0ncCgYAt5GbAVtZEs/ylejz0KvtZ
19 | KGGs59ru4Nw0D8m7L4iVfRsBZ4fROQSpvGP3JxzFe9JpqS0NkonhrK8TcrQFLnvD
20 | qj+XcPeHGyxxEpK/pFu/eHhwFCBayqWSb9gWbPciZWsfEhPbYknksxvWLdxqyt+T
21 | QrwqlBlHzHXWwIAGhN90MQKBgQC5CYkpBFgsuFiBMx+rJ1qO9I6/paPaFcClHVTx
22 | dJoz68F4fQ9TZ9P7S/djPI5jRqtAw2k2zxJ/ldtqWMIrgA2ndegf69GtuH91q4wt
23 | pCN6RMGJIFoPSCP194mQqZo3DeK6GLq2OhalgnKW8Ps652LLp3FTSdORiLVfk3I5
24 | LHPEvQKBgDCxa/3vneG8vgs8ArEjN89B/YxO1qIU5mxJe6Zafb81NdhYUjfRAVro
25 | ALTofiApMsnDbJDHMiwvwcDUHbPLpruK80R//zmX7Xen+F+5obfSQ8j0GSmmeWFQ
26 | SVG6ApNtktLPI0nK2nEIH/Qx4ouGC9N0pADRClQQPSxEPmDvf4xf
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/deploy/pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: vkubelet-knoc-0
5 | spec:
6 | containers:
7 | - name: vkubelet-knoc-0
8 | image: carvicsforth/knoc
9 | # "IfNotPresent" is used to prevent Minikube from trying to pull from the registry (and failing) in the first place.
10 | imagePullPolicy: IfNotPresent
11 | args:
12 | - /virtual-kubelet
13 | - --nodename
14 | - vkubelet-knoc-0
15 | - --provider
16 | - knoc
17 | - --provider-config
18 | - /vkubelet-knoc-0-cfg.json
19 | - --startup-timeout
20 | - 10s
21 | - --klog.v
22 | - "2"
23 | - --klog.logtostderr
24 | - --log-level
25 | - info
26 | volumeMounts:
27 | - name: kubeconfig-setup
28 | mountPath: /home/carv/scripts/
29 | env:
30 | - name: KUBELET_PORT
31 | value: "10250"
32 | - name: VKUBELET_POD_IP
33 | valueFrom:
34 | fieldRef:
35 | fieldPath: status.podIP
36 | - name: REMOTE_USER
37 | valueFrom:
38 | secretKeyRef:
39 | name: remote-secret
40 | key: remote_user
41 | - name: REMOTE_KEY
42 | valueFrom:
43 | secretKeyRef:
44 | name: remote-secret
45 | key: ssh-privatekey
46 | - name: REMOTE_HOST
47 | valueFrom:
48 | secretKeyRef:
49 | name: remote-secret
50 | key: host
51 | - name: REMOTE_PORT
52 | valueFrom:
53 | secretKeyRef:
54 | name: remote-secret
55 | key: port
56 | ports:
57 | - name: metrics
58 | containerPort: 10255
59 | readinessProbe:
60 | httpGet:
61 | path: /stats/summary
62 | port: metrics
63 | serviceAccountName: knoc
64 | volumes:
65 | - name: kubeconfig-setup
66 | configMap:
67 | name: setup-kubeconfig
68 | defaultMode: 0777
--------------------------------------------------------------------------------
/deploy/setup_kubeconfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: setup-kubeconfig
5 | data:
6 | prepare_kubeconfig.sh: |
7 | #!/bin/bash
8 | SERVICE_ACCOUNT="knoc"
9 | USER_TOKEN_NAME=$(kubectl -n default get serviceaccount ${SERVICE_ACCOUNT} -o=jsonpath='{.secrets[0].name}')
10 | USER_TOKEN_VALUE=$(kubectl -n default get secret/${USER_TOKEN_NAME} -o=go-template='{{.data.token}}' | base64 --decode)
11 | CURRENT_CONTEXT=knoc
12 | CURRENT_CLUSTER=$(kubectl config view --raw -o=go-template='{{range .contexts}}{{if eq .name "'''${CURRENT_CONTEXT}'''"}}{{ index .context "cluster" }}{{end}}{{end}}')
13 | CLUSTER_CA=`kubectl get secret/$USER_TOKEN_NAME -n default -o jsonpath='{.data.ca\.crt}'`
14 | CLUSTER_SERVER=https://139.91.92.71:38080
15 | echo "
16 | apiVersion: v1
17 | kind: Config
18 | current-context: ${CURRENT_CONTEXT}
19 | contexts:
20 | - name: ${CURRENT_CONTEXT}
21 | context:
22 | cluster: ${CURRENT_CONTEXT}
23 | user: default
24 | namespace: default
25 | clusters:
26 | - name: ${CURRENT_CONTEXT}
27 | cluster:
28 | certificate-authority-data: ${CLUSTER_CA}
29 | server: ${CLUSTER_SERVER}
30 | users:
31 | - name: default
32 | user:
33 | token: ${USER_TOKEN_VALUE}
34 | "
--------------------------------------------------------------------------------
/deploy/skaffold.yml:
--------------------------------------------------------------------------------
1 | apiVersion: skaffold/v2beta11
2 | kind: Config
3 | build:
4 | artifacts:
5 | - image: carvicsforth/knoc
6 | docker:
7 | # Use a Dockerfile specific for development only.
8 | dockerfile: deploy/Dockerfile
9 | deploy:
10 | kubectl:
11 | manifests:
12 | - deploy/base.yml
13 | - deploy/pod.yml
14 | - deploy/setup_kubeconfig.yaml
15 | profiles:
16 | - name: local
17 | build:
18 | tagPolicy:
19 | gitCommit:
20 | variant: Tags
21 | local: {}
22 |
--------------------------------------------------------------------------------
/door/door.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package main
16 |
import (
	b64 "encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/akamensky/argparse"
	log "github.com/sirupsen/logrus"
)
31 |
32 | const (
33 | SUBMIT = 0
34 | STOP = 1
35 | UNKNOWN = 2
36 | SBATCH = "/usr/bin/sbatch"
37 | SCANCEL = "/usr/bin/scancel"
38 | )
39 |
40 | var buildVersion = "dev"
41 |
42 | func args_parser() (int, string) {
43 | // Create new parser object
44 | parser := argparse.NewParser("door", "KNoC's sidekick, he's deadly!")
45 | // Create string flag
46 | // log.Infoln(buildVersion)
47 | var action *string = parser.Selector("a", "action", []string{"submit", "stop"}, &argparse.Options{Required: false, Help: "Action required for door"})
48 | var container *string = parser.String("c", "container", &argparse.Options{Required: false, Help: "Container required for action"})
49 | verbose := parser.Flag("V", "verbose", &argparse.Options{Required: false, Help: "Verbose flag sets log level to DEBUG"})
50 | version := parser.Flag("v", "version", &argparse.Options{Required: false, Help: "version"})
51 |
52 | // Parse input
53 | err := parser.Parse(os.Args)
54 | if err != nil {
55 | // In case of error print error and print usage
56 | // This can also be done by passing -h or --help flags
57 | fmt.Print(parser.Usage(err))
58 | }
59 |
60 | if *version {
61 | log.Infoln(buildVersion)
62 | os.Exit(0)
63 | }
64 |
65 | if *verbose {
66 | log.SetLevel(log.DebugLevel)
67 | }
68 | // log.Debugln(*action, *container)
69 | if *action == "submit" {
70 | return SUBMIT, *container
71 | } else if *action == "stop" {
72 | return STOP, *container
73 | }
74 | fmt.Println(parser.Usage(""))
75 | return 3, ""
76 | }
77 |
78 | func prepare_env(c DoorContainer) []string {
79 | env := make([]string, 1)
80 | env = append(env, "--env")
81 | env_data := ""
82 | for _, env_var := range c.Env {
83 | tmp := (env_var.Name + "=" + env_var.Value + ",")
84 | env_data += tmp
85 | }
86 | if last := len(env_data) - 1; last >= 0 && env_data[last] == ',' {
87 | env_data = env_data[:last]
88 | }
89 | return append(env, env_data)
90 | }
91 |
92 | func prepare_mounts(c DoorContainer) []string {
93 | mount := make([]string, 1)
94 | mount = append(mount, "--bind")
95 | mount_data := ""
96 | pod_name := strings.Split(c.Name, "-")
97 | for _, mount_var := range c.VolumeMounts {
98 | path := (".knoc/" + strings.Join(pod_name[:6], "-") + "/" + mount_var.Name + ":" + mount_var.MountPath + ",")
99 | mount_data += path
100 | }
101 | if last := len(mount_data) - 1; last >= 0 && mount_data[last] == ',' {
102 | mount_data = mount_data[:last]
103 | }
104 | return append(mount, mount_data)
105 | }
106 |
107 | func produce_slurm_script(c DoorContainer, command []string) string {
108 | newpath := filepath.Join(".", ".tmp")
109 | err := os.MkdirAll(newpath, os.ModePerm)
110 | f, err := os.Create(".tmp/" + c.Name + ".sh")
111 | if err != nil {
112 | log.Fatalln("Cant create slurm_script")
113 | }
114 | var sbatch_flags_from_argo []string
115 | var sbatch_flags_as_string = ""
116 | if slurm_flags, ok := c.Metadata.Annotations["slurm-job.knoc.io/flags"]; ok {
117 | sbatch_flags_from_argo = strings.Split(slurm_flags, " ")
118 | log.Debugln(sbatch_flags_from_argo)
119 | }
120 | if mpi_flags, ok := c.Metadata.Annotations["slurm-job.knoc.io/mpi-flags"]; ok {
121 | if mpi_flags != "true" {
122 | mpi := append([]string{"mpiexec", "-np", "$SLURM_NTASKS"}, strings.Split(mpi_flags, " ")...)
123 | command = append(mpi, command...)
124 | }
125 | log.Debugln(mpi_flags)
126 | }
127 | for _, slurm_flag := range sbatch_flags_from_argo {
128 | sbatch_flags_as_string += "\n#SBATCH " + slurm_flag
129 | }
130 | sbatch_macros := "#!/bin/bash" +
131 | "\n#SBATCH --job-name=" + c.Name +
132 | sbatch_flags_as_string +
133 | "\n. ~/.bash_profile" +
134 | "\npwd; hostname; date\n"
135 | f.WriteString(sbatch_macros + "\n" + strings.Join(command[:], " ") + " >> " + ".knoc/" + c.Name + ".out 2>> " + ".knoc/" + c.Name + ".err \n echo $? > " + ".knoc/" + c.Name + ".status")
136 | f.Close()
137 | return ".tmp/" + c.Name + ".sh"
138 | }
139 |
140 | func slurm_batch_submit(path string, c DoorContainer) string {
141 | var output []byte
142 | var err error
143 | output, err = exec.Command(SBATCH, path).Output()
144 | if err != nil {
145 | log.Fatalln("Could not run sbatch. " + err.Error())
146 | }
147 | return string(output)
148 |
149 | }
150 |
151 | func handle_jid(c DoorContainer, output string) {
152 | r := regexp.MustCompile(`Submitted batch job (?P\d+)`)
153 | jid := r.FindStringSubmatch(output)
154 | f, err := os.Create(".knoc/" + c.Name + ".jid")
155 | if err != nil {
156 | log.Fatalln("Cant create jid_file")
157 | }
158 | f.WriteString(jid[1])
159 | f.Close()
160 | }
161 |
162 | func create_container(c DoorContainer) {
163 | log.Debugln("create_container")
164 |
165 | commstr1 := []string{"singularity", "exec"}
166 |
167 | envs := prepare_env(c)
168 | image := ""
169 | mounts := prepare_mounts(c)
170 | if strings.HasPrefix(c.Image, "/") {
171 | if image_uri, ok := c.Metadata.Annotations["slurm-job.knoc.io/image-root"]; ok {
172 | log.Debugln(image_uri)
173 | image = image_uri + c.Image
174 | } else {
175 | log.Errorln("image-uri annotation not specified for path in remote filesystem")
176 | }
177 | } else {
178 | image = "docker://" + c.Image
179 | }
180 | singularity_command := append(commstr1, envs...)
181 | singularity_command = append(singularity_command, mounts...)
182 | singularity_command = append(singularity_command, image)
183 | singularity_command = append(singularity_command, c.Command...)
184 | singularity_command = append(singularity_command, c.Args...)
185 |
186 | path := produce_slurm_script(c, singularity_command)
187 | out := slurm_batch_submit(path, c)
188 | handle_jid(c, out)
189 | log.Debugln(singularity_command)
190 | log.Infoln(out)
191 |
192 | }
193 |
194 | func delete_container(c DoorContainer) {
195 | data, err := os.ReadFile(".knoc/" + c.Name + ".jid")
196 | if err != nil {
197 | log.Fatalln("Can't find job id of container")
198 | }
199 | jid, err := strconv.Atoi(string(data))
200 | if err != nil {
201 | log.Fatalln("Can't find job id of container")
202 | }
203 | _, err = exec.Command(SCANCEL, fmt.Sprint(jid)).Output()
204 | if err != nil {
205 | log.Fatalln("Could not delete job", jid)
206 | }
207 | exec.Command("rm", "-f ", ".knoc/"+c.Name+".out")
208 | exec.Command("rm", "-f ", ".knoc/"+c.Name+".err")
209 | exec.Command("rm", "-f ", ".knoc/"+c.Name+".status")
210 | exec.Command("rm", "-f ", ".knoc/"+c.Name+".jid")
211 | exec.Command("rm", "-rf", " .knoc/"+c.Name)
212 | log.Infoln("Delete job", jid)
213 | }
214 |
215 | func importContainerb64Json(containerSpec string) DoorContainer {
216 | dc := DoorContainer{}
217 | sDec, err := b64.StdEncoding.DecodeString(containerSpec)
218 | if err != nil {
219 | log.Fatalln("Wrong containerSpec!")
220 | }
221 | err = json.Unmarshal(sDec, &dc)
222 | if err != nil {
223 | log.Fatalln("Wrong type of doorContainer!")
224 | }
225 | return dc
226 | }
227 |
228 | func main() {
229 | action, containerSpec := args_parser()
230 | if action == 3 {
231 | os.Exit(1)
232 | }
233 | dc := importContainerb64Json(containerSpec)
234 | switch action {
235 | case SUBMIT:
236 | create_container(dc)
237 | case STOP:
238 | delete_container(dc)
239 | }
240 | }
241 |
--------------------------------------------------------------------------------
/door/types.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package main
16 |
17 | import (
18 | v1 "k8s.io/api/core/v1"
19 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
20 | )
21 |
22 | type DoorContainer struct {
23 | Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
24 | Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
25 | Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
26 | Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
27 | WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
28 | Ports []v1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
29 | EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
30 | Env []v1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
31 | Resources v1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
32 | VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
33 | VolumeDevices []v1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
34 | Metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
35 | }
36 |
--------------------------------------------------------------------------------
/examples/argo-dag-coinflip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | generateName: dag-diamond-coinflip-
5 | namespace: default
6 | spec:
7 | entrypoint: diamond
8 | templates:
9 | - name: diamond
10 | dag:
11 | tasks:
12 | - name: A
13 | template: coinflip
14 | - name: B
15 | dependencies: [A]
16 | template: coinflip
17 | - name: C
18 | dependencies: [A]
19 | template: coinflip
20 | - name: D
21 | dependencies: [B, C]
22 | template: coinflip
23 |
24 | - name: coinflip
25 | steps:
26 | - - name: flip-coin
27 | template: flip-coin
28 | - - name: heads
29 | template: heads
30 | when: "{{steps.flip-coin.outputs.result}} == heads"
31 | - name: tails
32 | template: coinflip
33 | when: "{{steps.flip-coin.outputs.result}} == tails"
34 |
35 | - name: flip-coin
36 | script:
37 | image: python:alpine3.6
38 | command: [python]
39 | source: |
40 | import random
41 | result = "heads" if random.randint(0,1) == 0 else "tails"
42 | print(result)
43 |
44 | - name: heads
45 | container:
46 | image: alpine:3.6
47 | command: [sh, -c]
48 | args: ["echo \"it was heads\""]
49 | dnsPolicy: ClusterFirst
50 | nodeSelector:
51 | kubernetes.io/role: agent
52 | beta.kubernetes.io/os: linux
53 | type: virtual-kubelet
54 | tolerations:
55 | - key: virtual-kubelet.io/provider
56 | operator: Exists
--------------------------------------------------------------------------------
/examples/argo-dag-diamond.yaml:
--------------------------------------------------------------------------------
1 |
2 | # The following workflow executes a diamond workflow
3 | #
4 | # A
5 | # / \
6 | # B C
7 | # \ /
8 | # D
9 | apiVersion: argoproj.io/v1alpha1
10 | kind: Workflow
11 | metadata:
12 | generateName: dag-diamond-
13 | namespace: default
14 | spec:
15 | entrypoint: diamond
16 | podMetadata:
17 | annotations:
18 | slurm-job.knoc.io/flags: "--mem=4G "
19 | templates:
20 | - name: diamond
21 | metadata:
22 | annotations:
23 | slurm-job.knoc.io/flags: "--mem=2G --exclude=tie0,tie3,tie4"
24 | dag:
25 | tasks:
26 | - name: A
27 | template: echo
28 | arguments:
29 | parameters: [{name: message, value: A}]
30 | - name: B
31 | dependencies: [A]
32 | template: echo
33 | arguments:
34 | parameters: [{name: message, value: B}]
35 | - name: C
36 | dependencies: [A]
37 | template: echo
38 | arguments:
39 | parameters: [{name: message, value: C}]
40 | - name: D
41 | dependencies: [B, C]
42 | template: echo
43 | arguments:
44 | parameters: [{name: message, value: D}]
45 |
46 | - name: echo
47 | metadata:
48 | annotations:
49 | slurm-job.knoc.io/flags: "--mem=2G --exclude=tie0,tie3,tie4"
50 | inputs:
51 | parameters:
52 | - name: message
53 | container:
54 | image: alpine:3.7
55 | command: [echo, "{{inputs.parameters.message}}"]
56 | dnsPolicy: ClusterFirst
57 | nodeSelector:
58 | kubernetes.io/role: agent
59 | beta.kubernetes.io/os: linux
60 | type: virtual-kubelet
61 | tolerations:
62 | - key: virtual-kubelet.io/provider
63 | operator: Exists
--------------------------------------------------------------------------------
/examples/argo-dag-intentionalFail.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Workflow
3 | metadata:
4 |   generateName: dag-continue-on-fail-
5 | namespace: default
6 | spec:
7 | entrypoint: workflow
8 | templates:
9 | - name: workflow
10 | dag:
11 | tasks:
12 | - name: A
13 | template: whalesay
14 | - name: B
15 | dependencies: [A]
16 | template: intentional-fail
17 | continueOn:
18 | failed: true
19 | - name: C
20 | dependencies: [A]
21 | template: whalesay
22 | - name: D
23 | dependencies: [B, C]
24 | template: whalesay
25 | - name: E
26 | dependencies: [A]
27 | template: intentional-fail
28 | - name: F
29 | dependencies: [A]
30 | template: whalesay
31 | - name: G
32 | dependencies: [E, F]
33 | template: whalesay
34 |
35 | - name: whalesay
36 | container:
37 | image: docker/whalesay:latest
38 | command: [cowsay]
39 | args: ["hello world"]
40 |
41 | - name: intentional-fail
42 | container:
43 | image: alpine:latest
44 | command: [sh, -c]
45 | args: ["'echo intentional failure; exit 1'"]
46 | dnsPolicy: ClusterFirst
47 | nodeSelector:
48 | kubernetes.io/role: agent
49 | beta.kubernetes.io/os: linux
50 | type: virtual-kubelet
51 | tolerations:
52 | - key: virtual-kubelet.io/provider
53 | operator: Exists
54 | - key: azure.com/aci
55 | effect: NoSchedule
56 |
--------------------------------------------------------------------------------
/examples/argo-dag-nested.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | generateName: dag-nested-
5 | namespace: default
6 | spec:
7 | entrypoint: diamond
8 | templates:
9 | - name: echo
10 | inputs:
11 | parameters:
12 | - name: message
13 | container:
14 | image: alpine:3.7
15 | command: [echo, "{{inputs.parameters.message}}"]
16 | - name: diamond
17 | dag:
18 | tasks:
19 | - name: A
20 | template: nested-diamond
21 | arguments:
22 | parameters: [{name: message, value: A}]
23 | - name: B
24 | dependencies: [A]
25 | template: nested-diamond
26 | arguments:
27 | parameters: [{name: message, value: B}]
28 | - name: C
29 | dependencies: [A]
30 | template: nested-diamond
31 | arguments:
32 | parameters: [{name: message, value: C}]
33 | - name: D
34 | dependencies: [B, C]
35 | template: nested-diamond
36 | arguments:
37 | parameters: [{name: message, value: D}]
38 | - name: nested-diamond
39 | inputs:
40 | parameters:
41 | - name: message
42 | dag:
43 | tasks:
44 | - name: A
45 | template: echo
46 | arguments:
47 | parameters: [{name: message, value: "{{inputs.parameters.message}}A"}]
48 | - name: B
49 | dependencies: [A]
50 | template: echo
51 | arguments:
52 | parameters: [{name: message, value: "{{inputs.parameters.message}}B"}]
53 | - name: C
54 | dependencies: [A]
55 | template: echo
56 | arguments:
57 | parameters: [{name: message, value: "{{inputs.parameters.message}}C"}]
58 | - name: D
59 | dependencies: [B, C]
60 | template: echo
61 | arguments:
62 | parameters: [{name: message, value: "{{inputs.parameters.message}}D"}]
63 | dnsPolicy: ClusterFirst
64 | nodeSelector:
65 | kubernetes.io/role: agent
66 | beta.kubernetes.io/os: linux
67 | type: virtual-kubelet
68 | tolerations:
69 | - key: virtual-kubelet.io/provider
70 | operator: Exists
71 | - key: azure.com/aci
72 | effect: NoSchedule
--------------------------------------------------------------------------------
/examples/argo-npb-mpi-test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Workflow
3 | metadata:
4 | generateName: npb-with-mpi-
5 | spec:
6 | entrypoint: npb-with-mpi
7 | templates:
8 | - name: npb-with-mpi
9 | dag:
10 | tasks:
11 | - name: A
12 | template: npb
13 | arguments:
14 | parameters:
15 | - {name: cpus, value: "{{item}}"}
16 | withItems:
17 | - 2
18 | - 4
19 | - 8
20 | - 16
21 | - name: npb
22 | metadata:
23 | annotations:
24 | slurm-job.knoc.io/flags: "--mem=8G --nodes=3 --nodelist=tie0,tie1,tie3 --ntasks={{inputs.parameters.cpus}}"
25 | slurm-job.knoc.io/mpi-flags: "--mca btl self,tcp,vader --mca btl_tcp_if_include ib0 "
26 | inputs:
27 | parameters:
28 | - name: cpus
29 | container:
30 | image: registry.platform.science-hangar.eu/malvag/mpi-npb:latest
31 | command: ["/work/NPB3.3-MPI/bin/ep.A.{{inputs.parameters.cpus}}"]
32 | nodeSelector:
33 | kubernetes.io/role: agent
34 | beta.kubernetes.io/os: linux
35 | type: virtual-kubelet
36 | tolerations:
37 | - key: virtual-kubelet.io/provider
38 | operator: Exists
--------------------------------------------------------------------------------
/examples/argo-test_bp.yaml:
--------------------------------------------------------------------------------
1 | # Example of loops using DAGs
2 | apiVersion: argoproj.io/v1alpha1
3 | kind: Workflow
4 | metadata:
5 | generateName: singularity-local-sif-test-
6 | spec:
7 | entrypoint: singularity-local-sif
8 | templates:
9 | - name: singularity-local-sif
10 | dag:
11 | tasks:
12 | - name: A
13 | template: test
14 | - name: test
15 | metadata:
16 | annotations:
17 | slurm-job.knoc.io/flags: "--mem=1G --nodelist=tie0,tie1,tie3"
18 | slurm-job.knoc.io/image-root: "./singularity/images"
19 | container:
20 | image: /alpine.sif
21 | command: ["hostname"]
22 | nodeSelector:
23 | kubernetes.io/role: agent
24 | beta.kubernetes.io/os: linux
25 | type: virtual-kubelet
26 | tolerations:
27 | - key: virtual-kubelet.io/provider
28 | operator: Exists
--------------------------------------------------------------------------------
/examples/argo-workflow-sample.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: argoproj.io/v1alpha1
3 | kind: Workflow
4 | metadata:
5 | generateName: steps-
6 | spec:
7 | entrypoint: hello
8 |
9 | templates:
10 | - name: hello
11 | metadata:
12 | annotations:
13 | slurm-job.knoc.io/flags: "--mem=2G --exclude=tie3,tie4"
14 | steps: # The type of this "template" is "steps"
15 | - - name: hello
16 | template: whalesay # We reference our second "template" here
17 | arguments:
18 | parameters: [{name: message, value: "hello1"}]
19 |
20 | - name: whalesay # The second "template" in this Workflow, it is referenced by "hello"
21 | metadata:
22 | annotations:
23 | slurm-job.knoc.io/flags: "--mem=2G --exclude=tie3,tie4"
24 | inputs:
25 | parameters:
26 | - name: message
27 | container: # The type of this "template" is "container"
28 | image: alpine:3.7
29 | command: [echo, "{{inputs.parameters.message}}"]
30 | nodeSelector:
31 | kubernetes.io/role: agent
32 | beta.kubernetes.io/os: linux
33 | type: virtual-kubelet
34 | tolerations:
35 | - key: virtual-kubelet.io/provider
36 | operator: Exists
--------------------------------------------------------------------------------
/examples/busyecho.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busyecho
5 | spec:
6 | containers:
7 | - image: busybox
8 | command:
9 | - echo
10 | args: [ "Hello from container" ]
11 | imagePullPolicy: Always
12 | name: nginx
13 | resources:
14 | requests:
15 | memory: 1G
16 | cpu: 1
17 | ports:
18 | - containerPort: 80
19 | name: http
20 | protocol: TCP
21 | - containerPort: 443
22 | name: https
23 | dnsPolicy: ClusterFirst
24 | nodeSelector:
25 | kubernetes.io/role: agent
26 | beta.kubernetes.io/os: linux
27 | type: virtual-kubelet
28 | tolerations:
29 | - key: virtual-kubelet.io/provider
30 | operator: Exists
31 |
--------------------------------------------------------------------------------
/examples/busyecho_k8s.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busyecho
5 | annotations:
6 | slurm-job.knoc.io/flags: "--job-name=testcoin"
7 | spec:
8 | containers:
9 | - image: argoproj/argosay:v2
10 | command:
11 | - sleep 30
12 | imagePullPolicy: Always
13 | name: busyecho
14 | dnsPolicy: ClusterFirst
15 | nodeSelector:
16 | kubernetes.io/role: agent
17 | beta.kubernetes.io/os: linux
18 | type: virtual-kubelet
19 | tolerations:
20 | - key: virtual-kubelet.io/provider
21 | operator: Exists
22 | - key: azure.com/aci
23 | effect: NoSchedule
24 |
--------------------------------------------------------------------------------
/examples/busyecho_with_cm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busyecho3
5 | spec:
6 | containers:
7 | - image: argoproj/argosay:v2
8 | command:
9 | - "sleep"
10 | args:
11 | - "30"
12 | imagePullPolicy: Always
13 | name: busyecho
14 | volumeMounts:
15 | - name: config-volume
16 | mountPath: /etc/config
17 | volumes:
18 | - name: config-volume
19 | configMap:
20 | name: game-config
21 | restartPolicy: Never
22 | dnsPolicy: ClusterFirst
23 | # nodeSelector:
24 | # kubernetes.io/role: agent
25 | # beta.kubernetes.io/os: linux
26 | # type: virtual-kubelet
27 | # tolerations:
28 | # - key: virtual-kubelet.io/provider
29 | # operator: Exists
30 | # - key: azure.com/aci
31 | # effect: NoSchedule
32 |
33 | # KUBERNETES_PORT_443_TCP_PORT=443
34 | # KUBERNETES_PORT=tcp://10.96.0.1:443
35 | # KUBERNETES_SERVICE_PORT=443
36 | # KUBERNETES_SERVICE_HOST=10.96.0.1
37 | # KUBERNETES_PORT_443_TCP_PROTO=tcp
38 | # KUBERNETES_SERVICE_PORT_HTTPS=443
39 | # KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
40 | # KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
41 |
--------------------------------------------------------------------------------
/examples/configMap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: game-config
5 | data:
6 | allowed: '"true"'
7 | enemies: aliens
8 | lives: "3"
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/CARV-ICS-FORTH/knoc
2 |
3 | go 1.12
4 |
5 | require (
6 | contrib.go.opencensus.io/exporter/ocagent v0.5.0
7 | github.com/akamensky/argparse v1.3.1
8 | github.com/containerd/containerd v1.0.2
9 | github.com/davidmz/go-pageant v1.0.1 // indirect
10 | github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
11 | github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
12 | github.com/google/go-cmp v0.5.2 // indirect
13 | github.com/pkg/sftp v1.13.0
14 | github.com/sfreiberg/simplessh v0.0.0-20180301191542-495cbb862a9c
15 | github.com/sirupsen/logrus v1.4.2
16 | github.com/virtual-kubelet/node-cli v0.1.2
17 | github.com/virtual-kubelet/virtual-kubelet v1.2.0
18 | go.opencensus.io v0.22.0
19 | golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
20 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
21 | golang.org/x/sys v0.0.0-20210927052749-1cf2251ac284 // indirect
22 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
23 | google.golang.org/grpc v1.23.1 // indirect
24 | gotest.tools v2.2.0+incompatible
25 | k8s.io/api v0.0.0
26 | k8s.io/apimachinery v0.0.0
27 | k8s.io/client-go v11.0.0+incompatible
28 | k8s.io/klog v0.3.3 // indirect
29 | k8s.io/kubernetes v1.15.2
30 | k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a
31 | )
32 |
33 | replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20190805144654-3d5bf3a310c1
34 |
35 | replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20190805144409-8484242760e7
36 |
37 | replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20190805143448-a07e59fb081d
38 |
39 | replace k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190805142138-368b2058237c
40 |
41 | replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20190805144531-3985229e1802
42 |
43 | replace k8s.io/cri-api => k8s.io/cri-api v0.0.0-20190531030430-6117653b35f1
44 |
45 | replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20190805142416-fd821fbbb94e
46 |
47 | replace k8s.io/kubelet => k8s.io/kubelet v0.0.0-20190805143852-517ff267f8d1
48 |
49 | replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20190805144128-269742da31dd
50 |
51 | replace k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719
52 |
53 | replace k8s.io/api => k8s.io/api v0.0.0-20190805141119-fdd30b57c827
54 |
55 | replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20190805144246-c01ee70854a1
56 |
57 | replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20190805143734-7f1675b90353
58 |
59 | replace k8s.io/component-base => k8s.io/component-base v0.0.0-20190805141645-3a5e5ac800ae
60 |
61 | replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20190805144012-2a1ed1f3d8a4
62 |
63 | replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190805143126-cdb999c96590
64 |
65 | replace k8s.io/metrics => k8s.io/metrics v0.0.0-20190805143318-16b07057415d
66 |
67 | replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20190805142637-3b65bc4bb24f
68 |
69 | replace k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190612205613-18da4a14b22b
70 |
71 | replace k8s.io/client-go => k8s.io/client-go v0.0.0-20190805141520-2fe0317bcee0
72 |
--------------------------------------------------------------------------------
/internal/README.md:
--------------------------------------------------------------------------------
1 | Copied from https://github.com/virtual-kubelet/virtual-kubelet/tree/master/internal.
2 |
3 | If not otherwise mentioned, Virtual Kubelet copyright/licensing applies.
4 |
--------------------------------------------------------------------------------
/internal/expansion/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2012 The Go Authors. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | * Redistributions in binary form must reproduce the above
10 | copyright notice, this list of conditions and the following disclaimer
11 | in the documentation and/or other materials provided with the
12 | distribution.
13 | * Neither the name of Google Inc. nor the names of its
14 | contributors may be used to endorse or promote products derived from
15 | this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/internal/expansion/README.md:
--------------------------------------------------------------------------------
1 | Copied from
2 | https://github.com/kubernetes/kubernetes/tree/master/third_party/forked/golang/expansion .
3 |
4 | This is to eliminate a direct dependency on kubernetes/kubernetes.
5 |
--------------------------------------------------------------------------------
/internal/expansion/expand.go:
--------------------------------------------------------------------------------
1 | package expansion
2 |
3 | import (
4 | "bytes"
5 | )
6 |
// Characters making up the $(var) reference syntax recognized by Expand.
const (
	operator        = '$' // introduces a reference; doubled ("$$") it escapes itself
	referenceOpener = '(' // opens a variable name
	referenceCloser = ')' // closes a variable name
)
12 |
13 | // syntaxWrap returns the input string wrapped by the expansion syntax.
14 | func syntaxWrap(input string) string {
15 | return string(operator) + string(referenceOpener) + input + string(referenceCloser)
16 | }
17 |
18 | // MappingFuncFor returns a mapping function for use with Expand that
19 | // implements the expansion semantics defined in the expansion spec; it
20 | // returns the input string wrapped in the expansion syntax if no mapping
21 | // for the input is found.
22 | func MappingFuncFor(context ...map[string]string) func(string) string {
23 | return func(input string) string {
24 | for _, vars := range context {
25 | val, ok := vars[input]
26 | if ok {
27 | return val
28 | }
29 | }
30 |
31 | return syntaxWrap(input)
32 | }
33 | }
34 |
35 | // Expand replaces variable references in the input string according to
36 | // the expansion spec using the given mapping function to resolve the
37 | // values of variables.
38 | func Expand(input string, mapping func(string) string) string {
39 | var buf bytes.Buffer
40 | checkpoint := 0
41 | for cursor := 0; cursor < len(input); cursor++ {
42 | if input[cursor] == operator && cursor+1 < len(input) {
43 | // Copy the portion of the input string since the last
44 | // checkpoint into the buffer
45 | buf.WriteString(input[checkpoint:cursor])
46 |
47 | // Attempt to read the variable name as defined by the
48 | // syntax from the input string
49 | read, isVar, advance := tryReadVariableName(input[cursor+1:])
50 |
51 | if isVar {
52 | // We were able to read a variable name correctly;
53 | // apply the mapping to the variable name and copy the
54 | // bytes into the buffer
55 | buf.WriteString(mapping(read))
56 | } else {
57 | // Not a variable name; copy the read bytes into the buffer
58 | buf.WriteString(read)
59 | }
60 |
61 | // Advance the cursor in the input string to account for
62 | // bytes consumed to read the variable name expression
63 | cursor += advance
64 |
65 | // Advance the checkpoint in the input string
66 | checkpoint = cursor + 1
67 | }
68 | }
69 |
70 | // Return the buffer and any remaining unwritten bytes in the
71 | // input string.
72 | return buf.String() + input[checkpoint:]
73 | }
74 |
75 | // tryReadVariableName attempts to read a variable name from the input
76 | // string and returns the content read from the input, whether that content
77 | // represents a variable name to perform mapping on, and the number of bytes
78 | // consumed in the input string.
79 | //
80 | // The input string is assumed not to contain the initial operator.
81 | func tryReadVariableName(input string) (string, bool, int) {
82 | switch input[0] {
83 | case operator:
84 | // Escaped operator; return it.
85 | return input[0:1], false, 1
86 | case referenceOpener:
87 | // Scan to expression closer
88 | for i := 1; i < len(input); i++ {
89 | if input[i] == referenceCloser {
90 | return input[1:i], true, i + 1
91 | }
92 | }
93 |
94 | // Incomplete reference; return it.
95 | return string(operator) + string(referenceOpener), false, 1
96 | default:
97 | // Not the beginning of an expression, ie, an operator
98 | // that doesn't begin an expression. Return the operator
99 | // and the first rune in the string.
100 | return (string(operator) + string(input[0])), false, 1
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/internal/expansion/expand_test.go:
--------------------------------------------------------------------------------
1 | package expansion
2 |
3 | import (
4 | "testing"
5 |
6 | api "k8s.io/kubernetes/pkg/apis/core"
7 | )
8 |
9 | func TestMapReference(t *testing.T) {
10 | envs := []api.EnvVar{
11 | {
12 | Name: "FOO",
13 | Value: "bar",
14 | },
15 | {
16 | Name: "ZOO",
17 | Value: "$(FOO)-1",
18 | },
19 | {
20 | Name: "BLU",
21 | Value: "$(ZOO)-2",
22 | },
23 | }
24 |
25 | declaredEnv := map[string]string{
26 | "FOO": "bar",
27 | "ZOO": "$(FOO)-1",
28 | "BLU": "$(ZOO)-2",
29 | }
30 |
31 | serviceEnv := map[string]string{}
32 |
33 | mapping := MappingFuncFor(declaredEnv, serviceEnv)
34 |
35 | for _, env := range envs {
36 | declaredEnv[env.Name] = Expand(env.Value, mapping)
37 | }
38 |
39 | expectedEnv := map[string]string{
40 | "FOO": "bar",
41 | "ZOO": "bar-1",
42 | "BLU": "bar-1-2",
43 | }
44 |
45 | for k, v := range expectedEnv {
46 | if e, a := v, declaredEnv[k]; e != a {
47 | t.Errorf("Expected %v, got %v", e, a)
48 | } else {
49 | delete(declaredEnv, k)
50 | }
51 | }
52 |
53 | if len(declaredEnv) != 0 {
54 | t.Errorf("Unexpected keys in declared env: %v", declaredEnv)
55 | }
56 | }
57 |
58 | func TestMapping(t *testing.T) {
59 | context := map[string]string{
60 | "VAR_A": "A",
61 | "VAR_B": "B",
62 | "VAR_C": "C",
63 | "VAR_REF": "$(VAR_A)",
64 | "VAR_EMPTY": "",
65 | }
66 | mapping := MappingFuncFor(context)
67 |
68 | doExpansionTest(t, mapping)
69 | }
70 |
71 | func TestMappingDual(t *testing.T) {
72 | context := map[string]string{
73 | "VAR_A": "A",
74 | "VAR_EMPTY": "",
75 | }
76 | context2 := map[string]string{
77 | "VAR_B": "B",
78 | "VAR_C": "C",
79 | "VAR_REF": "$(VAR_A)",
80 | }
81 | mapping := MappingFuncFor(context, context2)
82 |
83 | doExpansionTest(t, mapping)
84 | }
85 |
// doExpansionTest runs the shared table of expansion cases against the
// given mapping function. Callers are expected to supply a mapping that
// resolves VAR_A, VAR_B, VAR_C, VAR_REF ("$(VAR_A)") and VAR_EMPTY ("");
// all other names must be unresolvable so pass-through cases hold.
func doExpansionTest(t *testing.T, mapping func(string) string) {
	cases := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "whole string",
			input:    "$(VAR_A)",
			expected: "A",
		},
		{
			name:     "repeat",
			input:    "$(VAR_A)-$(VAR_A)",
			expected: "A-A",
		},
		{
			name:     "beginning",
			input:    "$(VAR_A)-1",
			expected: "A-1",
		},
		{
			name:     "middle",
			input:    "___$(VAR_B)___",
			expected: "___B___",
		},
		{
			name:     "end",
			input:    "___$(VAR_C)",
			expected: "___C",
		},
		{
			name:     "compound",
			input:    "$(VAR_A)_$(VAR_B)_$(VAR_C)",
			expected: "A_B_C",
		},
		{
			name:     "escape & expand",
			input:    "$$(VAR_B)_$(VAR_A)",
			expected: "$(VAR_B)_A",
		},
		{
			name:     "compound escape",
			input:    "$$(VAR_A)_$$(VAR_B)",
			expected: "$(VAR_A)_$(VAR_B)",
		},
		{
			name:     "mixed in escapes",
			input:    "f000-$$VAR_A",
			expected: "f000-$VAR_A",
		},
		{
			name:     "backslash escape ignored",
			input:    "foo\\$(VAR_C)bar",
			expected: "foo\\Cbar",
		},
		{
			name:     "backslash escape ignored",
			input:    "foo\\\\$(VAR_C)bar",
			expected: "foo\\\\Cbar",
		},
		{
			name:     "lots of backslashes",
			input:    "foo\\\\\\\\$(VAR_A)bar",
			expected: "foo\\\\\\\\Abar",
		},
		{
			name:     "nested var references",
			input:    "$(VAR_A$(VAR_B))",
			expected: "$(VAR_A$(VAR_B))",
		},
		{
			name:     "nested var references second type",
			input:    "$(VAR_A$(VAR_B)",
			expected: "$(VAR_A$(VAR_B)",
		},
		{
			name:     "value is a reference",
			input:    "$(VAR_REF)",
			expected: "$(VAR_A)",
		},
		{
			name:     "value is a reference x 2",
			input:    "%%$(VAR_REF)--$(VAR_REF)%%",
			expected: "%%$(VAR_A)--$(VAR_A)%%",
		},
		{
			name:     "empty var",
			input:    "foo$(VAR_EMPTY)bar",
			expected: "foobar",
		},
		{
			name:     "unterminated expression",
			input:    "foo$(VAR_Awhoops!",
			expected: "foo$(VAR_Awhoops!",
		},
		{
			name:     "expression without operator",
			input:    "f00__(VAR_A)__",
			expected: "f00__(VAR_A)__",
		},
		{
			name:     "shell special vars pass through",
			input:    "$?_boo_$!",
			expected: "$?_boo_$!",
		},
		{
			name:     "bare operators are ignored",
			input:    "$VAR_A",
			expected: "$VAR_A",
		},
		{
			name:     "undefined vars are passed through",
			input:    "$(VAR_DNE)",
			expected: "$(VAR_DNE)",
		},
		{
			name:     "multiple (even) operators, var undefined",
			input:    "$$$$$$(BIG_MONEY)",
			expected: "$$$(BIG_MONEY)",
		},
		{
			name:     "multiple (even) operators, var defined",
			input:    "$$$$$$(VAR_A)",
			expected: "$$$(VAR_A)",
		},
		{
			name:     "multiple (odd) operators, var undefined",
			input:    "$$$$$$$(GOOD_ODDS)",
			expected: "$$$$(GOOD_ODDS)",
		},
		{
			name:     "multiple (odd) operators, var defined",
			input:    "$$$$$$$(VAR_A)",
			expected: "$$$A",
		},
		{
			name:     "missing open expression",
			input:    "$VAR_A)",
			expected: "$VAR_A)",
		},
		{
			name:     "shell syntax ignored",
			input:    "${VAR_A}",
			expected: "${VAR_A}",
		},
		{
			name:     "trailing incomplete expression not consumed",
			input:    "$(VAR_B)_______$(A",
			expected: "B_______$(A",
		},
		{
			name:     "trailing incomplete expression, no content, is not consumed",
			input:    "$(VAR_C)_______$(",
			expected: "C_______$(",
		},
		{
			name:     "operator at end of input string is preserved",
			input:    "$(VAR_A)foobarzab$",
			expected: "Afoobarzab$",
		},
		{
			name:     "shell escaped incomplete expr",
			input:    "foo-\\$(VAR_A",
			expected: "foo-\\$(VAR_A",
		},
		{
			name:     "lots of $( in middle",
			input:    "--$($($($($--",
			expected: "--$($($($($--",
		},
		{
			name:     "lots of $( in beginning",
			input:    "$($($($($--foo$(",
			expected: "$($($($($--foo$(",
		},
		{
			name:     "lots of $( at end",
			input:    "foo0--$($($($(",
			expected: "foo0--$($($($(",
		},
		{
			name:     "escaped operators in variable names are not escaped",
			input:    "$(foo$$var)",
			expected: "$(foo$$var)",
		},
		{
			name:     "newline not expanded",
			input:    "\n",
			expected: "\n",
		},
	}

	// Each case is a pure input/output check on Expand with the shared mapping.
	for _, tc := range cases {
		expanded := Expand(tc.input, mapping)
		if e, a := tc.expected, expanded; e != a {
			t.Errorf("%v: expected %q, got %q", tc.name, e, a)
		}
	}
}
286 |
--------------------------------------------------------------------------------
/internal/manager/doc.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2017 The virtual-kubelet authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package manager provides access to kubernetes resources for providers.
16 | //
17 | // DEPRECATION WARNING:
18 | // Though this package is still in use, it should be considered deprecated as it
19 | // is just wrapping a k8s client and not much else.
20 | // Implementers should look at replacing their use of this with something else.
21 | package manager
22 |
--------------------------------------------------------------------------------
/internal/manager/resource.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2017 The virtual-kubelet authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package manager
16 |
17 | import (
18 | v1 "k8s.io/api/core/v1"
19 | "k8s.io/apimachinery/pkg/labels"
20 | corev1listers "k8s.io/client-go/listers/core/v1"
21 |
22 | "github.com/virtual-kubelet/virtual-kubelet/log"
23 | )
24 |
// ResourceManager acts as a passthrough to a cache (lister) for pods assigned to the current node.
// It is also a passthrough to a cache (lister) for Kubernetes secrets and config maps.
type ResourceManager struct {
	podLister       corev1listers.PodLister       // backs GetPods
	secretLister    corev1listers.SecretLister    // backs GetSecret
	configMapLister corev1listers.ConfigMapLister // backs GetConfigMap
	serviceLister   corev1listers.ServiceLister   // backs ListServices
}
33 |
34 | // NewResourceManager returns a ResourceManager with the internal maps initialized.
35 | func NewResourceManager(podLister corev1listers.PodLister, secretLister corev1listers.SecretLister, configMapLister corev1listers.ConfigMapLister, serviceLister corev1listers.ServiceLister) (*ResourceManager, error) {
36 | rm := ResourceManager{
37 | podLister: podLister,
38 | secretLister: secretLister,
39 | configMapLister: configMapLister,
40 | serviceLister: serviceLister,
41 | }
42 | return &rm, nil
43 | }
44 |
45 | // GetPods returns a list of all known pods assigned to this virtual node.
46 | func (rm *ResourceManager) GetPods() []*v1.Pod {
47 | l, err := rm.podLister.List(labels.Everything())
48 | if err == nil {
49 | return l
50 | }
51 | log.L.Errorf("failed to fetch pods from lister: %v", err)
52 | return make([]*v1.Pod, 0)
53 | }
54 |
55 | // GetConfigMap retrieves the specified config map from the cache.
56 | func (rm *ResourceManager) GetConfigMap(name, namespace string) (*v1.ConfigMap, error) {
57 | return rm.configMapLister.ConfigMaps(namespace).Get(name)
58 | }
59 |
60 | // GetSecret retrieves the specified secret from Kubernetes.
61 | func (rm *ResourceManager) GetSecret(name, namespace string) (*v1.Secret, error) {
62 | return rm.secretLister.Secrets(namespace).Get(name)
63 | }
64 |
65 | // ListServices retrieves the list of services from Kubernetes.
66 | func (rm *ResourceManager) ListServices() ([]*v1.Service, error) {
67 | return rm.serviceLister.List(labels.Everything())
68 | }
69 |
--------------------------------------------------------------------------------
/internal/manager/resource_test.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2017 The virtual-kubelet authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package manager_test
16 |
17 | import (
18 | "testing"
19 |
20 | "github.com/CARV-ICS-FORTH/knoc/internal/manager"
21 | testutil "github.com/CARV-ICS-FORTH/knoc/internal/test/util"
22 | "gotest.tools/assert"
23 | v1 "k8s.io/api/core/v1"
24 | "k8s.io/apimachinery/pkg/api/errors"
25 | corev1listers "k8s.io/client-go/listers/core/v1"
26 | "k8s.io/client-go/tools/cache"
27 | )
28 |
29 | // TestGetPods verifies that the resource manager acts as a passthrough to a pod lister.
30 | func TestGetPods(t *testing.T) {
31 | var (
32 | lsPods = []*v1.Pod{
33 | testutil.FakePodWithSingleContainer("namespace-0", "name-0", "image-0"),
34 | testutil.FakePodWithSingleContainer("namespace-1", "name-1", "image-1"),
35 | }
36 | )
37 |
38 | // Create a pod lister that will list the pods defined above.
39 | indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
40 | for _, pod := range lsPods {
41 | assert.NilError(t, indexer.Add(pod))
42 | }
43 | podLister := corev1listers.NewPodLister(indexer)
44 |
45 | // Create a new instance of the resource manager based on the pod lister.
46 | rm, err := manager.NewResourceManager(podLister, nil, nil, nil)
47 | if err != nil {
48 | t.Fatal(err)
49 | }
50 |
51 | // Check that the resource manager returns two pods in the call to "GetPods".
52 | rmPods := rm.GetPods()
53 | if len(rmPods) != len(lsPods) {
54 | t.Fatalf("expected %d pods, found %d", len(lsPods), len(rmPods))
55 | }
56 | }
57 |
58 | // TestGetSecret verifies that the resource manager acts as a passthrough to a secret lister.
59 | func TestGetSecret(t *testing.T) {
60 | var (
61 | lsSecrets = []*v1.Secret{
62 | testutil.FakeSecret("namespace-0", "name-0", map[string]string{"key-0": "val-0"}),
63 | testutil.FakeSecret("namespace-1", "name-1", map[string]string{"key-1": "val-1"}),
64 | }
65 | )
66 |
67 | // Create a secret lister that will list the secrets defined above.
68 | indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
69 | for _, secret := range lsSecrets {
70 | assert.NilError(t, indexer.Add(secret))
71 | }
72 | secretLister := corev1listers.NewSecretLister(indexer)
73 |
74 | // Create a new instance of the resource manager based on the secret lister.
75 | rm, err := manager.NewResourceManager(nil, secretLister, nil, nil)
76 | if err != nil {
77 | t.Fatal(err)
78 | }
79 |
80 | // Get the secret with coordinates "namespace-0/name-0".
81 | secret, err := rm.GetSecret("name-0", "namespace-0")
82 | if err != nil {
83 | t.Fatal(err)
84 | }
85 | value := secret.Data["key-0"]
86 | if string(value) != "val-0" {
87 | t.Fatal("got unexpected value", string(value))
88 | }
89 |
90 | // Try to get a secret that does not exist, and make sure we've got a "not found" error as a response.
91 | _, err = rm.GetSecret("name-X", "namespace-X")
92 | if err == nil || !errors.IsNotFound(err) {
93 | t.Fatalf("expected a 'not found' error, got %v", err)
94 | }
95 | }
96 |
97 | // TestGetConfigMap verifies that the resource manager acts as a passthrough to a config map lister.
98 | func TestGetConfigMap(t *testing.T) {
99 | var (
100 | lsConfigMaps = []*v1.ConfigMap{
101 | testutil.FakeConfigMap("namespace-0", "name-0", map[string]string{"key-0": "val-0"}),
102 | testutil.FakeConfigMap("namespace-1", "name-1", map[string]string{"key-1": "val-1"}),
103 | }
104 | )
105 |
106 | // Create a config map lister that will list the config maps defined above.
107 | indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
108 | for _, secret := range lsConfigMaps {
109 | assert.NilError(t, indexer.Add(secret))
110 | }
111 | configMapLister := corev1listers.NewConfigMapLister(indexer)
112 |
113 | // Create a new instance of the resource manager based on the config map lister.
114 | rm, err := manager.NewResourceManager(nil, nil, configMapLister, nil)
115 | if err != nil {
116 | t.Fatal(err)
117 | }
118 |
119 | // Get the config map with coordinates "namespace-0/name-0".
120 | configMap, err := rm.GetConfigMap("name-0", "namespace-0")
121 | if err != nil {
122 | t.Fatal(err)
123 | }
124 | value := configMap.Data["key-0"]
125 | if value != "val-0" {
126 | t.Fatal("got unexpected value", string(value))
127 | }
128 |
129 | // Try to get a configmap that does not exist, and make sure we've got a "not found" error as a response.
130 | _, err = rm.GetConfigMap("name-X", "namespace-X")
131 | if err == nil || !errors.IsNotFound(err) {
132 | t.Fatalf("expected a 'not found' error, got %v", err)
133 | }
134 | }
135 |
136 | // TestListServices verifies that the resource manager acts as a passthrough to a service lister.
137 | func TestListServices(t *testing.T) {
138 | var (
139 | lsServices = []*v1.Service{
140 | testutil.FakeService("namespace-0", "service-0", "1.2.3.1", "TCP", 8081),
141 | testutil.FakeService("namespace-1", "service-1", "1.2.3.2", "TCP", 8082),
142 | }
143 | )
144 |
145 | // Create a pod lister that will list the pods defined above.
146 | indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
147 | for _, service := range lsServices {
148 | assert.NilError(t, indexer.Add(service))
149 | }
150 | serviceLister := corev1listers.NewServiceLister(indexer)
151 |
152 | // Create a new instance of the resource manager based on the pod lister.
153 | rm, err := manager.NewResourceManager(nil, nil, nil, serviceLister)
154 | if err != nil {
155 | t.Fatal(err)
156 | }
157 |
158 | // Check that the resource manager returns two pods in the call to "GetPods".
159 | services, err := rm.ListServices()
160 | if err != nil {
161 | t.Fatal(err)
162 | }
163 | if len(lsServices) != len(services) {
164 | t.Fatalf("expected %d services, found %d", len(lsServices), len(services))
165 | }
166 | }
167 |
--------------------------------------------------------------------------------
/internal/test/e2e/framework/framework.go:
--------------------------------------------------------------------------------
1 | package framework
2 |
3 | import (
4 | "time"
5 |
6 | "k8s.io/client-go/kubernetes"
7 | "k8s.io/client-go/rest"
8 | "k8s.io/client-go/tools/clientcmd"
9 | )
10 |
// Framework encapsulates the configuration for the current run, and provides helper methods to be used during testing.
type Framework struct {
	KubeClient   kubernetes.Interface // client for the target cluster (in-cluster or kubeconfig-based)
	Namespace    string               // namespace used by the framework's test helpers
	NodeName     string               // name of the node under test, used by node watch helpers
	WatchTimeout time.Duration        // upper bound applied to watch-based wait operations
}
18 |
19 | // NewTestingFramework returns a new instance of the testing framework.
20 | func NewTestingFramework(kubeconfig, namespace, nodeName string, watchTimeout time.Duration) *Framework {
21 | return &Framework{
22 | KubeClient: createKubeClient(kubeconfig),
23 | Namespace: namespace,
24 | NodeName: nodeName,
25 | WatchTimeout: watchTimeout,
26 | }
27 | }
28 |
29 | // createKubeClient creates a new Kubernetes client based on the specified kubeconfig file.
30 | // If no value for kubeconfig is specified, in-cluster configuration is assumed.
31 | func createKubeClient(kubeconfig string) *kubernetes.Clientset {
32 | var (
33 | cfg *rest.Config
34 | err error
35 | )
36 | if kubeconfig == "" {
37 | cfg, err = rest.InClusterConfig()
38 | } else {
39 | cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
40 | }
41 | if err != nil {
42 | panic(err)
43 | }
44 | return kubernetes.NewForConfigOrDie(cfg)
45 | }
46 |
--------------------------------------------------------------------------------
/internal/test/e2e/framework/node.go:
--------------------------------------------------------------------------------
1 | package framework
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/apimachinery/pkg/fields"
10 | "k8s.io/apimachinery/pkg/runtime"
11 | watchapi "k8s.io/apimachinery/pkg/watch"
12 | "k8s.io/client-go/tools/cache"
13 | "k8s.io/client-go/tools/watch"
14 | )
15 |
16 | // WaitUntilNodeCondition establishes a watch on the vk node.
17 | // Then, it waits for the specified condition function to be verified.
18 | func (f *Framework) WaitUntilNodeCondition(fn watch.ConditionFunc) error {
19 | // Watch for updates to the Pod resource until fn is satisfied, or until the timeout is reached.
20 | ctx, cancel := context.WithTimeout(context.Background(), f.WatchTimeout)
21 | defer cancel()
22 |
23 | // Create a field selector that matches the specified Pod resource.
24 | fs := fields.OneTermEqualSelector("metadata.name", f.NodeName).String()
25 | // Create a ListWatch so we can receive events for the matched Pod resource.
26 | lw := &cache.ListWatch{
27 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
28 | options.FieldSelector = fs
29 | return f.KubeClient.CoreV1().Nodes().List(options)
30 | },
31 | WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
32 | options.FieldSelector = fs
33 | return f.KubeClient.CoreV1().Nodes().Watch(options)
34 | },
35 | }
36 |
37 | last, err := watch.UntilWithSync(ctx, lw, &corev1.Node{}, nil, fn)
38 | if err != nil {
39 | return err
40 | }
41 | if last == nil {
42 | return fmt.Errorf("no events received for node %q", f.NodeName)
43 | }
44 | return nil
45 | }
46 |
47 | // DeleteNode deletes the vk node used by the framework
48 | func (f *Framework) DeleteNode(ctx context.Context) error {
49 | var gracePeriod int64
50 | propagation := metav1.DeletePropagationBackground
51 | opts := metav1.DeleteOptions{
52 | PropagationPolicy: &propagation,
53 | GracePeriodSeconds: &gracePeriod,
54 | }
55 | return f.KubeClient.CoreV1().Nodes().Delete(f.NodeName, &opts)
56 | }
57 |
// GetNode gets the vk node used by the framework.
func (f *Framework) GetNode(ctx context.Context) (*corev1.Node, error) {
	return f.KubeClient.CoreV1().Nodes().Get(f.NodeName, metav1.GetOptions{})
}
62 |
--------------------------------------------------------------------------------
/internal/test/e2e/framework/pod.go:
--------------------------------------------------------------------------------
1 | package framework
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "strings"
7 |
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/fields"
11 | "k8s.io/apimachinery/pkg/runtime"
12 | watchapi "k8s.io/apimachinery/pkg/watch"
13 | "k8s.io/client-go/tools/cache"
14 | "k8s.io/client-go/tools/watch"
15 | podutil "k8s.io/kubernetes/pkg/api/v1/pod"
16 | )
17 |
// CreateDummyPodObjectWithPrefix creates a dummy pod object using the specified prefix as the value of .metadata.generateName.
// A variable number of strings can be provided.
// For each one of these strings, a container that uses the string as its image will be appended to the pod.
// This method DOES NOT create the pod in the Kubernetes API.
func (f *Framework) CreateDummyPodObjectWithPrefix(testName string, prefix string, images ...string) *corev1.Pod {
	// Save the test name: fold the (sub)test name into the prefix so generated pod
	// names are traceable to the test, stripping the parent's test name to stay
	// within Kubernetes' 63-character limit for pod names.
	if testName != "" {
		testName = stripParentTestName(strings.ToLower(testName))
		prefix = prefix + "-" + testName + "-"
	}
	enableServiceLink := false
	hellocarv := []string{"echo", "hello_carv"} // trivial command every generated container runs
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: prefix,
			Namespace:    f.Namespace,
		},
		Spec: corev1.PodSpec{
			NodeName:           f.NodeName, // pin the pod to the virtual-kubelet node under test
			EnableServiceLinks: &enableServiceLink,
		},
	}
	// One container per requested image; the index makes container names unique.
	for idx, img := range images {
		pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
			Name:    fmt.Sprintf("%s%d", prefix, idx),
			Image:   img,
			Command: hellocarv,
		})
	}
	return pod
}
49 |
// CreatePod creates the specified pod in the Kubernetes API, in the framework's namespace.
func (f *Framework) CreatePod(ctx context.Context, pod *corev1.Pod) (*corev1.Pod, error) {
	return f.KubeClient.CoreV1().Pods(f.Namespace).Create(pod)
}
54 |
// DeletePod deletes the pod with the specified name and namespace in the Kubernetes API using the default grace period.
// Unlike DeletePodImmediately, the pod is allowed its normal graceful termination.
func (f *Framework) DeletePod(ctx context.Context, namespace, name string) error {
	return f.KubeClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{})
}
59 |
60 | // DeletePodImmediately forcibly deletes the pod with the specified name and namespace in the Kubernetes API.
61 | // This is equivalent to running "kubectl delete --force --grace-period 0 --namespace pod ".
62 | func (f *Framework) DeletePodImmediately(ctx context.Context, namespace, name string) error {
63 | grace := int64(0)
64 | propagation := metav1.DeletePropagationBackground
65 | return f.KubeClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{
66 | GracePeriodSeconds: &grace,
67 | PropagationPolicy: &propagation,
68 | })
69 | }
70 |
71 | // WaitUntilPodCondition establishes a watch on the pod with the specified name and namespace.
72 | // Then, it waits for the specified condition function to be verified.
73 | func (f *Framework) WaitUntilPodCondition(namespace, name string, fn watch.ConditionFunc) (*corev1.Pod, error) {
74 | // Watch for updates to the Pod resource until fn is satisfied, or until the timeout is reached.
75 | ctx, cfn := context.WithTimeout(context.Background(), f.WatchTimeout)
76 | defer cfn()
77 | // Create a field selector that matches the specified Pod resource.
78 | fs := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace==%s,metadata.name==%s", namespace, name))
79 | // Create a ListWatch so we can receive events for the matched Pod resource.
80 | lw := &cache.ListWatch{
81 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
82 | options.FieldSelector = fs.String()
83 | return f.KubeClient.CoreV1().Pods(namespace).List(options)
84 | },
85 | WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
86 | options.FieldSelector = fs.String()
87 | return f.KubeClient.CoreV1().Pods(namespace).Watch(options)
88 | },
89 | }
90 | last, err := watch.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, fn)
91 | if err != nil {
92 | return nil, err
93 | }
94 | if last == nil {
95 | return nil, fmt.Errorf("no events received for pod %q", name)
96 | }
97 | pod := last.Object.(*corev1.Pod)
98 | return pod, nil
99 | }
100 |
// WaitUntilPodReady blocks until the pod with the specified name and namespace is reported to be running and ready.
// "Ready" here means: phase is Running, the pod passes podutil.IsPodReady, and a pod IP has been assigned.
func (f *Framework) WaitUntilPodReady(namespace, name string) (*corev1.Pod, error) {
	return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
		pod := event.Object.(*corev1.Pod)
		return pod.Status.Phase == corev1.PodRunning && podutil.IsPodReady(pod) && pod.Status.PodIP != "", nil
	})
}
108 |
// WaitUntilPodDeleted blocks until the pod with the specified name and namespace is deleted from apiserver.
// A pod that is merely marked for deletion (non-nil DeletionTimestamp) also satisfies the condition.
func (f *Framework) WaitUntilPodDeleted(namespace, name string) (*corev1.Pod, error) {
	return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
		pod := event.Object.(*corev1.Pod)
		return event.Type == watchapi.Deleted || pod.ObjectMeta.DeletionTimestamp != nil, nil
	})
}
116 |
117 | // WaitUntilPodInPhase blocks until the pod with the specified name and namespace is in one of the specified phases
118 | func (f *Framework) WaitUntilPodInPhase(namespace, name string, phases ...corev1.PodPhase) (*corev1.Pod, error) {
119 | return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
120 | pod := event.Object.(*corev1.Pod)
121 | for _, p := range phases {
122 | if pod.Status.Phase == p {
123 | return true, nil
124 | }
125 | }
126 | return false, nil
127 | })
128 | }
129 |
130 | // WaitUntilPodEventWithReason establishes a watch on events involving the specified pod.
131 | // Then, it waits for an event with the specified reason to be created/updated.
132 | func (f *Framework) WaitUntilPodEventWithReason(pod *corev1.Pod, reason string) error {
133 | // Watch for updates to the Event resource until fn is satisfied, or until the timeout is reached.
134 | ctx, cfn := context.WithTimeout(context.Background(), f.WatchTimeout)
135 | defer cfn()
136 |
137 | // Create a field selector that matches Event resources involving the specified pod.
138 | fs := fields.ParseSelectorOrDie(fmt.Sprintf("involvedObject.kind==Pod,involvedObject.uid==%s", pod.UID))
139 | // Create a ListWatch so we can receive events for the matched Event resource.
140 | lw := &cache.ListWatch{
141 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
142 | options.FieldSelector = fs.String()
143 | return f.KubeClient.CoreV1().Events(pod.Namespace).List(options)
144 | },
145 | WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
146 | options.FieldSelector = fs.String()
147 | return f.KubeClient.CoreV1().Events(pod.Namespace).Watch(options)
148 | },
149 | }
150 |
151 | last, err := watch.UntilWithSync(ctx, lw, &corev1.Event{}, nil, func(event watchapi.Event) (b bool, e error) {
152 | switch event.Type {
153 | case watchapi.Error:
154 | fallthrough
155 | case watchapi.Deleted:
156 | return false, fmt.Errorf("got event of unexpected type %q", event.Type)
157 | default:
158 | return event.Object.(*corev1.Event).Reason == reason, nil
159 | }
160 | })
161 | if err != nil {
162 | return err
163 | }
164 | if last == nil {
165 | return fmt.Errorf("no events involving pod \"%s/%s\" have been seen", pod.Namespace, pod.Name)
166 | }
167 | return nil
168 | }
169 |
// GetRunningPodsFromProvider gets the running pods from the provider of the virtual kubelet
// by querying the node's "proxy/runningpods/" subresource, which is served by the
// virtual-kubelet process itself rather than computed by the API server.
func (f *Framework) GetRunningPodsFromProvider(ctx context.Context) (*corev1.PodList, error) {
	result := &corev1.PodList{}

	err := f.KubeClient.CoreV1().
		RESTClient().
		Get().
		Resource("nodes").
		Name(f.NodeName).
		SubResource("proxy").
		Suffix("runningpods/").
		Do().
		Into(result)

	return result, err
}
186 |
187 | // GetRunningPodsFromKubernetes gets the running pods from the provider of the virtual kubelet
188 | func (f *Framework) GetRunningPodsFromKubernetes(ctx context.Context) (*corev1.PodList, error) {
189 | result := &corev1.PodList{}
190 |
191 | err := f.KubeClient.CoreV1().
192 | RESTClient().
193 | Get().
194 | Resource("nodes").
195 | Name(f.NodeName).
196 | SubResource("proxy").
197 | Suffix("runningpods/").
198 | Do().
199 | Into(result)
200 |
201 | return result, err
202 | }
203 |
// stripParentTestName strips out the parent's test name from the input (in the form of 'TestParent/TestChild').
// Some test cases use their name as the pod name for testing purpose, and sometimes it might exceed 63
// characters (Kubernetes's limit for pod name). This function ensures that we strip out the parent's
// test name to decrease the length of the pod name.
func stripParentTestName(name string) string {
	// Keep only the segment after the last '/'; names without a '/' pass through unchanged.
	if idx := strings.LastIndex(name, "/"); idx >= 0 {
		return name[idx+1:]
	}
	return name
}
215 |
--------------------------------------------------------------------------------
/internal/test/e2e/framework/stats.go:
--------------------------------------------------------------------------------
1 | package framework
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "strconv"
7 |
8 | "k8s.io/apimachinery/pkg/util/net"
9 | stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
10 | )
11 |
12 | // GetStatsSummary queries the /stats/summary endpoint of the virtual-kubelet and returns the Summary object obtained as a response.
13 | func (f *Framework) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
14 | // Query the /stats/summary endpoint.
15 | b, err := f.KubeClient.CoreV1().
16 | RESTClient().
17 | Get().
18 | Namespace(f.Namespace).
19 | Resource("pods").
20 | SubResource("proxy").
21 | Name(net.JoinSchemeNamePort("http", f.NodeName, strconv.Itoa(10255))).
22 | Suffix("/stats/summary").DoRaw()
23 | if err != nil {
24 | return nil, err
25 | }
26 | // Unmarshal the response as a Summary object and return it.
27 | res := &stats.Summary{}
28 | if err := json.Unmarshal(b, res); err != nil {
29 | return nil, err
30 | }
31 | return res, nil
32 | }
33 |
--------------------------------------------------------------------------------
/internal/test/e2e/main_test.go:
--------------------------------------------------------------------------------
1 | package e2e
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "testing"
7 |
8 | vke2e "github.com/CARV-ICS-FORTH/knoc/test/e2e"
9 |
10 | v1 "k8s.io/api/core/v1"
11 | )
12 |
13 | const (
14 | defaultNamespace = v1.NamespaceDefault
15 | defaultNodeName = "vkubelet-knoc-0"
16 | )
17 |
18 | var (
19 | kubeconfig string
20 | namespace string
21 | nodeName string
22 | )
23 |
// go1.13 compatibility cf. https://github.com/golang/go/issues/31859
// Package-level var initializers run before init(), so testing.Init is guaranteed
// to register the testing flags before flag.Parse is called in init() below.
var _ = func() bool {
	testing.Init()
	return true
}()
29 |
// init registers and parses the command-line flags that configure the e2e suite.
func init() {
	flag.StringVar(&kubeconfig, "kubeconfig", "", "path to the kubeconfig file to use when running the test suite outside a kubernetes cluster")
	flag.StringVar(&namespace, "namespace", defaultNamespace, "the name of the kubernetes namespace to use for running the test suite (i.e. where to create pods)")
	flag.StringVar(&nodeName, "node-name", defaultNodeName, "the name of the virtual-kubelet node to test")
	flag.Parse()
}
36 |
37 | // Provider-specific setup function
38 | func setup() error {
39 | fmt.Println("Setting up end-to-end test suite for knoc provider...")
40 | return nil
41 | }
42 |
43 | // Provider-specific teardown function
44 | func teardown() error {
45 | fmt.Println("Tearing down end-to-end test suite for knoc provider...")
46 | return nil
47 | }
48 |
49 | // Provider-specific shouldSkipTest function
50 | func shouldSkipTest(testName string) bool {
51 | return false
52 | }
53 |
54 | // TestEndToEnd creates and runs the end-to-end test suite for virtual kubelet
55 | func TestEndToEnd(t *testing.T) {
56 | setDefaults()
57 | config := vke2e.EndToEndTestSuiteConfig{
58 | Kubeconfig: kubeconfig,
59 | Namespace: namespace,
60 | NodeName: nodeName,
61 | Setup: setup,
62 | Teardown: teardown,
63 | ShouldSkipTest: shouldSkipTest,
64 | }
65 | ts := vke2e.NewEndToEndTestSuite(config)
66 | ts.Run(t)
67 | }
68 |
69 | // setDefaults sets sane defaults in case no values (or empty ones) have been provided.
70 | func setDefaults() {
71 | if namespace == "" {
72 | namespace = defaultNamespace
73 | }
74 | if nodeName == "" {
75 | nodeName = defaultNodeName
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/internal/test/suite/suite.go:
--------------------------------------------------------------------------------
1 | package suite
2 |
3 | import (
4 | "reflect"
5 | "runtime/debug"
6 | "strings"
7 | "testing"
8 | )
9 |
// TestFunc defines the test function in a test case.
type TestFunc func(*testing.T)

// SetUpFunc sets up provider-specific resource in the test suite.
type SetUpFunc func() error

// TeardownFunc tears down provider-specific resources from the test suite.
type TeardownFunc func() error

// ShouldSkipTestFunc determines whether the test suite should skip certain tests,
// given the test's method name.
type ShouldSkipTestFunc func(string) bool

// TestSuite contains methods that defines the lifecycle of a test suite.
type TestSuite interface {
	Setup(t *testing.T)
	Teardown()
}

// TestSkipper allows providers to skip certain tests.
// Implementing it on a TestSuite is optional; Run checks for it dynamically.
type TestSkipper interface {
	ShouldSkipTest(string) bool
}

// testCase pairs a discovered test method's name with the function that runs it.
type testCase struct {
	name string // method name, used as the subtest name
	f    TestFunc
}
37 |
38 | // Run runs tests registered in the test suite
39 | func Run(t *testing.T, ts TestSuite) {
40 | defer failOnPanic(t)
41 |
42 | ts.Setup(t)
43 | defer ts.Teardown()
44 |
45 | // The implementation below is based on https://github.com/stretchr/testify
46 | testFinder := reflect.TypeOf(ts)
47 | tests := []testCase{}
48 | for i := 0; i < testFinder.NumMethod(); i++ {
49 | method := testFinder.Method(i)
50 | if !isValidTestFunc(method) {
51 | continue
52 | }
53 |
54 | test := testCase{
55 | name: method.Name,
56 | f: func(t *testing.T) {
57 | defer failOnPanic(t)
58 | if tSkipper, ok := ts.(TestSkipper); ok && tSkipper.ShouldSkipTest(method.Name) {
59 | t.Skipf("Skipped due to shouldSkipTest()")
60 | }
61 | method.Func.Call([]reflect.Value{reflect.ValueOf(ts), reflect.ValueOf(t)})
62 | },
63 | }
64 | tests = append(tests, test)
65 | }
66 |
67 | for _, test := range tests {
68 | t.Run(test.name, test.f)
69 | }
70 | }
71 |
// failOnPanic recovers panic occurred in the test suite and marks the test / test suite as failed.
// It must be invoked via defer so that recover() observes an in-flight panic.
func failOnPanic(t *testing.T) {
	if r := recover(); r != nil {
		t.Fatalf("%v\n%s", r, debug.Stack())
	}
}
78 |
// isValidTestFunc determines whether or not a given method is a valid test function
func isValidTestFunc(method reflect.Method) bool {
	// A test method must be named "TestXxx" ...
	if !strings.HasPrefix(method.Name, "Test") {
		return false
	}
	// ... take exactly two inputs (the receiver and *testing.T) ...
	if method.Type.NumIn() != 2 || method.Type.In(1) != reflect.TypeOf(&testing.T{}) {
		return false
	}
	// ... and return nothing.
	return method.Type.NumOut() == 0
}
86 |
--------------------------------------------------------------------------------
/internal/test/suite/suite_test.go:
--------------------------------------------------------------------------------
1 | package suite
2 |
3 | import (
4 | "strings"
5 | "testing"
6 |
7 | "gotest.tools/assert"
8 | is "gotest.tools/assert/cmp"
9 | )
10 |
// basicTestSuite counts how often each lifecycle hook and (well- or mal-formed)
// test method is invoked, so the assertions below can verify exactly which
// methods suite.Run executed.
type basicTestSuite struct {
	setupCount      int
	testFooCount    int
	testBarCount    int
	bazCount        int
	testFooBarCount int
	testFooBazCount int
	testBarBazCount int
	teardownCount   int
	testsRan        []string // names (as reported by t.Name()) of the tests that actually ran
}
22 |
// Setup records that the suite's setup hook ran.
func (bts *basicTestSuite) Setup(t *testing.T) {
	bts.setupCount++
}

// Teardown records that the suite's teardown hook ran.
func (bts *basicTestSuite) Teardown() {
	bts.teardownCount++
}
30 |
// TestFoo is a well-formed test method; Run should execute it exactly once.
func (bts *basicTestSuite) TestFoo(t *testing.T) {
	bts.testFooCount++
	bts.testsRan = append(bts.testsRan, t.Name())
}

// TestBar is a well-formed test method; Run should execute it exactly once.
func (bts *basicTestSuite) TestBar(t *testing.T) {
	bts.testBarCount++
	bts.testsRan = append(bts.testsRan, t.Name())
}
40 |
// Baz should not be executed by the test suite
// because it does not have the prefix 'Test'.
func (bts *basicTestSuite) Baz(t *testing.T) {
	bts.bazCount++
	bts.testsRan = append(bts.testsRan, t.Name())
}

// TestFooBar should not be executed by the test suite
// because the number of function input is not 2 (*basicTestSuite and *testing.T).
func (bts *basicTestSuite) TestFooBar() {
	bts.testFooBarCount++
	bts.testsRan = append(bts.testsRan, "TestFooBar")
}

// TestFooBaz should not be executed by the test suite
// because the number of function output is not 0.
func (bts *basicTestSuite) TestFooBaz(t *testing.T) error {
	bts.testFooBazCount++
	bts.testsRan = append(bts.testsRan, t.Name())
	return nil
}

// TestBarBaz should not be executed by the test suite
// because the type of the function input is not *testing.T.
func (bts *basicTestSuite) TestBarBaz(t string) {
	bts.testBarBazCount++
	bts.testsRan = append(bts.testsRan, "TestBarBaz")
}
69 |
// TestBasicTestSuite verifies that Run executes exactly the well-formed test
// methods (TestFoo, TestBar) plus the Setup/Teardown hooks, once each, and
// none of the malformed methods.
func TestBasicTestSuite(t *testing.T) {
	bts := new(basicTestSuite)
	Run(t, bts)

	assert.Equal(t, bts.setupCount, 1)
	assert.Equal(t, bts.testFooCount, 1)
	assert.Equal(t, bts.testBarCount, 1)
	assert.Equal(t, bts.teardownCount, 1)
	assert.Assert(t, is.Len(bts.testsRan, 2))
	assertTestsRan(t, bts.testsRan)
	assertNonTests(t, bts)
}
82 |
// skipTestSuite embeds basicTestSuite and additionally implements TestSkipper,
// so Run should skip the tests it reports as skippable.
type skipTestSuite struct {
	basicTestSuite
	skippedTestCount int // number of times a test was actually skipped
}

// ShouldSkipTest skips only TestBar, counting each time it does so.
func (sts *skipTestSuite) ShouldSkipTest(testName string) bool {
	if testName == "TestBar" {
		sts.skippedTestCount++
		return true
	}
	return false
}
95 |
// TestSkipTest verifies that Run honors the TestSkipper interface:
// TestBar is skipped (and counted as such) while TestFoo still runs.
func TestSkipTest(t *testing.T) {
	sts := new(skipTestSuite)
	Run(t, sts)

	assert.Equal(t, sts.setupCount, 1)
	assert.Equal(t, sts.testFooCount, 1)
	assert.Equal(t, sts.testBarCount, 0)
	assert.Equal(t, sts.teardownCount, 1)
	assert.Equal(t, sts.skippedTestCount, 1)
	assert.Assert(t, is.Len(sts.testsRan, 1))
	assertTestsRan(t, sts.testsRan)
	assertNonTests(t, &sts.basicTestSuite)
}
109 |
// assertTestsRan checks that every recorded test name has the form
// "<parent>/<subtest>" and that the parent component matches the current test.
func assertTestsRan(t *testing.T, testsRan []string) {
	for _, testRan := range testsRan {
		parts := strings.Split(testRan, "/")
		// Make sure that the name of the test has exactly one parent name and one subtest name
		assert.Assert(t, is.Len(parts, 2))
		// Check the parent test's name
		assert.Equal(t, parts[0], t.Name())
	}
}
119 |
// assertNonTests ensures that any malformed test functions are not run by the test suite.
func assertNonTests(t *testing.T, bts *basicTestSuite) {
	assert.Equal(t, bts.bazCount, 0)
	assert.Equal(t, bts.testFooBarCount, 0)
	assert.Equal(t, bts.testFooBazCount, 0)
	assert.Equal(t, bts.testBarBazCount, 0)
}
127 |
--------------------------------------------------------------------------------
/internal/test/util/kubernetes.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | corev1 "k8s.io/api/core/v1"
5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
6 | "k8s.io/client-go/tools/record"
7 | )
8 |
9 | // FakeConfigMap returns a configmap with the specified namespace, name and data.
10 | func FakeConfigMap(namespace, name string, data map[string]string) *corev1.ConfigMap {
11 | return &corev1.ConfigMap{
12 | ObjectMeta: metav1.ObjectMeta{
13 | Namespace: namespace,
14 | Name: name,
15 | },
16 | Data: data,
17 | }
18 | }
19 |
// FakeEventRecorder returns an event recorder that can be used to capture events.
// bufferSize bounds how many events the recorder can hold before senders block.
func FakeEventRecorder(bufferSize int) *record.FakeRecorder {
	return record.NewFakeRecorder(bufferSize)
}
24 |
25 | // FakePodWithSingleContainer returns a pod with the specified namespace and name, and having a single container with the specified image.
26 | func FakePodWithSingleContainer(namespace, name, image string) *corev1.Pod {
27 | enableServiceLink := corev1.DefaultEnableServiceLinks
28 |
29 | return &corev1.Pod{
30 | ObjectMeta: metav1.ObjectMeta{
31 | Namespace: namespace,
32 | Name: name,
33 | },
34 | Spec: corev1.PodSpec{
35 | Containers: []corev1.Container{
36 | {
37 | Name: name,
38 | Image: image,
39 | },
40 | },
41 | EnableServiceLinks: &enableServiceLink,
42 | },
43 | }
44 | }
45 |
46 | // FakeSecret returns a secret with the specified namespace, name and data.
47 | func FakeSecret(namespace, name string, data map[string]string) *corev1.Secret {
48 | res := &corev1.Secret{
49 | ObjectMeta: metav1.ObjectMeta{
50 | Namespace: namespace,
51 | Name: name,
52 | },
53 | Data: make(map[string][]byte),
54 | }
55 | for key, val := range data {
56 | res.Data[key] = []byte(val)
57 | }
58 | return res
59 | }
60 |
61 | // FakeService returns a service with the specified namespace and name and service info.
62 | func FakeService(namespace, name, clusterIP, protocol string, port int32) *corev1.Service {
63 | return &corev1.Service{
64 | ObjectMeta: metav1.ObjectMeta{
65 | Namespace: namespace,
66 | Name: name,
67 | },
68 | Spec: corev1.ServiceSpec{
69 | Ports: []corev1.ServicePort{{
70 | Protocol: corev1.Protocol(protocol),
71 | Port: port,
72 | }},
73 | ClusterIP: clusterIP,
74 | },
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/internal/test/util/provider.go:
--------------------------------------------------------------------------------
1 | package util
2 |
--------------------------------------------------------------------------------
/knoc.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.package main
14 |
15 | package knoc
16 |
17 | import (
18 | "context"
19 | "encoding/json"
20 | "fmt"
21 | "io"
22 | "io/ioutil"
23 | "math/rand"
24 | "os"
25 | "strings"
26 | "time"
27 |
28 | common "github.com/CARV-ICS-FORTH/knoc/common"
29 |
30 | "github.com/sfreiberg/simplessh"
31 | "github.com/virtual-kubelet/node-cli/manager"
32 | "github.com/virtual-kubelet/virtual-kubelet/errdefs"
33 | "github.com/virtual-kubelet/virtual-kubelet/log"
34 | "github.com/virtual-kubelet/virtual-kubelet/node/api"
35 | "github.com/virtual-kubelet/virtual-kubelet/trace"
36 | v1 "k8s.io/api/core/v1"
37 | "k8s.io/apimachinery/pkg/api/resource"
38 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
39 | stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
40 | )
41 |
// KNOCProvider implements the virtual-kubelet provider interface and stores pods in memory.
type KNOCProvider struct { // nolint:golint
	nodeName           string                   // name of the virtual node this provider backs
	operatingSystem    string                   // operating system reported for the virtual node
	internalIP         string                   // IP address advertised for the virtual node
	daemonEndpointPort int32                    // port of the kubelet daemon endpoint
	pods               map[string]*v1.Pod       // in-memory pod store, keyed by common.BuildKey(pod)
	config             KNOCConfig               // advertised node capacity configuration
	startTime          time.Time                // time the provider was created
	resourceManager    *manager.ResourceManager // access to cluster resources needed by pods
	notifier           func(*v1.Pod)            // callback invoked after a pod's status changes
}
// KNOCConfig holds the advertised capacity of the virtual node.
// Each value must parse as a Kubernetes resource quantity (see loadConfig).
type KNOCConfig struct { // nolint:golint
	CPU    string `json:"cpu,omitempty"`    // CPU capacity
	Memory string `json:"memory,omitempty"` // memory capacity
	Pods   string `json:"pods,omitempty"`   // maximum number of pods
}
59 |
60 | // NewProviderConfig creates a new KNOCV0Provider. KNOC legacy provider does not implement the new asynchronous podnotifier interface
61 | func NewProviderConfig(config KNOCConfig, nodeName, operatingSystem string, internalIP string, rm *manager.ResourceManager, daemonEndpointPort int32) (*KNOCProvider, error) {
62 | // set defaults
63 | if config.CPU == "" {
64 | config.CPU = common.DefaultCPUCapacity
65 | }
66 | if config.Memory == "" {
67 | config.Memory = common.DefaultMemoryCapacity
68 | }
69 | if config.Pods == "" {
70 | config.Pods = common.DefaultPodCapacity
71 | }
72 | provider := KNOCProvider{
73 | nodeName: nodeName,
74 | operatingSystem: operatingSystem,
75 | internalIP: internalIP,
76 | daemonEndpointPort: daemonEndpointPort,
77 | resourceManager: rm,
78 | pods: make(map[string]*v1.Pod),
79 | config: config,
80 | startTime: time.Now(),
81 | }
82 |
83 | return &provider, nil
84 | }
85 |
// NewProvider creates a new Provider, which implements the PodNotifier interface.
// It loads the per-node capacity settings from the JSON config file at providerConfig
// and delegates construction to NewProviderConfig.
func NewProvider(providerConfig, nodeName, operatingSystem string, internalIP string, rm *manager.ResourceManager, daemonEndpointPort int32) (*KNOCProvider, error) {
	config, err := loadConfig(providerConfig, nodeName)
	if err != nil {
		return nil, err
	}
	return NewProviderConfig(config, nodeName, operatingSystem, internalIP, rm, daemonEndpointPort)
}
94 |
95 | // loadConfig loads the given json configuration files.
96 | func loadConfig(providerConfig, nodeName string) (config KNOCConfig, err error) {
97 | data, err := ioutil.ReadFile(providerConfig)
98 | if err != nil {
99 | return config, err
100 | }
101 | configMap := map[string]KNOCConfig{}
102 | err = json.Unmarshal(data, &configMap)
103 | if err != nil {
104 | return config, err
105 | }
106 | if _, exist := configMap[nodeName]; exist {
107 | config = configMap[nodeName]
108 | if config.CPU == "" {
109 | config.CPU = common.DefaultCPUCapacity
110 | }
111 | if config.Memory == "" {
112 | config.Memory = common.DefaultMemoryCapacity
113 | }
114 | if config.Pods == "" {
115 | config.Pods = common.DefaultPodCapacity
116 | }
117 | }
118 |
119 | if _, err = resource.ParseQuantity(config.CPU); err != nil {
120 | return config, fmt.Errorf("Invalid CPU value %v", config.CPU)
121 | }
122 | if _, err = resource.ParseQuantity(config.Memory); err != nil {
123 | return config, fmt.Errorf("Invalid memory value %v", config.Memory)
124 | }
125 | if _, err = resource.ParseQuantity(config.Pods); err != nil {
126 | return config, fmt.Errorf("Invalid pods value %v", config.Pods)
127 | }
128 | return config, nil
129 | }
130 |
// CreatePod accepts a Pod definition and stores it in memory.
// It dispatches init containers (if any) and main containers to the remote site
// via RemoteExecution, synthesizes a PodStatus reflecting what was started, and
// notifies the node controller of the new status.
func (p *KNOCProvider) CreatePod(ctx context.Context, pod *v1.Pod) error {
	ctx, span := trace.StartSpan(ctx, "CreatePod")
	var hasInitContainers bool = false
	var state v1.ContainerState
	defer span.End()
	// Image references are addressed through the docker scheme on the remote side.
	distribution := "docker://"
	// Add the pod's coordinates to the current span.
	ctx = addAttributes(ctx, span, common.NamespaceKey, pod.Namespace, common.NameKey, pod.Name)
	key, err := common.BuildKey(pod)
	if err != nil {
		return err
	}
	now := metav1.NewTime(time.Now())
	// State reported for main containers that can start immediately.
	running_state := v1.ContainerState{
		Running: &v1.ContainerStateRunning{
			StartedAt: now,
		},
	}
	// State reported for main containers that must wait for init containers.
	waiting_state := v1.ContainerState{
		Waiting: &v1.ContainerStateWaiting{
			Reason: "Waiting for InitContainers",
		},
	}
	state = running_state

	// in case we have initContainers we need to stop main containers from executing for now ...
	if len(pod.Spec.InitContainers) > 0 {
		state = waiting_state
		hasInitContainers = true
		// run init container with remote execution enabled
		for _, container := range pod.Spec.InitContainers {
			// MUST TODO: Run init containers sequentialy and NOT all-together
			// NOTE(review): the error returned by RemoteExecution is ignored here, so a
			// failed init container does not mark the pod as failed — confirm intended.
			RemoteExecution(p, ctx, common.CREATE, distribution+container.Image, pod, container)
		}

		// Pod is Running but not Initialized/Ready until init containers complete.
		pod.Status = v1.PodStatus{
			Phase:     v1.PodRunning,
			HostIP:    "127.0.0.1",
			PodIP:     "127.0.0.1",
			StartTime: &now,
			Conditions: []v1.PodCondition{
				{
					Type:   v1.PodInitialized,
					Status: v1.ConditionFalse,
				},
				{
					Type:   v1.PodReady,
					Status: v1.ConditionFalse,
				},
				{
					Type:   v1.PodScheduled,
					Status: v1.ConditionTrue,
				},
			},
		}
	} else {
		// No init containers: the pod is immediately Initialized and Ready.
		pod.Status = v1.PodStatus{
			Phase:     v1.PodRunning,
			HostIP:    "127.0.0.1",
			PodIP:     "127.0.0.1",
			StartTime: &now,
			Conditions: []v1.PodCondition{
				{
					Type:   v1.PodInitialized,
					Status: v1.ConditionTrue,
				},
				{
					Type:   v1.PodReady,
					Status: v1.ConditionTrue,
				},
				{
					Type:   v1.PodScheduled,
					Status: v1.ConditionTrue,
				},
			},
		}
	}
	// deploy main containers
	for _, container := range pod.Spec.Containers {
		var err error

		// Main containers are started now only when there are no init containers;
		// otherwise they keep the waiting state assigned above.
		if !hasInitContainers {
			err = RemoteExecution(p, ctx, common.CREATE, distribution+container.Image, pod, container)

		}
		if err != nil {
			// Remote start failed: report the container as terminated and the pod as failed.
			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
				Name:         container.Name,
				Image:        container.Image,
				Ready:        false,
				RestartCount: 1,
				State: v1.ContainerState{
					Terminated: &v1.ContainerStateTerminated{
						Message:   "Could not reach remote cluster",
						StartedAt: now,
						ExitCode:  130,
					},
				},
			})
			pod.Status.Phase = v1.PodFailed
			continue
		}
		pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
			Name:         container.Name,
			Image:        container.Image,
			Ready:        !hasInitContainers,
			RestartCount: 1,
			State:        state,
		})

	}

	// Store the pod and push the new status to the node controller.
	p.pods[key] = pod
	p.notifier(pod)

	return nil
}
249 |
250 | // UpdatePod accepts a Pod definition and updates its reference.
251 | func (p *KNOCProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error {
252 | ctx, span := trace.StartSpan(ctx, "UpdatePod")
253 | defer span.End()
254 |
255 | // Add the pod's coordinates to the current span.
256 | ctx = addAttributes(ctx, span, common.NamespaceKey, pod.Namespace, common.NameKey, pod.Name)
257 |
258 | log.G(ctx).Infof("receive UpdatePod %q", pod.Name)
259 |
260 | key, err := common.BuildKey(pod)
261 | if err != nil {
262 | return err
263 | }
264 |
265 | p.pods[key] = pod
266 | p.notifier(pod)
267 |
268 | return nil
269 | }
270 |
271 | // DeletePod deletes the specified pod out of memory.
272 | func (p *KNOCProvider) DeletePod(ctx context.Context, pod *v1.Pod) (err error) {
273 | ctx, span := trace.StartSpan(ctx, "DeletePod")
274 | defer span.End()
275 |
276 | // Add the pod's coordinates to the current span.
277 | ctx = addAttributes(ctx, span, common.NamespaceKey, pod.Namespace, common.NameKey, pod.Name)
278 |
279 | log.G(ctx).Infof("receive DeletePod %q", pod.Name)
280 |
281 | key, err := common.BuildKey(pod)
282 | if err != nil {
283 | return err
284 | }
285 |
286 | if _, exists := p.pods[key]; !exists {
287 | return errdefs.NotFound("pod not found")
288 | }
289 |
290 | now := metav1.Now()
291 | pod.Status.Phase = v1.PodSucceeded
292 | pod.Status.Reason = "KNOCProviderPodDeleted"
293 |
294 | for _, container := range pod.Spec.Containers {
295 | RemoteExecution(p, ctx, common.DELETE, "", pod, container)
296 | }
297 | for _, container := range pod.Spec.InitContainers {
298 | RemoteExecution(p, ctx, common.DELETE, "", pod, container)
299 | }
300 | for idx := range pod.Status.ContainerStatuses {
301 | pod.Status.ContainerStatuses[idx].Ready = false
302 | pod.Status.ContainerStatuses[idx].State = v1.ContainerState{
303 | Terminated: &v1.ContainerStateTerminated{
304 | Message: "KNOC provider terminated container upon deletion",
305 | FinishedAt: now,
306 | Reason: "KNOCProviderPodContainerDeleted",
307 | // StartedAt: pod.Status.ContainerStatuses[idx].State.Running.StartedAt,
308 | },
309 | }
310 | }
311 | for idx := range pod.Status.InitContainerStatuses {
312 | pod.Status.InitContainerStatuses[idx].Ready = false
313 | pod.Status.InitContainerStatuses[idx].State = v1.ContainerState{
314 | Terminated: &v1.ContainerStateTerminated{
315 | Message: "KNOC provider terminated container upon deletion",
316 | FinishedAt: now,
317 | Reason: "KNOCProviderPodContainerDeleted",
318 | // StartedAt: pod.Status.InitContainerStatuses[idx].State.Running.StartedAt,
319 | },
320 | }
321 | }
322 |
323 | p.notifier(pod)
324 | delete(p.pods, key)
325 |
326 | return nil
327 | }
328 |
329 | // GetPod returns a pod by name that is stored in memory.
330 | func (p *KNOCProvider) GetPod(ctx context.Context, namespace, name string) (pod *v1.Pod, err error) {
331 | ctx, span := trace.StartSpan(ctx, "GetPod")
332 | defer func() {
333 | span.SetStatus(err)
334 | span.End()
335 | }()
336 |
337 | // Add the pod's coordinates to the current span.
338 | ctx = addAttributes(ctx, span, common.NamespaceKey, namespace, common.NameKey, name)
339 |
340 | log.G(ctx).Infof("receive GetPod %q", name)
341 |
342 | key, err := common.BuildKeyFromNames(namespace, name)
343 | if err != nil {
344 | return nil, err
345 | }
346 |
347 | if pod, ok := p.pods[key]; ok {
348 | return pod, nil
349 | }
350 | return nil, errdefs.NotFoundf("pod \"%s/%s\" is not known to the provider", namespace, name)
351 | }
352 |
353 | // GetContainerLogs retrieves the logs of a container by name from the provider.
354 | func (p *KNOCProvider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
355 | ctx, span := trace.StartSpan(ctx, "GetContainerLogs")
356 | defer span.End()
357 |
358 | // Add pod and container attributes to the current span.
359 | ctx = addAttributes(ctx, span, common.NamespaceKey, namespace, common.NameKey, podName, common.ContainerNameKey, containerName)
360 |
361 | log.G(ctx).Infof("receive GetContainerLogs %q", podName)
362 | client, err := simplessh.ConnectWithKey(os.Getenv("REMOTE_HOST")+":"+os.Getenv("REMOTE_PORT"), os.Getenv("REMOTE_USER"), os.Getenv("REMOTE_KEY"))
363 | if err != nil {
364 | panic(err)
365 | }
366 | defer client.Close()
367 | key, err := common.BuildKeyFromNames(namespace, podName)
368 | if err != nil {
369 | return nil, err
370 | }
371 |
372 | pod := p.pods[key]
373 | instance_name := ""
374 | for iter := range pod.Spec.InitContainers {
375 | if pod.Spec.InitContainers[iter].Name == containerName {
376 | instance_name = BuildRemoteExecutionInstanceName(pod.Spec.InitContainers[iter], pod)
377 | }
378 | }
379 | for iter := range pod.Spec.Containers {
380 | if pod.Spec.Containers[iter].Name == containerName {
381 | instance_name = BuildRemoteExecutionInstanceName(pod.Spec.Containers[iter], pod)
382 | }
383 | }
384 | // in case we dont find it or if it hasnt run yet we should return empty string
385 | output, _ := client.Exec("cat " + ".knoc/" + instance_name + ".out ")
386 |
387 | return ioutil.NopCloser(strings.NewReader(string(output))), nil
388 | }
389 |
390 | // RunInContainer executes a command in a container in the pod, copying data
391 | // between in/out/err and the container's stdin/stdout/stderr.
392 | func (p *KNOCProvider) RunInContainer(ctx context.Context, namespace, name, container string, cmd []string, attach api.AttachIO) error {
393 | client, err := simplessh.ConnectWithKey(os.Getenv("REMOTE_HOST")+":"+os.Getenv("REMOTE_PORT"), os.Getenv("REMOTE_USER"), os.Getenv("REMOTE_KEY"))
394 | if err != nil {
395 | panic(err)
396 | }
397 | defer client.Close()
398 |
399 | client.Exec(strings.Join(cmd, " "))
400 | log.G(context.TODO()).Infof("receive ExecInContainer %q", strings.Join(cmd, " "))
401 | return nil
402 | }
403 |
404 | // GetPodStatus returns the status of a pod by name that is "running".
405 | // returns nil if a pod by that name is not found.
406 | func (p *KNOCProvider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) {
407 | ctx, span := trace.StartSpan(ctx, "GetPodStatus")
408 | defer span.End()
409 |
410 | // Add namespace and name as attributes to the current span.
411 | ctx = addAttributes(ctx, span, common.NamespaceKey, namespace, common.NameKey, name)
412 |
413 | log.G(ctx).Infof("receive GetPodStatus %q", name)
414 |
415 | pod, err := p.GetPod(ctx, namespace, name)
416 | if err != nil {
417 | return nil, err
418 | }
419 |
420 | return &pod.Status, nil
421 | }
422 |
423 | // GetPods returns a list of all pods known to be "running".
424 | func (p *KNOCProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
425 | ctx, span := trace.StartSpan(ctx, "GetPods")
426 | defer span.End()
427 |
428 | log.G(ctx).Info("receive GetPods")
429 |
430 | var pods []*v1.Pod
431 |
432 | for _, pod := range p.pods {
433 | pods = append(pods, pod)
434 | }
435 |
436 | return pods, nil
437 | }
438 |
439 | func (p *KNOCProvider) ConfigureNode(ctx context.Context, n *v1.Node) { // nolint:golint
440 | ctx, span := trace.StartSpan(ctx, "KNOC.ConfigureNode") // nolint:staticcheck,ineffassign
441 | defer span.End()
442 |
443 | n.Status.Capacity = p.capacity()
444 | n.Status.Allocatable = p.capacity()
445 | n.Status.Conditions = p.nodeConditions()
446 | n.Status.Addresses = p.nodeAddresses()
447 | n.Status.DaemonEndpoints = p.nodeDaemonEndpoints()
448 | os := p.operatingSystem
449 | if os == "" {
450 | os = "Linux"
451 | }
452 | n.Status.NodeInfo.OperatingSystem = os
453 | n.Status.NodeInfo.Architecture = "amd64"
454 | n.ObjectMeta.Labels["alpha.service-controller.kubernetes.io/exclude-balancer"] = "true"
455 | n.ObjectMeta.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
456 | }
457 |
458 | // Capacity returns a resource list containing the capacity limits.
459 | func (p *KNOCProvider) capacity() v1.ResourceList {
460 | return v1.ResourceList{
461 | "cpu": resource.MustParse(p.config.CPU),
462 | "memory": resource.MustParse(p.config.Memory),
463 | "pods": resource.MustParse(p.config.Pods),
464 | }
465 | }
466 |
467 | // NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status
468 | // within Kubernetes.
469 | func (p *KNOCProvider) nodeConditions() []v1.NodeCondition {
470 | // TODO: Make this configurable
471 | return []v1.NodeCondition{
472 | {
473 | Type: "Ready",
474 | Status: v1.ConditionTrue,
475 | LastHeartbeatTime: metav1.Now(),
476 | LastTransitionTime: metav1.Now(),
477 | Reason: "KubeletPending",
478 | Message: "kubelet is pending.",
479 | },
480 | {
481 | Type: "OutOfDisk",
482 | Status: v1.ConditionFalse,
483 | LastHeartbeatTime: metav1.Now(),
484 | LastTransitionTime: metav1.Now(),
485 | Reason: "KubeletHasSufficientDisk",
486 | Message: "kubelet has sufficient disk space available",
487 | },
488 | {
489 | Type: "MemoryPressure",
490 | Status: v1.ConditionFalse,
491 | LastHeartbeatTime: metav1.Now(),
492 | LastTransitionTime: metav1.Now(),
493 | Reason: "KubeletHasSufficientMemory",
494 | Message: "kubelet has sufficient memory available",
495 | },
496 | {
497 | Type: "DiskPressure",
498 | Status: v1.ConditionFalse,
499 | LastHeartbeatTime: metav1.Now(),
500 | LastTransitionTime: metav1.Now(),
501 | Reason: "KubeletHasNoDiskPressure",
502 | Message: "kubelet has no disk pressure",
503 | },
504 | {
505 | Type: "NetworkUnavailable",
506 | Status: v1.ConditionFalse,
507 | LastHeartbeatTime: metav1.Now(),
508 | LastTransitionTime: metav1.Now(),
509 | Reason: "RouteCreated",
510 | Message: "RouteController created a route",
511 | },
512 | }
513 |
514 | }
515 |
516 | // NodeAddresses returns a list of addresses for the node status
517 | // within Kubernetes.
518 | func (p *KNOCProvider) nodeAddresses() []v1.NodeAddress {
519 | return []v1.NodeAddress{
520 | {
521 | Type: "InternalIP",
522 | Address: p.internalIP,
523 | },
524 | }
525 | }
526 |
527 | // NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status
528 | // within Kubernetes.
529 | func (p *KNOCProvider) nodeDaemonEndpoints() v1.NodeDaemonEndpoints {
530 | return v1.NodeDaemonEndpoints{
531 | KubeletEndpoint: v1.DaemonEndpoint{
532 | Port: p.daemonEndpointPort,
533 | },
534 | }
535 | }
536 |
537 | // GetStatsSummary returns dummy stats for all pods known by this provider.
538 | func (p *KNOCProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
539 | var span trace.Span
540 | ctx, span = trace.StartSpan(ctx, "GetStatsSummary") //nolint: ineffassign,staticcheck
541 | defer span.End()
542 |
543 | // Grab the current timestamp so we can report it as the time the stats were generated.
544 | time := metav1.NewTime(time.Now())
545 |
546 | // Create the Summary object that will later be populated with node and pod stats.
547 | res := &stats.Summary{}
548 |
549 | // Populate the Summary object with basic node stats.
550 | res.Node = stats.NodeStats{
551 | NodeName: p.nodeName,
552 | StartTime: metav1.NewTime(p.startTime),
553 | }
554 |
555 | // Populate the Summary object with dummy stats for each pod known by this provider.
556 | for _, pod := range p.pods {
557 | var (
558 | // totalUsageNanoCores will be populated with the sum of the values of UsageNanoCores computes across all containers in the pod.
559 | totalUsageNanoCores uint64
560 | // totalUsageBytes will be populated with the sum of the values of UsageBytes computed across all containers in the pod.
561 | totalUsageBytes uint64
562 | )
563 |
564 | // Create a PodStats object to populate with pod stats.
565 | pss := stats.PodStats{
566 | PodRef: stats.PodReference{
567 | Name: pod.Name,
568 | Namespace: pod.Namespace,
569 | UID: string(pod.UID),
570 | },
571 | StartTime: pod.CreationTimestamp,
572 | }
573 |
574 | // Iterate over all containers in the current pod to compute dummy stats.
575 | for _, container := range pod.Spec.Containers {
576 | // Grab a dummy value to be used as the total CPU usage.
577 | // The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
578 | dummyUsageNanoCores := uint64(rand.Uint32())
579 | totalUsageNanoCores += dummyUsageNanoCores
580 | // Create a dummy value to be used as the total RAM usage.
581 | // The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
582 | dummyUsageBytes := uint64(rand.Uint32())
583 | totalUsageBytes += dummyUsageBytes
584 | // Append a ContainerStats object containing the dummy stats to the PodStats object.
585 | pss.Containers = append(pss.Containers, stats.ContainerStats{
586 | Name: container.Name,
587 | StartTime: pod.CreationTimestamp,
588 | CPU: &stats.CPUStats{
589 | Time: time,
590 | UsageNanoCores: &dummyUsageNanoCores,
591 | },
592 | Memory: &stats.MemoryStats{
593 | Time: time,
594 | UsageBytes: &dummyUsageBytes,
595 | },
596 | })
597 | }
598 |
599 | // Populate the CPU and RAM stats for the pod and append the PodsStats object to the Summary object to be returned.
600 | pss.CPU = &stats.CPUStats{
601 | Time: time,
602 | UsageNanoCores: &totalUsageNanoCores,
603 | }
604 | pss.Memory = &stats.MemoryStats{
605 | Time: time,
606 | UsageBytes: &totalUsageBytes,
607 | }
608 | res.Pods = append(res.Pods, pss)
609 | }
610 |
611 | // Return the dummy stats.
612 | return res, nil
613 | }
614 |
// NotifyPods is called to set a pod notifier callback function. This should be called before any operations are done
// within the provider.
func (p *KNOCProvider) NotifyPods(ctx context.Context, f func(*v1.Pod)) {
	p.notifier = f
	// Start the background poller that reconciles remote container state
	// into pod statuses (statusLoop); it exits when ctx is cancelled.
	go p.statusLoop(ctx)
}
621 |
622 | func (p *KNOCProvider) statusLoop(ctx context.Context) {
623 | t := time.NewTimer(5 * time.Second)
624 | if !t.Stop() {
625 | <-t.C
626 | }
627 |
628 | for {
629 | t.Reset(5 * time.Second)
630 | select {
631 | case <-ctx.Done():
632 | return
633 | case <-t.C:
634 | }
635 |
636 | checkPodsStatus(p, ctx)
637 | }
638 | }
639 |
640 | func (p *KNOCProvider) initContainersActive(pod *v1.Pod) bool {
641 | init_containers_active := len(pod.Spec.InitContainers)
642 | for idx, _ := range pod.Spec.InitContainers {
643 | if pod.Status.InitContainerStatuses[idx].State.Terminated != nil {
644 | init_containers_active--
645 | }
646 | }
647 | return init_containers_active != 0
648 | }
649 |
650 | func (p *KNOCProvider) startMainContainers(ctx context.Context, pod *v1.Pod) {
651 | distribution := "docker://"
652 | now := metav1.NewTime(time.Now())
653 |
654 | for idx, container := range pod.Spec.Containers {
655 | err := RemoteExecution(p, ctx, common.CREATE, distribution+container.Image, pod, container)
656 |
657 | if err != nil {
658 | pod.Status.ContainerStatuses[idx] = v1.ContainerStatus{
659 | Name: container.Name,
660 | Image: container.Image,
661 | Ready: false,
662 | RestartCount: 1,
663 | State: v1.ContainerState{
664 | Terminated: &v1.ContainerStateTerminated{
665 | Message: "Could not reach remote cluster",
666 | StartedAt: now,
667 | ExitCode: 130,
668 | },
669 | },
670 | }
671 | pod.Status.Phase = v1.PodFailed
672 | continue
673 | }
674 | pod.Status.ContainerStatuses[idx] = v1.ContainerStatus{
675 | Name: container.Name,
676 | Image: container.Image,
677 | Ready: true,
678 | RestartCount: 1,
679 | State: v1.ContainerState{
680 | Running: &v1.ContainerStateRunning{
681 | StartedAt: now,
682 | },
683 | },
684 | }
685 |
686 | }
687 | }
688 |
689 | // addAttributes adds the specified attributes to the provided span.
690 | // attrs must be an even-sized list of string arguments.
691 | // Otherwise, the span won't be modified.
692 | // TODO: Refactor and move to a "tracing utilities" package.
693 | func addAttributes(ctx context.Context, span trace.Span, attrs ...string) context.Context {
694 | if len(attrs)%2 == 1 {
695 | return ctx
696 | }
697 | for i := 0; i < len(attrs); i += 2 {
698 | ctx = span.WithField(ctx, attrs[i], attrs[i+1])
699 | }
700 | return ctx
701 | }
702 |
--------------------------------------------------------------------------------
/media/Dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CARV-ICS-FORTH/knoc/e1c17e3b6ae63f76dbc832ab665efe9330fc4c81/media/Dark.png
--------------------------------------------------------------------------------
/media/Light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CARV-ICS-FORTH/knoc/e1c17e3b6ae63f76dbc832ab665efe9330fc4c81/media/Light.png
--------------------------------------------------------------------------------
/media/darkcrop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CARV-ICS-FORTH/knoc/e1c17e3b6ae63f76dbc832ab665efe9330fc4c81/media/darkcrop.png
--------------------------------------------------------------------------------
/media/knoc-env.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CARV-ICS-FORTH/knoc/e1c17e3b6ae63f76dbc832ab665efe9330fc4c81/media/knoc-env.png
--------------------------------------------------------------------------------
/media/lightcrop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CARV-ICS-FORTH/knoc/e1c17e3b6ae63f76dbc832ab665efe9330fc4c81/media/lightcrop.png
--------------------------------------------------------------------------------
/remote.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2021 FORTH-ICS
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package knoc
16 |
17 | import (
18 | "context"
19 | "encoding/json"
20 | "fmt"
21 | "io/fs"
22 | "os"
23 | "os/exec"
24 | "path/filepath"
25 | "strconv"
26 | "strings"
27 |
28 | b64 "encoding/base64"
29 |
30 | common "github.com/CARV-ICS-FORTH/knoc/common"
31 |
32 | "github.com/containerd/containerd/log"
33 | "github.com/pkg/sftp"
34 | "github.com/sfreiberg/simplessh"
35 | v1 "k8s.io/api/core/v1"
36 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
37 | )
38 |
// getRoutine maps a remote-execution mode to a human-readable label used in
// log messages (0 = create, 1 = delete; anything else is "UNKNOWN").
func getRoutine(mode int8) string {
	if mode == 0 {
		return "Create Remote Execution"
	}
	if mode == 1 {
		return "Delete Remote Execution"
	}
	return "UNKNOWN"
}
50 |
// normalizeImageName converts a container image reference into a flat,
// identifier-friendly name: path separators ("/") become "-" and any tag
// suffix (everything after the first ":") is dropped.
// e.g. "docker.io/library/nginx:1.21" -> "docker.io-library-nginx".
func normalizeImageName(imageName string) string {
	// One-pass stdlib form. The original looped manually and shadowed the
	// "strings" package with its loop variable, which was an accident
	// waiting to happen.
	flat := strings.ReplaceAll(imageName, "/", "-")
	return strings.Split(flat, ":")[0]
}
66 |
67 | func exportContainerb64Json(instance_name string, obj v1.Container, meta metav1.ObjectMeta) string {
68 | obj.Name = instance_name
69 | dc := common.DoorContainer{}
70 | dc.Args = obj.Args
71 | dc.Command = obj.Command
72 | dc.Env = obj.Env
73 | dc.EnvFrom = obj.EnvFrom
74 | dc.Image = obj.Image
75 | dc.Name = obj.Name
76 | dc.Ports = obj.Ports
77 | dc.Resources = obj.Resources
78 | dc.VolumeDevices = obj.VolumeDevices
79 | dc.VolumeMounts = obj.VolumeMounts
80 | dc.Metadata = meta
81 |
82 | u, _ := json.Marshal(dc)
83 | sEnc := b64.StdEncoding.EncodeToString(u)
84 | return sEnc
85 | }
86 |
// hasDoor reports whether the remote host already has a runnable "door"
// binary in the login user's home directory, probed by executing
// "./door --version" over SSH.
func hasDoor(client *simplessh.Client) bool {
	_, err := client.Exec("./door --version")
	return err == nil
}
91 |
// prepareDoor ensures KNoC's "door" helper binary exists and runs on the
// remote host, uploading it from this container's filesystem when missing.
// Panics if the uploaded binary still fails to run (the provider cannot
// operate without it).
func prepareDoor(client *simplessh.Client) {
	if !hasDoor(client) {
		// Could not find KNoC's Door binary in the remote system...
		// send door to remote
		local := "/usr/local/bin/door" // from inside the container's root dir
		remote := "door"
		common.UploadFile(client, local, remote, 0700)
		// check again else die
		_, err := client.Exec("./door --version")
		if err != nil {
			fmt.Println("Could not upload KNoC's Door")
			panic(err)
		}
	}
}
107 |
108 | func runRemoteExecutionInstance(ctx context.Context, client *simplessh.Client, imageLocation string, instance_name string, container v1.Container, meta metav1.ObjectMeta) ([]byte, error) {
109 | b64dc := exportContainerb64Json(instance_name, container, meta)
110 | output, err := client.Exec("bash -l -c \"nohup ./door -a submit -c " + b64dc + " -V >> .knoc/door.log 2>> .knoc/door.log < /dev/null & \"")
111 | log.G(ctx).Debugf("bash -l -c \"nohup ./door -a submit -c " + b64dc + " -V >> .knoc/door.log 2>> .knoc/door.log < /dev/null & \"")
112 | if err != nil {
113 | // Could not exec instance
114 | return nil, err
115 | }
116 | return output, nil
117 | }
118 |
119 | func BuildRemoteExecutionInstanceName(container v1.Container, pod *v1.Pod) string {
120 | return pod.Namespace + "-" + string(pod.UID) + "-" + normalizeImageName(container.Image)
121 | }
122 | func BuildRemoteExecutionPodName(pod *v1.Pod) string {
123 | return pod.Namespace + "-" + string(pod.UID)
124 | }
125 |
126 | func stopRemoteExecutionInstance(ctx context.Context, client *simplessh.Client, pod *v1.Pod, instance_name string, container v1.Container, meta metav1.ObjectMeta) ([]byte, error) {
127 | b64dc := exportContainerb64Json(instance_name, container, meta)
128 | output, err := client.Exec("bash -l -c \"nohup ./door -a stop -c " + b64dc + " -V >> .knoc/door.log 2>> .knoc/door.log < /dev/null & \"")
129 | log.G(ctx).Debugf("bash -l -c \"nohup ./door -a stop -c " + b64dc + " -V >> .knoc/door.log 2>> .knoc/door.log < /dev/null & \"")
130 | if err != nil {
131 | // Could not exec instance
132 | return nil, err
133 | }
134 |
135 | return output, nil
136 |
137 | }
138 | func RemoteExecution(p *KNOCProvider, ctx context.Context, mode int8, imageLocation string, pod *v1.Pod, container v1.Container) error {
139 | var err error
140 | instance_name := BuildRemoteExecutionInstanceName(container, pod)
141 | client, err := simplessh.ConnectWithKey(os.Getenv("REMOTE_HOST")+":"+os.Getenv("REMOTE_PORT"), os.Getenv("REMOTE_USER"), os.Getenv("REMOTE_KEY"))
142 | if err != nil {
143 | return err
144 | }
145 | defer client.Close()
146 | log.GetLogger(ctx).Info(getRoutine(mode) + " Container")
147 | prepareDoor(client)
148 | if mode == common.CREATE {
149 |
150 | err = PrepareContainerData(p, ctx, client, instance_name, container, pod)
151 | if err != nil {
152 | return err
153 | }
154 | _, err = runRemoteExecutionInstance(ctx, client, imageLocation, instance_name, container, pod.ObjectMeta)
155 | } else if mode == common.DELETE {
156 | _, err = stopRemoteExecutionInstance(ctx, client, pod, instance_name, container, pod.ObjectMeta)
157 | }
158 | if err != nil {
159 | return err
160 | }
161 |
162 | return nil
163 | }
164 |
// PrepareContainerData stages everything a container needs on the remote
// host before submission: a kubeconfig under the remote $HOME, the .knoc
// work directory, and the contents of any configmap/secret/emptyDir volume
// mounts under common.PodVolRoot.
//
// NOTE(review): exec.Command("test -f .kube/config") passes the whole string
// as the binary name (no shell splitting), so it can never succeed as
// written; the resulting error is not an *exec.ExitError, which means the
// kubeconfig branch below runs on every call — and the probe targets the
// LOCAL filesystem, despite the "on remote" comment. Confirm the intended
// check (client.Exec on the remote, or exec.Command("test", "-f", ...)).
func PrepareContainerData(p *KNOCProvider, ctx context.Context, client *simplessh.Client, instance_name string, container v1.Container, pod *v1.Pod) error {
	log.G(ctx).Debugf("receive prepareContainerData %v", container.Name)

	// SFTP session used by common.UploadData for the file transfers below.
	c, err := sftp.NewClient(client.SSHClient)
	if err != nil {
		fmt.Println("Could not connect over sftp on the remote system ")
		panic(err)
	}
	defer c.Close()

	//add kubeconfig on remote:$HOME
	out, err := exec.Command("test -f .kube/config").Output()
	if _, ok := err.(*exec.ExitError); !ok {
		log.GetLogger(ctx).Debug("Kubeconfig doesn't exist, so we will generate it...")
		// Generate a kubeconfig locally, then copy it to the remote host.
		out, err = exec.Command("/bin/sh", "/home/user0/scripts/prepare_kubeconfig.sh").Output()
		if err != nil {
			log.GetLogger(ctx).Errorln("Could not run kubeconfig_setup script!")
			log.GetLogger(ctx).Error(string(out))
			panic(err)
		}
		log.GetLogger(ctx).Debug("Kubeconfig generated")
		client.Exec("mkdir -p .kube")
		_, err = client.Exec("echo \"" + string(out) + "\" > .kube/config")
		if err != nil {
			log.GetLogger(ctx).Errorln("Could not setup kubeconfig on the remote system ")
			panic(err)
		}
		log.GetLogger(ctx).Debug("Kubeconfig installed")
	}

	// Per-user scratch directory used for door logs and status files.
	client.Exec("mkdir -p " + ".knoc")
	for _, mountSpec := range container.VolumeMounts {
		podVolSpec := findPodVolumeSpec(pod, mountSpec.Name)
		if podVolSpec.ConfigMap != nil {
			cmvs := podVolSpec.ConfigMap
			mode := podVolSpec.ConfigMap.DefaultMode
			podConfigMapDir := filepath.Join(common.PodVolRoot, BuildRemoteExecutionPodName(pod)+"/", mountSpec.Name)
			configMap, err := p.resourceManager.GetConfigMap(cmvs.Name, pod.Namespace)
			// NOTE(review): Optional==false triggers this error even when the
			// configmap WAS fetched successfully, and the err branch below
			// interpolates pod.Name where cmvs.Name seems intended — verify.
			if cmvs.Optional != nil && !*cmvs.Optional {
				return fmt.Errorf("Configmap %s is required by Pod %s and does not exist", cmvs.Name, pod.Name)
			}
			if err != nil {
				return fmt.Errorf("Error getting configmap %s from API server: %v", pod.Name, err)
			}
			if configMap == nil {
				continue
			}
			client.Exec("mkdir -p " + podConfigMapDir)
			log.GetLogger(ctx).Debugf("%v", "create dir for configmaps "+podConfigMapDir)

			for k, v := range configMap.Data {
				// TODO: Ensure that these files are deleted in failure cases
				fullPath := filepath.Join(podConfigMapDir, k)
				// NOTE(review): UploadData's return value is not captured;
				// the err checked below is the stale GetConfigMap error.
				common.UploadData(client, []byte(v), fullPath, fs.FileMode(*mode))
				if err != nil {
					return fmt.Errorf("Could not write configmap file %s", fullPath)
				}
			}
		} else if podVolSpec.Secret != nil {
			svs := podVolSpec.Secret
			mode := podVolSpec.Secret.DefaultMode
			podSecretDir := filepath.Join(common.PodVolRoot, BuildRemoteExecutionPodName(pod)+"/", mountSpec.Name)
			secret, err := p.resourceManager.GetSecret(svs.SecretName, pod.Namespace)
			// NOTE(review): same Optional/err ordering concern as the
			// configmap branch above.
			if svs.Optional != nil && !*svs.Optional {
				return fmt.Errorf("Secret %s is required by Pod %s and does not exist", svs.SecretName, pod.Name)
			}
			if err != nil {
				return fmt.Errorf("Error getting secret %s from API server: %v", pod.Name, err)
			}
			if secret == nil {
				continue
			}
			client.Exec("mkdir -p " + podSecretDir)
			log.GetLogger(ctx).Debugf("%v", "create dir for secrets "+podSecretDir)
			for k, v := range secret.Data {
				fullPath := filepath.Join(podSecretDir, k)
				common.UploadData(client, []byte(v), fullPath, fs.FileMode(*mode))
				if err != nil {
					return fmt.Errorf("Could not write secret file %s", fullPath)
				}
			}
		} else if podVolSpec.EmptyDir != nil {
			// pod-global directory
			edPath := filepath.Join(common.PodVolRoot, BuildRemoteExecutionPodName(pod)+"/"+mountSpec.Name)
			// mounted for every container
			client.Exec("mkdir -p " + edPath)
			// without size limit for now
		}
	}
	return nil
}
256 |
257 | // Search for a particular volume spec by name in the Pod spec
258 | func findPodVolumeSpec(pod *v1.Pod, name string) *v1.VolumeSource {
259 | for _, volume := range pod.Spec.Volumes {
260 | if volume.Name == name {
261 | return &volume.VolumeSource
262 | }
263 | }
264 | return nil
265 | }
266 |
267 | func checkPodsStatus(p *KNOCProvider, ctx context.Context) {
268 | if len(p.pods) == 0 {
269 | return
270 | }
271 | log.GetLogger(ctx).Debug("received checkPodStatus")
272 | client, err := simplessh.ConnectWithKey(os.Getenv("REMOTE_HOST")+":"+os.Getenv("REMOTE_PORT"), os.Getenv("REMOTE_USER"), os.Getenv("REMOTE_KEY"))
273 | if err != nil {
274 | panic(err)
275 | }
276 | defer client.Close()
277 | instance_name := ""
278 | now := metav1.Now()
279 | for _, pod := range p.pods {
280 | if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodPending {
281 | continue
282 | }
283 | // if its not initialized yet
284 | if pod.Status.Conditions[0].Status == v1.ConditionFalse && pod.Status.Conditions[0].Type == v1.PodInitialized {
285 | containers_count := len(pod.Spec.InitContainers)
286 | successfull := 0
287 | failed := 0
288 | valid := 1
289 | for idx, container := range pod.Spec.InitContainers {
290 | //TODO: find next initcontainer and run it
291 | instance_name = BuildRemoteExecutionInstanceName(container, pod)
292 | if len(pod.Status.InitContainerStatuses) < len(pod.Spec.InitContainers) {
293 | pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses, v1.ContainerStatus{
294 | Name: container.Name,
295 | Image: container.Image,
296 | Ready: true,
297 | RestartCount: 0,
298 | State: v1.ContainerState{
299 | Running: &v1.ContainerStateRunning{
300 | StartedAt: now,
301 | },
302 | },
303 | })
304 | continue
305 | }
306 | lastStatus := pod.Status.InitContainerStatuses[idx]
307 | if lastStatus.Ready {
308 | status_file, err := client.Exec("cat " + ".knoc/" + instance_name + ".status")
309 | status := string(status_file)
310 | if len(status) > 1 {
311 | // remove '\n' from end of status due to golang's string conversion :X
312 | status = status[:len(status)-1]
313 | }
314 | if err != nil || status == "" {
315 | // still running
316 | continue
317 | }
318 | i, err := strconv.Atoi(status)
319 | reason := "Unknown"
320 | if i == 0 && err == nil {
321 | successfull++
322 | reason = "Completed"
323 | } else {
324 | failed++
325 | reason = "Error"
326 | }
327 | containers_count--
328 | pod.Status.InitContainerStatuses[idx] = v1.ContainerStatus{
329 | Name: container.Name,
330 | Image: container.Image,
331 | Ready: false,
332 | State: v1.ContainerState{
333 | Terminated: &v1.ContainerStateTerminated{
334 | StartedAt: lastStatus.State.Running.StartedAt,
335 | FinishedAt: now,
336 | Reason: reason,
337 | ExitCode: int32(i),
338 | },
339 | },
340 | }
341 | valid = 0
342 | } else {
343 | containers_count--
344 | status := lastStatus.State.Terminated.ExitCode
345 | i, _ := strconv.Atoi(string(status))
346 | if i == 0 {
347 | successfull++
348 | } else {
349 | failed++
350 | }
351 | }
352 | }
353 | if containers_count == 0 && pod.Status.Phase == v1.PodRunning {
354 | if successfull == len(pod.Spec.InitContainers) {
355 | log.GetLogger(ctx).Debug("SUCCEEDED InitContainers")
356 | // PodInitialized = true
357 | pod.Status.Conditions[0].Status = v1.ConditionTrue
358 | // PodReady = true
359 | pod.Status.Conditions[1].Status = v1.ConditionTrue
360 | p.startMainContainers(ctx, pod)
361 | valid = 0
362 | } else {
363 | pod.Status.Phase = v1.PodFailed
364 | valid = 0
365 | }
366 | }
367 | if valid == 0 {
368 | p.UpdatePod(ctx, pod)
369 | }
370 | // log.GetLogger(ctx).Infof("init checkPodStatus:%v %v %v", pod.Name, successfull, failed)
371 | } else {
372 | // if its initialized
373 | containers_count := len(pod.Spec.Containers)
374 | successfull := 0
375 | failed := 0
376 | valid := 1
377 | for idx, container := range pod.Spec.Containers {
378 | instance_name = BuildRemoteExecutionInstanceName(container, pod)
379 | lastStatus := pod.Status.ContainerStatuses[idx]
380 | if lastStatus.Ready {
381 | status_file, err := client.Exec("cat " + ".knoc/" + instance_name + ".status")
382 | status := string(status_file)
383 | if len(status) > 1 {
384 | // remove '\n' from end of status due to golang's string conversion :X
385 | status = status[:len(status)-1]
386 | }
387 | if err != nil || status == "" {
388 | // still running
389 | continue
390 | }
391 | containers_count--
392 | i, err := strconv.Atoi(status)
393 | reason := "Unknown"
394 | if i == 0 && err == nil {
395 | successfull++
396 | reason = "Completed"
397 | } else {
398 | failed++
399 | reason = "Error"
400 | // log.GetLogger(ctx).Info("[checkPodStatus] CONTAINER_FAILED")
401 | }
402 | pod.Status.ContainerStatuses[idx] = v1.ContainerStatus{
403 | Name: container.Name,
404 | Image: container.Image,
405 | Ready: false,
406 | State: v1.ContainerState{
407 | Terminated: &v1.ContainerStateTerminated{
408 | StartedAt: lastStatus.State.Running.StartedAt,
409 | FinishedAt: now,
410 | Reason: reason,
411 | ExitCode: int32(i),
412 | },
413 | },
414 | }
415 | valid = 0
416 | } else {
417 | if lastStatus.State.Terminated == nil {
418 | // containers not yet turned on
419 | if p.initContainersActive(pod) {
420 | continue
421 | }
422 | }
423 | containers_count--
424 | status := lastStatus.State.Terminated.ExitCode
425 |
426 | i := status
427 | if i == 0 && err == nil {
428 | successfull++
429 | } else {
430 | failed++
431 | }
432 | }
433 | }
434 | if containers_count == 0 && pod.Status.Phase == v1.PodRunning {
435 | // containers are ready
436 | pod.Status.Conditions[1].Status = v1.ConditionFalse
437 |
438 | if successfull == len(pod.Spec.Containers) {
439 | log.GetLogger(ctx).Debug("[checkPodStatus] POD_SUCCEEDED ")
440 | pod.Status.Phase = v1.PodSucceeded
441 | } else {
442 | log.GetLogger(ctx).Debug("[checkPodStatus] POD_FAILED ", successfull, " ", containers_count, " ", len(pod.Spec.Containers), " ", failed)
443 | pod.Status.Phase = v1.PodFailed
444 | }
445 | valid = 0
446 | }
447 | if valid == 0 {
448 | p.UpdatePod(ctx, pod)
449 | }
450 | log.GetLogger(ctx).Debugf("main checkPodStatus:%v %v %v", pod.Name, successfull, failed)
451 |
452 | }
453 | }
454 |
455 | }
456 |
--------------------------------------------------------------------------------
/test/README.md:
--------------------------------------------------------------------------------
1 | Copied from https://github.com/virtual-kubelet/virtual-kubelet/tree/master/test.
2 |
3 | If not otherwise mentioned, Virtual Kubelet copyright/licensing applies.
4 |
--------------------------------------------------------------------------------
/test/e2e/README.md:
--------------------------------------------------------------------------------
1 | # Importable End-To-End Test Suite
2 |
3 | Virtual Kubelet (VK) provides an importable end-to-end (E2E) test suite containing a set of common integration tests. As a provider, you can import the test suite and use it to validate your VK implementation.
4 |
5 | ## Prerequisite
6 |
7 | To run the E2E test suite, three things are required:
8 |
9 | - A local Kubernetes cluster (we have tested with [Docker for Mac](https://docs.docker.com/docker-for-mac/install/) and [Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/));
10 | - Your _kubeconfig_ default context points to the local Kubernetes cluster;
11 | - [skaffold](https://skaffold.dev/docs/getting-started/#installing-skaffold)
12 |
13 | > The test suite is based on [VK 1.0](https://github.com/virtual-kubelet/virtual-kubelet/releases/tag/v1.0.0). If your VK implementation is based on legacy VK library (< v1.0.0), you will have to upgrade it to VK 1.0 using [virtual-kubelet/node-cli](https://github.com/virtual-kubelet/node-cli).
14 |
15 | ### Skaffold Folder
16 |
17 | Before running the E2E test suite, you will need to copy the [`./hack`](../../hack) folder containing Skaffold-related files such as Dockerfile, manifests, and certificates to your VK project root. Skaffold essentially helps package your virtual kubelet into a container based on the given [`Dockerfile`](../../hack/skaffold/virtual-kubelet/Dockerfile) and deploy it as a pod (see [`pod.yml`](../../hack/skaffold/virtual-kubelet/pod.yml)) to your Kubernetes test cluster. In summary, you will likely need to modify the VK name in those files, customize the VK configuration file, and the API server certificates (`-crt.pem` and `-key.pem`) before running the test suite.
18 |
19 | ### Makefile.e2e
20 |
21 | Also, you will need to copy [`Makefile.e2e`](../../Makefile.e2e) to your VK project root. It contains necessary `make` commands to run the E2E test suite. Do not forget to add `include Makefile.e2e` in your `Makefile`.
22 |
23 | ### File Structure
24 |
25 | A minimal VK provider should now have a file structure similar to the one below:
26 |
27 | ```console
28 | .
29 | ├── Makefile
30 | ├── Makefile.e2e
31 | ├── README.md
32 | ├── cmd
33 | │ └── virtual-kubelet
34 | │ └── main.go
35 | ├── go.mod
36 | ├── go.sum
37 | ├── hack
38 | │ └── skaffold
39 | │ └── virtual-kubelet
40 | │ ├── Dockerfile
41 | │ ├── base.yml
42 | │ ├── pod.yml
43 | │ ├── skaffold.yml
44 | │ ├── vkubelet-provider-0-cfg.json
45 | │ ├── vkubelet-provider-0-crt.pem
46 | │ └── vkubelet-provider-0-key.pem
47 | ├── test
48 | │ └── e2e
49 | │ └── main_test.go # import and run the E2E test suite here
50 | ├── provider.go # provider-specific VK implementation
51 | ├── provider_test.go # unit test
52 | ```
53 |
54 | ## Importing the Test Suite
55 |
56 | The test suite can be easily imported in your test files (e.g. `./test/e2e/main_test.go`) with the following import statement:
57 | ```go
58 | import (
59 | vke2e "github.com/virtual-kubelet/virtual-kubelet/test/e2e"
60 | )
61 | ```
62 |
63 | ### Test Suite Customization
64 |
65 | The test suite allows providers to customize the test suite using `EndToEndTestSuiteConfig`:
66 |
67 | ```go
68 | // EndToEndTestSuiteConfig is the config passed to initialize the testing framework and test suite.
69 | type EndToEndTestSuiteConfig struct {
70 | // Kubeconfig is the path to the kubeconfig file to use when running the test suite outside a Kubernetes cluster.
71 | Kubeconfig string
72 | // Namespace is the name of the Kubernetes namespace to use for running the test suite (i.e. where to create pods).
73 | Namespace string
74 | // NodeName is the name of the virtual-kubelet node to test.
75 | NodeName string
    // WatchTimeout is the duration for which the framework watches a particular condition to be satisfied (e.g. watches a pod becoming ready)
77 | WatchTimeout time.Duration
78 | // Setup is a function that sets up provider-specific resource in the test suite
79 | Setup suite.SetUpFunc
80 | // Teardown is a function that tears down provider-specific resources from the test suite
81 | Teardown suite.TeardownFunc
82 | // ShouldSkipTest is a function that determines whether the test suite should skip certain tests
83 | ShouldSkipTest suite.ShouldSkipTestFunc
84 | }
85 | ```
86 |
87 | > `Setup()` is invoked before running the E2E test suite, and `Teardown()` is invoked after all the E2E tests are finished.
88 |
89 | You will need an `EndToEndTestSuiteConfig` to create an `EndToEndTestSuite` using `NewEndToEndTestSuite`. After that, invoke `Run` from `EndToEndTestSuite` to start the test suite. The code snippet below is a minimal example of how to import and run the test suite in your test file.
90 |
91 | ```go
92 | package e2e
93 |
94 | import (
95 | "flag"
96 | "fmt"
97 | "testing"
98 | "time"
99 |
100 | vke2e "github.com/virtual-kubelet/virtual-kubelet/test/e2e"
101 | )
102 |
103 | var (
104 | kubeconfig string
105 | namespace string
106 | nodeName string
107 | )
108 |
109 | var defaultNamespace = "default"
110 | var defaultNodeName = "default-node"
111 |
112 | // Read the following variables from command-line flags
113 | func init() {
114 | flag.StringVar(&kubeconfig, "kubeconfig", "", "path to the kubeconfig file to use when running the test suite outside a kubernetes cluster")
115 | flag.StringVar(&namespace, "namespace", defaultNamespace, "the name of the kubernetes namespace to use for running the test suite (i.e. where to create pods)")
116 | flag.StringVar(&nodeName, "node-name", defaultNodeName, "the name of the virtual-kubelet node to test")
117 | flag.Parse()
118 | }
119 |
120 | func setup() error {
121 | fmt.Println("Setting up end-to-end test suite...")
122 | return nil
123 | }
124 |
125 | func teardown() error {
126 | fmt.Println("Tearing down end-to-end test suite...")
127 | return nil
128 | }
129 |
130 | func shouldSkipTest(testName string) bool {
131 | // Skip the test 'TestGetStatsSummary'
132 | return testName == "TestGetStatsSummary"
133 | }
134 |
135 | // TestEndToEnd runs the e2e tests against a previously configured cluster
136 | func TestEndToEnd(t *testing.T) {
137 | config := vke2e.EndToEndTestSuiteConfig{
138 | Kubeconfig: kubeconfig,
139 | Namespace: namespace,
140 | NodeName: nodeName,
141 | Setup: setup,
142 | Teardown: teardown,
143 | ShouldSkipTest: shouldSkipTest,
144 | WatchTimeout: 5 * time.Minute,
145 | }
146 | ts := vke2e.NewEndToEndTestSuite(config)
147 | ts.Run(t)
148 | }
149 | ```
150 |
151 | ## Running the Test Suite
152 |
153 | Since our CI uses Minikube, we describe below how to run E2E on top of it.
154 |
155 | To create a Minikube cluster, run the following command after [installing Minikube](https://github.com/kubernetes/minikube#installation):
156 |
157 | ```bash
158 | minikube start
159 | ```
160 |
161 | To run the E2E test suite, you can run the following command:
162 |
163 | ```bash
164 | make e2e
165 | ```
166 |
167 | You can see from the console output whether the tests in the test suite pass or not.
168 |
169 | ```console
170 | ...
171 | === RUN TestEndToEnd
172 | Setting up end-to-end test suite for mock provider...
173 | suite.go:62: True
174 | === RUN TestEndToEnd/TestCreatePodWithMandatoryInexistentConfigMap
175 | === RUN TestEndToEnd/TestCreatePodWithMandatoryInexistentSecrets
176 | === RUN TestEndToEnd/TestCreatePodWithOptionalInexistentConfigMap
177 | === RUN TestEndToEnd/TestCreatePodWithOptionalInexistentSecrets
178 | === RUN TestEndToEnd/TestGetPods
179 | basic.go:40: Created pod: nginx-testgetpods-g9s42
180 | basic.go:46: Pod nginx-testgetpods-g9s42 ready
181 | === RUN TestEndToEnd/TestGetStatsSummary
182 | === RUN TestEndToEnd/TestNodeCreateAfterDelete
183 | === RUN TestEndToEnd/TestPodLifecycleForceDelete
184 | basic.go:208: Created pod: nginx-testpodlifecycleforcedelete-wrjgk
185 | basic.go:214: Pod nginx-testpodlifecycleforcedelete-wrjgk ready
186 | basic.go:247: Force deleted pod: nginx-testpodlifecycleforcedelete-wrjgk
187 | basic.go:264: Pod ended as phase: Running
188 | === RUN TestEndToEnd/TestPodLifecycleGracefulDelete
189 | basic.go:135: Created pod: nginx-testpodlifecyclegracefuldelete-tp49x
190 | basic.go:141: Pod nginx-testpodlifecyclegracefuldelete-tp49x ready
191 | basic.go:168: Deleted pod: nginx-testpodlifecyclegracefuldelete-tp49x
192 | Tearing down end-to-end test suite for mock provider...
193 | --- PASS: TestEndToEnd (11.75s)
194 | --- PASS: TestEndToEnd/TestCreatePodWithMandatoryInexistentConfigMap (0.04s)
195 | --- PASS: TestEndToEnd/TestCreatePodWithMandatoryInexistentSecrets (0.03s)
196 | --- PASS: TestEndToEnd/TestCreatePodWithOptionalInexistentConfigMap (0.73s)
197 | --- PASS: TestEndToEnd/TestCreatePodWithOptionalInexistentSecrets (1.00s)
198 | --- PASS: TestEndToEnd/TestGetPods (0.80s)
199 | --- PASS: TestEndToEnd/TestGetStatsSummary (0.80s)
200 | --- PASS: TestEndToEnd/TestNodeCreateAfterDelete (5.25s)
201 | --- PASS: TestEndToEnd/TestPodLifecycleForceDelete (2.05s)
202 | --- PASS: TestEndToEnd/TestPodLifecycleGracefulDelete (1.05s)
203 | PASS
204 | ok github.com/virtual-kubelet/virtual-kubelet/internal/test/e2e 12.298s
205 | ? github.com/virtual-kubelet/virtual-kubelet/internal/test/e2e/framework [no test files]
206 | ...
207 | ```
208 |
--------------------------------------------------------------------------------
/test/e2e/basic.go:
--------------------------------------------------------------------------------
1 | package e2e
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "testing"
7 | "time"
8 |
9 | "gotest.tools/assert"
10 | v1 "k8s.io/api/core/v1"
11 | apierrors "k8s.io/apimachinery/pkg/api/errors"
12 | "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
13 | )
14 |
const (
	// deleteGracePeriodForProvider is the maximum amount of time we allow for the provider to react to deletion of a pod
	// before proceeding to assert that the pod has been deleted.
	// Tests sleep for this duration after delete operations instead of polling the provider.
	deleteGracePeriodForProvider = 1 * time.Second
)
20 |
21 | // TestGetPods tests that the /pods endpoint works, and only returns pods for our kubelet
22 | func (ts *EndToEndTestSuite) TestGetPods(t *testing.T) {
23 | ctx := context.Background()
24 |
25 | // Create a pod with prefix "busybox-" having a single container.
26 | podSpec := f.CreateDummyPodObjectWithPrefix(t.Name(), "busysay", "busybox")
27 | podSpec.Spec.NodeName = f.NodeName
28 |
29 | busybox, err := f.CreatePod(ctx, podSpec)
30 | if err != nil {
31 | t.Fatal(err)
32 | }
33 | // Delete the pod after the test finishes.
34 | defer func() {
35 | if err := f.DeletePodImmediately(ctx, busybox.Namespace, busybox.Name); err != nil && !apierrors.IsNotFound(err) {
36 | t.Error(err)
37 | }
38 | }()
39 | t.Logf("Created pod: %s", busybox.Name)
40 |
41 | // Wait for the "busybox-" pod to be reported as running and ready.
42 | if _, err := f.WaitUntilPodReady(busybox.Namespace, busybox.Name); err != nil {
43 | t.Fatal(err)
44 | }
45 | t.Logf("Pod %s ready", busybox.Name)
46 |
47 | k8sPods, err := f.GetRunningPodsFromKubernetes(ctx)
48 | if err != nil {
49 | t.Fatal(err)
50 | }
51 |
52 | podFound := false
53 | for _, pod := range k8sPods.Items {
54 | if pod.Spec.NodeName != f.NodeName {
55 | t.Fatalf("Found pod with node name %s, whereas expected %s", pod.Spec.NodeName, f.NodeName)
56 | }
57 | if pod.UID == busybox.UID {
58 | podFound = true
59 | }
60 | }
61 | if !podFound {
62 | t.Fatal("busybox pod not found")
63 | }
64 | }
65 |
66 | // TestGetStatsSummary creates a pod having two containers and queries the /stats/summary endpoint of the virtual-kubelet.
67 | // It expects this endpoint to return stats for the current node, as well as for the aforementioned pod and each of its two containers.
68 | func (ts *EndToEndTestSuite) TestGetStatsSummary(t *testing.T) {
69 | ctx := context.Background()
70 |
71 | // Create a pod with prefix "busybox-" having three containers.
72 | pod, err := f.CreatePod(ctx, f.CreateDummyPodObjectWithPrefix(t.Name(), "busysay", "busybox", "busybox", "busybox"))
73 | if err != nil {
74 | t.Fatal(err)
75 | }
76 | // Delete the "busybox-0-X" pod after the test finishes.
77 | defer func() {
78 | if err := f.DeletePodImmediately(ctx, pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
79 | t.Error(err)
80 | }
81 | }()
82 |
83 | // Wait for the "busybox-" pod to be reported as running and ready.
84 | if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
85 | t.Fatal(err)
86 | }
87 |
88 | // Grab the stats from the provider.
89 | stats, err := f.GetStatsSummary(ctx)
90 | if err != nil {
91 | t.Fatal(err)
92 | }
93 |
94 | // Make sure that we've got stats for the current node.
95 | if stats.Node.NodeName != f.NodeName {
96 | t.Fatalf("expected stats for node %s, got stats for node %s", f.NodeName, stats.Node.NodeName)
97 | }
98 |
99 | // Make sure the "busybox-" pod exists in the slice of PodStats.
100 | idx, err := findPodInPodStats(stats, pod)
101 | if err != nil {
102 | t.Fatal(err)
103 | }
104 |
105 | // Make sure that we've got stats for all the containers in the "busybox-" pod.
106 | desiredContainerStatsCount := len(pod.Spec.Containers)
107 | currentContainerStatsCount := len(stats.Pods[idx].Containers)
108 | if currentContainerStatsCount != desiredContainerStatsCount {
109 | t.Fatalf("expected stats for %d containers, got stats for %d containers", desiredContainerStatsCount, currentContainerStatsCount)
110 | }
111 | }
112 |
113 | // TestPodLifecycleGracefulDelete creates a pod and verifies that the provider has been asked to create it.
114 | // Then, it deletes the pods and verifies that the provider has been asked to delete it.
115 | // These verifications are made using the /stats/summary endpoint of the virtual-kubelet, by checking for the presence or absence of the pods.
116 | // Hence, the provider being tested must implement the PodMetricsProvider interface.
117 | func (ts *EndToEndTestSuite) TestPodLifecycleGracefulDelete(t *testing.T) {
118 | ctx := context.Background()
119 |
120 | // Create a pod with prefix "busybox-" having a single container.
121 | podSpec := f.CreateDummyPodObjectWithPrefix(t.Name(), "busysay", "busybox")
122 | podSpec.Spec.NodeName = f.NodeName
123 |
124 | pod, err := f.CreatePod(ctx, podSpec)
125 | if err != nil {
126 | t.Fatal(err)
127 | }
128 | // Delete the pod after the test finishes.
129 | defer func() {
130 | if err := f.DeletePodImmediately(ctx, pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
131 | t.Error(err)
132 | }
133 | }()
134 | t.Logf("Created pod: %s", pod.Name)
135 |
136 | // Wait for the "busybox-" pod to be reported as running and ready.
137 | if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
138 | t.Fatal(err)
139 | }
140 | t.Logf("Pod %s ready", pod.Name)
141 |
142 | // Grab the pods from the provider.
143 | pods, err := f.GetRunningPodsFromProvider(ctx)
144 | assert.NilError(t, err)
145 |
146 | // Check if the pod exists in the slice of PodStats.
147 | assert.NilError(t, findPodInPods(pods, pod))
148 |
149 | podCh := make(chan error)
150 | var podLast *v1.Pod
151 | go func() {
152 | // Close the podCh channel, signaling we've observed deletion of the pod.
153 | defer close(podCh)
154 |
155 | var err error
156 | podLast, err = f.WaitUntilPodDeleted(pod.Namespace, pod.Name)
157 | if err != nil {
158 | // Propagate the error to the outside so we can fail the test.
159 | podCh <- err
160 | }
161 | }()
162 |
163 | // Gracefully delete the "busybox-" pod.
164 | if err := f.DeletePod(ctx, pod.Namespace, pod.Name); err != nil {
165 | t.Fatal(err)
166 | }
167 | t.Logf("Deleted pod: %s", pod.Name)
168 |
169 | // Wait for the delete event to be ACKed.
170 | if err := <-podCh; err != nil {
171 | t.Fatal(err)
172 | }
173 |
174 | time.Sleep(deleteGracePeriodForProvider)
175 | // Give the provider some time to react to the MODIFIED/DELETED events before proceeding.
176 | // Grab the pods from the provider.
177 | pods, err = f.GetRunningPodsFromProvider(ctx)
178 | assert.NilError(t, err)
179 |
180 | // Make sure the pod DOES NOT exist in the provider's set of running pods
181 | assert.Assert(t, findPodInPods(pods, pod) != nil)
182 |
183 | // Make sure we saw the delete event, and the delete event was graceful
184 | assert.Assert(t, podLast != nil)
185 | assert.Assert(t, podLast.ObjectMeta.GetDeletionGracePeriodSeconds() != nil)
186 | assert.Assert(t, *podLast.ObjectMeta.GetDeletionGracePeriodSeconds() > 0)
187 | }
188 |
189 | // TestPodLifecycleForceDelete creates one podsand verifies that the provider has created them
190 | // and put them in the running lifecycle. It then does a force delete on the pod, and verifies the provider
191 | // has deleted it.
192 | func (ts *EndToEndTestSuite) TestPodLifecycleForceDelete(t *testing.T) {
193 | ctx := context.Background()
194 |
195 | podSpec := f.CreateDummyPodObjectWithPrefix(t.Name(), "busysay", "busybox")
196 | // Create a pod with prefix having a single container.
197 | pod, err := f.CreatePod(ctx, podSpec)
198 | if err != nil {
199 | t.Fatal(err)
200 | }
201 | // Delete the pod after the test finishes.
202 | defer func() {
203 | if err := f.DeletePodImmediately(ctx, pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
204 | t.Error(err)
205 | }
206 | }()
207 | t.Logf("Created pod: %s", pod.Name)
208 |
209 | // Wait for the "busybox-" pod to be reported as running and ready.
210 | if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
211 | t.Fatal(err)
212 | }
213 | t.Logf("Pod %s ready", pod.Name)
214 |
215 | // Grab the pods from the provider.
216 | pods, err := f.GetRunningPodsFromProvider(ctx)
217 | assert.NilError(t, err)
218 |
219 | // Check if the pod exists in the slice of Pods.
220 | assert.NilError(t, findPodInPods(pods, pod))
221 |
222 | // Wait for the pod to be deleted in a separate goroutine.
223 | // This ensures that we don't possibly miss the MODIFIED/DELETED events due to establishing the watch too late in the process.
224 | // It also makes sure that in light of soft deletes, we properly handle non-graceful pod deletion
225 | podCh := make(chan error)
226 | var podLast *v1.Pod
227 | go func() {
228 | // Close the podCh channel, signaling we've observed deletion of the pod.
229 | defer close(podCh)
230 |
231 | var err error
232 | // Wait for the pod to be reported as having been deleted.
233 | podLast, err = f.WaitUntilPodDeleted(pod.Namespace, pod.Name)
234 | if err != nil {
235 | // Propagate the error to the outside so we can fail the test.
236 | podCh <- err
237 | }
238 | }()
239 |
240 | time.Sleep(deleteGracePeriodForProvider)
241 | // Forcibly delete the pod.
242 | if err := f.DeletePodImmediately(ctx, pod.Namespace, pod.Name); err != nil {
243 | t.Logf("Last saw pod in state: %+v", podLast)
244 | t.Fatal(err)
245 | }
246 | t.Log("Force deleted pod: ", pod.Name)
247 |
248 | // Wait for the delete event to be ACKed.
249 | if err := <-podCh; err != nil {
250 | t.Logf("Last saw pod in state: %+v", podLast)
251 | t.Fatal(err)
252 | }
253 | // Give the provider some time to react to the MODIFIED/DELETED events before proceeding.
254 | time.Sleep(deleteGracePeriodForProvider)
255 |
256 | // Grab the pods from the provider.
257 | pods, err = f.GetRunningPodsFromProvider(ctx)
258 | assert.NilError(t, err)
259 |
260 | // Make sure the "busybox-" pod DOES NOT exist in the slice of Pods anymore.
261 | assert.Assert(t, findPodInPods(pods, pod) != nil)
262 | f.WaitUntilPodDeleted(pod.Namespace, pod.Name)
263 | t.Logf("Pod ended as phase: %+v", podLast.Status.Phase)
264 |
265 | }
266 |
267 | // findPodInPodStats returns the index of the specified pod in the .pods field of the specified Summary object.
268 | // It returns an error if the specified pod is not found.
269 | func findPodInPodStats(summary *v1alpha1.Summary, pod *v1.Pod) (int, error) {
270 | for i, p := range summary.Pods {
271 | if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && string(p.PodRef.UID) == string(pod.UID) {
272 | return i, nil
273 | }
274 | }
275 | return -1, fmt.Errorf("failed to find pod \"%s/%s\" in the slice of pod stats", pod.Namespace, pod.Name)
276 | }
277 |
278 | // findPodInPodStats returns the index of the specified pod in the .pods field of the specified PodList object.
279 | // It returns error if the pod doesn't exist in the podlist
280 | func findPodInPods(pods *v1.PodList, pod *v1.Pod) error {
281 | for _, p := range pods.Items {
282 | if p.Namespace == pod.Namespace && p.Name == pod.Name && string(p.UID) == string(pod.UID) {
283 | return nil
284 | }
285 | }
286 | return fmt.Errorf("failed to find pod \"%s/%s\" in the slice of pod list", pod.Namespace, pod.Name)
287 | }
288 |
--------------------------------------------------------------------------------
/test/e2e/knoc_test.go:
--------------------------------------------------------------------------------
1 | package e2e
2 |
3 | import (
4 | "testing"
5 | )
6 |
// TestKnoc is currently a no-op placeholder; the commented-out call sketches
// how the importable end-to-end suite would be instantiated for KNoC.
func TestKnoc(t *testing.T) {
	// vke2e.NewEndToEndTestSuite()
}
10 |
--------------------------------------------------------------------------------
/test/e2e/suite.go:
--------------------------------------------------------------------------------
1 | package e2e
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | corev1 "k8s.io/api/core/v1"
8 | "k8s.io/apimachinery/pkg/watch"
9 |
10 | "github.com/CARV-ICS-FORTH/knoc/internal/test/e2e/framework"
11 | "github.com/CARV-ICS-FORTH/knoc/internal/test/suite"
12 | )
13 |
// defaultWatchTimeout is applied when the caller leaves WatchTimeout unset in the config.
const defaultWatchTimeout = 2 * time.Minute

// f is a testing framework that is accessible across the e2e package;
// it is assigned once in NewEndToEndTestSuite.
var f *framework.Framework
18 |
// EndToEndTestSuite holds the setup, teardown, and shouldSkipTest functions for a specific provider
type EndToEndTestSuite struct {
	// setup is invoked once before the suite's tests run.
	setup          suite.SetUpFunc
	// teardown is invoked once after all the suite's tests finish.
	teardown       suite.TeardownFunc
	// shouldSkipTest decides, given a test name, whether that test is skipped.
	shouldSkipTest suite.ShouldSkipTestFunc
}
25 |
// EndToEndTestSuiteConfig is the config passed to initialize the testing framework and test suite.
type EndToEndTestSuiteConfig struct {
	// Kubeconfig is the path to the kubeconfig file to use when running the test suite outside a Kubernetes cluster.
	Kubeconfig string
	// Namespace is the name of the Kubernetes namespace to use for running the test suite (i.e. where to create pods).
	Namespace string
	// NodeName is the name of the virtual-kubelet node to test.
	NodeName string
	// WatchTimeout is the duration for which the framework watches a particular condition to be satisfied (e.g. watches a pod becoming ready)
	WatchTimeout time.Duration
	// Setup is a function that sets up provider-specific resource in the test suite
	Setup suite.SetUpFunc
	// Teardown is a function that tears down provider-specific resources from the test suite
	Teardown suite.TeardownFunc
	// ShouldSkipTest is a function that determines whether the test suite should skip certain tests
	ShouldSkipTest suite.ShouldSkipTestFunc
}
43 |
44 | // Setup runs the setup function from the provider and other
45 | // procedures before running the test suite
46 | func (ts *EndToEndTestSuite) Setup(t *testing.T) {
47 | if err := ts.setup(); err != nil {
48 | t.Fatal(err)
49 | }
50 |
51 | // Wait for the virtual kubelet node resource to become fully ready
52 | if err := f.WaitUntilNodeCondition(func(ev watch.Event) (bool, error) {
53 | n := ev.Object.(*corev1.Node)
54 | if n.Name != f.NodeName {
55 | return false, nil
56 | }
57 |
58 | for _, c := range n.Status.Conditions {
59 | if c.Type != "Ready" {
60 | continue
61 | }
62 | t.Log(c.Status)
63 | return c.Status == corev1.ConditionTrue, nil
64 | }
65 |
66 | return false, nil
67 | }); err != nil {
68 | t.Fatal(err)
69 | }
70 | }
71 |
72 | // Teardown runs the teardown function from the provider and other
73 | // procedures after running the test suite
74 | func (ts *EndToEndTestSuite) Teardown() {
75 | if err := ts.teardown(); err != nil {
76 | panic(err)
77 | }
78 | }
79 |
// ShouldSkipTest returns true if a provider wants to skip running a particular test.
// It simply delegates to the provider-supplied predicate configured at construction time.
func (ts *EndToEndTestSuite) ShouldSkipTest(testName string) bool {
	return ts.shouldSkipTest(testName)
}
84 |
// Run runs tests registered in the test suite.
// It hands control to the internal suite package, which discovers and executes the Test* methods.
func (ts *EndToEndTestSuite) Run(t *testing.T) {
	suite.Run(t, ts)
}
89 |
90 | // NewEndToEndTestSuite returns a new EndToEndTestSuite given a test suite configuration,
91 | // setup, and teardown functions from provider
92 | func NewEndToEndTestSuite(cfg EndToEndTestSuiteConfig) *EndToEndTestSuite {
93 | if cfg.Namespace == "" {
94 | panic("Empty namespace")
95 | } else if cfg.NodeName == "" {
96 | panic("Empty node name")
97 | }
98 |
99 | if cfg.WatchTimeout == time.Duration(0) {
100 | cfg.WatchTimeout = defaultWatchTimeout
101 | }
102 |
103 | f = framework.NewTestingFramework(cfg.Kubeconfig, cfg.Namespace, cfg.NodeName, cfg.WatchTimeout)
104 |
105 | emptyFunc := func() error { return nil }
106 | if cfg.Setup == nil {
107 | cfg.Setup = emptyFunc
108 | }
109 | if cfg.Teardown == nil {
110 | cfg.Teardown = emptyFunc
111 | }
112 | if cfg.ShouldSkipTest == nil {
113 | // This will not skip any test in the test suite
114 | cfg.ShouldSkipTest = func(_ string) bool { return false }
115 | }
116 |
117 | return &EndToEndTestSuite{
118 | setup: cfg.Setup,
119 | teardown: cfg.Teardown,
120 | shouldSkipTest: cfg.ShouldSkipTest,
121 | }
122 | }
123 |
--------------------------------------------------------------------------------