├── .gitignore ├── LICENSE ├── MAINTAINERS ├── Makefile ├── README.md ├── charts ├── core-workshop-infra │ ├── Chart.yaml │ └── templates │ │ ├── code-server.yaml │ │ ├── external-dns.yaml │ │ └── traefik-2.yaml ├── kubernetes-dashboard │ ├── external-chart │ └── values-override.yaml └── podinfo │ ├── external-chart │ └── values-override.yaml ├── cmd └── workshopctl │ ├── cmd │ ├── apply.go │ ├── cleanup.go │ ├── flags.go │ ├── gen.go │ ├── init.go │ ├── kubectl.go │ └── root.go │ └── main.go ├── docs └── cli │ ├── workshopctl.md │ ├── workshopctl_apply.md │ ├── workshopctl_cleanup.md │ ├── workshopctl_gen.md │ ├── workshopctl_init.md │ ├── workshopctl_kubectl.md │ └── workshopctl_version.md ├── go.mod ├── go.sum ├── hack ├── cobra.go ├── ldflags.sh └── traefik2.txt ├── images └── k8s-web-ide │ ├── .bash_aliases │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── entrypoint.sh │ ├── kubeconfig.yaml │ └── settings.json ├── pkg ├── apply │ ├── apply.go │ └── wait.go ├── charts │ └── charts.go ├── config │ ├── keyval │ │ └── keyval.go │ └── types.go ├── constants │ └── constants.go ├── gen │ └── gen.go ├── git │ └── git.go ├── gotk │ └── gotk.go ├── logs │ ├── flag │ │ └── flag.go │ └── logs.go ├── provider │ ├── digitalocean │ │ ├── cloud.go │ │ └── dns.go │ ├── provider.go │ └── providers │ │ └── providers.go ├── util │ ├── context.go │ └── util.go └── version │ ├── cmd │ └── command.go │ └── version.go └── tutorials ├── 1-podinfo ├── README.md └── solution │ ├── README.md │ ├── configmap.yaml │ ├── deployment.yaml │ ├── ingress.yaml │ ├── namespace.yaml │ ├── secret.yaml │ ├── service.yaml │ └── servicemonitor.yaml ├── 2-nodejs-app ├── README.md ├── server.js └── solution │ ├── Dockerfile.b64 │ ├── Makefile.b64 │ └── README.md └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/* 2 | .cache 3 | bin 4 | vendor 5 | .kubeconfig 6 | *.solution 7 | workshopctl.yaml 8 | clusters 9 | .kube 10 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Lucas Käldström (@luxas) 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PROJECT=github.com/cloud-native-nordics/workshopctl 2 | GO_VERSION=1.16 3 | BINARIES=workshopctl 4 | CACHE_DIR = $(shell pwd)/bin/cache 5 | 6 | all: build 7 | build: $(BINARIES) 8 | 9 | .PHONY: $(BINARIES) 10 | $(BINARIES): 11 | make shell COMMAND="make bin/$@" 12 | 13 | generated: /go/bin/go-bindata 14 | # This autogenerates the file ./pkg/charts/charts.go from manifests in the ./charts directory 15 | # The package name of ./pkg/charts is charts, and the "charts" prefix is stripped from the beginning 16 | # of the file name path within the application. 17 | # The modification time is hardcoded to 2020-01-01 00:00:00 18 | go-bindata \ 19 | -pkg=charts \ 20 | -o=pkg/charts/charts.go \ 21 | -modtime=1577836800 \ 22 | -prefix=charts \ 23 | charts/... 24 | 25 | .PHONY: bin/workshopctl 26 | bin/workshopctl: bin/%: generated 27 | $(MAKE) tidy 28 | CGO_ENABLED=0 go build -ldflags "$(shell ./hack/ldflags.sh)" -o bin/$* ./cmd/$* 29 | 30 | shell: 31 | mkdir -p $(CACHE_DIR)/go $(CACHE_DIR)/cache 32 | docker run -it --rm \ 33 | -v $(CACHE_DIR)/go:/go \ 34 | -v $(CACHE_DIR)/cache:/.cache/go-build \ 35 | -v $(shell pwd):/go/src/${PROJECT} \ 36 | -w /go/src/${PROJECT} \ 37 | -u $(shell id -u):$(shell id -g) \ 38 | golang:$(GO_VERSION) \ 39 | $(COMMAND) 40 | 41 | node_modules: 42 | docker run -it -v $(pwd):/project -w /project node:slim npm install 43 | 44 | tidy: /go/bin/goimports 45 | go mod tidy 46 | gofmt -s -w pkg cmd 47 | goimports -w pkg cmd 48 | go run hack/cobra.go 49 | 50 | /go/bin/go-bindata: 51 | go get -u github.com/go-bindata/go-bindata/... 
52 | 53 | /go/bin/goimports: 54 | go get golang.org/x/tools/cmd/goimports 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # workshopctl 2 | 3 | A tool for running workshops easily in the cloud! 4 | 5 | **WARNING:** This tool is pre-alpha and under heavy development. Don't use it for anything 6 | very important quite yet! However, contributions are very welcome! 7 | 8 | Please check out [these slides](https://docs.google.com/presentation/d/10OxH3s_dFDZ362NIy013LD5Gs78vBXeg2Mp9c5eEjoQ/edit#slide=id.ga4596f4c55_0_201) for an up-to-date description of this project. 9 | 10 | ## Quick Start 11 | 12 | 1. `workshopctl init` -- Give information about what cloud provider to use (and its token), 13 | and what domain to serve on (e.g. `workshopctl.kubernetesfinland.com`) 14 | 1. `workshopctl gen` -- Generate unique sets of Kubernetes manifests, one per cluster. 15 | 1. `workshopctl apply` -- Creates the clusters in the cloud, and applies the manifests 16 | 17 | Boom! A Visual Studio Code instance running in the browser is now available at e.g. `cluster-01.workshopctl.kubernetesfinland.com` in the given example. 18 | The VS Code terminal has full privileges to the Kubernetes cluster, so the attendee may easily 19 | access `kubectl`, `helm` and `docker` (if needed) for completing the tasks in your workshop. 20 | You can also provide pre-created materials in VS Code for the attendee. 21 | 22 | ## How this works 23 | 24 | TODO: Write more docs here. 
25 | -------------------------------------------------------------------------------- /charts/core-workshop-infra/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: core-workshop-infra 3 | version: 0.1.0 4 | -------------------------------------------------------------------------------- /charts/core-workshop-infra/templates/code-server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: code-server 5 | namespace: workshopctl 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: cluster-admin 10 | subjects: 11 | - kind: ServiceAccount 12 | name: code-server 13 | namespace: workshopctl 14 | --- 15 | apiVersion: v1 16 | kind: ServiceAccount 17 | metadata: 18 | name: code-server 19 | namespace: workshopctl 20 | --- 21 | apiVersion: apps/v1 22 | kind: Deployment 23 | metadata: 24 | name: code-server 25 | namespace: workshopctl 26 | labels: 27 | app: code-server 28 | spec: 29 | selector: 30 | matchLabels: 31 | app: code-server 32 | template: 33 | metadata: 34 | labels: 35 | app: code-server 36 | spec: 37 | serviceAccountName: code-server 38 | containers: 39 | - image: luxas/k8s-web-ide:v4.3.0 40 | # TODO: In the future: ghcr.io/cloud-native-nordics/k8s-web-ide:v4.3.0 41 | imagePullPolicy: Always 42 | name: code-server 43 | ports: 44 | - name: http 45 | containerPort: 8080 46 | env: 47 | - name: TUTORIALS_REPO 48 | valueFrom: 49 | secretKeyRef: 50 | name: workshopctl 51 | key: TUTORIALS_REPO 52 | - name: TUTORIALS_DIR 53 | valueFrom: 54 | secretKeyRef: 55 | name: workshopctl 56 | key: TUTORIALS_DIR 57 | - name: PASSWORD 58 | valueFrom: 59 | secretKeyRef: 60 | name: workshopctl 61 | key: CLUSTER_PASSWORD 62 | --- 63 | apiVersion: v1 64 | kind: Service 65 | metadata: 66 | name: code-server 67 | namespace: workshopctl 68 | spec: 69 | selector: 
70 | app: code-server 71 | ports: 72 | - port: 80 73 | targetPort: 8080 74 | name: http 75 | --- 76 | apiVersion: networking.k8s.io/v1 77 | kind: Ingress 78 | metadata: 79 | name: code-server 80 | namespace: workshopctl 81 | annotations: 82 | # Force a short TTL so that DNS record changes can propagate faster 83 | external-dns.alpha.kubernetes.io/ttl: "30s" 84 | spec: 85 | ingressClassName: traefik 86 | rules: 87 | - host: "{{ .Values.workshopctl.CLUSTER_DOMAIN }}" 88 | http: 89 | paths: 90 | - path: / 91 | pathType: Prefix 92 | backend: 93 | service: 94 | name: code-server 95 | port: 96 | number: 80 97 | -------------------------------------------------------------------------------- /charts/core-workshop-infra/templates/external-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: external-dns 5 | namespace: workshopctl 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | name: external-dns 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["services","endpoints","pods"] 14 | verbs: ["get","watch","list"] 15 | - apiGroups: ["extensions","networking.k8s.io"] 16 | resources: ["ingresses"] 17 | verbs: ["get","watch","list"] 18 | - apiGroups: [""] 19 | resources: ["nodes"] 20 | verbs: ["list"] 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: external-dns-viewer 26 | namespace: workshopctl 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: external-dns 31 | subjects: 32 | - kind: ServiceAccount 33 | name: external-dns 34 | namespace: workshopctl 35 | --- 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: external-dns 40 | namespace: workshopctl 41 | spec: 42 | replicas: 1 43 | strategy: 44 | type: Recreate 45 | selector: 46 | matchLabels: 47 | app: external-dns 48 | template: 49 | metadata: 50 | labels: 
51 | app: external-dns 52 | spec: 53 | serviceAccountName: external-dns 54 | containers: 55 | - name: external-dns 56 | image: k8s.gcr.io/external-dns/external-dns:v0.11.0 57 | args: 58 | # Watch Ingress and Service objects and create DNS records correspondingly 59 | - --source=ingress 60 | - --source=service 61 | # Makes ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones 62 | - --domain-filter={{ .Values.workshopctl.ROOT_DOMAIN }}. 63 | - --provider={{ .Values.workshopctl.EXTERNAL_DNS_PROVIDER }} 64 | # Prevents ExternalDNS from deleting any records, omit to enable full synchronization 65 | - --policy=upsert-only 66 | - --registry=txt 67 | - --txt-owner-id=workshopctl 68 | # Resync often as this is a highly dynamic system 69 | - --interval=30s 70 | - --log-level=debug 71 | # Possibly in the future, add --events support 72 | # PROVIDER-CUSTOMIZE: Need to set per-provider ENV VARs here 73 | -------------------------------------------------------------------------------- /charts/core-workshop-infra/templates/traefik-2.yaml: -------------------------------------------------------------------------------- 1 | ### RBAC ### 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: traefik 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - secrets 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - networking.k8s.io 19 | resources: 20 | - ingresses 21 | - ingressclasses 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | - apiGroups: 27 | - networking.k8s.io 28 | resources: 29 | - ingresses/status 30 | verbs: 31 | - update 32 | --- 33 | apiVersion: v1 34 | kind: ServiceAccount 35 | metadata: 36 | name: traefik 37 | namespace: workshopctl 38 | --- 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | kind: ClusterRoleBinding 41 | metadata: 42 | name: traefik 43 | roleRef: 44 | apiGroup: rbac.authorization.k8s.io 45 | kind: 
ClusterRole 46 | name: traefik 47 | subjects: 48 | - kind: ServiceAccount 49 | name: traefik 50 | namespace: workshopctl 51 | --- 52 | ### ConfigMap, Deployment & Service ### 53 | apiVersion: v1 54 | kind: ConfigMap 55 | metadata: 56 | name: traefik-cfg 57 | namespace: workshopctl 58 | labels: 59 | app: traefik 60 | data: 61 | api.yaml: | 62 | http: 63 | routers: 64 | my-api: 65 | rule: Host(`traefik.{{ .Values.workshopctl.CLUSTER_DOMAIN }}`) 66 | service: api@internal 67 | entryPoints: 68 | - traefik 69 | - websecure 70 | middlewares: 71 | - api-auth 72 | middlewares: 73 | api-auth: 74 | basicAuth: 75 | users: 76 | # This value is replaced on-demand from the given env var, that comes from a Secret 77 | - "\{\{ env "CLUSTER_BASIC_AUTH_BCRYPT" \}\}" 78 | --- 79 | apiVersion: apps/v1 80 | kind: Deployment 81 | metadata: 82 | name: traefik 83 | namespace: workshopctl 84 | labels: 85 | app: traefik 86 | spec: 87 | replicas: 1 88 | selector: 89 | matchLabels: 90 | app: traefik 91 | template: 92 | metadata: 93 | labels: 94 | app: traefik 95 | spec: 96 | serviceAccountName: traefik 97 | containers: 98 | - name: traefik 99 | image: traefik:v2.6.3 100 | args: 101 | - --api=true 102 | - --metrics.prometheus=true 103 | - --accesslog=true 104 | - --accesslog.filepath=/traefik-cache/access.log 105 | - --log.level=DEBUG 106 | - --providers.file.directory=/traefik-cfg 107 | - --providers.file.watch=true 108 | - --providers.kubernetesingress=true 109 | - --providers.kubernetesingress.ingressendpoint.publishedservice=workshopctl/traefik 110 | - --entrypoints.web.address=:80 111 | - --entrypoints.web.http.redirections.entrypoint.to=websecure 112 | - --entrypoints.web.http.redirections.entrypoint.scheme=https 113 | - --entrypoints.websecure.address=:443 114 | - --entrypoints.websecure.http.tls.certresolver=letsencrypt 115 | - --entrypoints.traefik.address=:8080 116 | - --entrypoints.traefik.http.tls.certresolver=letsencrypt 117 | - 
--certificatesresolvers.letsencrypt.acme.dnschallenge=true 118 | - --certificatesresolvers.letsencrypt.acme.dnschallenge.provider=$(TRAEFIK_DNS_PROVIDER) 119 | - --certificatesresolvers.letsencrypt.acme.dnschallenge.resolvers=1.1.1.1:53,1.0.0.1:53 120 | - --certificatesresolvers.letsencrypt.acme.email=$(LETSENCRYPT_EMAIL) 121 | - --certificatesresolvers.letsencrypt.acme.storage=/traefik-cache/acme.json 122 | # This allows Traefik keep a Let's Encrypt HTTPS connection to the frontend 123 | # while talking to an HTTPS backend as it were HTTP (not trusting the 124 | # HTTPS certificate of the backend). This is a quick fix for showing the 125 | # Kubernetes Dashboard. 126 | - --serversTransport.insecureSkipVerify=true 127 | # Please note that this is the staging Let's Encrypt server. 128 | # Once you get things working, you should remove that whole line altogether. 129 | # - --certificatesresolvers.letsencrypt.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory 130 | ports: 131 | - name: http 132 | containerPort: 80 133 | - name: https 134 | containerPort: 443 135 | - name: admin 136 | containerPort: 8080 137 | env: 138 | - name: LETSENCRYPT_EMAIL 139 | valueFrom: 140 | secretKeyRef: 141 | name: workshopctl 142 | key: LETSENCRYPT_EMAIL 143 | - name: TRAEFIK_DNS_PROVIDER 144 | valueFrom: 145 | secretKeyRef: 146 | name: workshopctl 147 | key: TRAEFIK_DNS_PROVIDER 148 | # This is used in the "static configuration" for the Traefik API 149 | - name: CLUSTER_BASIC_AUTH_BCRYPT 150 | valueFrom: 151 | secretKeyRef: 152 | name: workshopctl 153 | key: CLUSTER_BASIC_AUTH_BCRYPT 154 | # PROVIDER-CUSTOMIZE: Here there needs to be one more dynamic "patch" that makes Traefik able to 155 | # access the DNS token 156 | volumeMounts: 157 | - name: traefik-cfg 158 | mountPath: /traefik-cfg 159 | - name: traefik-cache 160 | mountPath: /traefik-cache 161 | volumes: 162 | - name: traefik-cfg 163 | configMap: 164 | name: traefik-cfg 165 | - name: traefik-cache 166 | emptyDir: 
{} 167 | --- 168 | apiVersion: v1 169 | kind: Service 170 | metadata: 171 | name: traefik 172 | namespace: workshopctl 173 | labels: 174 | app: traefik 175 | annotations: 176 | # Force a short TTL so that DNS record changes can propagate faster 177 | external-dns.alpha.kubernetes.io/ttl: "30s" 178 | # Create a DNS record for the traefik API & dashboard 179 | external-dns.alpha.kubernetes.io/hostname: "traefik.{{ .Values.workshopctl.CLUSTER_DOMAIN }}" 180 | spec: 181 | ports: 182 | - name: http 183 | port: 80 184 | - name: https 185 | port: 443 186 | selector: 187 | app: traefik 188 | type: LoadBalancer 189 | --- 190 | apiVersion: networking.k8s.io/v1 191 | kind: IngressClass 192 | metadata: 193 | name: traefik 194 | annotations: 195 | ingressclass.kubernetes.io/is-default-class: "true" 196 | spec: 197 | controller: traefik.io/ingress-controller 198 | -------------------------------------------------------------------------------- /charts/kubernetes-dashboard/external-chart: -------------------------------------------------------------------------------- 1 | https://kubernetes.github.io/dashboard/kubernetes-dashboard -------------------------------------------------------------------------------- /charts/kubernetes-dashboard/values-override.yaml: -------------------------------------------------------------------------------- 1 | metricsScraper: 2 | enabled: true 3 | 4 | metrics-server: 5 | enabled: true 6 | 7 | ingress: 8 | enabled: true 9 | annotations: 10 | external-dns.alpha.kubernetes.io/ttl: "30s" 11 | className: traefik 12 | hosts: 13 | - "dashboard.{{ .workshopctl.CLUSTER_DOMAIN }}" 14 | -------------------------------------------------------------------------------- /charts/podinfo/external-chart: -------------------------------------------------------------------------------- 1 | https://stefanprodan.github.io/podinfo/podinfo -------------------------------------------------------------------------------- /charts/podinfo/values-override.yaml: 
-------------------------------------------------------------------------------- 1 | replicaCount: 3 2 | logLevel: info 3 | 4 | ui: 5 | color: "#34577c" 6 | message: "Hello world!" 7 | 8 | image: 9 | tag: 5.0.3 10 | 11 | ingress: 12 | # TODO: Need to upgrade the Ingress version from v1beta1 13 | enabled: false 14 | # path: / 15 | # hosts: 16 | # - "podinfo.{{ .workshopctl.CLUSTER_DOMAIN }}" 17 | # annotations: 18 | # Force a short TTL so that DNS record changes can propagate faster 19 | # external-dns.alpha.kubernetes.io/ttl: "30s" 20 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/apply.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/cloud-native-nordics/workshopctl/pkg/apply" 5 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 6 | log "github.com/sirupsen/logrus" 7 | "github.com/spf13/cobra" 8 | "github.com/spf13/pflag" 9 | ) 10 | 11 | type ApplyFlags struct { 12 | *RootFlags 13 | } 14 | 15 | // NewApplyCommand returns the "apply" command 16 | func NewApplyCommand(rf *RootFlags) *cobra.Command { 17 | af := &ApplyFlags{ 18 | RootFlags: rf, 19 | } 20 | cmd := &cobra.Command{ 21 | Use: "apply", 22 | Short: "Create a Kubernetes cluster and apply the desired manifests", 23 | Run: func(cmd *cobra.Command, args []string) { 24 | if err := RunApply(af); err != nil { 25 | log.Fatal(err) 26 | } 27 | }, 28 | } 29 | 30 | addApplyFlags(cmd.Flags(), af) 31 | return cmd 32 | } 33 | 34 | func addApplyFlags(fs *pflag.FlagSet, af *ApplyFlags) {} 35 | 36 | func RunApply(af *ApplyFlags) error { 37 | ctx := util.NewContext(af.DryRun, af.RootDir) 38 | cfg, err := loadConfig(ctx, af.ConfigPath) 39 | if err != nil { 40 | return err 41 | } 42 | return apply.Apply(ctx, cfg) 43 | } 44 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/cleanup.go: 
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 7 | "github.com/cloud-native-nordics/workshopctl/pkg/provider" 8 | "github.com/cloud-native-nordics/workshopctl/pkg/provider/providers" 9 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 10 | log "github.com/sirupsen/logrus" 11 | "github.com/spf13/cobra" 12 | "github.com/spf13/pflag" 13 | ) 14 | 15 | type CleanupFlags struct { 16 | *RootFlags 17 | } 18 | 19 | // NewCleanupCommand returns the "cleanup" command 20 | func NewCleanupCommand(rf *RootFlags) *cobra.Command { 21 | cf := &CleanupFlags{ 22 | RootFlags: rf, 23 | } 24 | cmd := &cobra.Command{ 25 | Use: "cleanup", 26 | Short: "Delete the k8s-managed cluster", 27 | Run: func(cmd *cobra.Command, args []string) { 28 | if err := RunCleanup(cf); err != nil { 29 | log.Fatal(err) 30 | } 31 | }, 32 | } 33 | 34 | addCleanupFlags(cmd.Flags(), cf) 35 | return cmd 36 | } 37 | 38 | func addCleanupFlags(fs *pflag.FlagSet, cf *CleanupFlags) {} 39 | 40 | func RunCleanup(cf *CleanupFlags) error { 41 | ctx := util.NewContext(cf.DryRun, cf.RootDir) 42 | 43 | cfg, err := loadConfig(ctx, cf.ConfigPath) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | cloudP, err := providers.CloudProviders().NewCloudProvider(ctx, &cfg.CloudProvider) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | dnsP, err := providers.DNSProviders().NewDNSProvider(ctx, &cfg.DNSProvider, cfg.RootDomain) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | return config.ForCluster(ctx, cfg.Clusters, cfg, func(clusterCtx context.Context, clusterInfo *config.ClusterInfo) error { 59 | // TODO: Create helper func for this 60 | clusterMeta := provider.ClusterMeta{ 61 | NamePrefix: clusterInfo.Name, 62 | Index: clusterInfo.Index, 63 | } 64 | 65 | // Delete the Kubernetes cluster 66 | if err := cloudP.DeleteCluster(clusterCtx, clusterMeta); err != nil { 67 | 
return err 68 | } 69 | // Delete the KubeConfig file 70 | kubeconfigPath := util.JoinPaths(ctx, clusterInfo.Index.KubeConfigPath()) 71 | if err := util.DeletePath(clusterCtx, kubeconfigPath); err != nil { 72 | return err 73 | } 74 | // Delete the DNS records 75 | return dnsP.CleanupRecords(clusterCtx, clusterMeta) 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/flags.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strconv" 7 | 8 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 9 | "github.com/spf13/pflag" 10 | ) 11 | 12 | const ( 13 | EnvCluster = "WORKSHOPCTL_CLUSTER" 14 | EnvClusterDesc = "What cluster number you want to connect to. Env var " + EnvCluster + " can also be used." 15 | ) 16 | 17 | type ClusterFlag uint16 18 | 19 | func (f *ClusterFlag) String() string { 20 | if f == nil { 21 | *f = ClusterFlag(0) 22 | } 23 | if *f == 0 { 24 | clusterEnv := os.Getenv(EnvCluster) 25 | if clusterEnv != "" { 26 | _ = f.Set(clusterEnv) 27 | } 28 | } 29 | return fmt.Sprintf("%d", *f) 30 | } 31 | func (f *ClusterFlag) Set(str string) error { 32 | clusterNum, err := strconv.Atoi(str) 33 | if err != nil { 34 | return err 35 | } 36 | *f = ClusterFlag(clusterNum) 37 | return nil 38 | } 39 | func (f ClusterFlag) Type() string { return "cluster-number" } 40 | 41 | func (f ClusterFlag) Number() config.ClusterNumber { 42 | return config.ClusterNumber(uint16(f)) 43 | } 44 | 45 | func AddClusterFlag(fs *pflag.FlagSet, cf *ClusterFlag) { 46 | fs.VarP(cf, "cluster", "c", EnvClusterDesc) 47 | } 48 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/gen.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "io/ioutil" 6 | 7 | 
"github.com/cloud-native-nordics/workshopctl/pkg/config" 8 | "github.com/cloud-native-nordics/workshopctl/pkg/constants" 9 | "github.com/cloud-native-nordics/workshopctl/pkg/gen" 10 | "github.com/cloud-native-nordics/workshopctl/pkg/git" 11 | "github.com/cloud-native-nordics/workshopctl/pkg/provider/providers" 12 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 13 | log "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | "github.com/spf13/pflag" 16 | ) 17 | 18 | type GenFlags struct { 19 | *RootFlags 20 | 21 | SkipLocalCharts bool 22 | } 23 | 24 | // NewGenCommand returns the "gen" command 25 | func NewGenCommand(rf *RootFlags) *cobra.Command { 26 | gf := &GenFlags{ 27 | RootFlags: rf, 28 | SkipLocalCharts: false, 29 | } 30 | cmd := &cobra.Command{ 31 | Use: "gen", 32 | Short: "Generate a set of manifests based on the configuration", 33 | Run: func(cmd *cobra.Command, args []string) { 34 | if err := RunGen(gf); err != nil { 35 | log.Fatal(err) 36 | } 37 | }, 38 | } 39 | 40 | addGenFlags(cmd.Flags(), gf) 41 | return cmd 42 | } 43 | 44 | func addGenFlags(fs *pflag.FlagSet, gf *GenFlags) { 45 | fs.BoolVar(&gf.SkipLocalCharts, "skip-local-charts", gf.SkipLocalCharts, "Don't consider the local directory's charts/ directory") 46 | } 47 | 48 | func loadConfig(ctx context.Context, configPath string) (*config.Config, error) { 49 | cfg := &config.Config{} 50 | if err := util.ReadYAMLFile(configPath, cfg); err != nil { 51 | return nil, err 52 | } 53 | if err := cfg.Complete(ctx); err != nil { 54 | return nil, err 55 | } 56 | return cfg, nil 57 | } 58 | 59 | func RunGen(gf *GenFlags) error { 60 | ctx := util.NewContext(gf.DryRun, gf.RootDir) 61 | cfg, err := loadConfig(ctx, gf.ConfigPath) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | charts, err := gen.SetupInternalChartCache(ctx) 67 | if err != nil { 68 | return err 69 | } 70 | 71 | // Only generate "external" charts if the skip flag is false and the charts directory exists 72 | chartsDir := 
util.JoinPaths(ctx, constants.ChartsDir) 73 | if exists, _ := util.PathExists(chartsDir); exists && !gf.SkipLocalCharts { 74 | chartInfos, err := ioutil.ReadDir(chartsDir) 75 | if err != nil { 76 | return err 77 | } 78 | for _, chartInfo := range chartInfos { 79 | if !chartInfo.IsDir() { 80 | continue 81 | } 82 | chart, err := gen.SetupExternalChartCache(ctx, chartInfo.Name()) 83 | if err != nil { 84 | return err 85 | } 86 | charts = append(charts, chart) 87 | } 88 | } 89 | 90 | // dry-run can be always true here as we're not gonna use the provider for requests, only manifest gen 91 | dnsCtx := util.WithDryRun(ctx, true) 92 | dnsProvider, err := providers.DNSProviders().NewDNSProvider(dnsCtx, &cfg.DNSProvider, cfg.RootDomain) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | err = config.ForCluster(ctx, cfg.Clusters, cfg, func(clusterCtx context.Context, clusterInfo *config.ClusterInfo) error { 98 | for _, chart := range charts { 99 | logger := util.Logger(ctx) 100 | logger.Infof("Generating chart %q...", chart.Name) 101 | if err := gen.GenerateChart(clusterCtx, chart, clusterInfo, dnsProvider.ValuesProcessors(), dnsProvider.ChartProcessors()); err != nil { 102 | return err 103 | } 104 | } 105 | return nil 106 | }) 107 | if err != nil { 108 | return err 109 | } 110 | 111 | return git.PushManifests(ctx, cfg) 112 | } 113 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/init.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 5 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 6 | log "github.com/sirupsen/logrus" 7 | "github.com/spf13/cobra" 8 | "github.com/spf13/pflag" 9 | ) 10 | 11 | type InitFlags struct { 12 | *RootFlags 13 | *config.Config 14 | 15 | Yes bool 16 | } 17 | 18 | // NewInitCommand returns the "init" command 19 | func NewInitCommand(rf *RootFlags) *cobra.Command 
{ 20 | inf := &InitFlags{ 21 | RootFlags: rf, 22 | Config: &config.Config{}, 23 | } 24 | cmd := &cobra.Command{ 25 | Use: "init", 26 | Short: "Setup the user configuration interactively", 27 | Run: func(cmd *cobra.Command, args []string) { 28 | if err := RunInit(inf); err != nil { 29 | log.Fatal(err) 30 | } 31 | }, 32 | } 33 | 34 | addInitFlags(cmd.Flags(), inf) 35 | return cmd 36 | } 37 | 38 | func addInitFlags(fs *pflag.FlagSet, inf *InitFlags) { 39 | fs.StringVar(&inf.Name, "name", inf.Name, "What name this workshop should have") 40 | fs.StringVar(&inf.CloudProvider.ServiceAccountPath, "cloud-provider-service-account-path", inf.CloudProvider.ServiceAccountPath, "Path to service account for cloud provider") 41 | fs.StringVar(&inf.DNSProvider.ServiceAccountPath, "dns-provider-service-account-path", inf.DNSProvider.ServiceAccountPath, "Path to service account for dns provider") 42 | fs.StringVar(&inf.RootDomain, "root-domain", inf.RootDomain, "What the root domain to be managed is") 43 | fs.StringVar(&inf.LetsEncryptEmail, "lets-encrypt-email", inf.LetsEncryptEmail, "What Let's Encrypt email to use") 44 | fs.StringVar(&inf.Git.Repo, "git-repo", inf.Git.Repo, "What git repo to use. By default, try to auto-detect git remote origin.") 45 | fs.StringVar(&inf.Git.ServiceAccountPath, "git-provider-service-account-path", inf.Git.ServiceAccountPath, "Path to service account for git provider") 46 | 47 | fs.BoolVarP(&inf.Yes, "yes", "y", inf.Yes, "Overwrite the workshopctl.yaml file although it exists") 48 | } 49 | 50 | func RunInit(inf *InitFlags) error { 51 | // TODO: Make this a command-line-input based workflow? 
52 | // Don't dry-run, no need for that 53 | ctx := util.NewContext(false, inf.RootDir) 54 | if util.FileExists(inf.ConfigPath) && !inf.Yes { 55 | log.Infof("%s already exists, and --yes isn't specified, won't overwrite file", inf.ConfigPath) 56 | return nil 57 | } 58 | 59 | // Try to dynamically figure out the git origin 60 | if inf.Git.Repo == "" { 61 | rootPath := util.JoinPaths(ctx) 62 | origin, _, err := util.ShellCommand(ctx, `git -C %s remote -v | grep push | grep origin | awk '{print $2}'`, rootPath).Run() 63 | if err != nil { 64 | return err 65 | } 66 | inf.Git.Repo = origin 67 | } 68 | 69 | if err := inf.Config.Complete(ctx); err != nil { 70 | return err 71 | } 72 | 73 | return util.WriteYAMLFile(ctx, inf.ConfigPath, inf.Config) 74 | } 75 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/kubectl.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 8 | log "github.com/sirupsen/logrus" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/pflag" 11 | ) 12 | 13 | type KubectlFlags struct { 14 | *RootFlags 15 | 16 | Cluster ClusterFlag 17 | } 18 | 19 | // NewKubectlCommand returns the "kubectl" command 20 | func NewKubectlCommand(rf *RootFlags) *cobra.Command { 21 | kf := &KubectlFlags{ 22 | RootFlags: rf, 23 | } 24 | cmd := &cobra.Command{ 25 | Use: "kubectl [kubectl commands]", 26 | Short: "An alias for the kubectl command, pointing the KUBECONFIG to the right place", 27 | Run: func(cmd *cobra.Command, args []string) { 28 | if err := RunKubectl(kf, args); err != nil { 29 | log.Fatal(err) 30 | } 31 | }, 32 | } 33 | 34 | addKubectlFlags(cmd.Flags(), kf) 35 | return cmd 36 | } 37 | 38 | func addKubectlFlags(fs *pflag.FlagSet, kf *KubectlFlags) { 39 | AddClusterFlag(fs, &kf.Cluster) 40 | } 41 | 42 | func RunKubectl(kf *KubectlFlags, args []string) error { 43 | if 
kf.Cluster == 0 { 44 | return fmt.Errorf("--cluster is required") 45 | } 46 | 47 | ctx := util.NewContext(false, kf.RootDir) 48 | 49 | cn := kf.Cluster.Number() 50 | kubeconfigPath := util.JoinPaths(ctx, cn.KubeConfigPath()) 51 | kubeconfigEnv := fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath) 52 | _, _, err := util.Command(ctx, "kubectl", args...). 53 | WithEnv(kubeconfigEnv). 54 | WithStdio(nil, os.Stdout, os.Stderr). // TODO: Maybe an extra flag to enable stdin? 55 | Run() 56 | return err 57 | } 58 | -------------------------------------------------------------------------------- /cmd/workshopctl/cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/cloud-native-nordics/workshopctl/pkg/logs" 7 | logflag "github.com/cloud-native-nordics/workshopctl/pkg/logs/flag" 8 | versioncmd "github.com/cloud-native-nordics/workshopctl/pkg/version/cmd" 9 | "github.com/sirupsen/logrus" 10 | "github.com/spf13/cobra" 11 | "github.com/spf13/pflag" 12 | ) 13 | 14 | type RootFlags struct { 15 | LogLevel logrus.Level 16 | ConfigPath string 17 | RootDir string 18 | DryRun bool 19 | } 20 | 21 | // NewWorkshopCtlCommand returns the root command for workshopctl 22 | func NewWorkshopCtlCommand() *cobra.Command { 23 | rf := &RootFlags{ 24 | LogLevel: logrus.InfoLevel, 25 | ConfigPath: "workshopctl.yaml", 26 | RootDir: ".", 27 | DryRun: true, 28 | } 29 | root := &cobra.Command{ 30 | Use: "workshopctl", 31 | Short: "workshopctl: easily run Kubernetes workshops", 32 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 33 | // Set the desired logging level, now that the flags are parsed 34 | logs.Logger.SetLevel(rf.LogLevel) 35 | }, 36 | } 37 | 38 | addGlobalFlags(root.PersistentFlags(), rf) 39 | 40 | root.AddCommand(NewInitCommand(rf)) 41 | root.AddCommand(NewGenCommand(rf)) 42 | root.AddCommand(NewApplyCommand(rf)) 43 | root.AddCommand(NewKubectlCommand(rf)) 44 | 
root.AddCommand(NewCleanupCommand(rf)) 45 | root.AddCommand(versioncmd.NewCmdVersion(os.Stdout)) 46 | return root 47 | } 48 | 49 | func addGlobalFlags(fs *pflag.FlagSet, rf *RootFlags) { 50 | logflag.LogLevelFlagVar(fs, &rf.LogLevel) 51 | fs.StringVar(&rf.RootDir, "root-dir", rf.RootDir, "Where the workshopctl directory is. Must be a Git repo.") 52 | fs.StringVar(&rf.ConfigPath, "config-path", rf.ConfigPath, "Where to find the config file") 53 | fs.BoolVar(&rf.DryRun, "dry-run", rf.DryRun, "Whether to apply the selected operation, or just print what would happen (to dry-run)") 54 | } 55 | -------------------------------------------------------------------------------- /cmd/workshopctl/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/cloud-native-nordics/workshopctl/cmd/workshopctl/cmd" 7 | ) 8 | 9 | func main() { 10 | if err := Run(); err != nil { 11 | os.Exit(1) 12 | } 13 | } 14 | 15 | // Run runs the main cobra command of this application 16 | func Run() error { 17 | return cmd.NewWorkshopCtlCommand().Execute() 18 | } 19 | -------------------------------------------------------------------------------- /docs/cli/workshopctl.md: -------------------------------------------------------------------------------- 1 | ## workshopctl 2 | 3 | workshopctl: easily run Kubernetes workshops 4 | 5 | ### Options 6 | 7 | ``` 8 | --config-path string Where to find the config file (default "workshopctl.yaml") 9 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 10 | -h, --help help for workshopctl 11 | --log-level loglevel Specify the loglevel for the program (default info) 12 | --root-dir string Where the workshopctl directory is. Must be a Git repo. 
(default ".") 13 | ``` 14 | 15 | ### SEE ALSO 16 | 17 | * [workshopctl apply](workshopctl_apply.md) - Create a Kubernetes cluster and apply the desired manifests 18 | * [workshopctl cleanup](workshopctl_cleanup.md) - Delete the k8s-managed cluster 19 | * [workshopctl gen](workshopctl_gen.md) - Generate a set of manifests based on the configuration 20 | * [workshopctl init](workshopctl_init.md) - Setup the user configuration interactively 21 | * [workshopctl kubectl](workshopctl_kubectl.md) - An alias for the kubectl command, pointing the KUBECONFIG to the right place 22 | * [workshopctl version](workshopctl_version.md) - Print the version 23 | 24 | -------------------------------------------------------------------------------- /docs/cli/workshopctl_apply.md: -------------------------------------------------------------------------------- 1 | ## workshopctl apply 2 | 3 | Create a Kubernetes cluster and apply the desired manifests 4 | 5 | ``` 6 | workshopctl apply [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | -h, --help help for apply 13 | ``` 14 | 15 | ### Options inherited from parent commands 16 | 17 | ``` 18 | --config-path string Where to find the config file (default "workshopctl.yaml") 19 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 20 | --log-level loglevel Specify the loglevel for the program (default info) 21 | --root-dir string Where the workshopctl directory is. Must be a Git repo. 
(default ".") 22 | ``` 23 | 24 | ### SEE ALSO 25 | 26 | * [workshopctl](workshopctl.md) - workshopctl: easily run Kubernetes workshops 27 | 28 | -------------------------------------------------------------------------------- /docs/cli/workshopctl_cleanup.md: -------------------------------------------------------------------------------- 1 | ## workshopctl cleanup 2 | 3 | Delete the k8s-managed cluster 4 | 5 | ``` 6 | workshopctl cleanup [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | -h, --help help for cleanup 13 | ``` 14 | 15 | ### Options inherited from parent commands 16 | 17 | ``` 18 | --config-path string Where to find the config file (default "workshopctl.yaml") 19 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 20 | --log-level loglevel Specify the loglevel for the program (default info) 21 | --root-dir string Where the workshopctl directory is. Must be a Git repo. (default ".") 22 | ``` 23 | 24 | ### SEE ALSO 25 | 26 | * [workshopctl](workshopctl.md) - workshopctl: easily run Kubernetes workshops 27 | 28 | -------------------------------------------------------------------------------- /docs/cli/workshopctl_gen.md: -------------------------------------------------------------------------------- 1 | ## workshopctl gen 2 | 3 | Generate a set of manifests based on the configuration 4 | 5 | ``` 6 | workshopctl gen [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | -h, --help help for gen 13 | --skip-local-charts Don't consider the local directory's charts/ directory 14 | ``` 15 | 16 | ### Options inherited from parent commands 17 | 18 | ``` 19 | --config-path string Where to find the config file (default "workshopctl.yaml") 20 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 21 | --log-level loglevel Specify the loglevel for the program (default info) 22 | --root-dir string Where the workshopctl directory is. 
Must be a Git repo. (default ".") 23 | ``` 24 | 25 | ### SEE ALSO 26 | 27 | * [workshopctl](workshopctl.md) - workshopctl: easily run Kubernetes workshops 28 | 29 | -------------------------------------------------------------------------------- /docs/cli/workshopctl_init.md: -------------------------------------------------------------------------------- 1 | ## workshopctl init 2 | 3 | Setup the user configuration interactively 4 | 5 | ``` 6 | workshopctl init [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | --cloud-provider-service-account-path string Path to service account for cloud provider 13 | --dns-provider-service-account-path string Path to service account for dns provider 14 | --git-provider-service-account-path string Path to service account for git provider 15 | --git-repo string What git repo to use. By default, try to auto-detect git remote origin. 16 | -h, --help help for init 17 | --lets-encrypt-email string What Let's Encrypt email to use 18 | --name string What name this workshop should have 19 | --root-domain string What the root domain to be managed is 20 | -y, --yes Overwrite the workshopctl.yaml file although it exists 21 | ``` 22 | 23 | ### Options inherited from parent commands 24 | 25 | ``` 26 | --config-path string Where to find the config file (default "workshopctl.yaml") 27 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 28 | --log-level loglevel Specify the loglevel for the program (default info) 29 | --root-dir string Where the workshopctl directory is. Must be a Git repo. 
(default ".") 30 | ``` 31 | 32 | ### SEE ALSO 33 | 34 | * [workshopctl](workshopctl.md) - workshopctl: easily run Kubernetes workshops 35 | 36 | -------------------------------------------------------------------------------- /docs/cli/workshopctl_kubectl.md: -------------------------------------------------------------------------------- 1 | ## workshopctl kubectl 2 | 3 | An alias for the kubectl command, pointing the KUBECONFIG to the right place 4 | 5 | ``` 6 | workshopctl kubectl [kubectl commands] [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | -c, --cluster cluster-number What cluster number you want to connect to. Env var WORKSHOPCTL_CLUSTER can also be used. 13 | -h, --help help for kubectl 14 | ``` 15 | 16 | ### Options inherited from parent commands 17 | 18 | ``` 19 | --config-path string Where to find the config file (default "workshopctl.yaml") 20 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 21 | --log-level loglevel Specify the loglevel for the program (default info) 22 | --root-dir string Where the workshopctl directory is. Must be a Git repo. 
(default ".") 23 | ``` 24 | 25 | ### SEE ALSO 26 | 27 | * [workshopctl](workshopctl.md) - workshopctl: easily run Kubernetes workshops 28 | 29 | -------------------------------------------------------------------------------- /docs/cli/workshopctl_version.md: -------------------------------------------------------------------------------- 1 | ## workshopctl version 2 | 3 | Print the version 4 | 5 | ``` 6 | workshopctl version [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | -h, --help help for version 13 | -o, --output string Output format; available options are 'yaml', 'json' and 'short' 14 | ``` 15 | 16 | ### Options inherited from parent commands 17 | 18 | ``` 19 | --config-path string Where to find the config file (default "workshopctl.yaml") 20 | --dry-run Whether to apply the selected operation, or just print what would happen (to dry-run) (default true) 21 | --log-level loglevel Specify the loglevel for the program (default info) 22 | --root-dir string Where the workshopctl directory is. Must be a Git repo. 
(default ".") 23 | ``` 24 | 25 | ### SEE ALSO 26 | 27 | * [workshopctl](workshopctl.md) - workshopctl: easily run Kubernetes workshops 28 | 29 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cloud-native-nordics/workshopctl 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/digitalocean/godo v1.48.0 7 | github.com/fluxcd/go-git-providers v0.0.3 8 | github.com/go-openapi/spec v0.19.8 // indirect 9 | github.com/kr/text v0.2.0 // indirect 10 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect 11 | github.com/otiai10/copy v1.2.0 12 | github.com/sirupsen/logrus v1.7.0 13 | github.com/spf13/cobra v1.1.1 14 | github.com/spf13/pflag v1.0.5 15 | github.com/stretchr/testify v1.6.1 // indirect 16 | github.com/whilp/git-urls v1.0.0 17 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 18 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect 19 | golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 20 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect 21 | gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c // indirect 22 | k8s.io/apimachinery v0.19.3 23 | sigs.k8s.io/kustomize/kyaml v0.9.2 24 | sigs.k8s.io/yaml v1.2.0 25 | ) 26 | -------------------------------------------------------------------------------- /hack/cobra.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os/exec" 6 | 7 | "github.com/cloud-native-nordics/workshopctl/cmd/workshopctl/cmd" 8 | "github.com/spf13/cobra/doc" 9 | ) 10 | 11 | func main() { 12 | command := cmd.NewWorkshopCtlCommand() 13 | if err := doc.GenMarkdownTree(command, "./docs/cli"); err != nil { 14 | log.Fatal(err) 15 | } 16 | sedCmd := `sed -e "/Auto generated/d" -i docs/cli/*.md` 17 | if output, err := exec.Command("/bin/bash", "-c", sedCmd).CombinedOutput(); 
err != nil { 18 | log.Fatal(string(output), err) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /hack/ldflags.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Note: This file is heavily inspired by https://github.com/kubernetes/kubernetes/blob/master/hack/lib/version.sh 4 | 5 | get_version_vars() { 6 | GIT_COMMIT=$(git rev-parse "HEAD^{commit}" 2>/dev/null) 7 | if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then 8 | GIT_TREE_STATE="clean" 9 | else 10 | GIT_TREE_STATE="dirty" 11 | fi 12 | # Use git describe to find the version based on tags. 13 | GIT_VERSION=$(git describe --tags --abbrev=14 "${GIT_COMMIT}^{commit}" 2>/dev/null) 14 | 15 | # This translates the "git describe" to an actual semver.org 16 | # compatible semantic version that looks something like this: 17 | # v1.1.0-alpha.0.6+84c76d1142ea4d 18 | DASHES_IN_VERSION=$(echo "${GIT_VERSION}" | sed "s/[^-]//g") 19 | if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then 20 | # We have distance to subversion (v1.1.0-subversion-1-gCommitHash) 21 | GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/") 22 | elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then 23 | # We have distance to base tag (v1.1.0-1-gCommitHash) 24 | GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/") 25 | fi 26 | if [[ "${GIT_TREE_STATE}" == "dirty" ]]; then 27 | # git describe --dirty only considers changes to existing files, but 28 | # that is problematic since new untracked .go files affect the build, 29 | # so use our idea of "dirty" from git status instead. 30 | GIT_VERSION+="-dirty" 31 | fi 32 | 33 | # Try to match the "git describe" output to a regex to try to extract 34 | # the "major" and "minor" versions and whether this is the exact tagged 35 | # version or whether the tree is between two tagged versions. 
# Prints the value that needs to be passed to the -ldflags parameter of go build 52 | # in order to set the workshopctl version based on the git tree status.
85 | echo "${ldflags[*]-}" 86 | } 87 | 88 | ldflags $@ -------------------------------------------------------------------------------- /hack/traefik2.txt: -------------------------------------------------------------------------------- 1 | # I couldn't get Traefik 2 to work, yet, but here's the config I tried with the day I need it 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ingressroutes.traefik.containo.us 6 | spec: 7 | group: traefik.containo.us 8 | version: v1alpha1 9 | names: 10 | kind: IngressRoute 11 | plural: ingressroutes 12 | singular: ingressroute 13 | scope: Namespaced 14 | --- 15 | apiVersion: apiextensions.k8s.io/v1beta1 16 | kind: CustomResourceDefinition 17 | metadata: 18 | name: ingressroutetcps.traefik.containo.us 19 | spec: 20 | group: traefik.containo.us 21 | version: v1alpha1 22 | names: 23 | kind: IngressRouteTCP 24 | plural: ingressroutetcps 25 | singular: ingressroutetcp 26 | scope: Namespaced 27 | --- 28 | apiVersion: apiextensions.k8s.io/v1beta1 29 | kind: CustomResourceDefinition 30 | metadata: 31 | name: middlewares.traefik.containo.us 32 | spec: 33 | group: traefik.containo.us 34 | version: v1alpha1 35 | names: 36 | kind: Middleware 37 | plural: middlewares 38 | singular: middleware 39 | scope: Namespaced 40 | --- 41 | apiVersion: apiextensions.k8s.io/v1beta1 42 | kind: CustomResourceDefinition 43 | metadata: 44 | name: tlsoptions.traefik.containo.us 45 | spec: 46 | group: traefik.containo.us 47 | version: v1alpha1 48 | names: 49 | kind: TLSOption 50 | plural: tlsoptions 51 | singular: tlsoption 52 | scope: Namespaced 53 | --- 54 | kind: ClusterRole 55 | apiVersion: rbac.authorization.k8s.io/v1beta1 56 | metadata: 57 | name: traefik-ingress-controller 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - services 63 | - endpoints 64 | - secrets 65 | verbs: 66 | - get 67 | - list 68 | - watch 69 | - apiGroups: 70 | - extensions 71 | resources: 72 | - ingresses 73 | 
verbs: 74 | - get 75 | - list 76 | - watch 77 | - apiGroups: 78 | - extensions 79 | resources: 80 | - ingresses/status 81 | verbs: 82 | - update 83 | - apiGroups: 84 | - traefik.containo.us 85 | resources: 86 | - middlewares 87 | verbs: 88 | - get 89 | - list 90 | - watch 91 | - apiGroups: 92 | - traefik.containo.us 93 | resources: 94 | - ingressroutes 95 | verbs: 96 | - get 97 | - list 98 | - watch 99 | - apiGroups: 100 | - traefik.containo.us 101 | resources: 102 | - ingressroutetcps 103 | verbs: 104 | - get 105 | - list 106 | - watch 107 | - apiGroups: 108 | - traefik.containo.us 109 | resources: 110 | - tlsoptions 111 | verbs: 112 | - get 113 | - list 114 | - watch 115 | --- 116 | kind: ClusterRoleBinding 117 | apiVersion: rbac.authorization.k8s.io/v1beta1 118 | metadata: 119 | name: traefik-ingress-controller 120 | roleRef: 121 | apiGroup: rbac.authorization.k8s.io 122 | kind: ClusterRole 123 | name: traefik-ingress-controller 124 | subjects: 125 | - kind: ServiceAccount 126 | name: traefik-ingress-controller 127 | namespace: workshopctl 128 | --- 129 | kind: ConfigMap 130 | apiVersion: v1 131 | metadata: 132 | name: traefik-cfg 133 | namespace: workshopctl 134 | labels: 135 | app: traefik 136 | data: 137 | traefik.yaml: | 138 | entryPoints: 139 | http: 140 | address: ":80" 141 | https: 142 | address: ":443" 143 | traefik: 144 | address: ":8080" 145 | http: 146 | routers: 147 | redirecttohttps: 148 | entryPoints: ["http"] 149 | middlewares: ["httpsredirect"] 150 | rule: "HostRegexp(`{host:.+}`)" 151 | service: "noop" 152 | services: 153 | # noop service, the URL will be never called 154 | noop: 155 | loadBalancer: 156 | servers: 157 | - url: "http://192.168.0.1" 158 | middlewares: 159 | httpsredirect: 160 | redirectScheme: 161 | scheme: "https" 162 | certificatesResolvers: 163 | sample: 164 | acme: 165 | email: lucas@luxaslabs.com 166 | storage: /letsencrypt/acme.json 167 | tlsChallenge: {} 168 | caServer: https://acme-v02.api.letsencrypt.org/directory 169 | # 
Staging: caServer: https://acme-staging-v02.api.letsencrypt.org/directory 170 | # Traefik should be able to access the kubernetes-dashboard over HTTPS without having to trust the dashboard's self-signed cert 171 | serversTransport: 172 | insecureSkipVerify: true 173 | providers: 174 | kubernetesIngress: 175 | ingressEndpoint: 176 | publishedService: "workshopctl/traefik" 177 | metrics: 178 | prometheus: 179 | entryPoint: traefik 180 | accessLog: {} 181 | log: {} 182 | api: 183 | insecure: true 184 | debug: true 185 | global: 186 | # Send anonymous usage data 187 | sendAnonymousUsage: false 188 | --- 189 | apiVersion: v1 190 | kind: Secret 191 | metadata: 192 | name: traefik-basic-auth 193 | namespace: workshopctl 194 | type: Opaque 195 | data: 196 | auth: a3ViZXJuZXRlczokYXByMSRVNDlTVllISiQzNnZVelFhQktTNzRtY3lpT0V6MUkuCg== 197 | --- 198 | apiVersion: v1 199 | kind: Secret 200 | metadata: 201 | name: traefik-basic-auth 202 | namespace: workshopctl 203 | type: Opaque 204 | data: 205 | auth: a3ViZXJuZXRlczokYXByMSRVNDlTVllISiQzNnZVelFhQktTNzRtY3lpT0V6MUkuCg== 206 | --- 207 | kind: Deployment 208 | apiVersion: apps/v1 209 | metadata: 210 | namespace: workshopctl 211 | name: traefik 212 | labels: 213 | app: traefik 214 | spec: 215 | replicas: 1 216 | selector: 217 | matchLabels: 218 | app: traefik 219 | template: 220 | metadata: 221 | labels: 222 | app: traefik 223 | spec: 224 | serviceAccountName: traefik-ingress-controller 225 | containers: 226 | - name: traefik 227 | image: traefik:v2.0-alpine 228 | ports: 229 | - name: web 230 | containerPort: 8000 231 | - name: websecure 232 | containerPort: 4443 233 | - name: admin 234 | containerPort: 8080 235 | volumeMounts: 236 | - name: traefik-cfg 237 | mountPath: /etc/traefik/ 238 | - name: letsencrypt 239 | mountPath: /letsencrypt 240 | volumes: 241 | - name: traefik-cfg 242 | configMap: 243 | name: traefik-cfg 244 | - name: letsencrypt 245 | hostPath: 246 | path: /tmp/traefik 247 | --- 248 | apiVersion: v1 249 | kind: 
Service 250 | metadata: 251 | name: traefik 252 | namespace: workshopctl 253 | spec: 254 | ports: 255 | - name: web 256 | port: 80 257 | - name: websecure 258 | port: 443 259 | selector: 260 | app: traefik 261 | type: LoadBalancer 262 | -------------------------------------------------------------------------------- /images/k8s-web-ide/.bash_aliases: -------------------------------------------------------------------------------- 1 | function get_dashboard_token() { 2 | kubectl -n workshopctl get secret -otemplate --template {{.data.token}} $(kubectl -n workshopctl get secret | grep -o code-server-[a-z-]*) | base64 -d | xargs echo 3 | } -------------------------------------------------------------------------------- /images/k8s-web-ide/Dockerfile: -------------------------------------------------------------------------------- 1 | # This image has VS Code v1.65.2, built 2022-04-14 2 | FROM codercom/code-server:4.3.0 3 | 4 | # Install needed utilities, e.g. Git is essential for the IDE 5 | USER root 6 | RUN apt-get update && \ 7 | apt-get install -y --no-install-recommends \ 8 | git \ 9 | curl \ 10 | nano \ 11 | jq && \ 12 | apt-get clean 13 | 14 | # Install kubectl 15 | ENV K8S_VERSION=v1.22.8 16 | RUN curl -sSL https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl > /usr/local/bin/kubectl && \ 17 | chmod +x /usr/local/bin/kubectl 18 | 19 | # Install helm 20 | ENV HELM_HOME=/root/.helm 21 | ENV HELM_VERSION=v3.8.2 22 | RUN curl -sSL https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | sudo tar -xz -C /usr/local/bin linux-amd64/helm --strip-components=1 23 | 24 | USER coder 25 | 26 | # Install extensions 27 | ENV EXTENSIONS="redhat.vscode-yaml ms-azuretools.vscode-docker" 28 | # Consider enabling ms-kubernetes-tools.vscode-kubernetes-tools; it's very resource-intensive, though 29 | RUN for ext in ${EXTENSIONS}; do code-server --install-extension ${ext}; done 30 | 31 | COPY --chown=coder:coder entrypoint.sh / 32 | COPY --chown=coder:coder 
settings.json /home/coder/.local/share/code-server/User/settings.json 33 | COPY --chown=coder:coder kubeconfig.yaml /home/coder/.kube/config 34 | COPY --chown=coder:coder .bash_aliases /home/coder/.bash_aliases 35 | 36 | ENTRYPOINT ["dumb-init", "/entrypoint.sh"] 37 | -------------------------------------------------------------------------------- /images/k8s-web-ide/Makefile: -------------------------------------------------------------------------------- 1 | VERSION=v4.3.0 2 | all: build 3 | build: 4 | docker build --pull -t luxas/k8s-web-ide:$(VERSION) . 5 | 6 | push: build 7 | docker push luxas/k8s-web-ide:$(VERSION) 8 | 9 | run: build 10 | # Sample usage locally 11 | docker run -it \ 12 | -p 127.0.0.1:8080:8080 \ 13 | -e TUTORIALS_REPO=https://github.com/cloud-native-nordics/workshopctl \ 14 | -e TUTORIALS_DIR=tutorials \ 15 | -e PASSWORD=test1234 \ 16 | -v /var/run/docker.sock:/var/run/docker.sock \ 17 | luxas/k8s-web-ide:$(VERSION) 18 | -------------------------------------------------------------------------------- /images/k8s-web-ide/README.md: -------------------------------------------------------------------------------- 1 | # k8s-web-ide 2 | 3 | A Docker image that builds upon [cdr/code-server](https://github.com/cdr/code-server), and adds 4 | 5 | - `kubectl` -- the Kubernetes CLI 6 | - `helm` -- the Kubernetes package manager 7 | - Kubernetes syntax highlighting for YAML files (from the [YAML VSCode extension](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml)) 8 | - Common development utilities -- `curl`, `nano`, `jq`, and `git` 9 | -------------------------------------------------------------------------------- /images/k8s-web-ide/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEFAULT_DIRECTORY="" 4 | 5 | if [[ ${TUTORIALS_REPO} != "" ]]; then 6 | # This will do a quick, shallow clone of the repo 7 | git clone --depth 1 ${TUTORIALS_REPO} 
/home/coder/gitclone 8 | # If TUTORIALS_DIR is "." or "", this will copy the whole git repo. 9 | mkdir -p /home/coder/project 10 | mv /home/coder/gitclone/${TUTORIALS_DIR}/* /home/coder/project 11 | sudo rm -r /home/coder/gitclone 12 | echo "Initialized workspace content from git repo ${TUTORIALS_REPO} with subdir ${TUTORIALS_DIR}" 13 | DEFAULT_DIRECTORY="/home/coder/project" 14 | fi 15 | 16 | # By default run behind a Let's Encrypt proxy, so expose this traffic using insecure HTTP 17 | exec code-server --host=0.0.0.0 --auth=password --disable-telemetry ${DEFAULT_DIRECTORY} 18 | -------------------------------------------------------------------------------- /images/k8s-web-ide/kubeconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - cluster: 5 | certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 6 | server: https://kubernetes.default 7 | name: default 8 | contexts: 9 | - context: 10 | cluster: default 11 | namespace: default 12 | user: default 13 | name: default 14 | current-context: default 15 | users: 16 | - name: default 17 | user: 18 | tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 19 | -------------------------------------------------------------------------------- /images/k8s-web-ide/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "yaml.schemas": { 3 | "kubernetes": "*.yaml" 4 | }, 5 | "workbench.colorTheme": "Default Dark+", 6 | "redhat.telemetry.enabled": false 7 | } -------------------------------------------------------------------------------- /pkg/apply/apply.go: -------------------------------------------------------------------------------- 1 | package apply 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 9 | "github.com/cloud-native-nordics/workshopctl/pkg/config/keyval" 10 | 
"github.com/cloud-native-nordics/workshopctl/pkg/constants" 11 | "github.com/cloud-native-nordics/workshopctl/pkg/gotk" 12 | "github.com/cloud-native-nordics/workshopctl/pkg/provider" 13 | "github.com/cloud-native-nordics/workshopctl/pkg/provider/providers" 14 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 15 | ) 16 | 17 | func Apply(ctx context.Context, cfg *config.Config) error { 18 | // TODO: Enforce that gen is up-to-date 19 | 20 | cloudP, err := providers.CloudProviders().NewCloudProvider(ctx, &cfg.CloudProvider) 21 | if err != nil { 22 | return err 23 | } 24 | 25 | dnsP, err := providers.DNSProviders().NewDNSProvider(ctx, &cfg.DNSProvider, cfg.RootDomain) 26 | if err != nil { 27 | return err 28 | } 29 | 30 | // Make sure the domain zone is created before starting to reconcile the clusters 31 | // Otherwise external-dns nor Traefik will work. 32 | if err := dnsP.EnsureZone(ctx); err != nil { 33 | return err 34 | } 35 | 36 | return config.ForCluster(ctx, cfg.Clusters, cfg, func(clusterCtx context.Context, clusterInfo *config.ClusterInfo) error { 37 | return ApplyCluster(clusterCtx, clusterInfo, cloudP) 38 | }) 39 | } 40 | 41 | func ApplyCluster(ctx context.Context, clusterInfo *config.ClusterInfo, p provider.CloudProvider) error { 42 | logger := util.Logger(ctx) 43 | 44 | // Add some kind of mark at the end of this procedure in the cluster to say that it's 45 | // been successfully provisioned (maybe in the workshopctl ConfigMap?). 
With this feature 46 | // it's possible at this stage to skip doing the same things over and over again => idempotent 47 | 48 | kubeconfigPath := clusterInfo.Index.KubeConfigPath() 49 | if !util.FileExists(kubeconfigPath) { 50 | // TODO: Instead, make provisionCluster idempotent 51 | logger.Info("Provisioning the Kubernetes cluster") 52 | if err := provisionCluster(ctx, clusterInfo, p); err != nil { 53 | return err 54 | } 55 | } else { 56 | logger.Infof("Assuming cluster is already provisioned, as %q exists...", kubeconfigPath) 57 | } 58 | 59 | logger.Info("Applying workshopctl Namespace") 60 | if _, err := kubectl(ctx, kubeconfigPath). 61 | Create("namespace", "", constants.WorkshopctlNamespace, true, false). 62 | Run(); err != nil { 63 | return err 64 | } 65 | 66 | // Setup GitOps sync 67 | if err := gotk.SetupGitOps(ctx, clusterInfo); err != nil { 68 | return err 69 | } 70 | 71 | localKubectl := func() *kubectlExecer { 72 | return kubectl(ctx, kubeconfigPath).WithNS(constants.WorkshopctlNamespace) 73 | } 74 | 75 | paramFlags := []string{} 76 | // Append secret parameters 77 | parameters := keyval.FromClusterInfo(clusterInfo) 78 | for k, v := range parameters.ToMap() { 79 | paramFlags = append(paramFlags, fmt.Sprintf("--from-literal=%s=%s", k, v)) 80 | } 81 | 82 | logger.Info("Applying workshopctl Secret") 83 | if _, err := localKubectl(). 84 | Create("secret", "generic", constants.WorkshopctlSecret, true, true). 85 | WithArgs(paramFlags...). 
86 | Run(); err != nil { 87 | return err 88 | } 89 | 90 | requiredAddons := []string{"core-workshop-infra"} 91 | for _, addon := range requiredAddons { 92 | addonPath := fmt.Sprintf("%s/%s/%s.yaml", constants.ClustersDir, clusterInfo.Index, addon) 93 | logger.Infof("Applying addon %s", addonPath) 94 | if _, err := localKubectl().WithArgs("apply").WithFile(addonPath).Run(); err != nil { 95 | return err 96 | } 97 | } 98 | 99 | // Wait for the cluster to be healthy 100 | return NewWaiter(ctx, clusterInfo).WaitForAll() 101 | } 102 | 103 | func provisionCluster(ctx context.Context, clusterInfo *config.ClusterInfo, p provider.CloudProvider) error { 104 | logger := util.Logger(ctx) 105 | 106 | logger.Infof("Provisioning cluster %s...", clusterInfo.Index) 107 | cluster, err := p.CreateCluster(ctx, provider.ClusterMeta{ 108 | Index: clusterInfo.Index, 109 | NamePrefix: clusterInfo.Name, 110 | }, provider.ClusterSpec{ 111 | Version: "latest", 112 | NodeGroups: clusterInfo.NodeGroups, 113 | }) 114 | if err != nil { 115 | return fmt.Errorf("encountered an error while creating clusters: %v", err) 116 | } 117 | 118 | logger.Infof("Provisioning of cluster %s took %s.", cluster.Name(), cluster.Status.ProvisionTime()) 119 | util.DebugObject(ctx, "Returned cluster object", cluster) 120 | 121 | kubeconfigPath := clusterInfo.Index.KubeConfigPath() 122 | logger.Infof("Writing KubeConfig file to %q", kubeconfigPath) 123 | return util.WriteFile(ctx, kubeconfigPath, cluster.Status.KubeconfigBytes) 124 | } 125 | 126 | type kubectlExecer struct { 127 | ctx context.Context 128 | kubeConfigPath string 129 | 130 | namespace string 131 | args []string 132 | files []string 133 | 134 | err error 135 | ignoreErrors []string 136 | } 137 | 138 | func kubectl(ctx context.Context, kubeConfigPath string) *kubectlExecer { 139 | return &kubectlExecer{ 140 | ctx: ctx, 141 | kubeConfigPath: kubeConfigPath, 142 | } 143 | } 144 | 145 | func (e *kubectlExecer) WithNS(ns string) *kubectlExecer { 146 | 
e.namespace = ns 147 | return e 148 | } 149 | 150 | func (e *kubectlExecer) WithFile(file string) *kubectlExecer { 151 | e.files = append(e.files, file) 152 | e.args = append(e.args, []string{"-f", file}...) 153 | return e 154 | } 155 | 156 | func (e *kubectlExecer) WithArgs(args ...string) *kubectlExecer { 157 | e.args = append(e.args, args...) 158 | return e 159 | } 160 | 161 | func (e *kubectlExecer) IgnoreErrors(errStrs ...string) *kubectlExecer { 162 | e.ignoreErrors = append(e.ignoreErrors, errStrs...) 163 | return e 164 | } 165 | 166 | func (e *kubectlExecer) Create(kind, subkind, name string, ignoreExists, recreate bool) *kubectlExecer { 167 | e.args = append(e.args, "create", kind) 168 | if len(subkind) > 0 { 169 | e.args = append(e.args, subkind) 170 | } 171 | e.args = append(e.args, name) 172 | if ignoreExists { 173 | // if we're idempotent, we don't care about "already exists" errors 174 | e.IgnoreErrors("AlreadyExists") 175 | } 176 | if recreate { 177 | _, err := kubectl(e.ctx, e.kubeConfigPath). 178 | WithNS(e.namespace). 179 | WithArgs("delete", kind, name). 180 | IgnoreErrors("NotFound"). // Ignore any possible NotFound error here, that is expected 181 | Run() 182 | if err != nil { 183 | e.err = err 184 | } 185 | } 186 | return e 187 | } 188 | 189 | func (e *kubectlExecer) Run() (string, error) { 190 | if e.err != nil { 191 | return "", e.err 192 | } 193 | 194 | kubectlArgs := []string{"--kubeconfig", e.kubeConfigPath} 195 | if len(e.namespace) != 0 { 196 | kubectlArgs = append(kubectlArgs, []string{"-n", e.namespace}...) 197 | } 198 | kubectlArgs = append(kubectlArgs, e.args...) 
199 | 200 | out, _, err := util.Command(e.ctx, "kubectl", kubectlArgs...).Run() 201 | for _, ignored := range e.ignoreErrors { 202 | if strings.Contains(out, ignored) { 203 | return out, nil 204 | } 205 | } 206 | return out, err 207 | } 208 | -------------------------------------------------------------------------------- /pkg/apply/wait.go: -------------------------------------------------------------------------------- 1 | package apply 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "time" 8 | 9 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 10 | "github.com/cloud-native-nordics/workshopctl/pkg/constants" 11 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | type Waiter struct { 16 | *config.ClusterInfo 17 | ctx context.Context 18 | logger *logrus.Entry 19 | } 20 | 21 | func NewWaiter(ctx context.Context, info *config.ClusterInfo) *Waiter { 22 | return &Waiter{info, ctx, util.Logger(ctx)} 23 | } 24 | 25 | func (w *Waiter) kubectl() *kubectlExecer { 26 | return kubectl(w.ctx, w.Index.KubeConfigPath()).WithNS(constants.WorkshopctlNamespace) 27 | } 28 | 29 | type waitFn func() error 30 | 31 | func (w *Waiter) WaitForAll() error { 32 | fns := map[string]waitFn{ 33 | "deployments to be Ready": w.WaitForDeployments, 34 | "DNS to have propagated": w.WaitForDNSPropagation, 35 | //"TLS certs to have been created": w.WaitForTLSSetup, 36 | } 37 | for desc, fn := range fns { 38 | msg := fmt.Sprintf("Waiting for %s", desc) 39 | w.logger.Infof("%s...", msg) 40 | before := time.Now().UTC() 41 | if err := fn(); err != nil { 42 | return fmt.Errorf("%s failed with: %v", msg, err) 43 | } 44 | after := time.Now().UTC() 45 | w.logger.Infof("%s took %s", msg, after.Sub(before).String()) 46 | } 47 | return nil 48 | } 49 | 50 | func (w *Waiter) WaitForDeployments() error { 51 | return util.Poll(w.ctx, nil, func() (bool, error) { 52 | // Wait 30s using kubectl until the "global" Poll timeout is reached 53 | _, 
err := w.kubectl().WithArgs("wait", "deployment", "--for=condition=Available", "--all", "--timeout=30s").Run() 54 | if err != nil { 55 | return false, err 56 | } 57 | return true, nil 58 | }) 59 | } 60 | 61 | func (w *Waiter) WaitForDNSPropagation() error { 62 | var ip net.IP 63 | err := util.Poll(w.ctx, nil, func() (bool, error) { 64 | addr, err := w.kubectl().WithArgs("get", "svc", "traefik", "-otemplate", `--template={{ (index .status.loadBalancer.ingress 0).ip }}`).Run() 65 | if err != nil { 66 | return false, err 67 | } 68 | ip = net.ParseIP(addr) 69 | if ip != nil { 70 | w.logger.Infof("Got LoadBalancer IP %s for Traefik", ip) 71 | return true, nil 72 | } 73 | return false, fmt.Errorf("no valid IP yet: %q", addr) 74 | }) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | return util.Poll(w.ctx, nil, func() (bool, error) { 80 | prefixes := []string{""} // "dashboard" 81 | for _, prefix := range prefixes { 82 | domain := w.Domain() 83 | if len(prefix) > 0 { 84 | domain = fmt.Sprintf("%s.%s", prefix, w.Domain()) 85 | } 86 | if err := domainMatches(domain, ip); err != nil { 87 | return false, err 88 | } 89 | w.logger.Infof("%s now resolves to %s, as expected", domain, ip) 90 | } 91 | 92 | return true, nil 93 | }) 94 | } 95 | 96 | func domainMatches(domain string, expectedIP net.IP) error { 97 | r := &net.Resolver{ 98 | PreferGo: true, 99 | Dial: func(ctx context.Context, network, address string) (net.Conn, error) { 100 | d := net.Dialer{ 101 | Timeout: time.Millisecond * time.Duration(10000), 102 | } 103 | return d.DialContext(ctx, "udp", "1.1.1.1:53") 104 | }, 105 | } 106 | ips, err := r.LookupIPAddr(context.Background(), domain) 107 | if err != nil { 108 | return fmt.Errorf("Domain lookup error for %q: %v", domain, err) 109 | } 110 | // look for the right IP 111 | for _, addr := range ips { 112 | if addr.IP.String() == expectedIP.String() { 113 | return nil 114 | } 115 | } 116 | return fmt.Errorf("Not the right IP found during lookup yet, expected: %s, got: 
%v", expectedIP, ips) 117 | } 118 | 119 | func (w *Waiter) WaitForTLSSetup() error { 120 | // TODO: Somehow verify if Traefik already has got the TLS cert 121 | _, err := w.kubectl().WithArgs("delete", "pod", "-l=app=traefik").Run() 122 | if err != nil { 123 | return err 124 | } 125 | w.logger.Infof("Restarted traefik") 126 | return nil 127 | /* 128 | TODO: Maybe verify somehow that we can connect to the endpoint(s) correctly. 129 | return util.Poll(nil, w.logger, func() (bool, error) { 130 | _, err := http.Get(w.Domain()) 131 | return (err == nil), err 132 | }, w.dryrun) 133 | */ 134 | } 135 | -------------------------------------------------------------------------------- /pkg/charts/charts.go: -------------------------------------------------------------------------------- 1 | // Code generated for package charts by go-bindata DO NOT EDIT. (@generated) 2 | // sources: 3 | // charts/core-workshop-infra/Chart.yaml 4 | // charts/core-workshop-infra/templates/code-server.yaml 5 | // charts/core-workshop-infra/templates/external-dns.yaml 6 | // charts/core-workshop-infra/templates/traefik-2.yaml 7 | // charts/podinfo/external-chart 8 | // charts/podinfo/values-override.yaml 9 | package charts 10 | 11 | import ( 12 | "bytes" 13 | "compress/gzip" 14 | "fmt" 15 | "io" 16 | "io/ioutil" 17 | "os" 18 | "path/filepath" 19 | "strings" 20 | "time" 21 | ) 22 | 23 | func bindataRead(data []byte, name string) ([]byte, error) { 24 | gz, err := gzip.NewReader(bytes.NewBuffer(data)) 25 | if err != nil { 26 | return nil, fmt.Errorf("Read %q: %v", name, err) 27 | } 28 | 29 | var buf bytes.Buffer 30 | _, err = io.Copy(&buf, gz) 31 | clErr := gz.Close() 32 | 33 | if err != nil { 34 | return nil, fmt.Errorf("Read %q: %v", name, err) 35 | } 36 | if clErr != nil { 37 | return nil, err 38 | } 39 | 40 | return buf.Bytes(), nil 41 | } 42 | 43 | type asset struct { 44 | bytes []byte 45 | info os.FileInfo 46 | } 47 | 48 | type bindataFileInfo struct { 49 | name string 50 | size int64 51 | mode 
os.FileMode 52 | modTime time.Time 53 | } 54 | 55 | // Name return file name 56 | func (fi bindataFileInfo) Name() string { 57 | return fi.name 58 | } 59 | 60 | // Size return file size 61 | func (fi bindataFileInfo) Size() int64 { 62 | return fi.size 63 | } 64 | 65 | // Mode return file mode 66 | func (fi bindataFileInfo) Mode() os.FileMode { 67 | return fi.mode 68 | } 69 | 70 | // Mode return file modify time 71 | func (fi bindataFileInfo) ModTime() time.Time { 72 | return fi.modTime 73 | } 74 | 75 | // IsDir return file whether a directory 76 | func (fi bindataFileInfo) IsDir() bool { 77 | return fi.mode&os.ModeDir != 0 78 | } 79 | 80 | // Sys return file is sys mode 81 | func (fi bindataFileInfo) Sys() interface{} { 82 | return nil 83 | } 84 | 85 | var _coreWorkshopInfraChartYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x4a\x2c\xc8\x0c\x4b\x2d\x2a\xce\xcc\xcf\xb3\x52\x28\x33\xe2\xca\x4b\xcc\x4d\xb5\x52\x48\xce\x2f\x4a\xd5\x2d\xcf\x2f\xca\x2e\xce\xc8\x2f\xd0\xcd\xcc\x4b\x2b\x4a\xe4\x2a\x83\xa9\x32\xd0\x33\xd4\x33\xe0\x02\x04\x00\x00\xff\xff\x70\x15\x52\x44\x38\x00\x00\x00") 86 | 87 | func coreWorkshopInfraChartYamlBytes() ([]byte, error) { 88 | return bindataRead( 89 | _coreWorkshopInfraChartYaml, 90 | "core-workshop-infra/Chart.yaml", 91 | ) 92 | } 93 | 94 | func coreWorkshopInfraChartYaml() (*asset, error) { 95 | bytes, err := coreWorkshopInfraChartYamlBytes() 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | info := bindataFileInfo{name: "core-workshop-infra/Chart.yaml", size: 56, mode: os.FileMode(436), modTime: time.Unix(1577836800, 0)} 101 | a := &asset{bytes: bytes, info: info} 102 | return a, nil 103 | } 104 | 105 | var _coreWorkshopInfraTemplatesCodeServerYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x56\x4d\x73\xdb\x36\x10\xbd\xf3\x57\xec\x58\x67\x52\x76\x7a\xf1\xf0\xe6\x5a\x49\xc7\x53\xc7\xd2\x48\x4a\x7a\xcc\xac\xc0\x95\x88\x0a\x04\x38\xc0\x52\xb1\x9a\xe6\xbf\x77\xc0\x2f\x81\xa2\xd4\x4c\x7c\x08\x4f\x24\x80\xdd\xf7\xf0\xf0\x76\xc1\x09\x96\xf2\x33\x59\x27\x8d\x4e\xe1\x70\x17\x4d\xf6\x52\x67\x29\xbc\x60\x41\xae\x44\x41\xd1\xa4\x20\xc6\x0c\x19\xd3\x68\x02\xa0\xb1\xa0\x14\xbe\x1a\xbb\x77\xb9\x29\x05\xab\x6e\xb0\x5e\x7c\x36\x13\xc7\x71\x14\xa6\xb7\x1b\x14\x09\x56\x9c\x1b\x2b\xff\x41\x96\x46\x27\xfb\x7b\x97\x48\x33\x3d\xdc\x45\x0d\xee\xa3\xaa\x1c\x93\x5d\x1a\x45\xbf\x4b\x9d\x49\xbd\x8b\x4e\xf8\x1d\xbc\x30\x19\xc5\x8e\xec\x81\x6c\x74\x15\xdd\x1a\x45\x4b\xda\xfa\x28\x2c\xe5\x1f\xd6\x54\xe5\xff\x30\x88\x00\x46\x04\x4e\x78\xcd\x58\x8c\x59\x21\x75\xe4\xaa\xcd\xdf\x24\xd8\xa5\x51\xdc\xc6\xac\xc8\x1e\xa4\xa0\x07\x21\x4c\xa5\xf9\xe7\x68\x9e\x6b\xd4\x2b\x71\x96\xf4\x8d\x2a\x5c\x4d\xff\x68\xf4\x56\xee\x3e\x62\xf9\xd6\xcc\x00\x0a\x37\xa4\x9c\x0f\xf3\x12\x97\xc3\xb8\x2e\xa3\xa8\x71\x52\xf8\x37\x6e\xd7\x0d\xb8\xf8\xa1\x90\x4f\x3d\xd0\xca\xdd\x66\x8e\xbb\xef\xe6\xb3\x5e\x40\x96\xe5\x56\x0a\x64\x8a\xdb\xc3\xe4\x63\x0a\xd3\x03\xda\xa9\xad\xf4\xd4\x91\xb0\xc4\x6e\xba\xaf\x36\x64\x35\x31\xd5\x1e\x73\x8d\xa0\xd8\x08\x3a\x15\x98\x08\xcb\x7d\xce\x86\x76\x0a\x39\x73\xe9\xd2\x69\x18\x9b\xd1\x16\x2b\xd5\x2d\x6d\x24\x0a\xc7\x84\xd1\x4c\xaf\x7c\xe2\xdb\x7c\x07\x7c\xdb\x0d\xc0\x30\xd3\x40\xda\xf3\xa9\xca\x8d\x03\x2e\x40\x57\xd6\x92\xe6\xb8\x83\x1c\x4c\xfa\x14\x3d\xa9\x71\x6c\x8b\xd1\x23\xb2\xd9\x93\xfe\x20\x15\xfd\xac\x92\x75\xe0\xc8\x69\x58\x96\xee\x54\xd7\x33\x2a\x95\x39\x16\xf4\x76\x27\xff\xc0\x6f\xae\x24\xe1\xa7\x1c\x29\x12\x6c\xda\x7d\x15\xc8\x22\x7f\x0e\xe2\x2e\x44\x02\x30\x15\xa5\x42\xa6\x36\x26\xe0\xe7\x1f\x35\x08\xbf\x98\x00\xa0\x83\xef\xac\x74\xaa\xdc\x97\x0b\x5b\xec\x5c\x83\x52\xf7\x47\xe4\x0f\x49\x16\xb8\xa3\x14\x54\xf5\x8a\x6e\xba\xbf\x77\xf1\x57\xda\xc4\x32\xa3\xf4\xf0\x2e\xb9\x4b\xee\x7a\x0a\x13\x58\xcf\x67\xf3\x14\x9e\x34\x70
\x4e\xb0\xad\xb8\xb2\x94\xc2\x2e\x17\xd6\x9f\x8f\x50\xa6\xca\x62\x8d\x2c\x0f\x14\x6b\x63\x33\x29\xce\xd2\xdd\x26\xef\x92\xdb\x3e\x5d\x0d\xbb\xa8\x94\x5a\x18\x25\xc5\x31\x85\x07\xf5\x15\x8f\x6e\x60\xd3\x4b\x5b\x00\x28\x8d\xe5\x40\x9a\xce\x66\xbe\x88\xfa\xc1\x60\xab\x0b\x63\x39\x85\xfb\xdb\xfb\x13\x36\xe9\xc3\x38\x7e\xfd\x69\x3d\x5f\x3e\x3d\x3c\xaf\xbe\x2c\xdf\x2f\xe6\x41\xa6\x03\xaa\x8a\x3e\x58\x53\xa4\xc1\xa0\x57\xdc\xdb\xf4\x4f\x3a\xb6\x0d\x3f\x7c\xc6\x37\xd6\x70\x7e\x4f\xc7\xab\x80\x63\x3e\xb3\xa7\xe5\x2f\xa5\x13\xe2\x75\x6c\x16\x0f\xab\xd5\x5f\xf3\xe5\xec\x17\x10\x79\x7c\xfe\xb4\x5a\xbf\x5f\x7e\x19\x41\x1e\x8c\xaa\x0a\xfa\xe8\x0d\x7e\xe1\xfc\x7d\xbb\x10\xa7\x86\xde\x3c\x85\x5f\xbc\x40\xce\x53\x98\xe6\xa6\xa0\xa9\x37\x94\x9d\x26\x7e\xf1\x28\x43\x66\xc4\x3e\xf0\xd9\x30\xba\xeb\x4e\xcd\xa2\xc4\x19\xb1\x8f\x42\x5a\x41\x45\x5d\xe5\x23\xba\xfb\x2f\xd4\xe5\x9a\xd1\xaf\xb0\xca\x8d\x6b\x28\x05\x29\xca\xeb\x14\x7f\x74\xd5\xbf\xb5\x33\x5e\x6e\x7d\x17\xda\x54\x5f\xad\x71\xfd\xea\x2b\xb1\x5e\xca\x68\x77\xc4\x67\xc5\x19\x54\xf2\x39\x71\x4d\xec\xf1\xa5\xde\x8d\xfe\xdf\x9e\xf4\xce\x92\x73\x6f\x6f\xf2\xa8\xb5\xe1\xfa\xcf\xac\x3d\xc5\x09\x7c\x30\x56\x10\x20\xb8\xdc\x58\x86\xf5\xfa\x19\x9c\x01\xce\x91\x61\xf6\xb2\x02\x4b\xc2\xd8\x0c\x44\x8e\x7a\x47\x0e\x04\x6a\x28\xad\x29\x71\x87\x4c\xb0\x45\x7f\xf1\xd6\x79\xe8\x95\xc9\x6a\x54\x71\xa6\x5d\x82\xaa\xcc\x31\x19\xde\x6a\xcc\x2a\x85\x9b\xdf\x6e\xdd\x4d\xaf\xa8\x6c\x76\xf3\xa8\xd0\xb9\xa6\x8f\xb3\x45\xda\x4a\x6f\x37\x5b\x29\x6a\xc5\xf4\x3e\x48\xe1\xe6\xdb\x37\x48\x3e\xfb\x42\x74\x49\xb0\xa7\xa4\xab\xa1\xd9\xfc\xe3\xc3\xd3\x0b\x7c\xff\x7e\x53\xf3\xf1\xca\x76\xc6\xf1\xa6\x09\x4c\xdb\x7a\x28\x0a\x3d\xb5\x3e\x96\xbe\xf4\x2d\x6d\xe5\x6b\x3f\xb1\x41\xb1\x27\x9d\x85\xfe\x6b\x2f\x9f\x61\xb5\x5f\x6f\xe0\xd0\x36\xf1\x51\x77\xa8\x8a\x8d\xff\x01\xb9\xbf\x8d\xfe\x0b\x00\x00\xff\xff\xb3\x85\xd9\x3f\x22\x0c\x00\x00") 106 | 107 | func coreWorkshopInfraTemplatesCodeServerYamlBytes() ([]byte, error) { 108 | return bindataRead( 109 | 
_coreWorkshopInfraTemplatesCodeServerYaml, 110 | "core-workshop-infra/templates/code-server.yaml", 111 | ) 112 | } 113 | 114 | func coreWorkshopInfraTemplatesCodeServerYaml() (*asset, error) { 115 | bytes, err := coreWorkshopInfraTemplatesCodeServerYamlBytes() 116 | if err != nil { 117 | return nil, err 118 | } 119 | 120 | info := bindataFileInfo{name: "core-workshop-infra/templates/code-server.yaml", size: 3106, mode: os.FileMode(436), modTime: time.Unix(1577836800, 0)} 121 | a := &asset{bytes: bytes, info: info} 122 | return a, nil 123 | } 124 | 125 | var _coreWorkshopInfraTemplatesExternalDnsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x54\xdf\x6f\xe3\xb6\x0f\x7f\xcf\x5f\x41\xe4\x5e\xeb\x7c\x5b\x7c\x5f\x06\x03\x79\xe8\x2e\xc1\x50\xe0\x9a\x1c\xd2\x5e\x37\x6c\x28\x0a\x45\x62\x6c\xad\xb2\x24\x90\xb4\x5b\xdf\xa1\xff\xfb\x20\xdb\x6d\xed\x6b\x6e\x87\xa1\x7a\x72\x44\x91\xfc\xfc\x20\xa3\xa2\xbd\x41\x62\x1b\x7c\x0e\xcd\xd9\xec\xde\x7a\x93\xc3\x15\x52\x63\x35\x9e\x6b\x1d\x6a\x2f\xb3\x0a\x45\x19\x25\x2a\x9f\x01\x78\x55\x61\x0e\xf8\x28\x48\x5e\xb9\xcc\x78\x1e\x2e\x39\x2a\x8d\x39\x3c\x04\xba\xe7\x32\x44\x2d\x6e\x96\x65\xd9\x6c\x5c\x9f\xf6\x4a\x2f\x54\x2d\x65\x20\xfb\x55\x89\x0d\x7e\x71\xff\x0b\x2f\x6c\xf8\xdf\x4b\xe7\x8f\xae\x66\x41\xda\x05\x87\x3f\x6b\x4b\xb5\x43\xce\x67\x19\xa8\x68\x7f\xa3\x50\x47\xce\xe1\xaf\xf9\xfc\x76\x06\x40\xc8\xa1\x26\x8d\xdd\x0d\xf7\x64\x78\x7e\x32\x47\x6f\x62\xb0\x5e\xd2\x77\x0c\x86\xbb\xc7\x0d\xd2\xbe\x7b\x58\xa0\xcc\x4f\xe6\x0f\x4a\x74\x39\x3f\x99\x3b\xcb\x32\xbf\xfd\xbe\x7c\x42\xe0\x13\x9b\x54\xc2\xa3\x24\xba\xd6\x17\x03\x8f\xb7\xcd\xad\x2f\x08\x99\x91\xe7\xb7\xf0\x5f\x7b\xbd\xad\xe6\x83\xc1\xef\x40\x3f\xa7\xbe\x4b\xea\x5f\xad\x37\xd6\x17\x3f\x51\x3c\x6b\x2c\x3e\x20\xfd\xd8\x6f\x0a\x0e\x77\x78\x48\xd9\xcf\x44\xfe\x05\xc9\x0c\xe0\xad\xe7\x47\x9d\xe6\x7a\xff\x37\x6a\xe9\xcc\x3e\x3a\xa0\xef\x1b\x4b\x15\x23\xbf\xca\xb2\xc2\xe8\x42\x5b\xe1\x3b\xe6\x9e\x23\xea\xbc\xf3\x2e\x3a\xab\x15\xe7\x70\x36\x03\x60\x21\x25\x58\xb4\x29\x02\x20\x6d\xc
4\x1c\x76\xa8\x09\x95\x24\xde\x8c\x0e\xb5\x04\xea\xc3\x55\x9a\x8c\x4f\x6a\x8f\x8e\xfb\x8b\x24\x69\x7c\x03\x40\xb0\x8a\x4e\x09\x0e\x49\x23\xbc\xe9\xb8\x49\xfe\xf1\x0a\x00\xcf\x68\xbb\xef\x89\xae\x9b\x63\x9c\xd3\xd1\xc1\x8b\xb2\x1e\xe9\xa5\x78\x76\x5c\xa0\xfe\xd8\x4a\x15\x98\x43\xb2\xbd\xd0\x94\x86\x70\xfc\x6c\xf2\x23\x6f\x4e\x17\x67\x67\x8b\x53\xf8\x00\xd7\xdb\xd5\x36\x87\x2f\xb1\x20\x65\x10\x24\x40\x17\x7a\x25\x43\xc5\x88\xda\x07\xf8\x3d\x29\x06\x17\xfd\xbe\x81\xf2\xe6\x79\x48\x20\xf4\xd3\xd3\xdd\xf5\x72\xc3\x6a\x73\x05\x84\x3a\x90\x61\xd0\x81\x08\x39\x86\x6e\x05\x5c\x0b\x2f\x35\x33\xc8\xb2\x7e\xf9\x96\xc3\x1a\x1f\x0b\x0d\x92\x8d\x90\x5c\xaa\x7b\x64\x58\x0f\xac\x52\x2b\x46\x84\xe0\x5d\x0b\x52\x22\x94\x81\x05\x0d\x7c\x0d\x1e\xb9\xf7\xd9\xfa\x02\x22\x85\xc6\x1a\x34\x60\x42\xa5\xac\x3f\x81\x50\x59\x49\xac\x23\x05\xdd\x31\x72\x0e\x54\xa3\xac\x53\x7b\x37\x2d\x32\x41\xd5\xa7\x67\x07\xeb\x04\x69\xf9\xed\x1b\x2c\x6e\x94\xab\x91\x17\xa3\x01\x5d\xec\xb6\xdb\xeb\xbb\xd5\xf6\xf2\xfc\x62\x03\x4f\x4f\x8b\x49\x81\x01\xc8\x0f\x73\xd7\x7f\x5c\xaf\x77\x9b\xf3\x4f\x77\xab\xcd\xd5\xdd\xe7\xdd\xf6\xe6\x62\xb5\xde\xc1\xd3\xd3\x48\x80\xcf\x84\x0d\x7a\x99\x6a\x70\xa0\x50\x81\x41\x87\x92\xe8\x2a\xdf\x3e\x1b\xf0\x4a\x15\x7d\xc7\xed\x50\x3b\x07\xdc\x7a\x5d\x52\xf0\xc3\x7f\xc6\x14\x62\x70\x56\xb7\xcb\x3a\x32\x92\x64\x49\xd8\x49\x98\xb0\xb0\x2c\xd4\x2e\xe5\x51\x26\x01\x79\x94\x2c\x3c\x78\xa4\xcc\x9a\xe5\x78\x61\x5f\xa1\xef\x30\x35\x86\x70\x10\xf4\xa0\x18\xa4\xb4\x0c\x96\x41\x41\x69\x8b\xd2\xb5\x60\x5a\xaf\x2a\xab\x81\x5b\x16\xac\x26\xe5\xad\x17\xa4\x46\xb9\xe5\xff\x4f\xa7\x9e\xb8\x50\x64\x0e\x1b\x74\x4b\x83\xfb\xba\x18\x2b\x15\x98\xed\xde\xb5\x60\x7d\x37\x1b\x87\x5a\x6a\xc2\x13\x50\xc6\x40\x96\x0d\x2a\x72\x1d\x63\x20\x19\xa7\x0d\xb2\x67\x1f\xbf\x5c\x5d\x6f\x2f\x2f\xfe\x5c\xe7\xb0\x41\x34\x49\x44\x46\x81\x88\xf4\xe2\x23\xac\x37\x37\x70\x73\xbe\x63\x28\x91\x70\xf6\x4f\x00\x00\x00\xff\xff\x69\x0e\x31\x61\xf0\x07\x00\x00") 126 | 127 | func coreWorkshopInfraTemplatesExternalDnsYamlBytes() ([]byte, 
error) { 128 | return bindataRead( 129 | _coreWorkshopInfraTemplatesExternalDnsYaml, 130 | "core-workshop-infra/templates/external-dns.yaml", 131 | ) 132 | } 133 | 134 | func coreWorkshopInfraTemplatesExternalDnsYaml() (*asset, error) { 135 | bytes, err := coreWorkshopInfraTemplatesExternalDnsYamlBytes() 136 | if err != nil { 137 | return nil, err 138 | } 139 | 140 | info := bindataFileInfo{name: "core-workshop-infra/templates/external-dns.yaml", size: 2032, mode: os.FileMode(436), modTime: time.Unix(1577836800, 0)} 141 | a := &asset{bytes: bytes, info: info} 142 | return a, nil 143 | } 144 | 145 | var _coreWorkshopInfraTemplatesTraefik2Yaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x58\xfd\x6e\xe3\x36\x12\xff\xdf\x4f\x31\xb0\x8b\xde\x16\x58\x31\xd9\x6e\xef\x50\x08\x30\x70\x8e\xe3\xed\x06\x4d\xb2\x41\xe2\x5d\xe0\x0e\x0b\xa4\x63\x6a\x2c\xb1\xa6\x48\x82\xa4\x9c\xfa\x72\x79\xf7\x03\xf5\x61\xc9\x56\xec\x6d\xf6\x8a\x55\x80\x40\x22\x67\x7e\x33\x9c\x6f\x7a\x34\x1a\xc1\xed\xd9\x64\x0a\xa3\xd1\x68\x80\x46\x7c\x22\xeb\x84\x56\x31\xd8\x05\x72\x86\x85\xcf\xb4\x15\xff\x41\x2f\xb4\x62\xab\x9f\x1d\x13\xfa\x64\xfd\x66\xb0\x12\x2a\x89\x61\x2a\x0b\xe7\xc9\xde\x6a\x49\x83\x9c\x3c\x26\xe8\x31\x1e\x00\x28\xcc\x29\x06\x6f\x91\x96\x62\x35\xb0\x85\x24\x17\x0f\x22\x40\x23\x7e\xb1\xba\x30\x2e\xd0\x00\x44\x30\x1c\x0e\x00\x2c\x39\x5d\x58\x4e\xdb\x55\x47\x76\x2d\x38\xb9\xfa\x93\x54\x62\xb4\x50\xde\x6d\xb7\xb9\xa5\xf2\x6b\x4d\x76\xb1\xe5\x4a\xc9\xd7\x6f\x52\xb8\xe6\xf5\x01\x3d\xcf\x9e\x93\x4c\x7f\x78\x52\xe1\x9c\x0d\xaa\x22\xff\xa0\xed\x4a\xa8\xb4\x3e\xe5\x73\x9a\x09\x95\x5a\x72\x6e\xab\x5a\xfd\xcd\x25\xd6\x8b\x7f\x95\x46\x87\x25\x9f\x38\x8f\xbe\xe8\xc9\x2a\x4c\x82\x9e\x06\x51\x14\xed\xf8\x70\xeb\xa9\xbb\xca\xa6\x13\xce\x75\xa1\xfc\x11\x67\x55\xdf\xce\x20\xa7\x18\x82\x45\x5c\xa6\x0d\xf7\xb2\x07\xfd\xb2\xf0\x38\x13\x2a\x11\x2a\x3d\x16\x25\x5a\xd2\x2d\x2d\xc3\x46\x63\x9b\x23\x42\x06\x00\xfd\x10\xdc\x87\x74\xc5\xe2\x77\xe2\xbe\x8c\xbd\x67\xcd\xf0\xa2\xc3\x87\x44\x99\x6a\xb5\x14\xe9\x15
\x9a\xd7\x70\x4e\x46\xea\x4d\x4e\xca\xc3\xf7\x0d\x6e\x2f\x87\x5a\x53\x34\x8c\x87\x2d\x10\xf1\x65\x7a\x58\x03\x00\x89\x0b\x92\xb5\xc3\xd1\x98\x56\xeb\x06\x0d\x8d\x60\x1b\xcc\x65\x0c\xff\x2d\x89\x32\xef\x4d\x45\x0e\x60\x75\xe1\xc9\xba\xe6\x13\x20\xdf\x44\x68\x44\xfb\x0d\x10\xd2\x34\x86\xf7\xda\xf9\x57\xbf\xd5\xd0\xec\xf1\x11\xd8\x27\x94\x05\x39\xd6\x51\x86\x4d\x2f\x3f\xde\xcd\x67\xb7\xf7\xe7\x1f\xae\x26\x17\xd7\xf0\xf4\xf4\xdb\x0f\x1d\xa0\x3a\x7f\xe3\xa0\xd0\x3f\x85\xf2\x64\x15\xca\xce\x3e\x29\x6f\x37\x37\x65\x4a\x77\xe5\x47\x1d\x37\xb4\x6b\x0f\xb4\x70\xc4\x0b\x4b\x9d\xd5\x5c\x24\x89\xa4\x07\xb4\xb4\x07\x80\x46\x44\x21\x5a\x06\x47\xe8\x1a\x9a\x2e\xe7\x02\x9d\xe0\x93\xbd\x45\x80\xc2\xed\x98\x2c\x3c\x23\x98\x67\xc2\xc1\x3a\xd8\x04\x84\x03\x4b\x46\x22\xa7\x04\xb4\x8a\x12\xca\x51\x25\xb0\xb4\x3a\x07\x9f\x11\xa4\x62\x4d\x0a\x48\xad\x61\x8d\xf6\x35\xf8\x0c\x3d\x70\x9d\x93\xab\x48\x10\xee\xca\x4a\xb6\x83\x1f\xc1\xf0\xf3\xe3\xe7\xc7\x92\x6b\xd8\x98\xf9\x6c\x72\x77\x31\xbd\x9f\x7c\x9c\xbf\xbf\x3f\x9b\xde\xfe\xeb\x66\x3e\x84\xcf\x4f\x9f\x9f\x86\xbd\x9c\x44\x63\x5c\x9b\x7e\x6d\x84\x7e\x4d\xbe\x1f\x09\x38\x67\x88\xc7\x65\x95\x32\x52\x70\x74\x31\xbc\x19\x04\xb7\x4b\xe2\x5e\xdb\x8a\x21\x0f\x75\xee\xb2\x83\xb0\x87\x01\xe0\x29\x37\x12\x3d\xd5\xf4\x1d\x0d\xc3\x23\x77\x58\x7b\xcc\x00\x8d\x12\x9d\x88\xab\xd3\xfa\x7a\xef\x80\xe1\xe1\x5a\x79\x14\xaa\xe3\xcf\xa8\x67\x88\xea\x11\x39\xa6\xed\x72\xbc\xfe\x91\xfd\x83\xbd\x6d\xd5\xb0\x69\x47\xa9\x08\xa2\x90\x45\x63\x6f\x0b\xda\x59\xcc\xc9\x5b\xc1\x1d\x33\x56\xe7\xe4\x33\x2a\x5c\x9f\x06\x39\x27\xe7\xa4\x4e\x8f\x6c\xb1\xa5\x90\x64\xd0\x67\xe3\x93\x6d\x95\x40\x9e\xd1\x49\x45\xc1\xa4\x4e\x77\x18\x03\x8b\xa4\x35\xc9\xf1\xf9\xec\xec\xe3\x2f\x3b\x7b\xc6\xea\xb5\x48\xc8\xba\x12\x94\x25\xc2\x96\xee\xda\x74\xa0\x97\xe9\x31\x8e\xb2\x75\xf5\x95\x6d\xa9\x56\xc5\x82\xac\x22\x4f\xae\x6e\x57\x2f\x22\x66\x4d\x8f\xab\xfb\x3d\x33\xc5\x42\x0a\x97\x51\x52\xbb\x77\xdc\x09\xcf\x93\x7d\xb7\x05\xf0\xb2\xb0\x54\xb3\x02\x7b\xa0\x05\xc3\x24\x29\xb5\x88\x7f\x3e\x3d\x4a\x17
\x0a\x25\xb3\x54\x59\x24\x74\x60\xd6\x52\x30\xaf\xc7\xfd\x1a\xf4\x62\x14\xc7\x33\xca\x69\x1c\x68\xdc\x31\x94\x4a\x4e\xab\xf9\x4f\x3f\xbd\xfd\x13\xe4\xa5\x68\x2f\x1d\xe3\x64\x7d\x98\x1d\xe4\x9a\xec\x58\x92\x77\xa4\xb8\xdd\x18\x7f\x10\xa3\x29\xf4\x1d\x53\x1d\x31\x56\x43\xfd\x32\x79\x81\x48\x2c\x05\x47\x4f\xae\x21\x76\xac\x43\xcd\x90\xe7\xc4\x12\xe5\x78\x86\x52\x92\x4a\xa9\x1f\x38\x5f\x01\xc2\x9a\x60\x1b\x7f\xf7\x6a\x7e\x3b\x99\xbd\xbb\xf8\xf5\xfe\xfc\xfa\xee\xfe\xe6\xf6\xc3\xa7\x8b\xf3\xd9\xed\x0f\xff\xb7\x80\x2d\xdd\xf8\x0d\x2b\xff\xe2\xbf\xbf\x7d\xfd\x86\x9d\xb2\xd3\xf2\xf5\x6b\xf0\x29\x47\x21\xc7\xdf\xbd\xba\x9c\xcd\xef\x66\xd7\x65\xb5\xbf\x9f\x5d\x4d\x2e\x2e\xbf\x4a\x5b\xe7\xb5\xc5\x94\xfa\xd5\x23\x27\xf6\xbb\xd3\x6a\x8b\x39\x82\x1b\x49\xe8\x08\x94\xf6\x54\xb5\x2a\x1f\x1a\x9d\x70\x65\x2b\x73\x1e\x53\xa1\x52\xb8\x24\xff\x37\x07\xb3\x4a\x44\x59\x76\xc9\xb2\x0e\xc8\x07\xc5\x09\x36\xba\x08\xb3\x6f\x00\x50\xa9\x83\x7a\xac\x7e\x5d\xae\xbb\x4c\x17\x32\x01\x4b\xb9\x5e\xd7\x72\x1e\x32\x2d\x09\xa4\x50\x04\x28\xbd\x4e\x43\xbd\xdc\x01\x7d\xc1\x79\x39\x56\x3a\x55\x89\x16\x9f\x94\x27\x8d\x6a\xed\xa3\xf5\xe9\x8f\x2c\x0c\x49\x5d\x36\x6d\xd3\x93\x6d\x29\xdc\x0a\x35\xda\xfa\x9d\x3a\x5f\xf5\x8a\x80\xda\x69\xd7\xdb\xa6\x72\xa3\xad\x8f\x61\x27\x71\x5a\x06\x77\x98\x63\x37\xbb\x2b\x16\x4c\x72\xa1\x8e\x09\xe9\x88\x21\xb5\xee\x2b\xd9\x8b\x9c\x0e\x58\x39\xb7\xbc\xb3\x3a\xdf\x9d\x6a\xaa\x2b\xd5\xaf\xb4\xa9\x07\xf0\xee\x53\x81\xee\x4e\x06\xdd\x67\x45\x9b\x63\x32\x1b\xad\x9e\x4b\xc0\x6f\xa0\xd8\x51\xb1\xf5\x30\x27\x5c\x98\xf4\x12\x10\xaa\x8c\xf5\x61\xb8\x61\x09\x1e\x0c\xbf\x14\x69\x61\xcb\x6b\xc7\x10\x96\xda\x96\xdb\xf3\x2a\x93\x60\x72\x73\xd1\x3b\xe4\xc1\x81\xed\x1b\x9c\xf4\xcb\xb2\x47\xd0\x58\x20\x9a\x7e\xbc\x9b\x7f\xb8\xba\xf8\xf7\x2c\x86\xf7\x64\x43\x22\x86\xff\x8a\x28\x71\xe0\x35\x2c\x08\xb4\x22\xc8\xb5\x25\x48\x36\x0a\x73\xc1\x61\x68\x42\xfb\x1f\x56\x39\x9b\xe3\x8a\xdc\xd6\x14\xb8\x90\x04\x5e\x77\x04\x55\xe3\x49\x69\xaf\xf3\xeb\x3b\xf0\x7a\x45\x6d\x4c\xaf\xb5\x2c\x72\xba\x0a\xa3\xda\x33
\x39\xf6\xdc\x30\x02\x90\x07\xea\x1b\xf4\x59\x0c\x07\xc6\x95\x3d\xee\x50\xe7\xbe\xc4\xdf\xa1\xa9\x74\x3a\x34\x1c\xee\x88\xe2\xcd\x55\xae\xeb\xa6\x43\xe4\xc7\xf5\xa2\xdc\xf8\xcd\xb9\xb0\x31\x3c\x3e\x7d\xf1\xee\xfe\x17\x0f\xf1\x00\xa8\x94\xf6\x65\x74\xd7\xfb\x23\x78\xa7\x2d\x27\xc0\x50\xa6\xad\x87\xf9\xfc\x12\x9c\xae\x3c\x1e\xdc\x68\x89\x6b\x9b\x00\xcf\x50\xa5\xe4\x80\xa3\x02\x63\xb5\xc1\x14\x3d\xc1\x12\xc3\x3d\xbc\xc4\xa1\x3f\xaa\x3b\x5f\x94\x28\xc7\x50\x9a\x0c\x3b\xd3\x1e\x13\xfa\xc4\x7b\x19\xc3\xf0\xed\xa9\x1b\xd6\x72\xa7\x96\x02\x06\x76\xc5\x34\x09\xe7\xdb\x84\x83\xef\x21\x41\x97\x2d\x34\xda\xe4\xcf\x49\xca\xb4\xf3\x95\xad\x86\x2f\xbd\xd3\x0e\xb7\xf7\x9c\x6d\x37\xe8\xf5\x01\xd3\x16\xfe\x7e\xc9\x37\x9d\x1a\xbf\x7b\x39\xda\xbf\x09\x6d\x4c\xa8\xdb\x1a\x93\x33\x94\xa8\x38\xd9\x5e\x30\xf4\x7e\x9e\x6a\xaf\x79\x17\xd5\xd8\x3c\x95\xe8\x5c\x1b\x23\xf0\x4c\x90\xf4\x1c\xde\xfd\xfd\x6a\xcf\x70\xc2\x45\x09\x2d\xb1\x90\x3e\x2a\xb7\x4b\x03\x16\xd4\x1a\x25\xf4\x25\xab\xa5\x24\xbb\x15\x51\xf2\x55\x90\x51\xbb\x3d\xf8\x5f\x00\x00\x00\xff\xff\x89\x0a\x61\x37\x62\x14\x00\x00") 146 | 147 | func coreWorkshopInfraTemplatesTraefik2YamlBytes() ([]byte, error) { 148 | return bindataRead( 149 | _coreWorkshopInfraTemplatesTraefik2Yaml, 150 | "core-workshop-infra/templates/traefik-2.yaml", 151 | ) 152 | } 153 | 154 | func coreWorkshopInfraTemplatesTraefik2Yaml() (*asset, error) { 155 | bytes, err := coreWorkshopInfraTemplatesTraefik2YamlBytes() 156 | if err != nil { 157 | return nil, err 158 | } 159 | 160 | info := bindataFileInfo{name: "core-workshop-infra/templates/traefik-2.yaml", size: 5218, mode: os.FileMode(436), modTime: time.Unix(1577836800, 0)} 161 | a := &asset{bytes: bytes, info: info} 162 | return a, nil 163 | } 164 | 165 | var _podinfoExternalChart = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xca\x28\x29\x29\x28\xb6\xd2\xd7\x2f\x2e\x49\x4d\x4b\xcc\x2b\x28\xca\x4f\x49\xcc\xd3\x4b\xcf\x2c\xc9\x28\x4d\xd2\xcb\xcc\xd7\x2f\xc8\x4f\xc9\xcc\x4b\x83\xd3\x80\x00\x00\x00\xff\xff\xb0\xe7\xfd\xe6\x2e\x00\x00\x00") 166 | 167 | func podinfoExternalChartBytes() ([]byte, error) { 168 | return bindataRead( 169 | _podinfoExternalChart, 170 | "podinfo/external-chart", 171 | ) 172 | } 173 | 174 | func podinfoExternalChart() (*asset, error) { 175 | bytes, err := podinfoExternalChartBytes() 176 | if err != nil { 177 | return nil, err 178 | } 179 | 180 | info := bindataFileInfo{name: "podinfo/external-chart", size: 46, mode: os.FileMode(436), modTime: time.Unix(1577836800, 0)} 181 | a := &asset{bytes: bytes, info: info} 182 | return a, nil 183 | } 184 | 185 | var _podinfoValuesOverrideYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\x8e\x4d\x8b\x1a\x41\x10\x86\xef\xfd\x2b\xde\xb4\x67\x5b\x61\x22\x42\xdf\x82\x26\x24\x60\x5c\x58\xdd\xf3\x52\xf6\x94\x33\x83\x6d\xd7\xd0\x55\xae\x0b\xe2\x7f\x5f\x66\x6f\x0f\x0f\xbc\x1f\x95\xc7\x3c\x24\xda\xc8\xad\x58\x44\xe3\xb2\x74\x3b\xfe\xe0\x1c\x31\x94\xb3\x38\x77\x1b\xa2\x03\x92\x64\xa9\x11\x7e\xd6\xfc\x5c\xad\xd7\xc9\x3b\xe0\xca\xaa\xd4\x71\x84\xff\xcb\x39\x0b\xee\x52\x73\xfb\xc3\x3b\x37\x5c\x27\xed\x00\xa3\x2e\x62\x15\x96\xa1\x71\x6e\x28\x5d\x65\xd5\x49\x73\xa1\x53\xe6\x36\xe2\x4c\x59\xd9\xcd\x80\x91\xac\x8f\x58\x4c\xd8\x8b\x9a\xc6\x89\xe6\xf0\xa3\xb4\xd3\x89\xf0\x78\x20\xdc\xa5\x5e\xb4\x97\x31\x59\x0e\x9b\xdd\xdb\xe1\xf8\xfb\xf5\x7d\xfb\xf2\xff\xd7\xbf\x3d\x9e\x4f\x3f\x05\xa8\x14\x31\xb2\x41\xca\xf7\x0c\x30\xc3\x1f\xa9\x89\x41\xd0\x5e\xaa\xe1\x78\xdc\x41\x05\xd6\x93\x61\xbb\x3f\xa0\x72\x92\xda\x22\xf5\x54\x3a\x56\x24\x2a\x18\xab\x8c\xd4\x91\x31\xce\xa4\xc6\x75\xea\x05\xf8\xd3\xb8\x16\xca\xf3\xb6\x68\xa0\x3c\xf6\x14\x2e\xb7\x13\xd7\xc2\xc6\x1a\x06\x59\x98\xe5\x08\xdf\x2c\xd5\xbb\xaf\x00\x00\x00\xff\xff\x19\x61\xfc\xea\x50\x01\x00\x00") 186 | 187 | func 
podinfoValuesOverrideYamlBytes() ([]byte, error) { 188 | return bindataRead( 189 | _podinfoValuesOverrideYaml, 190 | "podinfo/values-override.yaml", 191 | ) 192 | } 193 | 194 | func podinfoValuesOverrideYaml() (*asset, error) { 195 | bytes, err := podinfoValuesOverrideYamlBytes() 196 | if err != nil { 197 | return nil, err 198 | } 199 | 200 | info := bindataFileInfo{name: "podinfo/values-override.yaml", size: 336, mode: os.FileMode(436), modTime: time.Unix(1577836800, 0)} 201 | a := &asset{bytes: bytes, info: info} 202 | return a, nil 203 | } 204 | 205 | // Asset loads and returns the asset for the given name. 206 | // It returns an error if the asset could not be found or 207 | // could not be loaded. 208 | func Asset(name string) ([]byte, error) { 209 | cannonicalName := strings.Replace(name, "\\", "/", -1) 210 | if f, ok := _bindata[cannonicalName]; ok { 211 | a, err := f() 212 | if err != nil { 213 | return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) 214 | } 215 | return a.bytes, nil 216 | } 217 | return nil, fmt.Errorf("Asset %s not found", name) 218 | } 219 | 220 | // MustAsset is like Asset but panics when Asset would return an error. 221 | // It simplifies safe initialization of global variables. 222 | func MustAsset(name string) []byte { 223 | a, err := Asset(name) 224 | if err != nil { 225 | panic("asset: Asset(" + name + "): " + err.Error()) 226 | } 227 | 228 | return a 229 | } 230 | 231 | // AssetInfo loads and returns the asset info for the given name. 232 | // It returns an error if the asset could not be found or 233 | // could not be loaded. 
234 | func AssetInfo(name string) (os.FileInfo, error) { 235 | cannonicalName := strings.Replace(name, "\\", "/", -1) 236 | if f, ok := _bindata[cannonicalName]; ok { 237 | a, err := f() 238 | if err != nil { 239 | return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) 240 | } 241 | return a.info, nil 242 | } 243 | return nil, fmt.Errorf("AssetInfo %s not found", name) 244 | } 245 | 246 | // AssetNames returns the names of the assets. 247 | func AssetNames() []string { 248 | names := make([]string, 0, len(_bindata)) 249 | for name := range _bindata { 250 | names = append(names, name) 251 | } 252 | return names 253 | } 254 | 255 | // _bindata is a table, holding each asset generator, mapped to its name. 256 | var _bindata = map[string]func() (*asset, error){ 257 | "core-workshop-infra/Chart.yaml": coreWorkshopInfraChartYaml, 258 | "core-workshop-infra/templates/code-server.yaml": coreWorkshopInfraTemplatesCodeServerYaml, 259 | "core-workshop-infra/templates/external-dns.yaml": coreWorkshopInfraTemplatesExternalDnsYaml, 260 | "core-workshop-infra/templates/traefik-2.yaml": coreWorkshopInfraTemplatesTraefik2Yaml, 261 | "podinfo/external-chart": podinfoExternalChart, 262 | "podinfo/values-override.yaml": podinfoValuesOverrideYaml, 263 | } 264 | 265 | // AssetDir returns the file names below a certain 266 | // directory embedded in the file by go-bindata. 267 | // For example if you run go-bindata on data/... and data contains the 268 | // following hierarchy: 269 | // data/ 270 | // foo.txt 271 | // img/ 272 | // a.png 273 | // b.png 274 | // then AssetDir("data") would return []string{"foo.txt", "img"} 275 | // AssetDir("data/img") would return []string{"a.png", "b.png"} 276 | // AssetDir("foo.txt") and AssetDir("notexist") would return an error 277 | // AssetDir("") will return []string{"data"}. 
278 | func AssetDir(name string) ([]string, error) { 279 | node := _bintree 280 | if len(name) != 0 { 281 | cannonicalName := strings.Replace(name, "\\", "/", -1) 282 | pathList := strings.Split(cannonicalName, "/") 283 | for _, p := range pathList { 284 | node = node.Children[p] 285 | if node == nil { 286 | return nil, fmt.Errorf("Asset %s not found", name) 287 | } 288 | } 289 | } 290 | if node.Func != nil { 291 | return nil, fmt.Errorf("Asset %s not found", name) 292 | } 293 | rv := make([]string, 0, len(node.Children)) 294 | for childName := range node.Children { 295 | rv = append(rv, childName) 296 | } 297 | return rv, nil 298 | } 299 | 300 | type bintree struct { 301 | Func func() (*asset, error) 302 | Children map[string]*bintree 303 | } 304 | 305 | var _bintree = &bintree{nil, map[string]*bintree{ 306 | "core-workshop-infra": {nil, map[string]*bintree{ 307 | "Chart.yaml": {coreWorkshopInfraChartYaml, map[string]*bintree{}}, 308 | "templates": {nil, map[string]*bintree{ 309 | "code-server.yaml": {coreWorkshopInfraTemplatesCodeServerYaml, map[string]*bintree{}}, 310 | "external-dns.yaml": {coreWorkshopInfraTemplatesExternalDnsYaml, map[string]*bintree{}}, 311 | "traefik-2.yaml": {coreWorkshopInfraTemplatesTraefik2Yaml, map[string]*bintree{}}, 312 | }}, 313 | }}, 314 | "podinfo": {nil, map[string]*bintree{ 315 | "external-chart": {podinfoExternalChart, map[string]*bintree{}}, 316 | "values-override.yaml": {podinfoValuesOverrideYaml, map[string]*bintree{}}, 317 | }}, 318 | }} 319 | 320 | // RestoreAsset restores an asset under the given directory 321 | func RestoreAsset(dir, name string) error { 322 | data, err := Asset(name) 323 | if err != nil { 324 | return err 325 | } 326 | info, err := AssetInfo(name) 327 | if err != nil { 328 | return err 329 | } 330 | err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) 331 | if err != nil { 332 | return err 333 | } 334 | err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) 335 | if err != 
nil { 336 | return err 337 | } 338 | err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) 339 | if err != nil { 340 | return err 341 | } 342 | return nil 343 | } 344 | 345 | // RestoreAssets restores an asset under the given directory recursively 346 | func RestoreAssets(dir, name string) error { 347 | children, err := AssetDir(name) 348 | // File 349 | if err != nil { 350 | return RestoreAsset(dir, name) 351 | } 352 | // Dir 353 | for _, child := range children { 354 | err = RestoreAssets(dir, filepath.Join(name, child)) 355 | if err != nil { 356 | return err 357 | } 358 | } 359 | return nil 360 | } 361 | 362 | func _filePath(dir, name string) string { 363 | cannonicalName := strings.Replace(name, "\\", "/", -1) 364 | return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 365 | } 366 | -------------------------------------------------------------------------------- /pkg/config/keyval/keyval.go: -------------------------------------------------------------------------------- 1 | package keyval 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 7 | ) 8 | 9 | var externalDNSMap = map[string]string{ 10 | "digitalocean": "digitalocean", 11 | "gke": "google", 12 | "scaleway": "scaleway", 13 | "aws": "aws", 14 | "cloudflare": "cloudflare", 15 | } 16 | 17 | var traefikDNSMap = map[string]string{ 18 | "digitalocean": "digitalocean", 19 | "gke": "gcloud", 20 | "scaleway": "scaleway", 21 | "aws": "route53", 22 | "cloudflare": "cloudflare", 23 | } 24 | 25 | func FromClusterInfo(cfg *config.ClusterInfo) *Parameters { 26 | return &Parameters{ 27 | WorkshopctlParameters: WorkshopctlParameters{ 28 | CloudProvider: cfg.CloudProvider.Name, 29 | CloudProviderServiceAccount: cfg.CloudProvider.ServiceAccountContent, 30 | CloudProviderSpecific: cfg.CloudProvider.ProviderSpecific, 31 | 32 | ExternalDNSProvider: externalDNSMap[cfg.DNSProvider.Name], 33 | TraefikDNSProvider: 
traefikDNSMap[cfg.DNSProvider.Name], 34 | DNSProviderServiceAccount: cfg.DNSProvider.ServiceAccountContent, 35 | DNSProviderSpecific: cfg.DNSProvider.ProviderSpecific, 36 | 37 | RootDomain: cfg.RootDomain, 38 | ClusterDomain: cfg.Domain(), 39 | 40 | TutorialsRepo: cfg.Tutorials.Repo, 41 | TutorialsDir: cfg.Tutorials.Dir, 42 | 43 | LetsEncryptEmail: cfg.LetsEncryptEmail, 44 | 45 | ClusterPassword: cfg.Password, 46 | ClusterBasicAuth: cfg.BasicAuth(), 47 | }, 48 | } 49 | } 50 | 51 | type Parameters struct { 52 | WorkshopctlParameters `json:"workshopctl"` 53 | } 54 | 55 | type WorkshopctlParameters struct { 56 | CloudProvider string `json:"CLOUD_PROVIDER"` 57 | CloudProviderServiceAccount string `json:"CLOUD_PROVIDER_SERVICEACCOUNT"` 58 | CloudProviderSpecific map[string]string `json:"-"` 59 | 60 | ExternalDNSProvider string `json:"EXTERNAL_DNS_PROVIDER"` 61 | TraefikDNSProvider string `json:"TRAEFIK_DNS_PROVIDER"` 62 | DNSProviderServiceAccount string `json:"DNS_PROVIDER_SERVICEACCOUNT"` 63 | DNSProviderSpecific map[string]string `json:"-"` 64 | 65 | RootDomain string `json:"ROOT_DOMAIN"` 66 | ClusterDomain string `json:"CLUSTER_DOMAIN"` 67 | 68 | TutorialsRepo string `json:"TUTORIALS_REPO"` 69 | TutorialsDir string `json:"TUTORIALS_DIR"` 70 | 71 | LetsEncryptEmail string `json:"LETSENCRYPT_EMAIL"` 72 | 73 | ClusterPassword string `json:"CLUSTER_PASSWORD"` 74 | ClusterBasicAuth string `json:"CLUSTER_BASIC_AUTH_BCRYPT"` 75 | } 76 | 77 | func (p *Parameters) ToMap() map[string]string { 78 | b, _ := json.Marshal(p.WorkshopctlParameters) 79 | m := map[string]string{} 80 | _ = json.Unmarshal(b, &m) 81 | for k, v := range p.CloudProviderSpecific { 82 | m[k] = v 83 | } 84 | // TODO: handle conflicts? 
85 | for k, v := range p.DNSProviderSpecific { 86 | m[k] = v 87 | } 88 | return m 89 | } 90 | 91 | func (p *Parameters) ToMapWithWorkshopctl() map[string]interface{} { 92 | return map[string]interface{}{ 93 | "workshopctl": p.ToMap(), 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /pkg/config/types.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io/ioutil" 8 | "path/filepath" 9 | "strings" 10 | "sync" 11 | 12 | "github.com/cloud-native-nordics/workshopctl/pkg/constants" 13 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 14 | "github.com/fluxcd/go-git-providers/gitprovider" 15 | "github.com/sirupsen/logrus" 16 | giturls "github.com/whilp/git-urls" 17 | "golang.org/x/crypto/bcrypt" 18 | "golang.org/x/oauth2" 19 | ) 20 | 21 | type Config struct { 22 | // The prefix to use for all identifying names/tags/etc. 23 | // This allows an user to have multiple workshop environments at once in the same provider 24 | Name string `json:"name"` 25 | 26 | // CloudProvider specifies what cloud provider to use and how to authenticate with it. 27 | CloudProvider Provider `json:"cloudProvider"` 28 | // DNSProvider specifies what dns provider to use and how to authenticate with it. 29 | DNSProvider Provider `json:"dnsProvider"` 30 | 31 | RootDomain string `json:"rootDomain"` 32 | // How many clusters should be created? 33 | Clusters uint16 `json:"clusters"` 34 | // Where to store the manifests for collaboration? 
35 | Git Git `json:"git"` 36 | 37 | // If this is specified you can use "sealed secrets" 38 | // TODO: Implement this with the help of Mozilla SOPS 39 | // GPGKeyID string `json:"gpgKeyID"` 40 | 41 | // Whom to contact by Let's Encrypt 42 | LetsEncryptEmail string `json:"letsEncryptEmail"` 43 | 44 | Tutorials Tutorials `json:"tutorials"` 45 | 46 | ClusterLogin ClusterLogin `json:"clusterLogin"` 47 | 48 | NodeGroups []NodeGroup `json:"nodeGroups"` 49 | } 50 | 51 | func (c *Config) Validate() error { 52 | if c.Name == "" { 53 | return fmt.Errorf("name must not be empty") 54 | } 55 | if c.CloudProvider.ServiceAccountPath == "" { 56 | return fmt.Errorf("must specify cloud provider SA path") 57 | } 58 | if c.DNSProvider.ServiceAccountPath == "" { 59 | return fmt.Errorf("must specify DNS provider SA path") 60 | } 61 | if c.RootDomain == "" { 62 | return fmt.Errorf("root domain must not be empty") 63 | } 64 | if c.LetsEncryptEmail == "" { 65 | return fmt.Errorf("Let's Encrypt email must not be empty") 66 | } 67 | if c.Git.Repo == "" { 68 | return fmt.Errorf("must specify backing git repo") 69 | } 70 | if c.Git.ServiceAccountPath == "" { 71 | return fmt.Errorf("must specify git provider token") 72 | } 73 | return nil 74 | } 75 | 76 | func (c *Config) Complete(ctx context.Context) error { 77 | // First validate the struct 78 | if err := c.Validate(); err != nil { 79 | return err 80 | } 81 | if c.CloudProvider.Name == "" { 82 | c.CloudProvider.Name = "digitalocean" 83 | } 84 | if c.DNSProvider.Name == "" { 85 | c.DNSProvider.Name = "digitalocean" 86 | } 87 | if c.Clusters == 0 { 88 | c.Clusters = 1 89 | } 90 | if c.ClusterLogin.Username == "" { 91 | c.ClusterLogin.Username = "workshopctl" 92 | } 93 | if c.ClusterLogin.CommonPassword == "" { 94 | pass, err := util.RandomSHA(4) 95 | if err != nil { 96 | return err 97 | } 98 | // TODO: This maybe shouldn't "leak" to the config file when marshalling? 
99 | c.ClusterLogin.CommonPassword = pass 100 | } 101 | if c.CloudProvider.ServiceAccountPath != "" { 102 | saPath := util.JoinPaths(ctx, c.CloudProvider.ServiceAccountPath) 103 | if err := readFileInto(saPath, &c.CloudProvider.ServiceAccountContent); err != nil { 104 | return err 105 | } 106 | } 107 | if c.DNSProvider.ServiceAccountPath != "" { 108 | saPath := util.JoinPaths(ctx, c.DNSProvider.ServiceAccountPath) 109 | if err := readFileInto(saPath, &c.DNSProvider.ServiceAccountContent); err != nil { 110 | return err 111 | } 112 | } 113 | if c.Git.ServiceAccountPath != "" { 114 | saPath := util.JoinPaths(ctx, c.Git.ServiceAccountPath) 115 | if err := readFileInto(saPath, &c.Git.ServiceAccountContent); err != nil { 116 | return err 117 | } 118 | } 119 | if c.NodeGroups == nil { 120 | c.NodeGroups = []NodeGroup{ 121 | { 122 | Instances: 1, 123 | NodeClaim: NodeClaim{ 124 | CPU: 2, 125 | RAM: 4, 126 | Dedicated: false, 127 | }, 128 | }, 129 | } 130 | } 131 | // Parse the git URL 132 | // TODO: This should live in go-git-providers 133 | u, err := giturls.Parse(c.Git.Repo) 134 | if err != nil { 135 | return err 136 | } 137 | paths := strings.Split(u.Path, "/") 138 | c.Git.RepoStruct = gitprovider.UserRepositoryRef{ 139 | UserRef: gitprovider.UserRef{ 140 | Domain: u.Host, 141 | UserLogin: paths[0], 142 | }, 143 | RepositoryName: strings.TrimSuffix(paths[1], ".git"), 144 | } 145 | return nil 146 | } 147 | 148 | type ServiceAccount struct { 149 | // ServiceAccountPath specifies the file path to the service account 150 | ServiceAccountPath string `json:"serviceAccountPath"` 151 | // The contents of ServiceAccountPath, read at runtime and never marshalled. 
152 | ServiceAccountContent string `json:"-"` 153 | } 154 | 155 | // If the ServiceAccount is an oauth2 token, this helper method might be useful for 156 | // the implementing provider 157 | func (sa ServiceAccount) TokenSource() oauth2.TokenSource { 158 | return oauth2.StaticTokenSource(&oauth2.Token{AccessToken: sa.ServiceAccountContent}) 159 | } 160 | 161 | type Provider struct { 162 | // Name of the provider. For now, only "digitalocean" is supported. 163 | Name string `json:"name"` 164 | // The ServiceAccount struct is embedded and inlined into the provider 165 | ServiceAccount `json:",inline"` 166 | // Provider-specific data 167 | ProviderSpecific map[string]string `json:"providerSpecific,omitempty"` 168 | } 169 | 170 | type NodeGroup struct { 171 | Instances uint16 `json:"instances"` 172 | NodeClaim NodeClaim `json:"nodeClaim"` 173 | } 174 | 175 | type NodeClaim struct { 176 | CPU uint16 `json:"cpus"` 177 | RAM uint16 `json:"ram"` 178 | // Refers to if the CPU is shared with other tenants, or dedicated for this VM 179 | Dedicated bool `json:"dedicated"` 180 | } 181 | 182 | type Git struct { 183 | // Repo specifies where the "infra" git repo should be 184 | Repo string `json:"repo"` 185 | RepoStruct gitprovider.UserRepositoryRef `json:"-"` 186 | 187 | // The ServiceAccount struct is embedded and inlined into this struct 188 | ServiceAccount `json:",inline"` 189 | } 190 | 191 | type ClusterLogin struct { 192 | // Username for basic auth logins. Defaults to workshopctl. 193 | Username string `json:"username"` 194 | // CommonPassword sets the same password for VS code and all basic auth 195 | // for all clusters. If unset, a random password will be generated. 196 | CommonPassword string `json:"commonPassword"` 197 | // UniquePasswords tells whether every cluster should have its own password. 198 | // By default false, which means all clusters share CommonPassword. If true, 199 | // CommonPassword will be ignored and all clusters' passwords will be generated. 
200 | UniquePasswords bool `json:"uniquePasswords"` 201 | } 202 | 203 | type Tutorials struct { 204 | Repo string `json:"repo"` 205 | Dir string `json:"dir"` 206 | } 207 | 208 | type ClusterInfo struct { 209 | *Config 210 | Index ClusterNumber 211 | Password string 212 | } 213 | 214 | func NewClusterInfo(ctx context.Context, cfg *Config, i ClusterNumber) *ClusterInfo { 215 | pass := cfg.ClusterLogin.CommonPassword 216 | if cfg.ClusterLogin.UniquePasswords { 217 | var err error 218 | pass, err = util.RandomSHA(4) // TODO: constant 219 | if err != nil { 220 | panic(err) 221 | } 222 | // Warn about possible misconfigurations 223 | if len(cfg.ClusterLogin.CommonPassword) != 0 { 224 | util.Logger(ctx).Warnf("You have specified both .ClusterLogin.UniquePasswords and .ClusterLogin.CommonPassword. UniquePasswords has higher priority and hence CommonPassword is ignored.") 225 | } 226 | } 227 | return &ClusterInfo{cfg, i, pass} 228 | } 229 | 230 | func (c *ClusterInfo) Domain() string { 231 | return c.Index.Domain(c.RootDomain) 232 | } 233 | 234 | func (c *ClusterInfo) BasicAuth() string { 235 | hash, err := bcrypt.GenerateFromPassword([]byte(c.Password), bcrypt.DefaultCost) 236 | if err != nil { 237 | panic(err) 238 | } 239 | return fmt.Sprintf("%s:%s", c.ClusterLogin.Username, hash) 240 | } 241 | 242 | var _ fmt.Stringer = ClusterNumber(0) 243 | 244 | type ClusterNumber uint16 245 | 246 | func (n ClusterNumber) String() string { 247 | return fmt.Sprintf("%02d", n) 248 | } 249 | 250 | func (n ClusterNumber) Subdomain() string { 251 | return fmt.Sprintf("cluster-%s", n) 252 | } 253 | 254 | func (n ClusterNumber) Domain(rootDomain string) string { 255 | return fmt.Sprintf("%s.%s", n.Subdomain(), rootDomain) 256 | } 257 | 258 | func (n ClusterNumber) ClusterDir() string { 259 | return filepath.Join(constants.ClustersDir, n.String()) 260 | } 261 | 262 | func (n ClusterNumber) KubeConfigPath() string { 263 | return filepath.Join(n.ClusterDir(), constants.KubeconfigFile) 264 | } 
265 | 266 | func ForCluster(ctx context.Context, n uint16, cfg *Config, fn func(context.Context, *ClusterInfo) error) error { 267 | logrus.Debugf("Running function for all %d clusters", n) 268 | 269 | wg := &sync.WaitGroup{} 270 | wg.Add(int(n)) 271 | foundErr := false 272 | 273 | // mutex shared by cluster threads when they need to coordinate 274 | // TODO: This is limited to only one lock operation, consider supporting more in the future 275 | mux := &sync.Mutex{} 276 | for i := ClusterNumber(1); i <= ClusterNumber(n); i++ { 277 | go func(j ClusterNumber) { 278 | clusterCtx := util.WithClusterNumber(ctx, uint16(j)) 279 | clusterCtx = util.WithMutex(clusterCtx, mux) 280 | logger := util.Logger(clusterCtx) 281 | logger.Tracef("ForCluster goroutine starting...") 282 | clusterInfo := NewClusterInfo(clusterCtx, cfg, j) 283 | if err := fn(clusterCtx, clusterInfo); err != nil { 284 | logger.Error(err) 285 | foundErr = true 286 | } 287 | logger.Tracef("ForCluster goroutine is done") 288 | wg.Done() 289 | }(i) 290 | } 291 | wg.Wait() 292 | if foundErr { 293 | return fmt.Errorf("an error occured previously") 294 | } 295 | return nil 296 | } 297 | 298 | func readFileInto(file string, target *string) error { 299 | b, err := ioutil.ReadFile(file) 300 | if err != nil { 301 | return err 302 | } 303 | *target = string(bytes.TrimSpace(b)) 304 | return nil 305 | } 306 | -------------------------------------------------------------------------------- /pkg/constants/constants.go: -------------------------------------------------------------------------------- 1 | package constants 2 | 3 | import "fmt" 4 | 5 | const ( 6 | // Top-level directories, i.e. 
./ 7 | ChartsDir = "charts" 8 | ClustersDir = "clusters" 9 | CacheDir = ".cache" 10 | 11 | // Under ./{ChartsDir}// 12 | // Helm-specific 13 | TemplatesDir = "templates" 14 | ChartYAML = "Chart.yaml" 15 | // workshopctl "extensions" 16 | NamespaceFile = "namespace" 17 | ExternalChartFile = "external-chart" 18 | ValuesOverrideYAML = "values-override.yaml" 19 | // jq "extensions" 20 | PipeJS = "pipe.js" 21 | ValuesJS = "values.js" 22 | 23 | // Under ./{ClustersDir}// 24 | KubeconfigFile = ".kubeconfig" 25 | 26 | // The default namespace in k8s is called "default" 27 | DefaultNamespace = "default" 28 | WorkshopctlNamespace = "workshopctl" 29 | 30 | WorkshopctlSecret = "workshopctl" 31 | ) 32 | 33 | func ClusterName(namePrefix string, index fmt.Stringer) string { 34 | return fmt.Sprintf("workshopctl-%s-%s", namePrefix, index) 35 | } 36 | 37 | // These files will be copied from ./charts// to ./.cache// 38 | var KnownChartFiles = []string{ 39 | // Helm "classic" files 40 | TemplatesDir, 41 | ChartYAML, 42 | // TODO: Include the "classic", non-templated, base values.yaml here too. 
43 | 44 | // workshopctl-specific files 45 | NamespaceFile, 46 | ExternalChartFile, 47 | ValuesOverrideYAML, 48 | // jq-specific files 49 | PipeJS, 50 | ValuesJS, 51 | } 52 | -------------------------------------------------------------------------------- /pkg/gen/gen.go: -------------------------------------------------------------------------------- 1 | package gen 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "net/url" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | 14 | "github.com/cloud-native-nordics/workshopctl/pkg/charts" 15 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 16 | "github.com/cloud-native-nordics/workshopctl/pkg/config/keyval" 17 | "github.com/cloud-native-nordics/workshopctl/pkg/constants" 18 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 19 | log "github.com/sirupsen/logrus" 20 | kyaml "sigs.k8s.io/kustomize/kyaml/yaml" 21 | "sigs.k8s.io/yaml" 22 | ) 23 | 24 | type ChartData struct { 25 | Name string 26 | CacheDir string 27 | CopiedFiles map[string]string 28 | } 29 | 30 | type Processor interface { 31 | Process(ctx context.Context, cd *ChartData, p *keyval.Parameters, r io.Reader, w io.Writer) error 32 | } 33 | 34 | func SetupInternalChartCache(ctx context.Context) ([]*ChartData, error) { 35 | // Restore built-in charts/* to .cache/* 36 | if err := charts.RestoreAssets(util.JoinPaths(ctx, constants.CacheDir), ""); err != nil { 37 | return nil, err 38 | } 39 | // List internal chart names 40 | charts, err := charts.AssetDir("") 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | // Now that the internal files are extracted to disk, 46 | // process them exactly as normal "external" charts 47 | chartCache := make([]*ChartData, 0, len(charts)) 48 | for _, chart := range charts { 49 | cd, err := SetupExternalChartCache(ctx, chart) 50 | if err != nil { 51 | return nil, err 52 | } 53 | chartCache = append(chartCache, cd) 54 | } 55 | return chartCache, nil 56 | } 57 | 58 | func 
SetupExternalChartCache(ctx context.Context, chartName string) (*ChartData, error) { 59 | cd := &ChartData{ 60 | CacheDir: util.JoinPaths(ctx, constants.CacheDir, chartName), 61 | Name: chartName, 62 | CopiedFiles: map[string]string{}, 63 | } 64 | 65 | // Create the .cache directory for the chart 66 | if err := os.MkdirAll(cd.CacheDir, 0755); err != nil { 67 | return nil, err 68 | } 69 | 70 | chartDir := util.JoinPaths(ctx, constants.ChartsDir, chartName) 71 | for _, f := range constants.KnownChartFiles { 72 | from := filepath.Join(chartDir, f) 73 | to := filepath.Join(cd.CacheDir, f) 74 | 75 | fromExists, _ := util.PathExists(from) 76 | toExists, _ := util.PathExists(to) 77 | if !fromExists && !toExists { 78 | continue // nothing to do 79 | } 80 | if fromExists { // if from exists, always copy to make sure to is up-to-date 81 | if err := util.Copy(from, to); err != nil { 82 | return nil, err 83 | } 84 | } 85 | // if to exists, but not from, just proceed and register to 86 | cd.CopiedFiles[f] = to 87 | } 88 | 89 | // Download the chart if it's explicitely said to be external 90 | if externalChartFile, ok := cd.CopiedFiles[constants.ExternalChartFile]; ok { 91 | if err := downloadChart(ctx, externalChartFile); err != nil { 92 | return nil, err 93 | } 94 | } 95 | 96 | return cd, nil 97 | } 98 | 99 | func downloadChart(ctx context.Context, externalChartFile string) error { 100 | // Read contents of the external-chart file 101 | b, err := ioutil.ReadFile(externalChartFile) 102 | if err != nil { 103 | return err 104 | } 105 | externalChart := string(b) 106 | 107 | // Expecting something like: 108 | // "stable/kubernetes-dashboard" 109 | // "https://charts.fluxcd.io/flux" 110 | u, err := url.Parse(externalChart) 111 | if err != nil { 112 | return err 113 | } 114 | if len(u.Scheme) > 0 { 115 | // Remove the last path element from the URL; that's the name of the chart 116 | cname := filepath.Base(u.Path) 117 | u.Path = filepath.Dir(u.Path) 118 | // Replace dots with dashes 
in order to craft the name of the repo 119 | crepo := strings.ReplaceAll(u.Host, ".", "-") 120 | // The chart name is "${repo}/${name}" 121 | externalChart = filepath.Join(crepo, cname) 122 | 123 | // Make sure the repo is registered correctly 124 | out, _, err := util.Command(ctx, "helm", "repo", "list").Run() 125 | if err != nil { 126 | return err 127 | } 128 | // Only add the repo if it doesn't already exist 129 | if !strings.Contains(out, crepo) { 130 | log.Infof("Adding a new helm repo called %q pointing to %q", crepo, u.String()) 131 | _, _, err = util.Command(ctx, "helm", "repo", "add", crepo, u.String()).Run() 132 | if err != nil { 133 | return err 134 | } 135 | } 136 | } else { 137 | arr := strings.Split(externalChart, "/") 138 | if len(arr) != 2 { 139 | return fmt.Errorf("invalid format of %q: %q. Should be either {stable,test}/{name} or {repo-url}/{name}", constants.ExternalChartFile, externalChart) 140 | } 141 | } 142 | 143 | log.Infof("Found external chart to download %q", externalChart) 144 | // This extracts the chart to e.g. 
.cache/kubernetes-dashboard/{Chart.yaml,values.yaml,templates} 145 | cacheDir := util.JoinPaths(ctx, constants.CacheDir) 146 | tmpCacheDir := util.JoinPaths(ctx, cacheDir, "tmp") 147 | 148 | if exists, _ := util.PathExists(tmpCacheDir); exists { 149 | if err := os.RemoveAll(tmpCacheDir); err != nil { 150 | return err 151 | } 152 | } 153 | 154 | _, _, err = util.Command(ctx, "helm", "fetch", externalChart, "--untar", "--untardir", tmpCacheDir).Run() 155 | if err != nil { 156 | return err 157 | } 158 | return util.Copy(tmpCacheDir, cacheDir) 159 | } 160 | 161 | func GenerateChart(ctx context.Context, cd *ChartData, clusterInfo *config.ClusterInfo, valuesProcessors, chartProcessors []Processor) error { 162 | logger := util.Logger(ctx).WithField("chart", cd.Name) 163 | 164 | namespace := constants.DefaultNamespace 165 | if nsFile, ok := cd.CopiedFiles[constants.NamespaceFile]; ok { 166 | b, err := ioutil.ReadFile(nsFile) 167 | if err != nil { 168 | return err 169 | } 170 | namespace = string(b) 171 | } 172 | // 1. Read values.yaml, if exists, otherwise start with an empty buffer as the first io.Reader 173 | // 2. Attach the valuesYAMLProcessor{} values processor which adds the parameters as needed 174 | // 3. Invoke other values processors as needed in a chain 175 | // 4. Run "helm template -n %s workshopctl chart -f -" with values as stdin 176 | // 5. Invoke other chart processors, but always the \{\{ => {{ one 177 | // 6. Write output to ./clusters/001/.yaml 178 | 179 | processorChain := []Processor{ 180 | &valuesYAMLProcessor{}, 181 | } 182 | processorChain = append(processorChain, valuesProcessors...) 183 | processorChain = append(processorChain, []Processor{ 184 | &helmTemplateProcessor{namespace}, 185 | &nsProcessor{namespace}, 186 | &unescapeGoTmpls{}, 187 | }...) 188 | processorChain = append(processorChain, chartProcessors...) 
189 | 190 | p := keyval.FromClusterInfo(clusterInfo) 191 | 192 | // If there is a ./.cache//values-override.yaml file, use that as the "beginning" of the processor chain 193 | var initialData []byte 194 | if valuesOverrideYAML, ok := cd.CopiedFiles[constants.ValuesOverrideYAML]; ok { 195 | var err error 196 | initialData, err = ioutil.ReadFile(valuesOverrideYAML) 197 | if err != nil { 198 | return err 199 | } 200 | logger.Tracef("Read file %q, got contents: %s", valuesOverrideYAML, initialData) 201 | } 202 | 203 | input := bytes.NewBuffer(initialData) 204 | output := new(bytes.Buffer) 205 | for i, processor := range processorChain { 206 | logger.Tracef("Before processor %d: %s", i, input.String()) 207 | if err := processor.Process(ctx, cd, p, input, output); err != nil { 208 | logger.Errorf("error: %v, output: %s", err, output.String()) 209 | return err 210 | } 211 | // Reset the input array, that is no longer needed 212 | input.Reset() 213 | // Now we can set the output pointer to be the next input, and the reset output to be an 214 | // empty buffer but with pre-created capacity 215 | var tmp = input 216 | input = output 217 | output = tmp 218 | } 219 | logger.Tracef("After all processing: %s", output.String()) 220 | 221 | outputFile := util.JoinPaths(ctx, constants.ClustersDir, clusterInfo.Index.String(), fmt.Sprintf("%s.yaml", cd.Name)) 222 | // TODO: Make "fake" os.MkdirAll and os.Create util calls that can be used for dry-running 223 | if err := os.MkdirAll(filepath.Dir(outputFile), 0755); err != nil { 224 | return err 225 | } 226 | f, err := os.Create(outputFile) 227 | if err != nil { 228 | return err 229 | } 230 | defer f.Close() 231 | _, err = io.Copy(f, input) 232 | return err 233 | } 234 | 235 | // and template them! 
TODO: Change this name later 236 | type valuesYAMLProcessor struct{} 237 | 238 | func (pr *valuesYAMLProcessor) Process(ctx context.Context, _ *ChartData, p *keyval.Parameters, r io.Reader, w io.Writer) error { 239 | // It is possible that r doesn't have any content and b will be empty and err == nil 240 | // This is expected if there wasn't a values.yaml file present. 241 | b, err := ioutil.ReadAll(r) 242 | if err != nil { 243 | return err 244 | } 245 | 246 | // Apply templating for customizing the values.yaml file 247 | b, err = util.ApplyTemplate(string(b), p.ToMapWithWorkshopctl()) 248 | if err != nil { 249 | return err 250 | } 251 | 252 | // Add an extra newline between the original, templated data and our own YAML below 253 | b = append(b, byte('\n')) 254 | 255 | yamlBytes, err := yaml.Marshal(p) 256 | if err != nil { 257 | return err 258 | } 259 | b = append(b, yamlBytes...) 260 | 261 | // Write everything to the next processor 262 | _, err = w.Write(b) 263 | return err 264 | } 265 | 266 | type helmTemplateProcessor struct { 267 | namespace string 268 | } 269 | 270 | func (pr *helmTemplateProcessor) Process(ctx context.Context, cd *ChartData, _ *keyval.Parameters, r io.Reader, w io.Writer) error { 271 | _, _, err := util.ShellCommand(ctx, `helm template -n %s workshopctl . -f -`, pr.namespace). 272 | WithStdio(r, w, nil). 273 | WithPwd(cd.CacheDir). 
274 | Run() 275 | return err 276 | } 277 | 278 | type unescapeGoTmpls struct{} 279 | 280 | func (pr *unescapeGoTmpls) Process(ctx context.Context, _ *ChartData, _ *keyval.Parameters, r io.Reader, w io.Writer) error { 281 | b, err := ioutil.ReadAll(r) 282 | if err != nil { 283 | return err 284 | } 285 | b = bytes.ReplaceAll(b, []byte(`\{`), []byte(`{`)) 286 | b = bytes.ReplaceAll(b, []byte(`\}`), []byte(`}`)) 287 | 288 | _, err = w.Write(b) 289 | return err 290 | } 291 | 292 | type nsProcessor struct { 293 | ns string 294 | } 295 | 296 | func (pr *nsProcessor) Process(ctx context.Context, cd *ChartData, p *keyval.Parameters, r io.Reader, w io.Writer) error { 297 | return util.KYAMLFilter(r, w, util.KYAMLFilterFunc( 298 | func(node *kyaml.RNode) (*kyaml.RNode, error) { 299 | return node, node.PipeE( 300 | kyaml.LookupCreate(kyaml.MappingNode, "metadata"), 301 | kyaml.FieldMatcher{Name: "namespace", Create: kyaml.NewScalarRNode(pr.ns)}, 302 | ) 303 | }, 304 | )) 305 | } 306 | -------------------------------------------------------------------------------- /pkg/git/git.go: -------------------------------------------------------------------------------- 1 | package git 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "os" 8 | "strings" 9 | 10 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 11 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 12 | ) 13 | 14 | func PushManifests(ctx context.Context, cfg *config.Config) error { 15 | isNew := false 16 | if ok, fi := util.PathExists(".git"); !ok { 17 | if _, _, err := util.Command(ctx, "git", "init").Run(); err != nil { 18 | return err 19 | } 20 | isNew = true 21 | } else if !fi.IsDir() { 22 | return fmt.Errorf(".git must be a directory") 23 | } 24 | 25 | if !isNew { 26 | out, err := gitRun(ctx, "branch") 27 | if err != nil { 28 | return err 29 | } 30 | if len(out) != 0 { 31 | out, err := gitRun(ctx, "describe", "--dirty", "--always") 32 | if err != nil { 33 | return err 34 | } 35 | if 
strings.Contains(out, "dirty") { 36 | //return fmt.Errorf("won't do anything for dirty git state: %s", out) 37 | fmt.Println("git state is dirty") 38 | } 39 | } 40 | } 41 | 42 | gitIgnoreBytes, err := os.ReadFile(".gitignore") 43 | if err != nil && !os.IsNotExist(err) { 44 | return err 45 | } 46 | oldGitIgnore := string(gitIgnoreBytes) 47 | 48 | var foundTokens = map[string]bool{ 49 | ".cache": false, 50 | ".kube": false, 51 | ".kubeconfig": false, 52 | } 53 | foundTokens[cfg.DNSProvider.ServiceAccountPath] = false 54 | foundTokens[cfg.CloudProvider.ServiceAccountPath] = false 55 | foundTokens[cfg.Git.ServiceAccountPath] = false 56 | 57 | fileScanner := bufio.NewScanner(strings.NewReader(oldGitIgnore)) 58 | fileScanner.Split(bufio.ScanLines) 59 | for fileScanner.Scan() { 60 | _, exists := foundTokens[fileScanner.Text()] 61 | if !exists { 62 | continue 63 | } 64 | foundTokens[fileScanner.Text()] = true 65 | } 66 | 67 | newGitIgnore := oldGitIgnore 68 | altered := false 69 | for name, exists := range foundTokens { 70 | if exists { 71 | continue 72 | } 73 | newGitIgnore += "\n" + name 74 | altered = true 75 | } 76 | if altered { 77 | newGitIgnore += "\n" 78 | } 79 | 80 | if oldGitIgnore != newGitIgnore { 81 | if err := os.WriteFile(".gitignore", []byte(newGitIgnore), 0644); err != nil { 82 | return err 83 | } 84 | if _, err := gitRun(ctx, "add", ".gitignore"); err != nil { 85 | return err 86 | } 87 | } 88 | 89 | if _, err := gitRun(ctx, "add", "clusters", "workshopctl.yaml"); err != nil { 90 | return err 91 | } 92 | 93 | out, err := gitRun(ctx, "remote") 94 | if err != nil { 95 | return err 96 | } 97 | if len(out) == 0 { 98 | _, err := gitRun(ctx, "remote", "add", "origin", cfg.Git.Repo) 99 | if err != nil { 100 | return err 101 | } 102 | } 103 | 104 | fmt.Println("Now run:\ngit commit -m 'Initial commit' && git push --set-upstream origin master") 105 | return nil 106 | } 107 | 108 | func gitRun(ctx context.Context, args ...string) (string, error) { 109 | out, _, err 
:= util.Command(ctx, "git", args...).Run() 110 | return out, err 111 | } 112 | -------------------------------------------------------------------------------- /pkg/gotk/gotk.go: -------------------------------------------------------------------------------- 1 | package gotk 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 10 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 11 | ) 12 | 13 | func SetupGitOps(ctx context.Context, info *config.ClusterInfo) error { 14 | mux, ok := util.GetMutex(ctx) 15 | if !ok || mux == nil { 16 | return fmt.Errorf("SetupGitOps: programmer error, couldn't get mutex for locking: %v", mux) 17 | } 18 | 19 | logger := util.Logger(ctx) 20 | logger.Debug("Waiting for mutex unlock in SetupGitOps") 21 | // Lock during this operation, as the git repo is mutually exclusive 22 | mux.Lock() 23 | logger.Infof("Bootstrapping GitOps for cluster %s...", info.Index) 24 | defer mux.Unlock() 25 | defer logger.Infof("Bootstrapping GitOps for cluster %s is done!", info.Index) 26 | 27 | // Make sure we have all prereqs 28 | kubeConfigArg := "--kubeconfig=" + info.Index.KubeConfigPath() 29 | /*_, _, err := util.Command(ctx, 30 | "gotk", 31 | kubeConfigArg, 32 | "check", 33 | "--pre", 34 | ).Run() 35 | if err != nil { 36 | return err 37 | }*/ 38 | 39 | var provider string 40 | switch info.Git.RepoStruct.Domain { 41 | case "github.com": 42 | provider = "github" 43 | case "gitlab.com": 44 | provider = "gitlab" 45 | default: 46 | return fmt.Errorf("git repo %s: unknown provider domain", info.Git.Repo) 47 | } 48 | 49 | // TODO: Upstream gotk doesn't support the --kubeconfig flag in install/bootstrap at least 50 | // Instead, we use the KUBECONFIG env var for now 51 | kubeConfigEnv := "KUBECONFIG=" + util.JoinPaths(ctx, info.Index.KubeConfigPath()) 52 | 53 | // We assume that the repo is already created, hence we can skip some flags related to that 54 | // TODO: That doesn't 
work in current gotk, rework that maybe upstream too? 55 | // This command installs the toolkit into the target cluster, and starts reconciling our 56 | // given cluster directory for changes. 57 | _, _, err := util.Command(ctx, 58 | "flux", 59 | kubeConfigArg, 60 | "bootstrap", 61 | provider, 62 | "--owner="+info.Git.RepoStruct.UserLogin, 63 | "--repository="+info.Git.RepoStruct.RepositoryName, 64 | "--path="+info.Index.ClusterDir(), 65 | // Only install these two for now. TODO: In the future, also include notifications 66 | "--components=source-controller,kustomize-controller,helm-controller", 67 | // Use a short interval as this is a highly dynamic env 68 | "--interval=30s", 69 | // TODO: Assuming personal for now 70 | "--personal", 71 | ).WithStdio(nil, os.Stdout, os.Stderr). 72 | WithEnv( 73 | // Forward the {GITHUB,GITLAB}_TOKEN variable from the config file 74 | fmt.Sprintf("%s_TOKEN=%s", strings.ToUpper(provider), info.Git.ServiceAccountContent), 75 | // Forward the PATH variable 76 | fmt.Sprintf("PATH=%s", os.Getenv("PATH")), 77 | kubeConfigEnv, 78 | ).Run() 79 | return err 80 | } 81 | -------------------------------------------------------------------------------- /pkg/logs/flag/flag.go: -------------------------------------------------------------------------------- 1 | package flag 2 | 3 | import ( 4 | "github.com/sirupsen/logrus" 5 | "github.com/spf13/pflag" 6 | ) 7 | 8 | type LogLevelFlag struct { 9 | value *logrus.Level 10 | } 11 | 12 | func (lf *LogLevelFlag) Set(val string) error { 13 | var err error 14 | *lf.value, err = logrus.ParseLevel(val) 15 | return err 16 | } 17 | 18 | func (lf *LogLevelFlag) String() string { 19 | if lf.value == nil { 20 | return "" 21 | } 22 | return lf.value.String() 23 | } 24 | 25 | func (lf *LogLevelFlag) Type() string { 26 | return "loglevel" 27 | } 28 | 29 | var _ pflag.Value = &LogLevelFlag{} 30 | 31 | func LogLevelFlagVar(fs *pflag.FlagSet, ptr *logrus.Level) { 32 | fs.Var(&LogLevelFlag{value: ptr}, "log-level", 
"Specify the loglevel for the program") 33 | } 34 | -------------------------------------------------------------------------------- /pkg/logs/logs.go: -------------------------------------------------------------------------------- 1 | package logs 2 | 3 | import ( 4 | golog "log" 5 | "os" 6 | 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | // Wrap the logrus logger together with the exit code 11 | // so we can control what log.Fatal returns 12 | type logger struct { 13 | *log.Logger 14 | ExitCode int 15 | } 16 | 17 | func newLogger() *logger { 18 | l := &logger{ 19 | Logger: log.StandardLogger(), // Use the standard logrus logger 20 | ExitCode: 1, 21 | } 22 | 23 | l.ExitFunc = func(_ int) { 24 | os.Exit(l.ExitCode) 25 | } 26 | 27 | return l 28 | } 29 | 30 | // Expose the logger 31 | var Logger *logger 32 | 33 | // Automatically initialize the logging system for Ignite 34 | func init() { 35 | // Initialize the logger 36 | Logger = newLogger() 37 | 38 | // Disable timestamp logging, but still output the seconds elapsed 39 | Logger.SetFormatter(&log.TextFormatter{ 40 | DisableTimestamp: false, 41 | FullTimestamp: false, 42 | }) 43 | 44 | // Disable the stdlib's automatic add of the timestamp in beginning of the log message, 45 | // as we stream the logs from stdlib log to this logrus instance. 
46 | golog.SetFlags(0) 47 | golog.SetOutput(Logger.Writer()) 48 | } 49 | -------------------------------------------------------------------------------- /pkg/provider/digitalocean/cloud.go: -------------------------------------------------------------------------------- 1 | package digitalocean 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net" 9 | "net/url" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 14 | "github.com/cloud-native-nordics/workshopctl/pkg/provider" 15 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 16 | "github.com/digitalocean/godo" 17 | log "github.com/sirupsen/logrus" 18 | "golang.org/x/oauth2" 19 | ) 20 | 21 | var clusterNotFound = fmt.Errorf("couldn't find cluster by name") 22 | 23 | const ( 24 | DefaultRegion = "fra1" 25 | RegionKey = "region" 26 | WorkshopctlTag = "workshopctl" 27 | ) 28 | 29 | type doCommon struct { 30 | p *config.Provider 31 | c *godo.Client 32 | dryRun bool 33 | } 34 | 35 | func initCommon(ctx context.Context, p *config.Provider) doCommon { 36 | oauthClient := oauth2.NewClient(ctx, p.TokenSource()) 37 | return doCommon{ 38 | p: p, 39 | c: godo.NewClient(oauthClient), 40 | dryRun: util.IsDryRun(ctx), 41 | } 42 | } 43 | 44 | func NewDigitalOceanCloudProvider(ctx context.Context, p *config.Provider) (provider.CloudProvider, error) { 45 | doProvider := &DigitalOceanCloudProvider{ 46 | doCommon: initCommon(ctx, p), 47 | region: DefaultRegion, 48 | } 49 | 50 | if r, ok := p.ProviderSpecific[RegionKey]; ok { 51 | doProvider.region = r 52 | } 53 | return doProvider, nil 54 | } 55 | 56 | type DigitalOceanCloudProvider struct { 57 | doCommon 58 | 59 | region string 60 | } 61 | 62 | func chooseSize(c config.NodeClaim) string { 63 | // TODO: Improve this to first check ratio between CPU and memory to distinguish 64 | // what kind of type of droplet there should be (2 * CPU = RAM e.g. 
for Basic and CPU-optimized 65 | // droplets, 4 for General Purpose, and 8 for Mem-optimized). 66 | m := map[config.NodeClaim]string{ 67 | {CPU: 2, RAM: 2, Dedicated: false}: "s-2vcpu-2gb", // $15 68 | {CPU: 2, RAM: 4, Dedicated: false}: "s-2vcpu-4gb", // $20 69 | {CPU: 4, RAM: 8, Dedicated: false}: "s-4vcpu-8gb", // $40 70 | {CPU: 8, RAM: 16, Dedicated: false}: "s-8vcpu-16gb", // $80 71 | {CPU: 2, RAM: 4, Dedicated: true}: "c-2", // $40 72 | {CPU: 4, RAM: 8, Dedicated: true}: "c-4", // $80 73 | } 74 | if str, ok := m[c]; ok { 75 | return str 76 | } 77 | log.Warnf("didn't find a good size for you, fallback to s-2vcpu-4gb") 78 | return "s-2vcpu-4gb" 79 | } 80 | 81 | func (do *DigitalOceanCloudProvider) CreateCluster(ctx context.Context, m provider.ClusterMeta, c provider.ClusterSpec) (*provider.Cluster, error) { 82 | logger := util.Logger(ctx) 83 | 84 | start := time.Now().UTC() 85 | cluster := &provider.Cluster{ 86 | ClusterMeta: m, 87 | Spec: c, 88 | Status: provider.ClusterStatus{ 89 | ProvisionStart: &start, 90 | }, 91 | } 92 | 93 | // For now we only have one nodepool, hence we hard-code this to 01 94 | nodePools := []*godo.KubernetesNodePoolCreateRequest{} 95 | for i, ng := range c.NodeGroups { 96 | // This starts from 01, and always is padded to two digits like the cluster number 97 | idx := config.ClusterNumber(i + 1) 98 | nodePoolName := fmt.Sprintf("%s-nodepool-%s", cluster.Name(), idx) 99 | nodePools = append(nodePools, &godo.KubernetesNodePoolCreateRequest{ 100 | Name: nodePoolName, 101 | 102 | Size: chooseSize(ng.NodeClaim), 103 | Count: int(ng.Instances), 104 | AutoScale: false, 105 | Tags: []string{ 106 | WorkshopctlTag, 107 | nodePoolName, 108 | cluster.Name(), 109 | }, 110 | }) 111 | } 112 | 113 | req := &godo.KubernetesClusterCreateRequest{ 114 | Name: cluster.Name(), 115 | RegionSlug: do.region, 116 | VersionSlug: cluster.Spec.Version, // TODO: Resolve c.Version correctly 117 | Tags: []string{ 118 | WorkshopctlTag, 119 | cluster.Name(), 120 | }, 
121 | NodePools: nodePools, 122 | AutoUpgrade: false, 123 | } 124 | 125 | if do.dryRun || log.IsLevelEnabled(log.DebugLevel) { 126 | b, _ := json.Marshal(req) 127 | if do.dryRun { 128 | log.Infof("Would send this request to DO: %s", string(b)) 129 | // TODO: Revamp this dry-run logic and unify it with DebugObject 130 | return cluster, nil 131 | } 132 | log.Debugf("Would send this request to DO: %s", string(b)) 133 | } 134 | // TODO: Rate limiting 135 | doCluster, err := do.getClusterByName(ctx, cluster.Name()) 136 | if err == nil { 137 | // If the cluster was found, just note it's ID 138 | cluster.Status.ID = doCluster.ID 139 | logger.Infof("Found existing cluster with name %q and ID %q", cluster.Name(), cluster.Status.ID) 140 | 141 | } else if errors.Is(err, clusterNotFound) { 142 | // If the cluster wasn't found, create it 143 | logger.Infof("Creating new cluster with name %s", cluster.Name()) 144 | doCluster, _, err = do.c.Kubernetes.Create(ctx, req) 145 | if err != nil { 146 | return nil, err 147 | } 148 | cluster.Status.ID = doCluster.ID 149 | } else { // unexpected err != nil 150 | return nil, err 151 | } 152 | 153 | err = util.Poll(ctx, nil, func() (bool, error) { 154 | kcluster, _, err := do.c.Kubernetes.Get(ctx, cluster.Status.ID) 155 | if err != nil { 156 | return false, fmt.Errorf("getting a kubernetes cluster failed: %v", err) 157 | } 158 | util.DebugObject(ctx, "Got Kubernetes cluster response from DO", kcluster) 159 | 160 | if kcluster.Status.State == godo.KubernetesClusterStatusRunning { 161 | logger.Infof("Awesome, the cluster is Ready! 
Endpoints: %s %s", kcluster.Endpoint, kcluster.IPv4) 162 | 163 | u, err := url.Parse(kcluster.Endpoint) 164 | if err != nil { 165 | return true, err // fatal; exit 166 | } 167 | cluster.Status.EndpointURL = u 168 | cluster.Status.EndpointIP = net.ParseIP(kcluster.IPv4) 169 | now := time.Now().UTC() 170 | cluster.Status.ProvisionDone = &now 171 | 172 | return true, nil 173 | } 174 | if kcluster.Status.State == godo.KubernetesClusterStatusProvisioning { 175 | return false, fmt.Errorf("Cluster is still provisioning") 176 | } 177 | 178 | return false, fmt.Errorf("Unknown state %q! Message: %q", kcluster.Status.State, kcluster.Status.Message) 179 | }) 180 | if err != nil { 181 | return nil, err 182 | } 183 | 184 | log.Infof("Downloading KubeConfig...") 185 | cc, _, err := do.c.Kubernetes.GetKubeConfig(ctx, cluster.Status.ID) 186 | if err != nil { 187 | return nil, err 188 | } 189 | cluster.Status.KubeconfigBytes = cc.KubeconfigYAML 190 | 191 | return cluster, nil 192 | } 193 | 194 | func (do *DigitalOceanCloudProvider) DeleteCluster(ctx context.Context, m provider.ClusterMeta) error { 195 | 196 | cluster, err := do.getClusterByName(ctx, m.Name()) 197 | if err != nil { 198 | return err 199 | } 200 | 201 | util.DebugObject(ctx, "Found wanted cluster", cluster) 202 | 203 | // List all relevant LBs 204 | lbs, err := do.listLBsForCluster(ctx, cluster) 205 | if err != nil { 206 | return err 207 | } 208 | util.DebugObject(ctx, "LBs", lbs) 209 | 210 | for _, lb := range lbs { 211 | if err := do.deleteLB(ctx, lb); err != nil { 212 | return err 213 | } 214 | } 215 | 216 | return do.deleteCluster(ctx, cluster) 217 | } 218 | 219 | func (do *DigitalOceanCloudProvider) getClusterByName(ctx context.Context, name string) (*godo.KubernetesCluster, error) { 220 | logger := util.Logger(ctx) 221 | 222 | logger.Debug("Listing Kubernetes clusters...") 223 | clusters, _, err := do.c.Kubernetes.List(ctx, &godo.ListOptions{}) 224 | if err != nil { 225 | return nil, err 226 | } 227 | 228 | for 
_, cluster := range clusters { 229 | // Filter by name 230 | if cluster.Name != name { 231 | logger.Debugf("Cluster name %s isn't desired %s", cluster.Name, name) 232 | continue 233 | } 234 | // Found it 235 | return cluster, nil 236 | } 237 | 238 | return nil, fmt.Errorf("%w: %s", clusterNotFound, name) 239 | } 240 | 241 | func (do *DigitalOceanCloudProvider) deleteCluster(ctx context.Context, c *godo.KubernetesCluster) error { 242 | logger := util.Logger(ctx) 243 | 244 | if util.IsDryRun(ctx) { 245 | logger.Infof("Would delete Kubernetes cluster %s", c.Name) 246 | return nil 247 | } 248 | logger.Infof("Deleting Kubernetes cluster %s", c.Name) 249 | _, err := do.c.Kubernetes.Delete(ctx, c.ID) 250 | return err 251 | } 252 | 253 | func (do *DigitalOceanCloudProvider) listLBsForCluster(ctx context.Context, cluster *godo.KubernetesCluster) ([]godo.LoadBalancer, error) { 254 | logger := util.Logger(ctx) 255 | 256 | droplets := map[string]struct{}{} 257 | for _, nodePool := range cluster.NodePools { 258 | for _, node := range nodePool.Nodes { 259 | logger.Debugf("Found droplet with ID %s in node pool %s", node.DropletID, nodePool.Name) 260 | droplets[node.DropletID] = struct{}{} 261 | } 262 | } 263 | 264 | lbs, _, err := do.c.LoadBalancers.List(ctx, &godo.ListOptions{}) 265 | if err != nil { 266 | return nil, err 267 | } 268 | 269 | clusterLBs := []godo.LoadBalancer{} 270 | for _, lb := range lbs { 271 | // Is there any droplet in our current cluster that is served by this LB? 
272 | found := false 273 | for _, lbDropletID := range lb.DropletIDs { 274 | // lbDropletID is an int but the same droplet IDs above are strings, hence cast this to a string 275 | lbDropletIDStr := strconv.Itoa(lbDropletID) 276 | if _, ok := droplets[lbDropletIDStr]; ok { 277 | logger.Debugf("LB %s is served by droplet with ID %s", lb.Name, lbDropletIDStr) 278 | found = true 279 | break 280 | } 281 | } 282 | // If this LB didn't match any of our known droplets, it's isn't ours so proceed 283 | if !found { 284 | logger.Debugf("LB %s isn't served by any of the droplets in this cluster", lb.Name) 285 | continue 286 | } 287 | // Append to the list 288 | clusterLBs = append(clusterLBs, lb) 289 | } 290 | return clusterLBs, nil 291 | } 292 | 293 | func (do *DigitalOceanCloudProvider) deleteLB(ctx context.Context, lb godo.LoadBalancer) error { 294 | logger := util.Logger(ctx) 295 | 296 | if util.IsDryRun(ctx) { 297 | logger.Infof("Would delete Kubernetes load balancer %s", lb.Name) 298 | return nil 299 | } 300 | logger.Infof("Deleting Kubernetes load balancer %s", lb.Name) 301 | _, err := do.c.LoadBalancers.Delete(ctx, lb.ID) 302 | return err 303 | } 304 | -------------------------------------------------------------------------------- /pkg/provider/digitalocean/dns.go: -------------------------------------------------------------------------------- 1 | package digitalocean 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "strings" 9 | 10 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 11 | "github.com/cloud-native-nordics/workshopctl/pkg/config/keyval" 12 | "github.com/cloud-native-nordics/workshopctl/pkg/constants" 13 | "github.com/cloud-native-nordics/workshopctl/pkg/gen" 14 | "github.com/cloud-native-nordics/workshopctl/pkg/provider" 15 | "github.com/cloud-native-nordics/workshopctl/pkg/util" 16 | "github.com/digitalocean/godo" 17 | "github.com/sirupsen/logrus" 18 | kyaml "sigs.k8s.io/kustomize/kyaml/yaml" 19 | ) 20 | 21 | const 
expectedNSDomains = `ns3.digitalocean.com. 22 | ns1.digitalocean.com. 23 | ns2.digitalocean.com. 24 | ` 25 | 26 | func NewDigitalOceanDNSProvider(ctx context.Context, p *config.Provider, rootDomain string) (provider.DNSProvider, error) { 27 | return &DigitalOceanDNSProvider{ 28 | doCommon: initCommon(ctx, p), 29 | rootDomain: rootDomain, 30 | }, nil 31 | } 32 | 33 | type DigitalOceanDNSProvider struct { 34 | doCommon 35 | rootDomain string 36 | } 37 | 38 | func (do *DigitalOceanDNSProvider) ChartProcessors() []gen.Processor { 39 | return []gen.Processor{&dnsProcessor{}} 40 | } 41 | 42 | func (do *DigitalOceanDNSProvider) ValuesProcessors() []gen.Processor { 43 | return nil 44 | } 45 | 46 | func (do *DigitalOceanDNSProvider) EnsureZone(ctx context.Context) error { 47 | logger := util.Logger(ctx) 48 | 49 | out, _, err := util.ShellCommand(ctx, "dig +short NS %s", do.rootDomain). 50 | WithDryRunContent(expectedNSDomains). 51 | Run() 52 | if err != nil { 53 | return err 54 | } 55 | for i := 1; i <= 3; i++ { 56 | domain := fmt.Sprintf("ns%d.digitalocean.com.", i) 57 | if !strings.Contains(out, domain) { 58 | return fmt.Errorf("%s doesn't have an NS record to %s", do.rootDomain, domain) 59 | } 60 | } 61 | 62 | // First, check if it exists 63 | logger.Debugf("Ensuring domain %s is managed by DigitalOcean DNS", do.rootDomain) 64 | domain, resp, err := do.c.Domains.Get(ctx, do.rootDomain) 65 | if err == nil { 66 | util.DebugObject(ctx, "Domain already exists", domain) 67 | return nil 68 | } else if resp.StatusCode != http.StatusNotFound { // err != nil and status code is not 404 69 | return err 70 | } // else resp.StatusCode == http.StatusNotFound 71 | return do.createDomain(ctx, do.rootDomain, logger) 72 | } 73 | 74 | func (do *DigitalOceanDNSProvider) createDomain(ctx context.Context, rootDomain string, logger *logrus.Entry) error { 75 | if do.dryRun { 76 | logger.Infof("Would create domain %s in DigitalOcean DNS", rootDomain) 77 | return nil 78 | } 79 | // Create the 
domain 80 | logger.Infof("Creating domain %s in DigitalOcean DNS", rootDomain) 81 | domain, _, err := do.c.Domains.Create(ctx, &godo.DomainCreateRequest{ 82 | Name: do.rootDomain, 83 | }) 84 | if err != nil { 85 | return err 86 | } 87 | util.DebugObject(ctx, "Created domain", domain) 88 | return err 89 | } 90 | 91 | func (do *DigitalOceanDNSProvider) CleanupRecords(ctx context.Context, m provider.ClusterMeta) error { 92 | logger := util.Logger(ctx) 93 | 94 | subdomain := m.Index.Subdomain() 95 | logger.Debugf("Asking for records for domain %s and sub-domain %s", do.rootDomain, subdomain) 96 | // List all records for domain 97 | records, _, err := do.c.Domains.Records(ctx, do.rootDomain, &godo.ListOptions{}) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | for _, record := range records { 103 | logger.Debugf("Observed record: %s", record) 104 | // Skip records that aren't associated with the given subdomain 105 | // TODO: Maybe be even more restrictive/specific about what to delete 106 | // e.g. look at heritage=external-dns fields, or only delete A/TXT records. 
107 | if !strings.HasSuffix(record.Name, subdomain) { 108 | logger.Debugf("Skipped record: %s", record) 109 | continue 110 | } 111 | // Delete records that are related to this subdomain 112 | if err := do.deleteRecord(ctx, &record, logger); err != nil { 113 | return err 114 | } 115 | } 116 | return nil 117 | } 118 | 119 | func (do *DigitalOceanDNSProvider) deleteRecord(ctx context.Context, record *godo.DomainRecord, logger *logrus.Entry) error { 120 | recordStr := do.recordStr(record) 121 | if do.dryRun { 122 | logger.Infof("Would delete record: %s", recordStr) 123 | return nil 124 | } 125 | logger.Infof("Deleting record: %s", recordStr) 126 | _, err := do.c.Domains.DeleteRecord(ctx, do.rootDomain, record.ID) 127 | return err 128 | } 129 | 130 | func (do *DigitalOceanDNSProvider) recordStr(record *godo.DomainRecord) string { 131 | return fmt.Sprintf("%s %s.%s: %s", record.Type, record.Name, do.rootDomain, record.Data) 132 | } 133 | 134 | var ( 135 | externalDNSEnvValue = kyaml.MustParse(` 136 | - name: DO_TOKEN 137 | valueFrom: 138 | secretKeyRef: 139 | name: workshopctl 140 | key: DNS_PROVIDER_SERVICEACCOUNT 141 | `) 142 | 143 | traefikDNSEnvValue = kyaml.MustParse(` 144 | - name: DO_AUTH_TOKEN 145 | valueFrom: 146 | secretKeyRef: 147 | name: workshopctl 148 | key: DNS_PROVIDER_SERVICEACCOUNT 149 | `) 150 | ) 151 | 152 | type dnsProcessor struct{} 153 | 154 | func (pr *dnsProcessor) Process(ctx context.Context, cd *gen.ChartData, p *keyval.Parameters, r io.Reader, w io.Writer) error { 155 | return util.KYAMLFilter(r, w, util.KYAMLFilterFunc( 156 | func(node *kyaml.RNode) (*kyaml.RNode, error) { 157 | return node, util.KYAMLResourceMetaMatcher(node, util.KYAMLResourceMetaMatch{ 158 | Kind: "Deployment", 159 | Name: "traefik", 160 | Namespace: constants.WorkshopctlNamespace, 161 | Func: func() error { 162 | return node.PipeE( 163 | kyaml.LookupCreate(kyaml.SequenceNode, "spec", "template", "spec", "containers", "[name=traefik]", "env"), 164 | 
kyaml.Append(traefikDNSEnvValue.YNode().Content...)) 165 | }, 166 | }, util.KYAMLResourceMetaMatch{ 167 | Kind: "Deployment", 168 | Name: "external-dns", 169 | Namespace: constants.WorkshopctlNamespace, 170 | Func: func() error { 171 | return node.PipeE( 172 | kyaml.LookupCreate(kyaml.SequenceNode, "spec", "template", "spec", "containers", "[name=external-dns]", "env"), 173 | kyaml.Append(externalDNSEnvValue.YNode().Content...)) 174 | }, 175 | }) 176 | }, 177 | )) 178 | } 179 | -------------------------------------------------------------------------------- /pkg/provider/provider.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "net/url" 7 | "time" 8 | 9 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 10 | "github.com/cloud-native-nordics/workshopctl/pkg/constants" 11 | "github.com/cloud-native-nordics/workshopctl/pkg/gen" 12 | ) 13 | 14 | type Cluster struct { 15 | ClusterMeta 16 | Spec ClusterSpec 17 | Status ClusterStatus 18 | } 19 | 20 | type ClusterMeta struct { 21 | NamePrefix string 22 | Index config.ClusterNumber 23 | } 24 | 25 | func (m ClusterMeta) Name() string { 26 | return constants.ClusterName(m.NamePrefix, m.Index) 27 | } 28 | 29 | type ClusterSpec struct { 30 | Version string 31 | NodeGroups []config.NodeGroup 32 | } 33 | 34 | type ClusterStatus struct { 35 | ID string 36 | ProvisionStart *time.Time 37 | ProvisionDone *time.Time 38 | EndpointURL *url.URL 39 | EndpointIP net.IP 40 | KubeconfigBytes []byte 41 | } 42 | 43 | func (s ClusterStatus) ProvisionTime() time.Duration { 44 | if s.ProvisionStart == nil || s.ProvisionDone == nil { 45 | return 0 46 | } 47 | return s.ProvisionDone.Sub(*s.ProvisionStart) 48 | } 49 | 50 | type CloudProviderFactory interface { 51 | NewCloudProvider(ctx context.Context, p *config.Provider) (CloudProvider, error) 52 | } 53 | 54 | type CloudProvider interface { 55 | // CreateCluster creates a cluster. 
This call is _blocking_ until the cluster is properly provisioned 56 | CreateCluster(ctx context.Context, m ClusterMeta, c ClusterSpec) (*Cluster, error) 57 | // DeleteCluster deletes a cluster and its associated load balancers 58 | DeleteCluster(ctx context.Context, m ClusterMeta) error 59 | } 60 | 61 | type DNSProviderFactory interface { 62 | NewDNSProvider(ctx context.Context, p *config.Provider, rootDomain string) (DNSProvider, error) 63 | } 64 | 65 | type DNSProvider interface { 66 | ChartProcessors() []gen.Processor 67 | ValuesProcessors() []gen.Processor 68 | // EnsureZone ensures that the root domain zone is registered with the DNS provider 69 | // This is run at apply-time before the individual cluster processors 70 | EnsureZone(ctx context.Context) error 71 | // CleanupRecords deletes records associated with a cluster 72 | CleanupRecords(ctx context.Context, m ClusterMeta) error 73 | } 74 | -------------------------------------------------------------------------------- /pkg/provider/providers/providers.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/cloud-native-nordics/workshopctl/pkg/config" 8 | "github.com/cloud-native-nordics/workshopctl/pkg/provider" 9 | "github.com/cloud-native-nordics/workshopctl/pkg/provider/digitalocean" 10 | ) 11 | 12 | func CloudProviders() provider.CloudProviderFactory { 13 | return providers 14 | } 15 | 16 | func DNSProviders() provider.DNSProviderFactory { 17 | return providers 18 | } 19 | 20 | var providers = providersImpl{} 21 | 22 | type cloudFunc func(ctx context.Context, p *config.Provider) (provider.CloudProvider, error) 23 | 24 | type dnsFunc func(ctx context.Context, p *config.Provider, rootDomain string) (provider.DNSProvider, error) 25 | 26 | var cloudProviders = map[string]cloudFunc{ 27 | "digitalocean": digitalocean.NewDigitalOceanCloudProvider, 28 | } 29 | 30 | var dnsProviders = 
map[string]dnsFunc{ 31 | "digitalocean": digitalocean.NewDigitalOceanDNSProvider, 32 | } 33 | 34 | type providersImpl struct{} 35 | 36 | func (providersImpl) NewCloudProvider(ctx context.Context, p *config.Provider) (provider.CloudProvider, error) { 37 | fn, ok := cloudProviders[p.Name] 38 | if !ok { 39 | return nil, fmt.Errorf("cloud provider %s not supported", p.Name) 40 | } 41 | return fn(ctx, p) 42 | } 43 | 44 | func (providersImpl) NewDNSProvider(ctx context.Context, p *config.Provider, rootDomain string) (provider.DNSProvider, error) { 45 | fn, ok := dnsProviders[p.Name] 46 | if !ok { 47 | return nil, fmt.Errorf("DNS provider %s not supported", p.Name) 48 | } 49 | return fn(ctx, p, rootDomain) 50 | } 51 | -------------------------------------------------------------------------------- /pkg/util/context.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "path/filepath" 7 | "sync" 8 | 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | func NewContext(dryRun bool, rootPath string) context.Context { 13 | ctx := context.Background() 14 | ctx = WithDryRun(ctx, dryRun) 15 | ctx = withRootPath(ctx, rootPath) 16 | return ctx 17 | } 18 | 19 | var clusterNumberKey = clusterNumberKeyImpl{} 20 | 21 | type clusterNumberKeyImpl struct{} 22 | 23 | func WithClusterNumber(ctx context.Context, n uint16) context.Context { 24 | return context.WithValue(ctx, clusterNumberKey, n) 25 | } 26 | 27 | func getClusterNumber(ctx context.Context) (uint16, bool) { 28 | n, ok := ctx.Value(clusterNumberKey).(uint16) 29 | if !ok { 30 | logrus.Debug("Didn't find cluster number from context") 31 | } 32 | return n, ok 33 | } 34 | 35 | var dryRunKey = dryRunKeyImpl{} 36 | 37 | type dryRunKeyImpl struct{} 38 | 39 | func WithDryRun(ctx context.Context, dryRun bool) context.Context { 40 | return context.WithValue(ctx, dryRunKey, dryRun) 41 | } 42 | 43 | func IsDryRun(ctx context.Context) bool { 44 | dryRun, 
ok := ctx.Value(dryRunKey).(bool) 45 | if !ok { 46 | logrus.Warn("Expected to be able to get dry-run from context, but got nothing") 47 | logrus.Warn("Setting dryRun to be true because of this") 48 | dryRun = true 49 | } 50 | return dryRun 51 | } 52 | 53 | var rootPathKey = rootPathKeyImpl{} 54 | 55 | type rootPathKeyImpl struct{} 56 | 57 | func withRootPath(ctx context.Context, rootPath string) context.Context { 58 | // Always make the path absolute before putting it into the context 59 | if !filepath.IsAbs(rootPath) { 60 | wd, err := os.Getwd() 61 | if err != nil { 62 | logrus.Fatalf("Failed to get working directory due to: %v. Either fix the underlying problem or specify an absolute path to --root-dir", err) 63 | } 64 | rootPath = filepath.Join(wd, rootPath) 65 | } 66 | return context.WithValue(ctx, rootPathKey, rootPath) 67 | } 68 | 69 | func getRootPath(ctx context.Context) (string, bool) { 70 | rootPath, ok := ctx.Value(rootPathKey).(string) 71 | if !ok { 72 | logrus.Debug("Didn't find rootPath from context, defaulting to '.'") 73 | return ".", false 74 | } 75 | return rootPath, ok 76 | } 77 | 78 | // If called without any filePaths this just returns the RootPath 79 | func JoinPaths(ctx context.Context, filePaths ...string) string { 80 | rootPath, _ := getRootPath(ctx) 81 | filePaths = append([]string{rootPath}, filePaths...) 82 | return filepath.Join(filePaths...) 
83 | } 84 | 85 | var muxKey = muxKeyImpl{} 86 | 87 | type muxKeyImpl struct{} 88 | 89 | func WithMutex(ctx context.Context, mux *sync.Mutex) context.Context { 90 | return context.WithValue(ctx, muxKey, mux) 91 | } 92 | 93 | func GetMutex(ctx context.Context) (*sync.Mutex, bool) { 94 | mux, ok := ctx.Value(muxKey).(*sync.Mutex) 95 | if !ok { 96 | logrus.Debug("Didn't find mux from context, defaulting to nil") 97 | return nil, false 98 | } 99 | return mux, ok 100 | } 101 | 102 | func Logger(ctx context.Context) *logrus.Entry { 103 | logger := logrus.WithContext(ctx) 104 | // If cluster number is set, add that logging field 105 | if n, ok := getClusterNumber(ctx); ok { 106 | logger = logger.WithField("cluster", n) 107 | } 108 | // If root path is set on ctx and debug logging is enabled, add the root-path field 109 | if rootPath, ok := getRootPath(ctx); ok && logrus.IsLevelEnabled(logrus.DebugLevel) { 110 | logger = logger.WithField("root-path", rootPath) 111 | } 112 | return logger 113 | } 114 | -------------------------------------------------------------------------------- /pkg/util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/rand" 7 | "encoding/hex" 8 | "encoding/json" 9 | "fmt" 10 | "text/template" // text/template, not html/template: ApplyTemplate renders YAML/config text, and html/template's contextual HTML escaping would corrupt non-HTML output 11 | "io" 12 | "io/ioutil" 13 | "os" 14 | "os/exec" 15 | "strings" 16 | "time" 17 | 18 | "github.com/otiai10/copy" 19 | "github.com/sirupsen/logrus" 20 | log "github.com/sirupsen/logrus" 21 | "k8s.io/apimachinery/pkg/util/wait" 22 | "sigs.k8s.io/kustomize/kyaml/kio" 23 | kyaml "sigs.k8s.io/kustomize/kyaml/yaml" 24 | "sigs.k8s.io/yaml" 25 | ) 26 | 27 | func PathExists(path string) (bool, os.FileInfo) { 28 | info, err := os.Stat(path) 29 | if os.IsNotExist(err) { 30 | return false, nil 31 | } 32 | 33 | return true, info 34 | } 35 | 36 | func FileExists(filename string) bool { 37 | exists, info := PathExists(filename) 38 | if !exists { 39 | return false 40 
| } 41 | 42 | return !info.IsDir() 43 | } 44 | 45 | // Copy copies both files and directories 46 | func Copy(src string, dst string) error { 47 | log.Debugf("Copying %q to %q", src, dst) 48 | return copy.Copy(src, dst) 49 | } 50 | 51 | func Poll(ctx context.Context, d *time.Duration, fn wait.ConditionFunc) error { 52 | logger := Logger(ctx) 53 | 54 | logger.Traceln("Poll function started") 55 | defer logger.Traceln("Poll function quit") 56 | 57 | duration := 15 * time.Second 58 | if d != nil { 59 | duration = *d 60 | } 61 | // Set a deadline at 10 mins 62 | ctxWithDeadline, cancel := context.WithTimeout(ctx, 10*time.Minute) 63 | // releases resources if operation completes before timeout elapses 64 | defer cancel() 65 | 66 | tryCount := 0 67 | return wait.PollImmediateUntil(duration, func() (bool, error) { 68 | tryCount++ 69 | errFn := logger.Debugf 70 | if tryCount%3 == 0 { // print info every third time 71 | errFn = logger.Infof 72 | } 73 | 74 | done, err := fn() 75 | logger.Tracef("Poll function (round %d) returned %t %v", tryCount, done, err) 76 | if err != nil { 77 | // if we're not "done" yet, set the err to nil so that PollImmediateInfinite doesn't exit 78 | if !done { 79 | errFn("Polling continues due to: %v", err) 80 | err = nil 81 | } 82 | } 83 | if IsDryRun(ctx) { 84 | logger.Info("This is a dry-run, hence one loop run is enough. 
Under normal circumstances, this loop would continue until the condition is met.") 85 | return true, nil 86 | } 87 | return done, err 88 | }, ctxWithDeadline.Done()) 89 | } 90 | 91 | func DebugObject(ctx context.Context, msg string, obj interface{}) { 92 | // If debug logging isn't enabled, just exit 93 | if !log.IsLevelEnabled(log.DebugLevel) { 94 | return 95 | } 96 | 97 | logger := Logger(ctx) 98 | b, err := json.Marshal(obj) 99 | if err != nil { 100 | logger.Errorf("DebugObject failed with %v", err) 101 | return 102 | } 103 | logger.Debugf("%s: %s", msg, string(b)) 104 | } 105 | 106 | // RandomSHA returns a hex-encoded string from {byteLen} random bytes. 107 | func RandomSHA(byteLen int) (string, error) { 108 | b := make([]byte, byteLen) 109 | _, err := rand.Read(b) 110 | if err != nil { 111 | return "", err 112 | } 113 | return hex.EncodeToString(b), nil 114 | } 115 | 116 | func ReadYAMLFile(file string, obj interface{}) error { 117 | b, err := ioutil.ReadFile(file) 118 | if err != nil { 119 | return err 120 | } 121 | return yaml.UnmarshalStrict(b, obj) 122 | } 123 | 124 | func WriteYAMLFile(ctx context.Context, file string, obj interface{}) error { 125 | b, err := yaml.Marshal(obj) 126 | if err != nil { 127 | return err 128 | } 129 | return WriteFile(ctx, file, b) 130 | } 131 | 132 | func WriteFile(ctx context.Context, file string, b []byte) error { 133 | logger := Logger(ctx) 134 | if IsDryRun(ctx) { 135 | logger.Infof("Would write the following contents to file %q: %s", file, string(b)) 136 | return nil 137 | } 138 | logger.Debugf("Writing the following contents to file %q: %s", file, string(b)) 139 | return ioutil.WriteFile(file, b, 0644) 140 | } 141 | 142 | func DeletePath(ctx context.Context, fileOrFolder string) error { 143 | logger := Logger(ctx) 144 | if IsDryRun(ctx) { 145 | logger.Infof("Would delete path %q", fileOrFolder) 146 | return nil 147 | } 148 | logger.Debugf("Deleting path %q", fileOrFolder) 149 | return os.RemoveAll(fileOrFolder) 150 | } 
151 | 152 | func ApplyTemplate(tmpl string, data interface{}) ([]byte, error) { 153 | buf := &bytes.Buffer{} 154 | if err := template.Must(template.New("tmpl").Parse(tmpl)).Execute(buf, data); err != nil { 155 | return nil, err 156 | } 157 | return buf.Bytes(), nil 158 | } 159 | 160 | type KYAMLFilterFunc func(*kyaml.RNode) (*kyaml.RNode, error) 161 | 162 | func KYAMLFilter(r io.Reader, w io.Writer, filters ...KYAMLFilterFunc) error { 163 | setAnnotationFn := kio.FilterFunc(func(operand []*kyaml.RNode) ([]*kyaml.RNode, error) { 164 | var err error 165 | for i := range operand { 166 | resource := operand[i] 167 | for _, filter := range filters { 168 | resource, err = filter(resource) 169 | if err != nil { 170 | return nil, err 171 | } 172 | } 173 | } 174 | return operand, nil 175 | }) 176 | 177 | return kio.Pipeline{ 178 | Inputs: []kio.Reader{&kio.ByteReader{Reader: r}}, 179 | Filters: []kio.Filter{setAnnotationFn}, 180 | Outputs: []kio.Writer{kio.ByteWriter{Writer: w}}, 181 | }.Execute() 182 | } 183 | 184 | type KYAMLResourceMetaMatch struct { 185 | Kind string 186 | Name string 187 | Namespace string 188 | Func func() error 189 | } 190 | 191 | func KYAMLResourceMetaMatcher(node *kyaml.RNode, matchStatements ...KYAMLResourceMetaMatch) error { 192 | meta, err := node.GetMeta() 193 | if err != nil { 194 | return err 195 | } 196 | for _, statement := range matchStatements { 197 | // If statement.Kind is set, it must match meta.Kind in order not be skipped 198 | if len(statement.Kind) != 0 && statement.Kind != meta.Kind { 199 | continue 200 | } 201 | // If statement.Name is set, it must match meta.Name in order not be skipped 202 | if len(statement.Name) != 0 && statement.Name != meta.Name { 203 | continue 204 | } 205 | // If statement.Namespace is set, it must match meta.Namespace in order not be skipped 206 | if len(statement.Namespace) != 0 && statement.Namespace != meta.Namespace { 207 | continue 208 | } 209 | // All case statements matched, let's run the function 
210 | if err := statement.Func(); err != nil { 211 | return err 212 | } 213 | } 214 | return nil 215 | } 216 | 217 | func Command(ctx context.Context, command string, args ...string) *ExecUtil { 218 | return &ExecUtil{ 219 | cmd: exec.CommandContext(ctx, command, args...), 220 | outBuf: new(bytes.Buffer), 221 | ctx: ctx, 222 | logger: Logger(ctx), 223 | } 224 | } 225 | 226 | func ShellCommand(ctx context.Context, format string, args ...interface{}) *ExecUtil { 227 | return Command(ctx, "/bin/sh", "-c", fmt.Sprintf(format, args...)) 228 | } 229 | 230 | type ExecUtil struct { 231 | cmd *exec.Cmd 232 | outBuf *bytes.Buffer 233 | ctx context.Context 234 | logger *logrus.Entry 235 | dryRunOut string 236 | } 237 | 238 | func (e *ExecUtil) Cmd() *exec.Cmd { 239 | return e.cmd 240 | } 241 | 242 | func (e *ExecUtil) WithStdio(stdin io.Reader, stdout, stderr io.Writer) *ExecUtil { 243 | if stdin != nil { 244 | e.logger.Debug("Set command stdin") 245 | e.cmd.Stdin = stdin 246 | } 247 | if stdout != nil { 248 | e.logger.Debug("Set command stdout") 249 | e.cmd.Stdout = stdout 250 | } 251 | if stderr != nil { 252 | e.logger.Debug("Set command stderr") 253 | e.cmd.Stderr = stderr 254 | } 255 | return e 256 | } 257 | 258 | func (e *ExecUtil) WithPwd(pwd string) *ExecUtil { 259 | e.logger.Debugf("Set command pwd: %q", pwd) 260 | e.cmd.Dir = pwd 261 | return e 262 | } 263 | 264 | func (e *ExecUtil) WithEnv(envVars ...string) *ExecUtil { 265 | e.logger.Debugf("Set command env vars: %v", envVars) 266 | e.cmd.Env = append(e.cmd.Env, envVars...) 
267 | return e 268 | } 269 | 270 | func (e *ExecUtil) WithDryRunContent(out string) *ExecUtil { 271 | e.dryRunOut = out 272 | return e 273 | } 274 | 275 | func (e *ExecUtil) Run() (output string, exitCode int, cmdErr error) { 276 | cmdArgs := strings.Join(e.cmd.Args, " ") 277 | 278 | // Don't do this if we're dry-running 279 | if IsDryRun(e.ctx) { 280 | e.logger.Infof("Would execute command %q", cmdArgs) 281 | return e.dryRunOut, 0, nil 282 | } 283 | 284 | // Always capture stdout output to e.outBuf 285 | if e.cmd.Stdout != nil { 286 | e.cmd.Stdout = io.MultiWriter(e.cmd.Stdout, e.outBuf) 287 | } else { 288 | e.cmd.Stdout = e.outBuf 289 | } 290 | // Always capture stderr output to e.outBuf 291 | if e.cmd.Stderr != nil { 292 | e.cmd.Stderr = io.MultiWriter(e.cmd.Stderr, e.outBuf) 293 | } else { 294 | e.cmd.Stderr = e.outBuf 295 | } 296 | // Run command 297 | e.logger.Debugf("Running command %q", cmdArgs) 298 | err := e.cmd.Run() 299 | 300 | // Capture combined output 301 | output = string(bytes.TrimSpace(e.outBuf.Bytes())) 302 | if len(output) != 0 { 303 | e.logger.Debugf("Command %q produced output: %s", cmdArgs, output) 304 | } 305 | 306 | // Handle the error 307 | if err != nil { 308 | exitCodeStr := "'unknown'" 309 | if exitError, ok := err.(*exec.ExitError); ok { 310 | exitCode = exitError.ExitCode() 311 | exitCodeStr = fmt.Sprintf("%d", exitCode) 312 | } 313 | 314 | cmdErr = fmt.Errorf("external command %q exited with code %s, error: %w and output: %s", cmdArgs, exitCodeStr, err, output) 315 | e.logger.Debugf("Command error: %v", cmdErr) 316 | } 317 | return 318 | } 319 | -------------------------------------------------------------------------------- /pkg/version/cmd/command.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/cloud-native-nordics/workshopctl/pkg/version" 9 | "github.com/spf13/cobra" 10 | "sigs.k8s.io/yaml" 11 | ) 12 | 
13 | // NewCmdVersion provides the version information of ignite 14 | func NewCmdVersion(out io.Writer) *cobra.Command { 15 | var output string 16 | cmd := &cobra.Command{ 17 | Use: "version", 18 | Short: "Print the version", 19 | RunE: func(cmd *cobra.Command, args []string) error { 20 | return RunVersion(out, output) 21 | }, 22 | } 23 | 24 | cmd.Flags().StringVarP(&output, "output", "o", output, "Output format; available options are 'yaml', 'json' and 'short'") 25 | return cmd 26 | } 27 | 28 | // RunVersion provides the version information for the specified format 29 | func RunVersion(out io.Writer, output string) error { 30 | v := version.Get() 31 | switch output { 32 | case "": 33 | fmt.Fprintf(out, "Version: %#v\n", v) 34 | case "short": 35 | fmt.Fprintf(out, "%s\n", v) 36 | case "yaml": 37 | y, err := yaml.Marshal(&v) 38 | if err != nil { 39 | return err 40 | } 41 | fmt.Fprintln(out, string(y)) 42 | case "json": 43 | y, err := json.MarshalIndent(&v, "", " ") 44 | if err != nil { 45 | return err 46 | } 47 | fmt.Fprintln(out, string(y)) 48 | default: 49 | return fmt.Errorf("invalid output format: %s", output) 50 | } 51 | 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | ) 7 | 8 | var ( 9 | gitMajor = "" 10 | gitMinor = "" 11 | gitVersion = "" 12 | gitCommit = "" 13 | gitTreeState = "" 14 | buildDate = "" 15 | ) 16 | 17 | // Info stores information about a component's version 18 | type Info struct { 19 | Major string `json:"major"` 20 | Minor string `json:"minor"` 21 | GitVersion string `json:"gitVersion"` 22 | GitCommit string `json:"gitCommit"` 23 | GitTreeState string `json:"gitTreeState"` 24 | BuildDate string `json:"buildDate"` 25 | GoVersion string `json:"goVersion"` 26 | Compiler string `json:"compiler"` 27 | Platform string 
`json:"platform"` 28 | } 29 | 30 | // String returns info as a human-friendly version string. 31 | func (info Info) String() string { 32 | return info.GitVersion 33 | } 34 | 35 | // Get gets the version 36 | func Get() Info { 37 | return Info{ 38 | Major: gitMajor, 39 | Minor: gitMinor, 40 | GitVersion: gitVersion, 41 | GitCommit: gitCommit, 42 | GitTreeState: gitTreeState, 43 | BuildDate: buildDate, 44 | GoVersion: runtime.Version(), 45 | Compiler: runtime.Compiler, 46 | Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/README.md: -------------------------------------------------------------------------------- 1 | # Sample podinfo application 2 | 3 | This first exercise will be taught by the instructor of the workshop, and 4 | everybody in the workshop does these exercises at the same pace. 5 | 6 | You will learn: 7 | 8 | - What a Namespace, Service, Pod, Deployment, Ingress, ConfigMap and Secret is 9 | - How to use Liveness and Readiness Probes on a Pod 10 | - How to set Resource Requests and Limits on a Pod 11 | - How to expose ConfigMap and Secret information in a Pod 12 | - How to mount a Volume into a Pod 13 | - How to expose your Pod replicas as a Service 14 | - How to expose your Service with an Ingress 15 | - How to do a rolling upgrade of a Deployment 16 | - How to monitor your service with Prometheus using a ServiceMonitor 17 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/README.md: -------------------------------------------------------------------------------- 1 | # Solution to the first exercises 2 | 3 | Here are the correct YAML files for the first exercise. 4 | Use these only if you really need to. Ask for help first. 
5 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | creationTimestamp: null 5 | name: podinfo 6 | namespace: demo 7 | data: 8 | IS_KUBERNETES_FINLAND: "true" 9 | my-config-file.json: | 10 | { "amazing": true } 11 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: podinfo 6 | name: podinfo 7 | namespace: demo 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: podinfo 13 | template: 14 | metadata: 15 | labels: 16 | app: podinfo 17 | spec: 18 | containers: 19 | - image: stefanprodan/podinfo:1.5.0 20 | name: podinfo 21 | command: 22 | - ./podinfo 23 | - --config-path=/configmap 24 | readinessProbe: 25 | httpGet: 26 | path: /readyz 27 | port: 9898 28 | initialDelaySeconds: 1 29 | periodSeconds: 5 30 | failureThreshold: 1 31 | livenessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 9898 35 | initialDelaySeconds: 1 36 | periodSeconds: 10 37 | failureThreshold: 2 38 | resources: 39 | requests: 40 | memory: "32Mi" 41 | cpu: "10m" 42 | limits: 43 | memory: "32Mi" 44 | cpu: "10m" 45 | env: 46 | - name: IS_KUBERNETES_FINLAND 47 | valueFrom: 48 | configMapKeyRef: 49 | # The ConfigMap containing the value you want to assign to IS_KUBERNETES_FINLAND 50 | name: podinfo 51 | # Specify the key associated with the value 52 | key: IS_KUBERNETES_FINLAND 53 | - name: SECRET_PASSWORD 54 | valueFrom: 55 | secretKeyRef: 56 | # The ConfigMap containing the value you want to assign to SECRET_PASSWORD 57 | name: podinfo 58 | # Specify the key associated with the value 59 | key: SECRET_PASSWORD 60 | volumeMounts: 61 | - 
name: configmap-projection 62 | mountPath: /configmap 63 | - name: secret-projection 64 | mountPath: /secret 65 | volumes: 66 | - name: configmap-projection 67 | configMap: 68 | # Provide the name of the ConfigMap containing the files you want 69 | # to add to the container 70 | name: podinfo 71 | - name: secret-projection 72 | secret: 73 | # Provide the name of the Secret containing the files you want 74 | # to add to the container 75 | secretName: podinfo 76 | 77 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: podinfo 5 | namespace: demo 6 | spec: 7 | rules: 8 | - host: podinfo.cluster-XX.workshopctl.kubernetesfinland.com 9 | http: 10 | paths: 11 | - path: /podinfo 12 | backend: 13 | serviceName: podinfo 14 | servicePort: 80 15 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: demo 5 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: podinfo 5 | namespace: demo 6 | data: 7 | SECRET_PASSWORD: UGFzc3cwcmQx 8 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: podinfo 6 | name: podinfo 7 | namespace: demo 8 | spec: 9 | ports: 10 | - name: 80-9898 11 | port: 80 12 | protocol: TCP 
13 | targetPort: 9898 14 | selector: 15 | app: podinfo 16 | type: ClusterIP 17 | -------------------------------------------------------------------------------- /tutorials/1-podinfo/solution/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: podinfo 6 | release: k8sfin 7 | name: podinfo 8 | namespace: demo 9 | spec: 10 | endpoints: 11 | - port: 80-9898 12 | jobLabel: app 13 | namespaceSelector: 14 | matchNames: 15 | - demo 16 | selector: 17 | matchLabels: 18 | app: podinfo 19 | -------------------------------------------------------------------------------- /tutorials/2-nodejs-app/README.md: -------------------------------------------------------------------------------- 1 | # nodejs-app 2 | 3 | This is a sample application written in nodejs. Your task here is to: 4 | 5 | a) Build the application from source code as a Docker image called `cloudnativenordics/nodejs-app:v0.1.0` 6 | b) Create the Kubernetes manifests according to these specifications: 7 | 8 | - It should be running in the `staging` namespace 9 | - It should have the labels `app=nodejs-app` and `env=staging` applied consistently 10 | - It should have liveness and readiness probes set up 11 | - It should be running with 10 replicas 12 | - It should be available at URL `cluster-XX.workshopctl.kubernetesfinland.com/nodejs-app` 13 | - It should have a Secret with the content `SECRET_TOKEN=test1234` exposed as an environment 14 | variable. 15 | - The Service should be available at `nodejs-app.staging.svc.cluster.local:80`, but forward 16 | the traffic to port 8080 for the Pods. 17 | - Each Pod should be allowed to consume 20 milli-CPUs, and 16 MiB of RAM 18 | - The workload exposes the `http_requests_total` counter at `/metrics`. Create a `ServiceMonitor` 19 | that targets the Service you created, and makes Prometheus scrape all the metrics endpoints. 
20 | -------------------------------------------------------------------------------- /tutorials/2-nodejs-app/server.js: -------------------------------------------------------------------------------- 1 | var http = require('http'); 2 | var os = require('os'); 3 | 4 | var totalrequests = 0; 5 | var isReady = true 6 | 7 | http.createServer(function(request, response) { 8 | totalrequests += 1 9 | 10 | if (request.url == "/readyz") { 11 | if (isReady == true) { 12 | response.writeHead(200); 13 | response.end("Not OK"); 14 | } else { 15 | response.writeHead(503); 16 | response.end("OK"); 17 | } 18 | return; 19 | } 20 | 21 | response.writeHead(200); 22 | 23 | if (request.url == "/metrics") { 24 | response.end("# HELP http_requests_total The amount of requests served by the server in total\n# TYPE http_requests_total counter\nhttp_requests_total " + totalrequests + "\n"); 25 | return; 26 | } 27 | if (request.url == "/toggleReady") { 28 | isReady = !isReady 29 | response.end("OK"); 30 | return; 31 | } 32 | if (request.url == "/healthz") { 33 | response.end("OK"); 34 | return; 35 | } 36 | if (request.url == "/env") { 37 | response.end(JSON.stringify(process.env)); 38 | return; 39 | } 40 | response.end("Hello! My name is " + os.hostname() + ". 
I have served "+ totalrequests + " requests so far.\n"); 41 | }).listen(8080) 42 | -------------------------------------------------------------------------------- /tutorials/2-nodejs-app/solution/Dockerfile.b64: -------------------------------------------------------------------------------- 1 | RlJPTSBub2RlOmFscGluZQpDT1BZIHNlcnZlci5qcyAvc2VydmVyLmpzCkNNRCBbIm5vZGUiLCAi 2 | L3NlcnZlci5qcyJdCg== 3 | -------------------------------------------------------------------------------- /tutorials/2-nodejs-app/solution/Makefile.b64: -------------------------------------------------------------------------------- 1 | VVNFUj89Y2xvdWRuYXRpdmVub3JkaWNzCklNQUdFX05BTUU/PW5vZGVqcy1hcHAKVkVSU0lPTj89 2 | djAuMS4wCklNQUdFPSQoVVNFUikvJChJTUFHRV9OQU1FKTokKFZFUlNJT04pCgpidWlsZDoKCWRv 3 | Y2tlciBidWlsZCAtdCAkKElNQUdFKSAuCg== 4 | -------------------------------------------------------------------------------- /tutorials/2-nodejs-app/solution/README.md: -------------------------------------------------------------------------------- 1 | # Solutions to the problem 2 | 3 | In this folder the solutions are available, however they're base64-encoded to not spoil it for you. 4 | 5 | To convert them back to the decoded variants, run: 6 | 7 | ```bash 8 | base64 -d Dockerfile.b64 > Dockerfile 9 | base64 -d Makefile.b64 > Makefile 10 | ``` 11 | -------------------------------------------------------------------------------- /tutorials/README.md: -------------------------------------------------------------------------------- 1 | # Tutorials for workshop attendees 2 | 3 | This folder contains various exercises for attendees of this 101 Kubernetes workshop. 4 | Check out the subfolders in order for more information on what to do. 5 | --------------------------------------------------------------------------------