├── .ansible-lint ├── .github ├── renovate.json └── workflows │ ├── ansible-lint.yml │ ├── check-opentofu-syntax.yml │ ├── check-yaml-syntax.yml │ └── stale.yml ├── .gitignore ├── .yamllint.yml ├── .zuul.yaml ├── LICENSE ├── README.md ├── Release-Notes-R1.md ├── Release-Notes-R2.md ├── Release-Notes-R3.md ├── Release-Notes-R4.md ├── Release-Notes-R5.md ├── Release-Notes-R6.md ├── doc ├── LoadBalancer-ExtTrafficLocal.md ├── Maintenance_and_Troubleshooting.md ├── Upgrade-Guide.md ├── application-credentials.md ├── configuration.md ├── continuous-integration.md ├── make-reference.md ├── overview.md ├── quickstart.md ├── requirements.md ├── roadmap.md └── usage │ ├── cluster-mgmt-capi-mgmt-node.md │ ├── containter-registry-configuration.md │ ├── create-new-cluster.md │ ├── custom-ca.md │ ├── gateway-api.md │ ├── harbor.md │ ├── managing-many-clusters.md │ ├── migrate-to-cluster-class.md │ ├── migrate-to-kaas-v2.md │ ├── multi-az-and-multi-cloud-environments.md │ ├── testing.md │ └── usage.md ├── playbooks ├── cleanup.yaml ├── dependencies.yaml ├── e2e.yaml ├── tasks │ ├── label_nodes.yaml │ ├── scs_compliance.yaml │ └── sonobouy.yaml └── templates │ └── environment.tfvars.j2 └── terraform ├── Makefile ├── cleanup └── cleanup.sh ├── clouds.yaml.sample ├── environments ├── environment-default.tfvars ├── environment-gx-bc.tfvars ├── environment-gx-bc2.tfvars ├── environment-gx-betacloud.tfvars ├── environment-gx-betacloud2.tfvars ├── environment-gx-betacloud3.tfvars ├── environment-gx-betacloud4.tfvars ├── environment-gx-betacloud5.tfvars ├── environment-gx-betacloud6.tfvars ├── environment-gx-betacloud7.tfvars ├── environment-gx-citycloud.tfvars ├── environment-gx-scs-staging.tfvars ├── environment-gx-scs.tfvars ├── environment-gx-wavestack.tfvars └── environment-regio.tfvars ├── extension └── 01_example.sh ├── files ├── bin │ ├── add_cluster-network.sh │ ├── apply_cert_manager.sh │ ├── apply_cindercsi.sh │ ├── apply_kubeapi_cidrs.sh │ ├── apply_metrics.sh │ ├── apply_nginx_ingress.sh │ ├── apply_openstack_integration.sh │ ├── bootstrap.sh │ ├── cccfg.inc │ ├── cleanup.sh │ ├── clusterctl_template.sh │ ├── configure_containerd.sh │ ├── configure_containerd_proxy.sh │ ├── configure_proxy.sh │ ├── create_appcred.sh │ ├── create_cluster.sh │ ├── delete_cluster.sh │ ├── deploy_cluster_api.sh │ ├── deploy_harbor.sh │ ├── enable-cilium-sg-kube.sh │ ├── enable-cilium-sg.sh │ ├── fixup_flavor_volumes.sh │ ├── fixup_k8s_version.sh │ ├── flavor_disk.sh │ ├── get_capi_helm.sh │ ├── get_k8s_git.sh │ ├── get_mtu.sh │ ├── handle_ovn_lb.sh │ ├── inject_custom_ca.sh │ ├── install_flux.sh │ ├── install_helm.sh │ ├── install_k9s.sh │ ├── install_kind.sh │ ├── install_kube_ps1.sh │ ├── install_kubectx.sh │ ├── kustpatch.sh │ ├── migrate-to-cluster-class.sh │ ├── nginx_proxy_realip.sh │ ├── openstack-kube-versions.inc │ ├── parse_k8s_version.inc │ ├── prepare_openstack.sh │ ├── print-cloud.py │ ├── remove_cluster-network.sh │ ├── signer.sh │ ├── sonobuoy.sh │ ├── update-R2-to-R3.sh │ ├── update-R4-to-R5.sh │ ├── update-R5-to-R6.sh │ ├── upload_capi_image.sh │ ├── utils.inc │ ├── wait.sh │ └── wait_capi_image.sh ├── containerd │ ├── docker.io │ ├── ghcr.io │ ├── quay.io │ ├── registry.gitlab.com │ └── registry.k8s.io ├── fix-keystoneauth-plugins-unversioned.diff ├── kubernetes-manifests.d │ ├── add-vol-to-ctrl.yaml │ ├── add-vol-to-worker.yaml │ ├── calico.yaml │ ├── cert-manager-test.yaml │ ├── cinder-provider.yaml │ ├── cinder.yaml │ ├── cloud-controller-manager-rbac.yaml │ ├── cso.yaml │ ├── cspo.yaml │ ├── 
harbor │ │ ├── base │ │ │ └── kustomization.yaml │ │ └── envs │ │ │ ├── clusterIP │ │ │ ├── harbor-config.yaml │ │ │ └── kustomization.yaml │ │ │ └── ingress │ │ │ ├── harbor-config.yaml │ │ │ ├── issuer.yaml │ │ │ └── kustomization.yaml │ ├── kuard.yaml │ ├── nginx-ingress │ │ ├── base │ │ │ ├── kustomization.yaml │ │ │ └── nginx-ingress-controller-v1.9.6.yaml │ │ ├── nginx-monitor │ │ │ ├── kustomization.yaml │ │ │ └── nginx-monitor.yaml │ │ ├── nginx-nomonitor │ │ │ ├── kustomization.yaml │ │ │ └── nginx-nomonitor.yaml │ │ └── nginx-proxy │ │ │ ├── kustomization.yaml │ │ │ ├── nginx-monitor.yaml │ │ │ ├── nginx-proxy-cfgmap.yaml │ │ │ └── nginx-proxy-lb.yaml │ ├── openstack.yaml │ ├── rmv-vol-from-ctrl.yaml │ └── rmv-vol-from-worker.yaml ├── template │ ├── capi-settings.tmpl │ ├── cloud.conf.tmpl │ ├── clouds.yaml.tmpl │ ├── cluster-template.yaml │ ├── clusterctl.yaml.tmpl │ └── harbor-settings.tmpl └── update │ ├── R2_to_R3 │ ├── update-cluster-template.diff │ ├── update-clusterctl-control-gen.sed │ ├── update-clusterctl-worker-gen.sed │ └── update-clusterctl.diff │ └── R4_to_R5 │ └── update-cluster-template.diff ├── main.tf ├── mgmtcluster.tf ├── neutron.tf ├── outputs.tf ├── secure.yaml.sample └── variables.tf /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | use_default_rules: true 3 | skip_list: 4 | - yaml # disabled because we use yamllint 5 | # Roles and modules imported from https://opendev.org/zuul/zuul-jobs 6 | mock_roles: 7 | - ensure-pip 8 | mock_modules: 9 | - zuul_return 10 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "commitBody": "Signed-off-by: SCS Renovate Bot ", 3 | "gitAuthor": "SCS Renovate Bot ", 4 | "dependencyDashboard": "true", 5 | "enabledManagers": ["regex", "terraform"], 6 | "terraform": { 7 | "pinDigests": true 8 | }, 9 | "labels": ["e2e-quick-test"], 10 | "regexManagers":[ 11 | { 12 | "description": "Regex used to match versions without prefix 'v'. Example: capi, capo.", 13 | "fileMatch":[ 14 | "^terraform\\/variables.tf","^doc\\/configuration.md","^terraform\\/environments\\/environment-default.tfvars" 15 | ], 16 | "matchStrings":[ 17 | "`(?<currentValue>(\\d+\\.){1,2}(x|\\d+))`\\s*", 18 | "default\\s*= \"(?<currentValue>(\\d+\\.){1,2}(x|\\d+))\"\\s*# renovate: datasource=(?<datasource>.*?) depName=(?<depName>.*?)\n" 19 | ], 20 | "extractVersionTemplate": "^v(?<version>.*)$" 21 | }, 22 | { 23 | "description": "Regex used to match versions with prefix 'v'. Example: calico.", 24 | "fileMatch":[ 25 | "^terraform\\/variables.tf","^doc\\/configuration.md","^terraform\\/environments\\/environment-default.tfvars" 26 | ], 27 | "matchStrings":[ 28 | "`(?<currentValue>v(\\d+\\.){1,2}(x|\\d+))`\\s*", 29 | "default\\s*= \"(?<currentValue>v(\\d+\\.){1,2}(x|\\d+))\"\\s*# renovate: datasource=(?<datasource>.*?) depName=(?<depName>.*?)\n" 30 | ] 31 | }, 32 | { 33 | "description": "Matcher for k9s (version declared and used in shell script).", 34 | "fileMatch":[ 35 | "^terraform\\/files\\/bin\\/install_k9s.sh" 36 | ], 37 | "matchStrings":[ 38 | "K9S_VERSION=(?<currentValue>v(\\d+\\.){1,2}(x|\\d+))\\s# renovate: datasource=(?<datasource>.*?)
depName=(?<depName>.*?)\n" 39 | ] 40 | } 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /.github/workflows/ansible-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ansible lint 3 | 4 | "on": 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - 'playbooks/**' 10 | pull_request: 11 | paths: 12 | - 'playbooks/**' 13 | 14 | jobs: 15 | build: 16 | name: Ansible Lint 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Run ansible-lint 21 | uses: ansible/ansible-lint@v24 22 | -------------------------------------------------------------------------------- /.github/workflows/check-opentofu-syntax.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Check opentofu syntax 3 | 4 | "on": 5 | push: 6 | paths: 7 | - 'terraform/**' 8 | - '.github/workflows/check-opentofu-syntax.yml' 9 | pull_request: 10 | paths: 11 | - 'terraform/**' 12 | - '.github/workflows/check-opentofu-syntax.yml' 13 | 14 | jobs: 15 | check-opentofu-syntax: 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | - name: create config 22 | run: cp clouds.yaml.sample clouds.yaml 23 | working-directory: ./terraform 24 | - name: create credentials 25 | run: cp secure.yaml.sample secure.yaml 26 | working-directory: ./terraform 27 | - name: Setup OpenTofu 28 | uses: opentofu/setup-opentofu@v1 29 | with: 30 | tofu_version: 1.6.1 31 | - name: Tofu init 32 | run: tofu init 33 | working-directory: ./terraform 34 | - name: Tofu validate 35 | run: tofu validate 36 | working-directory: ./terraform 37 | env: 38 | ENVIRONMENT: gx-betacloud 39 | - name: Tofu format 40 | run: tofu fmt -check 41 | working-directory: ./terraform 42 | -------------------------------------------------------------------------------- /.github/workflows/check-yaml-syntax.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Check yaml syntax 3 | 4 | "on": 5 | push: 6 | paths: 7 | - '**.yaml' 8 | - '**.yml' 9 | - .github/workflows/check-yaml-syntax.yml 10 | pull_request: 11 | paths: 12 | - '**.yaml' 13 | - '**.yml' 14 | - .github/workflows/check-yaml-syntax.yml 15 | 16 | jobs: 17 | check-yaml-syntax: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: actions/setup-python@v5 22 | with: 23 | python-version: '3.x' 24 | - run: pip3 install yamllint 25 | - run: yamllint . 26 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Mark and close stale issues 3 | 4 | "on": 5 | schedule: 6 | - cron: "30 1 * * *" 7 | 8 | jobs: 9 | stale: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | issues: write 13 | pull-requests: write 14 | 15 | steps: 16 | - uses: actions/stale@v9 17 | with: 18 | repo-token: ${{ secrets.GITHUB_TOKEN }} 19 | exempt-all-issue-assignees: true # the issues with an assignee will not be marked as stale automatically 20 | stale-issue-message: "This issue is stale because it has been open 30 days with no activity. Remove stale label or comment, or this will be closed in 60 days." 21 | close-issue-message: "This issue was closed because it has been stalled for 60 days with no activity."
22 | stale-issue-label: "stale" 23 | exempt-issue-labels: "longterm,epic" 24 | days-before-issue-stale: 30 25 | days-before-issue-close: 60 26 | days-before-pr-stale: -1 # disabled for PRs 27 | days-before-pr-close: -1 # disabled for PRs 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .deploy.* 2 | .terraform 3 | clouds.yaml 4 | clouds.conf 5 | clusterctl.yaml 6 | errored.tfstate 7 | minio.env 8 | minio.tf 9 | secure.yaml 10 | terraform.tfstate.d 11 | terraform.tfstate* 12 | .terraform.lock.hcl 13 | .envrc 14 | -------------------------------------------------------------------------------- /.yamllint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | rules: 5 | comments: enable 6 | line-length: disable 7 | # accept both key: 8 | # - item 9 | # and key: 10 | # - item 11 | # (the latter is very common in k8s land) 12 | indentation: 13 | indent-sequences: whatever 14 | -------------------------------------------------------------------------------- /.zuul.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - job: 3 | name: k8s-cluster-api-provider-e2e-abstract 4 | abstract: true 5 | parent: openstack-access-base 6 | description: | 7 | An abstract job for e2e testing of k8s-cluster-api-provider project. 8 | This job is not intended to be run directly, but instead must be inherited from it. 9 | pre-run: playbooks/dependencies.yaml 10 | run: playbooks/e2e.yaml 11 | cleanup-run: playbooks/cleanup.yaml # executed also when the job is canceled 12 | vars: 13 | wait_for_cluster: 600 # 10min 14 | extra_env: {} 15 | sonobouy: 16 | enabled: false 17 | scs_compliance: 18 | enabled: false 19 | 20 | - job: 21 | name: k8s-cluster-api-provider-e2e-conformance 22 | parent: k8s-cluster-api-provider-e2e-abstract 23 | description: | 24 | Run e2e tests of k8s-cluster-api-provider project using 25 | [sonobuoy](https://sonobuoy.io/) with mode conformance and 26 | SCS compliance checks meaning it will test if the Kubernetes 27 | cluster is conformant to the CNCF and to the SCS. 28 | timeout: 10800 # 3h 29 | vars: 30 | sonobouy: 31 | enabled: true 32 | mode: conformance 33 | scs_compliance: 34 | enabled: true 35 | 36 | - job: 37 | name: k8s-cluster-api-provider-e2e-quick 38 | parent: k8s-cluster-api-provider-e2e-abstract 39 | description: | 40 | Run e2e tests of k8s-cluster-api-provider project using 41 | [sonobuoy](https://sonobuoy.io/) with mode quick and 42 | SCS compliance checks. 43 | timeout: 3600 # 1h 44 | vars: 45 | sonobouy: 46 | enabled: true 47 | mode: quick 48 | scs_compliance: 49 | enabled: true 50 | 51 | - job: 52 | name: k8s-cluster-api-provider-scs-compliance-1.27 53 | parent: k8s-cluster-api-provider-e2e-abstract 54 | description: | 55 | Run SCS compliance KaaS tests. 
56 | vars: 57 | extra_env: 58 | TF_VAR_kubernetes_version: 1.27.x 59 | scs_compliance: 60 | enabled: true 61 | 62 | - project: 63 | name: SovereignCloudStack/k8s-cluster-api-provider 64 | default-branch: main 65 | merge-mode: "squash-merge" 66 | e2e-test: 67 | jobs: 68 | - k8s-cluster-api-provider-e2e-conformance 69 | unlabel-on-update-e2e-test: 70 | jobs: 71 | - noop 72 | e2e-quick-test: 73 | jobs: 74 | - k8s-cluster-api-provider-e2e-quick 75 | unlabel-on-update-e2e-quick-test: 76 | jobs: 77 | - noop 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k8s-cluster-api-provider repository 2 | 3 | > This repository, the SCS reference implementation KaaS v1, is deprecated as of release R6. 4 | > It is recommended to evaluate the new reference implementation KaaS v2 - [Cluster Stacks](https://github.com/SovereignCloudStack/cluster-stacks). 5 | > Existing R6 clusters can also be migrated, see [Migration to KaaS v2](doc/usage/migrate-to-kaas-v2.md). 6 | 7 | | Version | CNCF Conformance Check | 8 | |---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------| 9 | | Latest | ![latest](https://zuul.scs.community/api/tenant/SCS/badge?project=SovereignCloudStack/k8s-cluster-api-provider&pipeline=periodic-daily&branch=main) | 10 | 11 | This repository is the reference implementation of the Kubernetes Cluster 12 | Management as a Service in the [Sovereign Cloud Stack](https://scs.community/) 13 | project. 14 | 15 | The documentation was here previously; it has all been moved into the `doc/` 16 | subdirectory and is most conveniently accessible via the docusaurus rendering 17 | on <https://docs.scs.community>. 18 | 19 | Please also consider reading the project-specific release notes here. 20 | -------------------------------------------------------------------------------- /doc/LoadBalancer-ExtTrafficLocal.md: -------------------------------------------------------------------------------- 1 | # Ingress with `externalTrafficPolicy: local` 2 | 3 | Setting up the nginx ingress controller from the upstream deployment templates 4 | with the `externalTrafficPolicy: local` setting -- without any special 5 | treatment -- results in a service that is only partially working: Only requests 6 | that the LoadBalancer happens to route to the node where the nginx container is 7 | running get a response. 8 | 9 | nginx could just use the `cluster` setting instead and kube-proxy would forward 10 | the network packets. There are two reasons for nginx not to do that: 11 | 12 | 1. Having a load-balancer balance the traffic to a node that is not active just 13 | to have kube-proxy forward it to the active node does not make much sense. 14 | It creates an unnecessary hop and makes the LoadBalancer pretty useless. 15 | 16 | 2. Packets forwarded by kube-proxy do not carry the original client IP, so any 17 | source-IP-dependent handling in nginx (filtering, QoS, ...) will not be 18 | possible. 19 | 20 | # Getting it to work for managed ingress 21 | 22 | There does not seem to be a standard mechanism where k8s tells the LoadBalancer (LB) 23 | which backend members are active, but the load-balancer can find this out by using 24 | a health-monitor that probes for the availability of the service and then takes 25 | the inactive nodes out of the rotation.
Should the container be rescheduled on 26 | some other node, the health-monitor will adapt within a few seconds. 27 | 28 | Since SCS R2, the deployed nginx-ingress deployment is patched to carry a service 29 | annotation (a behavior specifically needed for OpenStack) that enables the health-monitor for the LB in 30 | front of the ingress. This results in traffic flowing. 31 | 32 | This covers the nginx ingress controller that is deployed by setting 33 | `DEPLOY_NGINX_INGRESS: true` with the `create_cluster.sh` or `apply_nginx_ingress.sh`. 34 | This is the ingress we call the "managed ingress". 35 | 36 | For the ingress service to see the client IPs, more is needed. The Octavia LB 37 | as well as the nginx service both support the proxy protocol, which can be used to 38 | communicate the real client IP. We had the plumbing for this included, but disabled it by 39 | default prior to releasing R2, because it broke access to the ingress from 40 | software running inside the cluster. 41 | 42 | A workaround for this has been implemented, so as of R4 the default is 43 | `NGINX_USE_PROXY: true`. The managed nginx ingress service thus 44 | works reliably and sees the client IPs. 45 | 46 | # Getting it to work in general 47 | 48 | Users that deploy their own nginx or other services with `externalTrafficPolicy: local` 49 | won't be helped by the annotations done by the SCS cluster management. They will 50 | have to do similar custom patching or revert to a `cluster` policy and forego the 51 | visibility of real client IPs. 52 | 53 | A generic solution to this would be a different kind of LB that works at 54 | networking layer 3 (routing), so the (TCP) connections are not terminated at the 55 | LB with the data then being forwarded on a new connection to the backend member; 56 | instead, the routing creates a direct connection. Google (with Direct Server Return, DSR) 57 | and Azure support such LB modes. 58 | 59 | As it turns out, on OpenStack clouds that use OVN as networking (SDN) layer, the OVN 60 | loadbalancer does almost deliver what's needed. 61 | 62 | # OVN provider LoadBalancer 63 | 64 | The OVN provider for the load-balancer does create direct flows to the chosen backend 65 | member, so no proxy protocol (or similar hacks) is needed to make the backend service 66 | see the client IPs. This has been validated (and can even be monitored by openstack-health-monitor) 67 | on SCS clouds that use OVN. 68 | 69 | A health-monitor is still needed to ensure that only active members receive requests. 70 | Health monitors for the ovn provider are only supported on OpenStack Wallaby and later. 71 | See also the occm [docs](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md). 72 | 73 | The OVN LoadBalancer can be enabled by setting `use_ovn_lb_provider = "true"` or `use_ovn_lb_provider = "auto"`. 74 | 75 | Note that `use_ovn_lb_provider` does not affect the LB in front of the kube API. 76 | That one is created by capo and requires other settings. Also note that it does 77 | not yet support the [CIDR filtering](https://docs.openstack.org/octavia/latest/user/feature-classification/index.html#operation_allowed_cidr) 78 | with the `restrict_kubeapi` setting. 79 | 80 | # Disabled health-monitor by default 81 | 82 | We could enable a health-monitor by default for load-balancers created from OCCM 83 | in the k8s clusters.
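For reference, OCCM lets you request such a health-monitor per service via an annotation. A minimal sketch -- the annotation key is the one documented by cloud-provider-openstack, while the service and namespace names below are assumptions matching the upstream nginx-ingress manifests:

```bash
# Sketch: ask OCCM to create an Octavia health-monitor for the members of a
# LoadBalancer service (service/namespace names are assumptions).
kubectl -n ingress-nginx annotate service ingress-nginx-controller \
    loadbalancer.openstack.org/enable-health-monitor="true" --overwrite
```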
Enabling such a health-monitor by default would make services with `externalTrafficPolicy: local` 84 | work, as the traffic would be routed exclusively to active members. But the 85 | other goal would not be achieved: Getting the real client IPs. 86 | We decided against turning on the health-monitor by default, as this might result 87 | in the wrong impression that `local` fully works. We would rather have it break and then have users take 88 | a deliberate decision: go for `cluster`, enable health-monitoring to get it half-working, 89 | or do health-monitoring plus some extra plumbing for the proxy protocol (or similar) 90 | to get all aspects working. 91 | -------------------------------------------------------------------------------- /doc/application-credentials.md: -------------------------------------------------------------------------------- 1 | # Application Credentials 2 | 3 | OpenTofu creates an [application credential](https://docs.openstack.org/keystone/wallaby/user/application_credentials.html) that it passes into the created VM. It is then used to authenticate the cluster API provider against the OpenStack API to allow it to create resources needed for the k8s cluster. 4 | 5 | The AppCredential has a few advantages: 6 | 7 | - We take out variance in how the authentication works -- we don't have to deal with a mixture of project_id, project_name, project_domain_name, user_domain_name, only a subset of which is needed depending on the cloud. 8 | - We do not leak the user credentials into the cluster, making any security breach easier to contain. 9 | - AppCreds are connected to one project and can be revoked. 10 | 11 | We are using an unrestricted AppCred for the management server which can then create further AppCreds, so we can give each cluster its own (restricted) credentials. In the case of breaches, these AppCreds can be revoked. 12 | 13 | Note that you can have additional projects or clouds listed in your `~/.config/openstack/clouds.yaml` (and `secure.yaml`) and reference them in the `OPENSTACK_CLOUD` setting of your `clusterctl.yaml`, so you can manage clusters in various projects and clouds from the same management server. 14 | -------------------------------------------------------------------------------- /doc/overview.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | Creating and scaling k8s clusters on demand provides a lot of flexibility to DevOps teams that develop, test, deploy and operate services and applications. 4 | 5 | We expect the functionality to be mainly consumed in two scenarios: 6 | 7 | - Self-service: The DevOps team leverages the code provided in this repository to create their own capi management server and then uses it to manage a number of k8s clusters for their own needs. 8 | - Managed k8s: The operator's service team creates the capi management server and uses it to provide managed k8s clusters for their clients. 9 | 10 | Note that we have an intermediate model in mind -- a model where a one-click / one-API call interface would allow the management server to be created on behalf of a user and then serve as an API endpoint to that user's k8s CAPI needs. Ideally with some dashboard or GUI that would shield less experienced users from all the YAML. 11 | 12 | Once we as the SCS Community have the gitops style cluster control working, the self-service model will become more convenient to use. 13 | 14 | The provided solution covers the following two main topics: 15 | 16 | 1.
Automation (opentofu, Makefile) to bootstrap a cluster-API management server by installing kind on a vanilla Ubuntu image and deploying some tools on this node ([kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), [openstack CLI tools](https://docs.openstack.org/newton/user-guide/common/cli-install-openstack-command-line-clients.html), [k9s](https://github.com/derailed/k9s), [cilium](https://cilium.io/), [calico](https://www.tigera.io/tigera-products/calico/), [helm](https://helm.sh/), [flux](https://fluxcd.io/) ...) and deploying [cluster-API](https://cluster-api.sigs.k8s.io/) (clusterctl) and the [OpenStack cluster-api provider](https://github.com/kubernetes-sigs/cluster-api-provider-openstack) along with suitable credentials. The opentofu automation is driven by a Makefile for convenience. The tooling also contains all the logic to clean up again. The newly deployed node clones this git repository early in the bootstrap process and uses the files received this way to set up the management cluster and scripts. 17 | 18 | 2. This node can be connected to via ssh and the scripts deployed there can be used to manage workload clusters and then deploy various standardized tools (such as e.g. the [OpenStack Cloud Controller Manager](https://github.com/kubernetes/cloud-provider-openstack) (OCCM), [cinder CSI](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md), calico or cilium CNI, [nginx ingress controller](https://kubernetes.github.io/ingress-nginx/), [cert-manager](https://cert-manager.io/), ...) and run tests (e.g. CNCF conformance with [sonobuoy](https://sonobuoy.io/)). The tools and artifacts can be updated via `git pull` at any time and the updated settings rolled out to the workload clusters. Note that the script collection will eventually be superseded by the [capi-helm-charts](https://github.com/stackhpc/capi-helm-charts). The medium-term goal is to actually create a reconciliation loop here that would perform life-cycle management for clusters according to the cluster configuration stored in an enhanced [cluster-api style](https://cluster-api.sigs.k8s.io/clusterctl/configuration.html) clusterctl.yaml from git repositories and thus allow a pure [gitops](https://www.weave.works/technologies/gitops/) style cluster management without ever ssh'ing to the management server. 19 | -------------------------------------------------------------------------------- /doc/quickstart.md: -------------------------------------------------------------------------------- 1 | # Quickstart 2 | 3 | This guide shows you how to get working Kubernetes clusters on an SCS cloud 4 | via [cluster-api](https://cluster-api.sigs.k8s.io/) (CAPI). 5 | 6 | ## Requirements 7 | 8 | - make 9 | - kubectl 10 | - opentofu 11 | - yq 12 | - python3-openstackclient, python3-octaviaclient 13 | 14 | ## Prepare the environment 15 | 16 | You need access to an OpenStack project. 17 | Copy the default environment and adjust the options according to your cloud. 18 | 19 | ```bash 20 | cp terraform/environments/environment-{default,<yourcloud>}.tfvars 21 | ``` 22 | 23 | Edit `terraform/environments/environment-<yourcloud>.tfvars` with your favourite text editor. Every option without a 24 | default value must be set. 25 | Add 26 | a [clouds.yaml](https://docs.openstack.org/python-openstackclient/latest/configuration/index.html#configuration-files) 27 | inside the `terraform` dir, in `~/.config/openstack` or `/etc/openstack`.
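A minimal `clouds.yaml` could look roughly like this (a sketch only -- the cloud name, auth URL and project values are placeholders; username/password typically go into a separate `secure.yaml`):

```bash
# Sketch of a minimal clouds.yaml (all values are placeholders; the cloud
# name should match the cloud_provider set in your environment file).
mkdir -p ~/.config/openstack
cat > ~/.config/openstack/clouds.yaml <<'EOF'
clouds:
  mycloud:
    interface: public
    identity_api_version: 3
    region_name: RegionOne
    auth:
      auth_url: https://keystone.mycloud.example:5000/v3
      project_name: my-project
      project_domain_name: Default
      user_domain_name: Default
EOF
```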
28 | 29 | It is recommended to set the name of the cloud in the `clouds.yaml` to the same value as the `cloud_provider` in 30 | the `environment-<yourcloud>.tfvars`, then you only have to specify the `ENVIRONMENT` or `OS_CLOUD` variable. 31 | 32 | ## Create a test cluster 33 | 34 | ```bash 35 | # Set ENVIRONMENT to the name specified in the name of the file 36 | # `cloud_provider` option has to be set in the environment file 37 | # to the name of the cloud in the clouds.yaml 38 | export ENVIRONMENT=<yourcloud> 39 | 40 | # Create your environment. This includes a management node as virtual machine 41 | # in your OpenStack environment as well as a Kubernetes testcluster. 42 | make create 43 | 44 | # Get the kubeconfig of the testcluster 45 | make get-kubeconfig 46 | 47 | # Interact with the testcluster 48 | kubectl --kubeconfig testcluster.yaml.<yourcloud> get nodes 49 | ``` 50 | 51 | ## Teardown 52 | 53 | ```bash 54 | make clean 55 | ``` 56 | 57 | If `make clean` fails to clean up completely, you can also use the `fullclean` target. 58 | Review the [Teardown section of the Makefile reference](make-reference.md#teardown) document for more details. 59 | 60 | ## Beyond quickstart 61 | 62 | This guide assumes you just create one test cluster directly when creating the 63 | management server. 64 | In a production setting, you would not use this test cluster but create clusters 65 | via the management server. You can read more about this in the [usage guide](usage/usage.md). 66 | -------------------------------------------------------------------------------- /doc/requirements.md: -------------------------------------------------------------------------------- 1 | # Requirements 2 | 3 | Cluster API requires an existing Kubernetes cluster to operate. In our setup, we 4 | utilize [kind](https://kind.sigs.k8s.io/), a tool for running Kubernetes clusters using Docker containers, to create 5 | the initial management Kubernetes cluster in a single docker container on an OpenStack instance. This instance serves as the CAPI 6 | management server or management cluster, responsible for overseeing the 7 | management and operation of the created Kubernetes clusters. 8 | 9 | The provisioning of the CAPI management server is done on a deployment host, possibly a tiny jumphost-style VM, or some 10 | Linux/MacOS/WSL laptop. 11 | 12 | Requirements for the deployment host: 13 | 14 | - You need to have installed: 15 | - [OpenTofu](https://opentofu.org/docs/intro/install/) 16 | - `yq` (python3-yq or yq snap) 17 | - GNU make 18 | - openstack (python3-openstackclient) and the plugin for octavia (python3-octaviaclient), via pip or your distribution. 19 | *Needed only in case you want to clean the management server or interact with openstack directly.* 20 | - You must have credentials to access the cloud. OpenTofu will look for `clouds.yaml` and optionally `secure.yaml` in 21 | the current working directory (`terraform`), in `~/.config/openstack/` or `/etc/openstack` (in this order), just like 22 | the [openstack client](https://docs.openstack.org/python-openstackclient/latest/configuration/index.html#clouds-yaml). 23 | - The API endpoints of the OpenStack cloud should have a certificate signed by a trusted CA. (Self-signed or custom CAs 24 | need significant manual work -- this will be improved after R4.) 25 | - An environment file for the cloud you want to use. See [Environments](#environments) below for more details.
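A quick way to sanity-check that the deployment host meets these requirements is a short shell loop (a sketch; adapt the tool names to your installation method):

```bash
# Sketch: verify the required tools are available on the deployment host.
for tool in tofu yq make openstack; do
    command -v "$tool" >/dev/null 2>&1 || echo "missing: $tool"
done
tofu version          # OpenTofu
openstack --version   # python3-openstackclient (octavia plugin via pip)
```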
26 | 27 | ## Environments 28 | 29 | To use a specific environment, you have to set the `ENVIRONMENT` variable (`export ENVIRONMENT=<env>`) or pass it 30 | to the `make` command by using `make ENVIRONMENT=<env>`. 31 | You can also do the same by utilizing the `OS_CLOUD` (openstack native) variable. 32 | The name of the environment is derived from the name of the file `environments/environment-<env>.tfvars`. 33 | 34 | The name of the environment specified either via `ENVIRONMENT` or `OS_CLOUD` has to be equal to the name of the 35 | cloud (`cloud_provider`) as specified in your `clouds.yaml`. 36 | 37 | In case you use the [plusserver community environment](#plusserver-community-environment) 38 | or the [wavestack environment](#wavestack-environment) you can use the default environment file for 39 | those directly or base your configuration on it. In case you need a custom configuration, 40 | see [Custom environment](#custom-environment). 41 | 42 | More information about the configuration options can be found in the [configuration documentation](configuration.md). 43 | 44 | ### Plusserver community environment 45 | 46 | Using it directly: 47 | `export ENVIRONMENT=gx-scs` 48 | 49 | or insert inside of the Makefile: 50 | `ENVIRONMENT=gx-scs` 51 | 52 | File: `environments/environment-gx-scs.tfvars` 53 | 54 | The name of the cloud has to be `gx-scs` in the `clouds.yaml` file, otherwise you will need 55 | to change the `cloud_provider` variable inside of the `terraform/environments/environment-gx-scs.tfvars` file. 56 | 57 | ### Wavestack environment 58 | 59 | Using it directly: 60 | `export ENVIRONMENT=gx-wavestack` 61 | 62 | or insert inside of the Makefile: 63 | `ENVIRONMENT=gx-wavestack` 64 | 65 | File: `environments/environment-gx-wavestack.tfvars` 66 | 67 | The name of the cloud has to be `gx-wavestack` in the `clouds.yaml` file, otherwise you will need 68 | to change the `cloud_provider` variable inside of the `terraform/environments/environment-gx-wavestack.tfvars` file. 69 | 70 | ### Custom environment 71 | 72 | You can create your own environment file from the sample file `environments/environment-default.tfvars` and provide the 73 | necessary information like machine flavor or machine image. You can comment out all lines where the defaults match your 74 | needs. 75 | -------------------------------------------------------------------------------- /doc/roadmap.md: -------------------------------------------------------------------------------- 1 | # Roadmap 2 | 3 | ## Advanced cluster templating with helm (Technical Preview) 4 | 5 | On the management server, we have not only helm installed, but also the repository [https://github.com/stackhpc/capi-helm-charts](https://github.com/stackhpc/capi-helm-charts) checked out. Amongst other things, it automates the creation of new machine templates when needed and does rolling updates on your k8s cluster with clusterctl. This allows for an easy adaptation of your cluster to 6 | different requirements, new k8s versions etc. 7 | 8 | Please note that this is currently evolving quickly and we have not completely assessed and tested the capabilities. We intend to do this after R2 and eventually recommend this as the standard way of managing clusters in production. At this point, it's included as a technical preview.
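To give an impression of the intended workflow, creating or updating a cluster with these charts could look roughly like this (a sketch under assumptions: the checkout location, the `openstack-cluster` chart path and the values file name are hypothetical and may differ between chart versions):

```bash
# Sketch: manage a workload cluster via the stackhpc capi-helm-charts
# checked out on the management server (paths/names are assumptions).
cd ~/capi-helm-charts
helm upgrade --install my-cluster ./charts/openstack-cluster \
    --values my-cluster-values.yaml
```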
9 | -------------------------------------------------------------------------------- /doc/usage/cluster-mgmt-capi-mgmt-node.md: -------------------------------------------------------------------------------- 1 | # Cluster Management on the capi management node 2 | 3 | You can use `make ssh` to log in to the capi management server. There you can issue `clusterctl` and `kubectl` (aliased 4 | to `k`) commands. The context `kind-kind` 5 | is used for the CAPI management while the context `testcluster-admin@testcluster` can 6 | be used to control the workload cluster `testcluster`. You can of course create many 7 | of them. There are management scripts on the management server: 8 | 9 | - In the user's (ubuntu) home directory, create a subdirectory with the CLUSTERNAME 10 | to hold your cluster's configuration data. Copy over the `clusterctl.yaml` file 11 | from `~/cluster-defaults/` and edit it to meet your needs. Note that you can also 12 | copy over `cloud.conf` and `cluster-template.yaml` and adjust them, but you don't 13 | need to. (If you don't create the subdirectory, the `create_cluster.sh` script 14 | will do so for you and use all default settings.) 15 | - `create_cluster.sh CLUSTERNAME`: Use this command to create a cluster with 16 | the settings from `~/$CLUSTERNAME/clusterctl.yaml`. More precisely, it uses the template 17 | `$CLUSTERNAME/cluster-template.yaml` and fills in the settings from 18 | `$CLUSTERNAME/clusterctl.yaml` to render a config file `$CLUSTERNAME/$CLUSTERNAME-config.yaml` 19 | which will then be submitted to the capi server (`kind-kind` context) for creating 20 | the control plane nodes and worker nodes. The script will also apply openstack integration, 21 | cinder CSI, calico or cilium CNI, and optionally also metrics server, nginx ingress controller, 22 | flux, cert-manager. (These can be controlled by `DEPLOY_XXX` variables, see below. 23 | Defaults can be preconfigured from the environment.tfvars file during management server 24 | creation.) 25 | Note that `CLUSTERNAME` defaults to `testcluster` and must not contain 26 | whitespace. 27 | The script also makes sure that appropriate CAPI images are available (it grabs them 28 | from [OSISM](https://swift.services.a.regiocloud.tech/swift/v1/AUTH_b182637428444b9aa302bb8d5a5a418c/openstack-k8s-capi-images) 29 | as needed and registers them with OpenStack, following the SCS image metadata 30 | standard). 31 | The script returns once the control plane is fully working (the worker 32 | nodes might still be under construction). The kubeconfig file to talk to this 33 | cluster (as admin) can be found in `~/$CLUSTERNAME/$CLUSTERNAME.yaml`. Expect the cluster 34 | creation to take ~8mins. You can pass 35 | `--context=${CLUSTERNAME}-admin@$CLUSTERNAME` to `kubectl` (with the 36 | default `~/.kube/config` config file) or `export KUBECONFIG=$CLUSTERNAME.yaml`\ 37 | to talk to the workload cluster. 38 | - The subdirectory `~/$CLUSTERNAME/deployed-manifests.d/` will contain the 39 | deployed manifests for reference (and in case of nginx-ingress also to facilitate 40 | a full cleanup). 41 | - The `clusterctl.yaml` file can be edited and the `create_cluster.sh` script 42 | called again to submit the changes. (If you have not made any changes, 43 | re-running the script is harmless.)
Note that `create_cluster.sh` 44 | does not currently remove any of the previously deployed services/deployments 45 | from the workload clusters -- this will be added later on with the appropriate 46 | care and warnings. Also note that not all changes are allowed. You can easily 47 | change the number of nodes or add k8s services to a cluster. For changing 48 | machine flavors, machine images, kubernetes versions ... you will need to 49 | also increase the `CONTROL_PLANE_MACHINE_GEN` or the `WORKER_MACHINE_GEN` 50 | counter to add a different suffix to these read-only resources. This will 51 | cause Cluster-API to orchestrate a rolling upgrade for you on rollout. 52 | (This is solved more elegantly in the helm chart style cluster management, see below.) 53 | - The directory `~/k8s-cluster-api-provider/` contains a checked-out git tree 54 | from the SCS project. It can be updated (`git pull`) to receive the latest 55 | fixes and improvements. This way, most incremental updates do not need the 56 | recreation of the management server (and thus also not the recreation of your 57 | managed workload clusters), but can be applied by calling `create_cluster.sh` 58 | again for the workload clusters. 59 | - The installation of the openstack integration, cinder CSI, metrics server and 60 | nginx ingress controller is done via the `bin/apply_*.sh` scripts that are called 61 | from `create_cluster.sh`. You can manually call them as well -- they take 62 | the cluster name as argument. (It's better to just call `create_cluster.sh` 63 | again, though.) The applied yaml files are collected in 64 | `~/$CLUSTERNAME/deployed-manifests.d/`. You can `kubectl delete -f` them 65 | to remove the functionality again. 66 | - You can, of course, also delete the cluster and create a new one if that 67 | level of disruption is fine for you. (See below in Advanced cluster templating 68 | with helm to get an idea how we want to make this more convenient in the future.) 69 | - Use `kubectl get clusters -A` in the `kind-kind` context to see what clusters 70 | exist. Use `kubectl get all -A` in the `testcluster-admin@testcluster` context 71 | to get an overview of the state of your workload cluster. You can access the logs 72 | from the capo controller in case you have trouble with cluster creation. 73 | - `delete_cluster.sh [CLUSTERNAME]`: Tell the capi management server to remove 74 | the cluster $CLUSTERNAME. It will also remove persistent volume claims belonging 75 | to the cluster. The script will return once the removal is done. 76 | - `cleanup.sh`: Remove all running clusters. 77 | - `add_cluster-network.sh CLUSTERNAME` adds the management server to the node network 78 | of the cluster `CLUSTERNAME`, assuming that it runs on the same cloud (and region). 79 | `remove_cluster-network.sh` undoes this again. This is useful for debugging 80 | purposes. 81 | 82 | For your convenience, `k9s` is installed on the management server as well 83 | as `calicoctl`, `cilium`, `hubble`, `cmctl`, `helm` and `sonobuoy`. 84 | These binaries can all be found in `/usr/local/bin` while the helper scripts 85 | have been deployed to `~/bin/`. 86 | -------------------------------------------------------------------------------- /doc/usage/create-new-cluster.md: -------------------------------------------------------------------------------- 1 | # Create a new cluster 2 | 3 | On the management server (login with `make ssh`), create a directory (below the home of 4 | the standard ubuntu user) with the name of your cluster.
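Condensed into commands, the flow described in this and the following paragraphs looks roughly like this (a sketch; `mycluster` is a placeholder cluster name):

```bash
# Sketch of the per-cluster workflow on the management server.
mkdir -p ~/mycluster
cp ~/cluster-defaults/clusterctl.yaml ~/mycluster/
vim ~/mycluster/clusterctl.yaml                # adjust settings to your needs
create_cluster.sh mycluster                    # helper script from ~/bin
export KUBECONFIG=~/mycluster/mycluster.yaml   # talk to the new cluster
kubectl get nodes
```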
Copy over `clusterctl.yaml` from 5 | `~/cluster-defaults/` and edit it according to your needs. You can also copy over other 6 | files from `~/cluster-defaults/` and adjust them, but this is only needed in exceptional 7 | cases. 8 | Now run `create_cluster.sh <clustername>`. 9 | 10 | This will copy all missing defaults from `~/cluster-defaults/` into the directory with your 11 | cluster name and then ask cluster-api to create the cluster. The scripts also take 12 | care of security groups, anti-affinity, node image registration (if needed) and 13 | of deploying CCM, CNI, CSI as well as optional services such as metrics or the nginx-ingress 14 | controller. 15 | 16 | You can access the new cluster with `kubectl --context clustername-admin@clustername` 17 | or `KUBECONFIG=~/clustername/clustername.yaml kubectl`. 18 | 19 | The management cluster is in context `kind-kind`. 20 | 21 | Note that you can always change `clusterctl.yaml` and re-run `create_cluster.sh`. The script is idempotent and running 22 | it multiple times with the unchanged input file will result in no changes to the cluster. 23 | -------------------------------------------------------------------------------- /doc/usage/custom-ca.md: -------------------------------------------------------------------------------- 1 | # Custom CA 2 | 3 | OpenStack provides public-facing API endpoints whose protection by SSL/TLS certificates 4 | is highly recommended in production environments. 5 | These certificates are usually issued by a public CA, but a custom or private CA can also be used. 6 | 7 | If the communication with the OpenStack API is protected by a certificate issued by a custom CA, 8 | the `cacert` setting needs to be provided inside clouds.yaml, e.g.: 9 | 10 | ```yaml 11 | clouds: 12 | devstack: 13 | cacert: ca-bundle.pem 14 | auth: 15 | auth_url: https://10.0.3.15/identity 16 | project_domain_id: default 17 | project_name: demo 18 | user_domain_id: default 19 | identity_api_version: 3 20 | region_name: RegionOne 21 | interface: public 22 | ``` 23 | 24 | Here the file `ca-bundle.pem` contains the custom root CA and potentially intermediate CA(s). 25 | > The `ca-bundle.pem` file will be copied to the management server and used by CAPO 26 | > in the management cluster. Also, it will be copied to the workload cluster (control plane and worker nodes) 27 | > and mounted and used by OCCM and CCSI pods. 28 | > So provide only the necessary certificates in that file. 29 | 30 | Steps of what happens with the custom cacert in k8s-cluster-api-provider: 31 | 32 | 1. The `cacert` setting is provided inside clouds.yaml 2 | 2. The cacert file referenced by the `cacert` key (step 1) is copied to the management server 34 | directory `~/cluster-defaults/${cloud_provider}-cacert` by OpenTofu 35 | 3. During the management server bootstrap process, the cacert is injected into 36 | the `~/cluster-defaults/cluster-template.yaml` `KubeadmControlPlane` and `KubeadmConfigTemplate` resources 37 | as a file with the cacert content from the already defined secret `${CLUSTER_NAME}-cloud-config`; it will later be 38 | templated and copied to the workload cluster (control plane and worker nodes) provisioned by CAPO, e.g.: 39 | 40 | ```yaml 41 | files: 42 | - contentFrom: 43 | secret: 44 | key: cacert 45 | name: ${CLUSTER_NAME}-cloud-config 46 | owner: root:root 47 | path: /etc/ssl/certs/devstack-cacert 48 | permissions: "0644" 49 | ``` 50 | 51 | 52 | 4.
When the creation of the workload cluster (`create_cluster.sh`) starts, 53 | `~/cluster-defaults/cluster-template.yaml` is copied into the workload cluster directory (`~/$CLUSTER_NAME/`) 54 | 5. Then the cacert file content is base64 encoded and saved in the OPENSTACK_CLOUD_CACERT_B64 variable 55 | inside `~/$CLUSTER_NAME/clusterctl.yaml`, so it can be used during 56 | the workload cluster templating 57 | 6. Later, when the workload cluster templates are applied to the management cluster, 58 | the secret `${CLUSTER_NAME}-cloud-config` with the base64 encoded cacert is created and used by CAPO 59 | 7. CAPO will create the workload cluster (thanks to steps 5 and 6) and the cacert is 60 | transferred to the control plane and worker nodes (thanks to steps 3 and 4) 61 | 8. OCCM and CCSI pods mount the cacert via a hostPath volume 62 | and use it for e.g. creating load balancers or volumes 63 | 64 | ## Rotation 65 | 66 | When the custom CA expires or otherwise changes, it needs to be rotated. 67 | The custom CA certificate is used by CAPO in the management cluster for creating the infrastructure 68 | for the workload clusters, and in the workload clusters by OCCM and CCSI for e.g. creating load balancers or volumes. 69 | In both cases, the cacert is provided via the secret `${CLUSTER_NAME}-cloud-config` and needs to be updated. 70 | 71 | There are 3 steps in this rotation process: 72 | 73 | 1. Replace/append the custom CA certificate in `~/cluster-defaults/${cloud_provider}-cacert` 74 | 2. Increase the generation counters `CONTROL_PLANE_MACHINE_GEN` and `WORKER_MACHINE_GEN` in `~/$CLUSTER_NAME/clusterctl.yaml` 75 | 3. Run `create_cluster.sh $CLUSTER_NAME` and wait for the rolling update of your workload cluster 76 | 77 | > In step 1, appending can be useful for avoiding downtime of your services. 78 | > Your cacert file will contain two CA certificates -- old and new. 79 | > This should help with a smooth transition to the new certificate; later, the old one can be removed. 80 | > 81 | > Steps 2 and 3 need to be done per workload cluster. 82 | > 83 | > When step 2 is omitted, only the cacert secret in the management cluster is updated; no rolling update of 84 | > the workload cluster is started in step 3, and existing nodes remain with the old certificate. 85 | -------------------------------------------------------------------------------- /doc/usage/gateway-api.md: -------------------------------------------------------------------------------- 1 | # Gateway-API 2 | 3 | Starting with R5, this k8s solution offers experimental Kubernetes Gateway API support. You need to set `deploy_gateway_api` to `true` in your environment's configuration. You also need to use cilium as your CNI (the default since R5). 4 | 5 | After deploying your cluster, you can use the Gateway API and deploy Gateways and HTTP-Routes.
As a starting point you can deploy this example app: 6 | 7 | ```bash 8 | kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.14/samples/bookinfo/platform/kube/bookinfo.yaml 9 | ``` 10 | and this example Gateway and HTTP-Route: 11 | ```bash 12 | kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/1.15.1/examples/kubernetes/gateway/basic-http.yaml 13 | ``` 14 | -------------------------------------------------------------------------------- /doc/usage/managing-many-clusters.md: -------------------------------------------------------------------------------- 1 | # Managing many clusters 2 | 3 | While the scripts all use a default `testcluster`, they have 4 | been developed and tested to manage many clusters from a single management 5 | node. Copy the `~/cluster-defaults/clusterctl.yaml` file to 6 | `~/MYCLUSTER/clusterctl.yaml` 7 | and edit the copy to describe the properties of the cluster to be created. 8 | Then use `./create_cluster.sh MYCLUSTER` to create a workload cluster 9 | with the name `MYCLUSTER`. You will find the kubeconfig file in 10 | `~/MYCLUSTER/MYCLUSTER.yaml`, granting its owner admin access to that cluster. 11 | Likewise, `delete_cluster.sh` and the `apply_*.sh` scripts take a 12 | cluster name as parameter. 13 | 14 | This way, dozens of clusters can be controlled from one management server. 15 | 16 | You can add credentials from different projects into 17 | `~/.config/openstack/clouds.yaml` and reference them in the `OPENSTACK_CLOUD` 18 | setting in `clusterctl.yaml`, thereby managing clusters in many different 19 | projects and even clouds from one management server. 20 | -------------------------------------------------------------------------------- /doc/usage/migrate-to-cluster-class.md: -------------------------------------------------------------------------------- 1 | # Migration to ClusterClass 2 | 3 | From [#600](https://github.com/SovereignCloudStack/k8s-cluster-api-provider/pull/600), this repository uses the CAPI 4 | [ClusterClass](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/) feature for the creation of 5 | new clusters; additionally, see the k8s [blog](https://kubernetes.io/blog/2021/10/08/capi-clusterclass-and-managed-topologies/). 6 | This feature is also used e.g. in the SCS [cluster-stacks](https://github.com/SovereignCloudStack/cluster-stacks) repository. 7 | 8 | > Note: For now, a ClusterClass is created per Cluster, which is not optimal and most likely can be improved. 9 | 10 | Therefore, existing clusters must be migrated from 'old' cluster templates to 'new' ClusterClass-based templates. 11 | Based on the upgrade strategy in the ClusterClass [proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210526-cluster-class-and-managed-topologies.md#upgrade-strategy), 12 | we shouldn't be able to migrate, but in CAPI PR [#6292](https://github.com/kubernetes-sigs/cluster-api/pull/6292) 13 | the validation webhook was relaxed (via the special Cluster annotation 14 | **unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check**) and manual migration is now possible. 15 | 16 | The script `migrate-to-cluster-class.sh` uses that special annotation for migration. In the beginning, it patches the 17 | CAPI and KCP deployments with the **ClusterTopology=true** container argument. Then it creates new resources 18 | (*KubeadmControlPlaneTemplate*, *OpenStackClusterTemplate*, *ClusterClass*). After that, it annotates, labels and 19 | patches existing cluster resources.
In the end, it patches the **Cluster** object with **.spec.topology** and it's done. 20 | 21 | ## Migration 22 | 23 | ### Prerequisites 24 | - CAPI >= v1.5.2 due to the [NamingStrategy](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/write-clusterclass#clusterclass-with-custom-naming-strategies) feature 25 | - the upgrade can be performed as stated in the upgrade [guide](https://github.com/SovereignCloudStack/k8s-cluster-api-provider/blob/main/doc/Upgrade-Guide.md#updating-cluster-api-and-openstack-cluster-api-provider) 26 | 27 | ### Steps 28 | 1. Git pull/checkout new changes into the `~/k8s-cluster-api-provider` directory 29 | 2. Run `migrate-to-cluster-class.sh <cluster-name>` (verify machines were not recreated) 30 | 3. Copy new templates for existing and new clusters (consider a backup) 31 | ```bash 32 | cp ~/k8s-cluster-api-provider/terraform/files/template/cluster-template.yaml ~/<cluster-name>/cluster-template.yaml 33 | cp ~/k8s-cluster-api-provider/terraform/files/template/cluster-template.yaml ~/cluster-defaults/cluster-template.yaml 34 | ``` 35 | 4. Add the newly introduced generation counter for the OpenStackClusterTemplate to the settings in `clusterctl.yaml` 36 | ```bash 37 | echo "OPENSTACK_CLUSTER_GEN: geno01" >> ~/<cluster-name>/clusterctl.yaml 38 | echo "OPENSTACK_CLUSTER_GEN: geno01" >> ~/cluster-defaults/clusterctl.yaml 39 | ``` 40 | 5. The next run of `create_cluster.sh <cluster-name>` should be idempotent 41 | -------------------------------------------------------------------------------- /doc/usage/multi-az-and-multi-cloud-environments.md: -------------------------------------------------------------------------------- 1 | # Multi-AZ and multi-cloud environments 2 | 3 | The provided `cluster-template.yaml` assumes that all control nodes on one hand and all worker nodes on the other are 4 | equal. They are in the same cloud within the same availability zone, using the same flavor. Cluster API allows k8s 5 | clusters to have varying flavors, span availability zones and even clouds. For this, you can create an advanced 6 | cluster-template with several different machine descriptions and potentially several secrets. Depending on your changes, 7 | the logic in `create_cluster.sh` might also need enhancements to handle this. Extending this is not hard and we're happy 8 | to hear from your use cases and take patches. 9 | 10 | However, we are currently investigating the use of helm templating for anything beyond the simple use cases instead; see the 11 | next chapter. 12 | -------------------------------------------------------------------------------- /doc/usage/testing.md: -------------------------------------------------------------------------------- 1 | # Testing 2 | 3 | To test the created k8s cluster, there are several tools available. 4 | Apply all commands to the testcluster context (by passing the appropriate 5 | `--context` setting to `kubectl` or by using the right `KUBECONFIG` 6 | file). 7 | 8 | - Looking at all pods (`kubectl get pods -A`) to see that they all come 9 | up (and don't suffer excessive restarts) is a good first check. 10 | Look at the pod logs to investigate any failures. 11 | 12 | - You can create a very simple deployment with the provided `kuard.yaml`, which is 13 | an example taken from the O'Reilly book by B. Burns, J. Beda, K. Hightower: 14 | "Kubernetes Up & Running", enhanced to also use a persistent volume. 15 | 16 | - You can deploy [Google's demo microservice application](https://github.com/GoogleCloudPlatform/microservices-demo).
17 | 18 | - `sonobuoy` runs a subset of the k8s tests, providing a simple way to 19 | filter the >5000 existing test cases to only run the CNCF conformance 20 | tests or to restrict testing to non-disruptive tests. The `sonobuoy.sh` wrapper 21 | helps with calling it. There are also `Makefile` targets `check-*` that 22 | call various [sonobuoy](https://sonobuoy.io) test sets. 23 | This is how we call sonobuoy for our CI tests. 24 | 25 | - You can use `cilium connectivity test` to check whether your cilium 26 | CNI is working properly. You might need to enable hubble to get 27 | a fully successful result. 28 | -------------------------------------------------------------------------------- /doc/usage/usage.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | The subsequent management of the cluster can best be done from the management server VM, as it has all the tools 4 | deployed and config files can be edited and resubmitted to the kubernetes kind cluster for reconciliation. To log 5 | in to this management server via ssh, you can issue `make ssh`. 6 | 7 | You can create and do life cycle management for many more clusters from this management server. 8 | 9 | The kubeconfig with admin power for the created testcluster is named `testcluster/testcluster.yaml` 10 | (or `$CLUSTER_NAME/$CLUSTER_NAME.yaml` for all the other clusters) and can be handed out to users that should get full 11 | administrative control over the cluster. You can also retrieve them 12 | using `make get-kubeconfig TESTCLUSTER=${CLUSTER_NAME}` from the machine where you created the management server, 13 | and possibly create an encrypted .zip file for handing these out. (You can omit ``TESTCLUSTER=...`` for the 14 | default testcluster.) 15 | -------------------------------------------------------------------------------- /playbooks/cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup 3 | hosts: all 4 | vars: 5 | cloud_provider: "{{ cloud }}" # inherited from the parent job 6 | project_dir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}" 7 | project_tf_dir: "{{ project_dir }}/terraform" 8 | # Some Kubernetes resources managed by the k8s-cluster-api-provider project employ names 9 | # formed by combining elements such as a prefix, cluster-name, and additional suffix. 10 | # This combined naming convention may potentially exceed the character limits imposed by 11 | # Kubernetes. To mitigate this issue, we take precautions by truncating both the prefix 12 | # and the cluster name to 12 characters each. This adjustment ensures that we stay 13 | # within Kubernetes' limits and allows us to utilize a single OpenStack project with 14 | # multiple k8s-capi deployments.
15 | pr_or_main: "{{ 'pr' + zuul.change if zuul.change is defined else 'main' }}" 16 | prefix: "{{ (pr_or_main + '-' + zuul.build) | truncate(12, True, '') }}" 17 | testcluster_name: "{{ (pr_or_main + '-' + zuul.build) | truncate(12, True, '') }}" 18 | environment: 19 | ENVIRONMENT: "{{ cloud_provider }}" 20 | PATH: "{{ ansible_user_dir }}/.local/bin:{{ ansible_env.PATH }}" 21 | tasks: 22 | - name: Extract Zuul config 23 | ansible.builtin.set_fact: 24 | zuul_config: "{{ zuul.change_message | regex_search('(?s)```ZUUL_CONFIG(.+?)```', '\\1', multiline=true) }}" 25 | when: zuul.change_message is defined 26 | - name: Trim Zuul config 27 | ansible.builtin.set_fact: 28 | zuul_config: "{{ zuul_config | first | split('/n') | map('trim') | join('\n') }}" 29 | when: zuul_config is defined and zuul_config is not none and zuul_config != '' 30 | - name: Ensure environment file 31 | ansible.builtin.template: 32 | src: "templates/environment.tfvars.j2" 33 | dest: "{{ project_tf_dir }}/environments/environment-{{ cloud_provider }}.tfvars" 34 | mode: "0644" 35 | - name: Cleanup - forceclean 36 | ansible.builtin.command: "make forceclean" 37 | args: 38 | chdir: "{{ project_tf_dir }}" 39 | changed_when: true 40 | -------------------------------------------------------------------------------- /playbooks/dependencies.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure k8s-cluster-api-provider project dependencies 3 | hosts: all 4 | vars: 5 | jq_version: "1.7.1" 6 | yq_version: "4.40.7" 7 | kubectl_version: "1.28.7" 8 | openstackclient_version: "6.5.0" 9 | octaviaclient_version: "3.6.0" 10 | opentofu_version: "1.6.1" 11 | install_dir: "{{ ansible_user_dir }}/.local/bin" 12 | environment: 13 | PATH: "{{ install_dir }}:{{ ansible_env.PATH }}" 14 | roles: 15 | - role: ensure-pip # https://zuul-ci.org/docs/zuul-jobs/latest/python-roles.html#role-ensure-pip 16 | tasks: 17 | - name: Make sure installation directory exists 18 | ansible.builtin.file: 19 | path: "{{ install_dir }}" 20 | state: directory 21 | mode: 0755 22 | - name: Install jq 23 | ansible.builtin.get_url: 24 | url: "https://github.com/jqlang/jq/releases/download/jq-{{ jq_version }}/jq-linux64" 25 | dest: "{{ install_dir }}/jq" 26 | mode: "+x" 27 | # TODO: use `checksum` attr here to verify the digest of the destination file, if available 28 | - name: Install yq 29 | ansible.builtin.get_url: 30 | url: "https://github.com/mikefarah/yq/releases/download/v{{ yq_version }}/yq_linux_amd64" 31 | dest: "{{ install_dir }}/yq" 32 | mode: "+x" 33 | # TODO: use `checksum` attr here to verify the digest of the destination file, if available 34 | - name: Install kubectl 35 | ansible.builtin.get_url: 36 | url: "https://dl.k8s.io/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl" 37 | dest: "{{ install_dir }}/kubectl" 38 | mode: "+x" 39 | # TODO: use `checksum` attr here to verify the digest of the destination file, if available 40 | - name: Install openstack client dependencies 41 | ansible.builtin.pip: 42 | name: 43 | - "python-openstackclient=={{ openstackclient_version }}" 44 | - "python-octaviaclient=={{ octaviaclient_version }}" 45 | extra_args: --user 46 | - name: Install ip command 47 | ansible.builtin.package: 48 | name: iproute2 49 | become: true 50 | - name: Install OpenTofu 51 | # TODO: write separate role ensure-opentofu, see https://opendev.org/zuul/zuul-jobs/src/branch/master/roles/ensure-terraform 52 | block: 53 | - name: Install unzip 54 | ansible.builtin.package: 55 | name: unzip 56 
| become: true 57 | - name: Create temp directory 58 | ansible.builtin.tempfile: 59 | state: directory 60 | register: tofu_install_tempdir 61 | - name: Download OpenTofu archive 62 | ansible.builtin.get_url: 63 | url: "https://github.com/opentofu/opentofu/releases/download/v{{ opentofu_version }}/tofu_{{ opentofu_version }}_linux_amd64.zip" 64 | dest: "{{ tofu_install_tempdir.path }}/opentofu.zip" 65 | mode: 0644 66 | # TODO: checksum 67 | - name: Create OpenTofu package directory 68 | ansible.builtin.file: 69 | path: "{{ tofu_install_tempdir.path }}/opentofu" 70 | state: directory 71 | mode: 0755 72 | - name: Unarchive OpenTofu 73 | ansible.builtin.unarchive: 74 | src: "{{ tofu_install_tempdir.path }}/opentofu.zip" 75 | dest: "{{ tofu_install_tempdir.path }}/opentofu" 76 | remote_src: true 77 | - name: Install OpenTofu 78 | ansible.builtin.copy: 79 | src: "{{ tofu_install_tempdir.path }}/opentofu/tofu" 80 | dest: "{{ install_dir }}/tofu" 81 | mode: "+x" 82 | remote_src: true 83 | - name: Remove temp directory 84 | ansible.builtin.file: 85 | path: "{{ tofu_install_tempdir.path }}" 86 | state: absent 87 | - name: Output OpenTofu version 88 | ansible.builtin.command: "tofu version" 89 | register: tofu_version 90 | changed_when: false 91 | -------------------------------------------------------------------------------- /playbooks/tasks/label_nodes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Label k8s nodes based on OpenStack host IDs 3 | vars: 4 | # Note (@mfeder): The following label key serves as a temporary label until upstream 5 | # proposes and implements an alternative label key/solution for indicating a physical machine 6 | # within the Kubernetes cluster. 7 | # refer to: https://github.com/SovereignCloudStack/issues/issues/540 8 | label_key: "topology.scs.community/host-id" 9 | openstackclient_version: "6.5.0" 10 | jq_version: "1.7.1" 11 | kubectl_version: "1.28.7" 12 | install_dir: "{{ ansible_user_dir }}/.local/bin" 13 | block: 14 | - name: Check if `os_cloud` variable is defined 15 | ansible.builtin.fail: 16 | msg: "os_cloud is not defined or empty" 17 | when: os_cloud is not defined or os_cloud == '' 18 | - name: Check if `kubeconfig_path` variable is defined 19 | ansible.builtin.fail: 20 | msg: "kubeconfig_path is not defined or empty" 21 | when: kubeconfig_path is not defined or kubeconfig_path == '' 22 | - name: Install jq 23 | ansible.builtin.get_url: 24 | url: "https://github.com/jqlang/jq/releases/download/jq-{{ jq_version }}/jq-linux64" 25 | dest: "{{ install_dir }}/jq" 26 | mode: "+x" 27 | # TODO: use `checksum` attr here to verify the digest of the destination file, if available 28 | - name: Install kubectl 29 | ansible.builtin.get_url: 30 | url: "https://dl.k8s.io/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl" 31 | dest: "{{ install_dir }}/kubectl" 32 | mode: "+x" 33 | # TODO: use `checksum` attr here to verify the digest of the destination file, if available 34 | - name: Install openstack cli 35 | ansible.builtin.pip: 36 | name: 37 | - "python-openstackclient=={{ openstackclient_version }}" 38 | extra_args: --user 39 | - name: Get list of OpenStack server details 40 | ansible.builtin.shell: 41 | cmd: | 42 | set -o pipefail 43 | openstack server list -f json | jq -r '.[].ID' | while read id; do openstack server show $id -f json; done | jq -s '.'
44 | executable: /bin/bash 45 | register: openstack_server_list 46 | changed_when: false 47 | environment: 48 | OS_CLOUD: "{{ os_cloud }}" 49 | - name: Populate openstack_hosts dict with hostname=host_id pairs 50 | ansible.builtin.set_fact: 51 | openstack_hosts: "{{ openstack_hosts | default({}) | combine({item.name: item.hostId}) }}" 52 | with_items: "{{ openstack_server_list.stdout | from_json }}" 53 | - name: Get a list of nodes 54 | ansible.builtin.command: kubectl get nodes -o json 55 | register: kubernetes_node_list 56 | changed_when: false 57 | environment: 58 | KUBECONFIG: "{{ kubeconfig_path }}" 59 | - name: Add node label 60 | ansible.builtin.command: "kubectl label nodes {{ item.metadata.name }} {{ label_key }}={{ openstack_hosts[item.metadata.name] }}" 61 | with_items: "{{ (kubernetes_node_list.stdout | from_json)['items'] }}" 62 | changed_when: false 63 | environment: 64 | KUBECONFIG: "{{ kubeconfig_path }}" 65 | -------------------------------------------------------------------------------- /playbooks/tasks/scs_compliance.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download, install, configure, and execute SCS KaaS compliance check 3 | vars: 4 | check_dir: "{{ ansible_user_dir }}/scs-compliance" 5 | python_venv_dir: "{{ ansible_user_dir }}/scs-compliance/venv" 6 | block: 7 | - name: Check if `kubeconfig_path` variable is defined 8 | ansible.builtin.fail: 9 | msg: "kubeconfig_path is not defined or empty" 10 | when: kubeconfig_path is not defined or kubeconfig_path == '' 11 | - name: Ensure check directory 12 | ansible.builtin.file: 13 | path: "{{ check_dir }}" 14 | state: directory 15 | mode: 0755 16 | - name: Get SCS KaaS compliance check assets 17 | ansible.builtin.git: 18 | repo: https://github.com/SovereignCloudStack/standards.git 19 | dest: "{{ check_dir }}" 20 | single_branch: true 21 | version: main 22 | - name: Install virtualenv 23 | ansible.builtin.package: 24 | name: virtualenv 25 | become: true 26 | - name: Install check requirements 27 | ansible.builtin.pip: 28 | requirements: "{{ check_dir }}/Tests/requirements.txt" 29 | virtualenv: "{{ python_venv_dir }}" 30 | - name: Execute SCS KaaS compliance check 31 | ansible.builtin.shell: 32 | cmd: 33 | ". {{ python_venv_dir }}/bin/activate && 34 | python3 {{ check_dir }}/Tests/scs-compliance-check.py {{ check_dir }}/Tests/scs-compatible-kaas.yaml -v -s KaaS_V1 -V v2 -a kubeconfig={{ kubeconfig_path }}" 35 | changed_when: false 36 | register: scs_compliance_results 37 | always: 38 | - name: Parse SCS KaaS compliance results # noqa: ignore-errors 39 | ansible.builtin.set_fact: 40 | scs_compliance_results_parsed: "{{ scs_compliance_results.stdout }}" 41 | when: scs_compliance_results is defined 42 | ignore_errors: true 43 | - name: Insert SCS compliance results to the warning message that will be appended to the comment zuul leaves on the PR # noqa: ignore-errors 44 | zuul_return: 45 | data: 46 | zuul: 47 | warnings: 48 | - "
<details>\n <summary>SCS Compliance results</summary>\n{{ scs_compliance_results_parsed }}\n</details>
" 49 | when: scs_compliance_results_parsed is defined and scs_compliance_results_parsed | length > 0 50 | ignore_errors: true 51 | -------------------------------------------------------------------------------- /playbooks/tasks/sonobouy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Execute sonobouy check mode {{ sonobouy.mode }} 3 | block: 4 | - name: Execute sonobouy 5 | ansible.builtin.command: "make check-{{ sonobouy.mode }}" 6 | register: sonobouy_results 7 | args: 8 | chdir: "{{ project_tf_dir }}" 9 | changed_when: true 10 | always: 11 | - name: Parse sonobouy results # noqa: ignore-errors 12 | ansible.builtin.set_fact: 13 | sonobouy_results_parsed: "{{ sonobouy_results.stdout | regex_search('=== Collecting results ===[\\S\\s]*') }}" 14 | when: sonobouy_results is defined 15 | ignore_errors: true 16 | - name: Insert sonobouy results to the warning message that will be appended to the comment zuul leaves on the PR # noqa: ignore-errors 17 | zuul_return: 18 | data: 19 | zuul: 20 | warnings: 21 | - "
<details>\n <summary>Sonobuoy results</summary>\n{{ sonobouy_results_parsed }}\n</details>
" 22 | when: sonobouy_results_parsed is defined and sonobouy_results_parsed | length > 0 23 | ignore_errors: true 24 | -------------------------------------------------------------------------------- /playbooks/templates/environment.tfvars.j2: -------------------------------------------------------------------------------- 1 | cloud_provider = "{{ cloud_provider }}" 2 | prefix = "{{ prefix }}" 3 | testcluster_name = "{{ testcluster_name }}" 4 | 5 | availability_zone = "nova" 6 | external = "ext01" 7 | dns_nameservers = ["62.138.222.111", "62.138.222.222"] 8 | kind_flavor = "SCS-2V:4" 9 | controller_flavor = "SCS-2V-4-20s" 10 | worker_flavor = "SCS-2V:4:20" 11 | 12 | controller_metadata = { 13 | ps_restart_after_maint = "true" 14 | } 15 | 16 | # FIXME: Remove when CI runs on gx-scs2 environment(3+ physical machines for local ssd flavors) 17 | soft_anti_affinity_controller = true 18 | 19 | controller_count = 3 20 | worker_count = 3 21 | 22 | {{ zuul_config|default("") }} 23 | -------------------------------------------------------------------------------- /terraform/clouds.yaml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | clouds: 3 | default: 4 | interface: "public" 5 | identity_api_version: 3 6 | image_api_version: 2 7 | auth: 8 | auth_url: 9 | project_name: 10 | user_domain_name: 11 | # not required if you dont use make purge, can usually be set to the same value as user_domain_name 12 | project_domain_name: 13 | region_name: 14 | # custom CA 15 | cacert: 16 | -------------------------------------------------------------------------------- /terraform/environments/environment-default.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for OpenStack 2 | # 3 | prefix = "" # defaults to "capi" 4 | cloud_provider = "" 5 | availability_zone = "" 6 | external = "" # defaults to "" using auto-detection 7 | external_id = "" # defaults to "" using auto-detection 8 | dns_nameservers = [ "DNS_IP1", "DNS_IP2" ] # defaults to [ "5.1.66.255", "185.150.99.255" ] (FF MUC) 9 | kind_flavor = "" # defaults to SCS-2V-4 (larger does not hurt) 10 | ssh_username = "" # defaults to "ubuntu" 11 | clusterapi_version = "<1.x.y>" # defaults to `1.6.3` 12 | capi_openstack_version = "<0.x.y>" # defaults to `0.9.0` 13 | image = "" # defaults to "Ubuntu 22.04" 14 | cilium_binaries = "" # defaults to "v0.15.23;v0.13.0" 15 | mgmt_cidr = "" # defaults to "10.0.0.0/24" 16 | mgmt_ip_range = {"start": "", "end": ""} # defaults to '{"start": "10.0.0.11", "end": "10.0.0.254"}' 17 | # Settings for testcluster 18 | kubernetes_version = "" # defaults to "v1.28.x" 19 | kube_image_raw = "" # defaults to "true" 20 | calico_version = "" # defaults to `v3.27.3` 21 | controller_flavor = "" # defaults to SCS-2V-4-20s (use etcd tweaks if you only have SCS-2V-4-20 in multi-controller setups) 22 | worker_flavor = "" # defaults to SCS-2V-4-20s (larger helps) 23 | controller_count = # defaults to 1 (0 skips testcluster creation) 24 | worker_count = # defaults to 3 25 | kind_mtu = # defaults to 0 (autodetection) 26 | node_cidr = "" # defaults to "10.8.0.0/20" 27 | service_cidr = "" # defaults to "10.96.0.0/12" 28 | pod_cidr = "" # defaults to "192.168.0.0/16" 29 | anti_affinity = "" # defaults to "true" 30 | soft_anti_affinity_controller = "" # defaults to "false" 31 | use_cilium = "version/true/false" # defaults to "true", can also be set to "vx.y.z", also see cilium_binaries 32 | use_ovn_lb_provider = "auto/true/false" # use OVN LB 
if available (auto) or force (true) or never (false) 33 | deploy_nginx_ingress = "version/true/false" # defaults to "true", you can also set to vX.Y.Z if you want 34 | deploy_gateway_api = "true/false" # defaults to "false" 35 | deploy_cert_manager = "version/true/false" # defaults to "false", you can also set to vX.Y.Z if you want 36 | deploy_flux = "version/true/false" # defaults to "false", you can also set to vX.Y.Z if you want 37 | deploy_metrics = "" # defaults to "true" 38 | deploy_occm = "" # defaults to "true" (meaning matching k8s) 39 | deploy_cindercsi = "" # defaults to "true", ditto 40 | etcd_unsafe_fs = "" # defaults to "false", dangerous 41 | testcluster_name = "NAME" # defaults to "testcluster" 42 | restrict_kubeapi = [ "IP/20", "IP/22" ] # defaults to empty (fully open), use [ "none" ] for exclusive internal access 43 | controller_metadata = { metadata_key = "metadata_value" } # defaults to empty dict (no additional metadata) 44 | worker_metadata = { metadata_key = "metadata_value" } # defaults to empty dict (no additional metadata) 45 | containerd_registry_files = {"hosts":[""], "certs":[""]} # defaults to '{"hosts":["./files/containerd/docker.io"], "certs":[]}' 46 | deploy_harbor = "" # defaults to "false", "true" deploys Harbor and forces deployment of flux and potentially other services (`cert_manager`, `nginx_ingress` and `cindercsi`), see `doc/usage/harbor.md` 47 | harbor_config = {"domain_name":"", "issuer_email":"", "persistence":"", "database_size":"size", "redis_size":"size", "trivy_size":"size"} # for defaults see ../variables.tf 48 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-bc.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for gx-bc 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-bc" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-4GB-40GB" 9 | worker_flavor = "4C-4GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-bc2.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-2GB" 8 | controller_flavor = "2C-2GB-20GB" 9 | worker_flavor = "2C-2GB-20GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "4C-8GB-40GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud2.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud2" 5 | availability_zone = "south-2" 6 | external = 
"external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud3.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud3" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud4.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud4" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud5.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud5" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud6.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud6" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-betacloud7.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Betacloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-betacloud7" 5 | availability_zone = "south-2" 6 | external = "external" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "4C-8GB-40GB" 9 | worker_flavor = "4C-8GB-40GB" 10 | image = "Ubuntu 20.04" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-citycloud.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Citycloud 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-citycloud" 5 | availability_zone = "nova" 6 | external = "ext-net" 7 | kind_flavor = "2C-4GB" 8 | controller_flavor = "2C-4GB-50GB_LOCAL_SSD" 9 | worker_flavor = "2C-4GB-50GB" 10 | image = "Ubuntu 20.04 Focal Fossa 20200423" 11 | ssh_username = "ubuntu" 12 | 13 | -------------------------------------------------------------------------------- 
/terraform/environments/environment-gx-scs-staging.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for gx-scs 2 | # 3 | prefix = "capi" 4 | cloud_provider = "gx-scs-staging" 5 | availability_zone = "nova" 6 | external = "ext01" 7 | kind_flavor = "SCS-2V:4" 8 | controller_flavor = "SCS-4V-16-100s" 9 | worker_flavor = "SCS-8V:16:100" 10 | #image = "Ubuntu 22.04" 11 | #ssh_username = "ubuntu" 12 | controller_metadata = { 13 | ps_restart_after_maint = "true" 14 | } 15 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-scs.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for gx-scs 2 | # 3 | cloud_provider = "gx-scs" 4 | availability_zone = "nova" 5 | external = "ext01" 6 | kind_flavor = "SCS-2V:4" 7 | controller_flavor = "SCS-2V-4-20s" 8 | worker_flavor = "SCS-2V:4:20" 9 | #image = "Ubuntu 22.04" 10 | #ssh_username = "ubuntu" 11 | #kube_image_raw = "true" 12 | dns_nameservers = ["62.138.222.111", "62.138.222.222"] 13 | #controller_count = 0 14 | controller_metadata = { 15 | ps_restart_after_maint = "true" 16 | } 17 | -------------------------------------------------------------------------------- /terraform/environments/environment-gx-wavestack.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for Wavestack 2 | # 3 | prefix = "capi" 4 | cloud_provider = "wave-scs" 5 | availability_zone = "muc5-a" 6 | #external = "external" 7 | kind_flavor = "SCS-2V-4" 8 | controller_flavor = "SCS-2V-4-20s" 9 | worker_flavor = "SCS-4V-8-20" 10 | #image = "Ubuntu 22.04" 11 | #ssh_username = "ubuntu" 12 | -------------------------------------------------------------------------------- /terraform/environments/environment-regio.tfvars: -------------------------------------------------------------------------------- 1 | # a working set for regiocloud 2 | # 3 | cloud_provider = "regio" 4 | availability_zone = "nova" 5 | external = "public" 6 | kind_flavor = "SCS-2V-4-20s" 7 | # Settings for testcluster 8 | worker_flavor = "SCS-2V-8-20" # defaults to SCS-2V-4-20 (larger helps) 9 | anti_affinity = "true" # defaults to "true" 10 | -------------------------------------------------------------------------------- /terraform/extension/01_example.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "This is an example script, it is executed automatically at the end" 3 | exit 0 4 | -------------------------------------------------------------------------------- /terraform/files/bin/add_cluster-network.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Attach a port from the workload cluster network to the mgmtcluster node 3 | # so we can talk to the nodes (ssh login, ...) -- for debugging only 4 | # (c) Kurt Garloff , 1/2022 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | . ~/.capi-settings 8 | . 
~/bin/cccfg.inc 9 | 10 | OLDNICLIST=($(ls /sys/class/net | sort)) 11 | findnewnic() 12 | { 13 | NEWNIC="" 14 | NEWNICLIST=($(ls /sys/class/net | sort)) 15 | for i in $(seq 0 ${#NEWNICLIST[*]}); do 16 | if test "${NEWNICLIST[$i]}" != "${OLDNICLIST[$i]}"; then 17 | NEWNIC="${NEWNICLIST[$i]}" 18 | return 0 19 | fi 20 | done 21 | return 1 22 | } 23 | 24 | MGMT="$PREFIX-mgmtcluster" 25 | # FIXME: We already know the name ($PREFIX-mgmtcluster) 26 | #MGMT=$(openstack server list --name "$PREFIX-mgmtcluster" -f value -c Name) 27 | NETWORK=$(openstack network list -f value -c Name | grep "k8s-clusterapi-cluster-\(default-${CLUSTER_NAME}\|${CLUSTER_NAME}-${CLUSTER_NAME}\)") 28 | openstack server add network $MGMT $NETWORK || exit 29 | WAIT=0 30 | while test $WAIT -lt 30; do 31 | findnewnic 32 | if test $? = 0; then break; fi 33 | sleep 1 34 | let WAIT+=1 35 | done 36 | #sudo dhclient $NEWNIC 37 | #sudo ip route del default via 10.8.0.1 dev $NEWNIC 38 | #sudo ip route del default dev $NEWNIC 39 | MAC=$(ip link show $NEWNIC | grep 'link/ether' | sed 's/^ *link\/ether \([0-9a-f:]*\) .*$/\1/') 40 | IP=$(openstack port list --mac=$MAC -f value -c 'Fixed IP Addresses' | sed "s/^.*'ip_address': '\([0-9\.]*\)'.*\$/\1/") 41 | NETMASK=$(grep NODE_CIDR "$CCCFG" | head -n 1 | sed 's/^.*NODE_CIDR: //') 42 | NETMASK=${NETMASK#*/} 43 | sudo ip link set dev $NEWNIC up 44 | sudo ip add add $IP/$NETMASK dev $NEWNIC 45 | echo "Added NIC $NEWNIC (MAC $MAC) with addr $IP/$NETMASK" 46 | -------------------------------------------------------------------------------- /terraform/files/bin/apply_cert_manager.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # imports 4 | . ~/bin/utils.inc 5 | . ~/bin/cccfg.inc 6 | . ~/$CLUSTER_NAME/harbor-settings 7 | 8 | # Switch to capi workload cluster 9 | if [ -z ${KCONTEXT} ]; then 10 | setup_kubectl_context_workspace 11 | set_workload_cluster_kubectl_namespace 12 | fi 13 | 14 | echo "Deploy cert-manager to $CLUSTER_NAME" 15 | # cert-manager 16 | DEPLOY_CERT_MANAGER=$(yq eval '.DEPLOY_CERT_MANAGER' $CCCFG) 17 | if test "$DEPLOY_CERT_MANAGER" = "false" -a "$DEPLOY_HARBOR" = "true" -a -n "$HARBOR_DOMAIN_NAME"; then 18 | DEPLOY_CERT_MANAGER="true" 19 | fi 20 | if test "$DEPLOY_CERT_MANAGER" = "true"; then 21 | CERTMGR_VERSION="v1.14.2" 22 | elif test "$DEPLOY_CERT_MANAGER" = "false"; then 23 | echo "cert-manager disabled" 1>&2; exit 1 24 | else 25 | CERTMGR_VERSION="$DEPLOY_CERT_MANAGER" 26 | fi 27 | 28 | if test ! -s ~/kubernetes-manifests.d/cert-manager-${CERTMGR_VERSION}.yaml; then 29 | # FIXME: Check sig 30 | curl -L https://github.com/cert-manager/cert-manager/releases/download/${CERTMGR_VERSION}/cert-manager.yaml > ~/kubernetes-manifests.d/cert-manager-${CERTMGR_VERSION}.yaml || exit 2 31 | fi 32 | 33 | cp -p ~/kubernetes-manifests.d/cert-manager-${CERTMGR_VERSION}.yaml ~/${CLUSTER_NAME}/deployed-manifests.d/cert-manager.yaml 34 | kubectl --context=$KCONTEXT apply -f ~/${CLUSTER_NAME}/deployed-manifests.d/cert-manager.yaml || exit 9 35 | 36 | # TODO: Optionally test, using cert-manager-test.yaml 37 | # See https://cert-manager.io/docs/installation/kubernetes/ 38 | # kubectl plugin 39 | #if ! 
test -x /usr/local/bin/kubectl-cert_manager; then 40 | # # FIXME: Check sig 41 | # curl -L -o kubectl-cert-manager.tar.gz https://github.com/cert-manager/cert-manager/releases/download/v${CERTMGR_VERSION}/kubectl-cert_manager-linux-amd64.tar.gz 42 | # tar xzf kubectl-cert-manager.tar.gz && rm kubectl-cert-manager.tar.gz 43 | # sudo mv kubectl-cert_manager /usr/local/bin 44 | #fi 45 | # cmctl -- don't treat trouble as fatal error 46 | if ! test -x /usr/local/bin/cmctl-$CERTMGR_VERSION; then 47 | cd ~ 48 | OS=linux; ARCH=$(uname -m | sed 's/x86_64/amd64/') 49 | # FIXME: Check sig 50 | curl -L -o cmctl.tar.gz https://github.com/cert-manager/cert-manager/releases/download/${CERTMGR_VERSION}/cmctl-$OS-$ARCH.tar.gz 51 | tar xzf cmctl.tar.gz && rm cmctl.tar.gz 52 | sudo mv cmctl /usr/local/bin/cmctl-${CERTMGR_VERSION} 53 | sudo ln -sf cmctl-${CERTMGR_VERSION} /usr/local/bin/cmctl 54 | mv LICENS* ~/doc/LICENSE.cert-manager-${CERTMGR_VERSION} 55 | fi 56 | -------------------------------------------------------------------------------- /terraform/files/bin/apply_cindercsi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # deploy_cindercsi.sh 3 | 4 | # imports 5 | . ~/bin/utils.inc 6 | . ~/bin/cccfg.inc 7 | . ~/bin/openstack-kube-versions.inc 8 | . ~/$CLUSTER_NAME/harbor-settings 9 | 10 | # Switch to capi workload cluster 11 | if [ -z ${KCONTEXT} ]; then 12 | setup_kubectl_context_workspace 13 | set_workload_cluster_kubectl_namespace 14 | fi 15 | 16 | # apply cinder-csi 17 | KUBERNETES_VERSION=$(yq eval '.KUBERNETES_VERSION' $CCCFG) 18 | DEPLOY_CINDERCSI=$(yq eval '.DEPLOY_CINDERCSI' $CCCFG) 19 | if test "$DEPLOY_CINDERCSI" = "null"; then DEPLOY_CINDERCSI=true; fi 20 | cd ~/kubernetes-manifests.d/ 21 | if test "$DEPLOY_CINDERCSI" = "false"; then 22 | if test "$DEPLOY_HARBOR" = "true" -a "$HARBOR_PERSISTENCE" = "true"; then 23 | echo "INFO: Installation of Cinder CSI forced by Harbor deployment" 24 | DEPLOY_CINDERCSI=true 25 | else 26 | exit 1 27 | fi 28 | fi 29 | if test "$DEPLOY_CINDERCSI" = "true"; then 30 | find_openstack_versions $KUBERNETES_VERSION 31 | else 32 | find_openstack_versions $DEPLOY_CINDERCSI 33 | CCSI_VERSION=$DEPLOY_CINDERCSI 34 | fi 35 | echo "# Install Cinder CSI persistent storage support $CCSI_VERSION to $CLUSTER_NAME" 36 | 37 | SNAP_VERSION="master" 38 | # deploy snapshot CRDs 39 | for name in snapshot.storage.k8s.io_volumesnapshotcontents.yaml snapshot.storage.k8s.io_volumesnapshotclasses.yaml snapshot.storage.k8s.io_volumesnapshots.yaml; do 40 | NAME=${name%.yaml}-$SNAP_VERSION.yaml 41 | if ! test -s $NAME; then 42 | curl -L https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/$SNAP_VERSION/client/config/crd/$name -o $NAME 43 | echo -e "\n---" >> $NAME 44 | fi 45 | done 46 | # FIXME: Should we ignore non-working snapshots? 47 | cat snapshot.storage.k8s.io_volumesnapshot* > cindercsi-snapshot-$SNAP_VERSION.yaml 48 | 49 | # deploy snapshot controller 50 | for name in rbac-snapshot-controller.yaml setup-snapshot-controller.yaml; do 51 | NAME=${name%.yaml}-$SNAP_VERSION.yaml 52 | if ! 
test -s $NAME; then 53 | curl -L https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/$SNAP_VERSION/deploy/kubernetes/snapshot-controller/$name -o $NAME 54 | echo -e "\n---" >> $NAME 55 | fi 56 | cat $NAME >> cindercsi-snapshot-$SNAP_VERSION.yaml 57 | done 58 | 59 | cp -p cindercsi-snapshot-$SNAP_VERSION.yaml ~/${CLUSTER_NAME}/deployed-manifests.d/cindercsi-snapshot.yaml 60 | 61 | if test -n "$CCSI_VERSION"; then 62 | # Now get cinder 63 | for name in cinder-csi-controllerplugin-rbac.yaml cinder-csi-controllerplugin.yaml cinder-csi-nodeplugin-rbac.yaml cinder-csi-nodeplugin.yaml csi-cinder-driver.yaml csi-secret-cinderplugin.yaml; do 64 | NAME=${name%.yaml}-$CCSI_VERSION.yaml 65 | if ! test -s $NAME; then 66 | #curl -L https://github.com/kubernetes/cloud-provider-openstack/raw/master/manifests/cinder-csi-plugin/$name -o $NAME 67 | curl -L https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/$CCSI_VERSION/manifests/cinder-csi-plugin/$name -o $NAME 68 | echo -e "\n---" >> $NAME 69 | fi 70 | done 71 | # Note: We leave out the secret which we should already have 72 | cat cinder-csi-*-rbac-$CCSI_VERSION.yaml cinder-csi-*plugin-$CCSI_VERSION.yaml csi-cinder-driver-$CCSI_VERSION.yaml cinder-provider.yaml > cindercsi-$CCSI_VERSION.yaml 73 | # correct ccsi image version - workaround for the https://github.com/kubernetes/cloud-provider-openstack/issues/2094 74 | sed -i "s|\(docker.io/k8scloudprovider/cinder-csi-plugin:\).*|\1$CCSI_VERSION|g" cindercsi-$CCSI_VERSION.yaml 75 | CCSI=cindercsi-$CCSI_VERSION.yaml 76 | else 77 | CCSI=cinder.yaml 78 | fi 79 | kubectl --context=$KCONTEXT apply -f ~/$CLUSTER_NAME/deployed-manifests.d/cindercsi-snapshot.yaml || exit 8 80 | CACERT=$(print-cloud.py | yq eval '.clouds."'"$OS_CLOUD"'".cacert // "null"' -) 81 | if test "$CACERT" != "null"; then 82 | CAMOUNT="/etc/ssl/certs" # see prepare_openstack.sh, CACERT is already injected in the k8s nodes 83 | CAVOLUME="cacert" 84 | declare -a plugins=("csi-cinder-controllerplugin" "csi-cinder-nodeplugin") 85 | for plugin in "${plugins[@]}"; do 86 | # test if volume exists - also need to provide default value(// empty array) in expression in case of missing volumes(array) 87 | volume=$(yq 'select(.metadata.name == "'"$plugin"'").spec.template.spec | (.volumes // (.volumes = []))[] | select(.name == "'"$CAVOLUME"'")' $CCSI) 88 | # if volume does not exist, inject CACERT volume 89 | if test -z "$volume"; then 90 | yq 'select(.metadata.name == "'"$plugin"'").spec.template.spec.volumes += [{"name": "'"$CAVOLUME"'", "hostPath": {"path": "'"$CAMOUNT"'"}}]' -i $CCSI 91 | yq '(select(.metadata.name == "'"$plugin"'").spec.template.spec.containers[] | select(.name == "cinder-csi-plugin").volumeMounts) += [{"name": "'"$CAVOLUME"'", "mountPath": "'"$CAMOUNT"'", "readOnly": true}]' -i $CCSI 92 | fi 93 | done 94 | fi 95 | sed "/ *\- name: CLUSTER_NAME/{n 96 | s/value: .*\$/value: ${CLUSTER_NAME}/ 97 | }" $CCSI > ~/$CLUSTER_NAME/deployed-manifests.d/cindercsi.yaml 98 | kubectl --context=$KCONTEXT apply -f ~/${CLUSTER_NAME}/deployed-manifests.d/cindercsi.yaml || exit 8 99 | 100 | -------------------------------------------------------------------------------- /terraform/files/bin/apply_kubeapi_cidrs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # apply_kubeapi_cidrs.sh 3 | # Patch $2 (${CLUSTER_NAME}-config.yaml) to enforce API access restrictions (if enabled) 4 | # Reference: 
https://cluster-api-openstack.sigs.k8s.io/clusteropenstack/configuration.html#restrict-access-to-the-api-server 5 | # (c) Kurt Garloff, 03/2023 6 | # SPDX-License-Identifier: Apache-2.0 7 | . /etc/profile.d/proxy.sh 8 | # Test if passed list is empty 9 | empty_list() 10 | { 11 | if test -z "$1" -o "$1" = "null"; then return 0; fi 12 | if test "$1" = "[]" -o "$1" = "[ ]" -o "$1" = "[ ]"; then return 0; fi 13 | return 1 14 | } 15 | 16 | get_own_fip() 17 | { 18 | NETS=$(openstack server list --name "${PREFIX}-mgmtcluster" -f value -c Networks) 19 | #FIP=${NETS##*, } 20 | FIP=$(echo "$NETS" | sed "s/^.*, [']\{0,1\}\(\([0-9]*\.\)\{3\}[0-9]*\).*\$/\1/g") 21 | } 22 | 23 | # Add access restrictions 24 | # Input is a list in brackets. 25 | # Ignore none, always add own FIP 26 | kustomize_cluster_cidrs() 27 | { 28 | # Namespace is the same as cluster name 29 | OPENSTACK_CLUSTER_NAMESPACE=$(kubectl get ns $CLUSTER_NAME -o jsonpath='{.metadata.name}' 2>/dev/null) 30 | KPATCH=~/${CLUSTER_NAME}/restrict-kubeapi-cidr.yaml 31 | cat >$KPATCH <> $KPATCH 53 | done 54 | cp -p "$2" "$2.orig" 55 | #cat $KPATCH 56 | kustpatch.sh $KPATCH <"$2.orig" >"$2" 57 | RC=$? 58 | if test $RC != 0; then cp -p "$2.orig" "$2"; fi 59 | return $RC 60 | } 61 | 62 | if test -z "$2"; then echo "ERROR: Need clusterctl.yaml cluster-template args" 1>&2; exit 1; fi 63 | RESTRICT_KUBEAPI=$(yq eval .RESTRICT_KUBEAPI $1) 64 | if empty_list "$RESTRICT_KUBEAPI"; then exit 0; fi 65 | get_own_fip 66 | OPENSTACK_CLUSTER_GEN=$(yq eval '.OPENSTACK_CLUSTER_GEN' $1) 67 | kustomize_cluster_cidrs "$RESTRICT_KUBEAPI" "$2" 68 | -------------------------------------------------------------------------------- /terraform/files/bin/apply_metrics.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export KUBECONFIG=~/.kube/config 3 | 4 | # imports 5 | . ~/bin/utils.inc 6 | . ~/bin/cccfg.inc 7 | 8 | # Switch to capi workload cluster 9 | if [ -z ${KCONTEXT} ]; then 10 | setup_kubectl_context_workspace 11 | set_workload_cluster_kubectl_namespace 12 | fi 13 | 14 | METRICS_VERSION=v0.7.0 15 | 16 | echo "Deploy metrics server to $CLUSTER_NAME" 17 | # Metrics server 18 | if test ! -s ~/kubernetes-manifests.d/metrics-server-${METRICS_VERSION}.yaml; then 19 | curl -L https://github.com/kubernetes-sigs/metrics-server/releases/download/$METRICS_VERSION/components.yaml | sed '/ - --kubelet-use-node-status-port/a\ - --kubelet-insecure-tls' > ~/kubernetes-manifests.d/metrics-server-${METRICS_VERSION}.yaml 20 | fi 21 | cp -p ~/kubernetes-manifests.d/metrics-server-${METRICS_VERSION}.yaml ~/${CLUSTER_NAME}/deployed-manifests.d/metrics-server.yaml 22 | kubectl --context=$KCONTEXT apply -f ~/${CLUSTER_NAME}/deployed-manifests.d/metrics-server.yaml || exit 9 23 | -------------------------------------------------------------------------------- /terraform/files/bin/apply_nginx_ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # imports 4 | . ~/bin/utils.inc 5 | . ~/bin/cccfg.inc 6 | . ~/$CLUSTER_NAME/harbor-settings 7 | 8 | # Switch to capi workload cluster 9 | if [ -z ${KCONTEXT} ]; then 10 | setup_kubectl_context_workspace 11 | set_workload_cluster_kubectl_namespace 12 | fi 13 | 14 | # Are we enabled? Has a version been set explicitly? 
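# Example (hypothetical values; a sketch following the "version/true/false"
# convention documented in environment-default.tfvars -- the actual setting
# is read from clusterctl.yaml via $CCCFG just below):
#   DEPLOY_NGINX_INGRESS: "true"   -> deploy the pinned default (v1.9.6 below)
#   DEPLOY_NGINX_INGRESS: "false"  -> skip, unless the Harbor settings force an ingress
#   DEPLOY_NGINX_INGRESS: "v1.9.6" -> deploy exactly this controller release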
15 | DEPLOY_NGINX_INGRESS=$(yq eval '.DEPLOY_NGINX_INGRESS' $CCCFG) 16 | if test "$DEPLOY_NGINX_INGRESS" = "false" -a "$DEPLOY_HARBOR" = "true" -a -n "$HARBOR_DOMAIN_NAME"; then 17 | DEPLOY_NGINX_INGRESS="true" 18 | fi 19 | if test "$DEPLOY_NGINX_INGRESS" = "true"; then 20 | NGINX_VERSION="v1.9.6" 21 | elif test "$DEPLOY_NGINX_INGRESS" = "false"; then 22 | echo "nginx ingress disabled" 1>&2; exit 1 23 | else 24 | NGINX_VERSION="$DEPLOY_NGINX_INGRESS" 25 | fi 26 | NGINX_INGRESS_PROXY=$(yq eval '.NGINX_INGRESS_PROXY' $CCCFG) 27 | NODE_CIDR=$(yq eval '.NODE_CIDR' $CCCFG) 28 | 29 | cd ~/kubernetes-manifests.d/nginx-ingress 30 | echo "Deploy NGINX ingress $NGINX_VERSION controller to $CLUSTER_NAME" 31 | if test ! -s base/nginx-ingress-controller-${NGINX_VERSION}.yaml; then 32 | curl -L https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-${NGINX_VERSION}/deploy/static/provider/cloud/deploy.yaml > base/nginx-ingress-controller-${NGINX_VERSION}.yaml || exit 2 33 | fi 34 | # Default to original (may be overwritten by kustomize) 35 | cp -p base/nginx-ingress-controller-${NGINX_VERSION}.yaml ~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml 36 | ln -sf nginx-ingress-controller-${NGINX_VERSION}.yaml base/nginx-ingress-controller.yaml 37 | if test "$NGINX_INGRESS_PROXY" = "false"; then 38 | if ! grep '^create\-monitor=true' ~/$CLUSTER_NAME/cloud.conf >/dev/null 2>&1; then 39 | kustomize build nginx-monitor > ~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml || exit 3 40 | fi 41 | else 42 | if ! grep '^lb\-provider=ovn' ~/$CLUSTER_NAME/cloud.conf >/dev/null 2>&1; then 43 | kustomize build nginx-proxy > ~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml || exit 3 44 | fi 45 | fi 46 | sed -i "s@set-real-ip-from: .*\$@set-real-ip-from: \"${NODE_CIDR}\"@" ~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml 47 | sed -i "s@proxy-real-ip-cidr: .*\$@proxy-real-ip-cidr: \"${NODE_CIDR}\"@" ~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml 48 | kubectl --context=$KCONTEXT apply -f ~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml 49 | -------------------------------------------------------------------------------- /terraform/files/bin/apply_openstack_integration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # imports 4 | . ~/bin/utils.inc 5 | . ~/bin/cccfg.inc 6 | . 
~/bin/openstack-kube-versions.inc 7 | 8 | # Switch to capi workload cluster 9 | if [ -z ${KCONTEXT} ]; then 10 | setup_kubectl_context_workspace 11 | set_workload_cluster_kubectl_namespace 12 | fi 13 | 14 | kubectl --context=$KCONTEXT create secret generic cloud-config --from-file="$HOME/$CLUSTER_NAME/"cloud.conf -n kube-system #|| exit 6 15 | 16 | cd ~/kubernetes-manifests.d 17 | # install external cloud-provider openstack 18 | KUBERNETES_VERSION=$(yq eval '.KUBERNETES_VERSION' $CCCFG) 19 | DEPLOY_OCCM=$(yq eval '.DEPLOY_OCCM' $CCCFG) 20 | if test "$DEPLOY_OCCM" = "null"; then DEPLOY_OCCM=true; fi 21 | if test "$DEPLOY_OCCM" = "false"; then echo "ERROR: k8s will be uninitialized without occm" 1>&2; exit 1; fi 22 | if test "$DEPLOY_OCCM" = "true"; then 23 | find_openstack_versions $KUBERNETES_VERSION 24 | else 25 | find_openstack_versions $DEPLOY_OCCM 26 | if test "$OCCM_VERSION" = "$CCMR_VERSION"; then 27 | OCCM_VERSION=$DEPLOY_OCCM 28 | CCMR_VERSION=$DEPLOY_OCCM 29 | else 30 | OCCM_VERSION=$DEPLOY_OCCM 31 | fi 32 | fi 33 | echo "# Install external OpenStack cloud provider $OCCM_VERSION to $CLUSTER_NAME" 34 | 35 | if test -n "$OCCM_VERSION"; then 36 | for name in openstack-cloud-controller-manager-ds.yaml openstack-cloud-controller-manager-pod.yaml; do 37 | NAME=${name%.yaml}-$OCCM_VERSION.yaml 38 | if test ! -s $NAME; then 39 | curl -L https://github.com/kubernetes/cloud-provider-openstack/raw/$OCCM_VERSION/manifests/controller-manager/$name -o $NAME 40 | echo -e "\n---" >> $NAME 41 | # correct occm image version - workaround for the https://github.com/kubernetes/cloud-provider-openstack/issues/2094 42 | sed -i "s|\(docker.io/k8scloudprovider/openstack-cloud-controller-manager:\).*|\1$OCCM_VERSION|g" $NAME 43 | fi 44 | done 45 | OCCM=openstack-cloud-controller-manager-ds-$OCCM_VERSION.yaml 46 | else 47 | OCCM=openstack.yaml 48 | fi 49 | 50 | if test -n "$CCMR_VERSION"; then 51 | for name in cloud-controller-manager-role-bindings.yaml cloud-controller-manager-roles.yaml; do 52 | NAME=${name%.yaml}-$CCMR_VERSION.yaml 53 | if test ! 
-s $NAME; then 54 | curl -L https://github.com/kubernetes/cloud-provider-openstack/raw/$CCMR_VERSION/manifests/controller-manager/$name -o $NAME 55 | echo -e "\n---" >> $NAME 56 | fi 57 | done 58 | cat cloud-controller-manager*-$CCMR_VERSION.yaml > cloud-controller-manager-rbac-$CCMR_VERSION.yaml 59 | CCMR=cloud-controller-manager-rbac-$CCMR_VERSION.yaml 60 | else 61 | CCMR=cloud-controller-manager-rbac.yaml 62 | fi 63 | if grep '\-\-cluster\-name=' $OCCM >/dev/null 2>&1; then 64 | sed "/ *\- name: CLUSTER_NAME/{n 65 | s/value: kubernetes/value: ${CLUSTER_NAME}/ 66 | }" $OCCM > ~/${CLUSTER_NAME}/deployed-manifests.d/openstack-cloud-controller-manager.yaml 67 | else 68 | sed -e "/^ \- \/bin\/openstack\-cloud\-controller\-manager/a\ - --cluster-name=${CLUSTER_NAME}" \ 69 | -e "/^ \- \/bin\/openstack\-cloud\-controller\-manager/a\ - --cluster-name=${CLUSTER_NAME}" $OCCM > ~/${CLUSTER_NAME}/deployed-manifests.d/openstack-cloud-controller-manager.yaml 70 | fi 71 | cp -p $CCMR ~/${CLUSTER_NAME}/deployed-manifests.d/cloud-controller-manager-rbac.yaml 72 | kubectl --context=$KCONTEXT apply -f ~/${CLUSTER_NAME}/deployed-manifests.d/cloud-controller-manager-rbac.yaml || exit 7 73 | kubectl --context=$KCONTEXT apply -f ~/${CLUSTER_NAME}/deployed-manifests.d/openstack-cloud-controller-manager.yaml || exit 7 74 | -------------------------------------------------------------------------------- /terraform/files/bin/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## desc: bootstrap a cluster-api environment for openstack 4 | ## license: Apache-2.0 5 | 6 | # Source proxy settings: 7 | . /etc/profile.d/proxy.sh 8 | 9 | # Find helper scripts 10 | export PATH=$PATH:~/bin 11 | 12 | # Need yaml parsing capabilities 13 | # flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo 14 | if type snap >/dev/null 2>&1; then 15 | sudo snap install yq 16 | else 17 | ARCH=`uname -m` 18 | if test "$ARCH" = "x86_64"; then ARCH=amd64; fi 19 | # FIXME: CHECK SIGNATURE 20 | curl -LO https://github.com/mikefarah/yq/releases/download/v4.40.7/yq_linux_$ARCH 21 | chmod +x yq_linux_$ARCH 22 | sudo mv yq_linux_$ARCH /usr/local/bin/yq 23 | fi 24 | 25 | # Source global settings 26 | test -r ~/.capi-settings && source ~/.capi-settings 27 | 28 | # Prepare OpenStack 29 | prepare_openstack.sh 30 | # Start image registration early, so it can proceed in the background 31 | upload_capi_image.sh 32 | 33 | ## install tools and utils at local account 34 | 35 | # install kubectl 36 | sudo apt-get install --no-install-recommends --no-install-suggests -y binutils jq 37 | if type snap >/dev/null 2>&1; then 38 | sudo snap install kubectl --classic 39 | sudo snap install kustomize 40 | else 41 | sudo apt-get install --no-install-recommends --no-install-suggests -y apt-transport-https ca-certificates curl gnupg2 42 | # FIXME: CHECK SIGNATURE 43 | KUBECTLVER=v1.28 44 | curl -fsSL https://pkgs.k8s.io/core:/stable:/$KUBECTLVER/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg 45 | #sudo mkdir -m 755 /etc/apt/keyrings 46 | echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/$KUBECTLVER/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list 47 | sudo apt-get update 48 | sudo apt-get install -y kubectl 49 | # FIXME: CHECK SIGNATURE 50 | KUSTVER=v5.3.0 51 | curl -L 
https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/$KUSTVER/kustomize_${KUSTVER}_linux_amd64.tar.gz | tar xvz 52 | #chmod +x kustomize 53 | sudo mv kustomize /usr/local/bin/ 54 | fi 55 | 56 | install_kube_ps1.sh 57 | 58 | # setup aliases and environment 59 | echo "# setup environment" 60 | cat <<EOF >> ~/.bash_aliases 61 | export PATH=\$PATH:~/bin 62 | # kubernetes-cli 63 | alias k=kubectl 64 | source <( kubectl completion bash | sed 's# kubectl\$# k kubectl\$#' ) 65 | source <( kubectl completion bash ) 66 | 67 | # clusterctl 68 | source <( clusterctl completion bash ) 69 | 70 | # kube_ps1 71 | source ~/.kube-ps1/kube-ps1.sh 72 | 73 | # Error code in prompt 74 | PS1="\${PS1%\\\\\$ } \\\$(kube_ps1) [\\\$?]\\\$ " 75 | # We may do git commits and nano feels unusual ... 76 | export VISUAL=/usr/bin/vim 77 | # eof 78 | EOF 79 | 80 | # openstack completion 81 | openstack complete > ~/.bash_openstack 2>/dev/null 82 | echo -e "#openstack completion\nsource ~/.bash_openstack" >> ~/.bash_aliases 83 | 84 | # set inputrc set tab once 85 | cat <<EOF > .inputrc 86 | # set tab once 87 | set show-all-if-ambiguous on 88 | # alternate mappings for "page up" and "page down" to search the history 89 | "\e[5~": history-search-backward 90 | "\e[6~": history-search-forward 91 | EOF 92 | 93 | install_kind.sh 94 | install_helm.sh 95 | deploy_cluster_api.sh 96 | install_k9s.sh 97 | get_capi_helm.sh 98 | install_kubectx.sh 99 | 100 | # install Flux CLI always - regardless of deploy_flux variable (it can be used only for version change) 101 | DEPLOY_FLUX=`yq eval '.DEPLOY_FLUX' ~/cluster-defaults/clusterctl.yaml` 102 | if test "$DEPLOY_FLUX" = "true" -o "$DEPLOY_FLUX" = "false"; then 103 | FLUX_VERSION="2.2.3" 104 | else 105 | FLUX_VERSION="${DEPLOY_FLUX:1}" 106 | fi 107 | install_flux.sh $FLUX_VERSION 108 | 109 | #git clone https://github.com/Pharb/kubernetes-iperf3.git 110 | 111 | CONTROLLERS=`yq eval '.CONTROL_PLANE_MACHINE_COUNT' ~/cluster-defaults/clusterctl.yaml` 112 | export TESTCLUSTER=${1:-$TESTCLUSTER} 113 | if test "$CONTROLLERS" != "0"; then 114 | create_cluster.sh $TESTCLUSTER 115 | fi 116 | # Extensions 117 | cd extension 118 | for script in $(find ./ -name '*.sh' | sort) 119 | do 120 | echo executing $script 121 | bash $script 122 | done 123 | # eof 124 | -------------------------------------------------------------------------------- /terraform/files/bin/cccfg.inc: -------------------------------------------------------------------------------- 1 | # File to be included 2 | . ~/.capi-settings 3 | . 
/etc/profile.d/proxy.sh 4 | 5 | # All three lines needed to support both orders for these parameters 6 | if test "$1" == "--allow-preview-versions"; then export ALLOW_PREVIEW_VERSIONS=1; shift; fi 7 | if test "$1" == "--allow-preview-features"; then export ALLOW_PREVIEW_FEATURES=1; shift; fi 8 | if test "$1" == "--allow-preview-versions"; then export ALLOW_PREVIEW_VERSIONS=1; shift; fi 9 | if test -n "$1"; then CLUSTER_NAME="${1%/}"; else CLUSTER_NAME="$TESTCLUSTER"; fi 10 | export PREFIX CLUSTER_NAME 11 | KUBECONFIG_WORKLOADCLUSTER="${HOME}/${CLUSTER_NAME}/${CLUSTER_NAME}.yaml" 12 | if test -e "$HOME/${CLUSTER_NAME}/clusterctl.yaml"; then 13 | CCCFG="$HOME/${CLUSTER_NAME}/clusterctl.yaml" 14 | else 15 | CCCFG=$HOME/cluster-defaults/clusterctl.yaml 16 | fi 17 | 18 | export OS_CLOUD=$(yq eval '.OPENSTACK_CLOUD' $CCCFG) 19 | -------------------------------------------------------------------------------- /terraform/files/bin/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # cleanup.sh 3 | 4 | # Source .bash_aliases in case we are called from non-interactive bash (Makefile) 5 | source ~/.bash_aliases 6 | 7 | export KUBECONFIG=~/.kube/config 8 | kubectl config use-context kind-kind 9 | CLUSTERS=$(kubectl get cluster --all-namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}') 10 | echo "Deleting all clusters: $CLUSTERS" 11 | echo "Hit ^C to interrupt" 12 | sleep 3 13 | #for file in *-config.yaml; do cluster="${file%-config.yaml}" 14 | for cluster in $CLUSTERS; do 15 | ~/bin/delete_cluster.sh "$cluster" 16 | done 17 | kubectl get clusters --all-namespaces 18 | -------------------------------------------------------------------------------- /terraform/files/bin/clusterctl_template.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Fill in OPENSTACK_CLOUD_YAML_B64, OPENSTACK_CLOUD_PROVIDER_CONF_B64, 3 | # OPENSTACK_CLOUD_CACERT_B64 into clusterctl.yaml 4 | . /etc/profile.d/proxy.sh 5 | # yq installation done by bootstrap.sh 6 | #sudo snap install yq 7 | if test -z "$1"; then CLUSTER_NAME="cluster-defaults"; else CLUSTER_NAME="$1"; fi 8 | 9 | # Encode clouds.yaml 10 | # Using application credentials, we don't need project_id, and openstackclient is 11 | # even confused (asking for scoped tokens which fails). However, the cluster-api-provider-openstack 12 | # does not consider the AuthInfo to be valid if there is no projectID. It knows how to derive it 13 | # from the name, but not how to derive it from an application credential. (Not sure gophercloud 14 | # even has the needed helpers.)
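# Illustration only -- a hedged sketch of the clouds.yaml shape in question
# (cloud name and credential values are hypothetical):
#   clouds:
#     mycloud:
#       auth_type: v3applicationcredential
#       auth:
#         auth_url: https://keystone.example.com/v3
#         application_credential_id: "<id>"
#         application_credential_secret: "<secret>"
#         # no project_id here -- the provider cannot derive it, so the
#         # PROJECTID logic below digs it out of cloud.conf (tenant-id).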
15 | if test -z "$PROJECTID"; then 16 | PROJECTID=$(grep 'tenant.id=' ~/$CLUSTER_NAME/cloud.conf | sed 's/^[^=]*=//') 17 | else 18 | sed -i "s/^tenant.id=.*\$/tenant-id=$PROJECTID/" ~/$CLUSTER_NAME/cloud.conf 19 | fi 20 | #CLOUD_YAML_ENC=$( (cat ~/.config/openstack/clouds.yaml; echo " project_id: $PROJECTID") | base64 -w 0) 21 | OLD_OS_CLOUD=$(yq eval '.OPENSTACK_CLOUD' ~/$CLUSTER_NAME/clusterctl.yaml) 22 | if test -z "$OS_CLOUD"; then 23 | OS_CLOUD=$OLD_OS_CLOUD 24 | fi 25 | CLOUD_YAML_ENC=$(print-cloud.py -s | sed 's/#project_id:/project_id:/' | base64 -w 0) 26 | #echo $CLOUD_YAML_ENC 27 | 28 | # Encode cloud.conf 29 | CLOUD_CONF_ENC=$(base64 -w 0 ~/$CLUSTER_NAME/cloud.conf) 30 | #echo $CLOUD_CONF_ENC 31 | 32 | #Get CA and Encode CA 33 | # Update OPENSTACK_CLOUD 34 | if test "$OS_CLOUD" != "$OLD_OS_CLOUD"; then 35 | echo "#Info: Changing OPENSTACK_CLOUD from $OLD_OS_CLOUD to $OS_CLOUD" 36 | yq eval '.OPENSTACK_CLOUD = "'"$OS_CLOUD"'"' -i ~/$CLUSTER_NAME/clusterctl.yaml 37 | sed -i "/^OPENSTACK_CLOUD:/a\ 38 | OLD_OPENSTACK_CLOUD: $OLD_OS_CLOUD" ~/$CLUSTER_NAME/clusterctl.yaml 39 | fi 40 | 41 | CACERT=$(print-cloud.py | yq eval '.clouds."'"$OS_CLOUD"'".cacert // "null"' -) 42 | if test "$CACERT" != "null"; then 43 | CLOUD_CA_ENC=$(base64 -w 0 "$CACERT") 44 | else 45 | # Snaps are broken - can not access ~/.config/openstack/clouds.yaml 46 | AUTH_URL=$(print-cloud.py | yq eval .clouds.${OS_CLOUD}.auth.auth_url -) 47 | #AUTH_URL=$(grep -A12 "${cloud_provider}" ~/.config/openstack/clouds.yaml | grep auth_url | head -n1 | sed -e 's/^ *auth_url: //' -e 's/"//g') 48 | AUTH_URL_SHORT=$(echo "$AUTH_URL" | sed s'/https:\/\///' | sed s'/\/.*$//') 49 | # Check whether AUTH_URL_SHORT includes port, otherwise append ":443" 50 | if ! [[ "$AUTH_URL_SHORT" =~ .*":".* ]]; then AUTH_URL_SHORT="$AUTH_URL_SHORT:443"; fi 51 | CERT_CERT=$(openssl s_client -connect "$AUTH_URL_SHORT" </dev/null 2>&1 | head -n 1 | sed s'/.*CN\ =\ //' | sed s'/\ /_/g' | sed s'/$/.pem/') 52 | CLOUD_CA_ENC=$(base64 -w 0 /etc/ssl/certs/"$CERT_CERT") 53 | fi 54 | 55 | yq eval '.OPENSTACK_CLOUD_YAML_B64 = "'"$CLOUD_YAML_ENC"'"' -i ~/$CLUSTER_NAME/clusterctl.yaml 56 | yq eval '.OPENSTACK_CLOUD_PROVIDER_CONF_B64 = "'"$CLOUD_CONF_ENC"'"' -i ~/$CLUSTER_NAME/clusterctl.yaml 57 | yq eval '.OPENSTACK_CLOUD_CACERT_B64 = "'"$CLOUD_CA_ENC"'"' -i ~/$CLUSTER_NAME/clusterctl.yaml 58 | 59 | -------------------------------------------------------------------------------- /terraform/files/bin/configure_containerd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ./configure_containerd.sh cluster-template.yaml $CLUSTER_NAME 3 | # 4 | # Script injects containerd registry host and cert files into $1 (cluster-template.yaml). 5 | # Script reads files located in directories $HOME/$CLUSTER_NAME/containerd/hosts and 6 | # $HOME/$CLUSTER_NAME/containerd/certs and then executes the following on each: 7 | # 8 | # - Composes full destination path of the file (i.e. path on cluster node).
The full path is composed as follows: 9 | # - Host file (file is stored in the `hosts.toml` file in the subdirectory created based on its filename): 10 | # <`host` file directory (`/etc/containerd/certs.d/`)> + <file name> + <hosts.toml> 11 | # - Cert file (file is stored as it is in a dedicated directory): 12 | # <`cert` file directory (`/etc/containerd/certs/`)> + <file name> 13 | # - Creates temporary YAML file from the file content with destination path from above as follows: 14 | # ```yaml 15 | # --- 16 | # path: <destination path> 17 | # owner: "root:root" 18 | # permissions: "0644" 19 | # content: | 20 | # <file content> 21 | # ``` 22 | # - Injects temporary YAML file into $1 (cluster-template.yaml) file (using `yq` in place edit). 23 | # Temporary file is injected to the `KubeadmControlPlaneTemplate.spec.template.spec.kubeadmConfigSpec.files` that specifies extra files to be 24 | # passed to user_data upon creation of control plane nodes and to the `KubeadmConfigTemplate.spec.template.spec.files` 25 | # that specifies extra files to be passed to user_data upon creation of worker nodes. 26 | # - Removes temporary YAML file 27 | # 28 | # (c) Matej Feder, 06/2023 29 | # SPDX-License-Identifier: Apache-2.0 30 | 31 | # imports 32 | . ~/bin/utils.inc 33 | 34 | if test -z "$1"; then echo "ERROR: Need cluster-template.yaml arg" 1>&2; exit 1; fi 35 | if test -z "$2"; then echo "ERROR: Need CLUSTER_NAME arg" 1>&2; exit 1; fi 36 | 37 | declare -a paths 38 | paths=("hosts" "certs") 39 | 40 | for path in "${paths[@]}"; do 41 | for file in "$HOME"/"$2"/containerd/"$path"/*; do 42 | export file 43 | if [ -f "$file" ]; then 44 | 45 | if [ "$path" = "hosts" ]; then 46 | file_name="$(basename "$file")/hosts.toml" 47 | destination_path="/etc/containerd/certs.d/" 48 | export destination_path file_name 49 | fi 50 | 51 | if [ "$path" = "certs" ]; then 52 | file_name=$(basename "$file") 53 | destination_path="/etc/containerd/certs/" 54 | export destination_path file_name 55 | fi 56 | 57 | yq --null-input ' 58 | .path = env(destination_path) + env(file_name) | 59 | .owner = "root:root" | 60 | .permissions = "0644" | 61 | .content = loadstr(env(file)) 62 | ' > file_tmp 63 | # Evaluate whether the file is already present in the cluster-template.yaml.
64 | # YAML key `files` is not mandatory therefore it should be added as an empty array to ensure that the whole evaluation will work as expected, 65 | # see related YQ docs: https://mikefarah.gitbook.io/yq/operators/alternative-default-value#update-or-create-entity-does-not-exist 66 | file_cp_exist=$(yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec | (.files // (.files = []))[] | select(.path == env(destination_path) + env(file_name))' "$1") 67 | if test -z "$file_cp_exist"; then 68 | echo "Adding $file_name to the KubeadmControlPlaneTemplate files" 69 | yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec.files += [load("file_tmp")]' -i "$1" 70 | else 71 | echo "$file_name is already defined in KubeadmControlPlaneTemplate files" 72 | fi 73 | file_ct_exist=$(yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec | (.files // (.files = []))[] | select(.path == env(destination_path) + env(file_name))' "$1") 74 | if test -z "$file_ct_exist"; then 75 | echo "Adding $file_name to the KubeadmConfigTemplate files" 76 | yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec.files += [load("file_tmp")]' -i "$1" 77 | else 78 | echo "$file_name is already defined in KubeadmConfigTemplate files" 79 | fi 80 | rm file_tmp 81 | fi 82 | done 83 | done 84 | -------------------------------------------------------------------------------- /terraform/files/bin/configure_containerd_proxy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ./configure_containerd_proxy.sh cluster-template.yaml 3 | # 4 | # Script injects containerd proxy config file into $1 (cluster-template.yaml). 5 | # Script reads proxy configuration from /etc/profile.d/proxy.sh 6 | # 7 | # - Creates temporary YAML file with the proxy config as follows: 8 | # ```yaml 9 | # --- 10 | # path: /etc/systemd/system/containerd.service.d/http-proxy.conf 11 | # owner: "root:root" 12 | # permissions: "0644" 13 | # content: | 14 | # [Service] 15 | # Environment="HTTP_PROXY=<$HTTP_PROXY from /etc/profile.d/proxy.sh>" 16 | # Environment="HTTPS_PROXY=<$HTTP_PROXY from /etc/profile.d/proxy.sh>" 17 | # Environment="NO_PROXY=<$NO_PROXY from /etc/profile.d/proxy.sh>" 18 | # ``` 19 | # - Injects temporary YAML file into $1 (cluster-template.yaml) file (using `yq` in place edit). 20 | # Temporary file is injected to the `KubeadmControlPlaneTemplate.spec.template.spec.kubeadmConfigSpec.files` that specifies extra files to be 21 | # passed to user_data upon creation of control plane nodes and to the `KubeadmConfigTemplate.spec.template.spec.files` 22 | # that specifies extra files to be passed to user_data upon creation of worker nodes. 23 | # - Removes temporary YAML file 24 | # 25 | # (c) Malte Muench, 11/2023 26 | # SPDX-License-Identifier: Apache-2.0 27 | if test -z "$1"; then echo "ERROR: Need cluster-template.yaml arg" 1>&2; exit 1; fi 28 | 29 | . /etc/profile.d/proxy.sh 30 | 31 | if [ ! -v HTTP_PROXY ] 32 | then 33 | echo "No HTTP_PROXY set, nothing to do, exiting." 
34 | exit 0 35 | fi 36 | 37 | export SYSTEMD_CONFIG_CONTENT=containerd_systemd_conf 38 | export CLUSTER_TEMPLATE_SNIPPET=clustertemplate_snippet 39 | 40 | echo "[Service]" > $SYSTEMD_CONFIG_CONTENT 41 | echo "Environment=\"HTTP_PROXY=$HTTP_PROXY\"" >> $SYSTEMD_CONFIG_CONTENT 42 | echo "Environment=\"HTTPS_PROXY=$HTTP_PROXY\"" >> $SYSTEMD_CONFIG_CONTENT 43 | echo "Environment=\"NO_PROXY=$NO_PROXY\"" >> $SYSTEMD_CONFIG_CONTENT 44 | 45 | 46 | yq --null-input ' 47 | .path = "/etc/systemd/system/containerd.service.d/http-proxy.conf" | 48 | .owner = "root:root" | 49 | .permissions = "0644" | 50 | .content = loadstr(env(SYSTEMD_CONFIG_CONTENT))' > $CLUSTER_TEMPLATE_SNIPPET 51 | 52 | # Test whether the file is already present in cluster-template.yaml 53 | file_cp_exist=$(yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec | (.files // (.files = []))[] | select(.path == "/etc/systemd/system/containerd.service.d/http-proxy.conf")' "$1") 54 | 55 | if test -z "$file_cp_exist"; then 56 | echo "Adding containerd proxy config to the KubeadmControlPlaneTemplate files" 57 | yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec.files += [load(env(CLUSTER_TEMPLATE_SNIPPET))]' -i "$1" 58 | else 59 | echo "containerd proxy config is already defined in KubeadmControlPlaneTemplate files" 60 | fi 61 | 62 | file_ct_exist=$(yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec | (.files // (.files = []))[] | select(.path == "/etc/systemd/system/containerd.service.d/http-proxy.conf")' "$1") 63 | if test -z "$file_ct_exist"; then 64 | echo "Adding containerd proxy config to the KubeadmConfigTemplate files" 65 | yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec.files += [load(env(CLUSTER_TEMPLATE_SNIPPET))]' -i "$1" 66 | else 67 | echo "containerd proxy config is already defined in KubeadmConfigTemplate files" 68 | fi 69 | 70 | rm $SYSTEMD_CONFIG_CONTENT 71 | rm $CLUSTER_TEMPLATE_SNIPPET 72 | exit 0 73 | -------------------------------------------------------------------------------- /terraform/files/bin/configure_proxy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ./configure_proxy.sh cluster-template.yaml clusterctl.yaml 3 | # 4 | # Script injects proxy profile config file into $1 (cluster-template.yaml) and proxy command into $2 (clusterctl.yaml). 5 | # Script reads proxy configuration from /etc/profile.d/proxy.sh 6 | # 7 | # - Creates temporary YAML file with the proxy config as follows: 8 | # ```yaml 9 | # --- 10 | # path: /etc/profile.d/proxy.sh 11 | # owner: "root:root" 12 | # permissions: "0644" 13 | # content: | 14 | # <content of /etc/profile.d/proxy.sh> 15 | # ``` 16 | # - Injects temporary YAML file into $1 (cluster-template.yaml) file (using `yq` in place edit). 17 | # Temporary file is injected to the `KubeadmControlPlaneTemplate.spec.template.spec.kubeadmConfigSpec.files` that specifies extra files to be 18 | # passed to user_data upon creation of control plane nodes and to the `KubeadmConfigTemplate.spec.template.spec.files` 19 | # that specifies extra files to be passed to user_data upon creation of worker nodes. 20 | # - Removes temporary YAML file 21 | # - Sets PROXY_CMD in $2 (clusterctl.yaml) 22 | # 23 | # (c) Malte Muench, 11/2023 24 | # SPDX-License-Identifier: Apache-2.0 25 | if test -z "$1"; then echo "ERROR: Need cluster-template.yaml arg" 1>&2; exit 1; fi 26 | if test -z "$2"; then echo "ERROR: Need clusterctl.yaml arg" 1>&2; exit 1; fi 27 | 28 | . 
/etc/profile.d/proxy.sh 29 | 30 | if [ ! -v HTTP_PROXY ] 31 | then 32 | echo "No HTTP_PROXY set, nothing to do, exiting." 33 | exit 0 34 | fi 35 | 36 | export PROFILE_CONFIG_CONTENT=proxy-profile.sh 37 | export CLUSTER_TEMPLATE_SNIPPET=clustertemplate_snippet 38 | 39 | # yq might be installed as snap which can not read /etc 40 | cp /etc/profile.d/proxy.sh $PROFILE_CONFIG_CONTENT 41 | 42 | 43 | yq --null-input ' 44 | .path = "/etc/profile.d/proxy.sh" | 45 | .owner = "root:root" | 46 | .permissions = "0644" | 47 | .content = loadstr(env(PROFILE_CONFIG_CONTENT))' > $CLUSTER_TEMPLATE_SNIPPET 48 | 49 | # Test whether the file is already present in cluster-template.yaml 50 | file_cp_exist=$(yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec | (.files // (.files = []))[] | select(.path == "/etc/profile.d/proxy.sh")' "$1") 51 | 52 | if test -z "$file_cp_exist"; then 53 | echo "Adding proxy config to the KubeadmControlPlaneTemplate files" 54 | yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec.files += [load(env(CLUSTER_TEMPLATE_SNIPPET))]' -i "$1" 55 | else 56 | echo "proxy profile config is already defined in KubeadmControlPlaneTemplate files" 57 | fi 58 | 59 | file_ct_exist=$(yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec | (.files // (.files = []))[] | select(.path == "/etc/profile.d/proxy.sh")' "$1") 60 | if test -z "$file_ct_exist"; then 61 | echo "Adding proxy config to the KubeadmConfigTemplate files" 62 | yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec.files += [load(env(CLUSTER_TEMPLATE_SNIPPET))]' -i "$1" 63 | else 64 | echo "proxy profile config is already defined in KubeadmConfigTemplate files" 65 | fi 66 | 67 | rm $PROFILE_CONFIG_CONTENT 68 | rm $CLUSTER_TEMPLATE_SNIPPET 69 | 70 | yq eval '.PROXY_CMD = ". /etc/profile.d/proxy.sh; " | .PROXY_CMD style="double"' -i "$2" 71 | exit 0 72 | -------------------------------------------------------------------------------- /terraform/files/bin/create_appcred.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # create_appcred.sh 3 | # Determine whether we need to create a per-cluster application credential 4 | # and add an appropriate config to the clouds.yaml section. 5 | # Call clusterctl_template.sh to update clusterctl.yaml 6 | # 7 | # (c) Kurt Garloff , 7/2022 8 | # SPDX-License-Identifier: Apache-2.0 9 | 10 | # 11 | 12 | # imports 13 | . ~/bin/utils.inc 14 | . ~/bin/cccfg.inc 15 | 16 | # Switch to capi workload cluster 17 | if [ -z ${KCONTEXT} ]; then 18 | setup_kubectl_context_workspace 19 | set_workload_cluster_kubectl_namespace 20 | fi 21 | 22 | # If the cluster exists already and we don't have a private appcred, leave it alone 23 | if kubectl get cluster $CLUSTER_NAME >/dev/null 2>&1 && ! grep '^OLD_OPENSTACK_CLOUD' ~/$CLUSTER_NAME/clusterctl.yaml >/dev/null 2>&1; then 24 | echo "#Warn: Old style cluster, disable new appcred handling" 25 | exit 0 26 | fi 27 | if kubectl -n default get cluster $CLUSTER_NAME >/dev/null 2>&1 && ! 
grep '^OLD_OPENSTACK_CLOUD' ~/$CLUSTER_NAME/clusterctl.yaml >/dev/null 2>&1; then 28 | echo "#Warn: Old style cluster, disable new appcred handling" 29 | exit 0 30 | fi 31 | APPCREDS=$(openstack application credential list -f value -c ID -c Name -c "Project ID") 32 | while read id nm prjid; do 33 | #echo "\"$nm\" \"$prjid\" \"$id\"" 34 | if test "$nm" = "$PREFIX-$CLUSTER_NAME-appcred"; then 35 | echo "#Reuse AppCred $nm $id" 36 | APPCRED_ID=$id 37 | APPCRED_PRJ=$prjid 38 | fi 39 | done < <(echo "$APPCREDS") 40 | # Generate a new application credential 41 | if test -z "$APPCRED_ID"; then 42 | NEWCRED=$(openstack application credential create "$PREFIX-$CLUSTER_NAME-appcred" --description "App Cred $PREFIX for cluster $CLUSTER_NAME" -f value -c id -c project_id -c secret) 43 | if test $? != 0; then 44 | echo "Application Credential generation failed." 1>&2 45 | exit 1 46 | fi 47 | read APPCRED_ID APPCRED_PRJ APPCRED_SECRET < <(echo $NEWCRED) 48 | echo "#Created AppCred $APPCRED_ID" 49 | if test ! -e ~/.config/openstack/clouds.yaml.orig; then cp -p ~/.config/openstack/clouds.yaml ~/.config/openstack/clouds.yaml.orig; fi 50 | #print-cloud.py -c $PREFIX-$CLUSTER_NAME -r application_credential_id=$APPCRED_ID -r application_credential_secret="\"$APPCRED_SECRET\"" -i auth_url="#project_id: $APPCRED_PRJ" | grep -v '^#' | grep -v '^---' | grep -v '^clouds:' >> ~/.config/openstack/clouds.yaml 51 | # Generate a fresh section rather than relying on cleanliness of existing setup 52 | AUTH_URL=$(print-cloud.py | yq eval .clouds.${OS_CLOUD}.auth.auth_url -) 53 | REGION=$(print-cloud.py | yq eval .clouds.${OS_CLOUD}.region_name -) 54 | CACERT=$(print-cloud.py | yq eval '.clouds."'"$OS_CLOUD"'".cacert // "null"' -) 55 | # In theory we could also make interface and id_api_vers variable, 56 | # but let's do that once we find the necessity. Error handling makes 57 | # it slightly complex, so it's not an obvious win. 58 | cat >> ~/.config/openstack/clouds.yaml </dev/null 2>&1; then 75 | echo "ERROR: Application credential $PREFIX-$CLUSTER_NAME-appcred exists but unknown to us. Please clean up." 76 | exit 1 77 | fi 78 | fi 79 | export OS_CLOUD=$PREFIX-$CLUSTER_NAME 80 | export PROJECTID=$APPCRED_PRJ 81 | # Generate clouds.yaml and cloud.conf and create b64 encoded pieces for clusterctl.yaml 82 | clusterctl_template.sh $CLUSTER_NAME 83 | 84 | -------------------------------------------------------------------------------- /terraform/files/bin/deploy_cluster_api.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## desc: a helper for deploy a workload cluster on mgmt cluster 4 | ## license: Apache-2.0 5 | 6 | # variables 7 | . ~/.capi-settings 8 | . ~/bin/openstack-kube-versions.inc 9 | . /etc/profile.d/proxy.sh 10 | 11 | ARCH=$(uname -m | sed 's/x86_64/amd64/') 12 | # Install clusterctl 13 | echo "# install clusterctl $CLUSTERAPI_VERSION" 14 | # TODO: Check signature 15 | sudo curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v$CLUSTERAPI_VERSION/clusterctl-linux-$ARCH -o /usr/local/bin/clusterctl 16 | sudo chmod +x /usr/local/bin/clusterctl 17 | 18 | # Source .bash_aliases in case we are called from non-interactive bash (Makefile) 19 | # This does not seem to be strictly needed for deploy_cluster_api.sh right now. 20 | # We have moved it until after installation of clusterctl to avoid a cosmetic error. 
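# Hedged sketch for the "TODO: Check signature" above -- it assumes (not verified
# here) that the cluster-api release publishes a clusterctl-linux-$ARCH.sha256
# asset next to the binary:
#   curl -sL -o /tmp/clusterctl.sha256 https://github.com/kubernetes-sigs/cluster-api/releases/download/v$CLUSTERAPI_VERSION/clusterctl-linux-$ARCH.sha256
#   test "$(sha256sum /usr/local/bin/clusterctl | cut -d' ' -f1)" = "$(cut -d' ' -f1 /tmp/clusterctl.sha256)" || echo "WARNING: clusterctl checksum mismatch" 1>&2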
21 | source ~/.bash_aliases 22 | 23 | # get the clusterctl version 24 | echo "show the clusterctl version:" 25 | clusterctl version --output yaml 26 | 27 | # We used to encode secrets here for clusterctl.yaml 28 | #bash clusterctl_template.sh 29 | # This is done per cluster now, here's what's left: 30 | # Generate SET_MTU_B64 31 | #MTU=`yq eval '.MTU_VALUE' ~/cluster-defaults/clusterctl.yaml` 32 | # Fix up nameserver list (trailing comma -- cosmetic) 33 | sed '/OPENSTACK_DNS_NAMESERVERS:/s@, \]"@ ]"@' -i ~/cluster-defaults/clusterctl.yaml 34 | # Fix metadata dicts (trailing comma -- cosmetic) 35 | sed '/OPENSTACK_CONTROL_PLANE_MACHINE_METADATA:/s@, }"@ }"@' -i ~/cluster-defaults/clusterctl.yaml 36 | sed '/OPENSTACK_NODE_MACHINE_METADATA:/s@, }"@ }"@' -i ~/cluster-defaults/clusterctl.yaml 37 | 38 | # cp clusterctl.yaml to the right place 39 | if test "$(dotversion "$(clusterctl version -o short)")" -ge 10500; then 40 | cp -p $HOME/cluster-defaults/clusterctl.yaml $HOME/.config/cluster-api/clusterctl.yaml 41 | else 42 | cp -p $HOME/cluster-defaults/clusterctl.yaml $HOME/.cluster-api/clusterctl.yaml 43 | fi 44 | 45 | # deploy cluster-api on mgmt cluster 46 | echo "deploy cluster-api with openstack provider $CLUSTERAPI_OPENSTACK_PROVIDER_VERSION" 47 | export CLUSTER_TOPOLOGY=true 48 | clusterctl init --infrastructure openstack:v$CLUSTERAPI_OPENSTACK_PROVIDER_VERSION --core cluster-api:v$CLUSTERAPI_VERSION -b kubeadm:v$CLUSTERAPI_VERSION -c kubeadm:v$CLUSTERAPI_VERSION 49 | 50 | # Install calicoctl 51 | # TODO: Check signature 52 | CALICO_VERSION=`yq eval '.CALICO_VERSION' ~/cluster-defaults/clusterctl.yaml` 53 | curl -o calicoctl -L "https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$ARCH" 54 | if test $? != 0 -o $(stat -c "%s" calicoctl) -lt 1000; then 55 | curl -o calicoctl -L "https://github.com/projectcalico/calicoctl/releases/download/$CALICO_VERSION/calicoctl" 56 | fi 57 | chmod +x calicoctl 58 | sudo mv calicoctl /usr/local/bin 59 | 60 | # Install cilium 61 | # TODO: Check signature 62 | #CILIUM_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt) 63 | CILIUM_VERSION="${CILIUM_BINARIES%%;*}" 64 | curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/download/$CILIUM_VERSION/cilium-linux-$ARCH.tar.gz{,.sha256sum} 65 | sha256sum --check cilium-linux-$ARCH.tar.gz.sha256sum || exit 66 | #https://github.com/cilium/cilium-cli/releases/download/v0.12.3/cilium-linux-amd64.tar.gz 67 | sudo tar xzvfC cilium-linux-$ARCH.tar.gz /usr/local/bin 68 | rm cilium-linux-$ARCH.tar.gz{,.sha256sum} 69 | #HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt) 70 | HUBBLE_VERSION="${CILIUM_BINARIES##*;}" 71 | curl -L --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-$ARCH.tar.gz{,.sha256sum} 72 | sha256sum --check hubble-linux-$ARCH.tar.gz.sha256sum || exit 73 | sudo tar xzvfC hubble-linux-$ARCH.tar.gz /usr/local/bin 74 | rm hubble-linux-$ARCH.tar.gz{,.sha256sum} 75 | 76 | # wait for CAPI pods 77 | echo "# wait for all cluster-api components to be ready" 78 | kubectl wait --for=condition=Ready --timeout=5m -n capi-system pod --all 79 | #kubectl wait --for=condition=Ready --timeout=5m -n capi-webhook-system pod --all 80 | kubectl wait --for=condition=Ready --timeout=5m -n capi-kubeadm-bootstrap-system pod --all 81 | kubectl wait --for=condition=Ready --timeout=5m -n capi-kubeadm-control-plane-system pod --all 82 | kubectl wait 
--for=condition=Ready --timeout=5m -n capo-system pod --all 83 | 84 | # wait for CAPO crds 85 | kubectl wait --for condition=established --timeout=60s crds/openstackmachines.infrastructure.cluster.x-k8s.io 86 | kubectl wait --for condition=established --timeout=60s crds/openstackmachinetemplates.infrastructure.cluster.x-k8s.io 87 | kubectl wait --for condition=established --timeout=60s crds/openstackclusters.infrastructure.cluster.x-k8s.io 88 | kubectl wait --for condition=established --timeout=60s crds/openstackclustertemplates.infrastructure.cluster.x-k8s.io 89 | -------------------------------------------------------------------------------- /terraform/files/bin/deploy_harbor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # ./deploy_harbor.sh CLUSTER_NAME 3 | # 4 | # Script deploys Harbor to cluster "CLUSTER_NAME" 5 | # It also creates Swift container and ec2 credentials in the OpenStack project 6 | # 7 | # (c) Roman Hros, 07/2023 8 | # SPDX-License-Identifier: Apache-2.0 9 | 10 | . ~/bin/cccfg.inc 11 | export KUBECONFIG=${KUBECONFIG_WORKLOADCLUSTER} 12 | 13 | # export harbor variables as env for envsubst 14 | set -a 15 | . ~/$CLUSTER_NAME/harbor-settings 16 | set +a 17 | 18 | echo "Deploy harbor to $CLUSTER_NAME" 19 | 20 | mkdir -p ~/$CLUSTER_NAME/deployed-manifests.d/harbor 21 | cd ~/$CLUSTER_NAME/deployed-manifests.d/harbor 22 | 23 | # download scripts 24 | TAG=v6.1.1 25 | RAW_TAG_URL="https://raw.githubusercontent.com/SovereignCloudStack/k8s-harbor/$TAG" 26 | if test ! -s ~/bin/harbor-secrets.bash; then 27 | curl -L "$RAW_TAG_URL/base/harbor-secrets.bash" -o ~/bin/harbor-secrets.bash || exit 2 28 | fi 29 | if test ! -s ~/bin/s3-credentials.bash; then 30 | curl -L "$RAW_TAG_URL/envs/public/s3-credentials.bash" -o ~/bin/s3-credentials.bash || exit 2 31 | fi 32 | sudo apt-get install -y pwgen apache2-utils 33 | 34 | # generate harbor secrets 35 | bash ~/bin/harbor-secrets.bash 36 | 37 | # create ec2 credentials if they don't already exist 38 | if test ! -s .ec2; then 39 | EC2CRED=$(openstack ec2 credentials create -f value -c access -c secret) 40 | read EC2CRED_ACCESS EC2CRED_SECRET < <(echo $EC2CRED) 41 | echo "#Created EC2Cred for the cluster $CLUSTER_NAME" 42 | cat > .ec2 < harbor.yaml 69 | else 70 | kubectl kustomize ~/kubernetes-manifests.d/harbor/envs/clusterIP | envsubst > harbor.yaml 71 | fi 72 | kubectl apply -f harbor.yaml 73 | -------------------------------------------------------------------------------- /terraform/files/bin/enable-cilium-sg-kube.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . 
~/bin/cccfg.inc 3 | SGS=$(openstack security group list -f value -c ID -c Name | grep "k8s-cluster-\(default-${CLUSTER_NAME}\|${CLUSTER_NAME}-${CLUSTER_NAME}\)-secgroup-") 4 | SG_WORKER=$(echo "$SGS" | grep worker | cut -d " " -f1) 5 | SG_CONTROL=$(echo "$SGS" | grep controlplane | cut -d " " -f1) 6 | #rm -f enable-cilium-control.yaml enable-cilium-worker.yaml 7 | echo -e "status:\n controlPlaneSecurityGroup:\n rules:" > enable-cilium-control.yaml 8 | echo -e "status:\n workerSecurityGroup:\n rules:" > enable-cilium-worker.yaml 9 | for proto in udp/8472/VXLAN tcp/4240/HealthCheck tcp/31813/EchoOther tcp/31374/EchoSame; do 10 | prot=${proto%%/*} 11 | port=${proto#*/} 12 | desc="${port#*/} (cilium)" 13 | port=${port%/*} 14 | #openstack security group rule create --description "capi $CLUSTER_NAME $desc" --ingress --ethertype IPv4 --proto $prot --dst-port $port:$port --remote-group $SG_WORKER $SG_WORKER 15 | #openstack security group rule create --description "capi $CLUSTER_NAME $desc" --ingress --ethertype IPv4 --proto $prot --dst-port $port:$port --remote-group $SG_WORKER $SG_CONTROL 16 | #openstack security group rule create --description "capi $CLUSTER_NAME $desc" --ingress --ethertype IPv4 --proto $prot --dst-port $port:$port --remote-group $SG_CONTROL $SG_WORKER 17 | #openstack security group rule create --description "capi $CLUSTER_NAME $desc" --ingress --ethertype IPv4 --proto $prot --dst-port $port:$port --remote-group $SG_CONTROL $SG_CONTROL 18 | echo -e " - description: capi $CLUSTER_NAME $desc\n direction: ingress\n etherType: IPv4\n protocol: $prot\n portRangeMin: ${port%:*}\n portRangeMax: ${port##*:}\n remoteGroupID: $SG_WORKER" >> enable-cilium-control.yaml 19 | echo -e " - description: capi $CLUSTER_NAME $desc\n direction: ingress\n etherType: IPv4\n protocol: $prot\n portRangeMin: ${port%:*}\n portRangeMax: ${port##*:}\n remoteGroupID: $SG_CONTROL" >> enable-cilium-control.yaml 20 | echo -e " - description: capi $CLUSTER_NAME $desc\n direction: ingress\n etherType: IPv4\n protocol: $prot\n portRangeMin: ${port%:*}\n portRangeMax: ${port##*:}\n remoteGroupID: $SG_WORKER" >> enable-cilium-worker.yaml 21 | echo -e " - description: capi $CLUSTER_NAME $desc\n direction: ingress\n etherType: IPv4\n protocol: $prot\n portRangeMin: ${port%:*}\n portRangeMax: ${port##*:}\n remoteGroupID: $SG_CONTROL" >> enable-cilium-worker.yaml 22 | done 23 | kubectl --context=kind-kind --namespace "$CLUSTER_NAME" patch openstackcluster "$CLUSTER_NAME" --type=merge --patch-file enable-cilium-control.yaml 24 | kubectl --context=kind-kind --namespace "$CLUSTER_NAME" patch openstackcluster "$CLUSTER_NAME" --type=merge --patch-file enable-cilium-worker.yaml 25 | -------------------------------------------------------------------------------- /terraform/files/bin/enable-cilium-sg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . ~/bin/cccfg.inc 3 | COLUMNS=${COLUMNS:-80} 4 | SGS=$(openstack security group list -f value -c ID -c Name | grep "${PREFIX}-${CLUSTER_NAME}-cilium") 5 | if test -z "$SGS"; then 6 | SGS=$(openstack security group create ${PREFIX}-${CLUSTER_NAME}-cilium -f value -c id -c name) 7 | # Note: Cilium connectivity test will require tcp/3xxxx/EchoOther tcp/3xxxx/EchoSame 8 | # See ports with k get svc -A 9 | # Should this really be required?
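# Hedged example: the Echo* NodePorts differ per deployment (the hardcoded
# 31813/31374 in enable-cilium-sg-kube.sh are one instance); with the workload
# cluster kubeconfig active they can be looked up with something like
#   kubectl get svc -A | grep -i echo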
10 | #SG=${SGS%% *} 11 | SG=$(echo "$SGS" | head -n1) 12 | for proto in udp/8472/VXLAN tcp/4240/HealthCheck tcp/4244/Hubble; do 13 | prot=${proto%%/*} 14 | port=${proto#*/} 15 | desc="${port#*/} (cilium)" 16 | port=${port%/*} 17 | if test "${port%:*}" == "$port"; then port="$port:$port"; fi 18 | # Note: we could instead use --remote-ip ${NODE_CIDR} -- less secure, but better performance 19 | openstack security group rule create --description "$PREFIX $CLUSTER_NAME $desc" --ingress --ethertype IPv4 --proto $prot --dst-port $port --remote-group $SG $SG --max-width=$COLUMNS 20 | done 21 | fi 22 | # SG is consumed in cluster-template.yaml 23 | -------------------------------------------------------------------------------- /terraform/files/bin/fixup_flavor_volumes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # fixup_flavor_volumes.sh 4 | # 5 | # Usage: fixup_flavor_volumes.sh CCCFG CLUSTERTEMPLATE 6 | # Check if the flavor has a disk that's large enough 7 | # Change machine template to allocate root disk of sufficient size if no disk exists 8 | # 9 | # (c) Kurt Garloff , 6/2023 10 | # SPDX-License-Identifier: Apache-2.0 11 | 12 | usage() 13 | { 14 | echo "Usage: fixup_flavor_volumes.sh CLUSTERCTL CLUSTERTEMPLATE" 15 | exit 1 16 | } 17 | 18 | if test -z "$2"; then usage; fi 19 | 20 | UBU_IMG_NM=$(yq eval '.OPENSTACK_IMAGE_NAME' $1) 21 | CTRLFLAVOR=$(yq eval '.OPENSTACK_CONTROL_PLANE_MACHINE_FLAVOR' $1) 22 | WORKFLAVOR=$(yq eval '.OPENSTACK_NODE_MACHINE_FLAVOR' $1) 23 | 24 | flavor_disk.sh "$CTRLFLAVOR" "$UBU_IMG_NM" 25 | DISKCTRL=$? 26 | flavor_disk.sh "$WORKFLAVOR" "$UBU_IMG_NM" 27 | DISKWORK=$? 28 | 29 | if test $DISKCTRL -ge 128; then echo "ERROR $((DISKCTRL-256)) using ctrlplane flavor $CTRLFLAVOR for image $UBU_IMG_NM"; exit 1; fi 30 | if test $DISKWORK -ge 128; then echo "ERROR $((DISKWORK-256)) using worker flavor $WORKFLAVOR for image $UBU_IMG_NM"; exit 1; fi 31 | if test $DISKCTRL != 0; then 32 | if grep 'CONTROL_PLANE_ROOT_DISKSIZE' $1 >/dev/null 2>&1; then 33 | if ! grep '^CONTROL_PLANE_ROOT_DISKSIZE' $1 >/dev/null 2>&1; then 34 | sed -i 's/^.*\(CONTROL_PLANE_ROOT_DISKSIZE\)/\1/' $1 35 | fi 36 | if grep '^CONTROL_PLANE_ROOT_DISKSIZE: 0 *$' $1 >/dev/null 2>&1; then 37 | sed -i "s/^\(CONTROL_PLANE_ROOT_DISKSIZE: \)0/\1$DISKCTRL/" $1 38 | fi 39 | else 40 | echo -e "# Volume for control plane disk\nCONTROL_PLANE_ROOT_DISKSIZE: $DISKCTRL" >> $1 41 | fi 42 | cp -p $2 $2.orig 43 | kustpatch.sh ~/kubernetes-manifests.d/add-vol-to-ctrl.yaml <$2.orig >$2 44 | else 45 | sed -i 's/^\(CONTROL_PLANE_ROOT_DISKSIZE\)/#\1/' $1 46 | cp -p $2 $2.orig 47 | kustpatch.sh ~/kubernetes-manifests.d/rmv-vol-from-ctrl.yaml <$2.orig >$2 48 | fi 49 | if test $DISKWORK != 0; then 50 | if grep 'WORKER_ROOT_DISKSIZE' $1 >/dev/null 2>&1; then 51 | if ! 
grep '^WORKER_ROOT_DISKSIZE' $1 >/dev/null 2>&1; then 52 | sed -i 's/^.*\(WORKER_ROOT_DISKSIZE\)/\1/' $1 53 | fi 54 | if grep '^WORKER_ROOT_DISKSIZE: 0 *$' $1 >/dev/null 2>&1; then 55 | sed -i "s/^\(WORKER_ROOT_DISKSIZE: \)0/\1$DISKWORK/" $1 56 | fi 57 | else 58 | echo -e "# Volume for worker node disk\nWORKER_ROOT_DISKSIZE: $DISKWORK" >> $1 59 | fi 60 | cp -p $2 $2.orig 61 | kustpatch.sh ~/kubernetes-manifests.d/add-vol-to-worker.yaml <$2.orig >$2 62 | else 63 | sed -i 's/^\(WORKER_ROOT_DISKSIZE\)/#\1/' $1 64 | cp -p $2 $2.orig 65 | kustpatch.sh ~/kubernetes-manifests.d/rmv-vol-from-worker.yaml <$2.orig >$2 66 | fi 67 | -------------------------------------------------------------------------------- /terraform/files/bin/fixup_k8s_version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # fixup_k8s_version.sh 3 | # Patch $1 (clusterctl.yaml) with fixed up k8s version if needed 4 | # (c) Kurt Garloff, 03/2022 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | if test -z "$1"; then echo "ERROR: Need clusterctl.yaml arg" 1>&2; exit 1; fi 8 | KUBERNETES_VERSION=$(yq eval '.KUBERNETES_VERSION' $1) 9 | . ~/bin/openstack-kube-versions.inc 10 | # Now is the time to error out 11 | if is_tech_preview $KUBERNETES_VERSION; then 12 | echo "WARNING: k8s $KUBERNETES_VERSION not yet officially supported by capo" 1>&2 13 | if test -z "$ALLOW_PREVIEW_VERSIONS"; then 14 | echo "ERROR: You need to pass --allow-preview-versions to allow this" 1>&2 15 | exit 1 16 | fi 17 | fi 18 | if test "${KUBERNETES_VERSION:$((${#KUBERNETES_VERSION}-1)):1}" != "x"; then exit 0; fi 19 | k8s=$KUBERNETES_VERSION 20 | set_k8s_latestpatch $KUBERNETES_VERSION 21 | echo "Correct k8s from $k8s to $KUBERNETES_VERSION" 22 | sed -i "s/KUBERNETES_VERSION:\([^v]*\)v[^x]*x/KUBERNETES_VERSION:\1$KUBERNETES_VERSION/" $1 23 | sed -i "s/OPENSTACK_IMAGE_NAME:\(.*\)\-v[^x]*x/OPENSTACK_IMAGE_NAME:\1-$KUBERNETES_VERSION/" $1 24 | 25 | -------------------------------------------------------------------------------- /terraform/files/bin/flavor_disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # flavor_disk.sh 4 | # 5 | # Determine if flavor needs a volume and return needed size 6 | # Usage: flavor_disk.sh FLAVOR [IMAGE] 7 | # Determines whether FLAVOR has a disk 8 | # If yes, return 0 (no disk needed) 9 | # If no, return a number (size of disk to be created). 10 | # The size is calculated by a heuristic: 20+RAM/2 rounded to next 5, max 125 11 | # If FLAVOR does not exist: return -1 12 | # If IMAGE is passed in addition: 13 | # If FLAVOR has large enough disk: return 0 14 | # If FLAVOR disk is too small: return -2 15 | # If FLAVOR has no disk: calculate ImgSize+RAM/2 rounded to 5, max 125 16 | # If FLAVOR does not exist: -1 17 | # If IMAGE does not exist: -3 18 | # 19 | # Requirements: OS_CLOUD needs to be set to a working cloud, openstack CLI 20 | # needs to be installed and work and cloud API needs to respond. 21 | # 22 | # This is used to determine if we need to add disks to capo machine templates. 23 | # 24 | # (c) Kurt Garloff , 6/2023 25 | # SPDX-License-Identifier: Apache-2.0 26 | 27 | . /etc/profile.d/proxy.sh 28 | 29 | usage() 30 | { 31 | echo "Usage: flavor_disk.sh FLAVOR [IMAGE]" 32 | exit 1 33 | } 34 | 35 | if test -z "$1"; then usage; fi 36 | if ! command -v jq &>/dev/null; then 37 | sudo apt-get update && sudo apt-get install -y jq 38 | fi 39 | 40 | FLAVOR=`openstack flavor show $1 -f json` 41 | if test $? 
!= 0; then exit -1; fi 42 | if test -n "$2"; then 43 | IMAGE=`openstack image show $2 -f json` 44 | if test $? != 0; then exit -3; fi 45 | ISIZE=`echo "$IMAGE" | jq '.min_disk'` 46 | else 47 | ISIZE=20 48 | fi 49 | CPU=`echo "$FLAVOR" | jq '.vcpus'` # | tr -d '"' 50 | RAM=`echo "$FLAVOR" | jq '.ram'` 51 | RAM=$(((RAM+64)/1024)) 52 | DISK=`echo "$FLAVOR" | jq '.disk'` 53 | #FIXME: Should we prevent single CPU here? 54 | if test $DISK != 0; then 55 | if test $DISK -lt $ISIZE; then exit -2; else exit 0; fi 56 | else 57 | WANT=$(((ISIZE+2+$RAM/2)/5*5)) 58 | if test $WANT -gt 125; then WANT=125; fi 59 | if test $WANT -lt $ISIZE; then WANT=$ISIZE; fi 60 | exit $WANT 61 | fi 62 | -------------------------------------------------------------------------------- /terraform/files/bin/get_capi_helm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . /etc/profile.d/proxy.sh 3 | # This checks out 4 | if test -e capi-helm-charts; then 5 | echo "Updating capi-helm-charts" 6 | cd capi-helm-charts 7 | git pull 8 | else 9 | echo "Cloning capi-helm-charts" 10 | git clone https://github.com/stackhpc/capi-helm-charts 11 | fi 12 | -------------------------------------------------------------------------------- /terraform/files/bin/get_k8s_git.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Install k8s-cluster-api-provider repo 3 | # Optional args: branch and patch 4 | # (c) Kurt Garloff , 3/2022 5 | # SPDX-License-Identifier: Apache-2.0 6 | . /etc/profile.d/proxy.sh 7 | cd 8 | getent hosts github.com || sleep 30 9 | #cd k8s-cluster-api-provider 10 | if test -n "$1"; then git clone "$1" k8s-cluster-api-provider || exit 1; fi 11 | cd k8s-cluster-api-provider 12 | if test -n "$2"; then git checkout "$2" || exit 1; fi 13 | if test -n "$3"; then patch -p1 <"$3" || exit 1; fi 14 | cd 15 | # Create links 16 | ln -s k8s-cluster-api-provider/terraform/files/bin . 17 | ln -s k8s-cluster-api-provider/terraform/files/kubernetes-manifests.d . 
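# Hedged usage sketch (argument order per the tests above: $1 = git URL to clone,
# $2 = branch/tag/commit to check out, $3 = patch file to apply; all optional;
# URL and diff name below are illustrative only):
#   get_k8s_git.sh https://github.com/SovereignCloudStack/k8s-cluster-api-provider main local-fix.diff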
18 | mkdir ~/doc 19 | ln -s ../k8s-cluster-api-provider/README.md ~/doc 20 | ln -s ~/k8s-cluster-api-provider/Release-Notes*.md ~/doc 21 | 22 | -------------------------------------------------------------------------------- /terraform/files/bin/get_mtu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # If we have not hardcoded an MTU, discover the one from 3 | # the running system and assume our cluster is on the same cloud 4 | #CLOUDMTU=$(grep '^MTU_VALUE:' clusterctl.yaml | sed 's/MTU_VALUE: //') 5 | if test -e /tmp/daemon.json && grep '"mtu":' /tmp/daemon.json >/dev/null; then 6 | CLOUDMTU=$(grep '"mtu":' /tmp/daemon.json | sed 's/^ *"mtu": *//' | tr -d '"') 7 | echo "Read MTU $CLOUDMTU from /tmp/daemon.json" 8 | elif test -e /etc/docker/daemon.json && grep '"mtu":' /etc/docker/daemon.json >/dev/null; then 9 | CLOUDMTU=$(grep '"mtu":' /etc/docker/daemon.json | sed 's/^ *"mtu": *//' | tr -d '"') 10 | echo "Read MTU $CLOUDMTU from /etc/docker/daemon.json" 11 | else 12 | CLOUDMTU=$(grep '^MTU_VALUE:' ~/cluster-defaults/clusterctl.yaml | sed 's/MTU_VALUE: //') 13 | if test "$CLOUDMTU" != "0"; then let CLOUDMTU+=50; fi 14 | echo "Read MTU $CLOUDMTU from ~/cluster-defaults/clusterctl.yaml" 15 | fi 16 | 17 | if test "$CLOUDMTU" == "0"; then 18 | DEV=$(ip route show default | head -n1 | sed 's/^.*dev \([^ ]*\).*$/\1/') 19 | CLOUDMTU=$(ip link show $DEV | head -n1 | sed 's/^.*mtu \([0-9]*\) .*$/\1/') 20 | echo "Detected MTU $CLOUDMTU (dev $DEV)" 21 | DOCKERMTU=$((CLOUDMTU/8*8)) 22 | if test -e /tmp/daemon.json && grep '"mtu": 0' /tmp/daemon.json >/dev/null; then 23 | sed -i "s/\"mtu\": 0/\"mtu\": $DOCKERMTU/" /tmp/daemon.json 24 | fi 25 | fi 26 | CALICOMTU=$(((CLOUDMTU-50)/8*8)) 27 | sed -i "s/MTU_VALUE: 0/MTU_VALUE: $CALICOMTU/" ~/cluster-defaults/clusterctl.yaml 28 | -------------------------------------------------------------------------------- /terraform/files/bin/handle_ovn_lb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # handle_ovn_lb.sh 3 | # Check USE_OVN_LB_PROVIDER setting and react accordingly: 4 | # * false: do nothing special 5 | # * auto: determine if ovn provider LB is available and act like false or true 6 | # * true: set provider to ovn and enable health-monitor 7 | # 8 | # (c) Kurt Garloff , 02/2023 9 | # SPDX-License-Identifier: Apache-2.0 10 | 11 | # imports 12 | . ~/bin/utils.inc 13 | . 
~/bin/cccfg.inc 14 | 15 | test_ovn_avail() 16 | { 17 | PROVIDERS=$(openstack loadbalancer provider list -f value -c name) 18 | if echo "$PROVIDERS" | grep "^ovn$" >/dev/null 2>&1; then return 0; fi 19 | return 1 20 | } 21 | 22 | set_cfg_octavia() 23 | { 24 | unset VALUE 25 | while read line; do 26 | if test "${line:0:1}" = "["; then 27 | SECTION="${line#[}" 28 | SECTION="${SECTION%%]*}" 29 | fi 30 | if test "$SECTION" = "LoadBalancer" -a "${line:0:${#1}}" = "$1"; then 31 | VALUE="${line#*=}" 32 | #echo "Found $1=$VALUE" 33 | fi 34 | done < $CLOUDCONF 35 | if test -n "$VALUE"; then 36 | if test "$VALUE" = "$2"; then return 0; fi # Nothing to be done 37 | #echo "Replace $1=$2" 38 | sed -i "s@^$1=.*\$@$1=$2@" $CLOUDCONF 39 | return 1 40 | else 41 | #echo "Insert $1=$2" 42 | sed -i "/^\[LoadBalancer\]/a\ 43 | $1=$2" $CLOUDCONF 44 | return 2 45 | fi 46 | } 47 | 48 | use_ovn() 49 | { 50 | CLOUDCONF="$HOME/$CLUSTER_NAME/cloud.conf" 51 | set_cfg_octavia "lb-provider" "ovn" 52 | set_cfg_octavia "lb-method" "SOURCE_IP_PORT" 53 | set_cfg_octavia "create-monitor" "true" 54 | return 0 55 | } 56 | 57 | disable_ovn() 58 | { 59 | CLOUDCONF="$HOME/$CLUSTER_NAME/cloud.conf" 60 | #sed -i "s/^\(lb-provider=ovn\)/#\1/g" $CLOUDCONF 61 | sed -i '/lb\-provider=ovn/d' $CLOUDCONF 62 | sed -i '/lb\-method=SOURCE_IP_PORT/d' $CLOUDCONF 63 | #sed -i '/create\-monitor=true/d' $CLOUDCONF 64 | } 65 | 66 | export USE_OVN=$(yq eval '.USE_OVN_LB_PROVIDER' $CCCFG) 67 | if test "$USE_OVN" = "false"; then disable_ovn 68 | elif test "$USE_OVN" = "auto"; then if test_ovn_avail; then use_ovn; else disable_ovn; fi 69 | elif test "$USE_OVN" = "true"; then use_ovn 70 | else echo "ERROR: Invalid setting for USE_OVN_LB_PROVIDER \"$USE_OVN\"" 1>&2; fi 71 | 72 | -------------------------------------------------------------------------------- /terraform/files/bin/inject_custom_ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ./inject_custom_ca.sh cluster-template.yaml CADEST 3 | # 4 | # Script injects cacert (file with content from secret ${CLUSTER_NAME}-cloud-config) into $1 (cluster-template.yaml). 5 | # Secret ${CLUSTER_NAME}-cloud-config contains key cacert with OPENSTACK_CLOUD_CACERT_B64 variable. 6 | # Cacert will be templated later and injected to k8s nodes on $2 (CADEST) path. 7 | # Inspiration taken from configure_containerd.sh 8 | # 9 | # (c) Roman Hros, 07/2023 10 | # SPDX-License-Identifier: Apache-2.0 11 | 12 | if test -z "$1"; then echo "ERROR: Need cluster-template.yaml arg" 1>&2; exit 1; fi 13 | if test -z "$2"; then echo "ERROR: Need CADEST arg" 1>&2; exit 1; fi 14 | 15 | export CA_DEST="$2" 16 | 17 | yq --null-input ' 18 | .path = env(CA_DEST) | 19 | .owner = "root:root" | 20 | .permissions = "0644" | 21 | .contentFrom = {"secret": {"key": "cacert", "name": "${CLUSTER_NAME}-cloud-config"}} 22 | ' > file_tmp 23 | 24 | yq 'select(.kind == "KubeadmControlPlaneTemplate").spec.template.spec.kubeadmConfigSpec.files += [load("file_tmp")]' -i "$1" 25 | yq 'select(.kind == "KubeadmConfigTemplate").spec.template.spec.files += [load("file_tmp")]' -i "$1" 26 | 27 | rm file_tmp 28 | -------------------------------------------------------------------------------- /terraform/files/bin/install_flux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . 
/etc/profile.d/proxy.sh 3 | # TODO: Check sig 4 | if test -x /usr/local/bin/flux; then exit 0; fi 5 | curl -s https://fluxcd.io/install.sh > ~/bin/install-flux2.sh 6 | chmod +x ~/bin/install-flux2.sh 7 | # Install 8 | FLUX_VERSION=$1 install-flux2.sh 9 | flux completion bash > ~/.bash_flux 10 | echo "source ~/.bash_flux" >> ~/.bash_aliases 11 | -------------------------------------------------------------------------------- /terraform/files/bin/install_helm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Download and deploy helm 3 | . /etc/profile.d/proxy.sh 4 | HELMVER=3.14.1 5 | OS=linux; ARCH=$(uname -m | sed 's/x86_64/amd64/') 6 | curl -LO https://get.helm.sh/helm-v${HELMVER}-$OS-$ARCH.tar.gz 7 | tar xvzf helm-v${HELMVER}-$OS-$ARCH.tar.gz 8 | sudo mv $OS-$ARCH/helm /usr/local/bin/ 9 | rm helm-v${HELMVER}-$OS-$ARCH.tar.gz 10 | rm -rf $OS-$ARCH 11 | 12 | -------------------------------------------------------------------------------- /terraform/files/bin/install_k9s.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Deploy k9s on mgmt cluster for convenience 3 | # (c) Kurt Garloff / Malte Münch / Thosten Schifferdecker 1/2021 -- 2/2022 4 | # SPDX-License-Identifier: Apache-2.0 5 | . /etc/profile.d/proxy.sh 6 | 7 | # install k9s 8 | K9S_VERSION=v0.32.5 # renovate: datasource=github-releases depName=derailed/k9s 9 | echo "# install k9s $K9S_VERSION" 10 | ARCH=$(uname -m | sed 's/x86_64/amd64/') 11 | # TODO: Check signature 12 | #REDIR=$(curl --silent https://api.github.com/repos/derailed/k9s/releases/latest | grep tag_name) 13 | #VERSION=$(echo $REDIR | sed -E 's/.*"([^"]+)".*/\1/') 14 | cd ~ 15 | curl -L https://github.com/derailed/k9s/releases/download/$K9S_VERSION/k9s_Linux_$ARCH.tar.gz | tar xzvf - 16 | sudo mv ./k9s /usr/local/bin/k9s 17 | mv README.md ~/doc/README-k9s-$K9S_VERSION.md 18 | mv LICENSE ~/doc/LICENSE-k9s-$K9S_VERSION 19 | -------------------------------------------------------------------------------- /terraform/files/bin/install_kind.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | . 
/etc/profile.d/proxy.sh 3 | KIND_VERSION=0.20.0 4 | sudo wget -O /usr/local/bin/kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64 5 | sudo chmod +x /usr/local/bin/kind 6 | kind create cluster 7 | 8 | -------------------------------------------------------------------------------- /terraform/files/bin/install_kube_ps1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | KUBEPS1_VERSION=v0.8.0 4 | echo "# install kube-ps1 $KUBEPS1_VERSION" 5 | git clone --depth 1 --branch $KUBEPS1_VERSION https://github.com/jonmosco/kube-ps1 ~/.kube-ps1 6 | -------------------------------------------------------------------------------- /terraform/files/bin/install_kubectx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get install --no-install-recommends --no-install-suggests -y fzf pkg-config 4 | 5 | KUBECTX_VERSION=v0.9.5 6 | echo "# install kubectx $KUBECTX_VERSION" 7 | # Deploy kubectx on mgmt cluster for convenience 8 | git clone --depth 1 --branch $KUBECTX_VERSION https://github.com/ahmetb/kubectx.git ~/.kubectx 9 | COMPDIR=$(pkg-config --variable=completionsdir bash-completion) 10 | sudo ln -sf ~/.kubectx/completion/kubens.bash $COMPDIR/kubens 11 | sudo ln -sf ~/.kubectx/completion/kubectx.bash $COMPDIR/kubectx 12 | cat << EOF >> ~/.bashrc 13 | 14 | #kubectx and kubens 15 | export PATH=~/.kubectx:\$PATH 16 | alias kns=kubens 17 | alias kctx=kubectx 18 | EOF 19 | -------------------------------------------------------------------------------- /terraform/files/bin/kustpatch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # kustpatch.sh 3 | # 4 | # I see kustomize as an intelligent (format-aware) way to apply 5 | # patches to yaml. The usage is cumbersome, however, as it requires 6 | # setting up directories with kustomization files etc. 7 | # This can be simplified a lot. 8 | # 9 | # Apply a set of kustomizations (passed on the command line) to 10 | # the yaml file provided via stdin; the result is output to stdout 11 | # 12 | # This takes care of setting up the directory structure that kustomize 13 | # expects. 14 | # 15 | # Usage: 16 | # kustpatch.sh kust1.yaml [kust2.yaml [...]] < base.yaml > result.yaml 17 | # 18 | # (c) Kurt Garloff , 3/2022 19 | # SPDX-License-Identifier: Apache-2.0 20 | 21 | . /etc/profile.d/proxy.sh 22 | 23 | unset KTMPDIR 24 | cleanup() 25 | { 26 | # Set KEEPKUST for debugging 27 | if test -n "$KTMPDIR" -a -d "$KTMPDIR" -a -z "$KEEPKUST"; then 28 | cd; rm -rf "$KTMPDIR" 29 | fi 30 | } 31 | 32 | usage() 33 | { 34 | echo "Usage: kustpatch.sh kust1.yaml [kust2.yaml [...]] < base.yaml > result.yaml" 1>&2 35 | cleanup 36 | exit ${1:-1} 37 | } 38 | 39 | if test -z "$1"; then usage; fi 40 | 41 | # stupid snap 42 | #TMPDIR=$(mktemp -d /dev/shm/kustpatch.XXXXXX) || exit 2 43 | if test ! -d ~/tmp; then mkdir ~/tmp; fi 44 | IWD=$(pwd) 45 | KTMPDIR=$(mktemp -d ~/tmp/kustpatch.XXXXXX) || exit 2 46 | 47 | mkdir $KTMPDIR/base || exit 2 48 | mkdir $KTMPDIR/patch 49 | cd $KTMPDIR/base; kustomize create || exit 3 50 | cd ..; cp -p base/kustomization.yaml patch/ 51 | cd $IWD 52 | 53 | if test ! -t 0; then cat > $KTMPDIR/base/base.yaml; fi 54 | if test ! 
-s $KTMPDIR/base/base.yaml; then 55 | INPUT=$(grep '^# YAML_TO_PATCH:' "$@" | sed 's/^.*# YAML_TO_PATCH: *//g' | head -n1) 56 | if test -z "$INPUT"; then echo "ERROR: Pass input YAML via stdin (or specify in patch header)" 1>&2; usage 4; fi 57 | if test "${INPUT:0:4}" = "http"; then 58 | curl -fsSL "$INPUT" -o $KTMPDIR/base/base.yaml 59 | RC=$? 60 | if test $RC != 0; then echo "ERROR: Could not retrieve $INPUT" 1>&2; usage 4; fi 61 | else 62 | if test ! -s "$INPUT"; then echo "ERROR: Base file $INPUT not readable" 1>&2; usage 4; fi 63 | cp -p "$INPUT" $KTMPDIR/base/base.yaml 64 | fi 65 | fi 66 | echo -e "resources:\n - base.yaml" >> $KTMPDIR/base/kustomization.yaml 67 | echo -e "resources:\n - ../base\npatches:" >> $KTMPDIR/patch/kustomization.yaml 68 | 69 | for patch in "$@"; do 70 | if test ! -s "$patch"; then echo "ERROR: Patch file $patch not readable" 1>&2; usage 5; fi 71 | cp -p "$patch" $KTMPDIR/patch/ 72 | echo " - path: ${patch##*/}" >> $KTMPDIR/patch/kustomization.yaml 73 | done 74 | cd $KTMPDIR 75 | kustomize build patch 76 | RC=$? 77 | #if test $RC = 0; then cleanup; fi 78 | cleanup 79 | exit $RC 80 | 81 | -------------------------------------------------------------------------------- /terraform/files/bin/nginx_proxy_realip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # nginx_proxy_realip.sh 3 | # Set proxy-real-ip-cidr in ingress-nginx-controller configmap to LB VIP 4 | # (c) Kurt Garloff , 3/2022 5 | # SPDX-License-Identifier: Apache-2.0 6 | 7 | . ~/bin/cccfg.inc 8 | 9 | NGINX_YAML=~/$CLUSTER_NAME/deployed-manifests.d/nginx-ingress.yaml 10 | 11 | get_ip_configmap() 12 | { 13 | PROXYIP_K8S=$(kubectl $KCONTEXT describe -n ingress-nginx configmaps ingress-nginx-controller | grep -A2 proxy-real-ip-cidr | tail -n1) 14 | } 15 | 16 | get_ip_yaml() 17 | { 18 | PROXYIP_YAML=$(yq eval '.data.proxy-real-ip-cidr' $NGINX_YAML | grep -v '^null' | grep -v '^\-\-\-') 19 | } 20 | 21 | get_ip_lb() 22 | { 23 | PROXYIP_LB=$(openstack loadbalancer list --name=kube_service_${CLUSTER_NAME}_ingress-nginx_ingress-nginx-controller -f value -c vip_address) 24 | PROXYIP_LB="$PROXYIP_LB/32" 25 | } 26 | 27 | patch_ip_yaml() 28 | { 29 | cp -p $NGINX_YAML $NGINX_YAML.bak 30 | sed -i "s@proxy-real-ip-cidr:.*\$@proxy-real-ip-cidr: \"$1\"@" $NGINX_YAML 31 | diff -up $NGINX_YAML.bak $NGINX_YAML 32 | kubectl $KCONTEXT apply -f $NGINX_YAML || return 33 | rm $NGINX_YAML.bak 34 | } 35 | 36 | reconcile() 37 | { 38 | get_ip_configmap 39 | get_ip_yaml 40 | if test "$PROXYIP_K8S" != "$PROXYIP_YAML"; then echo "ERROR: K8S ConfigMap $PROXYIP_K8S, YAML $PROXYIP_YAML" 1>&2; fi 41 | get_ip_lb 42 | if test "$PROXYIP_K8S" != "$PROXYIP_LB"; then 43 | echo "#Info: Adjusting K8S nginx proxy-real-ip-cidr from $PROXYIP_K8S to $PROXYIP_LB" 1>&2 44 | patch_ip_yaml $PROXYIP_LB 45 | return 1 46 | fi 47 | return 0 48 | } 49 | 50 | test_enabled() 51 | { 52 | DEPLOY_NGINX_INGRESS=$(yq eval '.DEPLOY_NGINX_INGRESS' $CCCFG) 53 | NGINX_INGRESS_PROXY=$(yq eval '.NGINX_INGRESS_PROXY' $CCCFG) 54 | if test "$DEPLOY_NGINX_INGRESS" = "false"; then echo "ERROR: DEPLOY_NGINX_INGRESS not enabled" 1>&2; return 1; fi 55 | if test "$NGINX_INGRESS_PROXY" != "true"; then echo "ERROR: NGINX_INGRESS_PROXY not set" 1>&2; return 2; fi 56 | } 57 | 58 | 59 | reconcile_loop() 60 | { 61 | while true; do 62 | sleep 30 63 | test_enabled || exit $? 
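# Settings may be toggled at runtime, so re-check the flags before each reconciliation.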
64 | reconcile 65 | sleep 90 66 | done 67 | } 68 | 69 | # main 70 | if test "${0##*/}" = "nginx_proxy_realip.sh"; then 71 | test_enabled && reconcile && echo "#Info: Nothing to be done ($PROXYIP_LB)" 1>&2 72 | fi 73 | -------------------------------------------------------------------------------- /terraform/files/bin/openstack-kube-versions.inc: -------------------------------------------------------------------------------- 1 | # Table of kubernetes and openstack versions 2 | # vim: set syntax=bash: 3 | # (c) Kurt Garloff , 3/2022 4 | # SPDX-License-Identifier: Apache-2.0 5 | # Images from https://swift.services.a.regiocloud.tech/swift/v1/AUTH_b182637428444b9aa302bb8d5a5a418c/openstack-k8s-capi-images 6 | k8s_versions=("v1.21.14" "v1.22.17" "v1.23.16" "v1.24.15" "v1.25.15" "v1.26.14" "v1.27.12" "v1.28.11" "v1.29.3") 7 | # OCCM, CCM-RBAC, Cinder CSI, Cinder-Snapshot (TODO: Manila CSI) 8 | occm_versions=("v1.21.1" "v1.22.2" "v1.23.4" "v1.24.6" "v1.25.6" "v1.26.4" "v1.27.3" "v1.28.2" "v1.29.0") 9 | #ccmr_versions=("" "v1.22.2" "v1.23.4" "v1.24.6" "v1.25.6" "v1.26.4" "v1.27.3" "v1.28.2" "v1.29.0") 10 | ccmr_versions=("v1.22.2" "v1.22.2" "v1.23.4" "v1.24.6" "v1.25.6" "v1.26.4" "v1.27.3" "v1.28.2" "v1.29.0") 11 | ccsi_versions=("v1.21.1" "v1.22.2" "v1.23.4" "v1.24.6" "v1.25.6" "v1.26.4" "v1.27.3" "v1.28.2" "v1.29.0") 12 | 13 | # Versions that require a --allow-preview-versions flag 14 | techprev_versions=("v1.29" "v2") 15 | 16 | # Convert vxx.yy.zz to the number xxyyzz. Also works for z.y.z (0x0y0z). 17 | dotversion() 18 | { 19 | if test "$1" = "latest" -o "$1" = "main" -o "$1" = "master" -o "$1" = "HEAD"; then 20 | VERSION=999999 21 | echo $VERSION 22 | return 23 | elif test -z "$1"; then 24 | VERSION=1 25 | echo $VERSION 26 | return 27 | fi 28 | VERS=${1#v} 29 | one=${VERS%%.*} 30 | two=${VERS#*.} 31 | three=${two#*.} 32 | if test "$three" = "$two"; then three=0; fi 33 | two=${two%%.*} 34 | VERSION=$((10000*$one+100*$two+$three)) 35 | unset V one two three 36 | echo $VERSION 37 | } 38 | 39 | # Get versions from arrays 40 | setversions() 41 | { 42 | OCCM_VERSION=${occm_versions[$1]} 43 | CCMR_VERSION=${ccmr_versions[$1]} 44 | CCSI_VERSION=${ccsi_versions[$1]} 45 | } 46 | 47 | # Limit patchlevel to $1 in var $2 48 | adjustpl() 49 | { 50 | if test ${2##*.} -le $1; then echo "$2" 51 | else echo "${2%.*}.$1" 52 | fi 53 | } 54 | 55 | # Avoid minor version being larger than k8s version for OCCM >= v1.25.0 56 | # (Cloud-Provider-OpenStack v1.xx.yy requires k8s API v1.xx.yy for xx >= 25) 57 | limitpatchlevels() 58 | { 59 | K8PL=${k8s##*.} 60 | OCCM_VERSION=$(adjustpl $K8PL $OCCM_VERSION) 61 | CCMR_VERSION=$(adjustpl $K8PL $CCMR_VERSION) 62 | CCSI_VERSION=$(adjustpl $K8PL $CCSI_VERSION) 63 | unset K8PL 64 | } 65 | 66 | # Is it a tech preview? 67 | is_tech_preview() 68 | { 69 | k8s=${1:-$KUBERNETES_VERSION} 70 | for tp in ${techprev_versions[@]}; do 71 | tpln=${#tp} 72 | if test ${k8s:0:$tpln} = $tp; then return 0; fi 73 | done 74 | return 1 75 | } 76 | 77 | # Determine which openstack-cloud-provider versions to use 78 | find_openstack_versions() 79 | { 80 | k8s=${1:-$KUBERNETES_VERSION} 81 | k8vers=$(dotversion $k8s) 82 | if test -z "$k8s"; then echo "ERROR: Need to pass k8s version" 1>&2; return 1; fi 83 | NUMV=${#k8s_versions[*]} 84 | k8min=$(dotversion ${k8s_versions[0]%.*}) 85 | k8max=$(dotversion ${k8s_versions[$((NUMV-1))]%.*}) 86 | #echo "$k8vers $k8min $k8max" 87 | if test $k8vers -lt $k8min; then setversions 0; return 0; fi 88 | if test $k8vers -gt $((k8max+99)); then setversions $((NUMV-1)); return 
0; fi 89 | declare -i idx=0 90 | for k8 in ${k8s_versions[*]}; do 91 | k8test=$(dotversion ${k8%.*}) 92 | if test $k8vers -ge $k8test -a $k8vers -le $((k8test+99)); then 93 | setversions $idx 94 | if test $k8vers -ge 12500; then limitpatchlevels $k8s; fi 95 | return 0 96 | fi 97 | let idx+=1 98 | done 99 | return 1 100 | } 101 | 102 | # Convert v1.19.x to latest v1.19.$LATEST 103 | set_k8s_latestpatch() 104 | { 105 | k8s=${1:-$KUBERNETES_VERSION} 106 | if test "${k8s:$((${#k8s}-1)):1}" != "x"; then KUBERNETES_VERSION=$k8s; return 0; fi 107 | k8vers=$(dotversion ${k8s%.x}) 108 | for k8 in ${k8s_versions[*]}; do 109 | k8test=$(dotversion ${k8%.*}) 110 | if test $k8vers -ge $k8test -a $k8vers -le $((k8test+99)); then KUBERNETES_VERSION=$k8; return 0; fi 111 | done 112 | KUBERNETES_VERSION="${k8s%.x}.0" 113 | return 1 114 | } 115 | -------------------------------------------------------------------------------- /terraform/files/bin/parse_k8s_version.inc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # File to be sourced to get a numeric k8s version 3 | # parse_k8s_version.inc 4 | # 5 | # (c) Kurt Garloff , 2/2023 6 | # SPDX-License-Identifier: Apache-2.0 7 | # 8 | # We parse the KUBERNETES_VERSION in clusterctl.yaml from $CCCFG 9 | # and create K8SMAJOR, K8SMINOR, K8SPATCH and K8SVER for the environment 10 | # K8SVER is a number with 10000*$K8SMAJOR+100*$K8SMINOR+$K8SPATCH 11 | # This parser is robust against omitted patch levels (assuming 0 then) 12 | 13 | K8SVER=$(grep '^KUBERNETES_VERSION:' "$CCCFG" | sed 's/^KUBERNETES_VERSION: v\([0-9.]*\)/\1/') 14 | K8SMINOR=${K8SVER#*.} 15 | K8SPATCH=${K8SMINOR#*.} 16 | # Avoid omitted patchlevel being mistreated 17 | if test "$K8SPATCH" = "$K8SMINOR"; then K8SPATCH=0; fi 18 | K8SMINOR=${K8SMINOR%%.*} 19 | K8SMAJOR=${K8SVER%%.*} 20 | K8SVER=${K8SMAJOR}$(printf %02i ${K8SMINOR})$(printf %02i ${K8SPATCH}) 21 | -------------------------------------------------------------------------------- /terraform/files/bin/prepare_openstack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | . ~/.capi-settings 3 | export OS_CLOUD=$(yq eval '.OPENSTACK_CLOUD' ~/cluster-defaults/clusterctl.yaml) 4 | 5 | # install OpenStack CLI 6 | sudo apt-get install --no-install-recommends --no-install-suggests -y python3-openstackclient python3-octaviaclient 7 | # fix bug 1876317 8 | sudo patch -p2 -N -d /usr/lib/python3/dist-packages/keystoneauth1 < /tmp/fix-keystoneauth-plugins-unversioned.diff 9 | 10 | # convenience 11 | echo "export OS_CLOUD=\"$OS_CLOUD\"" >> $HOME/.bash_aliases 12 | 13 | # Determine project ID and inject into cloud.conf 14 | PROJECTID=$(openstack application credential show "${PREFIX}-appcred" -f value -c project_id) 15 | echo "Set tenant-id to $PROJECTID for $OS_CLOUD" 16 | if ! 
grep '^tenant.id' ~/cluster-defaults/cloud.conf >/dev/null; then 17 | sed -i "/^application.credential.secret/atenant-id=$PROJECTID" ~/cluster-defaults/cloud.conf 18 | fi 19 | 20 | # Determine cacert and inject into cloud.conf and cluster-template.yaml 21 | CACERT=$(print-cloud.py | yq eval '.clouds."'"$OS_CLOUD"'".cacert // "null"' -) 22 | if test "$CACERT" != "null"; then 23 | CADEST="/etc/ssl/certs/$(basename "$CACERT")" # path for OCCM 24 | echo "Set ca-file to $CADEST for $OS_CLOUD" 25 | sed -i "/^application.credential.secret/aca-file=$CADEST" ~/cluster-defaults/cloud.conf 26 | inject_custom_ca.sh ~/cluster-defaults/cluster-template.yaml "$CADEST" 27 | fi 28 | -------------------------------------------------------------------------------- /terraform/files/bin/print-cloud.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # Print clouds/secure.yaml config without disclosing the password/token/appcred 4 | # This would be a lot easier with using pyaml 5 | # 6 | 7 | import sys, os, string, getopt 8 | 9 | repl_list = {} 10 | inj_list = {} 11 | reveal = False 12 | exclude = False 13 | 14 | def usage(): 15 | print("Usage: print_cloud.py [OPTIONS] [CLOUD [CLOUD [..]]]", file=sys.stderr) 16 | print("OPTIONS: -c/--cloud CLOUD: Replace cloud name", file=sys.stderr) 17 | print(" -r/--replace attr=value", file=sys.stderr) 18 | print(" -i/--inject attr=value", file=sys.stderr) 19 | print(" -s/--sensitive", file=sys.stderr) 20 | print(" -x/--exclude", file=sys.stderr) 21 | return 1 22 | 23 | def countws(st): 24 | idx = 0 25 | while idx < len(st) and st[idx].isspace(): 26 | idx += 1 27 | return idx 28 | 29 | def output_nonsecret(ln): 30 | kwds = ln.split(':') 31 | idx = countws(kwds[0]) 32 | if kwds[0][idx:] in repl_list: 33 | print("%s: %s" % (kwds[0], repl_list[kwds[0][idx:]])) 34 | elif kwds[0][idx:] in inj_list: 35 | print("%s%s" % (kwds[0][:idx], inj_list[kwds[0][idx:]])) 36 | print(ln) 37 | elif not reveal and ('password' in kwds[0] or 'secret' in kwds[0] or 'token' in kwds[0]): 38 | print('%s: SECRET' % kwds[0]) 39 | else: 40 | print(ln) 41 | 42 | def findcloud(cloudname, fn): 43 | hits = 0 44 | if not os.access(fn, os.R_OK): 45 | return False 46 | f = open(fn, "r") 47 | found = False 48 | nextind = False 49 | indent = " " 50 | lenind = len(indent) 51 | for line in f: 52 | line = line.rstrip('\r\n') 53 | if line == "---": 54 | continue 55 | if line == "clouds:": 56 | #print("Found clouds:") 57 | if exclude: 58 | output_nonsecret(line) 59 | nextind = True 60 | continue 61 | if not line or (line and line[0] == "#") or not line.rstrip(string.whitespace): 62 | continue 63 | if nextind: 64 | for lenind in range(0, len(line)-1): 65 | if line[lenind] not in string.whitespace: 66 | break 67 | assert(lenind != len(line)-1 and lenind != 0) 68 | indent = line[0:lenind] 69 | #print("Indentation: \"%s\"" % indent) 70 | nextind = False 71 | if not found and line == "%s%s:" % (indent, cloudname): 72 | found = True 73 | hits += 1 74 | if exclude: 75 | continue 76 | print("---\n#Cloud %s in %s:\nclouds:" % (cloudname, fn)) 77 | #output_nonsecret(line) 78 | if 'cloud' in repl_list: 79 | print("%s%s:" % (indent, repl_list["cloud"])) 80 | else: 81 | print("%s%s:" % (indent, cloudname)) 82 | continue 83 | if found and (line[:lenind] != indent or line[lenind:lenind+1] not in string.whitespace): 84 | #print("END: %s" % line) 85 | if not exclude: 86 | return found 87 | else: 88 | found = False 89 | if not found ^ exclude: 90 | continue 91 | 
output_nonsecret(line) 92 | return hits > 0 93 | 94 | def main(argv): 95 | global repl_list, inj_list, reveal, exclude 96 | home = os.environ["HOME"] 97 | err = 0 98 | try: 99 | optlist, arg = getopt.gnu_getopt(argv, "c:r:i:hsx", 100 | ('--cloud=', '--replace=', '--inject=', '--help', '--sensitive', '--exclude')) 101 | except getopt.GetoptError as exc: 102 | print("Error:", exc, file=sys.stderr) 103 | sys.exit(usage()) 104 | for opt in optlist: 105 | if opt[0] == '-h' or opt[0] == '--help': 106 | usage() 107 | sys.exit(0) 108 | elif opt[0] == '-s' or opt[0] == '--sensitive': 109 | reveal = True 110 | elif opt[0] == '-x' or opt[0] == '--exclude': 111 | exclude = True 112 | elif opt[0] == '-c' or opt[0] == '--cloud': 113 | repl_list['cloud'] = opt[1] 114 | elif opt[0] == '-r' or opt[0] == '--replace': 115 | pair = opt[1].split('=') 116 | repl_list[pair[0]] = pair[1] 117 | elif opt[0] == '-i' or opt[0] == '--inject': 118 | pair = opt[1].split('=') 119 | inj_list[pair[0]] = pair[1] 120 | else: 121 | sys.exit(usage()) 122 | 123 | if not len(arg) and "OS_CLOUD" in os.environ: 124 | arg = (os.environ["OS_CLOUD"],) 125 | for cloud in arg: 126 | success = False 127 | for (cyaml,syaml) in (("./clouds.yaml", "./secure.yaml"), 128 | ("%s/.config/openstack/clouds.yaml" % home, "%s/.config/openstack/secure.yaml" % home), 129 | ("/etc/openstack/clouds.yaml", "/etc/openstack/secure.yaml")): 130 | success = findcloud(cloud, cyaml) 131 | if success: 132 | findcloud(cloud, syaml) 133 | break 134 | if not success: 135 | print("#Cloud config for %s not found"% cloud) 136 | err += 1 137 | return err 138 | 139 | if __name__ == "__main__": 140 | sys.exit(main(sys.argv[1:])) 141 | -------------------------------------------------------------------------------- /terraform/files/bin/remove_cluster-network.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # imports 4 | . ~/bin/utils.inc 5 | . ~/bin/cccfg.inc 6 | 7 | NET_NAME=$(openstack network list -f value -c Name | grep "k8s-clusterapi-cluster-\(default-${CLUSTER_NAME}\|${CLUSTER_NAME}-${CLUSTER_NAME}\)") 8 | 9 | # Determine mgmtserver networks 10 | MGMT="$PREFIX-mgmtcluster" 11 | MGMTNET=$(openstack server list --name "$MGMT" -f value -c Networks) 12 | if [[ $NET_NAME == "k8s-clusterapi-cluster-default-"* ]]; then 13 | # Old format of network name based on cluster in default namespace 14 | NET=$(echo "$MGMTNET" | grep "k8s-clusterapi-cluster-default-$CLUSTER_NAME=" | sed "s/.*k8s-clusterapi-cluster-default-$CLUSTER_NAME=\([0-9a-f:\.]*\).*\$/\1/") 15 | # New format 16 | if test -z "$NET"; then NET=$(echo "$MGMTNET" | grep "'k8s-clusterapi-cluster-default-$CLUSTER_NAME':" | sed "s/.*'k8s-clusterapi-cluster-default-$CLUSTER_NAME':[^']*'\([0-9a-f:\.]*\)'.*\$/\1/"); fi 17 | else 18 | # New format of network name based on cluster in cluster with namespace name as cluster name 19 | NET=$(echo "$MGMTNET" | grep "k8s-clusterapi-cluster-$CLUSTER_NAME-$CLUSTER_NAME=" | sed "s/.*k8s-clusterapi-cluster-$CLUSTER_NAME-$CLUSTER_NAME=\([0-9a-f:\.]*\).*\$/\1/") 20 | # New format 21 | if test -z "$NET"; then NET=$(echo "$MGMTNET" | grep "'k8s-clusterapi-cluster-$CLUSTER_NAME-$CLUSTER_NAME':" | sed "s/.*'k8s-clusterapi-cluster-$CLUSTER_NAME-$CLUSTER_NAME':[^']*'\([0-9a-f:\.]*\)'.*\$/\1/"); fi 22 | fi 23 | if test -z "$NET"; then 24 | echo "No network to remove ..." 
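# Exit non-zero so callers notice that no matching NIC was attached (or it was removed already).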
25 | exit 1 26 | fi 27 | NIC=$(ip addr | grep -B4 "inet $NET/" | grep '^[0-9]' | sed 's/^[0-9]*: \([^: ]*\): .*$/\1/') 28 | 29 | #sudo ip link set dev ens8 down 30 | echo "Removing NIC $NIC $NET ..." 31 | openstack server remove network $MGMT $NET_NAME || exit 32 | -------------------------------------------------------------------------------- /terraform/files/bin/signer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # k8s signer 3 | # (c) Kurt Garloff , 8/2022 4 | # SPDX-License-Identifier: Apache-2.0 5 | # 6 | # Usage: 7 | # * Create a directory where you put the kubernetes ca.crt and .key. 8 | # * Go into this directory 9 | # * Calling signer.sh CLUSTERNAME will sign all CSRs that have been marked approved 10 | # with the CA cert and push the cert back into k8s. 11 | # * For Pending requests, it will display and interactively ask, which can be 12 | # changed using options -f, -a and -u. 13 | # * -u asks the underlying infra (OpenStack) whether hosts and IPs exist as requested 14 | # in the CSR and judges the approval based on it. 15 | # You could run this in an endless loop (in tmux): 16 | # while true; do signer.sh CLUSTERNAME -u; sleep 30; done 17 | # Note that a proper CSR handler (in CAPI) is the real solution for valid server 18 | # certificates. Consider this script a PoC. 19 | 20 | . /etc/profile.d/proxy.sh 21 | 22 | usage() 23 | { 24 | echo "Usage: signer.sh CLUSTERNAME [OPTIONS]" 25 | echo "Options: -a => only sign approved CSRs" 26 | echo " -f => approve and sign all" 27 | echo " -u => unattended mode: check CSRs and approve and sign" 28 | echo " valid ones. Valid=correct name and IP in IaaS" 29 | echo "Default is to ask for unapproved" 30 | exit 1 31 | } 32 | 33 | 34 | if test -z "$1"; then usage; fi 35 | 36 | CTX="--context=$1-admin@$1" 37 | shift 38 | if test "$1" == "-a"; then ONLYAPPROVED=1; shift; fi 39 | if test "$1" == "-f"; then FORCEAPPROVE=1; shift; fi 40 | if test "$1" == "-u"; then UNATTENDED=1; shift; fi 41 | 42 | if test ! -r ca.crt -o ! -r ca.key; then 43 | echo "Need ca.crt and ca.key in current directory" 44 | exit 2 45 | fi 46 | 47 | if ! type -p jq >/dev/null; then echo "Need jq installed"; exit 2; fi 48 | if ! type -p cfssl >/dev/null; then echo "Need cfssl installed"; exit 2; fi 49 | 50 | if test ! -r server-signing-config.json; then cat > server-signing-config.json <> signed-$1.pem 81 | kubectl $CTX get csr $1 -o json | jq '.status.certificate = "'$(base64 signed-$1.pem | tr -d '\n')'"' | \ 82 | kubectl $CTX replace --raw /apis/certificates.k8s.io/$CERTAPI/certificatesigningrequests/$1/status -f - || exit 6 83 | } 84 | 85 | checkvalidity() 86 | { 87 | KCSR=$(kubectl $CTX get csr $1 -o json) 88 | HNAME=$(echo "$KCSR" | jq .spec.username | tr -d '"') 89 | HNAME=${HNAME#system:node:} 90 | CRAPI=$(echo "$KCSR" | jq .metadata.managedFields[].apiVersion | tr -d '"') 91 | CERTAPI=${CRAPI##*io/} 92 | #echo "Cert request for $HNAME (API $CERTAPI)" 93 | REQ=$(echo "$KCSR" | jq .spec.request | tr -d '"') 94 | SAN=$(echo "$REQ" | base64 -d | openssl req -noout -text | grep -A1 "X509v3 Subject Alternative Name:" | grep "DNS:") 95 | #echo $SAN 96 | HNM2=${SAN#*DNS:}; HNM2=${HNM2%%,*} 97 | IP=${SAN#*,}; IP=${IP# IP Address:} 98 | if test -z "$HNM2" -o -z "$IP"; then echo "Parsing error"; return 1; fi 99 | echo "Cert request $1 for $HNM2 ($IP):" 100 | OST=$(openstack server list --name="$HNM2" -f value -c Networks) 101 | RC=$? 
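# A failed openstack lookup means the requesting node does not exist in the IaaS layer.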
102 | if test "$RC" != 0; then echo "Not found"; return $RC; fi 103 | MYIP=${OST##*=} 104 | if test "$MYIP" != "$IP"; then echo "IP does not match"; return 3; fi 105 | # FIXME: Should we compare CA and cluster to belong together? 106 | echo "Good for OpenStack" 107 | return 0 108 | } 109 | 110 | while read req age signer requestor status; do 111 | # Skip header 112 | if test "$req" = "NAME"; then continue; fi 113 | # Status 114 | if test "$status" = "Approved,Issued"; then continue 115 | elif test "$status" = "Approved"; then sign $req; continue 116 | elif test "$status" != "Pending"; then echo "Unexpected $req status $status"; continue 117 | fi 118 | #FIXME: Should ask openstack whether DNS name and IP match .... 119 | if test "$ONLYAPPROVED" = "1"; then continue; fi 120 | checkvalidity $req 121 | VALID=$? 122 | if test $VALID = 0 -a "$UNATTENDED" = "1" || test "$FORCEAPPROVE" = "1"; then 123 | if test "$FORCEAPPROVE" = "1"; then 124 | echo "Force approval for $req ($requestor -> $signer / $age)" 125 | fi 126 | kubectl $CTX certificate approve $req 127 | sign $req 128 | continue 129 | fi 130 | REQ=$(kubectl $CTX get csr $req -o jsonpath='{.spec.request}' | base64 --decode) 131 | OSSLINFO=$(echo "$REQ" | openssl req -noout -text -in -) 132 | echo -ne "$OSSLINFO\nApprove $req ($requestor -> $signer / $age)? " 133 | read ans
-------------------------------------------------------------------------------- /terraform/files/bin/sonobuoy.sh: --------------------------------------------------------------------------------
7 | # SPDX-License-Identifier: Apache-2.0 8 | # 9 | # Source .bash_aliases in case we are called from non-interactive bash (Makefile) 10 | # This does not seem to be strictly needed for sonobuoy.sh right now. 11 | source ~/.bash_aliases 12 | . /etc/profile.d/proxy.sh 13 | 14 | unset TZ 15 | export LC_ALL=POSIX 16 | if ! test -x /usr/local/bin/sonobuoy; then 17 | cd ~ 18 | OS=linux; ARCH=$(uname -m | sed 's/x86_64/amd64/') 19 | SONOBUOY_VERSION=0.57.1 20 | SONOTARBALL=sonobuoy_${SONOBUOY_VERSION}_${OS}_${ARCH}.tar.gz 21 | curl -LO https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/${SONOTARBALL} || exit 1 22 | tar xvzf ${SONOTARBALL} || exit 2 23 | chmod +x ./sonobuoy || exit 2 24 | sudo mv sonobuoy /usr/local/bin/ 25 | mv LICENSE ~/doc/LICENSE.sonobuoy-${SONOBUOY_VERSION} 26 | rm ${SONOTARBALL} 27 | fi 28 | . ~/bin/cccfg.inc 29 | shift 30 | export KUBECONFIG="$KUBECONFIG_WORKLOADCLUSTER" 31 | if ! test -s "$KUBECONFIG"; then echo "No $KUBECONFIG" 1>&2; exit 3; fi 32 | #./sonobuoy status 2>/dev/null 33 | #./sonobuoy delete --wait 34 | START=$(date +%s) 35 | echo "=== Running sonobuoy conformance tests ... 
$@ ===" 36 | sonobuoy run --plugin-env=e2e.E2E_PROVIDER=openstack "$@" || exit 4 37 | if test "$1" == "--mode" -a "$2" == "quick"; then SLP=10; ALL=""; else SLP=60; ALL="--all"; fi 38 | while true; do 39 | sleep $SLP 40 | COMPLETE=$(sonobuoy status) 41 | date +%FT%TZ 42 | echo "$COMPLETE" 43 | #sonobuoy logs -f 44 | if echo "$COMPLETE" | grep "has completed" >/dev/null 2>&1; then break; fi 45 | #./sonobuoy logs 46 | done 47 | echo "=== Collecting results ===" 48 | resfile=$(sonobuoy retrieve) 49 | sonobuoy delete $ALL 50 | REPORT=$(sonobuoy results $resfile) 51 | echo "$REPORT" 52 | END=$(date +%s) 53 | declare -i fail=0 54 | while read number; do 55 | let fail+=$number 56 | done < <(echo "$REPORT" | grep '^Failed: [0-9]\+' | sed 's/Failed: \([0-9]\+\)/\1/') 57 | sonobuoy delete $ALL --wait 58 | if test $fail != 0; then 59 | echo "FAIL: Investigate $resfile for further inspection" 1>&2 60 | exit $((4+$fail)) 61 | fi 62 | rm $resfile 63 | echo "=== Sonobuoy conformance tests passed in $((END-START))s ===" 64 | -------------------------------------------------------------------------------- /terraform/files/bin/update-R2-to-R3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Apply patches to cluster-template.yaml and clusterctl.yaml 3 | # (c) Kurt Garloff , 7/2022 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | usage() 7 | { 8 | echo "Usage: update-R2-to-R3.sh CLUSTERNAME" 9 | echo "Updates the cluster-template.yaml and clusterctl.yaml to have the new" 10 | echo "variables CONTROL_PLANE_MACHINE_GEN and WORKER_MACHINE_GEN" 11 | exit 1 12 | } 13 | 14 | CLUSTER_NAME="$1" 15 | if test -z "$CLUSTER_NAME"; then usage; fi 16 | 17 | restore() 18 | { 19 | echo "Patching failed ($1)" 1>&2 20 | cp -p cluster-template.yaml.backup cluster-template.yaml 21 | cp -p clusterctl.yaml.backup clusterctl.yaml 22 | exit $1 23 | } 24 | 25 | cd ~/${CLUSTER_NAME} || { echo "Cluster config $CLUSTER_NAME does not exist" 1>&2; exit 2; } 26 | if test ! -r cluster-template.yaml -o ! -r clusterctl.yaml; then echo "cluster-template.yaml or clusterctl.yaml not found" 1>&2; exit 3; fi 27 | # Backup 28 | cp -p cluster-template.yaml cluster-template.yaml.backup 29 | cp -p clusterctl.yaml clusterctl.yaml.backup 30 | # cluster-template 31 | patch -R --dry-run cluster-template.yaml < ~/k8s-cluster-api-provider/terraform/files/update/R2_to_R3/update-cluster-template.diff >/dev/null 2>&1 32 | if test $? 
== 0; then 33 | echo "cluster-template.yaml already upgraded" 1>&2 34 | else 35 | patch cluster-template.yaml < ~/k8s-cluster-api-provider/terraform/files/update/R2_to_R3/update-cluster-template.diff || restore 4 36 | fi 37 | # CONTROL_PLANE_MACHINE_GEN 38 | if grep '^CONTROL_PLANE_MACHINE_GEN' clusterctl.yaml >/dev/null 2>&1; then 39 | echo "CONTROL_PLANE_MACHINE_GEN already set in clusterctl.yaml" 1>&2 40 | else 41 | sed -i -f ~/k8s-cluster-api-provider/terraform/files/update/R2_to_R3/update-clusterctl-control-gen.sed clusterctl.yaml || restore 5 42 | fi 43 | # WORKER_MACHINE_GEN 44 | if grep '^WORKER_MACHINE_GEN' clusterctl.yaml >/dev/null 2>&1; then 45 | echo "WORKER_MACHINE_GEN already set in clusterctl.yaml" 1>&2 46 | else 47 | sed -i -f ~/k8s-cluster-api-provider/terraform/files/update/R2_to_R3/update-clusterctl-worker-gen.sed clusterctl.yaml || restore 6 48 | fi 49 | rm cluster-template.yaml.backup clusterctl.yaml.backup 50 | 51 | 52 | -------------------------------------------------------------------------------- /terraform/files/bin/upload_capi_image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ~/bin/cccfg.inc 4 | 5 | ~/bin/fixup_k8s_version.sh $CCCFG 6 | KUBERNETES_VERSION=$(yq eval '.KUBERNETES_VERSION' $CCCFG) 7 | . ~/bin/parse_k8s_version.inc 8 | #UBU_IMG_NM=ubuntu-capi-image-$KUBERNETES_VERSION 9 | UBU_IMG_NM=$(yq eval '.OPENSTACK_IMAGE_NAME' $CCCFG) 10 | IMG_RAW=$(yq eval '.OPENSTACK_IMAGE_RAW' $CCCFG) 11 | IMGREG_EXTRA=$(yq eval '.OPENSTACK_IMAGE_REGISTRATION_EXTRA_FLAGS' $CCCFG) 12 | 13 | VERSION_CAPI_IMAGE=$(echo $KUBERNETES_VERSION | sed 's/\.[[:digit:]]*$//g') 14 | if test "$K8SVER" -ge 12703 || 15 | test "$K8SVER" -lt 12700 -a "$K8SVER" -ge 12606 || 16 | test "$K8SVER" -lt 12600 -a "$K8SVER" -ge 12511; then 17 | UBUVER=2204 18 | UBUVERS="22.04" 19 | else 20 | UBUVER=2004 21 | UBUVERS="20.04" 22 | fi 23 | 24 | UBU_IMG=ubuntu-$UBUVER-kube-$KUBERNETES_VERSION 25 | 26 | WAITLOOP=64 27 | #download/upload image to openstack 28 | CAPIIMG=$(openstack image list --name "$UBU_IMG_NM") 29 | IMGURL=https://swift.services.a.regiocloud.tech/swift/v1/AUTH_b182637428444b9aa302bb8d5a5a418c/openstack-k8s-capi-images 30 | IMAGESRC=$IMGURL/ubuntu-$UBUVER-kube-$VERSION_CAPI_IMAGE/$UBU_IMG.qcow2 31 | if test -z "$CAPIIMG"; then 32 | # TODO: Check signature 33 | wget $IMAGESRC 34 | FMT=qcow2 35 | IMGINFO=$(qemu-img info $UBU_IMG.qcow2) 36 | DISKSZ=$(echo "$IMGINFO" | grep '^virtual size' | sed 's/^[^(]*(\([0-9]*\) bytes).*$/\1/') 37 | DISKSZ=$(((DISKSZ+1073741823)/1073741824)) 38 | IMGDATE=$(date -r $UBU_IMG.qcow2 +%F) 39 | if test ${IMGDATE:5:99} == "02-29"; then 40 | UNTIL=$((${IMGDATE:0:4}+1))-03-01 41 | else 42 | UNTIL=$((${IMGDATE:0:4}+1))-${IMGDATE:5:99} 43 | fi 44 | if test "$IMG_RAW" = "true"; then 45 | FMT=raw 46 | qemu-img convert $UBU_IMG.qcow2 -O raw -S 4k $UBU_IMG.raw && rm $UBU_IMG.qcow2 || exit 1 47 | fi 48 | #TODO min-disk, min-ram, other std. 
image metadata 49 | mkdir -p ~/tmp 50 | echo "Creating image $UBU_IMG_NM from $UBU_IMG.$FMT" 51 | nohup openstack image create --disk-format $FMT --min-ram 1024 --min-disk $DISKSZ --property image_build_date="$IMGDATE" --property image_original_user=ubuntu --property architecture=x86_64 --property hypervisor_type=kvm --property os_distro=ubuntu --property os_version="$UBUVERS" --property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi --property hw_rng_model=virtio --property image_source=$IMAGESRC --property image_description="https://github.com/osism/k8s-capi-images" --property kubernetes_version=$KUBERNETES_VERSION --property replace_frequency=never --property provided_until=$UNTIL --property uuid_validity=$UNTIL --tag managed_by_osism $IMGREG_EXTRA --file $UBU_IMG.$FMT $UBU_IMG_NM > ~/tmp/img-create-$UBU_IMG_NM.out & 52 | CPID=$! 53 | sleep 5 54 | echo "Waiting for image $UBU_IMG_NM: " 55 | declare -i ctr=0 56 | while test $ctr -le $WAITLOOP; do 57 | CAPIIMG=$(openstack image list --name "$UBU_IMG_NM" -f value -c ID -c Status) 58 | STATUS="${CAPIIMG##* }" 59 | if test "$STATUS" = "saving" -o "$STATUS" = "active"; then break; fi 60 | echo -n "." 61 | let ctr+=1 62 | sleep 10 63 | done 64 | echo " $CAPIIMG" 65 | if test $ctr -ge $WAITLOOP; then 66 | echo "ERROR: Image $UBU_IMG_NM not found" 1>&2 67 | exit 2 68 | fi 69 | # wait $CPID 70 | rm $UBU_IMG.$FMT 71 | fi 72 | -------------------------------------------------------------------------------- /terraform/files/bin/utils.inc: -------------------------------------------------------------------------------- 1 | # File to be included 2 | 3 | export KCONTEXT="${KCONTEXT:-kind-kind}" 4 | export KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/config}" 5 | export KUBECONFIG_ORIG="" 6 | 7 | reset_kubectl_context_workspace() { 8 | if [ -n "${KUBECONFIG_ORIG}" ]; then 9 | unlink ${KUBECONFIG} 10 | 11 | KUBECONFIG=${KUBECONFIG_ORIG} 12 | KUBECONFIG_ORIG="" 13 | fi 14 | } 15 | 16 | setup_kubectl_context_workspace() { 17 | if [ -n "${KUBECONFIG_ORIG}" ]; then 18 | echo "kubectl context workspace already created" 19 | exit 1 20 | fi 21 | 22 | KUBECONFIG_ORIG=${KUBECONFIG} 23 | 24 | local KUBECONFIG_WORKSPACE=`mktemp --tmpdir kubeconfig.yaml.XXXXXX` 25 | trap 'reset_kubectl_context_workspace' EXIT 26 | 27 | # Always use default KUBECONFIG location as source 28 | KUBECONFIG=${HOME}/.kube/config 29 | kubectl config view --flatten >${KUBECONFIG_WORKSPACE} 30 | KUBECONFIG=${KUBECONFIG_WORKSPACE} 31 | } 32 | 33 | unset_kubectl_namespace() { 34 | if [ -n "${CLUSTER_NAME}" ] && [ "$(kubectl config view --minify=true -o jsonpath='{..namespace}')" = "${CLUSTER_NAME}" ]; then 35 | kubectl config set-context --current --namespace=default 36 | fi 37 | } 38 | 39 | set_mgmt_kubectl_namespace() { 40 | KCONTEXT="kind-kind" 41 | kubectl config use-context ${KCONTEXT} 42 | } 43 | 44 | set_workload_cluster_kubectl_namespace() { 45 | if [ -z "${CLUSTER_NAME}" ]; then 46 | echo "CLUSTER_NAME is not set. Exiting."
47 | exit 1 48 | fi 49 | 50 | local CREATE_NEW_NAMESPACE=$1 51 | KCONTEXT="kind-kind" 52 | kubectl config use-context $KCONTEXT 53 | 54 | # Check if the cluster already exists 55 | local EXISTING_CLUSTER=`kubectl get cluster --all-namespaces -o jsonpath='{range .items[?(@.metadata.name == "'${CLUSTER_NAME}'")]}{.metadata.namespace}{end}'` 56 | 57 | if [ -n "${EXISTING_CLUSTER}" ]; then 58 | echo "> Cluster ${CLUSTER_NAME} already exists in namespace ${EXISTING_CLUSTER}" 59 | kubectl config set-context --current --namespace=${EXISTING_CLUSTER} 60 | export CLUSTER_NAMESPACE=${EXISTING_CLUSTER} 61 | else 62 | if kubectl get ns "${CLUSTER_NAME}" &>/dev/null; then 63 | echo "> Cluster ${CLUSTER_NAME} does not exist, but namespace ${CLUSTER_NAME} already exists." 64 | kubectl config set-context --current --namespace=${CLUSTER_NAME} 65 | export CLUSTER_NAMESPACE=${CLUSTER_NAME} 66 | elif [ -z "$CREATE_NEW_NAMESPACE" ] || [ "$CREATE_NEW_NAMESPACE" = true ]; then 67 | echo "> Cluster ${CLUSTER_NAME} does not exist. Creating a new cluster namespace..." 68 | kubectl create namespace ${CLUSTER_NAME} 69 | kubectl config set-context --current --namespace=${CLUSTER_NAME} 70 | export CLUSTER_NAMESPACE=${CLUSTER_NAME} 71 | else 72 | echo "> Cluster ${CLUSTER_NAME} does not exist, and new namespace creation is disabled." 73 | fi 74 | fi 75 | 76 | KCONTEXT="${CLUSTER_NAME}-admin@${CLUSTER_NAME}" 77 | } 78 | 79 | wait_for_k8s_resource_matching() { 80 | local SLEEP=0 81 | until kubectl $2 get $1 -o=jsonpath='{.metadata.name}' >/dev/null 2>&1; do 82 | echo "[${SLEEP}s] Waiting for $1" 83 | sleep 10 84 | let SLEEP+=10 85 | done 86 | } 87 | 88 | wait_for_k8s_resources_matching() { 89 | local SLEEP=0 90 | until [ -n "$(kubectl $3 get $2 --template '{{if len .items}}{{with index .items 0}}{{.metadata.name}}{{end}}{{end}}')" ]; do 91 | echo "[${SLEEP}s] Waiting for $1" 92 | sleep 10 93 | let SLEEP+=10 94 | done 95 | } 96 | -------------------------------------------------------------------------------- /terraform/files/bin/wait.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SLEEP=0 4 | while [ ! -f /var/lib/cloud/instance/boot-finished ] 5 | do 6 | echo "[${SLEEP}s] Waiting for cloud-init to finish" 7 | sleep 5 8 | SLEEP=$(( SLEEP + 5 )) 9 | done 10 | -------------------------------------------------------------------------------- /terraform/files/bin/wait_capi_image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ~/bin/cccfg.inc 4 | 5 | KUBERNETES_VERSION=$(yq eval '.KUBERNETES_VERSION' $CCCFG) 6 | UBU_IMG_NM=$(yq eval '.OPENSTACK_IMAGE_NAME' $CCCFG) 7 | 8 | #download/upload image to openstack 9 | echo -n "Waiting for image $UBU_IMG_NM to become active: " 10 | declare -i ctr=0 11 | while test $ctr -lt 180; do 12 | CAPIIMG=$(openstack image list --name "$UBU_IMG_NM" -f value -c ID -c Status) 13 | if test -z "$CAPIIMG"; then 14 | echo "Image $UBU_IMG_NM does not exist, create ..." 15 | $HOME/bin/upload_capi_image.sh "$1" || exit $? 16 | continue 17 | fi 18 | if test "${CAPIIMG##* }" = "active"; then echo "$CAPIIMG"; break; fi 19 | echo -n "."
20 | let ctr+=1 21 | sleep 10 22 | done 23 | if test $ctr -ge 180; then echo "TIMEOUT"; exit 2; fi 24 | -------------------------------------------------------------------------------- /terraform/files/containerd/docker.io: -------------------------------------------------------------------------------- 1 | server = "https://registry-1.docker.io" 2 | 3 | [host."https://registry.scs.community/v2/docker.io"] 4 | capabilities = ["pull"] 5 | override_path = true 6 | -------------------------------------------------------------------------------- /terraform/files/containerd/ghcr.io: -------------------------------------------------------------------------------- 1 | server = "https://ghcr.io" 2 | 3 | [host."https://registry.scs.community/v2/ghcr.io"] 4 | capabilities = ["pull"] 5 | override_path = true 6 | -------------------------------------------------------------------------------- /terraform/files/containerd/quay.io: -------------------------------------------------------------------------------- 1 | server = "https://quay.io" 2 | 3 | [host."https://registry.scs.community/v2/quay.io"] 4 | capabilities = ["pull"] 5 | override_path = true 6 | -------------------------------------------------------------------------------- /terraform/files/containerd/registry.gitlab.com: -------------------------------------------------------------------------------- 1 | server = "https://registry.gitlab.com" 2 | 3 | [host."https://registry.scs.community/v2/registry.gitlab.com"] 4 | capabilities = ["pull"] 5 | override_path = true 6 | -------------------------------------------------------------------------------- /terraform/files/containerd/registry.k8s.io: -------------------------------------------------------------------------------- 1 | server = "https://registry.k8s.io" 2 | 3 | [host."https://registry.scs.community/v2/registry.k8s.io"] 4 | capabilities = ["pull"] 5 | override_path = true 6 | -------------------------------------------------------------------------------- /terraform/files/fix-keystoneauth-plugins-unversioned.diff: -------------------------------------------------------------------------------- 1 | This is a minimal version of 2 | 3 | commit ad46262148e7b099e6c7239887e20ade5b8e6ac8 4 | Author: Lance Bragstad 5 | Date: Fri May 1 01:02:12 2020 +0000 6 | 7 | Inject /v3 in token path for v3 plugins 8 | 9 | Without this, it's possible to get HTTP 404 errors from keystone if 10 | OS_AUTH_URL isn't versioned (e.g., https://keystone.example.com/ instead 11 | of https://keystone.example.com/v3), even if OS_IDENTITY_API is set to 12 | 3. 13 | 14 | This commit works around this issue by checking the AUTH_URL before 15 | building the token_url and appending '/v3' to the URL before sending the 16 | request. 
17 | 18 | Closes-Bug: 1876317 19 | 20 | Change-Id: Ic75f0c9b36022b884105b87bfe05f4f8292d53b2 21 | 22 | 23 | diff --git a/keystoneauth1/identity/v3/base.py b/keystoneauth1/identity/v3/base.py 24 | index 20a86db..bcd6441 100644 25 | --- a/keystoneauth1/identity/v3/base.py 26 | +++ b/keystoneauth1/identity/v3/base.py 27 | @@ -173,9 +173,13 @@ class Auth(BaseAuth): 28 | if self.system_scope == 'all': 29 | body['auth']['scope'] = {'system': {'all': True}} 30 | 31 | + token_url = self.token_url 32 | + 33 | + if not self.auth_url.rstrip('/').endswith('v3'): 34 | + token_url = '%s/v3/auth/tokens' % self.auth_url.rstrip('/') 35 | + 36 | # NOTE(jamielennox): we add nocatalog here rather than in token_url 37 | # directly as some federation plugins require the base token_url 38 | - token_url = self.token_url 39 | if not self.include_catalog: 40 | token_url += '?nocatalog' 41 | 42 | 43 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/add-vol-to-ctrl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7 3 | kind: OpenStackMachineTemplate 4 | metadata: 5 | name: ${PREFIX}-${CLUSTER_NAME}-control-plane-${CONTROL_PLANE_MACHINE_GEN} 6 | spec: 7 | template: 8 | spec: 9 | rootVolume: 10 | diskSize: ${CONTROL_PLANE_ROOT_DISKSIZE} 11 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/add-vol-to-worker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7 3 | kind: OpenStackMachineTemplate 4 | metadata: 5 | name: ${PREFIX}-${CLUSTER_NAME}-md-0-${WORKER_MACHINE_GEN} 6 | spec: 7 | template: 8 | spec: 9 | rootVolume: 10 | diskSize: ${WORKER_ROOT_DISKSIZE} 11 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/cert-manager-test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: cert-manager-test 6 | --- 7 | apiVersion: cert-manager.io/v1 8 | kind: Issuer 9 | metadata: 10 | name: test-selfsigned 11 | namespace: cert-manager-test 12 | spec: 13 | selfSigned: {} 14 | --- 15 | apiVersion: cert-manager.io/v1 16 | kind: Certificate 17 | metadata: 18 | name: selfsigned-cert 19 | namespace: cert-manager-test 20 | spec: 21 | dnsNames: 22 | - example.com 23 | secretName: selfsigned-cert-tls 24 | issuerRef: 25 | name: test-selfsigned 26 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/cinder-provider.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # task: create default storageclass for cinder/csi 3 | # 4 | apiVersion: storage.k8s.io/v1 5 | kind: StorageClass 6 | metadata: 7 | name: cinder-default 8 | annotations: 9 | storageclass.kubernetes.io/is-default-class: "true" 10 | provisioner: cinder.csi.openstack.org 11 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/cloud-controller-manager-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | items: 4 | - apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRole 6 | metadata: 7 | name: 
system:cloud-controller-manager 8 | rules: 9 | - apiGroups: 10 | - coordination.k8s.io 11 | resources: 12 | - leases 13 | verbs: 14 | - get 15 | - create 16 | - update 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - events 21 | verbs: 22 | - create 23 | - patch 24 | - update 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - nodes 29 | verbs: 30 | - '*' 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - nodes/status 35 | verbs: 36 | - patch 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - services 41 | verbs: 42 | - list 43 | - patch 44 | - update 45 | - watch 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - serviceaccounts 50 | verbs: 51 | - create 52 | - get 53 | - apiGroups: 54 | - "" 55 | resources: 56 | - persistentvolumes 57 | verbs: 58 | - '*' 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - endpoints 63 | verbs: 64 | - create 65 | - get 66 | - list 67 | - watch 68 | - update 69 | - apiGroups: 70 | - "" 71 | resources: 72 | - configmaps 73 | verbs: 74 | - get 75 | - list 76 | - watch 77 | - apiGroups: 78 | - "" 79 | resources: 80 | - secrets 81 | verbs: 82 | - list 83 | - get 84 | - watch 85 | - apiVersion: rbac.authorization.k8s.io/v1 86 | kind: ClusterRole 87 | metadata: 88 | name: system:cloud-node-controller 89 | rules: 90 | - apiGroups: 91 | - "" 92 | resources: 93 | - nodes 94 | verbs: 95 | - '*' 96 | - apiGroups: 97 | - "" 98 | resources: 99 | - nodes/status 100 | verbs: 101 | - patch 102 | - apiGroups: 103 | - "" 104 | resources: 105 | - events 106 | verbs: 107 | - create 108 | - patch 109 | - update 110 | - apiVersion: rbac.authorization.k8s.io/v1 111 | kind: ClusterRole 112 | metadata: 113 | name: system:pvl-controller 114 | rules: 115 | - apiGroups: 116 | - "" 117 | resources: 118 | - persistentvolumes 119 | verbs: 120 | - '*' 121 | - apiGroups: 122 | - "" 123 | resources: 124 | - events 125 | verbs: 126 | - create 127 | - patch 128 | - update 129 | kind: List 130 | metadata: {} 131 | --- 132 | apiVersion: v1 133 | items: 134 | - apiVersion: rbac.authorization.k8s.io/v1 135 | kind: ClusterRoleBinding 136 | metadata: 137 | name: system:cloud-node-controller 138 | roleRef: 139 | apiGroup: rbac.authorization.k8s.io 140 | kind: ClusterRole 141 | name: system:cloud-node-controller 142 | subjects: 143 | - kind: ServiceAccount 144 | name: cloud-node-controller 145 | namespace: kube-system 146 | - apiVersion: rbac.authorization.k8s.io/v1 147 | kind: ClusterRoleBinding 148 | metadata: 149 | name: system:pvl-controller 150 | roleRef: 151 | apiGroup: rbac.authorization.k8s.io 152 | kind: ClusterRole 153 | name: system:pvl-controller 154 | subjects: 155 | - kind: ServiceAccount 156 | name: pvl-controller 157 | namespace: kube-system 158 | - apiVersion: rbac.authorization.k8s.io/v1 159 | kind: ClusterRoleBinding 160 | metadata: 161 | name: system:cloud-controller-manager 162 | roleRef: 163 | apiGroup: rbac.authorization.k8s.io 164 | kind: ClusterRole 165 | name: system:cloud-controller-manager 166 | subjects: 167 | - kind: ServiceAccount 168 | name: cloud-controller-manager 169 | namespace: kube-system 170 | kind: List 171 | metadata: {} 172 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/harbor/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - https://github.com/SovereignCloudStack/k8s-harbor//base/?ref=v6.1.1 6 | 
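The base kustomization above merely pins the upstream k8s-harbor manifests; the envs/ overlays that follow pick the exposure type and add the harbor-config ConfigMap. A minimal sketch of rendering an overlay locally, assuming a kubectl with built-in kustomize, network access to GitHub for the remote base, and envsubst standing in for whatever substitution of the ${HARBOR_*} placeholders the deploy tooling actually performs:

    # Render the clusterIP overlay without applying it (run from the repo root):
    kubectl kustomize terraform/files/kubernetes-manifests.d/harbor/envs/clusterIP
    # Hypothetical manual apply: fill the ${HARBOR_*} placeholders first, e.g.
    export HARBOR_PERSISTENCE=true HARBOR_DATABASE_SIZE=10Gi HARBOR_REDIS_SIZE=1Gi HARBOR_TRIVY_SIZE=5Gi
    export HARBOR_S3_ENDPOINT=https://s3.example.com HARBOR_S3_BUCKET=harbor HARBOR_S3_REGION=default
    kubectl kustomize terraform/files/kubernetes-manifests.d/harbor/envs/clusterIP | envsubst | kubectl apply -f -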
-------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/harbor/envs/clusterIP/harbor-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | config.yaml: | 5 | trivy: 6 | enabled: true 7 | replicas: 2 8 | portal: 9 | replicas: 2 10 | core: 11 | replicas: 2 12 | jobservice: 13 | replicas: 2 14 | jobLoggers: 15 | - database 16 | registry: 17 | replicas: 2 18 | metrics: 19 | enabled: true 20 | cache: 21 | enabled: true 22 | expose: 23 | type: clusterIP 24 | tls: 25 | enabled: false 26 | externalURL: http://harbor 27 | database: 28 | type: internal 29 | redis: 30 | type: internal 31 | persistence: 32 | enabled: ${HARBOR_PERSISTENCE} 33 | persistentVolumeClaim: 34 | database: 35 | size: ${HARBOR_DATABASE_SIZE} 36 | redis: 37 | size: ${HARBOR_REDIS_SIZE} 38 | trivy: 39 | size: ${HARBOR_TRIVY_SIZE} 40 | imageChartStorage: 41 | type: s3 42 | s3: 43 | # generated by s3-credentials.bash 44 | existingSecret: s3-credentials 45 | regionendpoint: ${HARBOR_S3_ENDPOINT} 46 | bucket: ${HARBOR_S3_BUCKET} 47 | region: ${HARBOR_S3_REGION} 48 | kind: ConfigMap 49 | metadata: 50 | name: harbor-config 51 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/harbor/envs/clusterIP/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ../../base 6 | - harbor-config.yaml 7 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/harbor/envs/ingress/harbor-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | config.yaml: | 5 | trivy: 6 | enabled: true 7 | replicas: 2 8 | portal: 9 | replicas: 2 10 | core: 11 | replicas: 2 12 | jobservice: 13 | replicas: 2 14 | jobLoggers: 15 | - database 16 | registry: 17 | replicas: 2 18 | metrics: 19 | enabled: true 20 | cache: 21 | enabled: true 22 | expose: 23 | type: ingress 24 | tls: 25 | enabled: true 26 | certSource: secret 27 | secret: 28 | secretName: harbor-crt 29 | ingress: 30 | hosts: 31 | core: ${HARBOR_DOMAIN_NAME} 32 | className: nginx 33 | harbor: 34 | annotations: 35 | cert-manager.io/issuer: letsencrypt 36 | nginx.ingress.kubernetes.io/limit-rps: "25" 37 | externalURL: https://${HARBOR_DOMAIN_NAME} 38 | database: 39 | type: internal 40 | redis: 41 | type: internal 42 | persistence: 43 | enabled: ${HARBOR_PERSISTENCE} 44 | persistentVolumeClaim: 45 | database: 46 | size: ${HARBOR_DATABASE_SIZE} 47 | redis: 48 | size: ${HARBOR_REDIS_SIZE} 49 | trivy: 50 | size: ${HARBOR_TRIVY_SIZE} 51 | imageChartStorage: 52 | type: s3 53 | s3: 54 | # generated by s3-credentials.bash 55 | existingSecret: s3-credentials 56 | regionendpoint: ${HARBOR_S3_ENDPOINT} 57 | bucket: ${HARBOR_S3_BUCKET} 58 | region: ${HARBOR_S3_REGION} 59 | kind: ConfigMap 60 | metadata: 61 | name: harbor-config 62 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/harbor/envs/ingress/issuer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: Issuer 4 | metadata: 5 | name: letsencrypt 6 | spec: 7 | acme: 8 | server: 
https://acme-v02.api.letsencrypt.org/directory 9 | email: ${HARBOR_ISSUER_EMAIL} 10 | privateKeySecretRef: 11 | name: le-harbor 12 | solvers: 13 | - http01: 14 | ingress: 15 | class: nginx 16 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/harbor/envs/ingress/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ../../base 6 | - harbor-config.yaml 7 | - issuer.yaml 8 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/kuard.yaml: -------------------------------------------------------------------------------- 1 | # This is a stripped down and recomposed example from the Book 2 | # "Kubernetes Up and Running", 2nd ed, by B Burns, J Beda, and K Hightower (O'Reilly), 2019 3 | # with a Persistent Volume Claim and a Persistent Volume added in for testing 4 | --- 5 | kind: PersistentVolumeClaim 6 | apiVersion: v1 7 | metadata: 8 | name: my-vol-claim 9 | annotations: 10 | volume.kubernetes.io/storage-class: default 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 10Gi 17 | --- 18 | apiVersion: v1 19 | kind: Pod 20 | metadata: 21 | name: kuard 22 | spec: 23 | volumes: 24 | - name: my-vol 25 | persistentVolumeClaim: 26 | claimName: my-vol-claim 27 | containers: 28 | - image: gcr.io/kuar-demo/kuard-amd64:blue 29 | name: kuard 30 | ports: 31 | - containerPort: 8080 32 | name: http 33 | protocol: TCP 34 | volumeMounts: 35 | - name: my-vol 36 | mountPath: /data 37 | resources: 38 | requests: 39 | cpu: "50m" 40 | memory: "128Mi" 41 | limits: 42 | cpu: "1000m" 43 | memory: "256Mi" 44 | livenessProbe: 45 | httpGet: 46 | path: /healthy 47 | port: 8080 48 | initialDelaySeconds: 5 49 | timeoutSeconds: 1 50 | periodSeconds: 10 51 | failureThreshold: 3 52 | readinessProbe: 53 | httpGet: 54 | path: /ready 55 | port: 8080 56 | initialDelaySeconds: 30 57 | timeoutSeconds: 1 58 | periodSeconds: 10 59 | failureThreshold: 3 60 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - nginx-ingress-controller.yaml 6 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-monitor/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ../base 6 | patches: 7 | - path: nginx-monitor.yaml 8 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-monitor/nginx-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # YAML_TO_PATCH: https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.6/deploy/static/provider/cloud/deploy.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: ingress-nginx-controller 7 | namespace: ingress-nginx 8 | annotations: 9 | loadbalancer.openstack.org/enable-health-monitor: "true" 10 | 
spec: 11 | type: LoadBalancer 12 | externalTrafficPolicy: Local 13 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-nomonitor/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ../base 6 | patches: 7 | - path: nginx-nomonitor.yaml 8 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-nomonitor/nginx-nomonitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # YAML_TO_PATCH: https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.6/deploy/static/provider/cloud/deploy.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: ingress-nginx-controller 7 | namespace: ingress-nginx 8 | annotations: 9 | loadbalancer.openstack.org/enable-health-monitor: "false" 10 | spec: 11 | type: LoadBalancer 12 | externalTrafficPolicy: Cluster 13 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-proxy/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ../base 6 | patches: 7 | - path: nginx-monitor.yaml 8 | - path: nginx-proxy-cfgmap.yaml 9 | - path: nginx-proxy-lb.yaml 10 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-proxy/nginx-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # YAML_TO_PATCH: https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.6/deploy/static/provider/cloud/deploy.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: ingress-nginx-controller 7 | namespace: ingress-nginx 8 | annotations: 9 | loadbalancer.openstack.org/enable-health-monitor: "true" 10 | spec: 11 | type: LoadBalancer 12 | externalTrafficPolicy: Local 13 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-proxy/nginx-proxy-cfgmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: ingress-nginx-controller 6 | namespace: ingress-nginx 7 | data: 8 | # https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap 9 | use-proxy-protocol: "true" 10 | # FIXME: Could set exact LB VIP address here, NODE_CIDR is a good start 11 | proxy-real-ip-cidr: "0.0.0.0/0" 12 | # enable-real-ip: "true" 13 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/nginx-ingress/nginx-proxy/nginx-proxy-lb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: ingress-nginx-controller 6 | namespace: ingress-nginx 7 | annotations: 8 | loadbalancer.openstack.org/proxy-protocol: "true" 9 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/openstack.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: cloud-controller-manager 6 | namespace: kube-system 7 | --- 8 | apiVersion: apps/v1 9 | kind: DaemonSet 10 | metadata: 11 | name: openstack-cloud-controller-manager 12 | namespace: kube-system 13 | labels: 14 | k8s-app: openstack-cloud-controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | k8s-app: openstack-cloud-controller-manager 19 | updateStrategy: 20 | type: RollingUpdate 21 | template: 22 | metadata: 23 | labels: 24 | k8s-app: openstack-cloud-controller-manager 25 | spec: 26 | nodeSelector: 27 | node-role.kubernetes.io/master: "" 28 | securityContext: 29 | runAsUser: 1001 30 | tolerations: 31 | - key: node.cloudprovider.kubernetes.io/uninitialized 32 | value: "true" 33 | effect: NoSchedule 34 | - key: node-role.kubernetes.io/master 35 | effect: NoSchedule 36 | serviceAccountName: cloud-controller-manager 37 | containers: 38 | - name: openstack-cloud-controller-manager 39 | image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.19.2 40 | args: 41 | - /bin/openstack-cloud-controller-manager 42 | - --v=1 43 | - --cloud-config=$(CLOUD_CONFIG) 44 | - --cloud-provider=openstack 45 | - --use-service-account-credentials=true 46 | - --bind-address=127.0.0.1 47 | volumeMounts: 48 | - mountPath: /etc/kubernetes/pki 49 | name: k8s-certs 50 | readOnly: true 51 | - mountPath: /etc/ssl/certs 52 | name: ca-certs 53 | readOnly: true 54 | - mountPath: /etc/config 55 | name: cloud-config-volume 56 | readOnly: true 57 | resources: 58 | requests: 59 | cpu: 200m 60 | env: 61 | - name: CLOUD_CONFIG 62 | value: /etc/config/cloud.conf 63 | hostNetwork: true 64 | volumes: 65 | - hostPath: 66 | path: /etc/kubernetes/pki 67 | type: DirectoryOrCreate 68 | name: k8s-certs 69 | - hostPath: 70 | path: /etc/ssl/certs 71 | type: DirectoryOrCreate 72 | name: ca-certs 73 | - name: cloud-config-volume 74 | secret: 75 | secretName: cloud-config 76 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/rmv-vol-from-ctrl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7 3 | kind: OpenStackMachineTemplate 4 | metadata: 5 | name: ${PREFIX}-${CLUSTER_NAME}-control-plane-${CONTROL_PLANE_MACHINE_GEN} 6 | spec: 7 | template: 8 | spec: 9 | rootVolume: 10 | $patch: delete 11 | -------------------------------------------------------------------------------- /terraform/files/kubernetes-manifests.d/rmv-vol-from-worker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha7 3 | kind: OpenStackMachineTemplate 4 | metadata: 5 | name: ${PREFIX}-${CLUSTER_NAME}-md-0-${WORKER_MACHINE_GEN} 6 | spec: 7 | template: 8 | spec: 9 | rootVolume: 10 | $patch: delete 11 | -------------------------------------------------------------------------------- /terraform/files/template/capi-settings.tmpl: -------------------------------------------------------------------------------- 1 | # Global CAPI settings, to be sourced 2 | CLUSTERAPI_OPENSTACK_PROVIDER_VERSION=${capi_openstack_version} 3 | CLUSTERAPI_VERSION=${clusterapi_version} 4 | CILIUM_BINARIES="${cilium_binaries}" 5 | PREFIX=${prefix} 6 | TESTCLUSTER=${testcluster_name} 7 | 
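The ${...} placeholders in capi-settings.tmpl (and the other *.tmpl files below) are filled in from the OpenTofu/Terraform variables when the management host is provisioned; the rendered result is a plain shell fragment that the bin/ scripts source. A minimal sketch of the consuming side, with an illustrative path for the rendered file:

    # Hypothetical consumer of the rendered settings file:
    . ~/capi-settings   # rendered from capi-settings.tmpl; the path is an assumption
    echo "Deploying cluster-api $CLUSTERAPI_VERSION (CAPO $CLUSTERAPI_OPENSTACK_PROVIDER_VERSION), prefix $PREFIX, test cluster $TESTCLUSTER"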
-------------------------------------------------------------------------------- /terraform/files/template/cloud.conf.tmpl: -------------------------------------------------------------------------------- 1 | [Global] 2 | auth-url=${clouds.auth.auth_url} 3 | region="${clouds.region_name}" 4 | application-credential-id=${appcredid} 5 | application-credential-secret="${appcredsecret}" 6 | 7 | [LoadBalancer] 8 | manage-security-groups=true 9 | use-octavia=true 10 | enable-ingress-hostname=true 11 | -------------------------------------------------------------------------------- /terraform/files/template/clouds.yaml.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | clouds: 3 | ${cloud_provider}: 4 | interface: ${clouds.interface} 5 | identity_api_version: ${clouds.identity_api_version} 6 | region_name: ${clouds.region_name} 7 | cacert: ${cacert} 8 | auth_type: "v3applicationcredential" 9 | auth: 10 | auth_url: ${clouds.auth.auth_url} 11 | application_credential_id: ${appcredid} 12 | application_credential_secret: "${appcredsecret}" 13 | -------------------------------------------------------------------------------- /terraform/files/template/clusterctl.yaml.tmpl: -------------------------------------------------------------------------------- 1 | # This is the settings file to configure clusters with clusterctl 2 | # Secrets (the b64 encoded data at the end) are filled in using clusterctl_template.sh. 3 | # How to use: 4 | # * Create a copy of this file in clusterctl-CLUSTER_NAME.yaml and adjust it. 5 | # * ./create_cluster.sh CLUSTER_NAME 6 | # This will copy it to ~/.cluster-api/clusterctl.yaml and use 7 | # clusterctl generate cluster $CLUSTER_NAME --from cluster-template.yaml 8 | # to generate $CLUSTER_NAME-config.yaml 9 | # 10 | # (c) Thorsten Schifferdecker, 2020, Malte Münch, 2021, Kurt Garloff, 2020-2023 11 | # SPDX-License-Identifier: Apache-2.0 12 | 13 | # Kubernetes version - only upgrades (+1 minor version) are allowed 14 | KUBERNETES_VERSION: ${kubernetes_version} 15 | OPENSTACK_IMAGE_NAME: ubuntu-capi-image-${kubernetes_version} 16 | # Specify version (git branch name) of openstack ccm and cindercsi 17 | # true means using the recommended version for this k8s version 18 | DEPLOY_OCCM: ${deploy_occm} 19 | DEPLOY_CINDERCSI: ${deploy_cindercsi} 20 | 21 | # cilium 22 | USE_CILIUM: ${use_cilium} 23 | # calico 24 | CALICO_VERSION: ${calico_version} 25 | # deploy nginx ingress controller 26 | DEPLOY_NGINX_INGRESS: ${deploy_nginx_ingress} 27 | # deploy Gateway API CRDs and enable cilium's Gateway API implementation (requires USE_CILIUM=true) 28 | DEPLOY_GATEWAY_API: ${deploy_gateway_api} 29 | # Use PROXY protocol to get real IPs 30 | NGINX_INGRESS_PROXY: true 31 | # Use OVN LB provider (false, auto, true) 32 | USE_OVN_LB_PROVIDER: ${use_ovn_lb_provider} 33 | # deploy cert-manager 34 | DEPLOY_CERT_MANAGER: ${deploy_cert_manager} 35 | # deploy flux2 36 | DEPLOY_FLUX: ${deploy_flux} 37 | # deploy metrics service 38 | DEPLOY_METRICS: ${deploy_metrics} 39 | 40 | # OpenStack instance additional metadata 41 | OPENSTACK_CONTROL_PLANE_MACHINE_METADATA: "%{ if length(controller_metadata) > 0 }{ %{ for metadata_key, metadata_value in controller_metadata ~} ${metadata_key}: '${metadata_value}', %{ endfor ~} }%{ endif }" 42 | OPENSTACK_NODE_MACHINE_METADATA: "%{ if length(worker_metadata) > 0 }{ %{ for metadata_key, metadata_value in worker_metadata ~} ${metadata_key}: '${metadata_value}', %{ endfor ~} }%{ endif }" 43 | 44 | # OpenStack flavors and
machine count 45 | OPENSTACK_CONTROL_PLANE_MACHINE_FLAVOR: ${controller_flavor} 46 | CONTROL_PLANE_MACHINE_COUNT: ${controller_count} 47 | # Increase generation counter when changing flavor or k8s version or other CP settings 48 | CONTROL_PLANE_MACHINE_GEN: genc01 49 | 50 | OPENSTACK_NODE_MACHINE_FLAVOR: ${worker_flavor} 51 | WORKER_MACHINE_COUNT: ${worker_count} 52 | # Increase generation counter when changing flavor or k8s version or other MD settings 53 | WORKER_MACHINE_GEN: genw01 54 | 55 | # Openstack Availability Zone 56 | OPENSTACK_FAILURE_DOMAIN: ${availability_zone} 57 | 58 | ETCD_UNSAFE_FS: ${etcd_unsafe_fs} 59 | 60 | # configure_proxy.sh sets it to ". /etc/profile.d/proxy.sh; " 61 | PROXY_CMD: "" 62 | 63 | # CIDRs 64 | NODE_CIDR: ${node_cidr} 65 | SERVICE_CIDR: ${service_cidr} 66 | POD_CIDR: ${pod_cidr} 67 | 68 | # Set MTU for k8s CNI network (50 smaller than cloud, 0 = auto) 69 | MTU_VALUE: 0 70 | 71 | # Restrict kube-api access 72 | RESTRICT_KUBEAPI: "[ %{ for cidr in restrict_kubeapi ~} ${cidr}, %{ endfor ~} ]" 73 | 74 | # Openstack external Network ID 75 | # hint: openstack network list --external -f value -c ID 76 | OPENSTACK_EXTERNAL_NETWORK_ID: ${external_id} 77 | 78 | OPENSTACK_DNS_NAMESERVERS: "[ %{ for dnsip in dns_nameservers ~} ${dnsip}, %{ endfor ~} ]" 79 | 80 | # Increase generation counter when changing restrict_kubeapi or other OC settings 81 | OPENSTACK_CLUSTER_GEN: geno01 82 | 83 | OPENSTACK_SSH_KEY_NAME: ${prefix}-keypair 84 | 85 | # Use anti-affinity server groups 86 | OPENSTACK_ANTI_AFFINITY: ${anti_affinity} 87 | OPENSTACK_SOFT_ANTI_AFFINITY_CONTROLLER: ${soft_anti_affinity_controller} 88 | OPENSTACK_SRVGRP_CONTROLLER: nonono 89 | OPENSTACK_SRVGRP_WORKER: nonono 90 | 91 | # OpenStack image handling 92 | OPENSTACK_IMAGE_RAW: ${kube_image_raw} 93 | OPENSTACK_IMAGE_REGISTRATION_EXTRA_FLAGS: ${image_registration_extra_flags} 94 | 95 | # the section used at clouds.yaml 96 | OPENSTACK_CLOUD: ${cloud_provider} 97 | 98 | # the clouds.yaml 99 | # Note: current OCCM needs project_id to be included 100 | # (unlike the openstack client tools) 101 | # hint: $(base64 -w0 < .config/openstack/clouds.yaml ) 102 | OPENSTACK_CLOUD_YAML_B64: 103 | 104 | # the cloud provider config 105 | # hint: base64 -w0 < cloud.conf 106 | OPENSTACK_CLOUD_PROVIDER_CONF_B64: 107 | 108 | # The Certificate Authority (CA) used for Openstack API endpoints. 
We use 109 | # "let's encrypt" https://letsencrypt.org/certs/letsencryptauthorityx3.pem.txt 110 | # hint: base64 -w0 < cacert.pem 111 | OPENSTACK_CLOUD_CACERT_B64: 112 | 113 | # set OpenStack Instance create timeout (in minutes) 114 | CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT: ${capo_instance_create_timeout} 115 | -------------------------------------------------------------------------------- /terraform/files/template/harbor-settings.tmpl: -------------------------------------------------------------------------------- 1 | DEPLOY_HARBOR=${deploy_harbor} 2 | %{ for config_key, config_value in harbor_config ~} 3 | HARBOR_${upper(config_key)}=${config_value} 4 | %{ endfor ~} 5 | -------------------------------------------------------------------------------- /terraform/files/update/R2_to_R3/update-cluster-template.diff: -------------------------------------------------------------------------------- 1 | diff --git a/terraform/files/template/cluster-template.yaml b/terraform/files/template/cluster-template.yaml 2 | index b7b90c5..25af06b 100644 3 | --- a/cluster-template.yaml 4 | +++ b/cluster-template.yaml 5 | @@ -46,7 +46,7 @@ spec: 6 | kind: OpenStackMachineTemplate 7 | #apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 8 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 9 | - name: "k8s-clusterapi-${CLUSTER_NAME}-control-plane-genc1" 10 | + name: "k8s-clusterapi-${CLUSTER_NAME}-control-plane-${CONTROL_PLANE_MACHINE_GEN}" 11 | kubeadmConfigSpec: 12 | initConfiguration: 13 | nodeRegistration: 14 | @@ -85,7 +85,7 @@ spec: 15 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 16 | kind: OpenStackMachineTemplate 17 | metadata: 18 | - name: k8s-clusterapi-${CLUSTER_NAME}-control-plane-genc1 19 | + name: k8s-clusterapi-${CLUSTER_NAME}-control-plane-${CONTROL_PLANE_MACHINE_GEN} 20 | spec: 21 | template: 22 | spec: 23 | @@ -105,7 +105,7 @@ spec: 24 | apiVersion: cluster.x-k8s.io/v1beta1 25 | kind: MachineDeployment 26 | metadata: 27 | - name: "${CLUSTER_NAME}-md-0-genw1" 28 | + name: "${CLUSTER_NAME}-md-0-no1" 29 | spec: 30 | clusterName: "${CLUSTER_NAME}" 31 | replicas: ${WORKER_MACHINE_COUNT} 32 | @@ -118,11 +118,11 @@ spec: 33 | failureDomain: ${OPENSTACK_FAILURE_DOMAIN} 34 | bootstrap: 35 | configRef: 36 | - name: "${CLUSTER_NAME}-md-0-genw1" 37 | + name: "${CLUSTER_NAME}-md-0-${WORKER_MACHINE_GEN}" 38 | apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 39 | kind: KubeadmConfigTemplate 40 | infrastructureRef: 41 | - name: "k8s-clusterapi-${CLUSTER_NAME}-md-0-genw1" 42 | + name: "k8s-clusterapi-${CLUSTER_NAME}-md-0-${WORKER_MACHINE_GEN}" 43 | #apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 44 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 45 | kind: OpenStackMachineTemplate 46 | @@ -131,7 +131,7 @@ spec: 47 | apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 48 | kind: OpenStackMachineTemplate 49 | metadata: 50 | - name: k8s-clusterapi-${CLUSTER_NAME}-md-0-genw1 51 | + name: k8s-clusterapi-${CLUSTER_NAME}-md-0-${WORKER_MACHINE_GEN} 52 | spec: 53 | template: 54 | spec: 55 | @@ -151,7 +151,7 @@ spec: 56 | apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 57 | kind: KubeadmConfigTemplate 58 | metadata: 59 | - name: ${CLUSTER_NAME}-md-0-genw1 60 | + name: ${CLUSTER_NAME}-md-0-${WORKER_MACHINE_GEN} 61 | spec: 62 | template: 63 | spec: 64 | 65 | -------------------------------------------------------------------------------- /terraform/files/update/R2_to_R3/update-clusterctl-control-gen.sed: -------------------------------------------------------------------------------- 1 |
#!/usr/bin/sed -f 2 | /^CONTROL_PLANE_MACHINE_COUNT:/{ a\ 3 | # Increase generation counter when changing flavor or k8s version or other MD settings 4 | a\ 5 | CONTROL_PLANE_MACHINE_GEN: genc01 6 | } 7 | -------------------------------------------------------------------------------- /terraform/files/update/R2_to_R3/update-clusterctl-worker-gen.sed: -------------------------------------------------------------------------------- 1 | #!/usr/bin/sed -f 2 | /^WORKER_MACHINE_COUNT:/{ a\ 3 | # Increase generation counter when changing flavor or k8s version or other MD settings 4 | a\ 5 | WORKER_MACHINE_GEN: genw01 6 | } 7 | -------------------------------------------------------------------------------- /terraform/files/update/R2_to_R3/update-clusterctl.diff: -------------------------------------------------------------------------------- 1 | diff --git a/terraform/files/template/clusterctl.yaml.tmpl b/terraform/files/template/clusterctl.yaml.tmpl 2 | index fd3e73f..d093b76 100644 3 | --- a/clusterctl.yaml 4 | +++ b/clusterctl.yaml 5 | @@ -34,9 +34,13 @@ DEPLOY_METRICS: ${deploy_metrics} 6 | # OpenStack flavors and machine count 7 | OPENSTACK_CONTROL_PLANE_MACHINE_FLAVOR: ${controller_flavor} 8 | CONTROL_PLANE_MACHINE_COUNT: ${controller_count} 9 | +# Increase generation counter when changing flavor or k8s version or other MD settings 10 | +CONTROL_PLANE_MACHINE_GEN: genc01 11 | 12 | OPENSTACK_NODE_MACHINE_FLAVOR: ${worker_flavor} 13 | WORKER_MACHINE_COUNT: ${worker_count} 14 | +# Increase generation counter when changing flavor or k8s version or other MD settings 15 | +WORKER_MACHINE_GEN: genw01 16 | 17 | # Openstack Availablity Zone 18 | OPENSTACK_FAILURE_DOMAIN: ${availability_zone} 19 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | # - main - 2 | provider "openstack" { 3 | cloud = var.cloud_provider 4 | } 5 | 6 | terraform { 7 | required_version = ">= 1.6.0" 8 | 9 | required_providers { 10 | openstack = { 11 | source = "terraform-provider-openstack/openstack" 12 | version = "1.54.1" 13 | } 14 | local = { 15 | source = "hashicorp/local" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /terraform/neutron.tf: -------------------------------------------------------------------------------- 1 | # generic security group allow ssh connection 2 | # used for cluster-api-nodes 3 | resource "openstack_compute_secgroup_v2" "security_group_ssh" { 4 | name = "${var.prefix}-allow-ssh" 5 | description = "security group for ssh 22/tcp (managed by opentofu)" 6 | 7 | rule { 8 | cidr = "0.0.0.0/0" 9 | ip_protocol = "tcp" 10 | from_port = 22 11 | to_port = 22 12 | } 13 | } 14 | 15 | # generic security group allow icmp connection 16 | # used for cluster-api-nodes 17 | resource "openstack_compute_secgroup_v2" "security_group_icmp" { 18 | name = "${var.prefix}-allow-icmp" 19 | description = "security group for ICMP" 20 | 21 | rule { 22 | cidr = "0.0.0.0/0" 23 | ip_protocol = "icmp" 24 | from_port = -1 25 | to_port = -1 26 | } 27 | } 28 | # security group allow ssh/icmp connection to mgmt cluster/host 29 | # 30 | resource "openstack_compute_secgroup_v2" "security_group_mgmt" { 31 | name = "${var.prefix}-mgmt" 32 | description = "security group for mgmtcluster (managed by opentofu)" 33 | 34 | dynamic "rule" { 35 | for_each = var.restrict_mgmt_server 36 | iterator = cidr 37 | content { 38 | cidr = cidr.value 39 | 
ip_protocol = "tcp" 40 | from_port = 22 41 | to_port = 22 42 | } 43 | } 44 | 45 | rule { 46 | cidr = "0.0.0.0/0" 47 | ip_protocol = "icmp" 48 | from_port = -1 49 | to_port = -1 50 | } 51 | } 52 | 53 | resource "openstack_networking_network_v2" "network_mgmt" { 54 | name = "${var.prefix}-net" 55 | # availability_zone_hints = [var.availability_zone] 56 | # admin_state_up = "true" 57 | } 58 | 59 | resource "openstack_networking_subnet_v2" "subnet_mgmt" { 60 | name = "${var.prefix}-subnet" 61 | network_id = openstack_networking_network_v2.network_mgmt.id 62 | ip_version = 4 63 | cidr = var.mgmt_cidr 64 | dns_nameservers = var.dns_nameservers 65 | 66 | allocation_pool { 67 | start = var.mgmt_ip_range.start 68 | end = var.mgmt_ip_range.end 69 | } 70 | } 71 | 72 | data "openstack_networking_network_v2" "external" { 73 | name = data.openstack_networking_network_v2.extnet.name 74 | } 75 | 76 | resource "openstack_networking_router_v2" "router_mgmt" { 77 | name = "${var.prefix}-rtr" 78 | description = "router for mgmtcluster (managed by opentofu)" 79 | external_network_id = data.openstack_networking_network_v2.external.id 80 | availability_zone_hints = [var.availability_zone] # comment this out if your cloud does not have network AZs 81 | } 82 | 83 | resource "openstack_networking_router_interface_v2" "router_interface" { 84 | router_id = openstack_networking_router_v2.router_mgmt.id 85 | subnet_id = openstack_networking_subnet_v2.subnet_mgmt.id 86 | } 87 | -------------------------------------------------------------------------------- /terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | output "mgmtcluster_address" { 2 | value = openstack_networking_floatingip_v2.mgmtcluster_floatingip.address 3 | #sensitive = true 4 | } 5 | 6 | output "private_key" { 7 | value = openstack_compute_keypair_v2.keypair.private_key 8 | sensitive = true 9 | } 10 | 11 | resource "local_sensitive_file" "id_rsa" { 12 | filename = ".deploy.id_rsa.${var.cloud_provider}" 13 | file_permission = "0600" 14 | content = openstack_compute_keypair_v2.keypair.private_key 15 | } 16 | 17 | resource "local_file" "MGMTCLUSTER_ADDRESS" { 18 | filename = ".deploy.MGMTCLUSTER_ADDRESS.${var.cloud_provider}" 19 | file_permission = "0644" 20 | content = "MGMTCLUSTER_ADDRESS=${openstack_networking_floatingip_v2.mgmtcluster_floatingip.address}\n" 21 | } 22 | -------------------------------------------------------------------------------- /terraform/secure.yaml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | clouds: 3 | default: 4 | auth: 5 | username: 6 | password: 7 | --------------------------------------------------------------------------------
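secure.yaml complements clouds.yaml: the OpenStack client tools merge secure.yaml over clouds.yaml, so the password can be kept in a file with tighter permissions while the non-secret connection data stays in clouds.yaml. A quick sanity check after filling in the sample above, assuming the cloud entry keeps the name "default" and both files sit in the working directory or in ~/.config/openstack/:

    # Protect the secrets file, then verify the merged credentials work:
    chmod 600 secure.yaml
    OS_CLOUD=default openstack token issue -f value -c expires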