├── .gitignore
├── Makefile
├── README.md
├── bastion.tf
├── clusters.tf
├── envs
│   ├── alaska-sriov.tfvars
│   ├── alaska.tfvars
│   ├── cumulus.tfvars
│   ├── devstack-ipv6.tfvars
│   ├── devstack.tfvars
│   ├── kayobe-aio-overcloud.tfvars
│   ├── sausage.tfvars
│   ├── smslab.tfvars
│   └── stein.tfvars
├── images
│   ├── dev.txt
│   ├── legacy.txt
│   ├── master.txt
│   └── sonobuoy.txt
├── manifests
│   ├── nginx-cinder-csi.yaml
│   ├── nginx-in-tree-cinder.yaml
│   ├── sc-cinder-csi.yaml
│   └── sc-in-tree-cinder.yaml
├── site
│   ├── create.sh
│   ├── destroy.sh
│   ├── magnum-tiller.sh
│   ├── plan.sh
│   ├── pull-retag-push.py
│   ├── purge.sh
│   ├── taint.sh
│   ├── upload-atomic.sh
│   └── upload-coreos.sh
├── sonobuoy.yml
├── templates.tf
├── terraform.tfvars.sample
├── tfvars
│   ├── atomic.tfvars
│   ├── coreos.tfvars
│   ├── flannel.tfvars
│   └── podman.tfvars
└── versions.tf

/.gitignore:
--------------------------------------------------------------------------------
.terraform/
terraform.tfvars
terraform.tfstate
terraform.tfstate.backup
*.bak
*.qcow2
*.swp
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
HELM ?= v3.6.1
HELM2 ?= v2.17.0
SONOBUOY ?= 0.53.2
TERRAFORM ?= 1.0.3
KUBECTL ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
CLUSTERCTL ?= 0.4.0

OS = $(shell lsb_release -si || echo CentOS)
ifeq ($(OS),Ubuntu)
PM = apt
else ifeq ($(OS),CentOS)
PM = dnf
else
$(error Unsupported OS: $(OS))
endif

deps: helm kubectl terraform sonobuoy

# Install helm (version 3)
helm:
	curl -L https://get.helm.sh/helm-${HELM}-linux-amd64.tar.gz --output helm.tar.gz && \
	mkdir -p tmp && \
	tar -xzf helm.tar.gz -C tmp/ && \
	sudo mv tmp/linux-amd64/helm /usr/local/bin/helm && \
	rm -rf helm.tar.gz tmp

# Install helm (version 2)
helm2:
	curl -L https://get.helm.sh/helm-${HELM2}-linux-amd64.tar.gz --output helm.tar.gz && \
	mkdir -p tmp && \
	tar -xzf helm.tar.gz -C tmp/ && \
	sudo mv tmp/linux-amd64/helm /usr/local/bin/helm2 && \
	rm -rf helm.tar.gz tmp

unzip:
	sudo ${PM} install unzip -y

# Install latest known terraform
terraform: unzip
	curl -L https://releases.hashicorp.com/terraform/${TERRAFORM}/terraform_${TERRAFORM}_linux_amd64.zip --output terraform.zip && \
	unzip terraform.zip && \
	rm terraform.zip && \
	sudo mv terraform /usr/local/bin/terraform

# Install latest kubectl
kubectl:
	curl -LO https://storage.googleapis.com/kubernetes-release/release/${KUBECTL}/bin/linux/amd64/kubectl && \
	chmod +x kubectl && \
	sudo mv kubectl /usr/local/bin/kubectl

# Install latest known clusterctl
clusterctl:
	curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v${CLUSTERCTL}/clusterctl-linux-amd64 -o clusterctl && \
	chmod +x clusterctl && \
	sudo mv clusterctl /usr/local/bin/clusterctl

jq:
	sudo ${PM} install jq -y

# Install latest known sonobuoy
sonobuoy: jq
	curl -L "https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY}/sonobuoy_${SONOBUOY}_linux_amd64.tar.gz" --output sonobuoy.tar.gz && \
	mkdir -p tmp && \
	tar -xzf sonobuoy.tar.gz -C tmp/ && \
	chmod +x tmp/sonobuoy && \
	sudo mv tmp/sonobuoy /usr/local/bin/sonobuoy && \
	rm -rf sonobuoy.tar.gz tmp

# Run conformance
conformance:
	sonobuoy run --sonobuoy-image ghcr.io/stackhpc/sonobuoy:v${SONOBUOY} --systemd-logs-image ghcr.io/stackhpc/systemd-logs:v0.3 --mode=certified-conformance

# Retrieve conformance results, requires kubectl
result:
	$(eval dir=k8s-conformance/$(shell kubectl version --short=true | grep Server | cut -f3 -d' ')/)
	$(eval output=$(shell sonobuoy retrieve))
	mkdir -p /tmp/${output} ${dir}
	tar xzf ${output} -C /tmp/${output}
	cp /tmp/${output}/plugins/e2e/results/global/* ${dir}
	rm -rf /tmp/${output} ${output}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# OpenStack Magnum and Terraform to deploy Kubernetes clusters

Using this repository will deploy two separate Kubernetes clusters: one with
Calico and another with Flannel.

## Prerequisites:

- OpenStack Queens, Magnum Stein 8.1.0 minimum
- Terraform >= 1.0.0 (v1.0.3 via `make deps`), provider.openstack >= 1.31.0 (tested with v1.37.1)
- Kubernetes client (to be able to run `kubectl`)

Install dependencies:

    make deps

## Upload OS image

Upload the latest stable Fedora CoreOS image to Glance:

    ./site/upload-coreos.sh

Upload the last ever Fedora Atomic image to Glance (DEPRECATED):

    ./site/upload-atomic.sh

## Prepare registry

Pull, retag and push the list of images to a local container registry:

    ./site/pull-retag-push.py images/master.txt

    Pulling images in master.txt
    rancher/hyperkube:v1.19.1-rancher1 | exists locally
    k8s.gcr.io/hyperkube:v1.18.8 | exists locally
    ...
    gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2 | exists locally
    quay.io/coreos/configmap-reload:v0.0.1 | exists locally
    ---
    Pushing images in master.txt
    localhost:5000/hyperkube:v1.19.1-rancher1 | pushed
    localhost:5000/hyperkube:v1.18.8 | pushed
    ...
    localhost:5000/csi-node-driver-registrar:v1.1.0 | pushed
    localhost:5000/node-problem-detector:v0.6.2 | pushed
    ---

## Deployment:

Initialise terraform:

    terraform init --upgrade

Copy the sample variable file:

    cp terraform.tfvars{.sample,}

Edit `terraform.tfvars` and fill in details like `external_network` and `keypair_name`.

Source your OpenStack cloud environment variables:

    source openrc.sh

To upload the latest Fedora CoreOS image:

    ./site/upload-coreos.sh # requires Magnum Train 9.1.0 minimum and Heat Train.
    ./site/upload-atomic.sh # if using older Magnum releases

To execute `terraform plan`:

    ./site/plan.sh tfvars/coreos.tfvars

To deploy the clusters (replace with `atomic.tfvars` or `podman.tfvars` if using a Magnum release older than Train 9.1.0):

    ./site/create.sh tfvars/coreos.tfvars # requires Magnum Train (9.1.0) and Heat Train minimum.
    ./site/create.sh tfvars/podman.tfvars # requires Magnum Train (9.1.0) and Heat Queens minimum.
    ./site/create.sh tfvars/atomic.tfvars # requires Magnum Stein (8.1.0) and Heat Queens minimum.
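
Once `create.sh` has finished, the result can be checked with the Magnum CLI and `kubectl`. A minimal sketch, assuming the Magnum cluster names match the keys in the chosen tfvars file (e.g. `k8s-1.20` from `tfvars/coreos.tfvars`):

    # Wait for the clusters to reach CREATE_COMPLETE
    openstack coe cluster list -c name -c status

    # Write a kubeconfig for one cluster into the current directory
    openstack coe cluster config k8s-1.20 --dir .

    # Point kubectl at it and confirm the nodes are Ready
    export KUBECONFIG=$PWD/config
    kubectl get nodes -o wide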

To execute `terraform taint` against all clusters:

    ./site/taint.sh

To execute `terraform destroy`:

    ./site/destroy.sh tfvars/coreos.tfvars

To delete ALL clusters and templates manually using the OpenStack CLI [DANGER - for dev use only]:

    ./site/purge.sh

## Autoscaling

SSH into the master node and create enough pods to trigger a scale-up:

    kubectl create deployment test-autoscale --image=nginx
    kubectl scale deployment test-autoscale --replicas=100

Sample output of `kubectl logs deploy/cluster-autoscaler -n kube-system`:

    I1017 13:26:11.617165 1 leaderelection.go:217] attempting to acquire leader lease kube-system/cluster-autoscaler...
    I1017 13:26:11.626499 1 leaderelection.go:227] successfully acquired lease kube-system/cluster-autoscaler
    I1017 13:26:13.804795 1 magnum_manager_heat.go:293] For stack ID 3e981ac7-4a6e-47a7-9d16-7874f5e108a0, stack name is k8s-sb7k6mtqieim
    I1017 13:26:13.974239 1 magnum_manager_heat.go:310] Found nested kube_minions stack: name k8s-sb7k6mtqieim-kube_minions-33izbolw5kvp, ID 2f7b5dff-9960-4ae2-8572-abed511d0801
    I1017 13:32:25.461803 1 scale_up.go:689] Scale-up: setting group default-worker size to 3
    I1017 13:32:28.400053 1 magnum_nodegroup.go:101] Increasing size by 1, 2->3
    I1017 13:33:02.387803 1 magnum_nodegroup.go:67] Waited for cluster UPDATE_IN_PROGRESS status
    I1017 13:36:11.528032 1 magnum_nodegroup.go:67] Waited for cluster UPDATE_COMPLETE status
    I1017 13:36:21.550679 1 scale_up.go:689] Scale-up: setting group default-worker size to 5
    I1017 13:36:24.157717 1 magnum_nodegroup.go:101] Increasing size by 2, 3->5
    I1017 13:36:58.062981 1 magnum_nodegroup.go:67] Waited for cluster UPDATE_IN_PROGRESS status
    I1017 13:40:07.134681 1 magnum_nodegroup.go:67] Waited for cluster UPDATE_COMPLETE status
    W1017 13:50:14.668777 1 reflector.go:289] k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes/listers.go:190: watch of *v1.Pod ended with: too old resource version: 15787 (16414)
    I1017 14:00:17.891270 1 scale_down.go:882] Scale-down: removing empty node k8s-sb7k6mtqieim-minion-2
    I1017 14:00:17.891315 1 scale_down.go:882] Scale-down: removing empty node k8s-sb7k6mtqieim-minion-3
    I1017 14:00:17.891323 1 scale_down.go:882] Scale-down: removing empty node k8s-sb7k6mtqieim-minion-4
    I1017 14:00:23.255551 1 magnum_manager_heat.go:344] Resolved node k8s-sb7k6mtqieim-minion-2 to stack index 2
    I1017 14:00:23.255579 1 magnum_manager_heat.go:344] Resolved node k8s-sb7k6mtqieim-minion-4 to stack index 4
    I1017 14:00:23.255584 1 magnum_manager_heat.go:344] Resolved node k8s-sb7k6mtqieim-minion-3 to stack index 3
    I1017 14:00:24.283658 1 magnum_manager_heat.go:280] Waited for stack UPDATE_IN_PROGRESS status
    I1017 14:01:25.030818 1 magnum_manager_heat.go:280] Waited for stack UPDATE_COMPLETE status
    I1017 14:01:58.970490 1 magnum_nodegroup.go:67] Waited for cluster UPDATE_IN_PROGRESS status

## Cinder Volumes

To enable support for Cinder volumes, ensure that `volume_driver = "cinder"` is set in `terraform.tfvars`.
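
For reference, `volume_driver` already defaults to `"cinder"` in `templates.tf`, so a minimal `terraform.tfvars` (building on `terraform.tfvars.sample`) only needs to keep that default; the values below are illustrative:

    external_network = "public"
    keypair_name     = "default"
    volume_driver    = "cinder"   # cinder_csi_enabled is also "true" in the default template_labels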

To attach cinder volumes:

    openstack volume create nginx-volume --size 100

    cat < "${HELM_HOME}/ca.pem"
kubectl -n magnum-tiller get secret helm-client-secret -o jsonpath='{.data.key\.pem}' | base64 --decode > "${HELM_HOME}/key.pem"
kubectl -n magnum-tiller get secret helm-client-secret -o jsonpath='{.data.cert\.pem}' | base64 --decode > "${HELM_HOME}/cert.pem"
helm2 init --upgrade
--------------------------------------------------------------------------------
/site/plan.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x
DIR=`dirname $0`/..
TFDIR=`realpath $DIR`
TFVARS=${1:-$TFDIR/terraform.tfvars}
TFSTATE=${2:-$TFDIR/terraform.tfstate}
ACTION=${3:-plan}
terraform $ACTION -var-file=$TFVARS -state=$TFSTATE
--------------------------------------------------------------------------------
/site/pull-retag-push.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# for i in `cat worker.txt`; do (docker pull $i &); done
import argparse
import asyncio
import docker
import json
import sys

d = docker.from_env()


def read_images(fname, filters):
    with open(fname) as f:
        images = f.read().splitlines()
    return [i for i in images if all(f(i) for f in filters)]


def pull(image, max_width):
    try:
        d.images.get(image)
        result = "exists locally"
    except docker.errors.ImageNotFound:
        try:
            d.images.pull(image)
            result = "pulled"
        except Exception:
            result = "error"
    cols = image.ljust(max_width), result
    print(" | ".join(cols))


def push(image, local_image, max_width):
    try:
        d.api.tag(image, local_image)
        error = json.loads(d.images.push(local_image).splitlines()[-1]).get("error")
        result = error if error else "pushed"
    except docker.errors.ImageNotFound:
        result = "not found"
    cols = local_image.ljust(max_width), result
    print(" | ".join(cols))


async def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "inputs",
        nargs="+",
        help="list of input files with remote images to pull, retag and push",
    )
    parser.add_argument(
        "--registry",
        "-r",
        default="ghcr.io/stackhpc",
        help="name of the local registry to retag and push images to (default: ghcr.io/stackhpc)",
    )
    parser.add_argument(
        "--filter",
        "-f",
        default="",
        required=False,
        help="filter images (default: '')",
    )
    args = parser.parse_args()

    if not args.inputs:
        print("Nothing to pull, retag and push.")
        parser.print_help()
        sys.exit(1)

    for fname in args.inputs:
        filters = [lambda x: x, lambda x: not x.startswith("#")]
        if args.filter:
            filters.append(lambda x: args.filter in x)
        images = read_images(fname, filters)
        max_width = max([len(i) for i in images])

        print("Pulling images in %s" % fname)
        tasks = [loop.run_in_executor(None, pull, image, max_width) for image in images]
        await asyncio.gather(*tasks)
        print("---")

        print("Pushing images in %s" % fname)
        local_images = ["{}/{}".format(args.registry, image.rsplit("/", maxsplit=1)[-1]) for image in images]
        max_width = max([len(i) for i in local_images])

        tasks = [
            loop.run_in_executor(None, push, image, local_image, max_width)
            for image, local_image in zip(images, local_images)
        ]
        await asyncio.gather(*tasks)
        print("---")


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
--------------------------------------------------------------------------------
/site/purge.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x
while openstack coe cluster delete `openstack coe cluster list -c uuid -f value`
do
  sleep 5
done
openstack coe cluster template delete `openstack coe cluster template list -c uuid -f value`
--------------------------------------------------------------------------------
/site/taint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x
TFDIR=`dirname $0`/..
TFSTATE=$PWD/${1:-$TFDIR/terraform.tfstate}
for i in `terraform state list -state=$TFSTATE | grep _cluster_`; do terraform taint -state=$TFSTATE $i; done
--------------------------------------------------------------------------------
/site/upload-atomic.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -ex
DATE=${1:-20191126.0}
ARCH=${ARCH:-x86_64}
IMAGE=${IMAGE:-Fedora-AtomicHost-29-$DATE.$ARCH}
FNAME=$IMAGE.qcow2
set -e
openstack image show $IMAGE || (
  curl -OL https://dl.fedoraproject.org/pub/alt/atomic/stable/Fedora-29-updates-$DATE/AtomicHost/$ARCH/images/$FNAME
  openstack image create \
    --disk-format=qcow2 \
    --container-format=bare \
    --file=$FNAME \
    --property os_distro=fedora-atomic --property hw_rng_model=virtio \
    $IMAGE
)
--------------------------------------------------------------------------------
/site/upload-coreos.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e
LATEST=$(curl -s https://builds.coreos.fedoraproject.org/streams/stable.json | python3 -c "import json, sys; print(json.loads(sys.stdin.read())['architectures']['x86_64']['artifacts']['openstack']['release'])")
DEFAULT=33.20210426.3.0 #34.20210529.3.0
DATE=${1:-$DEFAULT}
STREAM=${STREAM:-stable}
ARCH=${ARCH:-x86_64}
USELATEST="y"
[[ "$DATE" = "$LATEST" ]] || read -p "The image you are using is $DATE but the latest is $LATEST. Use latest? [Y/n] " USELATEST
[Y/n] " USELATEST 10 | [[ "$(echo $USELATEST | tr '[:upper:]' '[:lower:]')" = "n" ]] || DATE=$LATEST 11 | IMAGE=${IMAGE:-fedora-coreos-$DATE-openstack.$ARCH} 12 | echo "Using image: $IMAGE" 13 | FNAME=$IMAGE.qcow2 14 | set -x 15 | openstack image show $IMAGE || ( 16 | [[ -f "$FNAME" ]] || ( 17 | curl -OL https://builds.coreos.fedoraproject.org/prod/streams/$STREAM/builds/$DATE/$ARCH/$FNAME.xz 18 | unxz $FNAME.xz 19 | ) 20 | openstack image create --disk-format=qcow2 --container-format=bare --file=$FNAME --property os_distro='fedora-coreos' $IMAGE 21 | ) 22 | sed -i.bak "s/fedora-coreos-.*-openstack.$ARCH/$IMAGE/g" *.tfvars tfvars/*.tfvars *.tf 23 | sed -i.bak "s/$DEFAULT/$DATE/g" $0 24 | -------------------------------------------------------------------------------- /sonobuoy.yml: -------------------------------------------------------------------------------- 1 | dockerLibraryRegistry: ghcr.io/stackhpc 2 | -------------------------------------------------------------------------------- /templates.tf: -------------------------------------------------------------------------------- 1 | # kube_tag: https://github.com/rancher/hyperkube/tags 2 | # cloud_provider_tag: https://github.com/kubernetes/cloud-provider-openstack/tags 3 | 4 | variable "templates" { 5 | type = map(any) 6 | default = { 7 | "k8s-1.19.12" = { 8 | labels = { 9 | kube_tag = "v1.19.12" 10 | cloud_provider_tag = "v1.19.2" 11 | } 12 | } 13 | "k8s-1.20.8" = { 14 | labels = { 15 | kube_tag = "v1.20.8" 16 | cloud_provider_tag = "v1.20.2" 17 | } 18 | } 19 | "k8s-1.21.2" = { 20 | labels = { 21 | kube_tag = "v1.21.2" 22 | cloud_provider_tag = "v1.20.2" 23 | } 24 | } 25 | } 26 | } 27 | 28 | variable "extra_templates" { 29 | type = map(any) 30 | default = {} 31 | } 32 | 33 | variable "template_labels" { 34 | type = map(any) 35 | default = { 36 | monitoring_enabled = "true" 37 | auto_scaling_enabled = "true" 38 | auto_healing_enabled = "true" 39 | auto_healing_controller = "magnum-auto-healer" 40 | magnum_auto_healer_tag = "v1.20.0" 41 | master_lb_floating_ip_enabled = "true" 42 | cinder_csi_enabled = "true" 43 | } 44 | } 45 | 46 | variable "image" { 47 | type = string 48 | default = "fedora-coreos-33.20210426.3.0-openstack.x86_64" 49 | } 50 | 51 | variable "floating_ip_enabled" { 52 | type = string 53 | default = "false" 54 | } 55 | 56 | variable "flavor" { 57 | type = string 58 | default = "ds4G" 59 | } 60 | 61 | variable "master_flavor" { 62 | type = string 63 | default = "ds4G" 64 | } 65 | 66 | variable "volume_driver" { 67 | type = string 68 | default = "cinder" 69 | } 70 | 71 | variable "external_network" { 72 | type = string 73 | } 74 | 75 | variable "network_driver" { 76 | type = string 77 | default = "calico" 78 | } 79 | 80 | variable "fixed_network" { 81 | type = string 82 | default = "" 83 | } 84 | 85 | variable "fixed_subnet" { 86 | type = string 87 | default = "" 88 | } 89 | 90 | variable "insecure_registry" { 91 | type = string 92 | default = "" 93 | } 94 | 95 | variable "docker_volume_size" { 96 | type = number 97 | default = "0" 98 | } 99 | 100 | variable "tls_disabled" { 101 | type = string 102 | default = "false" 103 | } 104 | 105 | variable "master_lb_enabled" { 106 | type = string 107 | default = "true" 108 | } 109 | 110 | resource "openstack_containerinfra_clustertemplate_v1" "templates" { 111 | for_each = merge(var.templates, var.extra_templates) 112 | name = each.key 113 | coe = "kubernetes" 114 | docker_storage_driver = "overlay2" 115 | server_type = "vm" 116 | tls_disabled = var.tls_disabled 117 | image = 
lookup(each.value, "image", var.image) 118 | volume_driver = var.volume_driver 119 | external_network_id = var.external_network 120 | master_lb_enabled = var.master_lb_enabled 121 | fixed_network = var.fixed_network 122 | fixed_subnet = var.fixed_subnet 123 | insecure_registry = var.insecure_registry 124 | floating_ip_enabled = var.floating_ip_enabled 125 | docker_volume_size = var.docker_volume_size 126 | network_driver = lookup(each.value, "network_driver", var.network_driver) 127 | flavor = lookup(each.value, "flavor", var.flavor) 128 | master_flavor = lookup(each.value, "master_flavor", var.master_flavor) 129 | labels = merge(var.template_labels, lookup(each.value, "labels", {})) 130 | 131 | lifecycle { 132 | create_before_destroy = true 133 | } 134 | } 135 | 136 | output "templates" { 137 | value = openstack_containerinfra_clustertemplate_v1.templates 138 | } 139 | 140 | -------------------------------------------------------------------------------- /terraform.tfvars.sample: -------------------------------------------------------------------------------- 1 | external_network = "public" 2 | keypair_name = "default" 3 | floating_ip_enabled = "true" 4 | -------------------------------------------------------------------------------- /tfvars/atomic.tfvars: -------------------------------------------------------------------------------- 1 | templates = { 2 | "k8s-calico-atomic" = { 3 | network_driver = "calico" 4 | image = "Fedora-AtomicHost-29-20191126.0.x86_64" 5 | labels = { 6 | kube_tag = "v1.15.12" 7 | cloud_provider_tag = "v1.15.0" 8 | } 9 | } 10 | "k8s-flannel-atomic" = { 11 | network_driver = "flannel" 12 | image = "Fedora-AtomicHost-29-20191126.0.x86_64" 13 | labels = { 14 | kube_tag = "v1.15.12" 15 | cloud_provider_tag = "v1.15.0" 16 | } 17 | } 18 | } 19 | 20 | clusters = { 21 | "k8s-calico-atomic" = { 22 | template = "k8s-calico-atomic" 23 | } 24 | "k8s-flannel-atomic" = { 25 | template = "k8s-flannel-atomic" 26 | } 27 | } 28 | 29 | kubeconfig = "k8s-calico-atomic" 30 | -------------------------------------------------------------------------------- /tfvars/coreos.tfvars: -------------------------------------------------------------------------------- 1 | clusters = { 2 | "k8s-1.18" = { 3 | template = "k8s-1.18.16" 4 | labels = { 5 | } 6 | } 7 | "k8s-1.19" = { 8 | template = "k8s-1.19.8" 9 | labels = { 10 | } 11 | } 12 | "k8s-1.20" = { 13 | template = "k8s-1.20.4" 14 | labels = { 15 | } 16 | } 17 | } 18 | 19 | kubeconfig = "k8s-1.20" 20 | -------------------------------------------------------------------------------- /tfvars/flannel.tfvars: -------------------------------------------------------------------------------- 1 | network_driver = "flannel" 2 | 3 | clusters = { 4 | "k8s-flannel-1.18" = { 5 | template = "k8s-1.18.16" 6 | labels = { 7 | } 8 | } 9 | "k8s-flannel-1.19" = { 10 | template = "k8s-1.19.8" 11 | labels = { 12 | } 13 | } 14 | "k8s-flannel-1.20" = { 15 | template = "k8s-1.20.3" 16 | labels = { 17 | } 18 | } 19 | } 20 | 21 | kubeconfig = "k8s-flannel-1.20" 22 | -------------------------------------------------------------------------------- /tfvars/podman.tfvars: -------------------------------------------------------------------------------- 1 | templates = { 2 | "k8s-calico-atomic" = { 3 | network_driver = "calico" 4 | image = "Fedora-AtomicHost-29-20191126.0.x86_64" 5 | labels = { 6 | kube_tag = "v1.15.12" 7 | cloud_provider_tag = "v1.15.0" 8 | } 9 | } 10 | "k8s-flannel-atomic" = { 11 | network_driver = "flannel" 12 | image = 
"Fedora-AtomicHost-29-20191126.0.x86_64" 13 | labels = { 14 | kube_tag = "v1.15.12" 15 | cloud_provider_tag = "v1.15.0" 16 | } 17 | } 18 | } 19 | 20 | clusters = { 21 | "k8s-calico-podman" = { 22 | template = "k8s-calico-atomic" 23 | labels = { 24 | use_podman = "true" 25 | etcd_tag = "v3.4.6" 26 | } 27 | } 28 | "k8s-flannel-podman" = { 29 | template = "k8s-flannel-atomic" 30 | labels = { 31 | use_podman = "true" 32 | etcd_tag = "v3.4.6" 33 | } 34 | } 35 | } 36 | 37 | kubeconfig = "k8s-calico-podman" 38 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | openstack = { 4 | source = "terraform-provider-openstack/openstack" 5 | version = ">=1.31.0" 6 | } 7 | local = { 8 | source = "hashicorp/local" 9 | } 10 | null = { 11 | source = "hashicorp/null" 12 | } 13 | } 14 | required_version = ">= 1.0.0" 15 | experiments = [module_variable_optional_attrs] 16 | } 17 | --------------------------------------------------------------------------------