├── .dockerignore ├── examples ├── aws_ec2 │ ├── aws │ │ ├── main.tf │ │ ├── keys.tf │ │ ├── variables.tf │ │ ├── outputs.tf │ │ ├── ami.tf │ │ ├── iam.tf │ │ └── nodes.tf │ ├── rke.tf │ └── README.md ├── digitalocean │ ├── do │ │ ├── main.tf │ │ ├── variables.tf │ │ ├── keys.tf │ │ ├── outputs.tf │ │ └── nodes.tf │ ├── manifest.yaml │ ├── templates.tf │ ├── files │ │ ├── ingress-cloud-generic.yaml │ │ └── ccm-digitalocean-v0.1.15.yaml │ ├── README.md │ └── rke.tf ├── minimal │ └── example.tf ├── with_kubernetes_provider │ └── example.tf └── multiple_nodes │ └── example.tf ├── scripts ├── build ├── ci ├── entry ├── gogetcookie.sh ├── gofmtcheck.sh ├── gotestacc.sh ├── version.sh ├── cleanup_testacc.sh ├── errcheck.sh ├── Dockerfile ├── changelog-links.sh ├── gopackage.sh └── gobuild.sh ├── rke ├── version.go ├── schema_rke_cluster_restore.go ├── provider_test.go ├── schema_rke_cluster_monitoring.go ├── schema_rke_cluster_authorization.go ├── structure_rke_cluster_restore.go ├── schema_rke_cluster_private_registries.go ├── structure_rke_cluster_authorization.go ├── structure_rke_cluster_authentication.go ├── structure_rke_cluster_monitoring.go ├── structure_rke_cluster_rotate_certificates.go ├── schema_rke_cluster_taint.go ├── provider.go ├── schema_rke_cluster_services_kubeproxy.go ├── schema_rke_cluster_services_scheduler.go ├── structure_rke_cluster_taint.go ├── config.go ├── structure_rke_cluster_private_registries.go ├── structure_rke_cluster_services_kubeproxy.go ├── structure_rke_cluster_services_scheduler.go ├── schema_rke_cluster_certificates.go ├── structure_rke_cluster_certificates.go ├── schema_rke_cluster_services_kube_controller.go ├── schema_rke_cluster_services.go ├── structure_rke_cluster_restore_test.go ├── structure_rke_cluster_taint_test.go ├── schema_rke_cluster_bastion_host.go ├── import_rke_cluster.go ├── schema_rke_cluster_rotate_certificates.go ├── structure_rke_cluster_authentication_test.go ├── structure_rke_cluster_test.go ├── structure_rke_cluster_monitoring_test.go ├── structure_rke_cluster_authorization_test.go ├── structure_rke_cluster_services_kube_controller.go ├── structure_rke_cluster_rotate_certificates_test.go ├── schema_rke_cluster_authentication.go ├── structure_rke_cluster_private_registries_test.go ├── structure_rke_cluster_certificates_test.go ├── schema_rke_cluster_services_kubelet.go ├── structure_rke_cluster_bastion_host.go ├── schema_rke_cluster_dns.go ├── structure_rke_cluster_services_kubeproxy_test.go ├── structure_rke_cluster_services_scheduler_test.go ├── structure_rke_cluster_ingress.go ├── structure_rke_cluster_bastion_host_test.go ├── structure_rke_cluster_services.go ├── structure_rke_cluster_services_kubelet.go ├── structure_rke_cluster_services_test.go ├── structure_rke_cluster_services_kube_controller_test.go ├── structure_rke_cluster_ingress_test.go ├── structure_rke_cluster_cloud_provider.go ├── structure_rke_cluster_dns.go ├── structure_rke_cluster_services_kubelet_test.go ├── schema_rke_cluster_ingress.go ├── schema_rke_cluster_cloud_provider.go ├── schema_rke_cluster_cloud_provider_aws.go ├── schema_rke_cluster_system_images.go ├── structure_rke_cluster_dns_test.go ├── schema_rke_cluster_services_etcd.go ├── structure_rke_cluster_cloud_provider_z_test.go ├── structure_rke_cluster_cloud_provider_azure_test.go ├── schema_rke_cluster_cloud_provider_vsphere.go ├── schema_rke_cluster_cloud_provider_openstack.go ├── schema_rke_cluster_node.go ├── structure_rke_cluster_system_images_test.go ├── 
structure_rke_cluster_cloud_provider_aws.go ├── structure_rke_cluster_system_images.go ├── structure_rke_cluster_cloud_provider_aws_test.go └── schema_rke_cluster_network.go ├── main.go ├── .github ├── workflows │ ├── label-opened-issues.yml │ ├── release.yaml │ └── pre-release.yaml └── pull_request_template.md ├── docs ├── index.md └── guides │ └── upgrade_to_0.13.md ├── Dockerfile.dapper ├── .gitignore └── GNUmakefile /.dockerignore: -------------------------------------------------------------------------------- 1 | .env 2 | bin/ 3 | -------------------------------------------------------------------------------- /examples/aws_ec2/aws/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | -------------------------------------------------------------------------------- /scripts/build: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | cd $(dirname $0)/.. 5 | 6 | make build -------------------------------------------------------------------------------- /examples/digitalocean/do/main.tf: -------------------------------------------------------------------------------- 1 | provider "digitalocean" { 2 | token = var.do_token 3 | } 4 | 5 | -------------------------------------------------------------------------------- /scripts/ci: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | cd $(dirname $0)/.. 5 | 6 | make build 7 | make testacc -------------------------------------------------------------------------------- /examples/digitalocean/manifest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: digitalocean 6 | namespace: kube-system 7 | stringData: 8 | access-token: "${do_token}" -------------------------------------------------------------------------------- /examples/digitalocean/templates.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "addons" { 2 | template = "${file("${path.module}/manifest.yaml")}" 3 | 4 | vars = { 5 | do_token = var.do_token 6 | } 7 | } -------------------------------------------------------------------------------- /examples/digitalocean/do/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | default = "nyc1" 3 | } 4 | 5 | variable "droplet_size" { 6 | default = "s-2vcpu-4gb" 7 | } 8 | 9 | variable "do_token" { 10 | default = "" 11 | } -------------------------------------------------------------------------------- /scripts/entry: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | mkdir -p bin dist build/bin 5 | if [ -e ./scripts/$1 ]; then 6 | ./scripts/"$@" 7 | else 8 | exec "$@" 9 | fi 10 | 11 | chown -R $DAPPER_UID:$DAPPER_GID . 
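The manifest.yaml and templates.tf pair above render the DigitalOcean API token into an addon manifest via the template_file data source. A minimal sketch of how that rendered output is typically consumed, assuming the DigitalOcean cluster definition (examples/digitalocean/rke.tf, listed in the tree but not reproduced in this excerpt) passes it to the rke_cluster resource through its addons/addons_include arguments:

```hcl
# Sketch only: assumed wiring of the rendered addon template into the cluster.
# The real examples/digitalocean/rke.tf is not shown in this excerpt.
resource "rke_cluster" "cluster" {
  # ... nodes omitted ...

  # Rendered manifest.yaml with do_token substituted; RKE applies it as a
  # cluster addon once the control plane is up.
  addons = data.template_file.addons.rendered

  # Static manifests from the files/ directory can be referenced by path.
  addons_include = [
    "${path.module}/files/ccm-digitalocean-v0.1.15.yaml",
    "${path.module}/files/ingress-cloud-generic.yaml",
  ]
}
```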
-------------------------------------------------------------------------------- /examples/aws_ec2/aws/keys.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "node-key" { 2 | algorithm = "RSA" 3 | } 4 | 5 | resource "aws_key_pair" "rke-node-key" { 6 | key_name = "rke-node-key" 7 | public_key = tls_private_key.node-key.public_key_openssh 8 | } 9 | 10 | -------------------------------------------------------------------------------- /examples/digitalocean/do/keys.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "node-key" { 2 | algorithm = "RSA" 3 | } 4 | 5 | resource "digitalocean_ssh_key" "key" { 6 | name = "rke-node-key" 7 | public_key = tls_private_key.node-key.public_key_openssh 8 | } 9 | 10 | -------------------------------------------------------------------------------- /scripts/gogetcookie.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | touch ~/.gitcookies 4 | chmod 0600 ~/.gitcookies 5 | 6 | git config --global http.cookiefile ~/.gitcookies 7 | 8 | tr , \\t <<\__END__ >>~/.gitcookies 9 | .googlesource.com,TRUE,/,TRUE,2147483647,o,git-paul.hashicorp.com=1/z7s05EYPudQ9qoe6dMVfmAVwgZopEkZBb1a2mA5QtHE 10 | __END__ 11 | -------------------------------------------------------------------------------- /examples/aws_ec2/aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | default = "us-east-1" 3 | } 4 | 5 | variable "instance_type" { 6 | default = "t2.large" 7 | } 8 | 9 | variable "cluster_id" { 10 | default = "rke" 11 | } 12 | 13 | variable "docker_install_url" { 14 | default = "https://releases.rancher.com/install-docker/19.03.sh" 15 | } 16 | -------------------------------------------------------------------------------- /examples/aws_ec2/aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "private_key" { 2 | value = tls_private_key.node-key.private_key_pem 3 | } 4 | 5 | output "ssh_username" { 6 | value = "ubuntu" 7 | } 8 | 9 | output "addresses" { 10 | value = aws_instance.rke-node[*].public_dns 11 | } 12 | 13 | output "internal_ips" { 14 | value = aws_instance.rke-node[*].private_ip 15 | } 16 | -------------------------------------------------------------------------------- /examples/aws_ec2/aws/ami.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "ubuntu" { 2 | most_recent = true 3 | 4 | filter { 5 | name = "name" 6 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 7 | } 8 | 9 | filter { 10 | name = "virtualization-type" 11 | values = ["hvm"] 12 | } 13 | 14 | owners = ["099720109477"] # Canonical 15 | } 16 | 17 | -------------------------------------------------------------------------------- /rke/version.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import "fmt" 4 | 5 | var ( 6 | // Version app version 7 | Version = "0.14.1" 8 | // Revision git commit short hash 9 | Revision = "xxxxxx" // set on build 10 | ) 11 | 12 | // FullVersion returns the full version text 13 | func FullVersion() string { 14 | return fmt.Sprintf("%s, build %s", Version, Revision) 15 | } 16 | -------------------------------------------------------------------------------- /examples/digitalocean/do/outputs.tf:
-------------------------------------------------------------------------------- 1 | output "private_key" { 2 | value = tls_private_key.node-key.private_key_pem 3 | } 4 | 5 | output "ssh_username" { 6 | value = "root" 7 | } 8 | 9 | output "addresses" { 10 | value = digitalocean_droplet.rke-node[*].ipv4_address 11 | } 12 | 13 | output "internal_addresses" { 14 | value = digitalocean_droplet.rke-node.*.ipv4_address_private 15 | } -------------------------------------------------------------------------------- /scripts/gofmtcheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Check gofmt 4 | echo "==> Checking that code complies with gofmt requirements..." 5 | gofmt_files=$(gofmt -l -s `find . -name '*.go' | grep -v vendor`) 6 | if [[ -n ${gofmt_files} ]]; then 7 | echo 'gofmt needs running on the following files:' 8 | echo "${gofmt_files}" 9 | echo "You can use the command: \`make fmt\` to reformat code." 10 | exit 1 11 | fi 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /scripts/gotestacc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | echo "==> Running acceptance testing..." 6 | 7 | cleanup() 8 | { 9 | $(dirname $0)/cleanup_testacc.sh 10 | } 11 | trap cleanup EXIT TERM 12 | 13 | PACKAGES="$(find . -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | grep -Ev '(^\.$|.git|vendor|bin)' | sed -e 's!^!./!' -e 's!$!/...!')" 14 | TF_ACC=1 go test -cover -tags=test ${PACKAGES} -v -timeout 120m 15 | -------------------------------------------------------------------------------- /scripts/version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 4 | DIRTY="-dirty" 5 | fi 6 | 7 | GIT_TAG=$(git tag -l --contains HEAD | head -n 1) 8 | 9 | if [ -n "$VERSION" ]; then 10 | VERSION="$VERSION${DIRTY}" 11 | elif [ -n "$GIT_TAG" ]; then 12 | VERSION="$GIT_TAG${DIRTY}" 13 | else 14 | COMMIT=$(git rev-parse --short HEAD) 15 | VERSION="${COMMIT}${DIRTY}" 16 | fi 17 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | 6 | "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" 7 | "github.com/rancher/terraform-provider-rke/rke" 8 | ) 9 | 10 | func main() { 11 | var debug bool 12 | 13 | flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") 14 | flag.Parse() 15 | 16 | opts := &plugin.ServeOpts{ 17 | Debug: debug, 18 | ProviderFunc: rke.Provider, 19 | } 20 | 21 | plugin.Serve(opts) 22 | } 23 | -------------------------------------------------------------------------------- /.github/workflows/label-opened-issues.yml: -------------------------------------------------------------------------------- 1 | name: Label issue 2 | on: 3 | issues: 4 | types: 5 | - opened 6 | - reopened 7 | 8 | permissions: 9 | issues: write 10 | 11 | jobs: 12 | label_issues: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Label issue 16 | id: run 17 | run: gh issue edit -R ${GITHUB_REPOSITORY} --add-label ${LABEL} ${{ github.event.issue.number }} 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 20 | LABEL: "team/area2" 21 | 
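The do/outputs.tf above exports private_key, ssh_username, addresses and internal_addresses for the droplets. A minimal sketch of how a root module would feed these into the cluster definition, assuming the same wiring pattern as examples/aws_ec2/rke.tf shown later in this dump (the DigitalOcean rke.tf itself is not reproduced here):

```hcl
# Sketch only: assumed consumption of the ./do module outputs, mirroring the
# AWS example further below; indexes 1..3 would follow the same pattern.
module "nodes" {
  source = "./do"
}

resource "rke_cluster" "cluster" {
  nodes {
    address          = module.nodes.addresses[0]
    internal_address = module.nodes.internal_addresses[0]
    user             = module.nodes.ssh_username
    ssh_key          = module.nodes.private_key
    role             = ["controlplane", "etcd"]
  }
}
```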
-------------------------------------------------------------------------------- /scripts/cleanup_testacc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | TESTACC_DOCKER_LIST=${TESTACC_DOCKER_LIST:-"rke-dind-tf-testacc1 rke-dind-tf-testacc2"} 6 | 7 | DOCKER_BIN=${DOCKER_BIN:-$(which docker)} 8 | 9 | 10 | if [ "${TESTACC_DOCKER_LIST}" != "" ]; then 11 | echo Cleaning up testacc docker list ${TESTACC_DOCKER_LIST} 12 | for DOCKER_TEST in ${TESTACC_DOCKER_LIST} 13 | do 14 | DOCKER_ID=$(${DOCKER_BIN} ps -q -f name=${DOCKER_TEST}) 15 | if [ "${DOCKER_ID}" != "" ]; then 16 | ${DOCKER_BIN} rm -fv ${DOCKER_TEST} 17 | fi 18 | done 19 | fi 20 | 21 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_restore.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterRestoreFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "restore": { 12 | Type: schema.TypeBool, 13 | Optional: true, 14 | Default: false, 15 | Description: "Restore RKE cluster", 16 | }, 17 | "snapshot_name": { 18 | Type: schema.TypeString, 19 | Optional: true, 20 | Description: "Snapshot name", 21 | }, 22 | } 23 | return s 24 | } 25 | -------------------------------------------------------------------------------- /examples/digitalocean/files/ingress-cloud-generic.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: ingress-nginx 6 | namespace: ingress-nginx 7 | labels: 8 | app.kubernetes.io/name: ingress-nginx 9 | app.kubernetes.io/part-of: ingress-nginx 10 | spec: 11 | externalTrafficPolicy: Local 12 | type: LoadBalancer 13 | selector: 14 | app.kubernetes.io/name: ingress-nginx 15 | app.kubernetes.io/part-of: ingress-nginx 16 | ports: 17 | - name: http 18 | port: 80 19 | targetPort: http 20 | - name: https 21 | port: 443 22 | targetPort: https 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /examples/minimal/example.tf: -------------------------------------------------------------------------------- 1 | resource "rke_cluster" "cluster" { 2 | nodes { 3 | address = "1.2.3.4" 4 | user = "ubuntu" 5 | role = ["controlplane", "worker", "etcd"] 6 | ssh_key = file("~/.ssh/id_rsa") 7 | } 8 | } 9 | 10 | ############################################################################### 11 | # If you need kubeconfig.yml for using kubectl, please uncomment the following. 12 | ############################################################################### 13 | // resource "local_file" "kube_cluster_yaml" { 14 | // filename = "${path.root}/kube_config_cluster.yml" 15 | // content = rke_cluster.cluster.kube_config_yaml 16 | // } 17 | 18 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | page_title: "RKE Provider" 3 | --- 4 | 5 | # RKE Provider 6 | 7 | The RKE provider is used to interact with Rancher Kubernetes Engine (RKE) Kubernetes clusters.
8 | 9 | ## Example Usage 10 | 11 | ```hcl 12 | # Configure the RKE provider 13 | provider "rke" { 14 | debug = true 15 | log_file = "" 16 | } 17 | ``` 18 | 19 | ## Argument Reference 20 | 21 | The following arguments are supported: 22 | 23 | * `debug` - (Optional) Enable RKE debug logs. It can also be sourced from the `RKE_DEBUG` environment variable. Default `false` (bool) 24 | * `log_file` - (Optional) Save RKE logs to a file. It can also be sourced from the `RKE_LOG_FILE` environment variable (string) 25 | -------------------------------------------------------------------------------- /examples/digitalocean/do/nodes.tf: -------------------------------------------------------------------------------- 1 | # Define the Droplets (nodes) 2 | resource "digitalocean_droplet" "rke-node" { 3 | image = "ubuntu-18-04-x64" 4 | name = "rke-nodes-${count.index + 1}" 5 | region = var.region 6 | size = var.droplet_size 7 | ssh_keys = [digitalocean_ssh_key.key.id] 8 | count = 4 9 | 10 | private_networking = true 11 | 12 | provisioner "remote-exec" { 13 | connection { 14 | type = "ssh" 15 | user = "root" 16 | host = self.ipv4_address 17 | private_key = tls_private_key.node-key.private_key_pem 18 | } 19 | 20 | inline = [ 21 | "curl releases.rancher.com/install-docker/18.09.4.sh | bash", 22 | ] 23 | } 24 | } 25 | 26 | -------------------------------------------------------------------------------- /rke/provider_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 7 | ) 8 | 9 | var testAccProviders map[string]func() (*schema.Provider, error) 10 | var testAccProvider *schema.Provider 11 | 12 | func init() { 13 | testAccProvider = Provider() 14 | testAccProviders = map[string]func() (*schema.Provider, error){ 15 | "rke": func() (*schema.Provider, error) { 16 | return testAccProvider, nil 17 | }, 18 | } 19 | } 20 | 21 | func TestProvider(t *testing.T) { 22 | if err := Provider().InternalValidate(); err != nil { 23 | t.Fatalf("err: %s", err) 24 | } 25 | } 26 | 27 | func TestProvider_impl(t *testing.T) { 28 | var _ = Provider() 29 | } 30 | -------------------------------------------------------------------------------- /examples/with_kubernetes_provider/example.tf: -------------------------------------------------------------------------------- 1 | resource "rke_cluster" "cluster" { 2 | nodes { 3 | address = "1.2.3.4" 4 | user = "ubuntu" 5 | role = ["controlplane", "worker", "etcd"] 6 | ssh_key = file("~/.ssh/id_rsa") 7 | } 8 | } 9 | 10 | provider "kubernetes" { 11 | host = rke_cluster.cluster.api_server_url 12 | username = rke_cluster.cluster.kube_admin_user 13 | 14 | client_certificate = rke_cluster.cluster.client_cert 15 | client_key = rke_cluster.cluster.client_key 16 | cluster_ca_certificate = rke_cluster.cluster.ca_crt 17 | } 18 | 19 | resource "kubernetes_namespace" "example" { 20 | metadata { 21 | name = "terraform-example-namespace" 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_monitoring.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterMonitoringFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "node_selector": { 12 | Type: schema.TypeMap, 13 | Optional: true, 14 |
Description: "Node selector key pair", 15 | }, 16 | "options": { 17 | Type: schema.TypeMap, 18 | Optional: true, 19 | Description: "Monitoring options", 20 | }, 21 | "provider": { 22 | Type: schema.TypeString, 23 | Optional: true, 24 | Computed: true, 25 | Description: "Monitoring provider", 26 | }, 27 | } 28 | return s 29 | } 30 | -------------------------------------------------------------------------------- /scripts/errcheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Check gofmt 4 | echo "==> Checking for unchecked errors..." 5 | 6 | if ! which errcheck > /dev/null; then 7 | echo "==> Installing errcheck..." 8 | go get -u github.com/kisielk/errcheck 9 | fi 10 | 11 | err_files=$(errcheck -ignoretests \ 12 | -ignore 'github.com/hashicorp/terraform/helper/schema:Set' \ 13 | -ignore 'bytes:.*' \ 14 | -ignore 'io:Close|Write' \ 15 | $(go list ./...| grep -v /vendor/)) 16 | 17 | if [[ -n ${err_files} ]]; then 18 | echo 'Unchecked errors found in the following places:' 19 | echo "${err_files}" 20 | echo "Please handle returned errors. You can check directly with \`make errcheck\`" 21 | exit 1 22 | fi 23 | 24 | exit 0 25 | -------------------------------------------------------------------------------- /scripts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.12.3 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y xz-utils zip rsync jq curl ca-certificates && \ 5 | curl -fsSL https://get.docker.com | sh - && \ 6 | apt-get clean && \ 7 | rm -rf /var/cache/apt/archives/* /var/lib/apt/lists/* && \ 8 | go get -u golang.org/x/lint/golint && \ 9 | curl -L https://dl.k8s.io/release/$(curl -sL https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/bin/kubectl && chmod 755 /usr/bin/kubectl && \ 10 | curl -LO https://releases.hashicorp.com/terraform/0.12.20/terraform_0.12.20_linux_amd64.zip && unzip terraform_0.12.20_linux_amd64.zip && \ 11 | mv terraform /usr/bin/terraform && chmod 755 /usr/bin/terraform && rm terraform_0.12.20_linux_amd64.zip 12 | VOLUME /go/src/github.com/rancher/terraform-provider-rke 13 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_authorization.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | const authorizationModeRBAC = "rbac" 9 | 10 | var ( 11 | authorizationModeList = []string{authorizationModeRBAC, "none"} 12 | ) 13 | 14 | //Schemas 15 | 16 | func rkeClusterAuthorizationFields() map[string]*schema.Schema { 17 | s := map[string]*schema.Schema{ 18 | "mode": { 19 | Type: schema.TypeString, 20 | Optional: true, 21 | Default: authorizationModeRBAC, 22 | ValidateFunc: validation.StringInSlice(authorizationModeList, true), 23 | }, 24 | "options": { 25 | Type: schema.TypeMap, 26 | Optional: true, 27 | Description: "Authorization mode options", 28 | }, 29 | } 30 | return s 31 | } 32 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_restore.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterRestore(in rancher.RestoreConfig) 
[]interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | obj["restore"] = in.Restore 13 | 14 | if len(in.SnapshotName) > 0 { 15 | obj["snapshot_name"] = in.SnapshotName 16 | } 17 | 18 | return []interface{}{obj} 19 | } 20 | 21 | // Expanders 22 | 23 | func expandRKEClusterRestore(p []interface{}) rancher.RestoreConfig { 24 | obj := rancher.RestoreConfig{} 25 | if len(p) == 0 || p[0] == nil { 26 | return obj 27 | } 28 | in := p[0].(map[string]interface{}) 29 | 30 | if v, ok := in["restore"].(bool); ok { 31 | obj.Restore = v 32 | } 33 | 34 | if v, ok := in["snapshot_name"].(string); ok && len(v) > 0 { 35 | obj.SnapshotName = v 36 | } 37 | 38 | return obj 39 | } 40 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_private_registries.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterPrivateRegistriesFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "url": { 12 | Type: schema.TypeString, 13 | Required: true, 14 | Description: "Registry URL", 15 | }, 16 | "is_default": { 17 | Type: schema.TypeBool, 18 | Optional: true, 19 | Default: false, 20 | Description: "Set as default registry", 21 | }, 22 | "password": { 23 | Type: schema.TypeString, 24 | Optional: true, 25 | Sensitive: true, 26 | Description: "Registry password", 27 | }, 28 | "user": { 29 | Type: schema.TypeString, 30 | Optional: true, 31 | Sensitive: true, 32 | Description: "Registry user", 33 | }, 34 | } 35 | return s 36 | } 37 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_authorization.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterAuthorization(in rancher.AuthzConfig) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.Mode) > 0 { 13 | obj["mode"] = in.Mode 14 | } 15 | 16 | if len(in.Options) > 0 { 17 | obj["options"] = toMapInterface(in.Options) 18 | } 19 | 20 | return []interface{}{obj} 21 | } 22 | 23 | // Expanders 24 | 25 | func expandRKEClusterAuthorization(p []interface{}) rancher.AuthzConfig { 26 | obj := rancher.AuthzConfig{} 27 | if len(p) == 0 || p[0] == nil { 28 | return obj 29 | } 30 | in := p[0].(map[string]interface{}) 31 | 32 | if v, ok := in["mode"].(string); ok && len(v) > 0 { 33 | obj.Mode = v 34 | } 35 | 36 | if v, ok := in["options"].(map[string]interface{}); ok && len(v) > 0 { 37 | obj.Options = toMapString(v) 38 | } 39 | 40 | return obj 41 | } 42 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_authentication.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterAuthentication(in rancher.AuthnConfig) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.SANs) > 0 { 13 | obj["sans"] = toArrayInterface(in.SANs) 14 | } 15 | 16 | if len(in.Strategy) > 0 { 17 | obj["strategy"] = in.Strategy 18 | } 19 | 20 | return []interface{}{obj} 21 | } 22 | 23 | // Expanders 24 | 25 | func expandRKEClusterAuthentication(p []interface{}) rancher.AuthnConfig { 
26 | obj := rancher.AuthnConfig{} 27 | if len(p) == 0 || p[0] == nil { 28 | return obj 29 | } 30 | in := p[0].(map[string]interface{}) 31 | 32 | if v, ok := in["sans"].([]interface{}); ok && len(v) > 0 { 33 | obj.SANs = toArrayString(v) 34 | } 35 | 36 | if v, ok := in["strategy"].(string); ok && len(v) > 0 { 37 | obj.Strategy = v 38 | } 39 | 40 | return obj 41 | } 42 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_monitoring.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterMonitoring(in rancher.MonitoringConfig) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.Options) > 0 { 13 | obj["options"] = toMapInterface(in.Options) 14 | } 15 | 16 | if len(in.Provider) > 0 { 17 | obj["provider"] = in.Provider 18 | } 19 | 20 | return []interface{}{obj} 21 | } 22 | 23 | // Expanders 24 | 25 | func expandRKEClusterMonitoring(p []interface{}) rancher.MonitoringConfig { 26 | obj := rancher.MonitoringConfig{} 27 | if len(p) == 0 || p[0] == nil { 28 | return obj 29 | } 30 | in := p[0].(map[string]interface{}) 31 | 32 | if v, ok := in["options"].(map[string]interface{}); ok && len(v) > 0 { 33 | obj.Options = toMapString(v) 34 | } 35 | 36 | if v, ok := in["provider"].(string); ok && len(v) > 0 { 37 | obj.Provider = v 38 | } 39 | 40 | return obj 41 | } 42 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_rotate_certificates.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterRotateCertificates(in *rancher.RotateCertificates) []interface{} { 10 | obj := make(map[string]interface{}) 11 | if in == nil { 12 | return []interface{}{} 13 | } 14 | 15 | obj["ca_certificates"] = in.CACertificates 16 | 17 | if len(in.Services) > 0 { 18 | obj["services"] = toArrayInterface(in.Services) 19 | } 20 | 21 | return []interface{}{obj} 22 | } 23 | 24 | // Expanders 25 | 26 | func expandRKEClusterRotateCertificates(p []interface{}) *rancher.RotateCertificates { 27 | obj := &rancher.RotateCertificates{} 28 | if len(p) == 0 || p[0] == nil { 29 | return obj 30 | } 31 | in := p[0].(map[string]interface{}) 32 | 33 | if v, ok := in["ca_certificates"].(bool); ok { 34 | obj.CACertificates = v 35 | } 36 | 37 | if v, ok := in["services"].([]interface{}); ok && len(v) > 0 { 38 | obj.Services = toArrayString(v) 39 | } 40 | 41 | return obj 42 | } 43 | -------------------------------------------------------------------------------- /scripts/changelog-links.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script rewrites [GH-nnnn]-style references in the CHANGELOG.md file to 4 | # be Markdown links to the given github issues. 5 | # 6 | # This is run during releases so that the issue references in all of the 7 | # released items are presented as clickable links, but we can just use the 8 | # easy [GH-nnnn] shorthand for quickly adding items to the "Unrelease" section 9 | # while merging things between releases. 10 | 11 | set -e 12 | 13 | if [[ ! -f CHANGELOG.md ]]; then 14 | echo "ERROR: CHANGELOG.md not found in pwd." 
15 | echo "Please run this from the root of the terraform provider repository" 16 | exit 1 17 | fi 18 | 19 | if [[ `uname` == "Darwin" ]]; then 20 | echo "Using BSD sed" 21 | SED="sed -i.bak -E -e" 22 | else 23 | echo "Using GNU sed" 24 | SED="sed -i.bak -r -e" 25 | fi 26 | 27 | PROVIDER_URL="https:\/\/github.com\/terraform-providers\/terraform-provider-rke\/issues" 28 | 29 | $SED "s/GH-([0-9]+)/\[#\1\]\($PROVIDER_URL\/\1\)/g" -e 's/\[\[#(.+)([0-9])\)]$/(\[#\1\2))/g' CHANGELOG.md 30 | 31 | rm CHANGELOG.md.bak 32 | -------------------------------------------------------------------------------- /examples/multiple_nodes/example.tf: -------------------------------------------------------------------------------- 1 | resource rke_cluster "cluster" { 2 | nodes { 3 | address = "192.2.0.1" 4 | internal_address = "192.2.0.1" 5 | user = "rancher" 6 | role = ["controlplane", "worker", "etcd"] 7 | ssh_key = file("~/.ssh/id_rsa") 8 | } 9 | nodes { 10 | address = "192.2.0.2" 11 | internal_address = "192.2.0.2" 12 | user = "rancher" 13 | role = ["controlplane", "worker", "etcd"] 14 | ssh_key = file("~/.ssh/id_rsa") 15 | } 16 | nodes { 17 | address = "15.188.119.237" 18 | internal_address = "172.31.46.40" 19 | user = "rancher" 20 | role = ["controlplane", "worker", "etcd"] 21 | ssh_key = file("~/.ssh/id_rsa") 22 | } 23 | } 24 | 25 | ############################################################################### 26 | # If you need kubeconfig.yml for using kubectl, please uncomment follows. 27 | ############################################################################### 28 | # resource "local_file" "kube_cluster_yaml" { 29 | # filename = "${path.root}/kube_config_cluster.yml" 30 | # content = rke_cluster.cluster.kube_config_yaml 31 | # } 32 | -------------------------------------------------------------------------------- /scripts/gopackage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $(dirname $0)/version.sh 6 | 7 | VERSION_NO_V=$(echo $VERSION | sed "s/^[v|V]//") 8 | 9 | echo "==> Packaging binaries version ${VERSION_NO_V} ..." 10 | 11 | DIST=$(pwd)/dist/artifacts 12 | 13 | mkdir -p $DIST/${VERSION} 14 | 15 | for i in build/bin/*; do 16 | if [ ! -e $i ]; then 17 | continue 18 | fi 19 | 20 | BASE=build/archive 21 | DIR=${BASE}/${VERSION} 22 | 23 | rm -rf $BASE 24 | mkdir -p $BASE $DIR 25 | 26 | EXT= 27 | if [[ $i =~ .*windows.* ]]; then 28 | EXT=.exe 29 | fi 30 | 31 | cp $i ${DIR}/terraform-provider-rke_${VERSION}${EXT} 32 | 33 | ( 34 | cd $DIR 35 | NAME=$(basename $i | cut -f1 -d_) 36 | ARCH=$(basename $i | cut -f2,3 -d_ | cut -f1 -d.) 37 | ARCHIVE=${NAME}_${VERSION_NO_V}_${ARCH}.zip 38 | echo "Packaging dist/artifacts/${VERSION}/${ARCHIVE} ..." 
39 | zip -q $DIST/${VERSION}/${ARCHIVE} * 40 | ) 41 | done 42 | 43 | ( 44 | cd $DIST/${VERSION}/ 45 | shasum -a 256 * > terraform-provider-rke_${VERSION_NO_V}_SHA256SUMS 46 | ) 47 | 48 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_taint.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | const ( 9 | rkeClusterTaintEffectNoExecute = "NoExecute" 10 | rkeClusterTaintEffectNoSchedule = "NoSchedule" 11 | rkeClusterTaintEffectPreferNoSchedule = "PreferNoSchedule" 12 | ) 13 | 14 | var ( 15 | rkeClusterTaintEffectTypes = []string{ 16 | rkeClusterTaintEffectNoExecute, 17 | rkeClusterTaintEffectNoSchedule, 18 | rkeClusterTaintEffectPreferNoSchedule, 19 | } 20 | ) 21 | 22 | //Schemas 23 | 24 | func rkeClusterTaintFields() map[string]*schema.Schema { 25 | s := map[string]*schema.Schema{ 26 | "key": { 27 | Type: schema.TypeString, 28 | Required: true, 29 | }, 30 | "value": { 31 | Type: schema.TypeString, 32 | Required: true, 33 | }, 34 | "effect": { 35 | Type: schema.TypeString, 36 | Optional: true, 37 | Default: rkeClusterTaintEffectNoSchedule, 38 | ValidateFunc: validation.StringInSlice(rkeClusterTaintEffectTypes, true), 39 | }, 40 | } 41 | 42 | return s 43 | } 44 | -------------------------------------------------------------------------------- /rke/provider.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 7 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 8 | ) 9 | 10 | // Provider returns a schema.Provider. 
11 | func Provider() *schema.Provider { 12 | return &schema.Provider{ 13 | Schema: map[string]*schema.Schema{ 14 | "debug": { 15 | Type: schema.TypeBool, 16 | Optional: true, 17 | DefaultFunc: schema.EnvDefaultFunc("RKE_DEBUG", false), 18 | }, 19 | "log_file": { 20 | Type: schema.TypeString, 21 | Optional: true, 22 | DefaultFunc: schema.EnvDefaultFunc("RKE_LOG_FILE", ""), 23 | }, 24 | }, 25 | ResourcesMap: map[string]*schema.Resource{ 26 | "rke_cluster": resourceRKECluster(), 27 | }, 28 | DataSourcesMap: map[string]*schema.Resource{}, 29 | ConfigureContextFunc: providerConfigure, 30 | } 31 | } 32 | 33 | func providerConfigure(_ context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { 34 | config := &Config{ 35 | Debug: d.Get("debug").(bool), 36 | LogFile: d.Get("log_file").(string), 37 | } 38 | 39 | config.initLogger() 40 | return config, nil 41 | } 42 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_services_kubeproxy.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterServicesKubeproxyFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "extra_args": { 12 | Type: schema.TypeMap, 13 | Optional: true, 14 | Computed: true, 15 | Description: "Extra arguments that are added to the kubeproxy services", 16 | }, 17 | "extra_binds": { 18 | Type: schema.TypeList, 19 | Optional: true, 20 | Computed: true, 21 | Description: "Extra binds added to the worker nodes", 22 | Elem: &schema.Schema{ 23 | Type: schema.TypeString, 24 | }, 25 | }, 26 | "extra_env": { 27 | Type: schema.TypeList, 28 | Optional: true, 29 | Computed: true, 30 | Description: "Extra env added to the worker nodes", 31 | Elem: &schema.Schema{ 32 | Type: schema.TypeString, 33 | }, 34 | }, 35 | "image": { 36 | Type: schema.TypeString, 37 | Optional: true, 38 | Computed: true, 39 | Description: "Docker image of the kubeproxy service", 40 | }, 41 | } 42 | return s 43 | } 44 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_services_scheduler.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterServicesSchedulerFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "extra_args": { 12 | Type: schema.TypeMap, 13 | Optional: true, 14 | Computed: true, 15 | Description: "Extra arguments that are added to the scheduler services", 16 | }, 17 | "extra_binds": { 18 | Type: schema.TypeList, 19 | Optional: true, 20 | Computed: true, 21 | Description: "Extra binds added to the controlplane nodes", 22 | Elem: &schema.Schema{ 23 | Type: schema.TypeString, 24 | }, 25 | }, 26 | "extra_env": { 27 | Type: schema.TypeList, 28 | Optional: true, 29 | Computed: true, 30 | Description: "Extra env added to the controlplane nodes", 31 | Elem: &schema.Schema{ 32 | Type: schema.TypeString, 33 | }, 34 | }, 35 | "image": { 36 | Type: schema.TypeString, 37 | Optional: true, 38 | Computed: true, 39 | Description: "Docker image of the scheduler service", 40 | }, 41 | } 42 | return s 43 | } 44 | -------------------------------------------------------------------------------- /scripts/gobuild.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $(dirname $0)/version.sh 6 | 7 | echo "==> Building code binaries version ${VERSION} ..." 8 | 9 | declare -A OS_ARCH_ARG 10 | 11 | OS_PLATFORM_ARG=(linux windows darwin freebsd openbsd) 12 | OS_ARCH_ARG[linux]="amd64 arm arm64" 13 | OS_ARCH_ARG[windows]="386 amd64" 14 | OS_ARCH_ARG[darwin]="amd64 arm64" 15 | 16 | BIN_NAME="terraform-provider-rke" 17 | BUILD_DIR=$(dirname $0)"/../build/bin" 18 | 19 | 20 | CGO_ENABLED=0 go build -ldflags="-w -s -X main.VERSION=$VERSION -extldflags -static" -o bin/${BIN_NAME} 21 | 22 | if [ -n "$CROSS" ]; then 23 | rm -rf ${BUILD_DIR} 24 | mkdir -p ${BUILD_DIR} 25 | for OS in ${OS_PLATFORM_ARG[@]}; do 26 | for ARCH in ${OS_ARCH_ARG[${OS}]}; do 27 | OUTPUT_BIN="${BUILD_DIR}/${BIN_NAME}_${OS}_${ARCH}" 28 | if test "$OS" = "windows"; then 29 | OUTPUT_BIN="${OUTPUT_BIN}.exe" 30 | fi 31 | echo "Building ${BIN_NAME}_${OS}_${ARCH} ..." 32 | GOARCH=$ARCH GOOS=$OS CGO_ENABLED=0 go build \ 33 | -ldflags="-w -X main.VERSION=$VERSION" \ 34 | -o ${OUTPUT_BIN} ./ 35 | done 36 | done 37 | fi -------------------------------------------------------------------------------- /rke/structure_rke_cluster_taint.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | v1 "k8s.io/api/core/v1" 6 | ) 7 | 8 | // Flatteners 9 | 10 | func flattenRKEClusterTaints(p []rancher.RKETaint) []interface{} { 11 | if len(p) == 0 { 12 | return []interface{}{} 13 | } 14 | 15 | out := make([]interface{}, len(p)) 16 | for i, in := range p { 17 | obj := make(map[string]interface{}) 18 | 19 | if len(in.Key) > 0 { 20 | obj["key"] = in.Key 21 | } 22 | 23 | if len(in.Value) > 0 { 24 | obj["value"] = in.Value 25 | } 26 | 27 | if len(in.Effect) > 0 { 28 | obj["effect"] = string(in.Effect) 29 | } 30 | 31 | out[i] = obj 32 | } 33 | 34 | return out 35 | } 36 | 37 | // Expanders 38 | 39 | func expandRKEClusterTaints(p []interface{}) []rancher.RKETaint { 40 | if len(p) == 0 || p[0] == nil { 41 | return []rancher.RKETaint{} 42 | } 43 | 44 | obj := make([]rancher.RKETaint, len(p)) 45 | 46 | for i := range p { 47 | in := p[i].(map[string]interface{}) 48 | 49 | if v, ok := in["key"].(string); ok && len(v) > 0 { 50 | obj[i].Key = v 51 | } 52 | 53 | if v, ok := in["value"].(string); ok && len(v) > 0 { 54 | obj[i].Value = v 55 | } 56 | 57 | if v, ok := in["effect"].(string); ok && len(v) > 0 { 58 | obj[i].Effect = v1.TaintEffect(v) 59 | } 60 | } 61 | 62 | return obj 63 | } 64 | -------------------------------------------------------------------------------- /examples/aws_ec2/rke.tf: -------------------------------------------------------------------------------- 1 | module "nodes" { 2 | source = "./aws" 3 | # region = "us-east-1" 4 | # instance_type = "t2.large" 5 | # cluster_id = "rke" 6 | } 7 | 8 | resource "rke_cluster" "cluster" { 9 | cloud_provider { 10 | name = "aws" 11 | } 12 | 13 | nodes { 14 | address = module.nodes.addresses[0] 15 | internal_address = module.nodes.internal_ips[0] 16 | user = module.nodes.ssh_username 17 | ssh_key = module.nodes.private_key 18 | role = ["controlplane", "etcd"] 19 | } 20 | nodes { 21 | address = module.nodes.addresses[1] 22 | internal_address = module.nodes.internal_ips[1] 23 | user = module.nodes.ssh_username 24 | ssh_key = module.nodes.private_key 25 | role = ["worker"] 26 | } 27 | nodes { 28 | address = module.nodes.addresses[2] 29 | 
internal_address = module.nodes.internal_ips[2] 30 | user = module.nodes.ssh_username 31 | ssh_key = module.nodes.private_key 32 | role = ["worker"] 33 | } 34 | nodes { 35 | address = module.nodes.addresses[3] 36 | internal_address = module.nodes.internal_ips[3] 37 | user = module.nodes.ssh_username 38 | ssh_key = module.nodes.private_key 39 | role = ["worker"] 40 | } 41 | } 42 | 43 | resource "local_file" "kube_cluster_yaml" { 44 | filename = "./kube_config_cluster.yml" 45 | content = rke_cluster.cluster.kube_config_yaml 46 | } 47 | 48 | -------------------------------------------------------------------------------- /examples/aws_ec2/aws/iam.tf: -------------------------------------------------------------------------------- 1 | # Step 1: Create an IAM role 2 | resource "aws_iam_role" "rke-role" { 3 | name = "rke-role" 4 | 5 | assume_role_policy = < 0 { 41 | if c.File == nil { 42 | f, errFile := os.OpenFile(c.LogFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) 43 | if errFile != nil { 44 | log.Errorf("Opening logfile %s err:%v", c.LogFile, errFile) 45 | return 46 | } 47 | c.File = f 48 | } 49 | writer = io.MultiWriter(c.LogBuffer, c.File) 50 | } 51 | log.SetOutput(writer) 52 | } 53 | 54 | func (c *Config) saveRKEOutput(err error) diag.Diagnostics { 55 | if c.File != nil { 56 | defer c.File.Close() 57 | defer c.File.Sync() 58 | } 59 | if err != nil { 60 | return diag.FromErr(fmt.Errorf(rkeErrorTemplate, c.LogBuffer.String(), err)) 61 | } 62 | 63 | return nil 64 | } 65 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_private_registries.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterPrivateRegistries(p []rancher.PrivateRegistry) []interface{} { 10 | out := []interface{}{} 11 | 12 | for _, in := range p { 13 | obj := make(map[string]interface{}) 14 | obj["is_default"] = in.IsDefault 15 | 16 | if len(in.Password) > 0 { 17 | obj["password"] = in.Password 18 | } 19 | 20 | if len(in.URL) > 0 { 21 | obj["url"] = in.URL 22 | } 23 | 24 | if len(in.User) > 0 { 25 | obj["user"] = in.User 26 | } 27 | 28 | out = append(out, obj) 29 | } 30 | 31 | return out 32 | } 33 | 34 | // Expanders 35 | 36 | func expandRKEClusterPrivateRegistries(p []interface{}) []rancher.PrivateRegistry { 37 | out := []rancher.PrivateRegistry{} 38 | if len(p) == 0 || p[0] == nil { 39 | return out 40 | } 41 | 42 | for i := range p { 43 | in := p[i].(map[string]interface{}) 44 | obj := rancher.PrivateRegistry{} 45 | 46 | if v, ok := in["is_default"].(bool); ok { 47 | obj.IsDefault = v 48 | } 49 | 50 | if v, ok := in["password"].(string); ok && len(v) > 0 { 51 | obj.Password = v 52 | } 53 | 54 | if v, ok := in["url"].(string); ok && len(v) > 0 { 55 | obj.URL = v 56 | } 57 | 58 | if v, ok := in["user"].(string); ok && len(v) > 0 { 59 | obj.User = v 60 | } 61 | out = append(out, obj) 62 | } 63 | 64 | return out 65 | } 66 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_kubeproxy.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterServicesKubeproxy(in rancher.KubeproxyService) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.ExtraArgs) > 0 { 13 | 
obj["extra_args"] = toMapInterface(in.ExtraArgs) 14 | } 15 | 16 | if len(in.ExtraBinds) > 0 { 17 | obj["extra_binds"] = toArrayInterface(in.ExtraBinds) 18 | } 19 | 20 | if len(in.ExtraEnv) > 0 { 21 | obj["extra_env"] = toArrayInterface(in.ExtraEnv) 22 | } 23 | 24 | if len(in.Image) > 0 { 25 | obj["image"] = in.Image 26 | } 27 | 28 | return []interface{}{obj} 29 | } 30 | 31 | // Expanders 32 | 33 | func expandRKEClusterServicesKubeproxy(p []interface{}) rancher.KubeproxyService { 34 | obj := rancher.KubeproxyService{} 35 | if len(p) == 0 || p[0] == nil { 36 | return obj 37 | } 38 | in := p[0].(map[string]interface{}) 39 | 40 | if v, ok := in["extra_args"].(map[string]interface{}); ok && len(v) > 0 { 41 | obj.ExtraArgs = toMapString(v) 42 | } 43 | 44 | if v, ok := in["extra_binds"].([]interface{}); ok && len(v) > 0 { 45 | obj.ExtraBinds = toArrayString(v) 46 | } 47 | 48 | if v, ok := in["extra_env"].([]interface{}); ok && len(v) > 0 { 49 | obj.ExtraEnv = toArrayString(v) 50 | } 51 | 52 | if v, ok := in["image"].(string); ok && len(v) > 0 { 53 | obj.Image = v 54 | } 55 | 56 | return obj 57 | } 58 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_scheduler.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterServicesScheduler(in rancher.SchedulerService) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.ExtraArgs) > 0 { 13 | obj["extra_args"] = toMapInterface(in.ExtraArgs) 14 | } 15 | 16 | if len(in.ExtraBinds) > 0 { 17 | obj["extra_binds"] = toArrayInterface(in.ExtraBinds) 18 | } 19 | 20 | if len(in.ExtraEnv) > 0 { 21 | obj["extra_env"] = toArrayInterface(in.ExtraEnv) 22 | } 23 | 24 | if len(in.Image) > 0 { 25 | obj["image"] = in.Image 26 | } 27 | 28 | return []interface{}{obj} 29 | } 30 | 31 | // Expanders 32 | 33 | func expandRKEClusterServicesScheduler(p []interface{}) rancher.SchedulerService { 34 | obj := rancher.SchedulerService{} 35 | if len(p) == 0 || p[0] == nil { 36 | return obj 37 | } 38 | in := p[0].(map[string]interface{}) 39 | 40 | if v, ok := in["extra_args"].(map[string]interface{}); ok && len(v) > 0 { 41 | obj.ExtraArgs = toMapString(v) 42 | } 43 | 44 | if v, ok := in["extra_binds"].([]interface{}); ok && len(v) > 0 { 45 | obj.ExtraBinds = toArrayString(v) 46 | } 47 | 48 | if v, ok := in["extra_env"].([]interface{}); ok && len(v) > 0 { 49 | obj.ExtraEnv = toArrayString(v) 50 | } 51 | 52 | if v, ok := in["image"].(string); ok && len(v) > 0 { 53 | obj.Image = v 54 | } 55 | 56 | return obj 57 | } 58 | -------------------------------------------------------------------------------- /Dockerfile.dapper: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | # FROM arm=armhf/ubuntu:16.04 3 | 4 | ARG DAPPER_HOST_ARCH 5 | ENV HOST_ARCH=${DAPPER_HOST_ARCH} ARCH=${DAPPER_HOST_ARCH} 6 | 7 | RUN apt-get update && \ 8 | apt-get install -y gcc make ca-certificates git wget curl vim less file kmod iptables xz-utils zip && \ 9 | rm -f /bin/sh && ln -s /bin/bash /bin/sh 10 | 11 | ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH_arm64=arm64 GOLANG_ARCH=GOLANG_ARCH_${ARCH} \ 12 | GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash 13 | 14 | RUN wget -O - https://storage.googleapis.com/golang/go1.19.7.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local 15 | 
16 | ENV DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \ 17 | DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm \ 18 | DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm64 \ 19 | DOCKER_URL=DOCKER_URL_${ARCH} 20 | 21 | RUN wget -O - ${!DOCKER_URL} > /usr/bin/docker && chmod +x /usr/bin/docker 22 | 23 | ENV DAPPER_SOURCE /go/src/github.com/rancher/terraform-provider-rke/ 24 | ENV DAPPER_RUN_ARGS --privileged -v /var/lib/docker 25 | ENV DAPPER_OUTPUT ./bin ./dist ./build/bin 26 | ENV DAPPER_DOCKER_SOCKET true 27 | ENV DAPPER_ENV TAG REPO GOOS CROSS DRONE_TAG 28 | ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache 29 | ENV HOME ${DAPPER_SOURCE} 30 | WORKDIR ${DAPPER_SOURCE} 31 | 32 | ENTRYPOINT ["./scripts/entry"] 33 | CMD ["ci"] 34 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_certificates.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterCertificatesFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "id": { 12 | Type: schema.TypeString, 13 | Computed: true, 14 | }, 15 | "certificate": { 16 | Type: schema.TypeString, 17 | Computed: true, 18 | Sensitive: true, 19 | }, 20 | "key": { 21 | Type: schema.TypeString, 22 | Sensitive: true, 23 | Computed: true, 24 | }, 25 | "config": { 26 | Type: schema.TypeString, 27 | Computed: true, 28 | Sensitive: true, 29 | }, 30 | "name": { 31 | Type: schema.TypeString, 32 | Computed: true, 33 | }, 34 | "common_name": { 35 | Type: schema.TypeString, 36 | Computed: true, 37 | }, 38 | "ou_name": { 39 | Type: schema.TypeString, 40 | Computed: true, 41 | }, 42 | "env_name": { 43 | Type: schema.TypeString, 44 | Computed: true, 45 | }, 46 | "path": { 47 | Type: schema.TypeString, 48 | Computed: true, 49 | }, 50 | "key_env_name": { 51 | Type: schema.TypeString, 52 | Computed: true, 53 | }, 54 | "key_path": { 55 | Type: schema.TypeString, 56 | Computed: true, 57 | }, 58 | "config_env_name": { 59 | Type: schema.TypeString, 60 | Computed: true, 61 | }, 62 | "config_path": { 63 | Type: schema.TypeString, 64 | Computed: true, 65 | }, 66 | } 67 | return s 68 | } 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 
16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | 36 | # Ignore terraform lock file 37 | .terraform.lock.hcl 38 | 39 | # Binaries for programs and plugins 40 | *.exe 41 | *.exe~ 42 | *.dll 43 | *.so 44 | *.dylib 45 | 46 | # Test binary, built with `go test -c` 47 | *.test 48 | 49 | # Output of the go coverage tool, specifically when used with LiteIDE 50 | *.out 51 | 52 | # Dependency directories (remove the comment below to include it) 53 | # vendor/ 54 | 55 | # Go workspace file 56 | go.work 57 | 58 | # Artifacts 59 | bin/ 60 | 61 | # Developer resources 62 | .idea 63 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_certificates.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "sort" 5 | 6 | "github.com/rancher/rke/pki" 7 | ) 8 | 9 | const ( 10 | rkeClusterCertificatesKubeAdminCertName = pki.KubeAdminCertName 11 | ) 12 | 13 | // Flatteners 14 | 15 | func flattenRKEClusterCertificates(in map[string]pki.CertificatePKI) (string, string, string, []interface{}) { 16 | var caCrt, clientCrt, clientKey string 17 | outLen := len(in) 18 | if in == nil || outLen == 0 { 19 | return caCrt, clientCrt, clientKey, []interface{}{} 20 | } 21 | sortedKeys := make([]string, 0, outLen) 22 | for k := range in { 23 | sortedKeys = append(sortedKeys, k) 24 | } 25 | sort.Strings(sortedKeys) 26 | out := make([]interface{}, outLen) 27 | for i, k := range sortedKeys { 28 | v := in[k] 29 | 30 | if k == pki.CACertName { 31 | caCrt = v.CertificatePEM 32 | } 33 | 34 | if k == pki.KubeAdminCertName { 35 | clientCrt = v.CertificatePEM 36 | clientKey = v.KeyPEM 37 | } 38 | 39 | obj := map[string]interface{}{ 40 | "id": k, 41 | "certificate": v.CertificatePEM, 42 | "key": v.KeyPEM, 43 | "config": v.Config, 44 | "name": v.Name, 45 | "common_name": v.CommonName, 46 | "ou_name": v.OUName, 47 | "env_name": v.EnvName, 48 | "path": v.Path, 49 | "key_env_name": v.KeyEnvName, 50 | "key_path": v.KeyPath, 51 | "config_env_name": v.ConfigEnvName, 52 | "config_path": v.ConfigPath, 53 | } 54 | out[i] = obj 55 | } 56 | return caCrt, clientCrt, clientKey, out 57 | } 58 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_services_kube_controller.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterServicesKubeControllerFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "cluster_cidr": { 12 | Type: schema.TypeString, 13 | Optional: true, 14 | Computed: true, 15 | Description: "CIDR Range for Pods in cluster", 16 | }, 17 | "extra_args": { 18 | Type: schema.TypeMap, 19 | Optional: true, 20 | Computed: true, 21 | Description: "Extra arguments that are added to the kube-controller service", 22 | }, 23 | "extra_binds": { 24 | Type: schema.TypeList, 25 | 
Optional: true, 26 | Computed: true, 27 | Description: "Extra binds added to the controlplane nodes", 28 | Elem: &schema.Schema{ 29 | Type: schema.TypeString, 30 | }, 31 | }, 32 | "extra_env": { 33 | Type: schema.TypeList, 34 | Optional: true, 35 | Computed: true, 36 | Description: "Extra env added to the controlplane nodes", 37 | Elem: &schema.Schema{ 38 | Type: schema.TypeString, 39 | }, 40 | }, 41 | "image": { 42 | Type: schema.TypeString, 43 | Optional: true, 44 | Computed: true, 45 | Description: "Docker image of the kube-controller service", 46 | }, 47 | "service_cluster_ip_range": { 48 | Type: schema.TypeString, 49 | Optional: true, 50 | Computed: true, 51 | Description: "Virtual IP range that will be used by Kubernetes services", 52 | }, 53 | } 54 | return s 55 | } 56 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_services.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterServicesFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "etcd": { 12 | Type: schema.TypeList, 13 | MaxItems: 1, 14 | Optional: true, 15 | Computed: true, 16 | Elem: &schema.Resource{ 17 | Schema: rkeClusterServicesEtcdFields(), 18 | }, 19 | }, 20 | "kube_api": { 21 | Type: schema.TypeList, 22 | MaxItems: 1, 23 | Optional: true, 24 | Computed: true, 25 | Elem: &schema.Resource{ 26 | Schema: rkeClusterServicesKubeAPIFields(), 27 | }, 28 | }, 29 | "kube_controller": { 30 | Type: schema.TypeList, 31 | MaxItems: 1, 32 | Optional: true, 33 | Computed: true, 34 | Elem: &schema.Resource{ 35 | Schema: rkeClusterServicesKubeControllerFields(), 36 | }, 37 | }, 38 | "kubelet": { 39 | Type: schema.TypeList, 40 | MaxItems: 1, 41 | Optional: true, 42 | Computed: true, 43 | Elem: &schema.Resource{ 44 | Schema: rkeClusterServicesKubeletFields(), 45 | }, 46 | }, 47 | "kubeproxy": { 48 | Type: schema.TypeList, 49 | MaxItems: 1, 50 | Optional: true, 51 | Computed: true, 52 | Elem: &schema.Resource{ 53 | Schema: rkeClusterServicesKubeproxyFields(), 54 | }, 55 | }, 56 | "scheduler": { 57 | Type: schema.TypeList, 58 | MaxItems: 1, 59 | Optional: true, 60 | Computed: true, 61 | Elem: &schema.Resource{ 62 | Schema: rkeClusterServicesSchedulerFields(), 63 | }, 64 | }, 65 | } 66 | return s 67 | } 68 | -------------------------------------------------------------------------------- /examples/aws_ec2/aws/nodes.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | cluster_id_tag = { 3 | "kubernetes.io/cluster/${var.cluster_id}" = "owned" 4 | } 5 | } 6 | 7 | data "aws_availability_zones" "az" { 8 | } 9 | 10 | resource "aws_default_subnet" "default" { 11 | availability_zone = data.aws_availability_zones.az.names[count.index] 12 | tags = local.cluster_id_tag 13 | count = length(data.aws_availability_zones.az.names) 14 | } 15 | 16 | resource "aws_security_group" "allow-all" { 17 | name = "rke-default-security-group" 18 | description = "rke" 19 | 20 | ingress { 21 | from_port = 0 22 | to_port = 0 23 | protocol = "-1" 24 | cidr_blocks = ["0.0.0.0/0"] 25 | } 26 | 27 | egress { 28 | from_port = 0 29 | to_port = 0 30 | protocol = "-1" 31 | cidr_blocks = ["0.0.0.0/0"] 32 | } 33 | 34 | tags = local.cluster_id_tag 35 | } 36 | 37 | resource "aws_instance" "rke-node" { 38 | count = 4 39 | 40 | ami = data.aws_ami.ubuntu.id 41 | instance_type = 
var.instance_type 42 | key_name = aws_key_pair.rke-node-key.id 43 | iam_instance_profile = aws_iam_instance_profile.rke-aws.name 44 | vpc_security_group_ids = [aws_security_group.allow-all.id] 45 | tags = local.cluster_id_tag 46 | 47 | provisioner "remote-exec" { 48 | connection { 49 | host = coalesce(self.public_ip, self.private_ip) 50 | type = "ssh" 51 | user = "ubuntu" 52 | private_key = tls_private_key.node-key.private_key_pem 53 | } 54 | 55 | inline = [ 56 | "curl ${var.docker_install_url} | sh", 57 | "sudo usermod -a -G docker ubuntu", 58 | ] 59 | } 60 | } 61 | 62 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_restore_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterRestoreConf rancher.RestoreConfig 12 | testRKEClusterRestoreInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterRestoreConf = rancher.RestoreConfig{ 17 | Restore: true, 18 | SnapshotName: "snapshot_name", 19 | } 20 | testRKEClusterRestoreInterface = []interface{}{ 21 | map[string]interface{}{ 22 | "restore": true, 23 | "snapshot_name": "snapshot_name", 24 | }, 25 | } 26 | } 27 | 28 | func TestFlattenRKEClusterRestore(t *testing.T) { 29 | 30 | cases := []struct { 31 | Input rancher.RestoreConfig 32 | ExpectedOutput []interface{} 33 | }{ 34 | { 35 | testRKEClusterRestoreConf, 36 | testRKEClusterRestoreInterface, 37 | }, 38 | } 39 | 40 | for _, tc := range cases { 41 | output := flattenRKEClusterRestore(tc.Input) 42 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 43 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 44 | tc.ExpectedOutput, output) 45 | } 46 | } 47 | } 48 | 49 | func TestExpandRKEClusterRestore(t *testing.T) { 50 | 51 | cases := []struct { 52 | Input []interface{} 53 | ExpectedOutput rancher.RestoreConfig 54 | }{ 55 | { 56 | testRKEClusterRestoreInterface, 57 | testRKEClusterRestoreConf, 58 | }, 59 | } 60 | 61 | for _, tc := range cases { 62 | output := expandRKEClusterRestore(tc.Input) 63 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 64 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 65 | tc.ExpectedOutput, output) 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_taint_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterTaintsConf []rancher.RKETaint 12 | testRKEClusterTaintsInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterTaintsConf = []rancher.RKETaint{ 17 | { 18 | Key: "key", 19 | Value: "value", 20 | Effect: "recipient", 21 | }, 22 | } 23 | testRKEClusterTaintsInterface = []interface{}{ 24 | map[string]interface{}{ 25 | "key": "key", 26 | "value": "value", 27 | "effect": "recipient", 28 | }, 29 | } 30 | } 31 | 32 | func TestFlattenRKEClusterTaints(t *testing.T) { 33 | 34 | cases := []struct { 35 | Input []rancher.RKETaint 36 | ExpectedOutput []interface{} 37 | }{ 38 | { 39 | testRKEClusterTaintsConf, 40 | testRKEClusterTaintsInterface, 41 | }, 42 | } 43 | 44 | for _, tc := range cases { 45 | output := flattenRKEClusterTaints(tc.Input) 46 | if !reflect.DeepEqual(output, 
tc.ExpectedOutput) { 47 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 48 | tc.ExpectedOutput, output) 49 | } 50 | } 51 | } 52 | 53 | func TestExpandRKEClusterTaints(t *testing.T) { 54 | 55 | cases := []struct { 56 | Input []interface{} 57 | ExpectedOutput []rancher.RKETaint 58 | }{ 59 | { 60 | testRKEClusterTaintsInterface, 61 | testRKEClusterTaintsConf, 62 | }, 63 | } 64 | 65 | for _, tc := range cases { 66 | output := expandRKEClusterTaints(tc.Input) 67 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 68 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 69 | tc.ExpectedOutput, output) 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_bastion_host.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterBastionHostFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "address": { 12 | Type: schema.TypeString, 13 | Required: true, 14 | Description: "Address of Bastion Host", 15 | }, 16 | "user": { 17 | Type: schema.TypeString, 18 | Required: true, 19 | Description: "SSH User to Bastion Host", 20 | }, 21 | "ignore_proxy_env_vars": { 22 | Type: schema.TypeBool, 23 | Optional: true, 24 | Default: false, 25 | Description: "Ignore proxy env vars at Bastion Host?", 26 | }, 27 | "port": { 28 | Type: schema.TypeString, 29 | Optional: true, 30 | Default: "22", 31 | Description: "SSH Port of Bastion Host", 32 | }, 33 | "ssh_agent_auth": { 34 | Type: schema.TypeBool, 35 | Optional: true, 36 | Computed: true, 37 | Description: "SSH Agent Auth enable", 38 | }, 39 | "ssh_cert": { 40 | Type: schema.TypeString, 41 | Sensitive: true, 42 | Optional: true, 43 | Description: "SSH Certificate Key", 44 | }, 45 | "ssh_cert_path": { 46 | Type: schema.TypeString, 47 | Optional: true, 48 | Computed: true, 49 | Description: "SSH Certificate Key Path", 50 | }, 51 | "ssh_key": { 52 | Type: schema.TypeString, 53 | Optional: true, 54 | Sensitive: true, 55 | Description: "SSH Private Key", 56 | }, 57 | "ssh_key_path": { 58 | Type: schema.TypeString, 59 | Optional: true, 60 | Computed: true, 61 | Description: "SSH Private Key Path", 62 | }, 63 | } 64 | return s 65 | } 66 | -------------------------------------------------------------------------------- /rke/import_rke_cluster.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "os" 8 | 9 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 10 | ) 11 | 12 | func resourceRKEClusterImport(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { 13 | files, err := splitImportID(d.Id()) 14 | if err != nil { 15 | return []*schema.ResourceData{}, err 16 | } 17 | clusterYamlBytes, err := os.ReadFile(files[0]) 18 | if err != nil { 19 | return []*schema.ResourceData{}, fmt.Errorf("Reading RKE config file %s: %v", files[0], err) 20 | } 21 | if len(clusterYamlBytes) == 0 { 22 | return []*schema.ResourceData{}, fmt.Errorf("RKE config is nil") 23 | } 24 | _, err = yamlToMapInterface(string(clusterYamlBytes)) 25 | if err != nil { 26 | return []*schema.ResourceData{}, fmt.Errorf("unmarshalling RKE config yaml: %v", err) 27 | } 28 | 29 | clusterStateBytes, err := os.ReadFile(files[1]) 30 | if err != nil { 
31 | return []*schema.ResourceData{}, fmt.Errorf("Reading RKE state file %s: %v", files[1], err) 32 | } 33 | if len(clusterStateBytes) == 0 { 34 | return []*schema.ResourceData{}, fmt.Errorf("RKE state is nil") 35 | } 36 | _, err = yamlToMapInterface(string(clusterStateBytes)) 37 | if err != nil { 38 | return []*schema.ResourceData{}, fmt.Errorf("unmarshalling RKE state yaml: %v", err) 39 | } 40 | 41 | if len(files) == 3 && files[2] == "dind" { 42 | d.Set("dind", true) 43 | } 44 | 45 | d.Set("cluster_yaml", string(clusterYamlBytes)) 46 | d.Set("rke_state", string(clusterStateBytes)) 47 | d.SetId("") 48 | diag := resourceRKEClusterCreate(ctx, d, meta) 49 | if diag.HasError() { 50 | return []*schema.ResourceData{}, errors.New(diag[0].Summary) 51 | } 52 | 53 | return []*schema.ResourceData{d}, nil 54 | } 55 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v[0-9]+.[0-9]+.[0-9]+' 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: write 13 | id-token: write 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: build binaries 17 | env: 18 | CROSS: 1 19 | VERSION: ${{ github.ref_name }} 20 | run: | 21 | make build 22 | 23 | - name: package 24 | run: | 25 | make package 26 | 27 | - name: retrieve GPG Credentials 28 | uses: rancher-eio/read-vault-secrets@main 29 | with: 30 | secrets: | 31 | secret/data/github/repo/${{ github.repository }}/signing/gpg privateKey | GPG_KEY; 32 | secret/data/github/repo/${{ github.repository }}/signing/gpg passphrase | GPG_PASSPHRASE 33 | 34 | - name: sign shasum 35 | env: 36 | GPG_KEY: ${{ env.GPG_KEY }} 37 | GPG_PASSPHRASE: ${{ env.GPG_PASSPHRASE }} 38 | run: | 39 | echo "Importing gpg key" 40 | echo -n '${{ env.GPG_KEY }}' | gpg --import --batch > /dev/null 41 | echo "signing SHASUM file" 42 | VERSION_NO_V="$(echo ${{ github.ref_name }} | tr -d 'v')" 43 | SHASUM_FILE="dist/artifacts/${{ github.ref_name }}/terraform-provider-rke_${VERSION_NO_V}_SHA256SUMS" 44 | echo '${{ env.GPG_PASSPHRASE }}' | gpg --detach-sig --pinentry-mode loopback --passphrase-fd 0 --output "${SHASUM_FILE}.sig" --sign "${SHASUM_FILE}" 45 | 46 | - name: GH release 47 | env: 48 | GH_TOKEN: ${{ github.token }} 49 | run: | 50 | gh release create ${{ github.ref_name }} --verify-tag --generate-notes ./dist/artifacts/${{ github.ref_name }}/* 51 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_rotate_certificates.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | const ( 9 | rkeClusterRotateCertificatesServicesEtcd = "etcd" 10 | rkeClusterRotateCertificatesServicesKubelet = "kubelet" 11 | rkeClusterRotateCertificatesServicesKubeAPI = "kube-apiserver" 12 | rkeClusterRotateCertificatesServicesKubeProxy = "kube-proxy" 13 | rkeClusterRotateCertificatesServicesKubeScheduler = "kube-scheduler" 14 | rkeClusterRotateCertificatesServicesKubeController = "kube-controller-manager" 15 | ) 16 | 17 | var ( 18 | rkeClusterRotateCertificatesServicesList = []string{ 19 | rkeClusterRotateCertificatesServicesEtcd, 20 | rkeClusterRotateCertificatesServicesKubelet, 21 | rkeClusterRotateCertificatesServicesKubeAPI, 
22 | rkeClusterRotateCertificatesServicesKubeProxy, 23 | rkeClusterRotateCertificatesServicesKubeScheduler, 24 | rkeClusterRotateCertificatesServicesKubeController, 25 | } 26 | ) 27 | 28 | //Schemas 29 | 30 | func rkeClusterRotateCertificatesFields() map[string]*schema.Schema { 31 | s := map[string]*schema.Schema{ 32 | "ca_certificates": { 33 | Type: schema.TypeBool, 34 | Optional: true, 35 | Default: false, 36 | Description: "Rotate CA Certificates", 37 | }, 38 | "services": { 39 | Type: schema.TypeList, 40 | Optional: true, 41 | Description: "Services to rotate their certs. valid values are etcd|kubelet|kube-apiserver|kube-proxy|kube-scheduler|kube-controller-manager", 42 | Elem: &schema.Schema{ 43 | Type: schema.TypeString, 44 | ValidateFunc: validation.StringInSlice(rkeClusterRotateCertificatesServicesList, true), 45 | }, 46 | }, 47 | } 48 | return s 49 | } 50 | -------------------------------------------------------------------------------- /.github/workflows/pre-release.yaml: -------------------------------------------------------------------------------- 1 | name: Prerelease 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+' 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: write 13 | id-token: write 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: build binaries 17 | env: 18 | CROSS: 1 19 | VERSION: ${{ github.ref_name }} 20 | run: | 21 | make build 22 | 23 | - name: package 24 | run: | 25 | make package 26 | 27 | - name: retrieve GPG Credentials 28 | uses: rancher-eio/read-vault-secrets@main 29 | with: 30 | secrets: | 31 | secret/data/github/repo/${{ github.repository }}/signing/gpg privateKey | GPG_KEY; 32 | secret/data/github/repo/${{ github.repository }}/signing/gpg passphrase | GPG_PASSPHRASE 33 | 34 | - name: sign shasum 35 | env: 36 | GPG_KEY: ${{ env.GPG_KEY }} 37 | GPG_PASSPHRASE: ${{ env.GPG_PASSPHRASE }} 38 | run: | 39 | echo "Importing gpg key" 40 | echo -n '${{ env.GPG_KEY }}' | gpg --import --batch > /dev/null 41 | echo "signing SHASUM file" 42 | VERSION_NO_V="$(echo ${{ github.ref_name }} | tr -d 'v')" 43 | SHASUM_FILE="dist/artifacts/${{ github.ref_name }}/terraform-provider-rke_${VERSION_NO_V}_SHA256SUMS" 44 | echo '${{ env.GPG_PASSPHRASE }}' | gpg --detach-sig --pinentry-mode loopback --passphrase-fd 0 --output "${SHASUM_FILE}.sig" --sign "${SHASUM_FILE}" 45 | 46 | - name: GH release 47 | env: 48 | GH_TOKEN: ${{ github.token }} 49 | run: | 50 | gh release create ${{ github.ref_name }} --prerelease --verify-tag --generate-notes ./dist/artifacts/${{ github.ref_name }}/* 51 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_authentication_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterAuthenticationConf rancher.AuthnConfig 12 | testRKEClusterAuthenticationInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterAuthenticationConf = rancher.AuthnConfig{ 17 | SANs: []string{"sans1", "sans2"}, 18 | Strategy: "strategy", 19 | } 20 | testRKEClusterAuthenticationInterface = []interface{}{ 21 | map[string]interface{}{ 22 | "sans": []interface{}{"sans1", "sans2"}, 23 | "strategy": "strategy", 24 | }, 25 | } 26 | } 27 | 28 | func TestFlattenRKEClusterAuthentication(t *testing.T) { 29 | 30 | cases := []struct { 31 | Input rancher.AuthnConfig 32 | 
ExpectedOutput []interface{} 33 | }{ 34 | { 35 | testRKEClusterAuthenticationConf, 36 | testRKEClusterAuthenticationInterface, 37 | }, 38 | } 39 | 40 | for _, tc := range cases { 41 | output := flattenRKEClusterAuthentication(tc.Input) 42 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 43 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 44 | tc.ExpectedOutput, output) 45 | } 46 | } 47 | } 48 | 49 | func TestExpandRKEClusterAuthentication(t *testing.T) { 50 | 51 | cases := []struct { 52 | Input []interface{} 53 | ExpectedOutput rancher.AuthnConfig 54 | }{ 55 | { 56 | testRKEClusterAuthenticationInterface, 57 | testRKEClusterAuthenticationConf, 58 | }, 59 | } 60 | 61 | for _, tc := range cases { 62 | output := expandRKEClusterAuthentication(tc.Input) 63 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 64 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 65 | tc.ExpectedOutput, output) 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import "testing" 4 | 5 | func Test_k8sVersionRequiresCri(t *testing.T) { 6 | type args struct { 7 | kubernetesVersion string 8 | } 9 | tests := []struct { 10 | name string 11 | args args 12 | want bool 13 | }{ 14 | { 15 | name: "v1.26.9-rancher1-1", 16 | args: args{ 17 | kubernetesVersion: "v1.26.9-rancher1-1", 18 | }, 19 | want: true, 20 | }, 21 | { 22 | name: "v1.26.4-rancher2-1", 23 | args: args{ 24 | kubernetesVersion: "v1.26.4-rancher2-1", 25 | }, 26 | want: true, 27 | }, 28 | { 29 | name: "v1.25.9-rancher2-2", 30 | args: args{ 31 | kubernetesVersion: "v1.25.9-rancher2-2", 32 | }, 33 | want: true, 34 | }, 35 | { 36 | name: "v1.24.13-rancher2-2", 37 | args: args{ 38 | kubernetesVersion: "v1.24.13-rancher2-2", 39 | }, 40 | want: true, 41 | }, 42 | { 43 | name: "v1.23.16-rancher2-3", 44 | args: args{ 45 | kubernetesVersion: "v1.23.16-rancher2-3", 46 | }, 47 | want: false, 48 | }, 49 | { 50 | name: "v1.22.17-rancher1-2", 51 | args: args{ 52 | kubernetesVersion: "v1.22.17-rancher1-2", 53 | }, 54 | want: false, 55 | }, 56 | { 57 | name: "v1.21.14-rancher1-1", 58 | args: args{ 59 | kubernetesVersion: "v1.21.14-rancher1-1", 60 | }, 61 | want: false, 62 | }, 63 | { 64 | name: "v1.20.15-rancher2-2", 65 | args: args{ 66 | kubernetesVersion: "v1.20.15-rancher2-2", 67 | }, 68 | want: false, 69 | }, 70 | { 71 | name: "invalid", 72 | args: args{ 73 | kubernetesVersion: "invalid", 74 | }, 75 | want: false, 76 | }, 77 | } 78 | for _, tt := range tests { 79 | t.Run(tt.name, func(t *testing.T) { 80 | if got := k8sVersionRequiresCri(tt.args.kubernetesVersion); got != tt.want { 81 | t.Errorf("k8sVersionRequiresCri() = %v, want %v", got, tt.want) 82 | } 83 | }) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_monitoring_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterMonitoringConf rancher.MonitoringConfig 12 | testRKEClusterMonitoringInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterMonitoringConf = rancher.MonitoringConfig{ 17 | Options: map[string]string{ 18 | "option1": "value1", 19 | "option2": "value2", 20 | }, 21 | Provider: "test", 
22 | } 23 | testRKEClusterMonitoringInterface = []interface{}{ 24 | map[string]interface{}{ 25 | "options": map[string]interface{}{ 26 | "option1": "value1", 27 | "option2": "value2", 28 | }, 29 | "provider": "test", 30 | }, 31 | } 32 | } 33 | 34 | func TestFlattenRKEClusterMonitoring(t *testing.T) { 35 | 36 | cases := []struct { 37 | Input rancher.MonitoringConfig 38 | ExpectedOutput []interface{} 39 | }{ 40 | { 41 | testRKEClusterMonitoringConf, 42 | testRKEClusterMonitoringInterface, 43 | }, 44 | } 45 | 46 | for _, tc := range cases { 47 | output := flattenRKEClusterMonitoring(tc.Input) 48 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 49 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 50 | tc.ExpectedOutput, output) 51 | } 52 | } 53 | } 54 | 55 | func TestExpandRKEClusterMonitoring(t *testing.T) { 56 | 57 | cases := []struct { 58 | Input []interface{} 59 | ExpectedOutput rancher.MonitoringConfig 60 | }{ 61 | { 62 | testRKEClusterMonitoringInterface, 63 | testRKEClusterMonitoringConf, 64 | }, 65 | } 66 | 67 | for _, tc := range cases { 68 | output := expandRKEClusterMonitoring(tc.Input) 69 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 70 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 71 | tc.ExpectedOutput, output) 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_authorization_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterAuthorizationConf rancher.AuthzConfig 12 | testRKEClusterAuthorizationInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterAuthorizationConf = rancher.AuthzConfig{ 17 | Mode: "rbac", 18 | Options: map[string]string{ 19 | "option1": "value1", 20 | "option2": "value2", 21 | }, 22 | } 23 | testRKEClusterAuthorizationInterface = []interface{}{ 24 | map[string]interface{}{ 25 | "mode": "rbac", 26 | "options": map[string]interface{}{ 27 | "option1": "value1", 28 | "option2": "value2", 29 | }, 30 | }, 31 | } 32 | } 33 | 34 | func TestFlattenRKEClusterAuthorization(t *testing.T) { 35 | 36 | cases := []struct { 37 | Input rancher.AuthzConfig 38 | ExpectedOutput []interface{} 39 | }{ 40 | { 41 | testRKEClusterAuthorizationConf, 42 | testRKEClusterAuthorizationInterface, 43 | }, 44 | } 45 | 46 | for _, tc := range cases { 47 | output := flattenRKEClusterAuthorization(tc.Input) 48 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 49 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 50 | tc.ExpectedOutput, output) 51 | } 52 | } 53 | } 54 | 55 | func TestExpandRKEClusterAuthorization(t *testing.T) { 56 | 57 | cases := []struct { 58 | Input []interface{} 59 | ExpectedOutput rancher.AuthzConfig 60 | }{ 61 | { 62 | testRKEClusterAuthorizationInterface, 63 | testRKEClusterAuthorizationConf, 64 | }, 65 | } 66 | 67 | for _, tc := range cases { 68 | output := expandRKEClusterAuthorization(tc.Input) 69 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 70 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 71 | tc.ExpectedOutput, output) 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_kube_controller.go: -------------------------------------------------------------------------------- 1 | 
package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterServicesKubeController(in rancher.KubeControllerService) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.ClusterCIDR) > 0 { 13 | obj["cluster_cidr"] = in.ClusterCIDR 14 | } 15 | 16 | if len(in.ExtraArgs) > 0 { 17 | obj["extra_args"] = toMapInterface(in.ExtraArgs) 18 | } 19 | 20 | if len(in.ExtraBinds) > 0 { 21 | obj["extra_binds"] = toArrayInterface(in.ExtraBinds) 22 | } 23 | 24 | if len(in.ExtraEnv) > 0 { 25 | obj["extra_env"] = toArrayInterface(in.ExtraEnv) 26 | } 27 | 28 | if len(in.Image) > 0 { 29 | obj["image"] = in.Image 30 | } 31 | 32 | if len(in.ServiceClusterIPRange) > 0 { 33 | obj["service_cluster_ip_range"] = in.ServiceClusterIPRange 34 | } 35 | 36 | return []interface{}{obj} 37 | } 38 | 39 | // Expanders 40 | 41 | func expandRKEClusterServicesKubeController(p []interface{}) rancher.KubeControllerService { 42 | obj := rancher.KubeControllerService{} 43 | if len(p) == 0 || p[0] == nil { 44 | return obj 45 | } 46 | in := p[0].(map[string]interface{}) 47 | 48 | if v, ok := in["cluster_cidr"].(string); ok && len(v) > 0 { 49 | obj.ClusterCIDR = v 50 | } 51 | 52 | if v, ok := in["extra_args"].(map[string]interface{}); ok && len(v) > 0 { 53 | obj.ExtraArgs = toMapString(v) 54 | } 55 | 56 | if v, ok := in["extra_binds"].([]interface{}); ok && len(v) > 0 { 57 | obj.ExtraBinds = toArrayString(v) 58 | } 59 | 60 | if v, ok := in["extra_env"].([]interface{}); ok && len(v) > 0 { 61 | obj.ExtraEnv = toArrayString(v) 62 | } 63 | 64 | if v, ok := in["image"].(string); ok && len(v) > 0 { 65 | obj.Image = v 66 | } 67 | 68 | if v, ok := in["service_cluster_ip_range"].(string); ok && len(v) > 0 { 69 | obj.ServiceClusterIPRange = v 70 | } 71 | 72 | return obj 73 | } 74 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_rotate_certificates_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterRotateCertificatesConf *rancher.RotateCertificates 12 | testRKEClusterRotateCertificatesInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterRotateCertificatesConf = &rancher.RotateCertificates{ 17 | CACertificates: true, 18 | Services: []string{"serv1", "serv2"}, 19 | } 20 | testRKEClusterRotateCertificatesInterface = []interface{}{ 21 | map[string]interface{}{ 22 | "ca_certificates": true, 23 | "services": []interface{}{"serv1", "serv2"}, 24 | }, 25 | } 26 | } 27 | 28 | func TestFlattenRKEClusterRotateCertificates(t *testing.T) { 29 | 30 | cases := []struct { 31 | Input *rancher.RotateCertificates 32 | ExpectedOutput []interface{} 33 | }{ 34 | { 35 | testRKEClusterRotateCertificatesConf, 36 | testRKEClusterRotateCertificatesInterface, 37 | }, 38 | } 39 | 40 | for _, tc := range cases { 41 | output := flattenRKEClusterRotateCertificates(tc.Input) 42 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 43 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 44 | tc.ExpectedOutput, output) 45 | } 46 | } 47 | } 48 | 49 | func TestExpandRKEClusterRotateCertificates(t *testing.T) { 50 | 51 | cases := []struct { 52 | Input []interface{} 53 | ExpectedOutput *rancher.RotateCertificates 54 | }{ 55 | { 56 | testRKEClusterRotateCertificatesInterface, 57 | 
testRKEClusterRotateCertificatesConf, 58 | }, 59 | } 60 | 61 | for _, tc := range cases { 62 | output := expandRKEClusterRotateCertificates(tc.Input) 63 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 64 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 65 | tc.ExpectedOutput, output) 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_authentication.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | const ( 9 | rkeClusterAuthenticationStrategyX509 = "x509" 10 | ) 11 | 12 | var ( 13 | rkeClusterAuthenticationStrategyList = []string{rkeClusterAuthenticationStrategyX509} 14 | ) 15 | 16 | //Schemas 17 | 18 | func rkeClusterAuthenticationWebhookFields() map[string]*schema.Schema { 19 | s := map[string]*schema.Schema{ 20 | "config_file": { 21 | Type: schema.TypeString, 22 | Optional: true, 23 | Description: "Multiline string that represent a custom webhook config file", 24 | }, 25 | "cache_timeout": { 26 | Type: schema.TypeString, 27 | Optional: true, 28 | Description: "Controls how long to cache authentication decisions", 29 | }, 30 | } 31 | 32 | return s 33 | } 34 | func rkeClusterAuthenticationFields() map[string]*schema.Schema { 35 | s := map[string]*schema.Schema{ 36 | "sans": { 37 | Type: schema.TypeList, 38 | Optional: true, 39 | Computed: true, 40 | Description: "List of additional hostnames and IPs to include in the api server PKI cert", 41 | Elem: &schema.Schema{ 42 | Type: schema.TypeString, 43 | }, 44 | }, 45 | "strategy": { 46 | Type: schema.TypeString, 47 | Optional: true, 48 | Default: rkeClusterAuthenticationStrategyX509, 49 | Description: "Authentication strategy that will be used in RKE k8s cluster", 50 | ValidateFunc: validation.StringInSlice(rkeClusterAuthenticationStrategyList, false), 51 | }, 52 | 53 | "webhook": { 54 | Type: schema.TypeList, 55 | MaxItems: 1, 56 | Optional: true, 57 | Computed: true, 58 | Description: "Webhook configuration options", 59 | Elem: &schema.Resource{ 60 | Schema: rkeClusterAuthenticationWebhookFields(), 61 | }, 62 | }, 63 | } 64 | return s 65 | } 66 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_private_registries_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterPrivateRegistriesConf []rancher.PrivateRegistry 12 | testRKEClusterPrivateRegistriesInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterPrivateRegistriesConf = []rancher.PrivateRegistry{ 17 | { 18 | IsDefault: true, 19 | Password: "XXXXXXXX", 20 | URL: "url.terraform.test", 21 | User: "user", 22 | }, 23 | } 24 | testRKEClusterPrivateRegistriesInterface = []interface{}{ 25 | map[string]interface{}{ 26 | "is_default": true, 27 | "password": "XXXXXXXX", 28 | "url": "url.terraform.test", 29 | "user": "user", 30 | }, 31 | } 32 | } 33 | 34 | func TestFlattenPrivateRegistries(t *testing.T) { 35 | 36 | cases := []struct { 37 | Input []rancher.PrivateRegistry 38 | ExpectedOutput []interface{} 39 | }{ 40 | { 41 | testRKEClusterPrivateRegistriesConf, 42 | testRKEClusterPrivateRegistriesInterface, 43 | }, 44 | } 
45 | 46 | for _, tc := range cases { 47 | output := flattenRKEClusterPrivateRegistries(tc.Input) 48 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 49 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 50 | tc.ExpectedOutput, output) 51 | } 52 | } 53 | } 54 | 55 | func TestExpandPrivateRegistries(t *testing.T) { 56 | 57 | cases := []struct { 58 | Input []interface{} 59 | ExpectedOutput []rancher.PrivateRegistry 60 | }{ 61 | { 62 | testRKEClusterPrivateRegistriesInterface, 63 | testRKEClusterPrivateRegistriesConf, 64 | }, 65 | } 66 | 67 | for _, tc := range cases { 68 | output := expandRKEClusterPrivateRegistries(tc.Input) 69 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 70 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 71 | tc.ExpectedOutput, output) 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_certificates_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/rancher/rke/pki" 8 | ) 9 | 10 | var ( 11 | testRKEClusterCertificatesConf map[string]pki.CertificatePKI 12 | testRKEClusterCertificatesInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterCertificatesConf = map[string]pki.CertificatePKI{ 17 | "test": { 18 | CertificatePEM: "certificate", 19 | KeyPEM: "key", 20 | Config: "config", 21 | Name: "name", 22 | CommonName: "common_name", 23 | OUName: "ou_name", 24 | EnvName: "env_name", 25 | Path: "path", 26 | KeyEnvName: "key_env_name", 27 | KeyPath: "key_path", 28 | ConfigEnvName: "config_env_name", 29 | ConfigPath: "config_path", 30 | }, 31 | } 32 | testRKEClusterCertificatesInterface = []interface{}{ 33 | map[string]interface{}{ 34 | "id": "test", 35 | "certificate": "certificate", 36 | "key": "key", 37 | "config": "config", 38 | "name": "name", 39 | "common_name": "common_name", 40 | "ou_name": "ou_name", 41 | "env_name": "env_name", 42 | "path": "path", 43 | "key_env_name": "key_env_name", 44 | "key_path": "key_path", 45 | "config_env_name": "config_env_name", 46 | "config_path": "config_path", 47 | }, 48 | } 49 | } 50 | 51 | func TestFlattenRKEClusterCertificates(t *testing.T) { 52 | 53 | cases := []struct { 54 | Input map[string]pki.CertificatePKI 55 | ExpectedOutput []interface{} 56 | }{ 57 | { 58 | testRKEClusterCertificatesConf, 59 | testRKEClusterCertificatesInterface, 60 | }, 61 | } 62 | 63 | for _, tc := range cases { 64 | _, _, _, output := flattenRKEClusterCertificates(tc.Input) 65 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 66 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 67 | tc.ExpectedOutput, output) 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_services_kubelet.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterServicesKubeletFields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "cluster_dns_server": { 12 | Type: schema.TypeString, 13 | Optional: true, 14 | Computed: true, 15 | Description: "Cluster DNS service ip", 16 | }, 17 | "cluster_domain": { 18 | Type: schema.TypeString, 19 | Optional: true, 20 | Default: "cluster.local", 21 | Description: "Domain of the 
cluster", 22 | }, 23 | "extra_args": { 24 | Type: schema.TypeMap, 25 | Optional: true, 26 | Computed: true, 27 | Description: "Extra arguments that are added to the kubelet services", 28 | }, 29 | "extra_binds": { 30 | Type: schema.TypeList, 31 | Optional: true, 32 | Computed: true, 33 | Description: "Extra binds added to the worker nodes", 34 | Elem: &schema.Schema{ 35 | Type: schema.TypeString, 36 | }, 37 | }, 38 | "extra_env": { 39 | Type: schema.TypeList, 40 | Optional: true, 41 | Computed: true, 42 | Description: "Extra env added to the nodes", 43 | Elem: &schema.Schema{ 44 | Type: schema.TypeString, 45 | }, 46 | }, 47 | "fail_swap_on": { 48 | Type: schema.TypeBool, 49 | Optional: true, 50 | Computed: true, 51 | Description: "Fail if swap is enabled", 52 | }, 53 | "generate_serving_certificate": { 54 | Type: schema.TypeBool, 55 | Optional: true, 56 | Default: false, 57 | }, 58 | "image": { 59 | Type: schema.TypeString, 60 | Optional: true, 61 | Computed: true, 62 | Description: "Docker image of the kubelet service", 63 | }, 64 | "infra_container_image": { 65 | Type: schema.TypeString, 66 | Optional: true, 67 | Computed: true, 68 | Description: "The image whose network/ipc namespaces containers in each pod will use", 69 | }, 70 | } 71 | return s 72 | } 73 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_bastion_host.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterBastionHost(in rancher.BastionHost) []interface{} { 10 | if len(in.Address) == 0 || len(in.User) == 0 { 11 | return nil 12 | } 13 | 14 | obj := make(map[string]interface{}) 15 | 16 | obj["address"] = in.Address 17 | obj["user"] = in.User 18 | 19 | obj["ignore_proxy_env_vars"] = in.IgnoreProxyEnvVars 20 | 21 | if len(in.Port) > 0 { 22 | obj["port"] = in.Port 23 | } 24 | 25 | obj["ssh_agent_auth"] = in.SSHAgentAuth 26 | 27 | if len(in.SSHCert) > 0 { 28 | obj["ssh_cert"] = in.SSHCert 29 | } 30 | 31 | if len(in.SSHCertPath) > 0 { 32 | obj["ssh_cert_path"] = in.SSHCertPath 33 | } 34 | 35 | if len(in.SSHKey) > 0 { 36 | obj["ssh_key"] = in.SSHKey 37 | } 38 | 39 | if len(in.SSHKeyPath) > 0 { 40 | obj["ssh_key_path"] = in.SSHKeyPath 41 | } 42 | 43 | return []interface{}{obj} 44 | } 45 | 46 | // Expanders 47 | 48 | func expandRKEClusterBastionHost(p []interface{}) rancher.BastionHost { 49 | obj := rancher.BastionHost{} 50 | if len(p) == 0 || p[0] == nil { 51 | return obj 52 | } 53 | in := p[0].(map[string]interface{}) 54 | 55 | if v, ok := in["address"].(string); ok && len(v) > 0 { 56 | obj.Address = v 57 | } 58 | 59 | if v, ok := in["ignore_proxy_env_vars"].(bool); ok { 60 | obj.IgnoreProxyEnvVars = v 61 | } 62 | 63 | if v, ok := in["port"].(string); ok && len(v) > 0 { 64 | obj.Port = v 65 | } 66 | 67 | if v, ok := in["ssh_agent_auth"].(bool); ok { 68 | obj.SSHAgentAuth = v 69 | } 70 | 71 | if v, ok := in["ssh_cert"].(string); ok && len(v) > 0 { 72 | obj.SSHCert = v 73 | } 74 | 75 | if v, ok := in["ssh_cert_path"].(string); ok && len(v) > 0 { 76 | obj.SSHCertPath = v 77 | } 78 | 79 | if v, ok := in["ssh_key"].(string); ok && len(v) > 0 { 80 | obj.SSHKey = v 81 | } 82 | 83 | if v, ok := in["ssh_key_path"].(string); ok && len(v) > 0 { 84 | obj.SSHKeyPath = v 85 | } 86 | 87 | if v, ok := in["user"].(string); ok && len(v) > 0 { 88 | obj.User = v 89 | } 90 | 91 | return obj 92 | } 93 | 
-------------------------------------------------------------------------------- /rke/schema_rke_cluster_dns.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | const ( 9 | rkeClusterDNSProviderKube = "kube-dns" 10 | rkeClusterDNSProviderCore = "coredns" 11 | rkeClusterDNSProviderNone = "none" 12 | ) 13 | 14 | var ( 15 | rkeClusterDNSProviderList = []string{ 16 | rkeClusterDNSProviderKube, 17 | rkeClusterDNSProviderCore, 18 | rkeClusterDNSProviderNone, 19 | } 20 | ) 21 | 22 | //Schemas 23 | 24 | func rkeClusterDNSNodelocalFields() map[string]*schema.Schema { 25 | s := map[string]*schema.Schema{ 26 | "ip_address": { 27 | Type: schema.TypeString, 28 | Optional: true, 29 | }, 30 | "node_selector": { 31 | Type: schema.TypeMap, 32 | Optional: true, 33 | Description: "Node selector key pair", 34 | }, 35 | } 36 | return s 37 | } 38 | 39 | func rkeClusterDNSFields() map[string]*schema.Schema { 40 | s := map[string]*schema.Schema{ 41 | "node_selector": { 42 | Type: schema.TypeMap, 43 | Optional: true, 44 | Description: "NodeSelector key pair", 45 | }, 46 | "nodelocal": { 47 | Type: schema.TypeList, 48 | Optional: true, 49 | MaxItems: 1, 50 | Description: "Nodelocal dns", 51 | Elem: &schema.Resource{ 52 | Schema: rkeClusterDNSNodelocalFields(), 53 | }, 54 | }, 55 | "provider": { 56 | Type: schema.TypeString, 57 | Optional: true, 58 | Default: rkeClusterDNSProviderCore, 59 | Description: "DNS provider", 60 | ValidateFunc: validation.StringInSlice(rkeClusterDNSProviderList, true), 61 | }, 62 | "reverse_cidrs": { 63 | Type: schema.TypeList, 64 | Optional: true, 65 | Description: "ReverseCIDRs", 66 | Elem: &schema.Schema{ 67 | Type: schema.TypeString, 68 | }, 69 | }, 70 | "upstream_nameservers": { 71 | Type: schema.TypeList, 72 | Optional: true, 73 | Description: "Upstream nameservers", 74 | Elem: &schema.Schema{ 75 | Type: schema.TypeString, 76 | }, 77 | }, 78 | } 79 | 80 | return s 81 | } 82 | -------------------------------------------------------------------------------- /examples/digitalocean/README.md: -------------------------------------------------------------------------------- 1 | # How To Deploy Kubernetes Clusters on DigitalOcean using Terraform and Terraform RKE Provider 2 | 3 | This directory contains an example of building a Kubernetes cluster on DigitalOcean using Terraform and the Terraform RKE provider.
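At its core, the example wires DigitalOcean droplets into a single `rke_cluster` resource and writes the resulting kubeconfig to disk. The sketch below illustrates that wiring under stated assumptions: the droplet and key resource names (`digitalocean_droplet.rke_node`, `tls_private_key.node_key`) are placeholders, not names copied from this example's `rke.tf`.

```hcl
# Illustrative sketch only; see rke.tf in this directory for the actual wiring.
resource "rke_cluster" "cluster" {
  dynamic "nodes" {
    # assumed droplet resource name; one cluster node per droplet
    for_each = { for i, d in digitalocean_droplet.rke_node : i => d }
    content {
      address = nodes.value.ipv4_address
      user    = "root"
      role    = ["controlplane", "etcd", "worker"]
      ssh_key = tls_private_key.node_key.private_key_pem
    }
  }
}

# Write the generated kubeconfig next to the configuration so kubectl can use it.
resource "local_file" "kube_config_cluster_yml" {
  filename = "${path.root}/kube_config_cluster.yml"
  content  = rke_cluster.cluster.kube_config_yaml
}
```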
4 | 5 | ## How to use 6 | 7 | ### Requirements 8 | 9 | - [terraform](https://terraform.io) v0.11+ 10 | - [terraform-provider-rke](https://github.com/rancher/terraform-provider-rke) 11 | - Valid DigitalOcean API token 12 | - [optional] `kubectl` command 13 | 14 | ### Deploy Kubernetes Cluster on DigitalOcean 15 | 16 | ```console 17 | #clone this repo 18 | $ git clone https://github.com/rancher/terraform-provider-rke 19 | $ cd terraform-provider-rke/examples/digitalocean 20 | 21 | #set API token to environment variables 22 | $ export DIGITALOCEAN_TOKEN="" 23 | 24 | #deploy 25 | $ terraform init && terraform apply -var do_token=$DIGITALOCEAN_TOKEN 26 | 27 | ########################################################################### 28 | #When "terraform apply" is completed, 29 | #kubeconfig file should be created in the current directory 30 | ########################################################################### 31 | 32 | #set KUBECONFIG environment variable for kubectl 33 | $ export KUBECONFIG=${PWD}/kube_config_cluster.yml 34 | 35 | ########################################################################### 36 | #Then, kubectl command can be used 37 | ########################################################################### 38 | 39 | #component statuses 40 | $ kubectl get cs 41 | 42 | NAME STATUS MESSAGE ERROR 43 | controller-manager Healthy ok 44 | scheduler Healthy ok 45 | etcd-0 Healthy {"health": "true"} 46 | 47 | #nodes 48 | $ kubectl get nodes 49 | 50 | NAME STATUS ROLES AGE VERSION 51 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready controlplane,etcd 1m v1.10.1 52 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready worker 1m v1.10.1 53 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready worker 1m v1.10.1 54 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready worker 1m v1.10.1 55 | ``` 56 | 57 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_kubeproxy_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterServicesKubeproxyConf rancher.KubeproxyService 12 | testRKEClusterServicesKubeproxyInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterServicesKubeproxyConf = rancher.KubeproxyService{} 17 | testRKEClusterServicesKubeproxyConf.ExtraArgs = map[string]string{ 18 | "arg_one": "one", 19 | "arg_two": "two", 20 | } 21 | testRKEClusterServicesKubeproxyConf.ExtraBinds = []string{"bind_one", "bind_two"} 22 | testRKEClusterServicesKubeproxyConf.ExtraEnv = []string{"env_one", "env_two"} 23 | testRKEClusterServicesKubeproxyConf.Image = "image" 24 | testRKEClusterServicesKubeproxyInterface = []interface{}{ 25 | map[string]interface{}{ 26 | "extra_args": map[string]interface{}{ 27 | "arg_one": "one", 28 | "arg_two": "two", 29 | }, 30 | "extra_binds": []interface{}{"bind_one", "bind_two"}, 31 | "extra_env": []interface{}{"env_one", "env_two"}, 32 | "image": "image", 33 | }, 34 | } 35 | } 36 | 37 | func TestFlattenRKEClusterServicesKubeproxy(t *testing.T) { 38 | 39 | cases := []struct { 40 | Input rancher.KubeproxyService 41 | ExpectedOutput []interface{} 42 | }{ 43 | { 44 | testRKEClusterServicesKubeproxyConf, 45 | testRKEClusterServicesKubeproxyInterface, 46 | }, 47 | } 48 | 49 | for _, tc := range cases { 50 | output := flattenRKEClusterServicesKubeproxy(tc.Input) 51 | if !reflect.DeepEqual(output, 
tc.ExpectedOutput) { 52 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 53 | tc.ExpectedOutput, output) 54 | } 55 | } 56 | } 57 | 58 | func TestExpandRKEClusterServicesKubeproxy(t *testing.T) { 59 | 60 | cases := []struct { 61 | Input []interface{} 62 | ExpectedOutput rancher.KubeproxyService 63 | }{ 64 | { 65 | testRKEClusterServicesKubeproxyInterface, 66 | testRKEClusterServicesKubeproxyConf, 67 | }, 68 | } 69 | 70 | for _, tc := range cases { 71 | output := expandRKEClusterServicesKubeproxy(tc.Input) 72 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 73 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 74 | tc.ExpectedOutput, output) 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_scheduler_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterServicesSchedulerConf rancher.SchedulerService 12 | testRKEClusterServicesSchedulerInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterServicesSchedulerConf = rancher.SchedulerService{} 17 | testRKEClusterServicesSchedulerConf.ExtraArgs = map[string]string{ 18 | "arg_one": "one", 19 | "arg_two": "two", 20 | } 21 | testRKEClusterServicesSchedulerConf.ExtraBinds = []string{"bind_one", "bind_two"} 22 | testRKEClusterServicesSchedulerConf.ExtraEnv = []string{"env_one", "env_two"} 23 | testRKEClusterServicesSchedulerConf.Image = "image" 24 | testRKEClusterServicesSchedulerInterface = []interface{}{ 25 | map[string]interface{}{ 26 | "extra_args": map[string]interface{}{ 27 | "arg_one": "one", 28 | "arg_two": "two", 29 | }, 30 | "extra_binds": []interface{}{"bind_one", "bind_two"}, 31 | "extra_env": []interface{}{"env_one", "env_two"}, 32 | "image": "image", 33 | }, 34 | } 35 | } 36 | 37 | func TestFlattenRKEClusterServicesScheduler(t *testing.T) { 38 | 39 | cases := []struct { 40 | Input rancher.SchedulerService 41 | ExpectedOutput []interface{} 42 | }{ 43 | { 44 | testRKEClusterServicesSchedulerConf, 45 | testRKEClusterServicesSchedulerInterface, 46 | }, 47 | } 48 | 49 | for _, tc := range cases { 50 | output := flattenRKEClusterServicesScheduler(tc.Input) 51 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 52 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 53 | tc.ExpectedOutput, output) 54 | } 55 | } 56 | } 57 | 58 | func TestExpandRKEClusterServicesScheduler(t *testing.T) { 59 | 60 | cases := []struct { 61 | Input []interface{} 62 | ExpectedOutput rancher.SchedulerService 63 | }{ 64 | { 65 | testRKEClusterServicesSchedulerInterface, 66 | testRKEClusterServicesSchedulerConf, 67 | }, 68 | } 69 | 70 | for _, tc := range cases { 71 | output := expandRKEClusterServicesScheduler(tc.Input) 72 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 73 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 74 | tc.ExpectedOutput, output) 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_ingress.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterIngress(in rancher.IngressConfig) []interface{} { 10 | obj := 
make(map[string]interface{}) 11 | 12 | if len(in.DNSPolicy) > 0 { 13 | obj["dns_policy"] = in.DNSPolicy 14 | } 15 | 16 | if len(in.ExtraArgs) > 0 { 17 | obj["extra_args"] = toMapInterface(in.ExtraArgs) 18 | } 19 | 20 | if in.HTTPPort > 0 { 21 | obj["http_port"] = in.HTTPPort 22 | } 23 | 24 | if in.HTTPSPort > 0 { 25 | obj["https_port"] = in.HTTPSPort 26 | } 27 | 28 | if len(in.NetworkMode) > 0 { 29 | obj["network_mode"] = in.NetworkMode 30 | } 31 | 32 | if len(in.NodeSelector) > 0 { 33 | obj["node_selector"] = toMapInterface(in.NodeSelector) 34 | } 35 | 36 | if len(in.Options) > 0 { 37 | obj["options"] = toMapInterface(in.Options) 38 | } 39 | 40 | if len(in.Provider) > 0 { 41 | obj["provider"] = in.Provider 42 | } 43 | 44 | if in.DefaultBackend != nil { 45 | obj["default_backend"] = *in.DefaultBackend 46 | } 47 | 48 | return []interface{}{obj} 49 | } 50 | 51 | // Expanders 52 | 53 | func expandRKEClusterIngress(p []interface{}) rancher.IngressConfig { 54 | obj := rancher.IngressConfig{} 55 | if len(p) == 0 || p[0] == nil { 56 | return obj 57 | } 58 | in := p[0].(map[string]interface{}) 59 | 60 | if v, ok := in["dns_policy"].(string); ok && len(v) > 0 { 61 | obj.DNSPolicy = v 62 | } 63 | 64 | if v, ok := in["extra_args"].(map[string]interface{}); ok && len(v) > 0 { 65 | obj.ExtraArgs = toMapString(v) 66 | } 67 | 68 | if v, ok := in["http_port"].(int); ok && v > 0 { 69 | obj.HTTPPort = v 70 | } 71 | 72 | if v, ok := in["https_port"].(int); ok && v > 0 { 73 | obj.HTTPSPort = v 74 | } 75 | 76 | if v, ok := in["network_mode"].(string); ok && len(v) > 0 { 77 | obj.NetworkMode = v 78 | } 79 | 80 | if v, ok := in["node_selector"].(map[string]interface{}); ok && len(v) > 0 { 81 | obj.NodeSelector = toMapString(v) 82 | } 83 | 84 | if v, ok := in["options"].(map[string]interface{}); ok && len(v) > 0 { 85 | obj.Options = toMapString(v) 86 | } 87 | 88 | if v, ok := in["provider"].(string); ok && len(v) > 0 { 89 | obj.Provider = v 90 | } 91 | 92 | if v, ok := in["default_backend"].(bool); ok { 93 | obj.DefaultBackend = &v 94 | } 95 | 96 | return obj 97 | } 98 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_bastion_host_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterBastionHostConf rancher.BastionHost 12 | testRKEClusterBastionHostInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterBastionHostConf = rancher.BastionHost{ 17 | Address: "bastion.terraform.test", 18 | IgnoreProxyEnvVars: true, 19 | SSHCert: "XXXXXXXX", 20 | SSHCertPath: "/home/user/.ssh", 21 | Port: "22", 22 | SSHAgentAuth: true, 23 | SSHKey: "XXXXXXXX", 24 | SSHKeyPath: "/home/user/.ssh", 25 | User: "test", 26 | } 27 | testRKEClusterBastionHostInterface = []interface{}{ 28 | map[string]interface{}{ 29 | "address": "bastion.terraform.test", 30 | "ignore_proxy_env_vars": true, 31 | "port": "22", 32 | "ssh_agent_auth": true, 33 | "ssh_cert": "XXXXXXXX", 34 | "ssh_cert_path": "/home/user/.ssh", 35 | "ssh_key": "XXXXXXXX", 36 | "ssh_key_path": "/home/user/.ssh", 37 | "user": "test", 38 | }, 39 | } 40 | } 41 | 42 | func TestFlattenRKEClusterBastionHost(t *testing.T) { 43 | 44 | cases := []struct { 45 | Input rancher.BastionHost 46 | ExpectedOutput []interface{} 47 | }{ 48 | { 49 | testRKEClusterBastionHostConf, 50 | testRKEClusterBastionHostInterface, 51 | }, 52 | } 53 | 54 | 
for _, tc := range cases { 55 | output := flattenRKEClusterBastionHost(tc.Input) 56 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 57 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 58 | tc.ExpectedOutput, output) 59 | } 60 | } 61 | } 62 | 63 | func TestExpandRKEClusterBastionHost(t *testing.T) { 64 | 65 | cases := []struct { 66 | Input []interface{} 67 | ExpectedOutput rancher.BastionHost 68 | }{ 69 | { 70 | testRKEClusterBastionHostInterface, 71 | testRKEClusterBastionHostConf, 72 | }, 73 | } 74 | 75 | for _, tc := range cases { 76 | output := expandRKEClusterBastionHost(tc.Input) 77 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 78 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 79 | tc.ExpectedOutput, output) 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterServices(in rancher.RKEConfigServices, p []interface{}) ([]interface{}, error) { 10 | var obj map[string]interface{} 11 | if len(p) == 0 || p[0] == nil { 12 | obj = make(map[string]interface{}) 13 | } else { 14 | obj = p[0].(map[string]interface{}) 15 | } 16 | 17 | v, ok := obj["etcd"].([]interface{}) 18 | if !ok { 19 | v = []interface{}{} 20 | } 21 | obj["etcd"] = flattenRKEClusterServicesEtcd(in.Etcd, v) 22 | kubeAPI, err := flattenRKEClusterServicesKubeAPI(in.KubeAPI) 23 | if err != nil { 24 | return []interface{}{obj}, err 25 | } 26 | obj["kube_api"] = kubeAPI 27 | obj["kube_controller"] = flattenRKEClusterServicesKubeController(in.KubeController) 28 | obj["kubelet"] = flattenRKEClusterServicesKubelet(in.Kubelet) 29 | obj["kubeproxy"] = flattenRKEClusterServicesKubeproxy(in.Kubeproxy) 30 | obj["scheduler"] = flattenRKEClusterServicesScheduler(in.Scheduler) 31 | 32 | return []interface{}{obj}, nil 33 | } 34 | 35 | // Expanders 36 | 37 | func expandRKEClusterServices(p []interface{}) (rancher.RKEConfigServices, error) { 38 | obj := rancher.RKEConfigServices{} 39 | if p == nil || len(p) == 0 || p[0] == nil { 40 | return obj, nil 41 | } 42 | in := p[0].(map[string]interface{}) 43 | 44 | if v, ok := in["etcd"].([]interface{}); ok && len(v) > 0 { 45 | etcd, err := expandRKEClusterServicesEtcd(v) 46 | if err != nil { 47 | return obj, err 48 | } 49 | obj.Etcd = etcd 50 | } 51 | 52 | if v, ok := in["kube_api"].([]interface{}); ok && len(v) > 0 { 53 | kubeAPI, err := expandRKEClusterServicesKubeAPI(v) 54 | if err != nil { 55 | return obj, err 56 | } 57 | obj.KubeAPI = kubeAPI 58 | } 59 | 60 | if v, ok := in["kube_controller"].([]interface{}); ok && len(v) > 0 { 61 | obj.KubeController = expandRKEClusterServicesKubeController(v) 62 | } 63 | 64 | if v, ok := in["kubelet"].([]interface{}); ok && len(v) > 0 { 65 | obj.Kubelet = expandRKEClusterServicesKubelet(v) 66 | } 67 | 68 | if v, ok := in["kubeproxy"].([]interface{}); ok && len(v) > 0 { 69 | obj.Kubeproxy = expandRKEClusterServicesKubeproxy(v) 70 | } 71 | 72 | if v, ok := in["scheduler"].([]interface{}); ok && len(v) > 0 { 73 | obj.Scheduler = expandRKEClusterServicesScheduler(v) 74 | } 75 | 76 | return obj, nil 77 | } 78 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_kubelet.go: 
-------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterServicesKubelet(in rancher.KubeletService) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | if len(in.ClusterDNSServer) > 0 { 13 | obj["cluster_dns_server"] = in.ClusterDNSServer 14 | } 15 | 16 | if len(in.ClusterDomain) > 0 { 17 | obj["cluster_domain"] = in.ClusterDomain 18 | } 19 | 20 | if len(in.ExtraArgs) > 0 { 21 | obj["extra_args"] = toMapInterface(in.ExtraArgs) 22 | } 23 | 24 | if len(in.ExtraBinds) > 0 { 25 | obj["extra_binds"] = toArrayInterface(in.ExtraBinds) 26 | } 27 | 28 | if len(in.ExtraEnv) > 0 { 29 | obj["extra_env"] = toArrayInterface(in.ExtraEnv) 30 | } 31 | 32 | obj["fail_swap_on"] = in.FailSwapOn 33 | obj["generate_serving_certificate"] = in.GenerateServingCertificate 34 | 35 | if len(in.Image) > 0 { 36 | obj["image"] = in.Image 37 | } 38 | 39 | if len(in.InfraContainerImage) > 0 { 40 | obj["infra_container_image"] = in.InfraContainerImage 41 | } 42 | 43 | return []interface{}{obj} 44 | } 45 | 46 | // Expanders 47 | 48 | func expandRKEClusterServicesKubelet(p []interface{}) rancher.KubeletService { 49 | obj := rancher.KubeletService{} 50 | if len(p) == 0 || p[0] == nil { 51 | return obj 52 | } 53 | in := p[0].(map[string]interface{}) 54 | 55 | if v, ok := in["cluster_dns_server"].(string); ok && len(v) > 0 { 56 | obj.ClusterDNSServer = v 57 | } 58 | 59 | if v, ok := in["cluster_domain"].(string); ok && len(v) > 0 { 60 | obj.ClusterDomain = v 61 | } 62 | 63 | if v, ok := in["extra_args"].(map[string]interface{}); ok && len(v) > 0 { 64 | obj.ExtraArgs = toMapString(v) 65 | } 66 | 67 | if v, ok := in["extra_binds"].([]interface{}); ok && len(v) > 0 { 68 | obj.ExtraBinds = toArrayString(v) 69 | } 70 | 71 | if v, ok := in["extra_env"].([]interface{}); ok && len(v) > 0 { 72 | obj.ExtraEnv = toArrayString(v) 73 | } 74 | 75 | if v, ok := in["fail_swap_on"].(bool); ok { 76 | obj.FailSwapOn = v 77 | } 78 | 79 | if v, ok := in["generate_serving_certificate"].(bool); ok { 80 | obj.GenerateServingCertificate = v 81 | } 82 | 83 | if v, ok := in["image"].(string); ok && len(v) > 0 { 84 | obj.Image = v 85 | } 86 | 87 | if v, ok := in["infra_container_image"].(string); ok && len(v) > 0 { 88 | obj.InfraContainerImage = v 89 | } 90 | 91 | return obj 92 | } 93 | -------------------------------------------------------------------------------- /examples/aws_ec2/README.md: -------------------------------------------------------------------------------- 1 | # How To Deploy Kubernetes Clusters on AWS using Terraform and Terraform RKE Provider 2 | 3 | This directory contains an example of building a Kubernetes cluster on AWS using Terraform and the Terraform RKE provider.
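A rough sketch of how the EC2 instances from the `aws/` module feed into the `rke_cluster` resource is shown below (see also the blog post referenced next). The `aws_instance.rke-node` and `tls_private_key.node-key` names appear in `aws/nodes.tf`, but the exact `rke.tf` wiring in this example may differ, so treat this as an illustration rather than a copy.

```hcl
# Illustrative sketch only; see rke.tf in this directory for the actual wiring.
resource "rke_cluster" "cluster" {
  cloud_provider {
    name = "aws" # lets Kubernetes reach the EC2 API through the node instance profile
  }

  nodes {
    address = aws_instance.rke-node[0].public_ip
    user    = "ubuntu"
    role    = ["controlplane", "etcd"]
    ssh_key = tls_private_key.node-key.private_key_pem
  }

  nodes {
    address = aws_instance.rke-node[1].public_ip
    user    = "ubuntu"
    role    = ["worker"]
    ssh_key = tls_private_key.node-key.private_key_pem
  }
}
```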
4 | 5 | > ref: [https://rancher.com/blog/2018/2018-05-14-rke-on-aws/](https://rancher.com/blog/2018/2018-05-14-rke-on-aws/) 6 | 7 | ## How to use 8 | 9 | ### Requirements 10 | 11 | - [terraform](https://terraform.io) v0.11+ 12 | - [terraform-provider-rke](https://github.com/rancher/terraform-provider-rke) 13 | - Valid AWS access_key and secret_key 14 | - [optional] `kubectl` command 15 | 16 | ### Deploy Kubernetes Cluster on AWS 17 | 18 | ```console 19 | #clone this repo 20 | $ git clone https://github.com/rancher/terraform-provider-rke 21 | $ cd terraform-provider-rke/examples/aws_ec2 22 | 23 | #set API keys to environment variables 24 | $ export AWS_ACCESS_KEY_ID="" 25 | $ export AWS_SECRET_ACCESS_KEY="" 26 | 27 | #deploy 28 | $ terraform init && terraform apply 29 | 30 | ########################################################################### 31 | #When "terraform apply" is completed, 32 | #kubeconfig file should be created in the current directory 33 | ########################################################################### 34 | 35 | #set KUBECONFIG environment variable for kubectl 36 | $ export KUBECONFIG=${PWD}/kube_config_cluster.yml 37 | 38 | ########################################################################### 39 | #Then, kubectl command can be used 40 | ########################################################################### 41 | 42 | #component statuses 43 | $ kubectl get cs 44 | 45 | NAME STATUS MESSAGE ERROR 46 | controller-manager Healthy ok 47 | scheduler Healthy ok 48 | etcd-0 Healthy {"health": "true"} 49 | 50 | #nodes 51 | $ kubectl get nodes 52 | 53 | NAME STATUS ROLES AGE VERSION 54 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready controlplane,etcd 1m v1.10.1 55 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready worker 1m v1.10.1 56 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready worker 1m v1.10.1 57 | ip-xx-xx-xx-xx.ap-northeast-1.compute.internal Ready worker 1m v1.10.1 58 | ``` 59 | 60 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterServicesConf rancher.RKEConfigServices 12 | testRKEClusterServicesInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterServicesConf = rancher.RKEConfigServices{ 17 | Etcd: testRKEClusterServicesETCDConf, 18 | KubeAPI: testRKEClusterServicesKubeAPIConf, 19 | KubeController: testRKEClusterServicesKubeControllerConf, 20 | Kubelet: testRKEClusterServicesKubeletConf, 21 | Kubeproxy: testRKEClusterServicesKubeproxyConf, 22 | Scheduler: testRKEClusterServicesSchedulerConf, 23 | } 24 | testRKEClusterServicesInterface = []interface{}{ 25 | map[string]interface{}{ 26 | "etcd": testRKEClusterServicesETCDInterface, 27 | "kube_api": testRKEClusterServicesKubeAPIInterface, 28 | "kube_controller": testRKEClusterServicesKubeControllerInterface, 29 | "kubelet": testRKEClusterServicesKubeletInterface, 30 | "kubeproxy": testRKEClusterServicesKubeproxyInterface, 31 | "scheduler": testRKEClusterServicesSchedulerInterface, 32 | }, 33 | } 34 | } 35 | 36 | func TestFlattenRKEClusterServices(t *testing.T) { 37 | 38 | cases := []struct { 39 | Input rancher.RKEConfigServices 40 | ExpectedOutput []interface{} 41 | }{ 42 | { 43 | testRKEClusterServicesConf, 44 | testRKEClusterServicesInterface, 45 | }, 46 | } 47 
| 48 | for _, tc := range cases { 49 | output, err := flattenRKEClusterServices(tc.Input, testRKEClusterServicesInterface) 50 | if err != nil { 51 | t.Fatalf("[ERROR] on flattener: %#v", err) 52 | } 53 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 54 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 55 | tc.ExpectedOutput, output) 56 | } 57 | } 58 | } 59 | 60 | func TestExpandRKEClusterServices(t *testing.T) { 61 | 62 | cases := []struct { 63 | Input []interface{} 64 | ExpectedOutput rancher.RKEConfigServices 65 | }{ 66 | { 67 | testRKEClusterServicesInterface, 68 | testRKEClusterServicesConf, 69 | }, 70 | } 71 | 72 | for _, tc := range cases { 73 | output, err := expandRKEClusterServices(tc.Input) 74 | if err != nil { 75 | t.Fatalf("[ERROR] on expander: %#v", err) 76 | } 77 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 78 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 79 | tc.ExpectedOutput, output) 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_kube_controller_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterServicesKubeControllerConf rancher.KubeControllerService 12 | testRKEClusterServicesKubeControllerInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterServicesKubeControllerConf = rancher.KubeControllerService{ 17 | ClusterCIDR: "10.42.0.0/16", 18 | ServiceClusterIPRange: "10.43.0.0/16", 19 | } 20 | testRKEClusterServicesKubeControllerConf.ExtraArgs = map[string]string{ 21 | "arg_one": "one", 22 | "arg_two": "two", 23 | } 24 | testRKEClusterServicesKubeControllerConf.ExtraBinds = []string{"bind_one", "bind_two"} 25 | testRKEClusterServicesKubeControllerConf.ExtraEnv = []string{"env_one", "env_two"} 26 | testRKEClusterServicesKubeControllerConf.Image = "image" 27 | testRKEClusterServicesKubeControllerInterface = []interface{}{ 28 | map[string]interface{}{ 29 | "cluster_cidr": "10.42.0.0/16", 30 | "extra_args": map[string]interface{}{ 31 | "arg_one": "one", 32 | "arg_two": "two", 33 | }, 34 | "extra_binds": []interface{}{"bind_one", "bind_two"}, 35 | "extra_env": []interface{}{"env_one", "env_two"}, 36 | "image": "image", 37 | "service_cluster_ip_range": "10.43.0.0/16", 38 | }, 39 | } 40 | } 41 | 42 | func TestFlattenRKEClusterServicesKubeController(t *testing.T) { 43 | 44 | cases := []struct { 45 | Input rancher.KubeControllerService 46 | ExpectedOutput []interface{} 47 | }{ 48 | { 49 | testRKEClusterServicesKubeControllerConf, 50 | testRKEClusterServicesKubeControllerInterface, 51 | }, 52 | } 53 | 54 | for _, tc := range cases { 55 | output := flattenRKEClusterServicesKubeController(tc.Input) 56 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 57 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 58 | tc.ExpectedOutput, output) 59 | } 60 | } 61 | } 62 | 63 | func TestExpandRKEClusterServicesKubeController(t *testing.T) { 64 | 65 | cases := []struct { 66 | Input []interface{} 67 | ExpectedOutput rancher.KubeControllerService 68 | }{ 69 | { 70 | testRKEClusterServicesKubeControllerInterface, 71 | testRKEClusterServicesKubeControllerConf, 72 | }, 73 | } 74 | 75 | for _, tc := range cases { 76 | output := expandRKEClusterServicesKubeController(tc.Input) 77 | if !reflect.DeepEqual(output, 
tc.ExpectedOutput) { 78 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 79 | tc.ExpectedOutput, output) 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_ingress_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterIngressConf rancher.IngressConfig 12 | testRKEClusterIngressInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterIngressConf = rancher.IngressConfig{ 17 | DNSPolicy: "test", 18 | ExtraArgs: map[string]string{ 19 | "arg_one": "one", 20 | "arg_two": "two", 21 | }, 22 | HTTPPort: 8080, 23 | HTTPSPort: 8443, 24 | NetworkMode: "network_mode", 25 | NodeSelector: map[string]string{ 26 | "node_one": "one", 27 | "node_two": "two", 28 | }, 29 | Options: map[string]string{ 30 | "option1": "value1", 31 | "option2": "value2", 32 | }, 33 | Provider: "test", 34 | DefaultBackend: newTrue(), 35 | } 36 | testRKEClusterIngressInterface = []interface{}{ 37 | map[string]interface{}{ 38 | "dns_policy": "test", 39 | "extra_args": map[string]interface{}{ 40 | "arg_one": "one", 41 | "arg_two": "two", 42 | }, 43 | "http_port": 8080, 44 | "https_port": 8443, 45 | "network_mode": "network_mode", 46 | "node_selector": map[string]interface{}{ 47 | "node_one": "one", 48 | "node_two": "two", 49 | }, 50 | "options": map[string]interface{}{ 51 | "option1": "value1", 52 | "option2": "value2", 53 | }, 54 | "provider": "test", 55 | "default_backend": true, 56 | }, 57 | } 58 | } 59 | 60 | func TestFlattenRKEClusterIngress(t *testing.T) { 61 | 62 | cases := []struct { 63 | Input rancher.IngressConfig 64 | ExpectedOutput []interface{} 65 | }{ 66 | { 67 | testRKEClusterIngressConf, 68 | testRKEClusterIngressInterface, 69 | }, 70 | } 71 | 72 | for _, tc := range cases { 73 | output := flattenRKEClusterIngress(tc.Input) 74 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 75 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 76 | tc.ExpectedOutput, output) 77 | } 78 | } 79 | } 80 | 81 | func TestExpandRKEClusterIngress(t *testing.T) { 82 | 83 | cases := []struct { 84 | Input []interface{} 85 | ExpectedOutput rancher.IngressConfig 86 | }{ 87 | { 88 | testRKEClusterIngressInterface, 89 | testRKEClusterIngressConf, 90 | }, 91 | } 92 | 93 | for _, tc := range cases { 94 | output := expandRKEClusterIngress(tc.Input) 95 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 96 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 97 | tc.ExpectedOutput, output) 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Issue: 2 | 3 | 4 | ## Problem 5 | 6 | 7 | ## Solution 8 | 9 | 10 | ## Testing 11 | 12 | 13 | ## Engineering Testing 14 | ### Manual Testing 15 | 16 | 17 | ### Automated Testing 18 | 19 | * Test types added/modified: 20 | * Unit 21 | * None 22 | * If "None" - Reason: 23 | 32 | * If "None" - GH Issue/PR: _LINK TO GH ISSUE/PR TO ADD TESTS_ 33 | 34 | Summary: _TODO_ 35 | 36 | ## QA Testing Considerations 37 | 38 | 39 | ### Regressions Considerations 40 | 41 | _TODO_ 42 | 43 | Existing / newly added automated tests that provide evidence there are no regressions: 44 | * _TODO_ 45 | 46 | ### 
Terraform 47 | 48 |
49 | Click to Expand Terraform 50 | 51 | 52 | 53 |
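The table-driven tests earlier in this section (services, kube_controller, ingress) all exercise the same symmetry: expanding the `[]interface{}` fixture yields the rancher struct, and flattening that struct yields the fixture back. The following round-trip sketch makes that property explicit by reusing the ingress fixture from the test file above; it is not part of the repository's test suite, just an illustration assuming it lives in the same `rke` package:

```go
package rke

import (
	"reflect"
	"testing"
)

// Hypothetical round-trip check (not in the repository): expanding the
// ingress interface fixture and flattening the result should reproduce
// the original fixture, which is what the two separate flattener and
// expander tests assert when combined.
func TestRKEClusterIngressRoundTrip(t *testing.T) {
	expanded := expandRKEClusterIngress(testRKEClusterIngressInterface)
	flattened := flattenRKEClusterIngress(expanded)
	if !reflect.DeepEqual(flattened, testRKEClusterIngressInterface) {
		t.Fatalf("Unexpected round-trip result.\nExpected: %#v\nGiven: %#v",
			testRKEClusterIngressInterface, flattened)
	}
}
```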
-------------------------------------------------------------------------------- /rke/structure_rke_cluster_cloud_provider.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterCloudProvider(in rancher.CloudProvider, p []interface{}) []interface{} { 10 | if len(in.Name) == 0 { 11 | return nil 12 | } 13 | 14 | var obj map[string]interface{} 15 | if len(p) == 0 || p[0] == nil { 16 | obj = make(map[string]interface{}) 17 | } else { 18 | obj = p[0].(map[string]interface{}) 19 | } 20 | 21 | obj["name"] = in.Name 22 | 23 | if in.AWSCloudProvider != nil { 24 | obj["aws_cloud_provider"] = flattenRKEClusterCloudProviderAws(in.AWSCloudProvider) 25 | } 26 | 27 | if in.AzureCloudProvider != nil { 28 | v, ok := obj["azure_cloud_provider"].([]interface{}) 29 | if !ok { 30 | v = []interface{}{} 31 | } 32 | obj["azure_cloud_provider"] = flattenRKEClusterCloudProviderAzure(in.AzureCloudProvider, v) 33 | } 34 | 35 | if len(in.CustomCloudProvider) > 0 { 36 | obj["custom_cloud_provider"] = in.CustomCloudProvider 37 | } 38 | 39 | if in.OpenstackCloudProvider != nil { 40 | v, ok := obj["openstack_cloud_provider"].([]interface{}) 41 | if !ok { 42 | v = []interface{}{} 43 | } 44 | obj["openstack_cloud_provider"] = flattenRKEClusterCloudProviderOpenstack(in.OpenstackCloudProvider, v) 45 | } 46 | 47 | if in.VsphereCloudProvider != nil { 48 | v, ok := obj["vsphere_cloud_provider"].([]interface{}) 49 | if !ok { 50 | v = []interface{}{} 51 | } 52 | obj["vsphere_cloud_provider"] = flattenRKEClusterCloudProviderVsphere(in.VsphereCloudProvider, v) 53 | } 54 | 55 | return []interface{}{obj} 56 | } 57 | 58 | // Expanders 59 | 60 | func expandRKEClusterCloudProvider(p []interface{}) rancher.CloudProvider { 61 | obj := rancher.CloudProvider{} 62 | if len(p) == 0 || p[0] == nil { 63 | return obj 64 | } 65 | in := p[0].(map[string]interface{}) 66 | 67 | if v, ok := in["aws_cloud_provider"].([]interface{}); ok && len(v) > 0 { 68 | obj.AWSCloudProvider = expandRKEClusterCloudProviderAws(v) 69 | } 70 | 71 | if v, ok := in["azure_cloud_provider"].([]interface{}); ok && len(v) > 0 { 72 | obj.AzureCloudProvider = expandRKEClusterCloudProviderAzure(v) 73 | } 74 | 75 | if v, ok := in["custom_cloud_provider"].(string); ok && len(v) > 0 { 76 | obj.CustomCloudProvider = v 77 | } 78 | 79 | if v, ok := in["name"].(string); ok && len(v) > 0 { 80 | obj.Name = v 81 | } 82 | 83 | if v, ok := in["openstack_cloud_provider"].([]interface{}); ok && len(v) > 0 { 84 | obj.OpenstackCloudProvider = expandRKEClusterCloudProviderOpenstack(v) 85 | } 86 | 87 | if v, ok := in["vsphere_cloud_provider"].([]interface{}); ok && len(v) > 0 { 88 | obj.VsphereCloudProvider = expandRKEClusterCloudProviderVsphere(v) 89 | } 90 | 91 | return obj 92 | } 93 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_dns.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterDNSNodelocal(in *rancher.Nodelocal) []interface{} { 10 | obj := make(map[string]interface{}) 11 | if in == nil { 12 | return nil 13 | } 14 | 15 | if len(in.IPAddress) > 0 { 16 | obj["ip_address"] = in.IPAddress 17 | } 18 | 19 | if len(in.NodeSelector) > 0 { 20 | obj["node_selector"] = toMapInterface(in.NodeSelector) 21 | } 22 | 
23 | return []interface{}{obj} 24 | } 25 | 26 | func flattenRKEClusterDNS(in *rancher.DNSConfig) []interface{} { 27 | obj := make(map[string]interface{}) 28 | if in == nil { 29 | return []interface{}{} 30 | } 31 | 32 | if in.Nodelocal != nil { 33 | obj["nodelocal"] = flattenRKEClusterDNSNodelocal(in.Nodelocal) 34 | } 35 | 36 | if len(in.NodeSelector) > 0 { 37 | obj["node_selector"] = toMapInterface(in.NodeSelector) 38 | } 39 | 40 | if len(in.Provider) > 0 { 41 | obj["provider"] = in.Provider 42 | } 43 | 44 | if len(in.ReverseCIDRs) > 0 { 45 | obj["reverse_cidrs"] = toArrayInterface(in.ReverseCIDRs) 46 | } 47 | 48 | if len(in.UpstreamNameservers) > 0 { 49 | obj["upstream_nameservers"] = toArrayInterface(in.UpstreamNameservers) 50 | } 51 | 52 | return []interface{}{obj} 53 | } 54 | 55 | // Expanders 56 | 57 | func expandRKEClusterDNSNodelocal(p []interface{}) *rancher.Nodelocal { 58 | obj := &rancher.Nodelocal{} 59 | if len(p) == 0 || p[0] == nil { 60 | return nil 61 | } 62 | in := p[0].(map[string]interface{}) 63 | 64 | if v, ok := in["ip_address"].(string); ok && len(v) > 0 { 65 | obj.IPAddress = v 66 | } 67 | 68 | if v, ok := in["node_selector"].(map[string]interface{}); ok && len(v) > 0 { 69 | obj.NodeSelector = toMapString(v) 70 | } 71 | 72 | return obj 73 | } 74 | 75 | func expandRKEClusterDNS(p []interface{}) *rancher.DNSConfig { 76 | obj := &rancher.DNSConfig{} 77 | if len(p) == 0 || p[0] == nil { 78 | return obj 79 | } 80 | in := p[0].(map[string]interface{}) 81 | 82 | if v, ok := in["nodelocal"].([]interface{}); ok && len(v) > 0 { 83 | obj.Nodelocal = expandRKEClusterDNSNodelocal(v) 84 | } 85 | 86 | if v, ok := in["node_selector"].(map[string]interface{}); ok && len(v) > 0 { 87 | obj.NodeSelector = toMapString(v) 88 | } 89 | 90 | if v, ok := in["provider"].(string); ok && len(v) > 0 { 91 | obj.Provider = v 92 | } 93 | 94 | if v, ok := in["reverse_cidrs"].([]interface{}); ok && len(v) > 0 { 95 | obj.ReverseCIDRs = toArrayString(v) 96 | } 97 | 98 | if v, ok := in["upstream_nameservers"].([]interface{}); ok && len(v) > 0 { 99 | obj.UpstreamNameservers = toArrayString(v) 100 | } 101 | 102 | return obj 103 | } 104 | -------------------------------------------------------------------------------- /examples/digitalocean/rke.tf: -------------------------------------------------------------------------------- 1 | ### Example works for RKE v1.13.5-rancher1-2 2 | 3 | variable "do_token" { 4 | default = "" 5 | } 6 | 7 | module "nodes" { 8 | source = "./do" 9 | do_token = var.do_token 10 | # region = "nyc1" 11 | # droplet_size = "t2.micro" 12 | } 13 | 14 | resource "rke_cluster" "cluster" { 15 | nodes { 16 | internal_address = module.nodes.internal_addresses[0] 17 | address = module.nodes.addresses[0] 18 | user = module.nodes.ssh_username 19 | ssh_key = module.nodes.private_key 20 | role = ["controlplane", "etcd"] 21 | } 22 | nodes { 23 | internal_address = module.nodes.internal_addresses[1] 24 | address = module.nodes.addresses[1] 25 | user = module.nodes.ssh_username 26 | ssh_key = module.nodes.private_key 27 | role = ["worker"] 28 | } 29 | nodes { 30 | internal_address = module.nodes.internal_addresses[2] 31 | address = module.nodes.addresses[2] 32 | user = module.nodes.ssh_username 33 | ssh_key = module.nodes.private_key 34 | role = ["worker"] 35 | } 36 | nodes { 37 | internal_address = module.nodes.internal_addresses[3] 38 | address = module.nodes.addresses[3] 39 | user = module.nodes.ssh_username 40 | ssh_key = module.nodes.private_key 41 | role = ["worker"] 42 | } 43 | 44 | services { 45 
| kube_api { 46 | extra_args = { 47 | kubelet-preferred-address-types = "InternalIP,ExternalIP,Hostname" 48 | feature-gates = "VolumeSnapshotDataSource=true,KubeletPluginsWatcher=true,CSINodeInfo=true,CSIDriverRegistry=true" 49 | } 50 | } 51 | kubelet { 52 | extra_args = { 53 | cloud-provider = "external" 54 | feature-gates = "VolumeSnapshotDataSource=true,KubeletPluginsWatcher=true,CSINodeInfo=true,CSIDriverRegistry=true" 55 | } 56 | } 57 | } 58 | 59 | ingress { 60 | provider = "none" 61 | } 62 | 63 | addon_job_timeout = 60 64 | addons = "${data.template_file.addons.rendered}" 65 | 66 | addons_include = [ 67 | # Cloud Controller Manager for DigitalOcean 68 | "${path.module}/files/ccm-digitalocean-v0.1.15.yaml", 69 | # CSI driver for DO, only v1.0.1 version works with rke v1.13.5-rancher1-2 70 | "${path.module}/files/csi-digitalocean-v1.0.1.yaml", 71 | # Ingess-nginx for generic cloud (with LoadBalancer type service) 72 | "${path.module}/files/ingress-mandatory.yaml", 73 | "${path.module}/files/ingress-cloud-generic.yaml" 74 | 75 | ] 76 | } 77 | 78 | resource "local_file" "kube_cluster_yaml" { 79 | filename = "./kube_config_cluster.yml" 80 | content = rke_cluster.cluster.kube_config_yaml 81 | } 82 | 83 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_services_kubelet_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterServicesKubeletConf rancher.KubeletService 12 | testRKEClusterServicesKubeletInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterServicesKubeletConf = rancher.KubeletService{ 17 | ClusterDNSServer: "dns.hostname.test", 18 | ClusterDomain: "terraform.test", 19 | FailSwapOn: true, 20 | GenerateServingCertificate: true, 21 | InfraContainerImage: "infra_image", 22 | } 23 | testRKEClusterServicesKubeletConf.ExtraArgs = map[string]string{ 24 | "arg_one": "one", 25 | "arg_two": "two", 26 | } 27 | testRKEClusterServicesKubeletConf.ExtraBinds = []string{"bind_one", "bind_two"} 28 | testRKEClusterServicesKubeletConf.ExtraEnv = []string{"env_one", "env_two"} 29 | testRKEClusterServicesKubeletConf.Image = "image" 30 | testRKEClusterServicesKubeletInterface = []interface{}{ 31 | map[string]interface{}{ 32 | "cluster_dns_server": "dns.hostname.test", 33 | "cluster_domain": "terraform.test", 34 | "extra_args": map[string]interface{}{ 35 | "arg_one": "one", 36 | "arg_two": "two", 37 | }, 38 | "extra_binds": []interface{}{"bind_one", "bind_two"}, 39 | "extra_env": []interface{}{"env_one", "env_two"}, 40 | "fail_swap_on": true, 41 | "generate_serving_certificate": true, 42 | "image": "image", 43 | "infra_container_image": "infra_image", 44 | }, 45 | } 46 | } 47 | 48 | func TestFlattenRKEClusterServicesKubelet(t *testing.T) { 49 | 50 | cases := []struct { 51 | Input rancher.KubeletService 52 | ExpectedOutput []interface{} 53 | }{ 54 | { 55 | testRKEClusterServicesKubeletConf, 56 | testRKEClusterServicesKubeletInterface, 57 | }, 58 | } 59 | 60 | for _, tc := range cases { 61 | output := flattenRKEClusterServicesKubelet(tc.Input) 62 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 63 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 64 | tc.ExpectedOutput, output) 65 | } 66 | } 67 | } 68 | 69 | func TestExpandRKEClusterServicesKubelet(t *testing.T) { 70 | 71 | cases := []struct { 72 | Input []interface{} 
73 | ExpectedOutput rancher.KubeletService 74 | }{ 75 | { 76 | testRKEClusterServicesKubeletInterface, 77 | testRKEClusterServicesKubeletConf, 78 | }, 79 | } 80 | 81 | for _, tc := range cases { 82 | output := expandRKEClusterServicesKubelet(tc.Input) 83 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 84 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 85 | tc.ExpectedOutput, output) 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /GNUmakefile: -------------------------------------------------------------------------------- 1 | GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor) 2 | WEBSITE_REPO=github.com/hashicorp/terraform-website 3 | PKG_NAME=rke 4 | TEST?="./${PKG_NAME}" 5 | PROVIDER_NAME=terraform-provider-rke 6 | 7 | default: build 8 | 9 | dapper-build: .dapper 10 | ./.dapper build 11 | 12 | dapper-ci: .dapper 13 | ./.dapper ci 14 | 15 | dapper-testacc: .dapper 16 | ./.dapper gotestacc.sh 17 | 18 | build: validate 19 | @sh -c "'$(CURDIR)/scripts/gobuild.sh'" 20 | 21 | validate: vet fmtcheck 22 | 23 | package: 24 | @sh -c "'$(CURDIR)/scripts/gopackage.sh'" 25 | 26 | test: fmtcheck 27 | go test $(TEST) || exit 1 28 | echo $(TEST) | \ 29 | xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4 30 | 31 | testacc: 32 | @sh -c "'$(CURDIR)/scripts/gotestacc.sh'" 33 | 34 | .dapper: 35 | @echo Downloading dapper 36 | @curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp 37 | @@chmod +x .dapper.tmp 38 | @./.dapper.tmp -v 39 | @mv .dapper.tmp .dapper 40 | 41 | vet: 42 | @echo "==> Checking that code complies with go vet requirements..." 43 | @go vet ./... ; if [ $$? -gt 0 ]; then \ 44 | echo ""; \ 45 | echo "Vet found suspicious constructs. Please check the reported constructs"; \ 46 | echo "and fix them if necessary before submitting the code for review."; \ 47 | exit 1; \ 48 | fi 49 | 50 | bin: 51 | go build -o $(PROVIDER_NAME) 52 | 53 | fmt: 54 | gofmt -w -s $(GOFMT_FILES) 55 | 56 | fmtcheck: 57 | @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" 58 | 59 | errcheck: 60 | @sh -c "'$(CURDIR)/scripts/errcheck.sh'" 61 | 62 | vendor-status: 63 | @govendor status 64 | 65 | test-compile: 66 | @if [ "$(TEST)" = "./..." ]; then \ 67 | echo "ERROR: Set TEST to a specific package. For example,"; \ 68 | echo " make test-compile TEST=./$(PKG_NAME)"; \ 69 | exit 1; \ 70 | fi 71 | go test -c $(TEST) $(TESTARGS) 72 | 73 | vendor: 74 | @echo "==> Updating vendor modules..." 75 | @GO111MODULE=on go mod vendor 76 | 77 | website: 78 | ifeq (,$(wildcard $(GOPATH)/src/$(WEBSITE_REPO))) 79 | echo "$(WEBSITE_REPO) not found in your GOPATH (necessary for layouts and assets), get-ting..." 80 | git clone https://$(WEBSITE_REPO) $(GOPATH)/src/$(WEBSITE_REPO) 81 | endif 82 | @$(MAKE) -C $(GOPATH)/src/$(WEBSITE_REPO) website-provider PROVIDER_PATH=$(shell pwd) PROVIDER_NAME=$(PKG_NAME) 83 | 84 | website-test: 85 | ifeq (,$(wildcard $(GOPATH)/src/$(WEBSITE_REPO))) 86 | echo "$(WEBSITE_REPO) not found in your GOPATH (necessary for layouts and assets), get-ting..." 
87 | git clone https://$(WEBSITE_REPO) $(GOPATH)/src/$(WEBSITE_REPO) 88 | endif 89 | @$(MAKE) -C $(GOPATH)/src/$(WEBSITE_REPO) website-provider-test PROVIDER_PATH=$(shell pwd) PROVIDER_NAME=$(PKG_NAME) 90 | 91 | .PHONY: bin build test testacc vet fmt fmtcheck errcheck vendor-status test-compile vendor website website-test build-dapper 92 | 93 | 94 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_ingress.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | "k8s.io/api/core/v1" 7 | ) 8 | 9 | const ( 10 | rkeClusterIngressNginx = "nginx" 11 | rkeClusterIngressNone = "none" 12 | rkeClusterIngressNetworkModeHostNetwork = "hostNetwork" 13 | rkeClusterIngressNetworkModeHostPort = "hostPort" 14 | rkeClusterIngressNetworkModeNone = "none" 15 | ) 16 | 17 | var ( 18 | rkeClusterIngressDNSPolicyClusterFirst = string(v1.DNSClusterFirst) 19 | rkeClusterIngressDNSPolicyClusterFirstWithHostNet = string(v1.DNSClusterFirstWithHostNet) 20 | rkeClusterIngressDNSPolicyDefault = string(v1.DNSDefault) 21 | rkeClusterIngressDNSPolicyNone = string(v1.DNSNone) 22 | rkeClusterIngressProviderList = []string{rkeClusterIngressNginx, rkeClusterIngressNone} 23 | rkeClusterIngressDNSPolicyList = []string{ 24 | rkeClusterIngressDNSPolicyClusterFirst, 25 | rkeClusterIngressDNSPolicyClusterFirstWithHostNet, 26 | rkeClusterIngressDNSPolicyDefault, 27 | rkeClusterIngressDNSPolicyNone, 28 | } 29 | rkeClusterIngressNetworkModeList = []string{ 30 | rkeClusterIngressNetworkModeHostNetwork, 31 | rkeClusterIngressNetworkModeHostPort, 32 | rkeClusterIngressNetworkModeNone, 33 | } 34 | ) 35 | 36 | //Schemas 37 | 38 | func rkeClusterIngressFields() map[string]*schema.Schema { 39 | s := map[string]*schema.Schema{ 40 | "dns_policy": { 41 | Type: schema.TypeString, 42 | Optional: true, 43 | ValidateFunc: validation.StringInSlice(rkeClusterIngressDNSPolicyList, true), 44 | Description: "Ingress controller dns policy", 45 | }, 46 | "extra_args": { 47 | Type: schema.TypeMap, 48 | Optional: true, 49 | Description: "Extra arguments for the ingress controller", 50 | }, 51 | "http_port": { 52 | Type: schema.TypeInt, 53 | Optional: true, 54 | Description: "Ingress controller http port", 55 | }, 56 | "https_port": { 57 | Type: schema.TypeInt, 58 | Optional: true, 59 | Description: "Ingress controller https port", 60 | }, 61 | "network_mode": { 62 | Type: schema.TypeString, 63 | Optional: true, 64 | ValidateFunc: validation.StringInSlice(rkeClusterIngressNetworkModeList, true), 65 | Description: "Ingress controller network mode", 66 | }, 67 | "node_selector": { 68 | Type: schema.TypeMap, 69 | Optional: true, 70 | Description: "Node selector key pair", 71 | }, 72 | "options": { 73 | Type: schema.TypeMap, 74 | Optional: true, 75 | Description: "Ingress controller options", 76 | }, 77 | "provider": { 78 | Type: schema.TypeString, 79 | Optional: true, 80 | Default: rkeClusterIngressNginx, 81 | ValidateFunc: validation.StringInSlice(rkeClusterIngressProviderList, true), 82 | Description: "Ingress controller provider", 83 | }, 84 | "default_backend": { 85 | Type: schema.TypeBool, 86 | Optional: true, 87 | Default: true, 88 | Description: "Ingress Default Backend", 89 | }, 90 | } 91 | return s 92 | } 93 | -------------------------------------------------------------------------------- 
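The ingress schema above constrains `dns_policy`, `network_mode`, and `provider` with `validation.StringInSlice(..., true)`, so user-supplied values are checked case-insensitively against the allowed lists. A small standalone sketch of that behaviour follows; the value list mirrors `rkeClusterIngressProviderList`, and the snippet is only an illustration, not provider code:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// Same list and ignoreCase=true as the ingress "provider" field.
	validate := validation.StringInSlice([]string{"nginx", "none"}, true)

	for _, v := range []string{"nginx", "NGINX", "traefik"} {
		_, errs := validate(v, "provider")
		fmt.Printf("provider=%q accepted=%v\n", v, len(errs) == 0)
	}
	// Expected: "nginx" and "NGINX" are accepted, "traefik" is rejected.
}
```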
/examples/digitalocean/files/ccm-digitalocean-v0.1.15.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: digitalocean-cloud-controller-manager 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | revisionHistoryLimit: 2 10 | template: 11 | metadata: 12 | labels: 13 | app: digitalocean-cloud-controller-manager 14 | annotations: 15 | scheduler.alpha.kubernetes.io/critical-pod: '' 16 | spec: 17 | dnsPolicy: Default 18 | hostNetwork: true 19 | serviceAccountName: cloud-controller-manager 20 | tolerations: 21 | # this taint is set by all kubelets running `--cloud-provider=external` 22 | # so we should tolerate it to schedule the digitalocean ccm 23 | - key: "node.cloudprovider.kubernetes.io/uninitialized" 24 | value: "true" 25 | effect: "NoSchedule" 26 | - key: "CriticalAddonsOnly" 27 | operator: "Exists" 28 | # cloud controller manages should be able to run on masters 29 | - key: "node-role.kubernetes.io/master" 30 | effect: NoSchedule 31 | containers: 32 | - image: digitalocean/digitalocean-cloud-controller-manager:v0.1.15 33 | name: digitalocean-cloud-controller-manager 34 | command: 35 | - "/bin/digitalocean-cloud-controller-manager" 36 | - "--leader-elect=false" 37 | resources: 38 | requests: 39 | cpu: 100m 40 | memory: 50Mi 41 | env: 42 | - name: DO_ACCESS_TOKEN 43 | valueFrom: 44 | secretKeyRef: 45 | name: digitalocean 46 | key: access-token 47 | 48 | --- 49 | apiVersion: v1 50 | kind: ServiceAccount 51 | metadata: 52 | name: cloud-controller-manager 53 | namespace: kube-system 54 | --- 55 | apiVersion: rbac.authorization.k8s.io/v1 56 | kind: ClusterRole 57 | metadata: 58 | annotations: 59 | rbac.authorization.kubernetes.io/autoupdate: "true" 60 | name: system:cloud-controller-manager 61 | rules: 62 | - apiGroups: 63 | - "" 64 | resources: 65 | - events 66 | verbs: 67 | - create 68 | - patch 69 | - update 70 | - apiGroups: 71 | - "" 72 | resources: 73 | - nodes 74 | verbs: 75 | - '*' 76 | - apiGroups: 77 | - "" 78 | resources: 79 | - nodes/status 80 | verbs: 81 | - patch 82 | - apiGroups: 83 | - "" 84 | resources: 85 | - services 86 | verbs: 87 | - list 88 | - patch 89 | - update 90 | - watch 91 | - apiGroups: 92 | - "" 93 | resources: 94 | - services/status 95 | verbs: 96 | - list 97 | - patch 98 | - update 99 | - watch 100 | - apiGroups: 101 | - "" 102 | resources: 103 | - serviceaccounts 104 | verbs: 105 | - create 106 | - apiGroups: 107 | - "" 108 | resources: 109 | - persistentvolumes 110 | verbs: 111 | - get 112 | - list 113 | - update 114 | - watch 115 | - apiGroups: 116 | - "" 117 | resources: 118 | - endpoints 119 | verbs: 120 | - create 121 | - get 122 | - list 123 | - watch 124 | - update 125 | --- 126 | kind: ClusterRoleBinding 127 | apiVersion: rbac.authorization.k8s.io/v1 128 | metadata: 129 | name: system:cloud-controller-manager 130 | roleRef: 131 | apiGroup: rbac.authorization.k8s.io 132 | kind: ClusterRole 133 | name: system:cloud-controller-manager 134 | subjects: 135 | - kind: ServiceAccount 136 | name: cloud-controller-manager 137 | namespace: kube-system 138 | 139 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_cloud_provider.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 
8 | const ( 9 | rkeClusterCloudProviderCustomName = "custom" 10 | rkeClusterCloudProviderExternalName = "external" 11 | ) 12 | 13 | var ( 14 | rkeClusterCloudProviderList = []string{ 15 | rkeClusterCloudProviderAwsName, 16 | rkeClusterCloudProviderAzureName, 17 | rkeClusterCloudProviderCustomName, 18 | rkeClusterCloudProviderExternalName, 19 | rkeClusterCloudProviderOpenstackName, 20 | rkeClusterCloudProviderVsphereName, 21 | } 22 | ) 23 | 24 | //Schemas 25 | 26 | func rkeClusterCloudProviderFields() map[string]*schema.Schema { 27 | s := map[string]*schema.Schema{ 28 | "name": { 29 | Type: schema.TypeString, 30 | Required: true, 31 | ValidateFunc: validation.StringInSlice(rkeClusterCloudProviderList, true), 32 | }, 33 | "aws_cloud_config": { 34 | Type: schema.TypeList, 35 | MaxItems: 1, 36 | Optional: true, 37 | Deprecated: "Use aws_cloud_provider instead", 38 | Elem: &schema.Resource{ 39 | Schema: rkeClusterCloudProviderAwsFields(), 40 | }, 41 | }, 42 | "aws_cloud_provider": { 43 | Type: schema.TypeList, 44 | MaxItems: 1, 45 | Optional: true, 46 | Description: "AWS Cloud Provider config", 47 | Elem: &schema.Resource{ 48 | Schema: rkeClusterCloudProviderAwsFields(), 49 | }, 50 | }, 51 | "azure_cloud_config": { 52 | Type: schema.TypeList, 53 | MaxItems: 1, 54 | Optional: true, 55 | Deprecated: "Use azure_cloud_provider instead", 56 | Elem: &schema.Resource{ 57 | Schema: rkeClusterCloudProviderAzureFields(), 58 | }, 59 | }, 60 | "azure_cloud_provider": { 61 | Type: schema.TypeList, 62 | MaxItems: 1, 63 | Optional: true, 64 | Description: "Azure Cloud Provider config", 65 | Elem: &schema.Resource{ 66 | Schema: rkeClusterCloudProviderAzureFields(), 67 | }, 68 | }, 69 | "custom_cloud_config": { 70 | Type: schema.TypeString, 71 | Optional: true, 72 | Deprecated: "Use custom_cloud_provider instead", 73 | }, 74 | "custom_cloud_provider": { 75 | Type: schema.TypeString, 76 | Optional: true, 77 | Description: "Custom Cloud Provider config", 78 | }, 79 | "openstack_cloud_config": { 80 | Type: schema.TypeList, 81 | MaxItems: 1, 82 | Optional: true, 83 | Deprecated: "Use openstack_cloud_provider instead", 84 | Elem: &schema.Resource{ 85 | Schema: rkeClusterCloudProviderOpenstackFields(), 86 | }, 87 | }, 88 | "openstack_cloud_provider": { 89 | Type: schema.TypeList, 90 | MaxItems: 1, 91 | Optional: true, 92 | Description: "Openstack Cloud Provider config", 93 | Elem: &schema.Resource{ 94 | Schema: rkeClusterCloudProviderOpenstackFields(), 95 | }, 96 | }, 97 | "vsphere_cloud_config": { 98 | Type: schema.TypeList, 99 | MaxItems: 1, 100 | Optional: true, 101 | Deprecated: "Use vsphere_cloud_provider instead", 102 | Elem: &schema.Resource{ 103 | Schema: rkeClusterCloudProviderVsphereFields(), 104 | }, 105 | }, 106 | "vsphere_cloud_provider": { 107 | Type: schema.TypeList, 108 | MaxItems: 1, 109 | Optional: true, 110 | Description: "Vsphere Cloud Provider config", 111 | Elem: &schema.Resource{ 112 | Schema: rkeClusterCloudProviderVsphereFields(), 113 | }, 114 | }, 115 | } 116 | return s 117 | } 118 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_cloud_provider_aws.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | const ( 8 | rkeClusterCloudProviderAwsName = "aws" 9 | ) 10 | 11 | //Schemas 12 | 13 | func rkeClusterCloudProviderAwsGlobalFields() map[string]*schema.Schema { 14 | s := 
map[string]*schema.Schema{ 15 | "disable_security_group_ingress": { 16 | Type: schema.TypeBool, 17 | Optional: true, 18 | Default: false, 19 | Description: "Disables the automatic ingress creation", 20 | }, 21 | "disable_strict_zone_check": { 22 | Type: schema.TypeBool, 23 | Optional: true, 24 | Default: false, 25 | Description: "Setting this to true will disable the check and provide a warning that the check was skipped", 26 | }, 27 | "elb_security_group": { 28 | Type: schema.TypeString, 29 | Optional: true, 30 | Description: "Use these ELB security groups instead create new", 31 | }, 32 | "kubernetes_cluster_id": { 33 | Type: schema.TypeString, 34 | Optional: true, 35 | Description: "The cluster id we'll use to identify our cluster resources", 36 | }, 37 | "kubernetes_cluster_tag": { 38 | Type: schema.TypeString, 39 | Optional: true, 40 | Description: "Legacy cluster id we'll use to identify our cluster resources", 41 | }, 42 | "role_arn": { 43 | Type: schema.TypeString, 44 | Optional: true, 45 | Description: "IAM role to assume when interaction with AWS APIs", 46 | }, 47 | "route_table_id": { 48 | Type: schema.TypeString, 49 | Optional: true, 50 | Description: "Enables using a specific RouteTable", 51 | }, 52 | "subnet_id": { 53 | Type: schema.TypeString, 54 | Optional: true, 55 | Description: "Enables using a specific subnet to use for ELB's", 56 | }, 57 | "vpc": { 58 | Type: schema.TypeString, 59 | Optional: true, 60 | Description: "The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premises. If the flag is set also the KubernetesClusterTag must be provided", 61 | }, 62 | "zone": { 63 | Type: schema.TypeString, 64 | Optional: true, 65 | Description: "The AWS zone", 66 | }, 67 | } 68 | return s 69 | } 70 | 71 | func rkeClusterCloudProviderAwsServiceOverrideFields() map[string]*schema.Schema { 72 | s := map[string]*schema.Schema{ 73 | "service": { 74 | Type: schema.TypeString, 75 | Required: true, 76 | }, 77 | "key": { 78 | Type: schema.TypeString, 79 | Optional: true, 80 | Deprecated: "Use service instead", 81 | }, 82 | "region": { 83 | Type: schema.TypeString, 84 | Optional: true, 85 | }, 86 | "signing_method": { 87 | Type: schema.TypeString, 88 | Optional: true, 89 | Computed: true, 90 | }, 91 | "signing_name": { 92 | Type: schema.TypeString, 93 | Optional: true, 94 | }, 95 | "signing_region": { 96 | Type: schema.TypeString, 97 | Optional: true, 98 | }, 99 | "url": { 100 | Type: schema.TypeString, 101 | Optional: true, 102 | }, 103 | } 104 | return s 105 | } 106 | 107 | func rkeClusterCloudProviderAwsFields() map[string]*schema.Schema { 108 | s := map[string]*schema.Schema{ 109 | "global": { 110 | Type: schema.TypeList, 111 | MaxItems: 1, 112 | Optional: true, 113 | Elem: &schema.Resource{ 114 | Schema: rkeClusterCloudProviderAwsGlobalFields(), 115 | }, 116 | }, 117 | "service_override": { 118 | Type: schema.TypeList, 119 | Optional: true, 120 | Elem: &schema.Resource{ 121 | Schema: rkeClusterCloudProviderAwsServiceOverrideFields(), 122 | }, 123 | }, 124 | } 125 | return s 126 | } 127 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_system_images.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterSystemImagesFields() map[string]*schema.Schema { 10 | s := 
map[string]*schema.Schema{ 11 | "etcd": { 12 | Type: schema.TypeString, 13 | Optional: true, 14 | }, 15 | "alpine": { 16 | Type: schema.TypeString, 17 | Optional: true, 18 | }, 19 | "nginx_proxy": { 20 | Type: schema.TypeString, 21 | Optional: true, 22 | }, 23 | "cert_downloader": { 24 | Type: schema.TypeString, 25 | Optional: true, 26 | }, 27 | "kubernetes_services_sidecar": { 28 | Type: schema.TypeString, 29 | Optional: true, 30 | }, 31 | "kube_dns": { 32 | Type: schema.TypeString, 33 | Optional: true, 34 | }, 35 | "dnsmasq": { 36 | Type: schema.TypeString, 37 | Optional: true, 38 | }, 39 | "kube_dns_sidecar": { 40 | Type: schema.TypeString, 41 | Optional: true, 42 | }, 43 | "kube_dns_autoscaler": { 44 | Type: schema.TypeString, 45 | Optional: true, 46 | }, 47 | "coredns": { 48 | Type: schema.TypeString, 49 | Optional: true, 50 | }, 51 | "coredns_autoscaler": { 52 | Type: schema.TypeString, 53 | Optional: true, 54 | }, 55 | "nodelocal": { 56 | Type: schema.TypeString, 57 | Optional: true, 58 | }, 59 | "kubernetes": { 60 | Type: schema.TypeString, 61 | Optional: true, 62 | }, 63 | "flannel": { 64 | Type: schema.TypeString, 65 | Optional: true, 66 | }, 67 | "flannel_cni": { 68 | Type: schema.TypeString, 69 | Optional: true, 70 | }, 71 | "calico_node": { 72 | Type: schema.TypeString, 73 | Optional: true, 74 | }, 75 | "calico_cni": { 76 | Type: schema.TypeString, 77 | Optional: true, 78 | }, 79 | "calico_controllers": { 80 | Type: schema.TypeString, 81 | Optional: true, 82 | }, 83 | "calico_ctl": { 84 | Type: schema.TypeString, 85 | Optional: true, 86 | }, 87 | "calico_flex_vol": { 88 | Type: schema.TypeString, 89 | Optional: true, 90 | }, 91 | "canal_node": { 92 | Type: schema.TypeString, 93 | Optional: true, 94 | }, 95 | "canal_cni": { 96 | Type: schema.TypeString, 97 | Optional: true, 98 | }, 99 | "canal_flannel": { 100 | Type: schema.TypeString, 101 | Optional: true, 102 | }, 103 | "canal_flex_vol": { 104 | Type: schema.TypeString, 105 | Optional: true, 106 | }, 107 | "weave_node": { 108 | Type: schema.TypeString, 109 | Optional: true, 110 | }, 111 | "weave_cni": { 112 | Type: schema.TypeString, 113 | Optional: true, 114 | }, 115 | "pod_infra_container": { 116 | Type: schema.TypeString, 117 | Optional: true, 118 | }, 119 | "ingress": { 120 | Type: schema.TypeString, 121 | Optional: true, 122 | }, 123 | "ingress_backend": { 124 | Type: schema.TypeString, 125 | Optional: true, 126 | }, 127 | "metrics_server": { 128 | Type: schema.TypeString, 129 | Optional: true, 130 | }, 131 | "windows_pod_infra_container": { 132 | Type: schema.TypeString, 133 | Optional: true, 134 | }, 135 | "aci_cni_deploy_container": { 136 | Type: schema.TypeString, 137 | Optional: true, 138 | }, 139 | "aci_host_container": { 140 | Type: schema.TypeString, 141 | Optional: true, 142 | }, 143 | "aci_opflex_container": { 144 | Type: schema.TypeString, 145 | Optional: true, 146 | }, 147 | "aci_mcast_container": { 148 | Type: schema.TypeString, 149 | Optional: true, 150 | }, 151 | "aci_ovs_container": { 152 | Type: schema.TypeString, 153 | Optional: true, 154 | }, 155 | "aci_controller_container": { 156 | Type: schema.TypeString, 157 | Optional: true, 158 | }, 159 | } 160 | return s 161 | } 162 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_dns_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 
| testRKEClusterDNSNodelocalConf *rancher.Nodelocal 12 | testRKEClusterDNSNodelocalInterface []interface{} 13 | testRKEClusterDNSConf *rancher.DNSConfig 14 | testRKEClusterDNSInterface []interface{} 15 | ) 16 | 17 | func init() { 18 | testRKEClusterDNSNodelocalConf = &rancher.Nodelocal{ 19 | NodeSelector: map[string]string{ 20 | "sel1": "value1", 21 | "sel2": "value2", 22 | }, 23 | IPAddress: "ip_address", 24 | } 25 | testRKEClusterDNSNodelocalInterface = []interface{}{ 26 | map[string]interface{}{ 27 | "node_selector": map[string]interface{}{ 28 | "sel1": "value1", 29 | "sel2": "value2", 30 | }, 31 | "ip_address": "ip_address", 32 | }, 33 | } 34 | testRKEClusterDNSConf = &rancher.DNSConfig{ 35 | Nodelocal: testRKEClusterDNSNodelocalConf, 36 | NodeSelector: map[string]string{ 37 | "sel1": "value1", 38 | "sel2": "value2", 39 | }, 40 | Provider: "kube-dns", 41 | ReverseCIDRs: []string{"rev1", "rev2"}, 42 | UpstreamNameservers: []string{"up1", "up2"}, 43 | } 44 | testRKEClusterDNSInterface = []interface{}{ 45 | map[string]interface{}{ 46 | "nodelocal": testRKEClusterDNSNodelocalInterface, 47 | "node_selector": map[string]interface{}{ 48 | "sel1": "value1", 49 | "sel2": "value2", 50 | }, 51 | "provider": "kube-dns", 52 | "reverse_cidrs": []interface{}{"rev1", "rev2"}, 53 | "upstream_nameservers": []interface{}{"up1", "up2"}, 54 | }, 55 | } 56 | } 57 | 58 | func TestFlattenRKEClusterDNSNodelocal(t *testing.T) { 59 | 60 | cases := []struct { 61 | Input *rancher.Nodelocal 62 | ExpectedOutput []interface{} 63 | }{ 64 | { 65 | testRKEClusterDNSNodelocalConf, 66 | testRKEClusterDNSNodelocalInterface, 67 | }, 68 | } 69 | 70 | for _, tc := range cases { 71 | output := flattenRKEClusterDNSNodelocal(tc.Input) 72 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 73 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 74 | tc.ExpectedOutput, output) 75 | } 76 | } 77 | } 78 | 79 | func TestFlattenRKEClusterDNS(t *testing.T) { 80 | 81 | cases := []struct { 82 | Input *rancher.DNSConfig 83 | ExpectedOutput []interface{} 84 | }{ 85 | { 86 | testRKEClusterDNSConf, 87 | testRKEClusterDNSInterface, 88 | }, 89 | } 90 | 91 | for _, tc := range cases { 92 | output := flattenRKEClusterDNS(tc.Input) 93 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 94 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 95 | tc.ExpectedOutput, output) 96 | } 97 | } 98 | } 99 | 100 | func TestExpandRKEClusterDNSNodelocal(t *testing.T) { 101 | 102 | cases := []struct { 103 | Input []interface{} 104 | ExpectedOutput *rancher.Nodelocal 105 | }{ 106 | { 107 | testRKEClusterDNSNodelocalInterface, 108 | testRKEClusterDNSNodelocalConf, 109 | }, 110 | } 111 | 112 | for _, tc := range cases { 113 | output := expandRKEClusterDNSNodelocal(tc.Input) 114 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 115 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 116 | tc.ExpectedOutput, output) 117 | } 118 | } 119 | } 120 | 121 | func TestExpandRKEClusterDNS(t *testing.T) { 122 | 123 | cases := []struct { 124 | Input []interface{} 125 | ExpectedOutput *rancher.DNSConfig 126 | }{ 127 | { 128 | testRKEClusterDNSInterface, 129 | testRKEClusterDNSConf, 130 | }, 131 | } 132 | 133 | for _, tc := range cases { 134 | output := expandRKEClusterDNS(tc.Input) 135 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 136 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 137 | tc.ExpectedOutput, output) 138 | } 139 | } 140 | } 141 | 
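One edge case the fixtures above do not cover: `expandRKEClusterDNSNodelocal` returns nil (rather than an empty `&rancher.Nodelocal{}`) when the block list is empty, so an omitted `nodelocal` block leaves `DNSConfig.Nodelocal` unset. A short sketch of that behaviour, assuming it sits in the same `rke` package; it is not part of the repository's test suite:

```go
package rke

import "testing"

// Hypothetical test (not in the repository): the expander short-circuits
// to nil on empty input, mirroring the `len(p) == 0 || p[0] == nil`
// guard in expandRKEClusterDNSNodelocal.
func TestExpandRKEClusterDNSNodelocalEmpty(t *testing.T) {
	if out := expandRKEClusterDNSNodelocal(nil); out != nil {
		t.Fatalf("expected nil for nil input, got %#v", out)
	}
	if out := expandRKEClusterDNSNodelocal([]interface{}{}); out != nil {
		t.Fatalf("expected nil for empty input, got %#v", out)
	}
}
```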
-------------------------------------------------------------------------------- /docs/guides/upgrade_to_0.13.md: -------------------------------------------------------------------------------- 1 | --- 2 | page_title: "Upgrade to terraform 0.13" 3 | --- 4 | 5 | # Upgrade to terraform 0.13 6 | 7 | The RKE provider is already published at the [rke terraform registry](https://registry.terraform.io/providers/rancher/rke) as a verified provider, which means it is compatible with terraform 0.13 and is installed automatically by it. Because the RKE provider had to be installed manually prior to terraform 0.13, the [in-house provider step](https://www.terraform.io/upgrade-guides/0-13.html#in-house-providers) is required during the terraform upgrade process. 8 | 9 | ## Steps 10 | 11 | These are the steps to properly update the RKE provider to terraform 0.13: 12 | 13 | 1. Before starting, make sure that `terraform apply` doesn't show any diff 14 | 2. Update the `version.tf` file, adding a provider definition with the required version 15 | 16 | ``` 17 | terraform { 18 | required_providers { 19 | ... 20 | rke = { 21 | source = "rancher/rke" 22 | version = "1.1.0" 23 | } 24 | } 25 | ... 26 | } 27 | ``` 28 | 29 | 3. Execute `terraform 0.13upgrade` 30 | 31 | ``` 32 | $ terraform 0.13upgrade 33 | 34 | This command will update the configuration files in the given directory to use 35 | the new provider source features from Terraform v0.13. It will also highlight 36 | any providers for which the source cannot be detected, and advise how to 37 | proceed. 38 | 39 | We recommend using this command in a clean version control work tree, so that 40 | you can easily see the proposed changes as a diff against the latest commit. 41 | If you have uncommited changes already present, we recommend aborting this 42 | command and dealing with them before running this command again. 43 | 44 | Would you like to upgrade the module in the current directory? 45 | Only 'yes' will be accepted to confirm. 46 | 47 | Enter a value: yes 48 | 49 | ----------------------------------------------------------------------------- 50 | 51 | Upgrade complete! 52 | 53 | Use your version control system to review the proposed changes, make any 54 | necessary adjustments, and then commit. 55 | ``` 56 | 57 | 4. Replace the previous in-house provider definition in the tfstate. The tfstate will be updated; backing it up before proceeding is recommended. [More info](https://www.terraform.io/upgrade-guides/0-13.html#in-house-providers) 58 | 59 | ``` 60 | $ terraform state replace-provider 'registry.terraform.io/-/rke' 'registry.terraform.io/rancher/rke' 61 | Terraform will perform the following actions: 62 | 63 | ~ Updating provider: 64 | - registry.terraform.io/-/rke 65 | + registry.terraform.io/rancher/rke 66 | 67 | Changing 1 resources: 68 | 69 | rke_cluster.cluster 70 | 71 | Do you want to make these changes? 72 | Only 'yes' will be accepted to continue. 73 | 74 | Enter a value: yes 75 | 76 | Successfully replaced provider for 1 resources. 77 | ``` 78 | 79 | 5. Initialize the provider 80 | 81 | ``` 82 | $ terraform init 83 | 84 | Initializing the backend... 85 | 86 | Initializing provider plugins... 87 | - Finding rancher/rke versions matching "1.1.0"... 88 | - Installing rancher/rke v1.1.0... 89 | - Installed rancher/rke v1.1.0 (signed by a HashiCorp partner, key ID 2EEB0F9AD44A135C) 90 | 91 | Partner and community providers are signed by their developers.
92 | If you'd like to know more about provider signing, you can read about it here: 93 | https://www.terraform.io/docs/plugins/signing.html 94 | 95 | Terraform has been successfully initialized! 96 | 97 | You may now begin working with Terraform. Try running "terraform plan" to see 98 | any changes that are required for your infrastructure. All Terraform commands 99 | should now work. 100 | 101 | If you ever set or change modules or backend configuration for Terraform, 102 | rerun this command to reinitialize your working directory. If you forget, other 103 | commands will detect it and remind you to do so if necessary. 104 | ``` 105 | 106 | More info at [terraform upgrade guide to 0.13](https://www.terraform.io/upgrade-guides/0-13.html) 107 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_services_etcd.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | //Schemas 8 | 9 | func rkeClusterServicesEtcdBackupConfigS3Fields() map[string]*schema.Schema { 10 | s := map[string]*schema.Schema{ 11 | "access_key": { 12 | Type: schema.TypeString, 13 | Optional: true, 14 | Sensitive: true, 15 | }, 16 | "bucket_name": { 17 | Type: schema.TypeString, 18 | Optional: true, 19 | }, 20 | "custom_ca": { 21 | Type: schema.TypeString, 22 | Optional: true, 23 | }, 24 | "endpoint": { 25 | Type: schema.TypeString, 26 | Optional: true, 27 | }, 28 | "folder": { 29 | Type: schema.TypeString, 30 | Optional: true, 31 | }, 32 | "region": { 33 | Type: schema.TypeString, 34 | Optional: true, 35 | }, 36 | "secret_key": { 37 | Type: schema.TypeString, 38 | Optional: true, 39 | Sensitive: true, 40 | }, 41 | } 42 | return s 43 | } 44 | 45 | func rkeClusterServicesEtcdBackupConfigFields() map[string]*schema.Schema { 46 | s := map[string]*schema.Schema{ 47 | "enabled": { 48 | Type: schema.TypeBool, 49 | Optional: true, 50 | Default: true, 51 | }, 52 | "interval_hours": { 53 | Type: schema.TypeInt, 54 | Optional: true, 55 | Default: 12, 56 | }, 57 | "retention": { 58 | Type: schema.TypeInt, 59 | Optional: true, 60 | Default: 6, 61 | }, 62 | "s3_backup_config": { 63 | Type: schema.TypeList, 64 | MaxItems: 1, 65 | Optional: true, 66 | Elem: &schema.Resource{ 67 | Schema: rkeClusterServicesEtcdBackupConfigS3Fields(), 68 | }, 69 | }, 70 | "safe_timestamp": { 71 | Type: schema.TypeBool, 72 | Optional: true, 73 | Default: false, 74 | }, 75 | "timeout": { 76 | Type: schema.TypeInt, 77 | Optional: true, 78 | Default: 300, 79 | }, 80 | } 81 | return s 82 | } 83 | 84 | func rkeClusterServicesEtcdFields() map[string]*schema.Schema { 85 | s := map[string]*schema.Schema{ 86 | "backup_config": { 87 | Type: schema.TypeList, 88 | MaxItems: 1, 89 | Optional: true, 90 | Computed: true, 91 | Elem: &schema.Resource{ 92 | Schema: rkeClusterServicesEtcdBackupConfigFields(), 93 | }, 94 | }, 95 | "ca_cert": { 96 | Type: schema.TypeString, 97 | Optional: true, 98 | Computed: true, 99 | Sensitive: true, 100 | }, 101 | "cert": { 102 | Type: schema.TypeString, 103 | Optional: true, 104 | Computed: true, 105 | Sensitive: true, 106 | }, 107 | "creation": { 108 | Type: schema.TypeString, 109 | Optional: true, 110 | Computed: true, 111 | }, 112 | "external_urls": { 113 | Type: schema.TypeList, 114 | Optional: true, 115 | Computed: true, 116 | Elem: &schema.Schema{ 117 | Type: schema.TypeString, 118 | }, 119 | }, 120 | "extra_args": { 121 | Type: schema.TypeMap, 
122 | Optional: true, 123 | Computed: true, 124 | }, 125 | "extra_binds": { 126 | Type: schema.TypeList, 127 | Optional: true, 128 | Computed: true, 129 | Elem: &schema.Schema{ 130 | Type: schema.TypeString, 131 | }, 132 | }, 133 | "extra_env": { 134 | Type: schema.TypeList, 135 | Optional: true, 136 | Computed: true, 137 | Elem: &schema.Schema{ 138 | Type: schema.TypeString, 139 | }, 140 | }, 141 | "gid": { 142 | Type: schema.TypeInt, 143 | Optional: true, 144 | Default: 0, 145 | }, 146 | "image": { 147 | Type: schema.TypeString, 148 | Optional: true, 149 | Computed: true, 150 | }, 151 | "key": { 152 | Type: schema.TypeString, 153 | Optional: true, 154 | Computed: true, 155 | Sensitive: true, 156 | }, 157 | "path": { 158 | Type: schema.TypeString, 159 | Optional: true, 160 | Computed: true, 161 | }, 162 | "retention": { 163 | Type: schema.TypeString, 164 | Optional: true, 165 | Computed: true, 166 | }, 167 | "snapshot": { 168 | Type: schema.TypeBool, 169 | Optional: true, 170 | Default: true, 171 | }, 172 | "uid": { 173 | Type: schema.TypeInt, 174 | Optional: true, 175 | Default: 0, 176 | }, 177 | } 178 | return s 179 | } 180 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_cloud_provider_z_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterCloudProviderConfAzure rancher.CloudProvider 12 | testRKEClusterCloudProviderInterfaceAzure []interface{} 13 | testRKEClusterCloudProviderConfOpenstack rancher.CloudProvider 14 | testRKEClusterCloudProviderInterfaceOpenstack []interface{} 15 | testRKEClusterCloudProviderConfVsphere rancher.CloudProvider 16 | testRKEClusterCloudProviderInterfaceVsphere []interface{} 17 | testRKEClusterCloudProviderConf rancher.CloudProvider 18 | testRKEClusterCloudProviderInterface []interface{} 19 | ) 20 | 21 | func init() { 22 | testRKEClusterCloudProviderConfAzure = rancher.CloudProvider{ 23 | AzureCloudProvider: testRKEClusterCloudProviderAzureConf, 24 | Name: "azure-test", 25 | } 26 | testRKEClusterCloudProviderInterfaceAzure = []interface{}{ 27 | map[string]interface{}{ 28 | "azure_cloud_provider": testRKEClusterCloudProviderAzureInterface, 29 | "name": "azure-test", 30 | }, 31 | } 32 | testRKEClusterCloudProviderConfOpenstack = rancher.CloudProvider{ 33 | Name: "openstack-test", 34 | OpenstackCloudProvider: testRKEClusterCloudProviderOpenstackConf, 35 | } 36 | testRKEClusterCloudProviderInterfaceOpenstack = []interface{}{ 37 | map[string]interface{}{ 38 | "name": "openstack-test", 39 | "openstack_cloud_provider": testRKEClusterCloudProviderOpenstackInterface, 40 | }, 41 | } 42 | testRKEClusterCloudProviderConfVsphere = rancher.CloudProvider{ 43 | Name: "vsphere-test", 44 | VsphereCloudProvider: testRKEClusterCloudProviderVsphereConf, 45 | } 46 | testRKEClusterCloudProviderInterfaceVsphere = []interface{}{ 47 | map[string]interface{}{ 48 | "name": "vsphere-test", 49 | "vsphere_cloud_provider": testRKEClusterCloudProviderVsphereInterface, 50 | }, 51 | } 52 | testRKEClusterCloudProviderConf = rancher.CloudProvider{ 53 | CustomCloudProvider: "XXXXXXXXXXXX", 54 | Name: "test", 55 | } 56 | testRKEClusterCloudProviderInterface = []interface{}{ 57 | map[string]interface{}{ 58 | "custom_cloud_provider": "XXXXXXXXXXXX", 59 | "name": "test", 60 | }, 61 | } 62 | } 63 | 64 | func TestFlattenRKEClusterCloudProvider(t *testing.T) { 65 
| 66 | cases := []struct { 67 | Input rancher.CloudProvider 68 | ExpectedOutput []interface{} 69 | }{ 70 | { 71 | testRKEClusterCloudProviderConfAzure, 72 | testRKEClusterCloudProviderInterfaceAzure, 73 | }, 74 | { 75 | testRKEClusterCloudProviderConfOpenstack, 76 | testRKEClusterCloudProviderInterfaceOpenstack, 77 | }, 78 | { 79 | testRKEClusterCloudProviderConfVsphere, 80 | testRKEClusterCloudProviderInterfaceVsphere, 81 | }, 82 | { 83 | testRKEClusterCloudProviderConf, 84 | testRKEClusterCloudProviderInterface, 85 | }, 86 | } 87 | 88 | for _, tc := range cases { 89 | output := flattenRKEClusterCloudProvider(tc.Input, tc.ExpectedOutput) 90 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 91 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 92 | tc.ExpectedOutput, output) 93 | } 94 | } 95 | } 96 | 97 | func TestExpandRKEClusterCloudProvider(t *testing.T) { 98 | 99 | cases := []struct { 100 | Input []interface{} 101 | ExpectedOutput rancher.CloudProvider 102 | }{ 103 | { 104 | testRKEClusterCloudProviderInterfaceAzure, 105 | testRKEClusterCloudProviderConfAzure, 106 | }, 107 | { 108 | testRKEClusterCloudProviderInterfaceOpenstack, 109 | testRKEClusterCloudProviderConfOpenstack, 110 | }, 111 | { 112 | testRKEClusterCloudProviderInterfaceVsphere, 113 | testRKEClusterCloudProviderConfVsphere, 114 | }, 115 | { 116 | testRKEClusterCloudProviderInterface, 117 | testRKEClusterCloudProviderConf, 118 | }, 119 | } 120 | 121 | for _, tc := range cases { 122 | output := expandRKEClusterCloudProvider(tc.Input) 123 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 124 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 125 | tc.ExpectedOutput, output) 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_cloud_provider_azure_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterCloudProviderAzureConf *rancher.AzureCloudProvider 12 | testRKEClusterCloudProviderAzureInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterCloudProviderAzureConf = &rancher.AzureCloudProvider{ 17 | AADClientID: "XXXXXXXX", 18 | AADClientSecret: "XXXXXXXXXXXX", 19 | SubscriptionID: "YYYYYYYY", 20 | TenantID: "ZZZZZZZZ", 21 | AADClientCertPassword: "password", 22 | AADClientCertPath: "/home/user/.ssh", 23 | Cloud: "cloud", 24 | CloudProviderBackoff: true, 25 | CloudProviderBackoffDuration: 30, 26 | CloudProviderBackoffExponent: 20, 27 | CloudProviderBackoffJitter: 10, 28 | CloudProviderBackoffRetries: 5, 29 | CloudProviderRateLimit: true, 30 | CloudProviderRateLimitBucket: 15, 31 | CloudProviderRateLimitQPS: 100, 32 | Location: "location", 33 | MaximumLoadBalancerRuleCount: 150, 34 | PrimaryAvailabilitySetName: "primary", 35 | PrimaryScaleSetName: "primary_scale", 36 | ResourceGroup: "resource_group", 37 | RouteTableName: "route_table_name", 38 | SecurityGroupName: "security_group_name", 39 | SubnetName: "subnet_name", 40 | UseInstanceMetadata: true, 41 | UseManagedIdentityExtension: true, 42 | VMType: "vm_type", 43 | VnetName: "vnet_name", 44 | VnetResourceGroup: "vnet_resource_group", 45 | } 46 | testRKEClusterCloudProviderAzureInterface = []interface{}{ 47 | map[string]interface{}{ 48 | "aad_client_id": "XXXXXXXX", 49 | "aad_client_secret": "XXXXXXXXXXXX", 50 | "subscription_id": 
"YYYYYYYY", 51 | "tenant_id": "ZZZZZZZZ", 52 | "aad_client_cert_password": "password", 53 | "aad_client_cert_path": "/home/user/.ssh", 54 | "cloud": "cloud", 55 | "cloud_provider_backoff": true, 56 | "cloud_provider_backoff_duration": 30, 57 | "cloud_provider_backoff_exponent": 20, 58 | "cloud_provider_backoff_jitter": 10, 59 | "cloud_provider_backoff_retries": 5, 60 | "cloud_provider_rate_limit": true, 61 | "cloud_provider_rate_limit_bucket": 15, 62 | "cloud_provider_rate_limit_qps": 100, 63 | "location": "location", 64 | "maximum_load_balancer_rule_count": 150, 65 | "primary_availability_set_name": "primary", 66 | "primary_scale_set_name": "primary_scale", 67 | "resource_group": "resource_group", 68 | "route_table_name": "route_table_name", 69 | "security_group_name": "security_group_name", 70 | "subnet_name": "subnet_name", 71 | "use_instance_metadata": true, 72 | "use_managed_identity_extension": true, 73 | "vm_type": "vm_type", 74 | "vnet_name": "vnet_name", 75 | "vnet_resource_group": "vnet_resource_group", 76 | }, 77 | } 78 | } 79 | 80 | func TestFlattenRKEClusterCloudProviderAzure(t *testing.T) { 81 | 82 | cases := []struct { 83 | Input *rancher.AzureCloudProvider 84 | ExpectedOutput []interface{} 85 | }{ 86 | { 87 | testRKEClusterCloudProviderAzureConf, 88 | testRKEClusterCloudProviderAzureInterface, 89 | }, 90 | } 91 | 92 | for _, tc := range cases { 93 | output := flattenRKEClusterCloudProviderAzure(tc.Input, testRKEClusterCloudProviderAzureInterface) 94 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 95 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 96 | tc.ExpectedOutput, output) 97 | } 98 | } 99 | } 100 | 101 | func TestExpandRKEClusterCloudProviderAzure(t *testing.T) { 102 | 103 | cases := []struct { 104 | Input []interface{} 105 | ExpectedOutput *rancher.AzureCloudProvider 106 | }{ 107 | { 108 | testRKEClusterCloudProviderAzureInterface, 109 | testRKEClusterCloudProviderAzureConf, 110 | }, 111 | } 112 | 113 | for _, tc := range cases { 114 | output := expandRKEClusterCloudProviderAzure(tc.Input) 115 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 116 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 117 | tc.ExpectedOutput, output) 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_cloud_provider_vsphere.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | const ( 8 | rkeClusterCloudProviderVsphereName = "vsphere" 9 | ) 10 | 11 | //Schemas 12 | 13 | func rkeClusterCloudProviderVsphereDiskFields() map[string]*schema.Schema { 14 | s := map[string]*schema.Schema{ 15 | "scsi_controller_type": { 16 | Type: schema.TypeString, 17 | Optional: true, 18 | }, 19 | } 20 | return s 21 | } 22 | 23 | func rkeClusterCloudProviderVsphereGlobalFields() map[string]*schema.Schema { 24 | s := map[string]*schema.Schema{ 25 | "datacenter": { 26 | Type: schema.TypeString, 27 | Optional: true, 28 | }, 29 | "datacenters": { 30 | Type: schema.TypeString, 31 | Optional: true, 32 | Computed: true, 33 | }, 34 | "datastore": { 35 | Type: schema.TypeString, 36 | Optional: true, 37 | }, 38 | "insecure_flag": { 39 | Type: schema.TypeBool, 40 | Optional: true, 41 | }, 42 | "password": { 43 | Type: schema.TypeString, 44 | Optional: true, 45 | Sensitive: true, 46 | }, 47 | "user": { 48 | Type: schema.TypeString, 49 | 
Optional: true, 50 | Sensitive: true, 51 | }, 52 | "port": { 53 | Type: schema.TypeString, 54 | Optional: true, 55 | }, 56 | "soap_roundtrip_count": { 57 | Type: schema.TypeInt, 58 | Optional: true, 59 | }, 60 | "working_dir": { 61 | Type: schema.TypeString, 62 | Optional: true, 63 | }, 64 | "vm_uuid": { 65 | Type: schema.TypeString, 66 | Optional: true, 67 | }, 68 | "vm_name": { 69 | Type: schema.TypeString, 70 | Optional: true, 71 | }, 72 | } 73 | return s 74 | } 75 | 76 | func rkeClusterCloudProviderVsphereNetworkFields() map[string]*schema.Schema { 77 | s := map[string]*schema.Schema{ 78 | "public_network": { 79 | Type: schema.TypeString, 80 | Optional: true, 81 | }, 82 | } 83 | return s 84 | } 85 | 86 | func rkeClusterCloudProviderVsphereVirtualCenterFields() map[string]*schema.Schema { 87 | s := map[string]*schema.Schema{ 88 | "datacenters": { 89 | Type: schema.TypeString, 90 | Required: true, 91 | }, 92 | "name": { // called server on original 93 | Type: schema.TypeString, 94 | Required: true, 95 | }, 96 | "password": { 97 | Type: schema.TypeString, 98 | Required: true, 99 | Sensitive: true, 100 | }, 101 | "user": { 102 | Type: schema.TypeString, 103 | Required: true, 104 | Sensitive: true, 105 | }, 106 | "port": { 107 | Type: schema.TypeString, 108 | Optional: true, 109 | }, 110 | "soap_roundtrip_count": { 111 | Type: schema.TypeInt, 112 | Optional: true, 113 | }, 114 | } 115 | return s 116 | } 117 | 118 | func rkeClusterCloudProviderVsphereWorkspaceFields() map[string]*schema.Schema { 119 | s := map[string]*schema.Schema{ 120 | "datacenter": { 121 | Type: schema.TypeString, 122 | Required: true, 123 | }, 124 | "server": { 125 | Type: schema.TypeString, 126 | Required: true, 127 | }, 128 | "default_datastore": { 129 | Type: schema.TypeString, 130 | Optional: true, 131 | }, 132 | "folder": { 133 | Type: schema.TypeString, 134 | Optional: true, 135 | }, 136 | "resourcepool_path": { 137 | Type: schema.TypeString, 138 | Optional: true, 139 | }, 140 | } 141 | return s 142 | } 143 | 144 | func rkeClusterCloudProviderVsphereFields() map[string]*schema.Schema { 145 | s := map[string]*schema.Schema{ 146 | "virtual_center": { 147 | Type: schema.TypeList, 148 | Required: true, 149 | Elem: &schema.Resource{ 150 | Schema: rkeClusterCloudProviderVsphereVirtualCenterFields(), 151 | }, 152 | }, 153 | "workspace": { 154 | Type: schema.TypeList, 155 | MaxItems: 1, 156 | Required: true, 157 | Elem: &schema.Resource{ 158 | Schema: rkeClusterCloudProviderVsphereWorkspaceFields(), 159 | }, 160 | }, 161 | "disk": { 162 | Type: schema.TypeList, 163 | MaxItems: 1, 164 | Optional: true, 165 | Computed: true, 166 | Elem: &schema.Resource{ 167 | Schema: rkeClusterCloudProviderVsphereDiskFields(), 168 | }, 169 | }, 170 | "global": { 171 | Type: schema.TypeList, 172 | MaxItems: 1, 173 | Optional: true, 174 | Computed: true, 175 | Elem: &schema.Resource{ 176 | Schema: rkeClusterCloudProviderVsphereGlobalFields(), 177 | }, 178 | }, 179 | "network": { 180 | Type: schema.TypeList, 181 | MaxItems: 1, 182 | Optional: true, 183 | Computed: true, 184 | Elem: &schema.Resource{ 185 | Schema: rkeClusterCloudProviderVsphereNetworkFields(), 186 | }, 187 | }, 188 | } 189 | return s 190 | } 191 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_cloud_provider_openstack.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | ) 6 | 7 | const ( 
8 | rkeClusterCloudProviderOpenstackName = "openstack" 9 | ) 10 | 11 | //Schemas 12 | 13 | func rkeClusterCloudProviderOpenstackBlockStorageFields() map[string]*schema.Schema { 14 | s := map[string]*schema.Schema{ 15 | "bs_version": { 16 | Type: schema.TypeString, 17 | Optional: true, 18 | }, 19 | "ignore_volume_az": { 20 | Type: schema.TypeBool, 21 | Optional: true, 22 | }, 23 | "trust_device_path": { 24 | Type: schema.TypeBool, 25 | Optional: true, 26 | }, 27 | } 28 | 29 | return s 30 | } 31 | 32 | func rkeClusterCloudProviderOpenstackGlobalFields() map[string]*schema.Schema { 33 | s := map[string]*schema.Schema{ 34 | "auth_url": { 35 | Type: schema.TypeString, 36 | Required: true, 37 | }, 38 | "password": { 39 | Type: schema.TypeString, 40 | Required: true, 41 | Sensitive: true, 42 | }, 43 | "ca_file": { 44 | Type: schema.TypeString, 45 | Optional: true, 46 | }, 47 | "domain_id": { 48 | Type: schema.TypeString, 49 | Optional: true, 50 | Sensitive: true, 51 | }, 52 | "domain_name": { 53 | Type: schema.TypeString, 54 | Optional: true, 55 | }, 56 | "region": { 57 | Type: schema.TypeString, 58 | Optional: true, 59 | }, 60 | "tenant_id": { 61 | Type: schema.TypeString, 62 | Optional: true, 63 | Sensitive: true, 64 | }, 65 | "tenant_name": { 66 | Type: schema.TypeString, 67 | Optional: true, 68 | }, 69 | "trust_id": { 70 | Type: schema.TypeString, 71 | Optional: true, 72 | Sensitive: true, 73 | }, 74 | "username": { 75 | Type: schema.TypeString, 76 | Optional: true, 77 | }, 78 | "user_id": { 79 | Type: schema.TypeString, 80 | Optional: true, 81 | Sensitive: true, 82 | }, 83 | } 84 | return s 85 | } 86 | 87 | func rkeClusterCloudProviderOpenstackLoadBalancerFields() map[string]*schema.Schema { 88 | s := map[string]*schema.Schema{ 89 | "create_monitor": { 90 | Type: schema.TypeBool, 91 | Optional: true, 92 | }, 93 | "floating_network_id": { 94 | Type: schema.TypeString, 95 | Optional: true, 96 | }, 97 | "lb_method": { 98 | Type: schema.TypeString, 99 | Optional: true, 100 | }, 101 | "lb_provider": { 102 | Type: schema.TypeString, 103 | Optional: true, 104 | }, 105 | "lb_version": { 106 | Type: schema.TypeString, 107 | Optional: true, 108 | }, 109 | "manage_security_groups": { 110 | Type: schema.TypeBool, 111 | Optional: true, 112 | }, 113 | "monitor_delay": { 114 | Type: schema.TypeString, 115 | Optional: true, 116 | }, 117 | "monitor_max_retries": { 118 | Type: schema.TypeInt, 119 | Optional: true, 120 | }, 121 | "monitor_timeout": { 122 | Type: schema.TypeString, 123 | Optional: true, 124 | }, 125 | "subnet_id": { 126 | Type: schema.TypeString, 127 | Optional: true, 128 | }, 129 | "use_octavia": { 130 | Type: schema.TypeBool, 131 | Optional: true, 132 | }, 133 | } 134 | return s 135 | } 136 | 137 | func rkeClusterCloudProviderOpenstackMetadataFields() map[string]*schema.Schema { 138 | s := map[string]*schema.Schema{ 139 | "request_timeout": { 140 | Type: schema.TypeInt, 141 | Optional: true, 142 | }, 143 | "search_order": { 144 | Type: schema.TypeString, 145 | Optional: true, 146 | }, 147 | } 148 | return s 149 | } 150 | 151 | func rkeClusterCloudProviderOpenstackRouteFields() map[string]*schema.Schema { 152 | s := map[string]*schema.Schema{ 153 | "router_id": { 154 | Type: schema.TypeString, 155 | Optional: true, 156 | }, 157 | } 158 | return s 159 | } 160 | 161 | func rkeClusterCloudProviderOpenstackFields() map[string]*schema.Schema { 162 | s := map[string]*schema.Schema{ 163 | "global": { 164 | Type: schema.TypeList, 165 | MaxItems: 1, 166 | Required: true, 167 | Elem: &schema.Resource{ 
168 | Schema: rkeClusterCloudProviderOpenstackGlobalFields(), 169 | }, 170 | }, 171 | "block_storage": { 172 | Type: schema.TypeList, 173 | MaxItems: 1, 174 | Optional: true, 175 | Computed: true, 176 | Elem: &schema.Resource{ 177 | Schema: rkeClusterCloudProviderOpenstackBlockStorageFields(), 178 | }, 179 | }, 180 | "load_balancer": { 181 | Type: schema.TypeList, 182 | MaxItems: 1, 183 | Optional: true, 184 | Computed: true, 185 | Elem: &schema.Resource{ 186 | Schema: rkeClusterCloudProviderOpenstackLoadBalancerFields(), 187 | }, 188 | }, 189 | "metadata": { 190 | Type: schema.TypeList, 191 | MaxItems: 1, 192 | Optional: true, 193 | Computed: true, 194 | Elem: &schema.Resource{ 195 | Schema: rkeClusterCloudProviderOpenstackMetadataFields(), 196 | }, 197 | }, 198 | "route": { 199 | Type: schema.TypeList, 200 | MaxItems: 1, 201 | Optional: true, 202 | Computed: true, 203 | Elem: &schema.Resource{ 204 | Schema: rkeClusterCloudProviderOpenstackRouteFields(), 205 | }, 206 | }, 207 | } 208 | return s 209 | } 210 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_node.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | var ( 9 | rkeClusterNodesRoles = []string{"controlplane", "etcd", "worker"} 10 | ) 11 | 12 | //Schemas 13 | 14 | func rkeClusterNodeFields() map[string]*schema.Schema { 15 | s := map[string]*schema.Schema{ 16 | "address": { 17 | Type: schema.TypeString, 18 | Required: true, 19 | Description: "IP or FQDN that is fully resolvable and used for SSH communication", 20 | }, 21 | "role": { 22 | Type: schema.TypeList, 23 | Required: true, 24 | Description: "Node roles in k8s cluster [controlplane/worker/etcd])", 25 | Elem: &schema.Schema{ 26 | Type: schema.TypeString, 27 | ValidateFunc: validation.StringInSlice(rkeClusterNodesRoles, true), 28 | }, 29 | }, 30 | "roles": { 31 | Type: schema.TypeString, 32 | Optional: true, 33 | Deprecated: "Use role instead", 34 | Description: "Node role in kubernetes cluster [controlplane/worker/etcd], specified by a comma-separated string", 35 | ValidateFunc: validation.StringInSlice(rkeClusterNodesRoles, true), 36 | }, 37 | "user": { 38 | Type: schema.TypeString, 39 | Required: true, 40 | Sensitive: true, 41 | Description: "SSH user that will be used by RKE", 42 | }, 43 | "docker_socket": { 44 | Type: schema.TypeString, 45 | Optional: true, 46 | Description: "Docker socket on the node that will be used in tunneling", 47 | }, 48 | "hostname_override": { 49 | Type: schema.TypeString, 50 | Optional: true, 51 | Description: "Hostname override", 52 | }, 53 | "internal_address": { 54 | Type: schema.TypeString, 55 | Optional: true, 56 | Description: "Internal address that will be used for components communication", 57 | }, 58 | "labels": { 59 | Type: schema.TypeMap, 60 | Optional: true, 61 | Description: "Node Labels", 62 | }, 63 | "node_name": { 64 | Type: schema.TypeString, 65 | Optional: true, 66 | Description: "Name of the host provisioned via docker machine", 67 | }, 68 | "port": { 69 | Type: schema.TypeString, 70 | Optional: true, 71 | Description: "Port used for SSH communication", 72 | }, 73 | "ssh_agent_auth": { 74 | Type: schema.TypeBool, 75 | Optional: true, 76 | Computed: true, 77 | Description: "SSH Agent Auth enable", 78 | }, 79 | "ssh_cert": { 80 | Type: schema.TypeString, 81 | 
Optional: true, 82 | Sensitive: true, 83 | Description: "SSH Certificate", 84 | }, 85 | "ssh_cert_path": { 86 | Type: schema.TypeString, 87 | Optional: true, 88 | Description: "SSH Certificate path", 89 | }, 90 | "ssh_key": { 91 | Type: schema.TypeString, 92 | Optional: true, 93 | Sensitive: true, 94 | Description: "SSH Private Key", 95 | }, 96 | "ssh_key_path": { 97 | Type: schema.TypeString, 98 | Optional: true, 99 | Description: "SSH Private Key path", 100 | }, 101 | "taints": { 102 | Type: schema.TypeList, 103 | Optional: true, 104 | Description: "Node taints", 105 | Elem: &schema.Resource{ 106 | Schema: rkeClusterTaintFields(), 107 | }, 108 | }, 109 | } 110 | return s 111 | } 112 | 113 | func rkeClusterNodeComputedFields() map[string]*schema.Schema { 114 | s := map[string]*schema.Schema{ 115 | "node_name": { 116 | Type: schema.TypeString, 117 | Computed: true, 118 | }, 119 | "address": { 120 | Type: schema.TypeString, 121 | Computed: true, 122 | }, 123 | } 124 | return s 125 | } 126 | 127 | func rkeClusterNodeDrainInputFields() map[string]*schema.Schema { 128 | s := map[string]*schema.Schema{ 129 | "delete_local_data": { 130 | Type: schema.TypeBool, 131 | Optional: true, 132 | Computed: true, 133 | }, 134 | "force": { 135 | Type: schema.TypeBool, 136 | Optional: true, 137 | Computed: true, 138 | }, 139 | "grace_period": { 140 | Type: schema.TypeInt, 141 | Optional: true, 142 | Computed: true, 143 | }, 144 | "ignore_daemon_sets": { 145 | Type: schema.TypeBool, 146 | Optional: true, 147 | Computed: true, 148 | }, 149 | "timeout": { 150 | Type: schema.TypeInt, 151 | Optional: true, 152 | Computed: true, 153 | ValidateFunc: validation.IntBetween(1, 10800), 154 | }, 155 | } 156 | return s 157 | } 158 | 159 | func rkeClusterNodeUpgradeStrategyFields() map[string]*schema.Schema { 160 | s := map[string]*schema.Schema{ 161 | "drain": { 162 | Type: schema.TypeBool, 163 | Optional: true, 164 | Computed: true, 165 | }, 166 | "drain_input": { 167 | Type: schema.TypeList, 168 | MaxItems: 1, 169 | Optional: true, 170 | Computed: true, 171 | Elem: &schema.Resource{ 172 | Schema: rkeClusterNodeDrainInputFields(), 173 | }, 174 | }, 175 | "max_unavailable_controlplane": { 176 | Type: schema.TypeString, 177 | Optional: true, 178 | Computed: true, 179 | }, 180 | "max_unavailable_worker": { 181 | Type: schema.TypeString, 182 | Optional: true, 183 | Computed: true, 184 | }, 185 | } 186 | return s 187 | } 188 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_system_images_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterSystemImagesConf rancher.RKESystemImages 12 | testRKEClusterSystemImagesInterface []interface{} 13 | ) 14 | 15 | func init() { 16 | testRKEClusterSystemImagesConf = rancher.RKESystemImages{ 17 | Etcd: "etcd", 18 | Alpine: "alpine", 19 | NginxProxy: "nginx_proxy", 20 | CertDownloader: "cert_downloader", 21 | KubernetesServicesSidecar: "kubernetes_services_sidecar", 22 | KubeDNS: "kube_dns", 23 | DNSmasq: "dnsmasq", 24 | KubeDNSSidecar: "kube_dns_sidecar", 25 | KubeDNSAutoscaler: "kube_dns_autoscaler", 26 | CoreDNS: "coredns", 27 | CoreDNSAutoscaler: "coredns_autoscaler", 28 | Kubernetes: "kubernetes", 29 | Flannel: "flannel", 30 | FlannelCNI: "flannel_cni", 31 | CalicoNode: "calico_node", 32 | CalicoCNI: "calico_cni", 33 | CalicoControllers: 
"calico_controllers", 34 | CalicoCtl: "calico_ctl", 35 | CalicoFlexVol: "calico_flex_vol", 36 | CanalNode: "canal_node", 37 | CanalCNI: "canal_cni", 38 | CanalFlannel: "canal_flannel", 39 | CanalFlexVol: "canal_flex_vol", 40 | WeaveNode: "weave_node", 41 | WeaveCNI: "weave_cni", 42 | PodInfraContainer: "pod_infra_container", 43 | Ingress: "ingress", 44 | IngressBackend: "ingress_backend", 45 | MetricsServer: "metrics_server", 46 | WindowsPodInfraContainer: "windows_pod_infra_container", 47 | Nodelocal: "nodelocal", 48 | AciCniDeployContainer: "aci_cni_deploy_container", 49 | AciHostContainer: "aci_host_container", 50 | AciOpflexContainer: "aci_opflex_container", 51 | AciMcastContainer: "aci_mcast_container", 52 | AciOpenvSwitchContainer: "aci_ovs_container", 53 | AciControllerContainer: "aci_controller_container", 54 | } 55 | testRKEClusterSystemImagesInterface = []interface{}{ 56 | map[string]interface{}{ 57 | "etcd": "etcd", 58 | "alpine": "alpine", 59 | "nginx_proxy": "nginx_proxy", 60 | "cert_downloader": "cert_downloader", 61 | "kubernetes_services_sidecar": "kubernetes_services_sidecar", 62 | "kube_dns": "kube_dns", 63 | "dnsmasq": "dnsmasq", 64 | "kube_dns_sidecar": "kube_dns_sidecar", 65 | "kube_dns_autoscaler": "kube_dns_autoscaler", 66 | "coredns": "coredns", 67 | "coredns_autoscaler": "coredns_autoscaler", 68 | "kubernetes": "kubernetes", 69 | "flannel": "flannel", 70 | "flannel_cni": "flannel_cni", 71 | "calico_node": "calico_node", 72 | "calico_cni": "calico_cni", 73 | "calico_controllers": "calico_controllers", 74 | "calico_ctl": "calico_ctl", 75 | "calico_flex_vol": "calico_flex_vol", 76 | "canal_node": "canal_node", 77 | "canal_cni": "canal_cni", 78 | "canal_flannel": "canal_flannel", 79 | "canal_flex_vol": "canal_flex_vol", 80 | "weave_node": "weave_node", 81 | "weave_cni": "weave_cni", 82 | "pod_infra_container": "pod_infra_container", 83 | "ingress": "ingress", 84 | "ingress_backend": "ingress_backend", 85 | "metrics_server": "metrics_server", 86 | "windows_pod_infra_container": "windows_pod_infra_container", 87 | "nodelocal": "nodelocal", 88 | "aci_cni_deploy_container": "aci_cni_deploy_container", 89 | "aci_host_container": "aci_host_container", 90 | "aci_opflex_container": "aci_opflex_container", 91 | "aci_mcast_container": "aci_mcast_container", 92 | "aci_ovs_container": "aci_ovs_container", 93 | "aci_controller_container": "aci_controller_container", 94 | }, 95 | } 96 | } 97 | 98 | func TestFlattenRKEClusterSystemImages(t *testing.T) { 99 | 100 | cases := []struct { 101 | Input rancher.RKESystemImages 102 | ExpectedOutput []interface{} 103 | }{ 104 | { 105 | testRKEClusterSystemImagesConf, 106 | testRKEClusterSystemImagesInterface, 107 | }, 108 | } 109 | 110 | for _, tc := range cases { 111 | output := flattenRKEClusterSystemImages(tc.Input) 112 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 113 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 114 | tc.ExpectedOutput, output) 115 | } 116 | } 117 | } 118 | 119 | func TestExpandRKEClusterSystemImages(t *testing.T) { 120 | 121 | cases := []struct { 122 | Input []interface{} 123 | ExpectedOutput rancher.RKESystemImages 124 | }{ 125 | { 126 | testRKEClusterSystemImagesInterface, 127 | testRKEClusterSystemImagesConf, 128 | }, 129 | } 130 | 131 | for _, tc := range cases { 132 | output := expandRKEClusterSystemImages(tc.Input) 133 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 134 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 135 | tc.ExpectedOutput, output) 
136 | } 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_cloud_provider_aws.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterCloudProviderAwsGlobal(in rancher.GlobalAwsOpts) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | obj["disable_security_group_ingress"] = in.DisableSecurityGroupIngress 13 | obj["disable_strict_zone_check"] = in.DisableStrictZoneCheck 14 | 15 | if len(in.ElbSecurityGroup) > 0 { 16 | obj["elb_security_group"] = in.ElbSecurityGroup 17 | } 18 | 19 | if len(in.KubernetesClusterID) > 0 { 20 | obj["kubernetes_cluster_id"] = in.KubernetesClusterID 21 | } 22 | 23 | if len(in.KubernetesClusterTag) > 0 { 24 | obj["kubernetes_cluster_tag"] = in.KubernetesClusterTag 25 | } 26 | 27 | if len(in.RoleARN) > 0 { 28 | obj["role_arn"] = in.RoleARN 29 | } 30 | 31 | if len(in.RouteTableID) > 0 { 32 | obj["route_table_id"] = in.RouteTableID 33 | } 34 | 35 | if len(in.SubnetID) > 0 { 36 | obj["subnet_id"] = in.SubnetID 37 | } 38 | 39 | if len(in.VPC) > 0 { 40 | obj["vpc"] = in.VPC 41 | } 42 | 43 | if len(in.Zone) > 0 { 44 | obj["zone"] = in.Zone 45 | } 46 | 47 | return []interface{}{obj} 48 | } 49 | 50 | func flattenRKEClusterCloudProviderAwsServiceOverride(in map[string]rancher.ServiceOverride) []interface{} { 51 | if len(in) == 0 { 52 | return []interface{}{} 53 | } 54 | 55 | out := make([]interface{}, len(in)) 56 | i := 0 57 | for key := range in { 58 | obj := make(map[string]interface{}) 59 | if len(in[key].Region) > 0 { 60 | obj["region"] = in[key].Region 61 | } 62 | 63 | if len(in[key].Service) > 0 { 64 | obj["service"] = in[key].Service 65 | } 66 | 67 | if len(in[key].SigningMethod) > 0 { 68 | obj["signing_method"] = in[key].SigningMethod 69 | } 70 | 71 | if len(in[key].SigningName) > 0 { 72 | obj["signing_name"] = in[key].SigningName 73 | } 74 | 75 | if len(in[key].SigningRegion) > 0 { 76 | obj["signing_region"] = in[key].SigningRegion 77 | } 78 | 79 | if len(in[key].URL) > 0 { 80 | obj["url"] = in[key].URL 81 | } 82 | out[i] = obj 83 | i++ 84 | } 85 | 86 | return out 87 | } 88 | 89 | func flattenRKEClusterCloudProviderAws(in *rancher.AWSCloudProvider) []interface{} { 90 | obj := make(map[string]interface{}) 91 | if in == nil { 92 | return []interface{}{} 93 | } 94 | 95 | obj["global"] = flattenRKEClusterCloudProviderAwsGlobal(in.Global) 96 | 97 | if len(in.ServiceOverride) > 0 { 98 | obj["service_override"] = flattenRKEClusterCloudProviderAwsServiceOverride(in.ServiceOverride) 99 | } 100 | 101 | return []interface{}{obj} 102 | } 103 | 104 | // Expanders 105 | 106 | func expandRKEClusterCloudProviderAwsGlobal(p []interface{}) rancher.GlobalAwsOpts { 107 | obj := rancher.GlobalAwsOpts{} 108 | if len(p) == 0 || p[0] == nil { 109 | return obj 110 | } 111 | in := p[0].(map[string]interface{}) 112 | 113 | if v, ok := in["disable_security_group_ingress"].(bool); ok { 114 | obj.DisableSecurityGroupIngress = v 115 | } 116 | 117 | if v, ok := in["disable_strict_zone_check"].(bool); ok { 118 | obj.DisableStrictZoneCheck = v 119 | } 120 | 121 | if v, ok := in["elb_security_group"].(string); ok && len(v) > 0 { 122 | obj.ElbSecurityGroup = v 123 | } 124 | 125 | if v, ok := in["kubernetes_cluster_id"].(string); ok && len(v) > 0 { 126 | obj.KubernetesClusterID = v 127 | } 128 | 129 | if v, ok := 
in["kubernetes_cluster_tag"].(string); ok && len(v) > 0 { 130 | obj.KubernetesClusterTag = v 131 | } 132 | 133 | if v, ok := in["role_arn"].(string); ok && len(v) > 0 { 134 | obj.RoleARN = v 135 | } 136 | 137 | if v, ok := in["route_table_id"].(string); ok && len(v) > 0 { 138 | obj.RouteTableID = v 139 | } 140 | 141 | if v, ok := in["subnet_id"].(string); ok && len(v) > 0 { 142 | obj.SubnetID = v 143 | } 144 | 145 | if v, ok := in["vpc"].(string); ok && len(v) > 0 { 146 | obj.VPC = v 147 | } 148 | 149 | if v, ok := in["zone"].(string); ok && len(v) > 0 { 150 | obj.Zone = v 151 | } 152 | 153 | return obj 154 | } 155 | 156 | func expandRKEClusterCloudProviderAwsServiceOverride(p []interface{}) map[string]rancher.ServiceOverride { 157 | if len(p) == 0 || p[0] == nil { 158 | return map[string]rancher.ServiceOverride{} 159 | } 160 | 161 | obj := make(map[string]rancher.ServiceOverride) 162 | 163 | for i := range p { 164 | in := p[i].(map[string]interface{}) 165 | aux := rancher.ServiceOverride{} 166 | key := in["service"].(string) 167 | 168 | if v, ok := in["region"].(string); ok && len(v) > 0 { 169 | aux.Region = v 170 | } 171 | 172 | if v, ok := in["service"].(string); ok && len(v) > 0 { 173 | aux.Service = v 174 | } 175 | 176 | if v, ok := in["signing_method"].(string); ok && len(v) > 0 { 177 | aux.SigningMethod = v 178 | } 179 | 180 | if v, ok := in["signing_name"].(string); ok && len(v) > 0 { 181 | aux.SigningName = v 182 | } 183 | 184 | if v, ok := in["signing_region"].(string); ok && len(v) > 0 { 185 | aux.SigningRegion = v 186 | } 187 | 188 | if v, ok := in["url"].(string); ok && len(v) > 0 { 189 | aux.URL = v 190 | } 191 | obj[key] = aux 192 | } 193 | return obj 194 | } 195 | 196 | func expandRKEClusterCloudProviderAws(p []interface{}) *rancher.AWSCloudProvider { 197 | obj := &rancher.AWSCloudProvider{} 198 | if len(p) == 0 || p[0] == nil { 199 | return obj 200 | } 201 | in := p[0].(map[string]interface{}) 202 | 203 | if v, ok := in["global"].([]interface{}); ok && len(v) > 0 { 204 | obj.Global = expandRKEClusterCloudProviderAwsGlobal(v) 205 | } 206 | 207 | if v, ok := in["service_override"].([]interface{}); ok && len(v) > 0 { 208 | obj.ServiceOverride = expandRKEClusterCloudProviderAwsServiceOverride(v) 209 | } 210 | 211 | return obj 212 | } 213 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_system_images.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | rancher "github.com/rancher/rke/types" 5 | ) 6 | 7 | // Flatteners 8 | 9 | func flattenRKEClusterSystemImages(in rancher.RKESystemImages) []interface{} { 10 | obj := make(map[string]interface{}) 11 | 12 | obj["etcd"] = in.Etcd 13 | obj["alpine"] = in.Alpine 14 | obj["nginx_proxy"] = in.NginxProxy 15 | obj["cert_downloader"] = in.CertDownloader 16 | obj["kubernetes_services_sidecar"] = in.KubernetesServicesSidecar 17 | obj["kube_dns"] = in.KubeDNS 18 | obj["dnsmasq"] = in.DNSmasq 19 | obj["kube_dns_sidecar"] = in.KubeDNSSidecar 20 | obj["kube_dns_autoscaler"] = in.KubeDNSAutoscaler 21 | obj["coredns"] = in.CoreDNS 22 | obj["coredns_autoscaler"] = in.CoreDNSAutoscaler 23 | obj["kubernetes"] = in.Kubernetes 24 | obj["flannel"] = in.Flannel 25 | obj["flannel_cni"] = in.FlannelCNI 26 | obj["calico_node"] = in.CalicoNode 27 | obj["calico_cni"] = in.CalicoCNI 28 | obj["calico_controllers"] = in.CalicoControllers 29 | obj["calico_ctl"] = in.CalicoCtl 30 | obj["calico_flex_vol"] = in.CalicoFlexVol 31 | 
obj["canal_node"] = in.CanalNode 32 | obj["canal_cni"] = in.CanalCNI 33 | obj["canal_flannel"] = in.CanalFlannel 34 | obj["canal_flex_vol"] = in.CanalFlexVol 35 | obj["weave_node"] = in.WeaveNode 36 | obj["weave_cni"] = in.WeaveCNI 37 | obj["pod_infra_container"] = in.PodInfraContainer 38 | obj["ingress"] = in.Ingress 39 | obj["ingress_backend"] = in.IngressBackend 40 | obj["metrics_server"] = in.MetricsServer 41 | obj["windows_pod_infra_container"] = in.WindowsPodInfraContainer 42 | obj["nodelocal"] = in.Nodelocal 43 | obj["aci_cni_deploy_container"] = in.AciCniDeployContainer 44 | obj["aci_host_container"] = in.AciHostContainer 45 | obj["aci_opflex_container"] = in.AciOpflexContainer 46 | obj["aci_mcast_container"] = in.AciMcastContainer 47 | obj["aci_ovs_container"] = in.AciOpenvSwitchContainer 48 | obj["aci_controller_container"] = in.AciControllerContainer 49 | 50 | return []interface{}{obj} 51 | } 52 | 53 | // Expanders 54 | 55 | func expandRKEClusterSystemImages(p []interface{}) rancher.RKESystemImages { 56 | obj := rancher.RKESystemImages{} 57 | if p == nil || len(p) == 0 || p[0] == nil { 58 | return obj 59 | } 60 | in := p[0].(map[string]interface{}) 61 | 62 | if v, ok := in["etcd"].(string); ok && len(v) > 0 { 63 | obj.Etcd = v 64 | } 65 | 66 | if v, ok := in["alpine"].(string); ok && len(v) > 0 { 67 | obj.Alpine = v 68 | } 69 | 70 | if v, ok := in["nginx_proxy"].(string); ok && len(v) > 0 { 71 | obj.NginxProxy = v 72 | } 73 | 74 | if v, ok := in["cert_downloader"].(string); ok && len(v) > 0 { 75 | obj.CertDownloader = v 76 | } 77 | 78 | if v, ok := in["kubernetes_services_sidecar"].(string); ok && len(v) > 0 { 79 | obj.KubernetesServicesSidecar = v 80 | } 81 | 82 | if v, ok := in["kube_dns"].(string); ok && len(v) > 0 { 83 | obj.KubeDNS = v 84 | } 85 | 86 | if v, ok := in["dnsmasq"].(string); ok && len(v) > 0 { 87 | obj.DNSmasq = v 88 | } 89 | 90 | if v, ok := in["kube_dns_sidecar"].(string); ok && len(v) > 0 { 91 | obj.KubeDNSSidecar = v 92 | } 93 | 94 | if v, ok := in["kube_dns_autoscaler"].(string); ok && len(v) > 0 { 95 | obj.KubeDNSAutoscaler = v 96 | } 97 | 98 | if v, ok := in["coredns"].(string); ok && len(v) > 0 { 99 | obj.CoreDNS = v 100 | } 101 | 102 | if v, ok := in["coredns_autoscaler"].(string); ok && len(v) > 0 { 103 | obj.CoreDNSAutoscaler = v 104 | } 105 | 106 | if v, ok := in["kubernetes"].(string); ok && len(v) > 0 { 107 | obj.Kubernetes = v 108 | } 109 | 110 | if v, ok := in["flannel"].(string); ok && len(v) > 0 { 111 | obj.Flannel = v 112 | } 113 | 114 | if v, ok := in["flannel_cni"].(string); ok && len(v) > 0 { 115 | obj.FlannelCNI = v 116 | } 117 | 118 | if v, ok := in["calico_node"].(string); ok && len(v) > 0 { 119 | obj.CalicoNode = v 120 | } 121 | 122 | if v, ok := in["calico_cni"].(string); ok && len(v) > 0 { 123 | obj.CalicoCNI = v 124 | } 125 | 126 | if v, ok := in["calico_controllers"].(string); ok && len(v) > 0 { 127 | obj.CalicoControllers = v 128 | } 129 | 130 | if v, ok := in["calico_ctl"].(string); ok && len(v) > 0 { 131 | obj.CalicoCtl = v 132 | } 133 | 134 | if v, ok := in["calico_flex_vol"].(string); ok && len(v) > 0 { 135 | obj.CalicoFlexVol = v 136 | } 137 | 138 | if v, ok := in["canal_node"].(string); ok && len(v) > 0 { 139 | obj.CanalNode = v 140 | } 141 | 142 | if v, ok := in["canal_cni"].(string); ok && len(v) > 0 { 143 | obj.CanalCNI = v 144 | } 145 | 146 | if v, ok := in["canal_flannel"].(string); ok && len(v) > 0 { 147 | obj.CanalFlannel = v 148 | } 149 | 150 | if v, ok := in["canal_flex_vol"].(string); ok && len(v) > 0 { 151 | 
obj.CanalFlexVol = v 152 | } 153 | 154 | if v, ok := in["weave_node"].(string); ok && len(v) > 0 { 155 | obj.WeaveNode = v 156 | } 157 | 158 | if v, ok := in["weave_cni"].(string); ok && len(v) > 0 { 159 | obj.WeaveCNI = v 160 | } 161 | 162 | if v, ok := in["pod_infra_container"].(string); ok && len(v) > 0 { 163 | obj.PodInfraContainer = v 164 | } 165 | 166 | if v, ok := in["ingress"].(string); ok && len(v) > 0 { 167 | obj.Ingress = v 168 | } 169 | 170 | if v, ok := in["ingress_backend"].(string); ok && len(v) > 0 { 171 | obj.IngressBackend = v 172 | } 173 | 174 | if v, ok := in["metrics_server"].(string); ok && len(v) > 0 { 175 | obj.MetricsServer = v 176 | } 177 | 178 | if v, ok := in["windows_pod_infra_container"].(string); ok && len(v) > 0 { 179 | obj.WindowsPodInfraContainer = v 180 | } 181 | 182 | if v, ok := in["nodelocal"].(string); ok && len(v) > 0 { 183 | obj.Nodelocal = v 184 | } 185 | 186 | if v, ok := in["aci_cni_deploy_container"].(string); ok && len(v) > 0 { 187 | obj.AciCniDeployContainer = v 188 | } 189 | 190 | if v, ok := in["aci_host_container"].(string); ok && len(v) > 0 { 191 | obj.AciHostContainer = v 192 | } 193 | 194 | if v, ok := in["aci_opflex_container"].(string); ok && len(v) > 0 { 195 | obj.AciOpflexContainer = v 196 | } 197 | 198 | if v, ok := in["aci_mcast_container"].(string); ok && len(v) > 0 { 199 | obj.AciMcastContainer = v 200 | } 201 | 202 | if v, ok := in["aci_ovs_container"].(string); ok && len(v) > 0 { 203 | obj.AciOpenvSwitchContainer = v 204 | } 205 | 206 | if v, ok := in["aci_controller_container"].(string); ok && len(v) > 0 { 207 | obj.AciControllerContainer = v 208 | } 209 | 210 | return obj 211 | } 212 | -------------------------------------------------------------------------------- /rke/structure_rke_cluster_cloud_provider_aws_test.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | rancher "github.com/rancher/rke/types" 8 | ) 9 | 10 | var ( 11 | testRKEClusterCloudProviderAwsGlobalConf rancher.GlobalAwsOpts 12 | testRKEClusterCloudProviderAwsGlobalInterface []interface{} 13 | testRKEClusterCloudProviderAwsServiceOverrideConf map[string]rancher.ServiceOverride 14 | testRKEClusterCloudProviderAwsServiceOverrideInterface []interface{} 15 | testRKEClusterCloudProviderAwsConf *rancher.AWSCloudProvider 16 | testRKEClusterCloudProviderAwsInterface []interface{} 17 | ) 18 | 19 | func init() { 20 | testRKEClusterCloudProviderAwsGlobalConf = rancher.GlobalAwsOpts{ 21 | DisableSecurityGroupIngress: true, 22 | DisableStrictZoneCheck: true, 23 | ElbSecurityGroup: "elb_group", 24 | KubernetesClusterID: "k8s_id", 25 | KubernetesClusterTag: "k8s_tag", 26 | RoleARN: "role_arn", 27 | RouteTableID: "route_table_id", 28 | SubnetID: "subnet_id", 29 | VPC: "vpc", 30 | Zone: "zone", 31 | } 32 | testRKEClusterCloudProviderAwsGlobalInterface = []interface{}{ 33 | map[string]interface{}{ 34 | "disable_security_group_ingress": true, 35 | "disable_strict_zone_check": true, 36 | "elb_security_group": "elb_group", 37 | "kubernetes_cluster_id": "k8s_id", 38 | "kubernetes_cluster_tag": "k8s_tag", 39 | "role_arn": "role_arn", 40 | "route_table_id": "route_table_id", 41 | "subnet_id": "subnet_id", 42 | "vpc": "vpc", 43 | "zone": "zone", 44 | }, 45 | } 46 | testRKEClusterCloudProviderAwsServiceOverrideConf = map[string]rancher.ServiceOverride{ 47 | "service": { 48 | Region: "region", 49 | Service: "service", 50 | SigningMethod: "signing_method", 51 | SigningName: 
"signing_name", 52 | SigningRegion: "signing_region", 53 | URL: "url", 54 | }, 55 | } 56 | testRKEClusterCloudProviderAwsServiceOverrideInterface = []interface{}{ 57 | map[string]interface{}{ 58 | "region": "region", 59 | "service": "service", 60 | "signing_method": "signing_method", 61 | "signing_name": "signing_name", 62 | "signing_region": "signing_region", 63 | "url": "url", 64 | }, 65 | } 66 | testRKEClusterCloudProviderAwsConf = &rancher.AWSCloudProvider{ 67 | Global: testRKEClusterCloudProviderAwsGlobalConf, 68 | ServiceOverride: testRKEClusterCloudProviderAwsServiceOverrideConf, 69 | } 70 | testRKEClusterCloudProviderAwsInterface = []interface{}{ 71 | map[string]interface{}{ 72 | "global": testRKEClusterCloudProviderAwsGlobalInterface, 73 | "service_override": testRKEClusterCloudProviderAwsServiceOverrideInterface, 74 | }, 75 | } 76 | } 77 | 78 | func TestFlattenRKEClusterCloudProviderAwsGlobal(t *testing.T) { 79 | 80 | cases := []struct { 81 | Input rancher.GlobalAwsOpts 82 | ExpectedOutput []interface{} 83 | }{ 84 | { 85 | testRKEClusterCloudProviderAwsGlobalConf, 86 | testRKEClusterCloudProviderAwsGlobalInterface, 87 | }, 88 | } 89 | 90 | for _, tc := range cases { 91 | output := flattenRKEClusterCloudProviderAwsGlobal(tc.Input) 92 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 93 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 94 | tc.ExpectedOutput, output) 95 | } 96 | } 97 | } 98 | 99 | func TestFlattenRKEClusterCloudProviderAwsServiceOverride(t *testing.T) { 100 | 101 | cases := []struct { 102 | Input map[string]rancher.ServiceOverride 103 | ExpectedOutput []interface{} 104 | }{ 105 | { 106 | testRKEClusterCloudProviderAwsServiceOverrideConf, 107 | testRKEClusterCloudProviderAwsServiceOverrideInterface, 108 | }, 109 | } 110 | 111 | for _, tc := range cases { 112 | output := flattenRKEClusterCloudProviderAwsServiceOverride(tc.Input) 113 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 114 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 115 | tc.ExpectedOutput, output) 116 | } 117 | } 118 | } 119 | 120 | func TestFlattenRKEClusterCloudProviderAws(t *testing.T) { 121 | 122 | cases := []struct { 123 | Input *rancher.AWSCloudProvider 124 | ExpectedOutput []interface{} 125 | }{ 126 | { 127 | testRKEClusterCloudProviderAwsConf, 128 | testRKEClusterCloudProviderAwsInterface, 129 | }, 130 | } 131 | 132 | for _, tc := range cases { 133 | output := flattenRKEClusterCloudProviderAws(tc.Input) 134 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 135 | t.Fatalf("Unexpected output from flattener.\nExpected: %#v\nGiven: %#v", 136 | tc.ExpectedOutput, output) 137 | } 138 | } 139 | } 140 | 141 | func TestExpandRKEClusterCloudProviderAwsGlobal(t *testing.T) { 142 | 143 | cases := []struct { 144 | Input []interface{} 145 | ExpectedOutput rancher.GlobalAwsOpts 146 | }{ 147 | { 148 | testRKEClusterCloudProviderAwsGlobalInterface, 149 | testRKEClusterCloudProviderAwsGlobalConf, 150 | }, 151 | } 152 | 153 | for _, tc := range cases { 154 | output := expandRKEClusterCloudProviderAwsGlobal(tc.Input) 155 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 156 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 157 | tc.ExpectedOutput, output) 158 | } 159 | } 160 | } 161 | 162 | func TestExpandRKEClusterCloudProviderAwsServiceOverride(t *testing.T) { 163 | 164 | cases := []struct { 165 | Input []interface{} 166 | ExpectedOutput map[string]rancher.ServiceOverride 167 | }{ 168 | { 169 | 
testRKEClusterCloudProviderAwsServiceOverrideInterface, 170 | testRKEClusterCloudProviderAwsServiceOverrideConf, 171 | }, 172 | } 173 | 174 | for _, tc := range cases { 175 | output := expandRKEClusterCloudProviderAwsServiceOverride(tc.Input) 176 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 177 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 178 | tc.ExpectedOutput, output) 179 | } 180 | } 181 | } 182 | 183 | func TestExpandRKEClusterCloudProviderAws(t *testing.T) { 184 | 185 | cases := []struct { 186 | Input []interface{} 187 | ExpectedOutput *rancher.AWSCloudProvider 188 | }{ 189 | { 190 | testRKEClusterCloudProviderAwsInterface, 191 | testRKEClusterCloudProviderAwsConf, 192 | }, 193 | } 194 | 195 | for _, tc := range cases { 196 | output := expandRKEClusterCloudProviderAws(tc.Input) 197 | if !reflect.DeepEqual(output, tc.ExpectedOutput) { 198 | t.Fatalf("Unexpected output from expander.\nExpected: %#v\nGiven: %#v", 199 | tc.ExpectedOutput, output) 200 | } 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /rke/schema_rke_cluster_network.go: -------------------------------------------------------------------------------- 1 | package rke 2 | 3 | import ( 4 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 5 | "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 6 | ) 7 | 8 | const ( 9 | rkeClusterNetworkPluginCalicoName = "calico" 10 | rkeClusterNetworkPluginCanalName = "canal" 11 | rkeClusterNetworkPluginFlannelName = "flannel" 12 | rkeClusterNetworkPluginNonelName = "none" 13 | rkeClusterNetworkPluginWeaveName = "weave" 14 | rkeClusterNetworkPluginAciName = "aci" 15 | ) 16 | 17 | var ( 18 | rkeClusterNetworkPluginDefault = rkeClusterNetworkPluginCanalName 19 | rkeClusterNetworkPluginList = []string{ 20 | rkeClusterNetworkPluginCalicoName, 21 | rkeClusterNetworkPluginCanalName, 22 | rkeClusterNetworkPluginFlannelName, 23 | rkeClusterNetworkPluginNonelName, 24 | rkeClusterNetworkPluginWeaveName, 25 | rkeClusterNetworkPluginAciName, 26 | } 27 | ) 28 | 29 | //Schemas 30 | 31 | func rkeClusterNetworkCalicoFields() map[string]*schema.Schema { 32 | s := map[string]*schema.Schema{ 33 | "cloud_provider": { 34 | Type: schema.TypeString, 35 | Optional: true, 36 | Computed: true, 37 | }, 38 | } 39 | return s 40 | } 41 | 42 | func rkeClusterNetworkCanalFields() map[string]*schema.Schema { 43 | s := map[string]*schema.Schema{ 44 | "iface": { 45 | Type: schema.TypeString, 46 | Optional: true, 47 | Computed: true, 48 | }, 49 | } 50 | return s 51 | } 52 | 53 | func rkeClusterNetworkFlannelFields() map[string]*schema.Schema { 54 | s := map[string]*schema.Schema{ 55 | "iface": { 56 | Type: schema.TypeString, 57 | Optional: true, 58 | Computed: true, 59 | }, 60 | } 61 | return s 62 | } 63 | 64 | func rkeClusterNetworkWeaveFields() map[string]*schema.Schema { 65 | s := map[string]*schema.Schema{ 66 | "password": { 67 | Type: schema.TypeString, 68 | Required: true, 69 | }, 70 | } 71 | return s 72 | } 73 | 74 | func rkeClusterNetworkAciFields() map[string]*schema.Schema { 75 | s := map[string]*schema.Schema{ 76 | "system_id": { 77 | Type: schema.TypeString, 78 | Required: true, 79 | }, 80 | "apic_hosts": { 81 | Type: schema.TypeList, 82 | Required: true, 83 | Elem: &schema.Schema{ 84 | Type: schema.TypeString, 85 | }, 86 | }, 87 | "token": { 88 | Type: schema.TypeString, 89 | Required: true, 90 | Sensitive: true, 91 | }, 92 | "apic_user_name": { 93 | Type: schema.TypeString, 94 | Required: true, 95 | 
}, 96 | "apic_user_key": { 97 | Type: schema.TypeString, 98 | Required: true, 99 | Sensitive: true, 100 | }, 101 | "apic_user_crt": { 102 | Type: schema.TypeString, 103 | Required: true, 104 | Sensitive: true, 105 | }, 106 | "encap_type": { 107 | Type: schema.TypeString, 108 | Required: true, 109 | }, 110 | "mcast_range_start": { 111 | Type: schema.TypeString, 112 | Required: true, 113 | }, 114 | "mcast_range_end": { 115 | Type: schema.TypeString, 116 | Required: true, 117 | }, 118 | "aep": { 119 | Type: schema.TypeString, 120 | Required: true, 121 | }, 122 | "vrf_name": { 123 | Type: schema.TypeString, 124 | Required: true, 125 | }, 126 | "vrf_tenant": { 127 | Type: schema.TypeString, 128 | Required: true, 129 | }, 130 | "l3out": { 131 | Type: schema.TypeString, 132 | Required: true, 133 | }, 134 | "node_subnet": { 135 | Type: schema.TypeString, 136 | Required: true, 137 | }, 138 | "l3out_external_networks": { 139 | Type: schema.TypeList, 140 | Required: true, 141 | Elem: &schema.Schema{ 142 | Type: schema.TypeString, 143 | }, 144 | }, 145 | "extern_dynamic": { 146 | Type: schema.TypeString, 147 | Required: true, 148 | }, 149 | "extern_static": { 150 | Type: schema.TypeString, 151 | Required: true, 152 | }, 153 | "node_svc_subnet": { 154 | Type: schema.TypeString, 155 | Required: true, 156 | }, 157 | "kube_api_vlan": { 158 | Type: schema.TypeString, 159 | Required: true, 160 | }, 161 | "service_vlan": { 162 | Type: schema.TypeString, 163 | Required: true, 164 | }, 165 | "infra_vlan": { 166 | Type: schema.TypeString, 167 | Required: true, 168 | }, 169 | "snat_port_range_start": { 170 | Type: schema.TypeString, 171 | Optional: true, 172 | }, 173 | "snat_port_range_end": { 174 | Type: schema.TypeString, 175 | Optional: true, 176 | }, 177 | "snat_ports_per_node": { 178 | Type: schema.TypeString, 179 | Optional: true, 180 | }, 181 | } 182 | return s 183 | } 184 | 185 | func rkeClusterNetworkFields() map[string]*schema.Schema { 186 | s := map[string]*schema.Schema{ 187 | "calico_network_provider": { 188 | Type: schema.TypeList, 189 | MaxItems: 1, 190 | Optional: true, 191 | Description: "Calico network provider config", 192 | Elem: &schema.Resource{ 193 | Schema: rkeClusterNetworkCalicoFields(), 194 | }, 195 | }, 196 | "canal_network_provider": { 197 | Type: schema.TypeList, 198 | MaxItems: 1, 199 | Optional: true, 200 | Description: "Canal network provider config", 201 | Elem: &schema.Resource{ 202 | Schema: rkeClusterNetworkCanalFields(), 203 | }, 204 | }, 205 | "flannel_network_provider": { 206 | Type: schema.TypeList, 207 | MaxItems: 1, 208 | Optional: true, 209 | Description: "Flannel network provider config", 210 | Elem: &schema.Resource{ 211 | Schema: rkeClusterNetworkFlannelFields(), 212 | }, 213 | }, 214 | "weave_network_provider": { 215 | Type: schema.TypeList, 216 | MaxItems: 1, 217 | Optional: true, 218 | Description: "Weave network provider config", 219 | Elem: &schema.Resource{ 220 | Schema: rkeClusterNetworkWeaveFields(), 221 | }, 222 | }, 223 | "aci_network_provider": { 224 | Type: schema.TypeList, 225 | MaxItems: 1, 226 | Optional: true, 227 | Description: "Aci network provider config", 228 | Elem: &schema.Resource{ 229 | Schema: rkeClusterNetworkAciFields(), 230 | }, 231 | }, 232 | "mtu": { 233 | Type: schema.TypeInt, 234 | Optional: true, 235 | Default: 0, 236 | Description: "Network provider MTU", 237 | ValidateFunc: validation.IntBetween(0, 9000), 238 | }, 239 | "options": { 240 | Type: schema.TypeMap, 241 | Optional: true, 242 | Computed: true, 243 | Description: "Network 
provider options", 244 | }, 245 | "plugin": { 246 | Type: schema.TypeString, 247 | Optional: true, 248 | Default: rkeClusterNetworkPluginDefault, 249 | Description: "Network provider plugin", 250 | ValidateFunc: validation.StringInSlice(rkeClusterNetworkPluginList, true), 251 | }, 252 | "enable_br_netfilter": { 253 | Type: schema.TypeBool, 254 | Optional: true, 255 | Default: true, 256 | Description: "Enable/Disable br_netfilter on nodes", 257 | }, 258 | } 259 | return s 260 | } 261 | --------------------------------------------------------------------------------