├── README.md ├── rancher-lab ├── .gitignore ├── README.md ├── cloud.tf ├── main.tf ├── nodes │ ├── common │ │ ├── kickstart.yaml │ │ └── metadata.yaml │ ├── nfs │ │ └── cloudinit │ │ │ └── userdata.yaml │ └── rancher │ │ └── cloudinit │ │ └── userdata.yaml ├── scripts │ └── DnsSetRecord.ps1 ├── variables.tf └── vsphere_data.tf ├── template-prep-adv ├── README.md ├── main.tf ├── template │ └── kickstart.yaml ├── variables.tf └── vpshere_data.tf ├── template-prep ├── main.tf ├── template │ └── kickstart.yaml ├── variables.tf └── vpshere_data.tf ├── tools ├── OVA_upload │ ├── .gitignore │ └── transfer_template.ps1 ├── README.md └── winrm-config └── vsphere-cloudinit ├── .gitignore ├── README.md ├── cloudinit ├── kickstart.yaml ├── metadata.yaml └── userdata.yaml ├── main.tf ├── outputs.tf ├── variables.tf └── vsphere_data.tf /README.md: -------------------------------------------------------------------------------- 1 | # Linoproject Terraform projects 2 | 3 | ## Contents 4 | 0. Tools [link](./tools) 5 | 1. Terraform, vSphere, cloud-init [link](./vsphere-cloudinit) 6 | 2. Template [link](./template-prep) 7 | 3. Template Rancher [link](./template-prep-adv) 8 | -------------------------------------------------------------------------------- /rancher-lab/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | 11 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 12 | # .tfvars files are managed as part of configuration and so should be included in 13 | # version control. 14 | # 15 | # example.tfvars 16 | terraform.tfvars 17 | 18 | # Ignore override files as they are usually used to override resources locally and so 19 | # are not checked in 20 | override.tf 21 | override.tf.json 22 | *_override.tf 23 | *_override.tf.json 24 | 25 | # Include override files you do wish to add to version control using negated pattern 26 | # 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* -------------------------------------------------------------------------------- /rancher-lab/README.md: -------------------------------------------------------------------------------- 1 | # Deploy Rancher with Cloud-init and Terraform 2 | 3 | The full example is here: [link](https://blog.linoproject.net/deliver-rancher-with-cloud-init-and-terraform/) 4 | 5 | In order to test this code you must have: 6 | 1. Windows Server 2008-2012 with Active Directory and DNS (or a Windows DNS) 7 | 2. A prepared template with the basic components for Rancher [link](../template-prep-adv) 8 | 3. A terraform.tfvars file like the following example: 9 | 10 | ```ruby 11 | vsphere_env = { 12 | user = "administrator@vsphere.local" 13 | password = "SuperPassword1!" 14 | server = "vcenter.yourdomain.lab" 15 | } 16 | 17 | domain_env = { 18 | user = ".\\Administrator" 19 | password = "SuperPassword1!" 
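  # dns_server below should be the IP or FQDN of the Windows DNS host that
  # Terraform reaches over WinRM to upload and run scripts/DnsSetRecord.ps1;
  # it is intentionally left empty here.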
20 | dns_server = "" 21 | domain_name = "yourdomain.lab" 22 | } 23 | 24 | vms = { 25 | rancher = { 26 | vCPU = 2 27 | vMEM = 4096 28 | vmname = "rancher" 29 | 30 | datastore = "datastore1" 31 | datacenter = "HomeLabWorkload" 32 | network = "lablan" 33 | cluster = "workload" 34 | template = "ubuntu1804templateCloudInitAdv" // Here the name of template built template-pre-adv 35 | ip = "" 36 | netmask = "24" 37 | 38 | hostname = "rancher01" 39 | domain_name = "yourdomain.lab" 40 | user = "local" 41 | password = "SuperPassword1!" 42 | } 43 | 44 | } 45 | ``` 46 | -------------------------------------------------------------------------------- /rancher-lab/cloud.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linoproject/terraform/339505a183c86f4cd6defe28bff1d4e1a1b00d6e/rancher-lab/cloud.tf -------------------------------------------------------------------------------- /rancher-lab/main.tf: -------------------------------------------------------------------------------- 1 | 2 | provider "vsphere" { 3 | user = var.vsphere_env.user 4 | password = var.vsphere_env.password 5 | vsphere_server = var.vsphere_env.server 6 | 7 | # If you have a self-signed cert 8 | allow_unverified_ssl = true 9 | } 10 | 11 | 12 | resource vsphere_virtual_machine "allvms" { 13 | for_each = var.vms 14 | 15 | resource_pool_id = data.vsphere_compute_cluster.this[each.key].resource_pool_id 16 | datastore_id = data.vsphere_datastore.this[each.key].id 17 | 18 | name = each.value.vmname 19 | num_cpus = each.value.vCPU 20 | memory = each.value.vMEM 21 | 22 | guest_id = data.vsphere_virtual_machine.template[each.key].guest_id 23 | scsi_type = data.vsphere_virtual_machine.template[each.key].scsi_type 24 | 25 | cdrom { 26 | client_device = true 27 | } 28 | 29 | network_interface { 30 | network_id = data.vsphere_network.this[each.key].id 31 | adapter_type = data.vsphere_virtual_machine.template[each.key].network_interface_types[0] 32 | } 33 | wait_for_guest_net_timeout = 0 34 | 35 | disk { 36 | label = "disk0" 37 | size = data.vsphere_virtual_machine.template[each.key].disks.0.size 38 | eagerly_scrub = data.vsphere_virtual_machine.template[each.key].disks.0.eagerly_scrub 39 | thin_provisioned = data.vsphere_virtual_machine.template[each.key].disks.0.thin_provisioned 40 | } 41 | 42 | clone { 43 | template_uuid = data.vsphere_virtual_machine.template[each.key].id 44 | } 45 | 46 | 47 | extra_config = { 48 | "guestinfo.metadata" = base64encode(data.template_file.metadataconfig[each.key].rendered) 49 | "guestinfo.metadata.encoding" = "base64" 50 | "guestinfo.userdata" = base64encode(data.template_file.userdataconfig[each.key].rendered) 51 | "guestinfo.userdata.encoding" = "base64" 52 | } 53 | 54 | provisioner "remote-exec" { 55 | inline = [ 56 | "sudo cloud-init status --wait" 57 | ] 58 | connection { 59 | host = each.value.ip 60 | type = "ssh" 61 | user = each.value.user 62 | password = each.value.password 63 | } 64 | } 65 | 66 | 67 | 68 | 69 | } 70 | 71 | resource "null_resource" "dnsrecord" { 72 | for_each = var.vms 73 | 74 | provisioner "file" { 75 | content = data.template_file.dnsrecord[each.key].rendered 76 | destination = "/Temp/DNSRecord.ps1" 77 | 78 | connection { 79 | type = "winrm" 80 | user = var.domain_env.user 81 | password = var.domain_env.password 82 | host = var.domain_env.dns_server 83 | insecure = true 84 | use_ntlm = true 85 | https = false 86 | } 87 | } 88 | 89 | 90 | provisioner "remote-exec" { 91 | inline = [ 92 | "Powershell.exe 
/Temp/DNSRecord.ps1" 93 | ] 94 | connection { 95 | type = "winrm" 96 | user = var.domain_env.user 97 | password = var.domain_env.password 98 | host = var.domain_env.dns_server 99 | insecure = true 100 | use_ntlm = true 101 | https = false 102 | } 103 | } 104 | 105 | } 106 | resource "null_resource" "install_terraform" { 107 | 108 | 109 | provisioner "remote-exec" { 110 | inline = [ 111 | "sudo su -c /home/ubuntu/install.sh ubuntu" 112 | ] 113 | connection { 114 | host = var.vms["rancher"].ip 115 | type = "ssh" 116 | user = var.vms["rancher"].user 117 | password = var.vms["rancher"].password 118 | } 119 | } 120 | 121 | depends_on = [vsphere_virtual_machine.allvms["rancher"]] 122 | } -------------------------------------------------------------------------------- /rancher-lab/nodes/common/kickstart.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | chpasswd: #Change your local password here 3 | list: | 4 | ${user}:${password} 5 | expire: false 6 | users: 7 | - default #Define a default user 8 | - name: ${user} 9 | gecos: ${user} 10 | lock_passwd: false 11 | groups: sudo, users, admin 12 | shell: /bin/bash 13 | sudo: ['ALL=(ALL) NOPASSWD:ALL'] 14 | system_info: 15 | default_user: 16 | name: ubuntu 17 | lock_passwd: false 18 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 19 | #disable_root: false #Enable root acce 20 | ssh_pwauth: yes #Use pwd to access (otherwise follow official doc to use ssh-keys) 21 | random_seed: 22 | file: /dev/urandom 23 | command: ["pollinate", "-r", "-s", "https://entropy.ubuntu.com"] 24 | command_required: true 25 | package_upgrade: true 26 | packages: 27 | - python3-pip #Dependency package for cur 28 | runcmd: 29 | - curl -sSL https://raw.githubusercontent.com/vmware/cloud-init-vmware-guestinfo/master/install.sh | sh - #Install cloud-init 30 | power_state: 31 | timeout: 10 32 | mode: reboot 33 | -------------------------------------------------------------------------------- /rancher-lab/nodes/common/metadata.yaml: -------------------------------------------------------------------------------- 1 | local-hostname: ${hostname} 2 | instance-id: ${instance_id} 3 | network: 4 | version: 2 5 | ethernets: 6 | ens192: 7 | dhcp4: false #true to use dhcp 8 | addresses: 9 | - ${ip}/${netmask} 10 | gateway4: ${gw} # Set gw here 11 | nameservers: 12 | addresses: 13 | - ${dns} # Set DNS ip address here 14 | -------------------------------------------------------------------------------- /rancher-lab/nodes/nfs/cloudinit/userdata.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | packages: 3 | - nfs-kernel-server 4 | - docker 5 | - docker.io 6 | runcmd: 7 | - mkdir -p /mnt/sharedfolder 8 | -------------------------------------------------------------------------------- /rancher-lab/nodes/rancher/cloudinit/userdata.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | packages: 3 | # - docker 4 | # - docker.io 5 | # - apt-transport-https 6 | # users: 7 | # - name: composer 8 | # uid: 2000 9 | # groups: docker 10 | # sudo: ['ALL=(ALL) NOPASSWD:ALL'] 11 | # shell: /bin/bash 12 | # write_files: 13 | # - path: /home/composer/docker-compose.yml 14 | # permissions: 0644 15 | # owner: root 16 | # content: | 17 | # version: "3.7" 18 | # services: 19 | # rancher-server: 20 | # image: rancher/rancher 21 | # ports: 22 | # - 80:80 23 | # - 443:443 24 | # volumes: 25 | # - /opt/rancher:/var/lib/rancher 26 | # - 
/etc/opt/midl/cert:/container/certs 27 | # - /var/log/rancher/auditlog:/var/log/auditlog 28 | # restart: always 29 | 30 | # runcmd: 31 | # - curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 32 | # - chmod +x /usr/local/bin/docker-compose 33 | # - usermod -a -G docker local 34 | # - /usr/local/bin/docker-compose -f /home/composer/docker-compose.yml up -d 35 | # - chown -R composer:docker /home/composer/ 36 | 37 | write_files: 38 | - path: /home/ubuntu/rancher-cluster.yml 39 | owner: ubuntu 40 | content: | 41 | nodes: 42 | - address: ${ip} 43 | internal_address: ${ip} 44 | user: ubuntu 45 | role: [controlplane,etcd,worker] 46 | addon_job_timeout: 120 47 | - path: /home/ubuntu/install.sh 48 | permissions: '0755' 49 | owner: ubuntu 50 | content: | 51 | #!/bin/bash 52 | rke up --config /home/ubuntu/rancher-cluster.yml 53 | mkdir -p /home/ubuntu/.kube 54 | ln -s /home/ubuntu/kube_config_rancher-cluster.yml /home/ubuntu/.kube/config 55 | kubectl create namespace cert-manager 56 | kubectl apply --validate=false -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml 57 | helm repo add jetstack https://charts.jetstack.io 58 | helm repo update 59 | helm install cert-manager --namespace cert-manager --version v0.12.0 jetstack/cert-manager 60 | kubectl -n cert-manager rollout status deploy/cert-manager 61 | kubectl -n cert-manager rollout status deploy/cert-manager-webhook 62 | helm repo add rancher-latest https://releases.rancher.com/server-charts/latest 63 | helm repo update 64 | kubectl create namespace cattle-system 65 | helm install rancher rancher-latest/rancher --namespace cattle-system --set hostname=${hostname}.${domain_name} --set replicas=1 66 | while true; do curl -kv https://${hostname}.${domain_name} 2>&1 | grep -q "dynamiclistener-ca"; if [ $? 
!= 0 ]; then echo "Rancher isn't ready yet"; sleep 5; continue; fi; break; done; echo "Rancher is Ready"; 67 | 68 | 69 | 70 | # runcmd: 71 | # - sudo su -c "ssh-keygen -b 2048 -t rsa -f /home/ubuntu/.ssh/id_rsa -N \"\"" ubuntu 72 | # - sudo su -c "cat /home/ubuntu/.ssh/id_rsa.pub >> /home/ubuntu/.ssh/authorized_keys" ubuntu 73 | # - sudo wget -O /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.0.2/rke_linux-amd64 && chmod +x /usr/local/bin/rke 74 | # - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 75 | # - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | tee -a /etc/apt/sources.list.d/kubernetes.list 76 | # - apt-get update && apt-get install -y kubectl 77 | # - wget -O /tmp/helm.tar.gz https://get.helm.sh/helm-v3.0.2-linux-amd64.tar.gz 78 | # - tar -C /tmp/ -zxf /tmp/helm.tar.gz && mv /tmp/linux-amd64/helm /usr/local/bin/helm && chmod +x /usr/local/bin/helm 79 | # - rm -rf /tmp/linux-amd64 && rm -f helm.tar.gz 80 | # - usermod -aG docker ubuntu 81 | 82 | -------------------------------------------------------------------------------- /rancher-lab/scripts/DnsSetRecord.ps1: -------------------------------------------------------------------------------- 1 | 2 | $RecordName = "${hostname}" 3 | $ZoneName = "${domain_name}" 4 | $ip = "${ip}" 5 | 6 | try { 7 | $res = Resolve-DnsName -Name "$RecordName" -ErrorAction Stop 8 | $OldObj = Get-DnsServerResourceRecord -Name $RecordName -ZoneName $ZoneName -RRType A 9 | $NewObj = $OldObj.clone() 10 | $NewObj.RecordData.IPv4Address = [System.Net.IPAddress]::parse($ip) 11 | Set-DnsServerResourceRecord -NewInputObject $NewObj -OldInputObject $OldObj -ZoneName $ZoneName 12 | Write-Host "DONE Modify A record $RecordName.$ZoneName -> $ip" 13 | 14 | } 15 | Catch { 16 | Add-DnsServerResourceRecordA -Name $RecordName -ZoneName $ZoneName -AllowUpdateAny -IPv4Address $ip -ErrorAction Stop 17 | Write-Host "DONE Add A record $RecordName.$ZoneName -> $ip" 18 | 19 | } 20 | -------------------------------------------------------------------------------- /rancher-lab/variables.tf: -------------------------------------------------------------------------------- 1 | #cloud variables 2 | 3 | variable "vsphere_env" { 4 | type = object({ 5 | server = string 6 | user = string 7 | password = string 8 | }) 9 | default = { 10 | server = "vcsa.local.lab" 11 | user = "administrator@vsphere.local" 12 | password = "SuperPassw0rd!" 13 | } 14 | } 15 | 16 | variable "domain_env" { 17 | type = object({ 18 | dns_server = string 19 | user = string 20 | password = string 21 | domain_name = string 22 | }) 23 | default = { 24 | dns_server = "dns1" 25 | user = "administrator" 26 | password = "SuperPassw0rd!" 
27 | domain_name = "mydomain.tld" 28 | } 29 | } 30 | 31 | 32 | variable "vm_env" { 33 | type = object({ 34 | gw = string 35 | dns = string 36 | }) 37 | default = { 38 | gw = "192.168.200.254" 39 | dns = "192.168.200.10" 40 | } 41 | } 42 | 43 | variable "k8s_master_env" { 44 | type = object({ 45 | adminuser = string 46 | adminpwd = string 47 | }) 48 | default = { 49 | adminuser = "admin" 50 | adminpwd = "admin" 51 | } 52 | } 53 | 54 | variable "vms" { 55 | type = map(object({ 56 | vCPU = number 57 | vMEM = number 58 | vmname = string 59 | datastore = string 60 | network = string 61 | user = string 62 | password = string 63 | template = string 64 | cluster = string 65 | datacenter = string 66 | hostname = string 67 | domain_name = string 68 | ip = string 69 | netmask = string 70 | })) 71 | } 72 | 73 | -------------------------------------------------------------------------------- /rancher-lab/vsphere_data.tf: -------------------------------------------------------------------------------- 1 | data vsphere_datacenter "this" { 2 | for_each = var.vms 3 | 4 | name = each.value.datacenter 5 | } 6 | 7 | data vsphere_compute_cluster "this" { 8 | for_each = var.vms 9 | 10 | name = each.value.cluster 11 | datacenter_id = data.vsphere_datacenter.this[each.key].id 12 | } 13 | 14 | data vsphere_datastore "this" { 15 | for_each = var.vms 16 | 17 | name = each.value.datastore 18 | datacenter_id = data.vsphere_datacenter.this[each.key].id 19 | } 20 | 21 | data vsphere_network "this" { 22 | for_each = var.vms 23 | 24 | name = each.value.network 25 | datacenter_id = data.vsphere_datacenter.this[each.key].id 26 | } 27 | 28 | 29 | data vsphere_virtual_machine "template" { 30 | for_each = var.vms 31 | 32 | name = each.value.template 33 | datacenter_id = data.vsphere_datacenter.this[each.key].id 34 | } 35 | 36 | data template_file "metadataconfig" { 37 | for_each = var.vms 38 | 39 | # Main cloud-config configuration file. 40 | template = file("${path.module}/nodes/common/metadata.yaml") 41 | vars = { 42 | ip = "${each.value.ip}" 43 | netmask = "${each.value.netmask}" 44 | hostname = "${each.value.hostname}" 45 | instance_id = "${each.value.vmname}" 46 | gw = "${var.vm_env.gw}" 47 | dns = "${var.vm_env.dns}" 48 | 49 | } 50 | } 51 | 52 | data template_file "userdataconfig" { 53 | for_each = var.vms 54 | 55 | template = file("${path.module}/nodes/${each.value.vmname}/cloudinit/userdata.yaml") 56 | vars = { 57 | ip = "${each.value.ip}" 58 | hostname = "${each.value.hostname}" 59 | domain_name = "${each.value.domain_name}" 60 | } 61 | } 62 | 63 | 64 | data template_file "kickstartconfig" { 65 | for_each = var.vms 66 | 67 | # Main cloud-config configuration file. 68 | template = file("${path.module}/nodes/common/kickstart.yaml") 69 | vars = { 70 | user = "${each.value.user}" 71 | password = "${each.value.password}" 72 | } 73 | } 74 | 75 | data template_file "dnsrecord" { 76 | for_each = var.vms 77 | 78 | # Main cloud-config configuration file. 79 | template = file("${path.module}/scripts/DnsSetRecord.ps1") 80 | vars = { 81 | hostname = "${each.value.hostname}" 82 | domain_name = "${each.value.domain_name}" 83 | ip = "${each.value.ip}" 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /template-prep-adv/README.md: -------------------------------------------------------------------------------- 1 | # Template preparation for Rancher 2 | 3 | In order to prepare the template just: 4 | 1. Write terraform.tfvars file 5 | 2. 
Run the template preparation and wait for the VM configuration to complete 6 | 3. Power off the VM and convert it to a template (or clone it as a template), naming it ubuntu1804templateCloudInitAdv 7 | 8 | Here is an example terraform.tfvars file: 9 | ```ruby 10 | vsphere_env = { 11 | user = "administrator@vsphere.local" 12 | password = "SuperPassword1!" 13 | server = "vcenter.yourdomain.lab" 14 | } 15 | 16 | template = { 17 | 18 | vCPU = 1 19 | vMEM = 1024 20 | vmname = "tpl01-adv" 21 | 22 | datastore = "datastore1" 23 | datacenter = "HomeLabWorkload" 24 | network = "lablan" 25 | cluster = "workload" 26 | template = "ubuntu1804template" 27 | 28 | 29 | hostname = "tpl01-adv" 30 | domain_name = "yourdomain.lab" 31 | user = "local" 32 | password = "SuperPassword1!" 33 | } 34 | ``` 35 | 36 | TODO: I'll push a PowerCLI sequence for automatic template creation. -------------------------------------------------------------------------------- /template-prep-adv/main.tf: -------------------------------------------------------------------------------- 1 | provider "vsphere" { 2 | user = var.vsphere_env.user 3 | password = var.vsphere_env.password 4 | vsphere_server = var.vsphere_env.server 5 | 6 | # If you have a self-signed cert 7 | allow_unverified_ssl = true 8 | } 9 | 10 | 11 | resource vsphere_virtual_machine "template" { 12 | 13 | resource_pool_id = data.vsphere_compute_cluster.this.resource_pool_id 14 | datastore_id = data.vsphere_datastore.this.id 15 | 16 | name = var.template.vmname 17 | num_cpus = var.template.vCPU 18 | memory = var.template.vMEM 19 | 20 | guest_id = data.vsphere_virtual_machine.template.guest_id 21 | scsi_type = data.vsphere_virtual_machine.template.scsi_type 22 | 23 | cdrom { 24 | client_device = true 25 | } 26 | 27 | network_interface { 28 | network_id = data.vsphere_network.this.id 29 | adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0] 30 | } 31 | wait_for_guest_net_timeout = 0 32 | 33 | disk { 34 | label = "disk0" 35 | size = data.vsphere_virtual_machine.template.disks.0.size 36 | eagerly_scrub = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub 37 | thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned 38 | } 39 | 40 | clone { 41 | template_uuid = data.vsphere_virtual_machine.template.id 42 | } 43 | 44 | vapp { 45 | properties = { 46 | hostname = var.template.hostname 47 | user-data = base64encode(data.template_file.kickstartconfig.rendered) 48 | } 49 | } 50 | 51 | } 52 | 53 | -------------------------------------------------------------------------------- /template-prep-adv/template/kickstart.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | chpasswd: #Change your local password here 3 | list: | 4 | ${user}:${password} 5 | expire: false 6 | users: 7 | - default #Define a default user 8 | - name: ${user} 9 | gecos: ${user} 10 | lock_passwd: false 11 | groups: sudo, users, admin 12 | shell: /bin/bash 13 | sudo: ['ALL=(ALL) NOPASSWD:ALL'] 14 | system_info: 15 | default_user: 16 | name: ubuntu 17 | lock_passwd: false 18 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 19 | #disable_root: false #Enable root access 20 | ssh_pwauth: yes #Use password login (otherwise follow the official docs to use ssh keys) 21 | random_seed: 22 | file: /dev/urandom 23 | command: ["pollinate", "-r", "-s", "https://entropy.ubuntu.com"] 24 | command_required: true 25 | package_upgrade: true 26 | packages: 27 | - python3-pip #Dependency package for curl 28 | - docker 29 | - docker.io 30 | - 
apt-transport-https 31 | runcmd: 32 | - curl -sSL https://raw.githubusercontent.com/vmware/cloud-init-vmware-guestinfo/master/install.sh | sh - #Install cloud-init 33 | - sudo su -c "ssh-keygen -b 2048 -t rsa -f /home/ubuntu/.ssh/id_rsa -N \"\"" ubuntu 34 | - sudo su -c "cat /home/ubuntu/.ssh/id_rsa.pub >> /home/ubuntu/.ssh/authorized_keys" ubuntu 35 | - sudo wget -O /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.0.2/rke_linux-amd64 && chmod +x /usr/local/bin/rke 36 | - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 37 | - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | tee -a /etc/apt/sources.list.d/kubernetes.list 38 | - apt-get update && apt-get install -y kubectl 39 | - wget -O /tmp/helm.tar.gz https://get.helm.sh/helm-v3.0.2-linux-amd64.tar.gz 40 | - tar -C /tmp/ -zxf /tmp/helm.tar.gz && mv /tmp/linux-amd64/helm /usr/local/bin/helm && chmod +x /usr/local/bin/helm 41 | - rm -rf /tmp/linux-amd64 && rm -f helm.tar.gz 42 | - usermod -aG docker ubuntu 43 | power_state: 44 | timeout: 5 45 | mode: reboot 46 | -------------------------------------------------------------------------------- /template-prep-adv/variables.tf: -------------------------------------------------------------------------------- 1 | #cloud variables 2 | 3 | variable "vsphere_env" { 4 | type = object({ 5 | server = string 6 | user = string 7 | password = string 8 | }) 9 | default = { 10 | server = "vcsa.local.lab" 11 | user = "administrator@vsphere.local" 12 | password = "SuperPassw0rd!" 13 | } 14 | } 15 | 16 | variable "template" { 17 | type = object({ 18 | vCPU = number 19 | vMEM = number 20 | vmname = string 21 | datastore = string 22 | network = string 23 | user = string 24 | password = string 25 | template = string 26 | cluster = string 27 | datacenter = string 28 | hostname = string 29 | }) 30 | } 31 | 32 | -------------------------------------------------------------------------------- /template-prep-adv/vpshere_data.tf: -------------------------------------------------------------------------------- 1 | data vsphere_datacenter "this" { 2 | name = var.template.datacenter 3 | } 4 | 5 | data vsphere_compute_cluster "this" { 6 | 7 | name = var.template.cluster 8 | datacenter_id = data.vsphere_datacenter.this.id 9 | } 10 | 11 | data vsphere_datastore "this" { 12 | 13 | name = var.template.datastore 14 | datacenter_id = data.vsphere_datacenter.this.id 15 | } 16 | 17 | data vsphere_network "this" { 18 | 19 | name = var.template.network 20 | datacenter_id = data.vsphere_datacenter.this.id 21 | } 22 | 23 | 24 | data vsphere_virtual_machine "template" { 25 | 26 | name = var.template.template 27 | datacenter_id = data.vsphere_datacenter.this.id 28 | } 29 | 30 | 31 | data template_file "kickstartconfig" { 32 | 33 | # Main cloud-config configuration file. 
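  # Rendered with the template's user/password and injected by main.tf as the
  # base64-encoded user-data vApp property of the cloned VM.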
34 | template = file("${path.module}/template/kickstart.yaml") 35 | vars = { 36 | user = "${var.template.user}" 37 | password = "${var.template.password}" 38 | } 39 | } -------------------------------------------------------------------------------- /template-prep/main.tf: -------------------------------------------------------------------------------- 1 | provider "vsphere" { 2 | user = var.vsphere_env.user 3 | password = var.vsphere_env.password 4 | vsphere_server = var.vsphere_env.server 5 | 6 | # If you have a self-signed cert 7 | allow_unverified_ssl = true 8 | } 9 | 10 | 11 | resource vsphere_virtual_machine "template" { 12 | 13 | resource_pool_id = data.vsphere_compute_cluster.this.resource_pool_id 14 | datastore_id = data.vsphere_datastore.this.id 15 | 16 | name = var.template.vmname 17 | num_cpus = var.template.vCPU 18 | memory = var.template.vMEM 19 | 20 | guest_id = data.vsphere_virtual_machine.template.guest_id 21 | scsi_type = data.vsphere_virtual_machine.template.scsi_type 22 | 23 | cdrom { 24 | client_device = true 25 | } 26 | 27 | network_interface { 28 | network_id = data.vsphere_network.this.id 29 | adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0] 30 | } 31 | wait_for_guest_net_timeout = 0 32 | 33 | disk { 34 | label = "disk0" 35 | size = data.vsphere_virtual_machine.template.disks.0.size 36 | eagerly_scrub = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub 37 | thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned 38 | } 39 | 40 | clone { 41 | template_uuid = data.vsphere_virtual_machine.template.id 42 | } 43 | 44 | vapp { 45 | properties ={ 46 | hostname = var.template.hostname 47 | user-data = base64encode(data.template_file.kickstartconfig.rendered) 48 | } 49 | } 50 | 51 | } 52 | 53 | -------------------------------------------------------------------------------- /template-prep/template/kickstart.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | chpasswd: #Change your local password here 3 | list: | 4 | ${user}:${password} 5 | expire: false 6 | users: 7 | - default #Define a default user 8 | - name: ${user} 9 | gecos: ${user} 10 | lock_passwd: false 11 | groups: sudo, users, admin 12 | shell: /bin/bash 13 | sudo: ['ALL=(ALL) NOPASSWD:ALL'] 14 | system_info: 15 | default_user: 16 | name: ubuntu 17 | lock_passwd: false 18 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 19 | #disable_root: false #Enable root acce 20 | ssh_pwauth: yes #Use pwd to access (otherwise follow official doc to use ssh-keys) 21 | random_seed: 22 | file: /dev/urandom 23 | command: ["pollinate", "-r", "-s", "https://entropy.ubuntu.com"] 24 | command_required: true 25 | package_upgrade: true 26 | packages: 27 | - python3-pip #Dependency package for cur 28 | runcmd: 29 | - curl -sSL https://raw.githubusercontent.com/vmware/cloud-init-vmware-guestinfo/master/install.sh | sh - #Install cloud-init 30 | power_state: 31 | timeout: 5 32 | mode: reboot 33 | -------------------------------------------------------------------------------- /template-prep/variables.tf: -------------------------------------------------------------------------------- 1 | #cloud variables 2 | 3 | variable "vsphere_env" { 4 | type = object({ 5 | server = string 6 | user = string 7 | password = string 8 | }) 9 | default = { 10 | server = "vcsa.local.lab" 11 | user = "administrator@vsphere.local" 12 | password = "SuperPassw0rd!" 
13 | } 14 | } 15 | 16 | variable "template" { 17 | type = object({ 18 | vCPU = number 19 | vMEM = number 20 | vmname = string 21 | datastore = string 22 | network = string 23 | user = string 24 | password = string 25 | template = string 26 | cluster = string 27 | datacenter = string 28 | hostname = string 29 | }) 30 | } 31 | 32 | -------------------------------------------------------------------------------- /template-prep/vpshere_data.tf: -------------------------------------------------------------------------------- 1 | data vsphere_datacenter "this" { 2 | name = var.template.datacenter 3 | } 4 | 5 | data vsphere_compute_cluster "this" { 6 | 7 | name = var.template.cluster 8 | datacenter_id = data.vsphere_datacenter.this.id 9 | } 10 | 11 | data vsphere_datastore "this" { 12 | 13 | name = var.template.datastore 14 | datacenter_id = data.vsphere_datacenter.this.id 15 | } 16 | 17 | data vsphere_network "this" { 18 | 19 | name = var.template.network 20 | datacenter_id = data.vsphere_datacenter.this.id 21 | } 22 | 23 | 24 | data vsphere_virtual_machine "template" { 25 | 26 | name = var.template.template 27 | datacenter_id = data.vsphere_datacenter.this.id 28 | } 29 | 30 | 31 | data template_file "kickstartconfig" { 32 | 33 | # Main cloud-config configuration file. 34 | template = file("${path.module}/template/kickstart.yaml") 35 | vars = { 36 | user = "${var.template.user}" 37 | password = "${var.template.password}" 38 | } 39 | } -------------------------------------------------------------------------------- /tools/OVA_upload/.gitignore: -------------------------------------------------------------------------------- 1 | *.ova -------------------------------------------------------------------------------- /tools/OVA_upload/transfer_template.ps1: -------------------------------------------------------------------------------- 1 | function UploadOVATemplate { 2 | Param ( 3 | [Parameter(Mandatory = $true)][String]$vCenterFQDN, 4 | [String]$OVADownloadUri = 'https://cloud-images.ubuntu.com/releases/18.04/release/ubuntu-18.04-server-cloudimg-amd64.ova', 5 | [String]$OVAFileName = 'ubuntu-18.04-server-cloudimg-amd64.ova', 6 | [String]$DestinationTemplateName = 'ubuntu1804template', 7 | [Parameter(Mandatory = $true)][String] $DestinationNetwork, 8 | [Parameter(Mandatory = $true)][String] $DestinationDatastoreName, 9 | [String]$DestinationClusterName, 10 | [String]$DiskFormat = 'thin' 11 | ) 12 | 13 | if (-not (Test-Path ("./"+$OVAFileName))){ 14 | Invoke-WebRequest -Uri $OVADownloadUri -OutFile $OVAFileName 15 | } 16 | 17 | $vCenterConn = Connect-VIServer -Server $vCenterFQDN 18 | if ([string]::IsNullOrEmpty($DestinationClusterName)) { # an omitted [String] parameter defaults to '', not $null 19 | $vmhost = Get-VMHost -Server $vCenterConn | Select-Object -First 1 20 | } 21 | else { 22 | $vmhost = Get-Cluster -Name $DestinationClusterName -Server $vCenterConn | Get-VMHost | Get-Random 23 | } 24 | 25 | $DestinationDatastore = Get-Datastore -Name $DestinationDatastoreName 26 | $ovfConfig = Get-OvfConfiguration $OVAFileName 27 | $ovfConfig.NetworkMapping.VM_Network.Value = $DestinationNetwork 28 | Import-VApp -Source $OVAFileName -VMHost $vmhost -OvfConfiguration $ovfConfig -Datastore $DestinationDatastore -DiskStorageFormat $DiskFormat -Name $DestinationTemplateName -Server $vCenterConn 29 | 30 | Disconnect-VIServer -Server $vCenterConn -Force 31 | } 32 | 33 | -------------------------------------------------------------------------------- /tools/README.md: -------------------------------------------------------------------------------- 1 | # Tools 2 | 3 | ## Contents 4 
| 1. Upload an OVA template with PowerCLI [link](./OVA_upload) 5 | 2. Example of a WinRM configuration [link](./winrm-config) -------------------------------------------------------------------------------- /tools/winrm-config: -------------------------------------------------------------------------------- 1 | Config 2 | MaxEnvelopeSizekb = 500 3 | MaxTimeoutms = 60000 4 | MaxBatchItems = 32000 5 | MaxProviderRequests = 4294967295 6 | Client 7 | NetworkDelayms = 5000 8 | URLPrefix = wsman 9 | AllowUnencrypted = true 10 | Auth 11 | Basic = true 12 | Digest = true 13 | Kerberos = true 14 | Negotiate = true 15 | Certificate = true 16 | CredSSP = false 17 | DefaultPorts 18 | HTTP = 5985 19 | HTTPS = 5986 20 | TrustedHosts = * 21 | Service 22 | RootSDDL = O:NSG:BAD:P(A;;GA;;;BA)(A;;GR;;;IU)S:P(AU;FA;GA;;;WD)(AU;SA;GXGW;;; 23 | MaxConcurrentOperations = 4294967295 24 | MaxConcurrentOperationsPerUser = 1500 25 | EnumerationTimeoutms = 240000 26 | MaxConnections = 300 27 | MaxPacketRetrievalTimeSeconds = 120 28 | AllowUnencrypted = true 29 | Auth 30 | Basic = true 31 | Kerberos = true 32 | Negotiate = true 33 | Certificate = false 34 | CredSSP = false 35 | CbtHardeningLevel = Relaxed 36 | DefaultPorts 37 | HTTP = 5985 38 | HTTPS = 5986 39 | IPv4Filter = * 40 | IPv6Filter = * 41 | EnableCompatibilityHttpListener = true 42 | EnableCompatibilityHttpsListener = false 43 | CertificateThumbprint 44 | AllowRemoteAccess = true 45 | Winrs 46 | AllowRemoteShellAccess = true 47 | IdleTimeout = 7200000 48 | MaxConcurrentUsers = 2147483647 49 | MaxShellRunTime = 2147483647 50 | MaxProcessesPerShell = 2147483647 51 | MaxMemoryPerShellMB = 2147483647 52 | MaxShellsPerUser = 2147483647 53 | 54 | winrm s winrm/config/client '@{TrustedHosts="*"}' -------------------------------------------------------------------------------- /vsphere-cloudinit/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | 11 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 12 | # .tfvars files are managed as part of configuration and so should be included in 13 | # version control. 14 | # 15 | terraform.tfvars -------------------------------------------------------------------------------- /vsphere-cloudinit/README.md: -------------------------------------------------------------------------------- 1 | # Terraform, vSphere and Cloud-init 2 | 3 | ## Usage 4 | WIP 5 | -------------------------------------------------------------------------------- /vsphere-cloudinit/cloudinit/kickstart.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | chpasswd: #Change your local password here 3 | list: | 4 | local:MySuperPassw0rd! 
5 | expire: false 6 | users: 7 | - default #Define a default user 8 | - name: local 9 | gecos: Local 10 | lock_passwd: false 11 | groups: sudo, users, admin 12 | shell: /bin/bash 13 | sudo: ['ALL=(ALL) NOPASSWD:ALL'] 14 | system_info: 15 | default_user: 16 | name: default-user 17 | lock_passwd: false 18 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 19 | #disable_root: false #Enable root acce 20 | ssh_pwauth: yes #Use pwd to access (otherwise follow official doc to use ssh-keys) 21 | random_seed: 22 | file: /dev/urandom 23 | command: ["pollinate", "-r", "-s", "https://entropy.ubuntu.com"] 24 | command_required: true 25 | package_upgrade: true 26 | packages: 27 | - python3-pip #Dependency package for cur 28 | runcmd: 29 | - curl -sSL https://raw.githubusercontent.com/vmware/cloud-init-vmware-guestinfo/master/install.sh | sh - #Install cloud-init 30 | power_state: 31 | timeout: 30 32 | mode: reboot 33 | -------------------------------------------------------------------------------- /vsphere-cloudinit/cloudinit/metadata.yaml: -------------------------------------------------------------------------------- 1 | local-hostname: ubuntu-01 2 | instance-id: ubuntu01 3 | network: 4 | version: 2 5 | ethernets: 6 | ens192: 7 | dhcp4: false #true to use dhcp 8 | addresses: 9 | - 192.168.200.70/24 #Set you ip here 10 | gateway4: 192.168.200.254 # Set gw here 11 | nameservers: 12 | addresses: 13 | - 192.168.200.10 # Set DNS ip address here 14 | -------------------------------------------------------------------------------- /vsphere-cloudinit/cloudinit/userdata.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | packages: 3 | - nfs-kernel-server 4 | - docker 5 | - docker.io 6 | runcmd: 7 | - mkdir -p /mnt/sharedfolder 8 | -------------------------------------------------------------------------------- /vsphere-cloudinit/main.tf: -------------------------------------------------------------------------------- 1 | 2 | provider "vsphere" { 3 | user = var.vsphere_user 4 | password = var.vsphere_password 5 | vsphere_server = var.vsphere_server 6 | 7 | # If you have a self-signed cert 8 | allow_unverified_ssl = true 9 | } 10 | 11 | 12 | resource vsphere_virtual_machine "this" { 13 | name = var.hostname 14 | resource_pool_id = data.vsphere_compute_cluster.this.resource_pool_id 15 | datastore_id = data.vsphere_datastore.this.id 16 | 17 | num_cpus = 2 18 | memory = 1024 19 | guest_id = data.vsphere_virtual_machine.template.guest_id 20 | scsi_type = data.vsphere_virtual_machine.template.scsi_type 21 | 22 | cdrom { 23 | client_device = true 24 | } 25 | 26 | network_interface { 27 | network_id = data.vsphere_network.this.id 28 | adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0] 29 | } 30 | wait_for_guest_net_timeout = 0 31 | 32 | disk { 33 | label = "disk0" 34 | size = data.vsphere_virtual_machine.template.disks.0.size 35 | eagerly_scrub = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub 36 | thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned 37 | } 38 | 39 | clone { 40 | template_uuid = data.vsphere_virtual_machine.template.id 41 | } 42 | 43 | 44 | 45 | vapp { 46 | properties ={ 47 | hostname = var.hostname 48 | user-data = base64encode(file("${path.module}/cloudinit/kickstart.yaml")) 49 | } 50 | } 51 | 52 | extra_config = { 53 | "guestinfo.metadata" = base64encode(file("${path.module}/cloudinit/metadata.yaml")) 54 | "guestinfo.metadata.encoding" = "base64" 55 | "guestinfo.userdata" = 
base64encode(file("${path.module}/cloudinit/userdata.yaml")) 56 | "guestinfo.userdata.encoding" = "base64" 57 | } 58 | } -------------------------------------------------------------------------------- /vsphere-cloudinit/outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /vsphere-cloudinit/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # VMWARE PROVIDER VARIABLES 3 | # These are used to connect to vCenter. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "vsphere_server" { 7 | type = string 8 | } 9 | 10 | variable "vsphere_user" { 11 | type = string 12 | } 13 | 14 | variable "vsphere_password" { 15 | type = string 16 | } 17 | 18 | # --------------------------------------------------------------------------------------------------------------------- 19 | # VMWARE DATA SOURCE VARIABLES 20 | # These are used to discover unmanaged resources used during deployment. 21 | # --------------------------------------------------------------------------------------------------------------------- 22 | 23 | variable datacenter_name { 24 | type = string 25 | description = "The name of the vSphere Datacenter into which resources will be created." 26 | } 27 | 28 | variable cluster_name { 29 | type = string 30 | description = "The vSphere Cluster into which resources will be created." 31 | } 32 | 33 | variable datastore_name { 34 | type = string 35 | description = "The vSphere Datastore into which resources will be created." 36 | } 37 | 38 | variable datastore_cluster_name { 39 | type = string 40 | default = "" 41 | } 42 | 43 | variable vm_network_name { 44 | type = string 45 | } 46 | 47 | variable template_name { 48 | type = string 49 | } 50 | 51 | # --------------------------------------------------------------------------------------------------------------------- 52 | # VMWARE RESOURCE VARIABLES 53 | # Variables used during the creation of resources in vSphere. 54 | # --------------------------------------------------------------------------------------------------------------------- 55 | 56 | variable hostname { 57 | type = string 58 | default = "ubuntu" 59 | description = "The virtual machine name in vCenter." 
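  # main.tf also passes this value to the guest as the "hostname" vApp property.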
60 | } 61 | 62 | variable nameservers { 63 | type = list 64 | default = [] 65 | } 66 | 67 | variable ip { 68 | type = string 69 | default = "192.168.200.40" 70 | } 71 | 72 | 73 | -------------------------------------------------------------------------------- /vsphere-cloudinit/vsphere_data.tf: -------------------------------------------------------------------------------- 1 | data vsphere_datacenter "this" { 2 | name = var.datacenter_name 3 | } 4 | 5 | data vsphere_compute_cluster "this" { 6 | name = var.cluster_name 7 | datacenter_id = data.vsphere_datacenter.this.id 8 | } 9 | 10 | data vsphere_datastore "this" { 11 | name = var.datastore_name 12 | datacenter_id = data.vsphere_datacenter.this.id 13 | } 14 | 15 | /* 16 | data vsphere_datastore_cluster "this" { 17 | name = var.datastore_name 18 | datacenter_id = data.vsphere_datacenter.this.id 19 | } 20 | */ 21 | 22 | data vsphere_network "this" { 23 | name = var.vm_network_name 24 | datacenter_id = data.vsphere_datacenter.this.id 25 | } 26 | 27 | data vsphere_virtual_machine "template" { 28 | name = var.template_name 29 | datacenter_id = data.vsphere_datacenter.this.id 30 | } 31 | --------------------------------------------------------------------------------
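Usage note for the vsphere-cloudinit module: its README above marks Usage as WIP, so here is a minimal, hypothetical terraform.tfvars sketch derived from that module's variables.tf. Every value is a placeholder for your own vCenter inventory, and the static guest IP is configured separately in cloudinit/metadata.yaml rather than through these variables.

```ruby
vsphere_server   = "vcenter.yourdomain.lab"
vsphere_user     = "administrator@vsphere.local"
vsphere_password = "SuperPassword1!"

datacenter_name  = "HomeLabWorkload"
cluster_name     = "workload"
datastore_name   = "datastore1"
vm_network_name  = "lablan"
template_name    = "ubuntu1804template"

hostname         = "ubuntu-01"
```

With this file in place, `terraform init` and `terraform apply` from the vsphere-cloudinit directory should clone the template and pass the cloud-init kickstart, metadata and userdata files through the vApp and guestinfo properties wired up in its main.tf.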