├── .gitignore
├── .travis.yml
├── CONTRIBUTING.md
├── Makefile
├── README.md
├── static
│   ├── icp_ce_minimal.png
│   ├── icp_ce_minimal_with_lb.png
│   └── icp_ibmcloud.png
└── templates
    ├── README.md
    ├── icp-ce-minimal
    │   ├── README.md
    │   ├── camvariables.json
    │   ├── icp-deploy.tf
    │   ├── instances.tf
    │   ├── main.tf
    │   ├── scripts
    │   │   └── bootstrap.sh
    │   ├── security_group.tf
    │   ├── terraform-minimal-example.tfvars
    │   └── variables.tf
    ├── icp-ce-with-loadbalancers
    │   ├── README.md
    │   ├── camvariables.json
    │   ├── icp-deploy.tf
    │   ├── instances.tf
    │   ├── lbaas.tf
    │   ├── main.tf
    │   ├── scripts
    │   │   └── bootstrap.sh
    │   ├── security_group.tf
    │   ├── terraform-minimal-example.tfvars
    │   └── variables.tf
    └── icp-ee
        ├── README.md
        ├── camvariables.json
        ├── cfc-certs
        │   └── README.md
        ├── file_storage.tf
        ├── icp-deploy.tf
        ├── icp-install
        │   └── README.md
        ├── instances.tf
        ├── lbaas.tf
        ├── main.tf
        ├── scripts
        │   ├── bootstrap.sh
        │   └── load_image.sh
        ├── security_group.tf
        └── variables.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 | 
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 | 
8 | # .tfvars files
9 | *.tfvars
10 | 
11 | # generated key files
12 | *.pem
13 | 
14 | 
15 | logs/*.*
16 | # For mac users
17 | .history
18 | .DS_Store
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | dist: xenial
2 | language: minimal
3 | 
4 | os:
5 |   - linux
6 | 
7 | env:
8 |   global:
9 |     - TERRAFORM_VER="0.11.13"
10 |     - TF_VAR_deployment="travisci${TRAVIS_JOB_ID}"
11 |     - TF_VAR_sl_username="${sl_username}"
12 |     - TF_VAR_sl_api_key="${sl_api_key}"
13 |     - REG_IMAGE="ibmcom-amd64/icp-inception:3.1.2-ee"
14 |     - ICP_VALIDATION_BRANCH=minimal
15 |   matrix:
16 |     ## Community Edition Builds
17 |     # CE minimal
18 |     - NAME: "CE Minimal"
19 |       TERRAFORM_DIR: "${TRAVIS_BUILD_DIR}/templates/icp-ce-minimal"
20 |       TERRAFORM_VARS_FILE: "${TERRAFORM_DIR}/terraform-minimal-example.tfvars"
21 |     # CE with loadbalancers
22 |     - NAME: "CE with loadbalancer"
23 |       TERRAFORM_DIR: "${TRAVIS_BUILD_DIR}/templates/icp-ce-with-loadbalancers"
24 |       TERRAFORM_VARS_FILE: "${TERRAFORM_DIR}/terraform-minimal-example.tfvars"
25 |     # TODO: Add enterprise edition
26 | 
27 | # Ensure we have the IBM Cloud terraform provider
28 | before_install:
29 |   - mkdir -p ~/.terraform.d/plugins
30 |   - wget -P /tmp/ https://github.com/IBM-Cloud/terraform-provider-ibm/releases/download/v0.16.1/linux_amd64.zip
31 |   - unzip /tmp/linux_amd64.zip -d ~/.terraform.d/plugins
32 | 
33 | # Init the build harness
34 | before_script:
35 |   - make init
36 | 
37 | # Deploy the environment if there are changes in the relevant template
38 | script:
39 |   - make deploy-icp-if-tfchange
40 |   - make validate-icp
41 | 
42 | 
43 | # When everything has completed we can clean up the environment
44 | after_script:
45 |   - make cleanup
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing to IBM Cloud Architecture reference applications
2 | Anyone can contribute to IBM Cloud Architecture reference applications and their associated projects, whether you are an IBMer or not.
3 | We happily welcome your collaboration and contributions, as our reference applications are meant to reflect your real-world scenarios.
4 | There are multiple ways to contribute: report bugs and improvement suggestions, improve documentation, and contribute code.
5 | 
6 | 
7 | ## Bug reports, documentation changes, and feature requests
8 | 
9 | If you would like to contribute your experience with an IBM Cloud Architecture project back to the project in the form of encountered bug reports, necessary documentation changes, or new feature requests, this can be done through the use of the repository's [**Issues**](#) list.
10 | 
11 | Before opening a new issue, please reference the existing list to make sure a similar or duplicate item does not already exist. Otherwise, please be as explicit as possible when creating the new item and be sure to include the following:
12 | 
13 | - **Bug reports**
14 |   - Specific project version
15 |   - Deployment environment
16 |   - A minimal, but complete, set of steps to recreate the problem
17 | - **Documentation changes**
18 |   - URL to existing incorrect or incomplete documentation (either in the project's GitHub repo or external product documentation)
19 |   - Updates required to correct the current inconsistency
20 |   - If possible, a link to a project fork, sample, or workflow to expose the gap in documentation
21 | - **Feature requests**
22 |   - Complete description of the project feature request, including but not limited to the components of the existing project that are impacted, as well as additional components that may need to be created
23 |   - A minimal, but complete, set of steps to recreate the environment necessary to identify the new feature's current gap
24 | 
25 | The more explicit and thorough you are in opening GitHub Issues, the more efficient your interaction with the maintainers will be. When creating the GitHub Issue for your bug report, documentation change, or feature request, be sure to add as many relevant labels as necessary (these are defined per project). They will vary by project, but will help the maintainers quickly triage your new GitHub issues.
26 | 
27 | ## Code contributions
28 | 
29 | We really value contributions, and to maximize the impact of code contributions, we request that any contributions follow the guidelines below. If you are new to open source contribution and would like some more pointers or guidance, you may want to check out [**Your First PR**](http://yourfirstpr.github.io/) and [**First Timers Only**](https://www.firsttimersonly.com/). These are a few projects that help on-board new contributors to the overall process.
30 | 
31 | ### Coding and Pull Request best practices
32 | - Please ensure you follow the coding standard and code formatting used throughout the existing code base.
33 |   - This may vary project by project, but any specific divergence from normal language standards will be explicitly noted.
34 | - One feature / bug fix / documentation update per pull request.
35 | - Always pull the latest changes from upstream and rebase before creating any pull request.
36 | - New pull requests should be created against the `integration` branch of the repository, if available.
37 |   - This ensures new code is included in full-stack integration tests before being merged into the `master` branch.
38 | - All new features must be accompanied by associated tests.
39 | - Make sure all tests pass locally before submitting a pull request.
40 | - Include tests with every feature enhancement, and improve tests with every bug fix.
41 | 
42 | ### GitHub and git flow
43 | 
44 | The internet is littered with guides and information on how to use and understand git.
45 | However, here's a compact guide that follows the suggested workflow:
46 | 
47 | ![Github flow](https://ibm-cloud-architecture.github.io/assets/img/github_flow.png)
48 | 
49 | 1. Fork the desired repo in GitHub.
50 | 
51 | 2. Clone your fork to your local computer.
52 | 
53 | 3. Add the upstream repository.
54 | 
55 | Note: A guide for steps 1-3 is here: [forking a repo](https://help.github.com/articles/fork-a-repo/)
56 | 
57 | 4. Create a new development branch off the targeted upstream branch (this will often be `master`), replacing `my-feature-branch` with a descriptive name:
58 | 
59 | ```
60 | git checkout -b my-feature-branch upstream/master
61 | ```
62 | 
63 | 5. Do your work:
64 |    - Write your code
65 |    - Write your tests
66 |    - Pass your tests locally
67 |    - Commit your intermediate changes as you go and as appropriate
68 |    - Repeat until satisfied
69 | 
70 | 6. Fetch the latest upstream changes (in case other changes were delivered upstream while you were developing your new feature).
71 | 
72 | ```
73 | git fetch upstream
74 | ```
75 | 7. Rebase onto the latest upstream changes, resolving any conflicts. This will 'replay' your local commits, one by one, after the changes delivered upstream while you were locally developing, letting you manually resolve any conflict.
76 | 
77 | ```
78 | git branch --set-upstream-to=upstream/master
79 | git rebase
80 | ```
81 | Instructions on how to manually resolve a conflict and commit the new change, or skip your local replayed commit, will be presented on screen by the git CLI.
82 | 
83 | 8. Push the changes to your repository:
84 | 
85 | ```
86 | git push origin my-feature-branch
87 | ```
88 | 
89 | 9. Create a pull request against the same targeted upstream branch.
90 | 
91 | [Creating a pull request](https://help.github.com/articles/creating-a-pull-request/)
92 | 
93 | Once the pull request has been reviewed, accepted and merged into the main GitHub repository, you should synchronise the `master` branch of your remote and local forked repositories with the upstream master branch. To do so:
94 | 
95 | 10. Pull the latest upstream changes (that is, the merged pull request) into your local forked repository.
96 | 
97 | ```
98 | git pull upstream master
99 | ```
100 | 
101 | 11. Push those latest upstream changes pulled locally to your remote forked repository.
102 | 
103 | ```
104 | git push origin master
105 | ```
106 | 
107 | ### What happens next?
108 | - All pull requests will be automatically built and unit tested by travis-ci, when implemented by that specific project.
109 |   - You can determine if a given project is enabled for travis-ci unit tests by the existence of a `.travis.yml` file in the root of the repository or branch.
110 |   - When in use, all travis-ci unit tests must pass completely before any further review or discussion takes place.
111 | - The repository maintainer will then inspect the commit and, if accepted, will pull the code into the upstream branch.
112 |   - Should a maintainer or reviewer ask for changes to be made to the pull request, these can be made locally and pushed to your forked repository and branch.
113 | - Commits passing this stage will make it into the next release cycle for the given project.
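   | 
   | As a compact, end-to-end sketch of the git flow described above (the repository, organisation and branch names are placeholders):
   | 
   | ```
   | git clone git@github.com:<your-user>/<repo>.git
   | cd <repo>
   | git remote add upstream https://github.com/<upstream-org>/<repo>.git
   | git checkout -b my-feature-branch upstream/master
   | # ...write code and tests, commit as you go...
   | git fetch upstream
   | git rebase upstream/master
   | git push origin my-feature-branch
   | # then open a pull request against the targeted upstream branch on GitHub
   | ```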
114 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Define build harness branch
2 | BUILD_HARNESS_ORG = hans-moen
3 | BUILD_HARNESS_BRANCH = hktest
4 | 
5 | # Define the template and vars file used by the build-harness terraform module
6 | TERRAFORM_DIR ?=
7 | TERRAFORM_VARS_FILE ?=
8 | 
9 | # GITHUB_USER containing '@' char must be escaped with '%40'
10 | GITHUB_USER := $(shell echo $(GITHUB_USER) | sed 's/@/%40/g')
11 | GITHUB_TOKEN ?=
12 | 
13 | # There are many permutations of templates and tfvar example files.
14 | # Only run the templates that have changes
15 | DO_DEPLOY ?= $(shell git diff --name-only $(TRAVIS_COMMIT_RANGE) | grep -q -E "$(shell basename $(TERRAFORM_DIR))/(.*.tf)|(.*.tfvars)" && echo yes || echo no)
16 | 
17 | 
18 | .PHONY: default
19 | default:: init;
20 | 
21 | .PHONY: init
22 | init::
23 | ifndef GITHUB_USER
24 | 	$(info GITHUB_USER not defined)
25 | 	exit -1
26 | endif
27 | 	$(info Using GITHUB_USER=$(GITHUB_USER))
28 | ifndef GITHUB_TOKEN
29 | 	$(info GITHUB_TOKEN not defined)
30 | 	exit -1
31 | endif
32 | ifndef TERRAFORM_DIR
33 | 	$(info TERRAFORM_DIR not defined)
34 | 	exit -1
35 | endif
36 | 	$(info Using TERRAFORM_DIR=$(TERRAFORM_DIR))
37 | ifndef TERRAFORM_VARS_FILE
38 | 	$(info TERRAFORM_VARS_FILE not defined)
39 | 	exit -1
40 | endif
41 | 	$(info Using TERRAFORM_VARS_FILE=$(TERRAFORM_VARS_FILE))
42 | 
43 | -include $(shell curl -so .build-harness -H "Authorization: token $(GITHUB_TOKEN)" -H "Accept: application/vnd.github.v3.raw" "https://raw.github.ibm.com/ICP-DevOps/build-harness/master/templates/Makefile.build-harness"; echo .build-harness)
44 | 
45 | .PHONY: validate-tf
46 | ## Validate a given terraform template directory without deploying
47 | validate-tf:
48 | 	@$(SELF) -s terraform:validate TERRAFORM_VARS_FILE=$(TERRAFORM_VARS_FILE) TERRAFORM_DIR=$(TERRAFORM_DIR)
49 | 
50 | .PHONY: deploy-icp-if-tfchange
51 | deploy-icp-if-tfchange:
52 | 	git diff --name-only $(TRAVIS_COMMIT_RANGE)
53 | ifeq "$(DO_DEPLOY)" "no"
54 | 	$(info No changes in templates or example tfvars in $(basename $(TERRAFORM_DIR)), just doing basic syntax validation.)
55 | 	$(SELF) validate-tf
56 | else
57 | 	$(SELF) deploy-icp
58 | endif
59 | 
60 | .PHONY: deploy-icp
61 | ## Deploy a given terraform template directory with a given terraform VARS file
62 | deploy-icp:
63 | 	@$(SELF) -s terraform:apply TERRAFORM_VARS_FILE=$(TERRAFORM_VARS_FILE) TERRAFORM_DIR=$(TERRAFORM_DIR)
64 | 
65 | .PHONY: validate-icp
66 | validate-icp:
67 | ifeq "$(DO_DEPLOY)" "no"
68 | 	$(info ICP not deployed, skipping validation tests)
69 | else ifeq "$(TRAVIS_TEST_RESULT)" "1"
70 | 	$(error Will not run validation on a failed deployment)
71 | else
72 | 	$(info Running validation test)
73 | 	@export SERVER=$(shell $(SELF) -s terraform:output TERRAFORM_OUTPUT_VAR=icp_console_host) ; \
74 | 	export USERNAME=$(shell $(SELF) -s terraform:output TERRAFORM_OUTPUT_VAR=icp_admin_username) ; \
75 | 	export PASSWORD=$(shell $(SELF) -s terraform:output TERRAFORM_OUTPUT_VAR=icp_admin_password) ; \
76 | 	$(SELF) -s validateicp:runall
77 | endif
78 | 
79 | 
80 | .PHONY: cleanup
81 | ## Delete the environment
82 | cleanup:
83 | 	@$(SELF) -s terraform:destroy
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Terraform ICP IBM Cloud
2 | 
3 | This repository contains a collection of Terraform templates. The Terraform example configurations use the [IBM Cloud provider](https://ibm-cloud.github.io/tf-ibm-docs/index.html) to provision virtual machines on IBM Cloud Infrastructure (SoftLayer)
4 | and [Terraform Module ICP Deploy](https://github.com/ibm-cloud-architecture/terraform-module-icp-deploy) to prepare the VSIs and deploy [IBM Cloud Private](https://www.ibm.com/cloud-computing/products/ibm-cloud-private/) on them. These Terraform templates automate best practices learned from installing ICP on IBM Cloud Infrastructure.
5 | 
6 | ## Pre-requisites
7 | 
8 | * Working copy of [Terraform](https://www.terraform.io/intro/getting-started/install.html)
9 | * As of this writing, the IBM Cloud Terraform provider is not in the main Terraform repository and must be installed manually. See [these steps](https://ibm-cloud.github.io/tf-ibm-docs/index.html#using-terraform-with-the-ibm-cloud-provider). We tested this automation against v0.9.1 of the Terraform provider.
10 | * Select a template that most closely matches your desired target environment from the [available templates](templates)
11 | 
12 | 
13 | 
14 | ### Using the Terraform templates
15 | 
16 | 1. git clone the repository
17 | 
18 | 1. Navigate to the desired [template directory](templates)
19 | 
20 | 1. Create a `terraform.tfvars` file to reflect your environment. Please see the specific README for the template you select.
21 | 
22 | 1. Run `terraform init` to download dependencies (modules and plugins)
23 | 
24 | 1. Run `terraform plan` to investigate the deployment plan
25 | 
26 | 1. Run `terraform apply` to start the deployment.
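   | 
   | As an illustration, a minimal `terraform.tfvars` for the `icp-ce-minimal` template might look like the sketch below (the datacenter and SSH key name are placeholders; see the selected template's README and `variables.tf` for the authoritative list of variables):
   | 
   | ```
   | sl_username = "<username for IBM Cloud infrastructure account>"
   | sl_api_key  = "<API key for IBM Cloud infrastructure account>"
   | datacenter  = "dal13"
   | key_name    = ["my-ssh-key"]
   | ```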
27 | 
28 | ## Selecting the right template
29 | 
30 | We currently have three templates available:
31 | 
32 | - [icp-ce-minimal](templates/icp-ce-minimal)
33 |   * This template will deploy ICP Community Edition with a minimal number of virtual machines and a minimal set of services enabled
34 |   * Additional ICP services such as logging, monitoring and istio can be enabled, and dedicated management nodes can be added, with minor configuration changes
35 |   * This template is suitable for a quick view of basic ICP and Kubernetes functionality, and for simple PoCs and verifications
36 | 
37 | - [icp-ce-with-loadbalancers](templates/icp-ce-with-loadbalancers)
38 |   * Like the `icp-ce-minimal` template, this will deploy a minimal environment, but in this template loadbalancers will also be created. This creates a topology more similar to the `icp-ee` environment, where external loadbalancers are a central part of the network design, but with fewer services and resources active
39 |   * This template is suitable for validation tests and PoCs where external loadbalancer functionality is required
40 | 
41 | - [icp-ee](templates/icp-ee)
42 |   * This template deploys a more robust environment, with the control plane in a high availability configuration
43 |   * By default a separate boot node is provisioned and all SSH communication goes through this node
44 |   * This configuration requires access to ICP Enterprise Edition, typically supplied as a tarball
45 | 
46 | 
47 | Follow the links to these templates for more detailed information about them.
48 | 
--------------------------------------------------------------------------------
/static/icp_ce_minimal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibm-cloud-architecture/terraform-icp-ibmcloud/c51dd807b4d795a054c5435d80751e670966e30f/static/icp_ce_minimal.png
--------------------------------------------------------------------------------
/static/icp_ce_minimal_with_lb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibm-cloud-architecture/terraform-icp-ibmcloud/c51dd807b4d795a054c5435d80751e670966e30f/static/icp_ce_minimal_with_lb.png
--------------------------------------------------------------------------------
/static/icp_ibmcloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ibm-cloud-architecture/terraform-icp-ibmcloud/c51dd807b4d795a054c5435d80751e670966e30f/static/icp_ibmcloud.png
--------------------------------------------------------------------------------
/templates/README.md:
--------------------------------------------------------------------------------
1 | # Terraform ICP IBM Cloud
2 | 
3 | We have a collection of templates that can stand up an ICP environment on IBM Cloud Infrastructure with minimal input.
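   | 
   | The workflow is the same for every template. As a quick sketch, using `icp-ce-minimal` as the example (and assuming the IBM Cloud provider plugin is already installed):
   | 
   | ```
   | git clone https://github.com/ibm-cloud-architecture/terraform-icp-ibmcloud.git
   | cd terraform-icp-ibmcloud/templates/icp-ce-minimal
   | # create a terraform.tfvars for your environment (see the template README)
   | terraform init    # download dependencies (modules and plugins)
   | terraform plan    # investigate the deployment plan
   | terraform apply   # start the deployment
   | ```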
4 | 
5 | ## Selecting the right template
6 | 
7 | We currently have three templates available:
8 | 
9 | - [icp-ce-minimal](icp-ce-minimal)
10 |   * This template will deploy ICP Community Edition with a minimal number of virtual machines and a minimal set of services enabled
11 |   * Additional ICP services such as logging, monitoring and istio can be enabled, and dedicated management nodes can be added, with minor configuration changes
12 |   * This template is suitable for a quick view of basic ICP and Kubernetes functionality, and for simple PoCs and verifications
13 | 
14 | - [icp-ce-with-loadbalancers](icp-ce-with-loadbalancers)
15 |   * Like the `icp-ce-minimal` template, this will deploy a minimal environment, but in this template loadbalancers will also be created. This creates a topology more similar to the `icp-ee` environment, where external loadbalancers are a central part of the network design, but with fewer services and resources active
16 |   * This template is suitable for validation tests and PoCs where external loadbalancer functionality is required
17 | 
18 | - [icp-ee](icp-ee)
19 |   * This template deploys a more robust environment, with the control plane in a high availability configuration
20 |   * By default a separate boot node is provisioned and all SSH communication goes through this node
21 |   * This configuration requires access to ICP Enterprise Edition, typically supplied as a tarball
22 | 
23 | 
24 | Follow the links to these templates for more detailed information about them.
25 | 
--------------------------------------------------------------------------------
/templates/icp-ce-minimal/README.md:
--------------------------------------------------------------------------------
1 | # Terraform ICP IBM Cloud
2 | 
3 | This Terraform example configuration uses the [IBM Cloud provider](https://ibm-cloud.github.io/tf-ibm-docs/index.html) to provision virtual machines on IBM Cloud Infrastructure (SoftLayer)
4 | and [Terraform Module ICP Deploy](https://github.com/ibm-cloud-architecture/terraform-module-icp-deploy) to prepare VSIs and deploy [IBM Cloud Private](https://www.ibm.com/cloud-computing/products/ibm-cloud-private/) version 3.1.0 or later. This Terraform template automates best practices learned from installing ICP on IBM Cloud Infrastructure.
5 | 
6 | ## Deployment overview
7 | This template creates an environment where
8 | - The cluster is deployed directly on the public network and is accessed on the VMs' public IPs
9 | - There are no load balancers, but applications can be accessed via NodePort on the public IP of the proxy node
10 | - Most ICP services are disabled (some can be activated via `terraform.tfvars` settings as described below)
11 | - VM sizes are minimal
12 | - There is no separate boot node
13 | - There is no management node (one can be enabled via `terraform.tfvars` settings as described below)
14 | - There is no Vulnerability Advisor node, and the vulnerability advisor service is disabled by default
15 | 
16 | ## Architecture Diagram
17 | 
18 | ![Architecture](../../static/icp_ce_minimal.png)
19 | 
20 | ## Pre-requisites
21 | 
22 | * Working copy of [Terraform](https://www.terraform.io/intro/getting-started/install.html)
23 | * As of this writing, the IBM Cloud Terraform provider is not in the main Terraform repository and must be installed manually. See [these steps](https://ibm-cloud.github.io/tf-ibm-docs/index.html#using-terraform-with-the-ibm-cloud-provider). The templates have been tested with Terraform version 0.11.7 and the IBM Cloud provider version 0.11.3.
24 | * The template is tested on VSIs based on Ubuntu 16.04.
RHEL is not supported in this automation. 25 | 26 | 27 | ### Using the Terraform templates 28 | 29 | 1. git clone the repository 30 | 31 | 1. Navigate to the template directory `templates/icp-ce-minimal` 32 | 33 | 1. Create a `terraform.tfvars` file to reflect your environment. Please see [variables.tf](variables.tf) and below tables for variable names and descriptions. Here is an example `terraform.tfvars` file: 34 | 35 | 36 | ``` 37 | sl_username = "" 38 | sl_api_key = "" 39 | datacenter = "dal13" 40 | key_name = ["my-ssh-key"] 41 | ``` 42 | 43 | 1. Run `terraform init` to download depenencies (modules and plugins) 44 | 45 | 1. Run `terraform plan` to investigate deployment plan 46 | 47 | 1. Run `terraform apply` to start deployment. 48 | 49 | 50 | ### Automation Notes 51 | 52 | #### What does the automation do 53 | 1. Create the virtual machines as defined in `variables.tf` and `terraform.tfvars` 54 | - Use cloud-init to add a user `icpdeploy` with a randomly generated ssh-key 55 | - Configure a separate hard disk to be used by docker 56 | 2. Create security groups and rules for cluster communication as declared in [security_group.tf](security_group.tf) 57 | 3. Handover to the [icp-deploy](https://github.com/ibm-cloud-architecture/terraform-module-icp-deploy) terraform module as declared in the [icp-deploy.tf](icp-deploy.tf) file 58 | 59 | #### What does the icp deploy module do 60 | 1. It uses the provided ssh key which has been generated for the `icpdeploy` user to ssh from the terraform controller to all cluster nodes to install ICP prerequisites 61 | 2. It generates a new ssh keypair for ICP Boot(master) node to ICP cluster communication and distributes the public key to the cluster nodes. This key is used by the ICP Ansible installer. 62 | 3. It populates the necessary `/etc/hosts` file on the boot node 63 | 4. It generates the ICP cluster hosts file based on information provided in [icp-deploy.tf](icp-deploy.tf) 64 | 5. It generates the ICP cluster `config.yaml` file based on information provided in [icp-deploy.tf](icp-deploy.tf) 65 | 66 | #### Security Groups 67 | 68 | The automation leverages Security Groups to lock down public and private access to the cluster. 69 | 70 | - SSH is allowed to all cluster nodes to ease exploration and investigation 71 | - UDP and TCP port 30000 - 32767 are allowed on proxy node to enable use of [NodePort](https://www.ibm.com/support/knowledgecenter/en/SSBS6K_3.1.0/manage_applications/expose_app.html) 72 | - Inbound communication to the master node is permitted on [ports relevant to the ICP service](https://www.ibm.com/support/knowledgecenter/en/SSBS6K_3.1.0/supported_system_config/required_ports.html) 73 | - All outbound communication is allowed. 74 | - All other communication is only permitted between cluster nodes. 75 | 76 | ### Terraform configuration 77 | 78 | Please see [variables.tf](variables.tf) for additional parameters. 79 | 80 | | name | required | value | 81 | |----------------|------------|--------------| 82 | | `sl_username` | yes | Username for IBM Cloud infrastructure account | 83 | | `sl_api_key` | yes | API Key for IBM Cloud infrastructure account | 84 | | `key_name` | no | Array of SSH keys to add to `root` for all created VSI instances. Note that the automation generates its own SSH keys so these are additional keys that can be used for access | 85 | | `datacenter` | yes | Datacenter to place all objects in | 86 | | `os_reference_code` | yes | OS to install on the VSIs. 
Use the [API](https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest_Block_Device_Template_Group/getVhdImportSoftwareDescriptions.json?objectMask=referenceCode) to determine valid values. Only Ubuntu 16.04 was tested. Note that the boot node OS can be specified separately (defaults to `UBUNTU_16_64` to save licensing costs). | 87 | | `icp_inception_image` | no | The ICP installer image to use. This corresponds to the version of ICP to install. Defaults to 3.1.0 | 88 | | `docker_package_location` | no | The local path to where the IBM-provided docker installation binary is saved. If not specified and using Ubuntu, will install latest `docker-ce` off public repo. | 89 | | `private_network_only` | no | Specify true to remove the cluster from the public network. If public network access is disabled, note that to allow outbound internet access you will require a Gateway Appliance on the VLAN to do Source NAT. Additionally, the automation requires SSH access to the boot node to provision ICP, so a VPN tunnel may be required. The LBaaS for both the master and the control plane will still be provisioned on the public internet, but the cluster nodes will not have public addresses configured. | 90 | | `private_vlan_router_hostname` | no | Private VLAN router to place all VSIs behind. e.g. bcr01a. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. This option should be used when setting `private_network_only` to true along with `private_vlan_number` using a private VLAN that is routed with a Gateway Appliance. | 91 | | `private_vlan_number` | no | Private VLAN number to place all VSIs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. This option should be used when setting `private_network_only` to true along with `private_vlan_router_hostname`, using a private VLAN that is routed with a Gateway Appliance.| 92 | | `public_vlan_router_hostname` | no | Public VLAN router to place all VSIs behind. e.g. fcr01a. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. | 93 | | `public_vlan_number` | no | Public VLAN number to place all VSIs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. | 94 | | `icppassword` | no | ICP administrator password. One will be generated if not set. | 95 | | `deployment` | no | Identifier prefix added to the host names of all your infrastructure resources for organising/naming ease | 96 | 97 | ### Configuration examples 98 | 99 | 1. terraform.tfvars which does not add a SSH key and uses all default values. This is the minimum configuration possible. 100 | 101 | ``` 102 | sl_username = "" 103 | sl_api_key = "" 104 | datacenter = "dal13" 105 | ``` 106 | 107 | 2. terraform.tfvars which adds a SSH key to the root user and uses all default values. 108 | 109 | ``` 110 | sl_username = "" 111 | sl_api_key = "" 112 | datacenter = "dal13" 113 | key_name = ["my-ssh-key"] 114 | ``` 115 | 116 | 3. 
terraform.tfvars which adds a management node and some additional services (metering, monitoring and logging) 117 | 118 | ``` 119 | sl_username = "" 120 | sl_api_key = "" 121 | key_name = ["my-ssh-key"] 122 | 123 | # Disable most management services except metering, monitoring and logging 124 | disabled_management_services = ["istio", "vulnerability-advisor", "storage-glusterfs", "storage-minio", "custom-metrics-adapter", "image-security-enforcement"] 125 | 126 | # Enabling metering, monitoring and logging requires additinal resources, 127 | # so we will enable 1 dedicated management node 128 | mgmt = { 129 | nodes = "1" 130 | } 131 | 132 | ``` 133 | 134 | 4. terraform.tfvars which adds additional worker nodes, a management node and some additional services (metering, monitoring and logging) 135 | 136 | ``` 137 | sl_username = "" 138 | sl_api_key = "" 139 | key_name = ["my-ssh-key"] 140 | 141 | # Disable most management services except metering, monitoring and logging 142 | disabled_management_services = ["istio", "vulnerability-advisor", "storage-glusterfs", "storage-minio", "custom-metrics-adapter", "image-security-enforcement"] 143 | 144 | # Enabling metering, monitoring and logging requires additinal resources, 145 | # so we will enable 1 dedicated management node 146 | mgmt = { 147 | nodes = "1" 148 | } 149 | worker = { 150 | nodes = "6" 151 | } 152 | 153 | ``` 154 | -------------------------------------------------------------------------------- /templates/icp-ce-minimal/camvariables.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "sl_username", 4 | "label": "SoftLayer Username", 5 | "description": "Username for IBM Cloud infrastructure account", 6 | "hidden": false, 7 | "immutable": false, 8 | "required": true, 9 | "secured": false, 10 | "type": "string" 11 | }, 12 | { 13 | "name": "sl_api_key", 14 | "label": "SoftLayer API Key", 15 | "description": "API Key for IBM Cloud infrastructure account", 16 | "hidden": false, 17 | "immutable": false, 18 | "required": true, 19 | "secured": true, 20 | "type": "string" 21 | }, 22 | { 23 | "name": "os_reference_code", 24 | "label": "OS to install on the VSIs", 25 | "description": "OS to install on the VSIs. Use the API to determine valid values. 
Only Ubuntu 16.04 was tested", 26 | "default": "UBUNTU_16_64", 27 | "hidden": false, 28 | "immutable": false, 29 | "required": true, 30 | "secured": false, 31 | "type": "string" 32 | }, 33 | { 34 | "name": "deployment", 35 | "label": "ICP deployment Name", 36 | "description": "Prefix of names for IBM Cloud ICP cluster resources; The name prefix must be 1-52 alphanumeric characters and dash", 37 | "default": "icp", 38 | "hidden": false, 39 | "immutable": false, 40 | "required": false, 41 | "secured": false, 42 | "type": "string", 43 | "regex": "^[A-Za-z0-9-]{1,52}$" 44 | }, 45 | { 46 | "name": "key_name", 47 | "label": "Array of SSH keys for created resources", 48 | "description": "Array of SSH keys to add to root for all created VSI instances", 49 | "default": [], 50 | "hidden": false, 51 | "immutable": false, 52 | "required": false, 53 | "secured": false, 54 | "type": "list" 55 | }, 56 | { 57 | "name": "datacenter", 58 | "label": "SoftLayer Datacenter", 59 | "description": "SoftLayer Datacenter to deploy your resources to", 60 | "default": "mil01", 61 | "hidden": false, 62 | "immutable": false, 63 | "required": true, 64 | "secured": false, 65 | "type": "string" 66 | }, 67 | { 68 | "name": "domain", 69 | "label": "Domain to be used on the VMs", 70 | "description": "Specify domain name to be used for linux customization on the VMs, or leave blank to use .icp", 71 | "default": "icp.com", 72 | "hidden": false, 73 | "immutable": false, 74 | "required": false, 75 | "secured": false, 76 | "type": "string" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /templates/icp-ce-minimal/icp-deploy.tf: -------------------------------------------------------------------------------- 1 | ################################## 2 | ### Deploy ICP to cluster 3 | ################################## 4 | module "icpprovision" { 5 | source = "github.com/ibm-cloud-architecture/terraform-module-icp-deploy.git?ref=2.3.5" 6 | 7 | # Provide IP addresses for boot, master, mgmt, va, proxy and workers 8 | boot-node = "${ibm_compute_vm_instance.icp-master.ipv4_address}" 9 | icp-host-groups = { 10 | master = ["${ibm_compute_vm_instance.icp-master.*.ipv4_address}"] 11 | proxy = ["${ibm_compute_vm_instance.icp-proxy.*.ipv4_address}"] 12 | worker = ["${ibm_compute_vm_instance.icp-worker.*.ipv4_address}"] 13 | management = ["${ibm_compute_vm_instance.icp-mgmt.*.ipv4_address}"] 14 | va = ["${ibm_compute_vm_instance.icp-va.*.ipv4_address}"] 15 | } 16 | 17 | # Provide desired ICP version to provision 18 | icp-version = "${var.icp_inception_image}" 19 | 20 | /* Workaround for terraform issue #10857 21 | When this is fixed, we can work this out automatically */ 22 | cluster_size = "${var.master["nodes"] + var.worker["nodes"] + var.proxy["nodes"] + var.mgmt["nodes"] + var.va["nodes"]}" 23 | 24 | ################################################################################################################################### 25 | ## You can feed in arbitrary configuration items in the icp_configuration map. 
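   | ## For example, an entry such as
   | ##   "default_admin_user" = "admin"
   | ## is passed straight through to the generated config.yaml.
   | ## (Illustrative only -- verify any key against the documentation linked below before use.)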
26 | ## Available configuration items availble from https://www.ibm.com/support/knowledgecenter/SSBS6K_3.1.0/installing/config_yaml.html 27 | icp_configuration = { 28 | "network_cidr" = "${var.network_cidr}" 29 | "service_cluster_ip_range" = "${var.service_network_cidr}" 30 | # "cluster_CA_domain" = "${ibm_lbaas.master-lbaas.vip}" 31 | "cluster_name" = "${var.deployment}" 32 | "calico_ip_autodetection_method" = "interface=eth1" 33 | 34 | # An admin password will be generated if not supplied in terraform.tfvars 35 | "default_admin_password" = "${local.icppassword}" 36 | 37 | # This is the list of disabled management services 38 | "management_services" = "${local.disabled_management_services}" 39 | } 40 | 41 | # We will let terraform generate a new ssh keypair 42 | # for boot master to communicate with worker and proxy nodes 43 | # during ICP deployment 44 | generate_key = true 45 | 46 | # SSH user and key for terraform to connect to newly created VMs 47 | # ssh_key is the private key corresponding to the public assumed to be included in the template 48 | ssh_user = "icpdeploy" 49 | ssh_key_base64 = "${base64encode(tls_private_key.installkey.private_key_pem)}" 50 | ssh_agent = false 51 | 52 | } 53 | 54 | output "icp_console_host" { 55 | value = "${element(ibm_compute_vm_instance.icp-master.*.ipv4_address, 0)}" 56 | } 57 | 58 | output "icp_console_url" { 59 | value = "https://${element(ibm_compute_vm_instance.icp-master.*.ipv4_address, 0)}:8443" 60 | } 61 | 62 | output "icp_proxy_host" { 63 | value = "${element(ibm_compute_vm_instance.icp-proxy.*.ipv4_address, 0)}" 64 | } 65 | 66 | output "kubernetes_api_url" { 67 | value = "https://${element(ibm_compute_vm_instance.icp-master.*.ipv4_address, 0)}:8001" 68 | } 69 | 70 | output "icp_admin_username" { 71 | value = "admin" 72 | } 73 | 74 | output "icp_admin_password" { 75 | value = "${local.icppassword}" 76 | } 77 | -------------------------------------------------------------------------------- /templates/icp-ce-minimal/instances.tf: -------------------------------------------------------------------------------- 1 | ######################################################### 2 | ## Get VLAN IDs if we need to provision to specific VLANs 3 | ######################################################## 4 | data "ibm_network_vlan" "private_vlan" { 5 | count = "${var.private_vlan_router_hostname != "" ? 1 : 0}" 6 | router_hostname = "${var.private_vlan_router_hostname}.${var.datacenter}" 7 | number = "${var.private_vlan_number}" 8 | } 9 | 10 | data "ibm_network_vlan" "public_vlan" { 11 | count = "${var.private_network_only != true && var.public_vlan_router_hostname != "" ? 1 : 0}" 12 | router_hostname = "${var.public_vlan_router_hostname}.${var.datacenter}" 13 | number = "${var.public_vlan_number}" 14 | } 15 | 16 | locals { 17 | private_vlan_id = "${element(concat(data.ibm_network_vlan.private_vlan.*.id, list("-1")), 0) }" 18 | public_vlan_id = "${element(concat(data.ibm_network_vlan.public_vlan.*.id, list("-1")), 0)}" 19 | } 20 | 21 | ############################################## 22 | ## Provision boot node 23 | ############################################## 24 | 25 | resource "ibm_compute_vm_instance" "icp-boot" { 26 | count = "${var.boot["nodes"]}" 27 | hostname = "${var.deployment}-boot-${random_id.clusterid.hex}" 28 | domain = "${var.domain != "" ? 
var.domain : "${var.deployment}.icp"}" 29 | 30 | os_reference_code = "${var.os_reference_code}" 31 | 32 | datacenter = "${var.datacenter}" 33 | 34 | cores = "${var.boot["cpu_cores"]}" 35 | memory = "${var.boot["memory"]}" 36 | 37 | network_speed = "${var.boot["network_speed"]}" 38 | 39 | local_disk = "${var.boot["local_disk"]}" 40 | disks = [ 41 | "${var.boot["disk_size"]}", 42 | "${var.boot["docker_vol_size"]}" 43 | ] 44 | 45 | tags = [ 46 | "${var.deployment}", 47 | "icp-boot", 48 | "${random_id.clusterid.hex}" 49 | ] 50 | 51 | hourly_billing = "${var.boot["hourly_billing"]}" 52 | private_network_only = "${var.private_network_only}" 53 | public_vlan_id = "${local.public_vlan_id}" 54 | private_vlan_id = "${local.private_vlan_id}" 55 | 56 | public_security_group_ids = ["${compact(concat( 57 | ibm_security_group.cluster_public.*.id, 58 | list("${var.private_network_only != true ? ibm_security_group.boot_node_public.id : "" }") 59 | ))}"] 60 | 61 | private_security_group_ids = ["${compact(concat( 62 | list("${ibm_security_group.cluster_private.id}"), 63 | ibm_security_group.boot_node_public.*.id 64 | ))}"] 65 | 66 | # Permit an ssh loging for the key owner. 67 | # You can have multiple keys defined. 68 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 69 | 70 | user_metadata = < /etc/docker/daemon.json < IP Management > VLANs in the portal. Leave blank to let the system choose. This option should be used when setting `private_network_only` to true along with `private_vlan_number` using a private VLAN that is routed with a Gateway Appliance. | 103 | | `private_vlan_number` | no | Private VLAN number to place all VSIs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. This option should be used when setting `private_network_only` to true along with `private_vlan_router_hostname`, using a private VLAN that is routed with a Gateway Appliance.| 104 | | `public_vlan_router_hostname` | no | Public VLAN router to place all VSIs behind. e.g. fcr01a. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. | 105 | | `public_vlan_number` | no | Public VLAN number to place all VSIs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. | 106 | | `icppassword` | no | ICP administrator password. One will be generated if not set. | 107 | | `deployment` | no | Identifier prefix added to the host names of all your infrastructure resources for organising/naming ease | 108 | 109 | ### Configuration examples 110 | 111 | 1. terraform.tfvars which does not add a SSH key and uses all default values. This is the minimum configuration possible. 112 | 113 | ``` 114 | sl_username = "" 115 | sl_api_key = "" 116 | datacenter = "dal13" 117 | ``` 118 | 119 | 2. terraform.tfvars which adds a SSH key to the root user and uses all default values. 120 | 121 | ``` 122 | sl_username = "" 123 | sl_api_key = "" 124 | datacenter = "dal13" 125 | key_name = ["my-ssh-key"] 126 | ``` 127 | 128 | 3. 
terraform.tfvars which adds a management node and some additional services (metering, monitoring and logging) 129 | 130 | ``` 131 | sl_username = "" 132 | sl_api_key = "" 133 | key_name = ["my-ssh-key"] 134 | 135 | # Disable most management services except metering, monitoring and logging 136 | disabled_management_services = ["istio", "vulnerability-advisor", "storage-glusterfs", "storage-minio", "custom-metrics-adapter", "image-security-enforcement"] 137 | 138 | # Enabling metering, monitoring and logging requires additinal resources, 139 | # so we will enable 1 dedicated management node 140 | mgmt = { 141 | nodes = "1" 142 | } 143 | 144 | ``` 145 | 146 | 4. terraform.tfvars which adds additional worker nodes, a management node and some additional services (metering, monitoring and logging) 147 | 148 | ``` 149 | sl_username = "" 150 | sl_api_key = "" 151 | key_name = ["my-ssh-key"] 152 | 153 | # Disable most management services except metering, monitoring and logging 154 | disabled_management_services = ["istio", "vulnerability-advisor", "storage-glusterfs", "storage-minio", "custom-metrics-adapter", "image-security-enforcement"] 155 | 156 | # Enabling metering, monitoring and logging requires additinal resources, 157 | # so we will enable 1 dedicated management node 158 | mgmt = { 159 | nodes = "1" 160 | } 161 | worker = { 162 | nodes = "6" 163 | } 164 | 165 | ``` 166 | -------------------------------------------------------------------------------- /templates/icp-ce-with-loadbalancers/camvariables.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "sl_username", 4 | "label": "SoftLayer Username", 5 | "description": "Username for IBM Cloud infrastructure account", 6 | "hidden": false, 7 | "immutable": false, 8 | "required": true, 9 | "secured": false, 10 | "type": "string" 11 | }, 12 | { 13 | "name": "sl_api_key", 14 | "label": "SoftLayer API Key", 15 | "description": "API Key for IBM Cloud infrastructure account", 16 | "hidden": false, 17 | "immutable": false, 18 | "required": true, 19 | "secured": true, 20 | "type": "string" 21 | }, 22 | { 23 | "name": "os_reference_code", 24 | "label": "OS to install on the VSIs", 25 | "description": "OS to install on the VSIs. Use the API to determine valid values. 
Only Ubuntu 16.04 was tested", 26 | "default": "UBUNTU_16_64", 27 | "hidden": false, 28 | "immutable": false, 29 | "required": true, 30 | "secured": false, 31 | "type": "string" 32 | }, 33 | { 34 | "name": "deployment", 35 | "label": "ICP deployment Name", 36 | "description": "Prefix of names for IBM Cloud ICP cluster resources; The name prefix must be 1-52 alphanumeric characters and dash", 37 | "default": "icp", 38 | "hidden": false, 39 | "immutable": false, 40 | "required": false, 41 | "secured": false, 42 | "type": "string", 43 | "regex": "^[A-Za-z0-9-]{1,52}$" 44 | }, 45 | { 46 | "name": "key_name", 47 | "label": "Array of SSH keys for created resources", 48 | "description": "Array of SSH keys to add to root for all created VSI instances", 49 | "default": [], 50 | "hidden": false, 51 | "immutable": false, 52 | "required": false, 53 | "secured": false, 54 | "type": "list" 55 | }, 56 | { 57 | "name": "datacenter", 58 | "label": "SoftLayer Datacenter", 59 | "description": "SoftLayer Datacenter to deploy your resources to", 60 | "default": "mil01", 61 | "hidden": false, 62 | "immutable": false, 63 | "required": true, 64 | "secured": false, 65 | "type": "string" 66 | }, 67 | { 68 | "name": "domain", 69 | "label": "Domain to be used on the VMs", 70 | "description": "Specify domain name to be used for linux customization on the VMs, or leave blank to use .icp", 71 | "default": "icp.com", 72 | "hidden": false, 73 | "immutable": false, 74 | "required": false, 75 | "secured": false, 76 | "type": "string" 77 | } 78 | ] 79 | -------------------------------------------------------------------------------- /templates/icp-ce-with-loadbalancers/icp-deploy.tf: -------------------------------------------------------------------------------- 1 | ################################## 2 | ### Deploy ICP to cluster 3 | ################################## 4 | module "icpprovision" { 5 | source = "github.com/ibm-cloud-architecture/terraform-module-icp-deploy.git?ref=2.3.5" 6 | 7 | # Provide IP addresses for boot, master, mgmt, va, proxy and workers 8 | boot-node = "${ibm_compute_vm_instance.icp-master.ipv4_address_private}" 9 | bastion_host = "${ibm_compute_vm_instance.icp-master.ipv4_address}" 10 | icp-host-groups = { 11 | master = ["${ibm_compute_vm_instance.icp-master.*.ipv4_address_private}"] 12 | proxy = ["${ibm_compute_vm_instance.icp-proxy.*.ipv4_address_private}"] 13 | worker = ["${ibm_compute_vm_instance.icp-worker.*.ipv4_address_private}"] 14 | management = ["${ibm_compute_vm_instance.icp-mgmt.*.ipv4_address_private}"] 15 | va = ["${ibm_compute_vm_instance.icp-va.*.ipv4_address_private}"] 16 | } 17 | 18 | # Provide desired ICP version to provision 19 | icp-version = "${var.icp_inception_image}" 20 | 21 | /* Workaround for terraform issue #10857 22 | When this is fixed, we can work this out automatically */ 23 | cluster_size = "${var.master["nodes"] + var.worker["nodes"] + var.proxy["nodes"] + var.mgmt["nodes"] + var.va["nodes"]}" 24 | 25 | ################################################################################################################################### 26 | ## You can feed in arbitrary configuration items in the icp_configuration map. 
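   | ## For example, an entry such as
   | ##   "calico_tunnel_mtu" = "1430"
   | ## is passed straight through to the generated config.yaml.
   | ## (Illustrative only -- verify any key against the documentation linked below before use.)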
27 | ## Available configuration items availble from https://www.ibm.com/support/knowledgecenter/SSBS6K_3.1.0/installing/config_yaml.html 28 | icp_configuration = { 29 | "network_cidr" = "${var.network_cidr}" 30 | "service_cluster_ip_range" = "${var.service_network_cidr}" 31 | "cluster_lb_address" = "${ibm_lbaas.master-lbaas.vip}" 32 | "proxy_lb_address" = "${ibm_lbaas.proxy-lbaas.vip}" 33 | "cluster_CA_domain" = "${ibm_lbaas.master-lbaas.vip}" 34 | "cluster_name" = "${var.deployment}" 35 | "calico_ip_autodetection_method" = "interface=eth0" 36 | 37 | # An admin password will be generated if not supplied in terraform.tfvars 38 | "default_admin_password" = "${local.icppassword}" 39 | 40 | # This is the list of disabled management services 41 | "management_services" = "${local.disabled_management_services}" 42 | } 43 | 44 | # We will let terraform generate a new ssh keypair 45 | # for boot master to communicate with worker and proxy nodes 46 | # during ICP deployment 47 | generate_key = true 48 | 49 | # SSH user and key for terraform to connect to newly created VMs 50 | # ssh_key is the private key corresponding to the public assumed to be included in the template 51 | ssh_user = "icpdeploy" 52 | ssh_key_base64 = "${base64encode(tls_private_key.installkey.private_key_pem)}" 53 | ssh_agent = false 54 | } 55 | 56 | 57 | output "icp_console_host" { 58 | value = "${ibm_lbaas.master-lbaas.vip}" 59 | } 60 | 61 | output "icp_proxy_host" { 62 | value = "${ibm_lbaas.proxy-lbaas.vip}" 63 | } 64 | 65 | output "icp_console_url" { 66 | value = "https://${ibm_lbaas.master-lbaas.vip}:8443" 67 | } 68 | 69 | output "icp_registry_url" { 70 | value = "${ibm_lbaas.master-lbaas.vip}:8500" 71 | } 72 | 73 | output "kubernetes_api_url" { 74 | value = "https://${ibm_lbaas.master-lbaas.vip}:8001" 75 | } 76 | 77 | output "icp_admin_username" { 78 | value = "admin" 79 | } 80 | 81 | output "icp_admin_password" { 82 | value = "${local.icppassword}" 83 | } 84 | -------------------------------------------------------------------------------- /templates/icp-ce-with-loadbalancers/instances.tf: -------------------------------------------------------------------------------- 1 | ######################################################### 2 | ## Get VLAN IDs if we need to provision to specific VLANs 3 | ######################################################## 4 | data "ibm_network_vlan" "private_vlan" { 5 | count = "${var.private_vlan_router_hostname != "" ? 1 : 0}" 6 | router_hostname = "${var.private_vlan_router_hostname}.${var.datacenter}" 7 | number = "${var.private_vlan_number}" 8 | } 9 | 10 | data "ibm_network_vlan" "public_vlan" { 11 | count = "${var.private_network_only != true && var.public_vlan_router_hostname != "" ? 1 : 0}" 12 | router_hostname = "${var.public_vlan_router_hostname}.${var.datacenter}" 13 | number = "${var.public_vlan_number}" 14 | } 15 | 16 | locals { 17 | private_vlan_id = "${element(concat(data.ibm_network_vlan.private_vlan.*.id, list("-1")), 0) }" 18 | public_vlan_id = "${element(concat(data.ibm_network_vlan.public_vlan.*.id, list("-1")), 0)}" 19 | } 20 | 21 | ############################################## 22 | ## Provision boot node 23 | ############################################## 24 | 25 | resource "ibm_compute_vm_instance" "icp-boot" { 26 | count = "${var.boot["nodes"]}" 27 | hostname = "${var.deployment}-boot-${random_id.clusterid.hex}" 28 | domain = "${var.domain != "" ? 
var.domain : "${var.deployment}.icp"}" 29 | 30 | os_reference_code = "${var.os_reference_code}" 31 | 32 | datacenter = "${var.datacenter}" 33 | 34 | cores = "${var.boot["cpu_cores"]}" 35 | memory = "${var.boot["memory"]}" 36 | 37 | network_speed = "${var.boot["network_speed"]}" 38 | 39 | local_disk = "${var.boot["local_disk"]}" 40 | disks = [ 41 | "${var.boot["disk_size"]}", 42 | "${var.boot["docker_vol_size"]}" 43 | ] 44 | 45 | tags = [ 46 | "${var.deployment}", 47 | "icp-boot", 48 | "${random_id.clusterid.hex}" 49 | ] 50 | 51 | hourly_billing = "${var.boot["hourly_billing"]}" 52 | private_network_only = "${var.private_network_only}" 53 | public_vlan_id = "${local.public_vlan_id}" 54 | private_vlan_id = "${local.private_vlan_id}" 55 | 56 | public_security_group_ids = ["${compact(concat( 57 | ibm_security_group.cluster_public.*.id, 58 | list("${var.private_network_only != true ? ibm_security_group.boot_node_public.id : "" }") 59 | ))}"] 60 | 61 | private_security_group_ids = ["${compact(concat( 62 | list("${ibm_security_group.cluster_private.id}"), 63 | ibm_security_group.boot_node_public.*.id 64 | ))}"] 65 | 66 | # Permit an ssh loging for the key owner. 67 | # You can have multiple keys defined. 68 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 69 | 70 | user_metadata = < /etc/docker/daemon.json < IP Management > VLANs in the portal. Leave blank to let the system choose. This option should be used when setting `private_network_only` to true along with `private_vlan_number` using a private VLAN that is routed with a Gateway Appliance. | 132 | | `private_vlan_number` | no | Private VLAN number to place all VSIs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. This option should be used when setting `private_network_only` to true along with `private_vlan_router_hostname`, using a private VLAN that is routed with a Gateway Appliance.| 133 | | `public_vlan_router_hostname` | no | Public VLAN router to place all VSIs behind. e.g. fcr01a. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. | 134 | | `public_vlan_number` | no | Public VLAN number to place all VSIs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose. | 135 | | `icppassword` | no | ICP administrator password. One will be generated if not set. | 136 | | `deployment` | no | Identifier prefix added to the host names of all your infrastructure resources for organising/naming ease | 137 | 138 | 139 | ### Setup IBM Cloud File Storage to Host ICP Binaries 140 | 141 | #### Create File Storage 142 | 143 | 1. From the [IBM Cloud Console](https://console.bluemix.net), select [Infrastructure](https://control.bluemix.net) from the left sidebar menu. 144 | 2. In the IBM Cloud Infrastructure page, expand the **Storage** dropdown and select [File Storage](https://control.bluemix.net/storage/file). 145 | 3. Select [Order File Storage](https://control.bluemix.net/storage/order?storageType=FILE) from the upper-right side of the window. 146 | 4. Select the datacenter which you will deploy your IBM Cloud Private cluster into, a minimum of at least 20GB of storage size, and the desired amount of IOPS (_generally 0.25 or 2 are sufficient_). Then click **Place Order**. 147 | 5. Once created, click on your File Storage instance from the list shown at https://control.bluemix.net/storage/file. 148 | 6. Make note of the **Mount Point** field as this will be used later on. 149 | 7. 
Click on the **Actions** dropdown from the upper-right and select **Authorize Host**. 150 | 8. You can authorize specific devices, subnets or IP addresses to communicate with your file storage instance. If you will have a number of systems deploying Terraform-based ICP installations, authorizing by Subnet is the preferred option. If you are only doing one or two installations, authorizing by specific Devices can be the more secure option. Determine your preferred method here and authorize hosts so that your jump server VM will be able to communicate with the file storage. 151 | 9. Click **Submit**. 152 | 153 | #### Create Jump Server for file uploads 154 | 155 | You will now need to create jump server to upload the initial files into IBM Cloud and then onto the network-attached IBM Cloud File Storage 156 | 157 | 1. Go to the [Device List](https://control.bluemix.net/devices) and click [Order Devices](https://console.bluemix.net/catalog/). 158 | 2. Select to create a **Virtual Server** and then a **Public Virtual Server**. 159 | 3. Select a Location that matches as closely as possible to the Datacenter selected for your previously-created File Storage. 160 | 4. The **Balanced B1.2x4** profile is the minimum recommended option for the jump server in this case. 161 | 5. You will want to add an SSH Key to the system to login later on. 162 | 6. Ubuntu is the preferred option for Linux distributions, but others are acceptable as well. However, licensing may be an issue with other Linux distributions. 163 | 7. Select **100 GB** of SAN for the **Attached Storage Disks**. 164 | 8. For the **Private Security Group** options, you will want to ensure that **allow_ssh**, **allow_outbound**, and **allow_all** are selected for necessary access to internal Linux distribution update mirrors. 165 | 9. For the **Public Security Group** option, you will want to check **allow_ssh** to copy files into the system. 166 | 9. Click **Provision**. 167 | 168 | #### Copy Tarball into IBM Cloud 169 | 170 | 1. You will need to download the appropriate version of the IBM Cloud Private binaries, either externally from Passport Advantage or internally from Extreme Leverage. Once you have downloaded them, the files named `ibm-cloud-private-x86_64-3.1.0.tar.gz` and `icp-docker-18.03.1_x86_64` (or specific to your desired version to be installed) can be placed in the `icp-install` directory. 171 | 2. Once the Jump Server has been provisioned, verify that you can SSH into the system using the specified SSH key at instance provisioning time and the associated username (generally **root**). 172 | 2. Once you have verified SSH signin, now copy the files you have in the `icp-install` directory to the remote machine via `scp`. Note you will need to create the remote directory that you specify in the `scp` command. 173 | `$ scp -r -i ~/.ssh/your_ssh_key icp-install root@{Jump_Server_IP_Address}:/root/icp-install` 174 | 175 | #### Mount and Copy to File Storage 176 | 177 | Once the files have been copied from your local system to your jump server, you can now mount and copy the files into your file storage. 178 | 179 | 1. On the Jump Server, ensure that the necessary packages are installed to support NFS mounts: 180 | 1. For Ubuntu servers, run `sudo apt install -y nfs-common`. 181 | 2. For RHEL servers, run `sudo yum -y install nfs-utils`. 182 | 2. Create a mount point directory on the system. This is generally done underneath the `/mnt` parent directory, similar to `mkdir /mnt/filestorage`. 183 | 3. 
Recalling the **Mount Point** from the earlier File Storage details screen, you can now mount the file storage to the jump server via `mount {File Storage Mount Point} /mnt/filestorage`. 184 | 4. Validate the mount succeeded by running a simple `touch /mnt/filestorage/test.txt` command. 185 | 5. Create any neccessary sub-directories in `/mnt/filestorage` for how you would like to arrange your stored binaries. 186 | 6. Copy the files into the mounted directory. Due to the nature of the large files and across network distances, the normal Unix copy command, `cp`, isn't the most preferred option. Instead you can use `rsync` to see file status as items are copied over. You can run this command similar to the normal copy, but with the benefit of receiving progress indicator updates. 187 | `$ rsync -ahr --progress /root/icp-install /mnt/filestorage/icp-files` 188 | 189 | #### Update terraform.tfvars 190 | 191 | Once the files have been copied into the IBM Cloud File Storage instance, you will need to update your `terraform.tfvars` file to point to the remotely-stored binaries. 192 | 193 | 1. In `terraform.tfvars`, create a line similar to the following: 194 | `image_location = "nfs:{File Storage Mount Point}/{your created subdirectories}/ibm-cloud-private-x86_64-2.1.0.3.tar.gz"` 195 | 2. Now you are ready to run your `terraform plan` & `terraform apply` commands! 196 | 3. If you will no longer need to copy files into the IBM Cloud File Storage instance, your jump server can be destroyed. 197 | 198 | 199 | ### Configuration examples 200 | 201 | 1. terraform.tfvars which does not add a SSH key and uses all default values. This is the minimum configuration possible. 202 | 203 | ``` 204 | sl_username = "" 205 | sl_api_key = "" 206 | datacenter = "dal13" 207 | icp_inception_image = "ibmcom/icp-inception-amd64:3.1.0-ee" 208 | image_location = "nfs:fsf-dal1301i-fz.adn.networklayer.com:/IBM02SVnnnnn_n/data01/files/icp/ibm-cloud-private-x86_64-3.1.0.tar.gz" 209 | ``` 210 | 211 | 2. terraform.tfvars which adds a SSH key to the root user and uses all default values. 212 | 213 | ``` 214 | sl_username = "" 215 | sl_api_key = "" 216 | datacenter = "dal13" 217 | key_name = ["my-ssh-key"] 218 | 219 | icp_inception_image = "ibmcom/icp-inception-amd64:3.1.0-ee" 220 | image_location = "nfs:fsf-dal1301i-fz.adn.networklayer.com:/IBM02SVnnnnn_n/data01/files/icp/ibm-cloud-private-x86_64-3.1.0.tar.gz" 221 | ``` 222 | 223 | 3. 
199 | ### Configuration examples 200 | 201 | 1. terraform.tfvars which does not add an SSH key and uses all default values. This is the minimum configuration possible. 202 | 203 | ``` 204 | sl_username = "" 205 | sl_api_key = "" 206 | datacenter = "dal13" 207 | icp_inception_image = "ibmcom/icp-inception-amd64:3.1.0-ee" 208 | image_location = "nfs:fsf-dal1301i-fz.adn.networklayer.com:/IBM02SVnnnnn_n/data01/files/icp/ibm-cloud-private-x86_64-3.1.0.tar.gz" 209 | ``` 210 | 211 | 2. terraform.tfvars which adds an SSH key to the root user and uses all default values. 212 | 213 | ``` 214 | sl_username = "" 215 | sl_api_key = "" 216 | datacenter = "dal13" 217 | key_name = ["my-ssh-key"] 218 | 219 | icp_inception_image = "ibmcom/icp-inception-amd64:3.1.0-ee" 220 | image_location = "nfs:fsf-dal1301i-fz.adn.networklayer.com:/IBM02SVnnnnn_n/data01/files/icp/ibm-cloud-private-x86_64-3.1.0.tar.gz" 221 | ``` 222 | 223 | 3. terraform.tfvars with Vulnerability Advisor enabled. 224 | 225 | ``` 226 | sl_username = "" 227 | sl_api_key = "" 228 | datacenter = "dal13" 229 | key_name = ["my-ssh-key"] 230 | 231 | icp_inception_image = "ibmcom/icp-inception-amd64:3.1.0-ee" 232 | image_location = "nfs:fsf-dal1301i-fz.adn.networklayer.com:/IBM02SVnnnnn_n/data01/files/icp/ibm-cloud-private-x86_64-3.1.0.tar.gz" 233 | 234 | # Disable the management services we don't want; vulnerability-advisor is removed from the default disabled list 235 | disabled_management_services = ["istio", "storage-glusterfs", "storage-minio"] 236 | 237 | # Enable a dedicated VA node for Vulnerability Advisor 238 | va = { 239 | nodes = "1" 240 | } 241 | 242 | ``` 243 | -------------------------------------------------------------------------------- /templates/icp-ee/camvariables.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "sl_username", 4 | "label": "SoftLayer Username", 5 | "description": "Username for IBM Cloud infrastructure account", 6 | "hidden": false, 7 | "immutable": false, 8 | "required": true, 9 | "secured": false, 10 | "type": "string" 11 | }, 12 | { 13 | "name": "sl_api_key", 14 | "label": "SoftLayer API Key", 15 | "description": "API Key for IBM Cloud infrastructure account", 16 | "hidden": false, 17 | "immutable": false, 18 | "required": true, 19 | "secured": true, 20 | "type": "string" 21 | }, 22 | { 23 | "name": "icp_inception_image", 24 | "label": "ICP inception image", 25 | "description": "The ICP installer image to use. This corresponds to the version of ICP to install", 26 | "default": "ibmcom/icp-inception-amd64:3.1.0-ee", 27 | "hidden": false, 28 | "immutable": false, 29 | "required": true, 30 | "secured": false, 31 | "type": "string" 32 | }, 33 | { 34 | "name": "image_location", 35 | "label": "ICP image location", 36 | "description": "URI for the ICP image package location", 37 | "hidden": false, 38 | "immutable": false, 39 | "required": false, 40 | "secured": false, 41 | "type": "string" 42 | }, 43 | { 44 | "name": "os_reference_code", 45 | "label": "OS to install on the VSIs", 46 | "description": "OS to install on the VSIs. Use the API to determine valid values. 
Only Ubuntu 16.04 was tested", 47 | "default": "UBUNTU_16_64", 48 | "hidden": false, 49 | "immutable": false, 50 | "required": true, 51 | "secured": false, 52 | "type": "string" 53 | }, 54 | { 55 | "name": "deployment", 56 | "label": "ICP deployment Name", 57 | "description": "Prefix of names for IBM Cloud ICP cluster resources; The name prefix must be 1-52 alphanumeric characters and dash", 58 | "default": "icp", 59 | "hidden": false, 60 | "immutable": false, 61 | "required": false, 62 | "secured": false, 63 | "type": "string", 64 | "regex": "^[A-Za-z0-9-]{1,52}$" 65 | }, 66 | { 67 | "name": "key_name", 68 | "label": "Array of SSH keys for created resources", 69 | "description": "Array of SSH keys to add to root for all created VSI instances", 70 | "default": [], 71 | "hidden": false, 72 | "immutable": false, 73 | "required": false, 74 | "secured": false, 75 | "type": "list" 76 | }, 77 | { 78 | "name": "datacenter", 79 | "label": "SoftLayer Datacenter", 80 | "description": "SoftLayer Datacenter to deploy your resources to", 81 | "default": "mil01", 82 | "hidden": false, 83 | "immutable": false, 84 | "required": true, 85 | "secured": false, 86 | "type": "string" 87 | }, 88 | { 89 | "name": "domain", 90 | "label": "Domain to be used on the VMs", 91 | "description": "Specify domain name to be used for linux customization on the VMs, or leave blank to use .icp", 92 | "default": "icp.com", 93 | "hidden": false, 94 | "immutable": false, 95 | "required": false, 96 | "secured": false, 97 | "type": "string" 98 | } 99 | ] 100 | -------------------------------------------------------------------------------- /templates/icp-ee/cfc-certs/README.md: -------------------------------------------------------------------------------- 1 | add TLS certificates here 2 | 3 | * icp-router.crt 4 | * icp-router.key 5 | -------------------------------------------------------------------------------- /templates/icp-ee/file_storage.tf: -------------------------------------------------------------------------------- 1 | # /var/lib/registry 2 | resource "ibm_storage_file" "fs_registry" { 3 | type = "${var.fs_registry["type"]}" 4 | datacenter = "${var.datacenter}" 5 | capacity = "${var.fs_registry["size"]}" 6 | iops = "${var.fs_registry["iops"]}" 7 | hourly_billing = "${var.fs_registry["hourly_billing"]}" 8 | 9 | tags = [ 10 | "${var.deployment}", 11 | "fs-registry", 12 | "${random_id.clusterid.hex}" 13 | ] 14 | 15 | notes = "/var/lib/registry for ICP cluster ${random_id.clusterid.hex}" 16 | } 17 | 18 | #/var/lib/icp/audit 19 | resource "ibm_storage_file" "fs_audit" { 20 | type = "${var.fs_audit["type"]}" 21 | datacenter = "${var.datacenter}" 22 | capacity = "${var.fs_audit["size"]}" 23 | iops = "${var.fs_audit["iops"]}" 24 | hourly_billing = "${var.fs_audit["hourly_billing"]}" 25 | 26 | tags = [ 27 | "${var.deployment}", 28 | "fs-audit", 29 | "${random_id.clusterid.hex}" 30 | ] 31 | 32 | notes = "/var/lib/icp/audit for ICP cluster ${random_id.clusterid.hex}" 33 | } 34 | -------------------------------------------------------------------------------- /templates/icp-ee/icp-deploy.tf: -------------------------------------------------------------------------------- 1 | ################################## 2 | ### Deploy ICP to cluster 3 | ################################## 4 | module "icpprovision" { 5 | source = "github.com/ibm-cloud-architecture/terraform-module-icp-deploy.git?ref=3.1.1" 6 | 7 | # Provide IP addresses for boot, master, mgmt, va, proxy and workers 8 | boot-node = 
"${ibm_compute_vm_instance.icp-boot.ipv4_address_private}" 9 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 10 | icp-host-groups = { 11 | master = ["${ibm_compute_vm_instance.icp-master.*.ipv4_address_private}"] 12 | proxy = "${slice(concat(ibm_compute_vm_instance.icp-proxy.*.ipv4_address_private, 13 | ibm_compute_vm_instance.icp-master.*.ipv4_address_private), 14 | var.proxy["nodes"] > 0 ? 0 : length(ibm_compute_vm_instance.icp-proxy.*.ipv4_address_private), 15 | var.proxy["nodes"] > 0 ? length(ibm_compute_vm_instance.icp-proxy.*.ipv4_address_private) : 16 | length(ibm_compute_vm_instance.icp-proxy.*.ipv4_address_private) + 17 | length(ibm_compute_vm_instance.icp-master.*.ipv4_address_private))}" 18 | 19 | worker = ["${ibm_compute_vm_instance.icp-worker.*.ipv4_address_private}"] 20 | 21 | // make the master nodes managements nodes if we don't have any specified 22 | management = "${slice(concat(ibm_compute_vm_instance.icp-mgmt.*.ipv4_address_private, 23 | ibm_compute_vm_instance.icp-master.*.ipv4_address_private), 24 | var.mgmt["nodes"] > 0 ? 0 : length(ibm_compute_vm_instance.icp-mgmt.*.ipv4_address_private), 25 | var.mgmt["nodes"] > 0 ? length(ibm_compute_vm_instance.icp-mgmt.*.ipv4_address_private) : 26 | length(ibm_compute_vm_instance.icp-mgmt.*.ipv4_address_private) + 27 | length(ibm_compute_vm_instance.icp-master.*.ipv4_address_private))}" 28 | 29 | va = ["${ibm_compute_vm_instance.icp-va.*.ipv4_address_private}"] 30 | } 31 | 32 | icp-inception = "${local.icp-version}" 33 | 34 | image_location = "${var.image_location}" 35 | image_location_user = "${var.image_location_user}" 36 | image_location_pass = "${var.image_location_password}" 37 | 38 | /* Workaround for terraform issue #10857 39 | When this is fixed, we can work this out automatically */ 40 | cluster_size = "${1 + var.master["nodes"] + var.worker["nodes"] + var.proxy["nodes"] + var.mgmt["nodes"] + var.va["nodes"]}" 41 | 42 | ################################################################################################################################### 43 | ## You can feed in arbitrary configuration items in the icp_configuration map. 44 | ## Available configuration items availble from https://www.ibm.com/support/knowledgecenter/SSBS6K_3.1.0/installing/config_yaml.html 45 | icp_configuration = { 46 | "network_cidr" = "${var.network_cidr}" 47 | "service_cluster_ip_range" = "${var.service_network_cidr}" 48 | "cluster_lb_address" = "${ibm_lbaas.master-lbaas.vip}" 49 | "proxy_lb_address" = "${ibm_lbaas.proxy-lbaas.vip}" 50 | "cluster_CA_domain" = "${var.cluster_cname != "" ? "${var.cluster_cname}" : "${ibm_lbaas.master-lbaas.vip}"}" 51 | "cluster_name" = "${var.deployment}" 52 | "calico_ip_autodetection_method" = "interface=eth0" 53 | 54 | # An admin password will be generated if not supplied in terraform.tfvars 55 | "default_admin_password" = "${local.icppassword}" 56 | 57 | # This is the list of disabled management services 58 | "management_services" = "${local.disabled_management_services}" 59 | 60 | "private_registry_enabled" = "${local.registry_server != "" ? 
"true" : "false" }" 61 | "private_registry_server" = "${local.registry_server}" 62 | "image_repo" = "${local.image_repo}" # Will either be our private repo or external repo 63 | "docker_username" = "${local.docker_username}" # Will either be username generated by us or supplied by user 64 | "docker_password" = "${local.docker_password}" # Will either be username generated by us or supplied by user 65 | } 66 | 67 | # We will let terraform generate a new ssh keypair 68 | # for boot master to communicate with worker and proxy nodes 69 | # during ICP deployment 70 | generate_key = true 71 | 72 | # SSH user and key for terraform to connect to newly created VMs 73 | # ssh_key is the private key corresponding to the public assumed to be included in the template 74 | ssh_user = "icpdeploy" 75 | ssh_key_base64 = "${base64encode(tls_private_key.installkey.private_key_pem)}" 76 | ssh_agent = false 77 | 78 | # Make sure to wait for image load to complete 79 | # hooks = { 80 | # "boot-preconfig" = [ 81 | # "while [ ! -f /opt/ibm/.imageload_complete ]; do sleep 5; done" 82 | # ] 83 | # } 84 | 85 | ## Alternative approach 86 | # hooks = { 87 | # "cluster-preconfig" = ["echo No hook"] 88 | # "cluster-postconfig" = ["echo No hook"] 89 | # "preinstall" = ["echo No hook"] 90 | # "postinstall" = ["echo No hook"] 91 | # "boot-preconfig" = [ 92 | # # "${var.image_location == "" ? "exit 0" : "echo Getting archives"}", 93 | # "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done", 94 | # "sudo mv /tmp/load_image.sh /opt/ibm/scripts/", 95 | # "sudo chmod a+x /opt/ibm/scripts/load_image.sh", 96 | # "/opt/ibm/scripts/load_image.sh -p ${var.image_location} -r ${local.registry_server} -c ${local.docker_password}" 97 | # ] 98 | # } 99 | 100 | } 101 | 102 | output "icp_console_host" { 103 | value = "${ibm_lbaas.master-lbaas.vip}" 104 | } 105 | 106 | output "icp_proxy_host" { 107 | value = "${ibm_lbaas.proxy-lbaas.vip}" 108 | } 109 | 110 | output "icp_console_url" { 111 | value = "https://${ibm_lbaas.master-lbaas.vip}:8443" 112 | } 113 | 114 | output "icp_registry_url" { 115 | value = "${ibm_lbaas.master-lbaas.vip}:8500" 116 | } 117 | 118 | output "kubernetes_api_url" { 119 | value = "https://${ibm_lbaas.master-lbaas.vip}:8001" 120 | } 121 | 122 | output "icp_admin_username" { 123 | value = "admin" 124 | } 125 | 126 | output "icp_admin_password" { 127 | value = "${local.icppassword}" 128 | } 129 | -------------------------------------------------------------------------------- /templates/icp-ee/icp-install/README.md: -------------------------------------------------------------------------------- 1 | add ICP installation binaries here 2 | -------------------------------------------------------------------------------- /templates/icp-ee/instances.tf: -------------------------------------------------------------------------------- 1 | ######################################################### 2 | ## Get VLAN IDs if we need to provision to specific VLANs 3 | ######################################################## 4 | data "ibm_network_vlan" "private_vlan" { 5 | count = "${var.private_vlan_router_hostname != "" ? 1 : 0}" 6 | router_hostname = "${var.private_vlan_router_hostname}.${var.datacenter}" 7 | number = "${var.private_vlan_number}" 8 | } 9 | 10 | data "ibm_network_vlan" "public_vlan" { 11 | count = "${var.private_network_only != true && var.public_vlan_router_hostname != "" ? 
1 : 0}" 12 | router_hostname = "${var.public_vlan_router_hostname}.${var.datacenter}" 13 | number = "${var.public_vlan_number}" 14 | } 15 | 16 | locals { 17 | private_vlan_id = "${element(concat(data.ibm_network_vlan.private_vlan.*.id, list("-1")), 0) }" 18 | public_vlan_id = "${element(concat(data.ibm_network_vlan.public_vlan.*.id, list("-1")), 0)}" 19 | } 20 | 21 | ############################################## 22 | ## Provision boot node 23 | ############################################## 24 | 25 | resource "ibm_compute_vm_instance" "icp-boot" { 26 | hostname = "${var.deployment}-boot-${random_id.clusterid.hex}" 27 | domain = "${var.domain}" 28 | 29 | os_reference_code = "${var.os_reference_code}" 30 | 31 | datacenter = "${var.datacenter}" 32 | 33 | cores = "${var.boot["cpu_cores"]}" 34 | memory = "${var.boot["memory"]}" 35 | 36 | network_speed = "${var.boot["network_speed"]}" 37 | 38 | local_disk = "${var.boot["local_disk"]}" 39 | disks = [ 40 | "${var.boot["disk_size"]}", 41 | "${var.boot["docker_vol_size"]}" 42 | ] 43 | 44 | tags = [ 45 | "${var.deployment}", 46 | "icp-boot", 47 | "${random_id.clusterid.hex}" 48 | ] 49 | 50 | hourly_billing = "${var.boot["hourly_billing"]}" 51 | private_network_only = "${var.private_network_only}" 52 | public_vlan_id = "${local.public_vlan_id}" 53 | private_vlan_id = "${local.private_vlan_id}" 54 | 55 | public_security_group_ids = ["${compact(concat( 56 | ibm_security_group.cluster_public.*.id, 57 | list("${var.private_network_only != true ? ibm_security_group.boot_node_public.id : "" }") 58 | ))}"] 59 | 60 | private_security_group_ids = ["${compact(concat( 61 | list("${ibm_security_group.cluster_private.id}"), 62 | ibm_security_group.boot_node_public.*.id 63 | ))}"] 64 | 65 | # Permit an ssh loging for the key owner. 66 | # You can have multiple keys defined. 67 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 68 | 69 | user_metadata = < 1 ? " 188 | - ['${ibm_storage_file.fs_registry.mountpoint}', /var/lib/registry, nfs, defaults, 0, 0] 189 | - ['${ibm_storage_file.fs_audit.mountpoint}', /var/lib/icp/audit, nfs, defaults, 0, 0] 190 | " 191 | : 192 | "" } 193 | runcmd: 194 | - /opt/ibm/scripts/bootstrap.sh -u icpdeploy ${local.docker_package_uri != "" ? "-p ${local.docker_package_uri}" : "" } -d /dev/xvdc 195 | - mkdir -p /var/lib/registry 196 | - mkdir -p /var/lib/icp/audit 197 | - echo '${ibm_storage_file.fs_registry.mountpoint} /var/lib/registry nfs defaults 0 0' | tee -a /etc/fstab 198 | - echo '${ibm_storage_file.fs_audit.mountpoint} /var/lib/icp/audit nfs defaults 0 0' | tee -a /etc/fstab 199 | - sudo mount -a 200 | - echo '${ibm_compute_vm_instance.icp-boot.ipv4_address_private} ${var.deployment}-boot-${random_id.clusterid.hex}.${var.domain}' >> /etc/hosts 201 | EOF 202 | 203 | # Permit an ssh loging for the key owner. 204 | # You can have multiple keys defined. 205 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 206 | 207 | notes = "Master node for ICP deployment" 208 | 209 | lifecycle { 210 | ignore_changes = [ 211 | "private_vlan_id", 212 | "public_vlan_id" 213 | ] 214 | } 215 | 216 | provisioner "file" { 217 | # copy the local docker installation package if it's set 218 | connection { 219 | host = "${self.ipv4_address_private}" 220 | user = "icpdeploy" 221 | private_key = "${tls_private_key.installkey.private_key_pem}" 222 | bastion_host = "${var.private_network_only ? 
ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 223 | } 224 | 225 | source = "${var.docker_package_location != "" ? "${var.docker_package_location}" : "${path.module}/icp-install/README.md"}" 226 | destination = "${local.docker_package_uri != "" ? "${local.docker_package_uri}" : "/dev/null" }" 227 | } 228 | 229 | # wait until cloud-init finishes 230 | provisioner "remote-exec" { 231 | connection { 232 | host = "${self.ipv4_address_private}" 233 | user = "icpdeploy" 234 | private_key = "${tls_private_key.installkey.private_key_pem}" 235 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 236 | } 237 | 238 | inline = [ 239 | "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done" 240 | ] 241 | } 242 | } 243 | 244 | resource "ibm_compute_vm_instance" "icp-mgmt" { 245 | count = "${var.mgmt["nodes"]}" 246 | 247 | hostname = "${format("${lower(var.deployment)}-mgmt%02d-${random_id.clusterid.hex}", count.index + 1) }" 248 | domain = "${var.domain}" 249 | 250 | os_reference_code = "${var.os_reference_code}" 251 | datacenter = "${var.datacenter}" 252 | 253 | cores = "${var.mgmt["cpu_cores"]}" 254 | memory = "${var.mgmt["memory"]}" 255 | 256 | local_disk = "${var.mgmt["local_disk"]}" 257 | disks = [ 258 | "${var.mgmt["disk_size"]}", 259 | "${var.mgmt["docker_vol_size"]}" 260 | ] 261 | 262 | network_speed = "${var.mgmt["network_speed"]}" 263 | private_network_only = "${var.private_network_only}" 264 | public_vlan_id = "${local.public_vlan_id}" 265 | private_vlan_id = "${local.private_vlan_id}" 266 | 267 | public_security_group_ids = ["${compact(concat( 268 | ibm_security_group.cluster_public.*.id 269 | ))}"] 270 | 271 | private_security_group_ids = [ 272 | "${ibm_security_group.cluster_private.id}" 273 | ] 274 | 275 | tags = [ 276 | "${var.deployment}", 277 | "icp-management", 278 | "${random_id.clusterid.hex}" 279 | ] 280 | 281 | # Permit an ssh loging for the key owner. 282 | # You can have multiple keys defined. 283 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 284 | 285 | user_metadata = <> /etc/hosts 306 | EOF 307 | 308 | hourly_billing = "${var.mgmt["hourly_billing"]}" 309 | 310 | notes = "Management node for ICP deployment" 311 | 312 | lifecycle { 313 | ignore_changes = [ 314 | "private_vlan_id", 315 | "public_vlan_id" 316 | ] 317 | } 318 | 319 | provisioner "file" { 320 | # copy the local docker installation package if it's set 321 | connection { 322 | host = "${self.ipv4_address_private}" 323 | user = "icpdeploy" 324 | private_key = "${tls_private_key.installkey.private_key_pem}" 325 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 326 | } 327 | 328 | source = "${var.docker_package_location != "" ? "${var.docker_package_location}" : "${path.module}/icp-install/README.md"}" 329 | destination = "${local.docker_package_uri != "" ? "${local.docker_package_uri}" : "/dev/null" }" 330 | } 331 | 332 | # wait until cloud-init finishes 333 | provisioner "remote-exec" { 334 | connection { 335 | host = "${self.ipv4_address_private}" 336 | user = "icpdeploy" 337 | private_key = "${tls_private_key.installkey.private_key_pem}" 338 | bastion_host = "${var.private_network_only ? 
ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 339 | } 340 | 341 | inline = [ 342 | "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done" 343 | ] 344 | } 345 | } 346 | 347 | resource "ibm_compute_vm_instance" "icp-va" { 348 | count = "${var.va["nodes"]}" 349 | 350 | hostname = "${format("${lower(var.deployment)}-va%02d-${random_id.clusterid.hex}", count.index + 1) }" 351 | domain = "${var.domain}" 352 | 353 | os_reference_code = "${var.os_reference_code}" 354 | 355 | datacenter = "${var.datacenter}" 356 | cores = "${var.va["cpu_cores"]}" 357 | memory = "${var.va["memory"]}" 358 | 359 | network_speed = "${var.va["network_speed"]}" 360 | private_network_only = "${var.private_network_only}" 361 | public_vlan_id = "${local.public_vlan_id}" 362 | private_vlan_id = "${local.private_vlan_id}" 363 | 364 | public_security_group_ids = ["${compact(concat( 365 | ibm_security_group.cluster_public.*.id 366 | ))}"] 367 | 368 | private_security_group_ids = [ 369 | "${ibm_security_group.cluster_private.id}" 370 | ] 371 | 372 | local_disk = "${var.va["local_disk"]}" 373 | disks = [ 374 | "${var.va["disk_size"]}", 375 | "${var.va["docker_vol_size"]}" 376 | ] 377 | 378 | tags = [ 379 | "${var.deployment}", 380 | "icp-management", 381 | "${random_id.clusterid.hex}" 382 | ] 383 | 384 | hourly_billing = "${var.va["hourly_billing"]}" 385 | user_metadata = <> /etc/hosts 406 | EOF 407 | 408 | # Permit an ssh loging for the key owner. 409 | # You can have multiple keys defined. 410 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 411 | 412 | notes = "Vulnerability Advisor node for ICP deployment" 413 | lifecycle { 414 | ignore_changes = [ 415 | "private_vlan_id", 416 | "public_vlan_id" 417 | ] 418 | } 419 | 420 | provisioner "file" { 421 | # copy the local docker installation package if it's set 422 | connection { 423 | host = "${self.ipv4_address_private}" 424 | user = "icpdeploy" 425 | private_key = "${tls_private_key.installkey.private_key_pem}" 426 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 427 | } 428 | 429 | source = "${var.docker_package_location != "" ? "${var.docker_package_location}" : "${path.module}/icp-install/README.md"}" 430 | destination = "${local.docker_package_uri != "" ? "${local.docker_package_uri}" : "/dev/null" }" 431 | } 432 | 433 | # wait until cloud-init finishes 434 | provisioner "remote-exec" { 435 | connection { 436 | host = "${self.ipv4_address_private}" 437 | user = "icpdeploy" 438 | private_key = "${tls_private_key.installkey.private_key_pem}" 439 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 440 | } 441 | 442 | inline = [ 443 | "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done" 444 | ] 445 | } 446 | } 447 | 448 | resource "ibm_compute_vm_instance" "icp-proxy" { 449 | count = "${var.proxy["nodes"]}" 450 | 451 | hostname = "${format("${lower(var.deployment)}-proxy%02d-${random_id.clusterid.hex}", count.index + 1) }" 452 | domain = "${var.domain}" 453 | 454 | os_reference_code = "${var.os_reference_code}" 455 | 456 | datacenter = "${var.datacenter}" 457 | cores = "${var.proxy["cpu_cores"]}" 458 | memory = "${var.proxy["memory"]}" 459 | hourly_billing = "${var.proxy["hourly_billing"]}" 460 | tags = [ 461 | "${var.deployment}", 462 | "icp-proxy", 463 | "${random_id.clusterid.hex}" 464 | ] 465 | 466 | network_speed = "${var.proxy["network_speed"]}" 467 | private_network_only = "${var.private_network_only}" 468 | public_vlan_id = "${local.public_vlan_id}" 469 | private_vlan_id = "${local.private_vlan_id}" 470 | 471 | public_security_group_ids = ["${compact(concat( 472 | ibm_security_group.cluster_public.*.id 473 | ))}"] 474 | 475 | private_security_group_ids = [ 476 | "${ibm_security_group.cluster_private.id}", 477 | "${ibm_security_group.proxy_group.id}" 478 | ] 479 | 480 | local_disk = "${var.proxy["local_disk"]}" 481 | disks = [ 482 | "${var.proxy["disk_size"]}", 483 | "${var.proxy["docker_vol_size"]}" 484 | ] 485 | 486 | user_metadata = <> /etc/hosts 507 | EOF 508 | 509 | # Permit an ssh loging for the key owner. 510 | # You can have multiple keys defined. 511 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 512 | 513 | notes = "Proxy node for ICP deployment" 514 | 515 | lifecycle { 516 | ignore_changes = [ 517 | "private_vlan_id", 518 | "public_vlan_id" 519 | ] 520 | } 521 | 522 | provisioner "file" { 523 | # copy the local docker installation package if it's set 524 | connection { 525 | host = "${self.ipv4_address_private}" 526 | user = "icpdeploy" 527 | private_key = "${tls_private_key.installkey.private_key_pem}" 528 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 529 | } 530 | 531 | source = "${var.docker_package_location != "" ? "${var.docker_package_location}" : "${path.module}/icp-install/README.md"}" 532 | destination = "${local.docker_package_uri != "" ? "${local.docker_package_uri}" : "/dev/null" }" 533 | } 534 | 535 | # wait until cloud-init finishes 536 | provisioner "remote-exec" { 537 | connection { 538 | host = "${self.ipv4_address_private}" 539 | user = "icpdeploy" 540 | private_key = "${tls_private_key.installkey.private_key_pem}" 541 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 542 | } 543 | 544 | inline = [ 545 | "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done" 546 | ] 547 | } 548 | } 549 | 550 | 551 | resource "ibm_compute_vm_instance" "icp-worker" { 552 | count = "${var.worker["nodes"]}" 553 | 554 | hostname = "${format("${lower(var.deployment)}-worker%02d-${random_id.clusterid.hex}", count.index + 1) }" 555 | domain = "${var.domain}" 556 | 557 | os_reference_code = "${var.os_reference_code}" 558 | 559 | datacenter = "${var.datacenter}" 560 | 561 | cores = "${var.worker["cpu_cores"]}" 562 | memory = "${var.worker["memory"]}" 563 | 564 | network_speed = "${var.worker["network_speed"]}" 565 | private_network_only = "${var.private_network_only}" 566 | public_vlan_id = "${local.public_vlan_id}" 567 | private_vlan_id = "${local.private_vlan_id}" 568 | 569 | public_security_group_ids = ["${compact(concat( 570 | ibm_security_group.cluster_public.*.id 571 | ))}"] 572 | 573 | private_security_group_ids = [ 574 | "${ibm_security_group.cluster_private.id}" 575 | ] 576 | 577 | local_disk = "${var.worker["local_disk"]}" 578 | disks = ["${compact( 579 | concat( 580 | list( 581 | var.worker["disk_size"], 582 | var.worker["docker_vol_size"], 583 | var.worker["additional_disk"] != 0 ? "${var.worker["additional_disk"]}" : "") 584 | ))}"] 585 | 586 | 587 | hourly_billing = "${var.worker["hourly_billing"]}" 588 | tags = [ 589 | "${var.deployment}", 590 | "icp-worker", 591 | "${random_id.clusterid.hex}" 592 | ] 593 | 594 | user_metadata = <> /etc/hosts 615 | EOF 616 | 617 | # Permit an ssh loging for the key owner. 618 | # You can have multiple keys defined. 619 | ssh_key_ids = ["${data.ibm_compute_ssh_key.public_key.*.id}"] 620 | 621 | notes = "Worker node for ICP deployment" 622 | 623 | lifecycle { 624 | ignore_changes = [ 625 | "private_vlan_id", 626 | "public_vlan_id" 627 | ] 628 | } 629 | 630 | provisioner "file" { 631 | # copy the local docker installation package if it's set 632 | connection { 633 | host = "${self.ipv4_address_private}" 634 | user = "icpdeploy" 635 | private_key = "${tls_private_key.installkey.private_key_pem}" 636 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 637 | } 638 | 639 | source = "${var.docker_package_location != "" ? "${var.docker_package_location}" : "${path.module}/icp-install/README.md"}" 640 | destination = "${local.docker_package_uri != "" ? "${local.docker_package_uri}" : "/dev/null" }" 641 | } 642 | 643 | # wait until cloud-init finishes 644 | provisioner "remote-exec" { 645 | connection { 646 | host = "${self.ipv4_address_private}" 647 | user = "icpdeploy" 648 | private_key = "${tls_private_key.installkey.private_key_pem}" 649 | bastion_host = "${var.private_network_only ? ibm_compute_vm_instance.icp-boot.ipv4_address_private : ibm_compute_vm_instance.icp-boot.ipv4_address}" 650 | } 651 | 652 | inline = [ 653 | "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done" 654 | ] 655 | } 656 | } 657 | -------------------------------------------------------------------------------- /templates/icp-ee/lbaas.tf: -------------------------------------------------------------------------------- 1 | resource "ibm_lbaas" "proxy-lbaas" { 2 | name = "${var.deployment}-proxy-${random_id.clusterid.hex}" 3 | description = "load balancer for ICP proxy" 4 | 5 | subnets = ["${element(concat(ibm_compute_vm_instance.icp-proxy.*.private_subnet_id, 6 | ibm_compute_vm_instance.icp-master.*.private_subnet_id), 7 | 0)}"] 8 | protocols = [ 9 | { 10 | frontend_protocol = "TCP" 11 | frontend_port = 443 12 | 13 | backend_protocol = "TCP" 14 | backend_port = 443 15 | }, 16 | { 17 | frontend_protocol = "TCP" 18 | frontend_port = 80 19 | 20 | backend_protocol = "TCP" 21 | backend_port = 80 22 | } 23 | ] 24 | } 25 | 26 | resource "ibm_lbaas_server_instance_attachment" "icp_proxy" { 27 | count = "${var.proxy["nodes"] > 0 ? var.proxy["nodes"] : var.master["nodes"]}" 28 | private_ip_address = "${element(concat(ibm_compute_vm_instance.icp-proxy.*.ipv4_address_private, ibm_compute_vm_instance.icp-master.*.ipv4_address_private), count.index)}" 29 | lbaas_id = "${ibm_lbaas.proxy-lbaas.id}" 30 | } 31 | 32 | resource "ibm_lbaas" "master-lbaas" { 33 | name = "${var.deployment}-mastr-${random_id.clusterid.hex}" 34 | description = "load balancer for ICP master" 35 | 36 | subnets = ["${ibm_compute_vm_instance.icp-master.0.private_subnet_id}"] 37 | protocols = [ 38 | { 39 | frontend_protocol = "TCP" 40 | frontend_port = 8443 41 | 42 | backend_protocol = "TCP" 43 | backend_port = 8443 44 | }, 45 | { 46 | frontend_protocol = "TCP" 47 | frontend_port = 8001 48 | 49 | backend_protocol = "TCP" 50 | backend_port = 8001 51 | }, 52 | { 53 | frontend_protocol = "TCP" 54 | frontend_port = 8500 55 | 56 | backend_protocol = "TCP" 57 | backend_port = 8500 58 | }, 59 | { 60 | frontend_protocol = "TCP" 61 | frontend_port = 8600 62 | 63 | backend_protocol = "TCP" 64 | backend_port = 8600 65 | }, 66 | { 67 | frontend_protocol = "TCP" 68 | frontend_port = 9443 69 | 70 | backend_protocol = "TCP" 71 | backend_port = 9443 72 | } 73 | ] 74 | } 75 | 76 | 77 | 78 | resource "ibm_lbaas_server_instance_attachment" "icp_master" { 79 | count = "${var.master["nodes"]}" 80 | private_ip_address = "${element(ibm_compute_vm_instance.icp-master.*.ipv4_address_private, count.index)}" 81 | lbaas_id = "${ibm_lbaas.master-lbaas.id}" 82 | } 83 | -------------------------------------------------------------------------------- /templates/icp-ee/main.tf: -------------------------------------------------------------------------------- 1 | provider "ibm" { 2 | } 3 | 4 | locals { 5 | # Set the local filename of the docker package if we're uploading it 6 | docker_package_uri = "${var.docker_package_location != "" ? "/tmp/${basename(var.docker_package_location)}" : "" }" 7 | 8 | # The storage IDs that will be 9 | master_fs_ids = "${compact( 10 | concat( 11 | ibm_storage_file.fs_audit.*.id, 12 | ibm_storage_file.fs_registry.*.id, 13 | list("")) 14 | )}" 15 | 16 | icppassword = "${var.icppassword != "" ? 
"${var.icppassword}" : "${random_id.adminpassword.hex}"}" 17 | 18 | 19 | ####### 20 | ## Intermediate interpolations for the private registry 21 | ## Whether we are provided with details of an external, or we create one ourselves 22 | ## the image_repo and docker_username / docker_password will always be available and consistent 23 | ####### 24 | 25 | # If we stand up a image registry what will the registry_server name and namespace be 26 | registry_server = "${var.registry_server != "" ? "${var.registry_server}" : "${var.deployment}-boot-${random_id.clusterid.hex}.${var.domain}"}" 27 | namespace = "${dirname(var.icp_inception_image)}" # This will typically return ibmcom 28 | 29 | # The final image repo will be either interpolated from what supplied in icp_inception_image or 30 | image_repo = "${var.registry_server == "" ? "" : "${local.registry_server}/${local.namespace}"}" 31 | icp-version = "${format("%s%s%s", "${local.docker_username != "" ? "${local.docker_username}:${local.docker_password}@" : ""}", 32 | "${var.registry_server != "" ? "${var.registry_server}/" : ""}", 33 | "${var.icp_inception_image}")}" 34 | 35 | # If we're using external registry we need to be supplied registry_username and registry_password 36 | docker_username = "${var.registry_username != "" ? var.registry_username : ""}" 37 | docker_password = "${var.registry_password != "" ? var.registry_password : ""}" 38 | 39 | # This is just to have a long list of disabled items to use in icp-deploy.tf 40 | disabled_list = "${list("disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled","disabled")}" 41 | 42 | disabled_management_services = "${zipmap(var.disabled_management_services, slice(local.disabled_list, 0, length(var.disabled_management_services)))}" 43 | } 44 | 45 | # Create a unique random clusterid for this cluster 46 | resource "random_id" "clusterid" { 47 | byte_length = "4" 48 | } 49 | 50 | # Create a SSH key for SSH communication from terraform to VMs 51 | resource "tls_private_key" "installkey" { 52 | algorithm = "RSA" 53 | } 54 | 55 | data "ibm_compute_ssh_key" "public_key" { 56 | count = "${length(var.key_name)}" 57 | label = "${element(var.key_name, count.index)}" 58 | } 59 | 60 | # Generate a random string in case user wants us to generate admin password 61 | resource "random_id" "adminpassword" { 62 | byte_length = "16" 63 | } 64 | -------------------------------------------------------------------------------- /templates/icp-ee/scripts/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ubuntu_install(){ 4 | # attempt to retry apt-get update until cloud-init gives up the apt lock 5 | until apt-get update; do 6 | sleep 2 7 | done 8 | 9 | until apt-get install -y \ 10 | unzip \ 11 | python \ 12 | python-yaml \ 13 | thin-provisioning-tools \ 14 | nfs-client \ 15 | lvm2; do 16 | sleep 2 17 | done 18 | } 19 | 20 | crlinux_install() { 21 | yum install -y \ 22 | unzip \ 23 | PyYAML \ 24 | device-mapper \ 25 | libseccomp \ 26 | libtool-ltdl \ 27 | libcgroup \ 28 | iptables \ 29 | device-mapper-persistent-data \ 30 | nfs-util \ 31 | lvm2 32 | } 33 | 34 | docker_install() { 35 | if docker --version; then 36 | echo "Docker already installed. 
Exiting" 37 | return 0 38 | fi 39 | 40 | if [ -z "${package_location}" -a "${OSLEVEL}" == "ubuntu" ]; then 41 | # if we're on ubuntu, we can install docker-ce off of the repo 42 | apt-get install -y \ 43 | apt-transport-https \ 44 | ca-certificates \ 45 | curl \ 46 | software-properties-common 47 | 48 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - 49 | 50 | add-apt-repository \ 51 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ 52 | $(lsb_release -cs) \ 53 | stable" 54 | 55 | apt-get update && apt-get install -y docker-ce 56 | elif [ ! -z "${package_location}" ]; then 57 | while [ ! -f "${package_location}" ]; do 58 | echo "Waiting for docker package at ${package_location} ... " 59 | sleep 1 60 | done 61 | 62 | echo "Install docker from ${package_location}" 63 | chmod u+x "${package_location}" 64 | 65 | # loop here until file provisioner is done copying the package 66 | until ${package_location} --install; do 67 | sleep 2 68 | done 69 | else 70 | return 0 71 | fi 72 | 73 | partprobe 74 | lsblk 75 | systemctl enable docker 76 | storage_driver=`docker info | grep 'Storage Driver:' | cut -d: -f2 | sed -e 's/\s//g'` 77 | echo "storage driver is ${storage_driver}" 78 | if [ "${storage_driver}" == "devicemapper" ]; then 79 | # check if loop lvm mode is enabled 80 | if [ -z `docker info | grep 'loop file'` ]; then 81 | echo "Direct-lvm mode is configured." 82 | return 0 83 | fi 84 | 85 | # TODO if docker block device is not provided, make sure we use overlay2 storage driver 86 | if [ -z "${docker_disk}" ]; then 87 | echo "docker loop-lvm mode is configured and a docker block device was not specified! This is not recommended for production!" 88 | return 0 89 | fi 90 | 91 | echo "A docker disk ${docker_disk} is provided, setting up direct-lvm mode ..." 92 | 93 | # docker installer uses devicemapper already 94 | cat > /etc/docker/daemon.json <&2 48 | exit 1 49 | fi 50 | 51 | # Download the file using auth if provided 52 | echo "Downloading ${image_url}" >&2 53 | mkdir -p ${sourcedir} 54 | wget --continue ${username:+--user} ${username} ${password:+--password} ${password} \ 55 | -O ${sourcedir}/${filename} "${image_url}" 56 | 57 | # Set the image file name if we're on the same platform 58 | if [[ ${filename} =~ .*$(uname -m).* ]]; then 59 | echo "Setting image_file to ${sourcedir}/${filename}" 60 | image_file="${sourcedir}/${filename}" 61 | fi 62 | elif [[ "${package_location:0:3}" == "nfs" ]]; then 63 | # Separate out the filename and path 64 | sourcedir="/opt/ibm/cluster/images" 65 | nfs_mount=$(dirname ${package_location:4}) 66 | image_file="${sourcedir}/$(basename ${package_location})" 67 | sudo mkdir -p ${sourcedir} 68 | 69 | # Mount 70 | sudo mount.nfs $nfs_mount $sourcedir 71 | if [ $? -ne 0 ]; then 72 | echo "An error occurred mounting the NFS server. Mount point: $nfs_mount" 73 | exit 1 74 | fi 75 | else 76 | # This must be uploaded from local file, terraform should have copied it to /tmp 77 | sourcedir="/opt/ibm/cluster/images" 78 | image_file="/tmp/$(basename ${package_location})" 79 | sudo mkdir -p ${sourcedir} 80 | sudo mv ${image_file} ${sourcedir}/ 81 | fi 82 | 83 | echo "Unpacking ${image_file} ..." 
84 | pv --interval 10 ${image_file} | tar zxf - -O | sudo docker load 85 | 86 | -------------------------------------------------------------------------------- /templates/icp-ee/security_group.tf: -------------------------------------------------------------------------------- 1 | resource "ibm_security_group" "cluster_private" { 2 | name = "${var.deployment}-cluster-priv-${random_id.clusterid.hex}" 3 | description = "allow intercluster communication" 4 | } 5 | 6 | resource "ibm_security_group_rule" "allow_ingress_from_self_priv" { 7 | direction = "ingress" 8 | ether_type = "IPv4" 9 | remote_group_id = "${ibm_security_group.cluster_private.id}" 10 | security_group_id = "${ibm_security_group.cluster_private.id}" 11 | } 12 | 13 | resource "ibm_security_group_rule" "allow_cluster_egress_private" { 14 | direction = "egress" 15 | ether_type = "IPv4" 16 | security_group_id = "${ibm_security_group.cluster_private.id}" 17 | } 18 | 19 | resource "ibm_security_group" "cluster_public" { 20 | count = "${var.private_network_only ? 0 : 1}" 21 | name = "${var.deployment}-cluster-pub-${random_id.clusterid.hex}" 22 | description = "allow intercluster communication" 23 | } 24 | 25 | resource "ibm_security_group_rule" "allow_ingress_from_self_pub" { 26 | count = "${var.private_network_only ? 0 : 1}" 27 | direction = "ingress" 28 | ether_type = "IPv4" 29 | remote_group_id = "${ibm_security_group.cluster_public.id}" 30 | security_group_id = "${ibm_security_group.cluster_public.id}" 31 | } 32 | 33 | resource "ibm_security_group_rule" "allow_cluster_public" { 34 | count = "${var.private_network_only ? 0 : 1}" 35 | direction = "egress" 36 | ether_type = "IPv4" 37 | security_group_id = "${ibm_security_group.cluster_public.id}" 38 | } 39 | 40 | resource "ibm_security_group" "master_group" { 41 | name = "${var.deployment}-master-${random_id.clusterid.hex}" 42 | description = "allow incoming to master" 43 | } 44 | 45 | # restrict incoming on ports to LBaaS private subnet 46 | resource "ibm_security_group_rule" "allow_port_8443" { 47 | direction = "ingress" 48 | ether_type = "IPv4" 49 | protocol = "tcp" 50 | port_range_min = 8443 51 | port_range_max = 8443 52 | security_group_id = "${ibm_security_group.master_group.id}" 53 | #remote_ip = "${ibm_compute_vm_instance.icp-master.0.private_subnet}" 54 | # Sometimes LBaaS can be placed on a different subnet 55 | remote_ip = "0.0.0.0/0" 56 | } 57 | 58 | # restrict to LBaaS private subnet 59 | resource "ibm_security_group_rule" "allow_port_8500" { 60 | direction = "ingress" 61 | ether_type = "IPv4" 62 | protocol = "tcp" 63 | port_range_min = 8500 64 | port_range_max = 8500 65 | security_group_id = "${ibm_security_group.master_group.id}" 66 | # remote_ip = "${ibm_compute_vm_instance.icp-master.0.private_subnet}" 67 | # Sometimes LBaaS can be placed on a different subnet 68 | remote_ip = "0.0.0.0/0" 69 | } 70 | 71 | # restrict to LBaaS private subnet 72 | resource "ibm_security_group_rule" "allow_port_8600" { 73 | direction = "ingress" 74 | ether_type = "IPv4" 75 | protocol = "tcp" 76 | port_range_min = 8600 77 | port_range_max = 8600 78 | security_group_id = "${ibm_security_group.master_group.id}" 79 | # remote_ip = "${ibm_compute_vm_instance.icp-master.0.private_subnet}" 80 | # Sometimes LBaaS can be placed on a different subnet 81 | remote_ip = "0.0.0.0/0" 82 | } 83 | 84 | # TODO restrict to LBaaS private subnet 85 | resource "ibm_security_group_rule" "allow_port_8001" { 86 | direction = "ingress" 87 | ether_type = "IPv4" 88 | protocol = "tcp" 89 | port_range_min = 8001 90 
| port_range_max = 8001 91 | security_group_id = "${ibm_security_group.master_group.id}" 92 | # remote_ip = "${ibm_compute_vm_instance.icp-master.0.private_subnet}" 93 | # Sometimes LBaaS can be placed on a different subnet 94 | remote_ip = "0.0.0.0/0" 95 | } 96 | 97 | 98 | # restrict to LBaaS private subnet 99 | resource "ibm_security_group_rule" "allow_port_9443" { 100 | direction = "ingress" 101 | ether_type = "IPv4" 102 | protocol = "tcp" 103 | port_range_min = 9443 104 | port_range_max = 9443 105 | security_group_id = "${ibm_security_group.master_group.id}" 106 | # remote_ip = "${ibm_compute_vm_instance.icp-master.0.private_subnet}" 107 | # Sometimes LBaaS can be placed on a different subnet 108 | remote_ip = "0.0.0.0/0" 109 | } 110 | 111 | resource "ibm_security_group_rule" "master_node_allow_outbound_public" { 112 | direction = "egress" 113 | ether_type = "IPv4" 114 | security_group_id = "${ibm_security_group.master_group.id}" 115 | } 116 | 117 | # restrict to LBaaS private subnet 118 | resource "ibm_security_group_rule" "allow_port_80" { 119 | direction = "ingress" 120 | ether_type = "IPv4" 121 | protocol = "tcp" 122 | port_range_min = 80 123 | port_range_max = 80 124 | security_group_id = "${ibm_security_group.proxy_group.id}" 125 | # Sometimes LBaaS can be placed on a different subnet 126 | remote_ip = "0.0.0.0/0" 127 | } 128 | 129 | # restrict to LBaaS private subnet 130 | resource "ibm_security_group_rule" "allow_port_443" { 131 | direction = "ingress" 132 | ether_type = "IPv4" 133 | protocol = "tcp" 134 | port_range_min = 443 135 | port_range_max = 443 136 | security_group_id = "${ibm_security_group.proxy_group.id}" 137 | # Sometimes LBaaS can be placed on a different subnet 138 | remote_ip = "0.0.0.0/0" 139 | } 140 | 141 | resource "ibm_security_group" "proxy_group" { 142 | name = "${var.deployment}-proxy-${random_id.clusterid.hex}" 143 | description = "allow incoming to proxy" 144 | } 145 | 146 | resource "ibm_security_group" "boot_node_public" { 147 | name = "${var.deployment}-boot-${random_id.clusterid.hex}" 148 | description = "allow incoming ssh" 149 | } 150 | 151 | # TODO restrict to allowed CIDR 152 | resource "ibm_security_group_rule" "allow_ssh" { 153 | direction = "ingress" 154 | ether_type = "IPv4" 155 | protocol = "tcp" 156 | port_range_min = 22 157 | port_range_max = 22 158 | security_group_id = "${ibm_security_group.boot_node_public.id}" 159 | } 160 | -------------------------------------------------------------------------------- /templates/icp-ee/variables.tf: -------------------------------------------------------------------------------- 1 | ##### SoftLayer/IBMCloud Access Credentials ###### 2 | 3 | variable "key_name" { 4 | description = "Name or reference of SSH key to provision IBM Cloud instances with" 5 | default = [] 6 | } 7 | 8 | ##### Common VM specifications ###### 9 | # Provide values for these in terraform.tfvars 10 | variable "datacenter" { } 11 | 12 | variable "private_vlan_router_hostname" { 13 | default = "" 14 | description = "Private VLAN router to place all VMs behind. e.g. bcr01a. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose." 15 | } 16 | 17 | variable "private_vlan_number" { 18 | default = -1 19 | description = "Private VLAN number to place all VMs on. e.g. 1211. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose." 20 | } 21 | 22 | variable "public_vlan_router_hostname" { 23 | default = "" 24 | description = "Public VLAN router to place all VMs behind. e.g. 
fcr01a. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose." 25 | } 26 | 27 | variable "public_vlan_number" { 28 | default = -1 29 | description = "Public VLAN number to place all VMs on. e.g. 1171. See Network > IP Management > VLANs in the portal. Leave blank to let the system choose." 30 | } 31 | 32 | variable "deployment" { 33 | description = "Identifier prefix added to the host names." 34 | default = "icp" 35 | } 36 | 37 | variable "os_reference_code" { 38 | description = "IBM Cloud OS reference code to determine OS, version, word length" 39 | default = "UBUNTU_16_64" 40 | } 41 | 42 | variable "domain" { 43 | description = "Specify domain name to be used for linux customization on the VMs, or leave blank to use .icp" 44 | default = "" 45 | } 46 | 47 | variable "private_network_only" { 48 | description = "Specify false to place the cluster on the public network. If public network access is disabled, you will require a NAT gateway device like a Gateway Appliance on the VLAN." 49 | default = false 50 | } 51 | 52 | ##### ICP Instance details ###### 53 | 54 | variable "boot" { 55 | type = "map" 56 | 57 | default = { 58 | cpu_cores = "4" 59 | memory = "4096" 60 | 61 | disk_size = "100" // GB 62 | docker_vol_size = "100" // GB 63 | local_disk = true 64 | os_reference_code = "UBUNTU_16_64" 65 | 66 | network_speed = "1000" 67 | 68 | hourly_billing = true 69 | } 70 | } 71 | 72 | variable "master" { 73 | type = "map" 74 | 75 | default = { 76 | nodes = "3" 77 | 78 | cpu_cores = "8" 79 | memory = "16384" 80 | 81 | disk_size = "100" // GB 82 | docker_vol_size = "100" // GB 83 | local_disk = false 84 | 85 | network_speed = "1000" 86 | 87 | hourly_billing = true 88 | } 89 | } 90 | 91 | variable "mgmt" { 92 | type = "map" 93 | 94 | default = { 95 | nodes = "1" 96 | 97 | cpu_cores = "4" 98 | memory = "16384" 99 | 100 | disk_size = "100" // GB 101 | docker_vol_size = "100" // GB 102 | local_disk = false 103 | 104 | network_speed = "1000" 105 | 106 | hourly_billing=true 107 | } 108 | } 109 | 110 | variable "proxy" { 111 | type = "map" 112 | 113 | default = { 114 | nodes = "3" 115 | 116 | cpu_cores = "2" 117 | memory = "4096" 118 | 119 | disk_size = "100" // GB 120 | docker_vol_size = "100" // GB 121 | local_disk = false 122 | 123 | network_speed= "1000" 124 | 125 | hourly_billing = true 126 | } 127 | } 128 | 129 | variable "va" { 130 | type = "map" 131 | 132 | default = { 133 | nodes = "0" 134 | 135 | cpu_cores = "4" 136 | memory = "8192" 137 | 138 | disk_size = "100" // GB 139 | docker_vol_size = "100" // GB 140 | local_disk = false 141 | 142 | network_speed = "1000" 143 | 144 | hourly_billing = true 145 | } 146 | } 147 | 148 | 149 | variable "worker" { 150 | type = "map" 151 | 152 | default = { 153 | nodes = "3" 154 | 155 | cpu_cores = "4" 156 | memory = "16384" 157 | 158 | disk_size = "100" // GB, 25 or 100 159 | docker_vol_size = "100" // GB 160 | additional_disk = "0" // GB, if you want an additional block device, set to non-zero 161 | 162 | local_disk = false 163 | 164 | network_speed= "1000" 165 | 166 | hourly_billing = true 167 | } 168 | } 169 | 170 | variable "fs_audit" { 171 | default = { 172 | type = "Endurance" 173 | size = "20" 174 | hourly_billing = true 175 | iops = 0.25 176 | } 177 | } 178 | 179 | variable "fs_registry" { 180 | default = { 181 | type = "Endurance" 182 | size = "100" 183 | hourly_billing = true 184 | iops = 2 185 | } 186 | } 187 | 188 | variable "docker_package_location" { 189 | description = "URI for docker package location, e.g. 
http:///icp-docker-17.09_x86_64.bin or nfs:/icp-docker-17.09_x86_64.bin" 190 | default = "" 191 | } 192 | 193 | variable "image_location" { 194 | description = "URI for image package location, e.g. http:///ibm-cloud-private-x86_64-2.1.0.2.tar.gz or nfs:/ibm-cloud-private-x86_64-2.1.0.2.tar.gz" 195 | default = "" 196 | } 197 | 198 | variable "image_location_user" { 199 | description = "Username if required by image_location i.e. authenticated http source" 200 | default = "" 201 | } 202 | 203 | variable "image_location_password" { 204 | description = "Password if required by image_location i.e. authenticated http source" 205 | default = "" 206 | } 207 | 208 | variable "icppassword" { 209 | description = "Password for the initial admin user in ICP; blank to generate" 210 | default = "" 211 | } 212 | 213 | variable "icp_inception_image" { 214 | description = "ICP image to use for installation" 215 | default = "ibmcom/icp-inception-amd64:3.1.0-ee" 216 | } 217 | 218 | variable "cluster_cname" { 219 | default = "" 220 | } 221 | 222 | variable "registry_server" { 223 | default = "" 224 | } 225 | 226 | variable "registry_username" { 227 | default = "" 228 | } 229 | 230 | variable "registry_password" { 231 | default = "" 232 | } 233 | 234 | variable "network_cidr" { 235 | description = "Pod network CIDR " 236 | default = "172.20.0.0/16" 237 | } 238 | 239 | variable "service_network_cidr" { 240 | description = "Service network CIDR " 241 | default = "172.21.0.0/16" 242 | } 243 | 244 | # The following services can be disabled for 3.1 245 | # custom-metrics-adapter, image-security-enforcement, istio, metering, monitoring, service-catalog, storage-minio, storage-glusterfs, and vulnerability-advisor 246 | variable "disabled_management_services" { 247 | description = "List of management services to disable" 248 | type = "list" 249 | default = ["istio", "vulnerability-advisor", "storage-glusterfs", "storage-minio"] 250 | } 251 | --------------------------------------------------------------------------------