├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── ansible-webserver ├── README.md ├── ansible.cfg ├── create.yml ├── destroy.yml ├── group_vars │ └── all.yml └── index.html ├── ibmcloud-vpnserver ├── README.md ├── ansible.cfg ├── create.yml ├── destroy.yml ├── group_vars │ └── all.yml └── roles │ ├── ibmcloud-certificates │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── requirements.yml │ └── tasks │ │ ├── create.yml │ │ ├── destroy.yml │ │ └── main.yml │ ├── ibmcloud-vpc │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── requirements.yml │ └── tasks │ │ ├── create.yml │ │ ├── destroy.yml │ │ └── main.yml │ └── ibmcloud-vpn-restapi │ ├── defaults │ └── main.yml │ ├── meta │ └── requirements.yml │ ├── tasks │ ├── create.yml │ ├── destroy.yml │ └── main.yml │ └── templates │ └── ovpn.j2 ├── renovate.json ├── terraform-hpvs ├── README.md ├── attestation │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── contract-expiry │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── create-contract-download-encryption │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── create-contract-dynamic-registry │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── create-contract │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── dynamic-registry │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── fhe-helayers-sdk │ ├── README.md │ ├── compose │ │ └── docker-compose.yaml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── hello-world │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── ibm-cloud-logging │ ├── README.md │ ├── compose │ │ └── docker-compose.yaml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── log-encryption │ ├── README.md │ ├── compose │ │ ├── bin │ │ │ ├── encrypt-basic.sh │ │ │ └── example.sh │ │ └── docker-compose.yml │ ├── support │ │ └── decrypt-basic.sh │ ├── terraform.tf │ └── variables.tf ├── mongodb │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── nginx-hello │ ├── README.md │ ├── cloud │ │ ├── my-settings.auto.tfvars-template │ │ ├── terraform.tf │ │ └── variables.tf │ ├── images │ │ └── nginx.jpg │ ├── onprem │ │ ├── add_known_host.sh │ │ ├── domain_update.xsl │ │ ├── my-settings.auto.tfvars-template │ │ ├── port-forward.sh │ │ ├── terraform.tf │ │ ├── variables.tf │ │ └── volume_update.xsl │ └── user_data │ │ ├── compose │ │ └── docker-compose.yml │ │ ├── my-settings.auto.tfvars-template │ │ ├── terraform.tf │ │ └── variables.tf ├── postgresql-cluster │ ├── README.md │ ├── compose_master │ │ └── docker-compose.yml │ ├── terraform.tf │ └── variables.tf ├── postgresql │ ├── README.md │ ├── compose │ │ └── docker-compose.yml │ ├── terraform.tf │ └── variables.tf ├── sample-daytrader │ ├── README.md │ ├── compose │ │ └── pod.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf └── sample-paynow │ ├── README.md 
│ ├── compose │ └── pod.yml │ ├── my-settings.auto.tfvars-template │ ├── terraform.tf │ └── variables.tf ├── terraform-single ├── README.md ├── locals.tf ├── main.tf ├── my-settings.auto.tfvars-template ├── outputs.tf ├── provider.tf ├── variables.tf └── versions.tf └── terraform-vpnserver ├── README.md ├── main.tf ├── my-settings.auto.tfvars-template ├── phase1 ├── main.tf ├── provider.tf └── variables.tf ├── phase2 ├── locals.tf ├── main.tf ├── provider.tf └── variables.tf └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | **/.cache 2 | **/.terraform 3 | **/.terraform.lock.hcl 4 | **/*.tfvars 5 | **/*.pub 6 | **/*.rsa 7 | .DS_Store 8 | **/terraform.tfstate 9 | **/terraform.tfstate.backup 10 | **/.terraform.tfstate.lock.info 11 | ibmcloud-vpnserver/*.pem 12 | ibmcloud-vpnserver/*.key 13 | ibmcloud-vpnserver/*.ovpn 14 | ibmcloud-vpnserver/*.password 15 | terraform-hpvs/**/.env 16 | terraform-hpvs/log-encryption/compose/key.pub 17 | terraform-vpnserver/*.ovpn 18 | build 19 | **/.vscode -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This is an open source project, and we appreciate your help! 4 | 5 | We use the GitHub issue tracker to discuss new features and non-trivial bugs. 6 | 7 | To contribute code, documentation, or tests, please submit a pull request to 8 | the GitHub repository. Generally, we expect two maintainers to review your pull 9 | request before it is approved for merging. For more details, see the 10 | [MAINTAINERS](MAINTAINERS.md) page. 11 | 12 | Contributions are subject to the [Developer Certificate of Origin, Version 1.1](https://developercertificate.org/) and the [Apache License, Version 2](https://www.apache.org/licenses/LICENSE-2.0.txt). 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LinuxONE and z/OS automation 2 | 3 | Ansible/Terraform examples for IBM Cloud LinuxONE and z/OS VSIs. Each example is stand-alone, so pick 4 | the one closest to your needs. Also, do not hesitate to contribute new examples. 5 | 6 | These are not created for any direct usage in production but as a base for your own automation. In particular, 7 | notice that they might expose ports to the Internet, which might be OK for a demo but not for production. So 8 | ensure you review the steps before using them in production. 9 | 10 | ## Ansible or Terraform 11 | 12 | The IBM Cloud Ansible collection is implemented as a Terraform wrapper, so both basically 13 | allow you to create any IaaS/PaaS/SaaS resource in the IBM Cloud. 14 | 15 | Ansible will also allow you to do more actions, such as installing software on the created VSI. 16 | Also, Ansible will, in general, tolerate that you have created parts of the 17 | infrastructure manually without having to import it, as you would have to in Terraform's case. 18 | 19 | Therefore, in general I prefer Ansible over Terraform, BUT if you are only creating 20 | infrastructure and it is a large project (such as creating multiple VSIs for OCP) 21 | then Terraform will be faster. This is because Terraform is aware of the dependencies 22 | and will try to create more things in parallel.
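For example, that concurrency can be tuned directly on the Terraform command line (a generic illustration of standard Terraform behaviour, not a setting used by these examples):

```bash
# let Terraform walk its dependency graph and run up to 20 independent operations at once
terraform apply -parallelism=20
```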
23 | 24 | ## More info 25 | 26 | - [IBM Cloud ansible collection](https://github.com/IBM-Cloud/ansible-collection-ibm) 27 | - [IBM Cloud Terraform plugin](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs) 28 | -------------------------------------------------------------------------------- /ansible-webserver/README.md: -------------------------------------------------------------------------------- 1 | # Deploys a web server in a zVSI with Ansible 2 | 3 | ## Preparations 4 | 5 | 1. Install Python3 6 | 2. Install Red Hat Ansible 2.9+ 7 | - `pip install "ansible>=2.9.2"` 8 | 3. Install the IBM Cloud Ansible collection 9 | - `ansible-galaxy collection install ibm.cloudcollection` 10 | 4. Adjust [ansible settings](group_vars/all.yml) 11 | 5. Ensure you have an `IC_API_KEY` environment variable set up with your 12 | IBM Cloud API key 13 | - this will likely require a paying account 14 | - you can create an API key by visiting the [IBM Cloud API keys page](https://cloud.ibm.com/iam/apikeys). Ensure you have 15 | selected the account you want to use before creating the key, as the key will be associated with the account you have selected 16 | at the time of creation. 17 | - If you have downloaded your `apikey.json` file from the IBM Cloud UI, you may use this command: 18 | `export IC_API_KEY=$(cat ~/apikey.json | jq -r .apikey)` 19 | 20 | ## Create 21 | 22 | - **zLinux**: `ansible-playbook create.yml` 23 | - **z/OS**: `ansible-playbook create.yml -e os_type=zos` **NOTE**: limited availability. 24 | 25 | If you want to use a different region, add `-e region=` to the above command. Example: `-e region=br-sao` 26 | 27 | ## Destroy 28 | 29 | 1. `ansible-playbook destroy.yml` 30 | - Note: the VPC and subnetwork will not be deleted - uncomment the last two tasks in 31 | [destroy.yml](destroy.yml) if you want them deleted.
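As a condensed reference, the preparation, create and destroy steps documented above boil down to the following sketch (it only repeats commands from this README; adjust the overrides to your needs):

```bash
# one-time setup
pip install "ansible>=2.9.2"
ansible-galaxy collection install ibm.cloudcollection
export IC_API_KEY=$(cat ~/apikey.json | jq -r .apikey)

# create the VSI and web server in a non-default region, then tear everything down again
ansible-playbook create.yml -e region=br-sao
ansible-playbook destroy.yml
```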
-------------------------------------------------------------------------------- /ansible-webserver/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | fact_caching = jsonfile 3 | fact_caching_connection = .cache 4 | 5 | [ssh_connection] 6 | pipelining = True -------------------------------------------------------------------------------- /ansible-webserver/create.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Conditional flush cache 3 | hosts: localhost 4 | gather_facts: false 5 | tasks: 6 | - name: Clear cache if we have changed the target zone 7 | when: (cache_zone is undefined) or (zone != cache_zone) 8 | ansible.builtin.meta: clear_facts 9 | - name: Create IBM Cloud VPC VSI 10 | hosts: localhost 11 | environment: 12 | IC_REGION: "{{ region }}" 13 | tasks: 14 | - name: Save zone as fact 15 | ansible.builtin.set_fact: 16 | cacheable: true 17 | cache_zone: "{{ zone }}" 18 | 19 | - name: Create Resource Group 20 | when: resource_group_name is defined 21 | ibm.cloudcollection.ibm_resource_group: 22 | name: "{{ resource_group_name }}" 23 | state: available 24 | register: ibm_resource_group_output 25 | 26 | - name: Create VPC 27 | when: vpc is undefined 28 | ibm.cloudcollection.ibm_is_vpc: 29 | name: "{{ name_prefix }}-vpc" 30 | state: available 31 | id: "{{ vpc.id | default(omit) }}" 32 | resource_group: "{{ ibm_resource_group_output.resource.id | default(omit) }}" 33 | register: vpc_create_output 34 | 35 | - name: Configure Security Group Rule to open SSH and web server on the VSI 36 | when: vpc is undefined 37 | ibm.cloudcollection.ibm_is_security_group_rule: 38 | state: available 39 | group: "{{ vpc_create_output.resource.default_security_group }}" 40 | direction: inbound 41 | remote: 0.0.0.0/0 42 | # tcp: 43 | # - port_max: 22 44 | # port_min: 22 45 | # - port_max: 80 46 | # port_min: 80 47 | # - port_max: 443 48 | # port_min: 443 49 | 50 | - name: Save VPC as fact 51 | when: vpc is undefined 52 | ansible.builtin.set_fact: 53 | cacheable: true 54 | vpc: "{{ vpc_create_output.resource }}" 55 | 56 | - name: Create VPC Subnet 57 | when: subnet is undefined 58 | ibm.cloudcollection.ibm_is_subnet: 59 | name: "{{ name_prefix }}-{{ zone }}" 60 | state: available 61 | id: "{{ subnet.id | default(omit) }}" 62 | vpc: "{{ vpc.id }}" 63 | total_ipv4_address_count: "{{ total_ipv4_address_count }}" 64 | zone: "{{ zone }}" 65 | resource_group: "{{ vpc.resource_group }}" 66 | register: subnet_create_output 67 | 68 | - name: Save VPC Subnet as fact 69 | when: subnet is undefined 70 | ansible.builtin.set_fact: 71 | cacheable: true 72 | subnet: "{{ subnet_create_output.resource }}" 73 | 74 | - name: Upload SSH Key 75 | when: ssh_key is undefined 76 | ibm.cloudcollection.ibm_is_ssh_key: 77 | name: "{{ ssh_key_name }}" 78 | public_key: "{{ ssh_public_key }}" 79 | id: "{{ ssh_key.id | default(omit) }}" 80 | resource_group: "{{ vpc.resource_group }}" 81 | register: ssh_key_create_output 82 | 83 | - name: Save SSH Key as fact 84 | when: ssh_key is undefined 85 | ansible.builtin.set_fact: 86 | cacheable: true 87 | ssh_key: "{{ ssh_key_create_output.resource }}" 88 | 89 | - name: Retrieve image list 90 | when: image_dict is undefined 91 | ibm.cloudcollection.ibm_is_images_info: 92 | register: images_list 93 | 94 | - name: Set VM image name/id dictionary fact 95 | when: image_dict is undefined 96 | ansible.builtin.set_fact: 97 | cacheable: true 98 | image_dict: "{{ images_list.resource.images | 
99 | items2dict(key_name='name', value_name='id') }}" 100 | 101 | # - name: show image_dict 102 | # debug: 103 | # var: image_dict 104 | 105 | - name: Create VSI 106 | when: (vsi is undefined) or (vsi == None) or (not cache_vsi) 107 | ibm.cloudcollection.ibm_is_instance: 108 | name: "{{ instance.name }}" 109 | state: available 110 | id: "{{ vsi.id | default(omit) }}" 111 | vpc: "{{ vpc.id }}" 112 | profile: "{{ instance.profile }}" 113 | image: "{{ (image_dict | dict2items | selectattr('key', 'match', instance.image) | list | first).value }}" 114 | keys: 115 | - "{{ ssh_key.id }}" 116 | primary_network_interface: 117 | - subnet: "{{ subnet.id }}" 118 | zone: "{{ zone }}" 119 | resource_group: "{{ vpc.resource_group }}" 120 | register: vsi_create_output 121 | 122 | - name: Save VSI as fact 123 | when: vsi_create_output.resource is defined 124 | ansible.builtin.set_fact: 125 | cacheable: true 126 | vsi: "{{ vsi_create_output.resource }}" 127 | 128 | - name: Configure Floating IP Address 129 | when: (fip is undefined) or (fip == None) or (not cache_vsi) 130 | ibm.cloudcollection.ibm_is_floating_ip: 131 | name: "{{ name_prefix }}-fip" 132 | state: available 133 | id: "{{ fip.id | default(omit) }}" 134 | target: "{{ vsi.primary_network_interface[0]['id'] }}" 135 | resource_group: "{{ vpc.resource_group }}" 136 | register: fip_create_output 137 | 138 | - name: Save Floating IP as fact 139 | when: fip_create_output.resource is defined 140 | ansible.builtin.set_fact: 141 | cacheable: true 142 | fip: "{{ fip_create_output.resource }}" 143 | 144 | - name: Print Floating IP Address 145 | ansible.builtin.debug: 146 | msg: "IP Address: {{ fip.address }}" 147 | 148 | - name: Add VSI to Ansible inventory 149 | ansible.builtin.add_host: 150 | name: "{{ fip.address }}" 151 | ansible_user: "{{ instance.username | default('root') }}" 152 | groups: new_vsi 153 | ansible_ssh_extra_args: "-o StrictHostKeyChecking=no" 154 | ansible_interpreter_python_fallback: "{{ instance.python_interpreters | default(omit) }}" 155 | 156 | - name: Check Ansible connection to new VSI 157 | hosts: new_vsi 158 | gather_facts: false 159 | tasks: 160 | - name: Wait for VSI to become reachable over SSH 161 | ansible.builtin.wait_for_connection: 162 | 163 | - name: Deploy application 164 | hosts: new_vsi 165 | tasks: 166 | - name: Collect OS information 167 | when: os_type == "zlinux" 168 | ansible.builtin.command: cat /proc/sysinfo 169 | changed_when: false 170 | register: sysinfo 171 | 172 | - name: Print OS information 173 | when: sysinfo.stdout_lines is defined 174 | ansible.builtin.debug: 175 | var: sysinfo.stdout_lines 176 | 177 | - name: Install web server (state=present is optional) 178 | when: os_type == "zlinux" 179 | ansible.builtin.apt: 180 | name: nginx 181 | state: present 182 | update_cache: true 183 | 184 | - name: Upload webpage 185 | when: os_type == "zlinux" 186 | ansible.builtin.copy: 187 | src: index.html 188 | dest: /var/www/html/index.html 189 | mode: '0644' 190 | -------------------------------------------------------------------------------- /ansible-webserver/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Conditional flush cache 3 | hosts: localhost 4 | gather_facts: false 5 | tasks: 6 | - name: Clear cache if we have changed the target zone 7 | when: (cache_zone is undefined) or (zone != cache_zone) 8 | ansible.builtin.meta: clear_facts 9 | - name: Destroy IBM Cloud VPC VSI 10 | hosts: localhost 11 | environment: 12 | IC_REGION: "{{ region
}}" 13 | tasks: 14 | - name: Release Floating IP 15 | ibm.cloudcollection.ibm_is_floating_ip: 16 | state: absent 17 | id: "{{ fip.id }}" 18 | resource_group: "{{ vpc.resource_group }}" 19 | target: "{{ vsi.primary_network_interface[0]['id'] }}" 20 | when: (fip is defined) and (fip != None) 21 | 22 | - name: Remove Floating IP fact 23 | when: fip is defined 24 | ansible.builtin.set_fact: 25 | cacheable: true 26 | fip: null 27 | 28 | - name: Remove VSI 29 | ibm.cloudcollection.ibm_is_instance: 30 | state: absent 31 | id: "{{ vsi.id }}" 32 | vpc: "{{ vpc.id }}" 33 | profile: "{{ instance.profile }}" 34 | image: "{{ (image_dict | dict2items | selectattr('key', 'match', instance.image) | list | first).value }}" 35 | keys: 36 | - "{{ ssh_key.id }}" 37 | primary_network_interface: 38 | - subnet: "{{ subnet.id }}" 39 | zone: "{{ zone }}" 40 | resource_group: "{{ vpc.resource_group }}" 41 | when: (vsi is defined) and (vsi != None) 42 | 43 | - name: Remove VSI fact 44 | when: vsi is defined 45 | ansible.builtin.set_fact: 46 | cacheable: true 47 | vsi: null 48 | 49 | # - name: Remove SSH Key 50 | # ibm.cloudcollection.ibm_is_ssh_key: 51 | # state: absent 52 | # id: "{{ ssh_key.id }}" 53 | # resource_group: "{{ vpc.resource_group }}" 54 | # when: ssh_key is defined 55 | 56 | # - name: Remove VPC Subnet 57 | # ibm.cloudcollection.ibm_is_subnet: 58 | # state: absent 59 | # id: "{{ subnet.id }}" 60 | # resource_group: "{{ vpc.resource_group }}" 61 | # when: subnet is defined 62 | 63 | # - name: Remove VPC 64 | # ibm.cloudcollection.ibm_is_vpc: 65 | # state: absent 66 | # id: "{{ vpc.id }}" 67 | # resource_group: "{{ vpc.resource_group }}" 68 | # when: vpc is defined 69 | -------------------------------------------------------------------------------- /ansible-webserver/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | os_type: zlinux 3 | # os_type: zos 4 | 5 | # Name prefix for the created objects (VPC, subnetwork, VSI) 6 | name_prefix: "{{ os_type }}-ansible" 7 | 8 | # - VSI name for the instance 9 | # - Profile name for the VSI 10 | # - Regular expression for the VSI image name 11 | # Note: If multiple matches are found the first one is taken 12 | instance_zlinux: 13 | name: "{{ name_prefix }}-zlinux-vsi" 14 | profile: bz2-2x8 15 | image: ibm-ubuntu.*s390x.* 16 | instance_zos: 17 | name: "{{ name_prefix }}-zos-vsi" 18 | profile: mz2-2x16 19 | image: ibm-zos.* 20 | username: IBMUSER 21 | python_interpreters: 22 | - /usr/lpp/IBM/cyp/v3r9/pyz/bin/python3 23 | - /usr/lpp/IBM/cyp/v3r8/pyz/bin/python3 24 | instance: "{{ lookup('vars', 'instance_' + os_type) }}" 25 | 26 | # Name of the uploaded key 27 | # Please notice that the same key might not be uploaded twice 28 | # under different names 29 | ssh_key_name: "{{ ansible_user_id | lower }}" 30 | ssh_public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" 31 | 32 | # Size of the created subnetwork 33 | total_ipv4_address_count: 256 34 | 35 | # Region to deploy to 36 | region: "jp-tok" 37 | # region: "eu-gb" 38 | # region: "br-sao" 39 | 40 | # Zone within the region 41 | zone: "{{ region }}-1" 42 | 43 | # If cache_vsi=True then VSI creation will only be attempted if not cached 44 | # cache_vsi: True 45 | cache_vsi: false 46 | 47 | # The resource group to be used under the IBM Cloud Account 48 | # resource_group_name: "{{ name_prefix }}-resources" 49 | -------------------------------------------------------------------------------- /ansible-webserver/index.html: 
--------------------------------------------------------------------------------
Time required to deploy this application

Step | Traditional on-premises | LinuxONE VPC
Create Subnetwork | 1 week | 30s
Create LPAR | 1-4 weeks | NA
Create KVM | 1-5 days | 40 seconds
Install webserver | 1 hour | 20 seconds
Publish webserver | 1 week | 10 seconds
52 | 53 | 54 | -------------------------------------------------------------------------------- /ibmcloud-vpnserver/README.md: -------------------------------------------------------------------------------- 1 | # Deploys a client-to-site VPN server (Beta) in VPC with Ansible 2 | 3 | This is a sample playbook for generating a client-to-site VPN server on IBM Cloud. This creates a single subnetwork VPC 4 | and deploys a client-to-site VPN server into it. This playbook produces certificates for use by the VPN server and client. 5 | It does not establish separate, by userid, login credentials, so anyone with the resulting ovpn file will be able to 6 | establish a VPN to the environment. Finally this playbook uses the IBM Cloud API to deploy the client-to-site VPN server. 7 | Once the ibm.cloudcollection ansible collection has been enriched to deploy the VPN server, it should be used instead of this 8 | project. 9 | 10 | ## Preparations 11 | 12 | 1. Install Python3 13 | 2. Install [RedHat Ansible] 2.9+ 14 | - `pip install "ansible>=2.9.2"` 15 | 3. Install ansible collections 16 | - `ansible-galaxy collection install ibm.cloudcollection` 17 | - `ansible-galaxy collection install community.crypto` 18 | 4. Adjust [ansible settings](group_vars/all.yml) 19 | 5. Ensure you have an `IC_API_KEY` environment variable set up with your 20 | IBM Cloud API key 21 | - this will likely require a paying account 22 | - you can create an API account by visiting the [IBM Cloud API keys page](https://cloud.ibm.com/iam/apikeys). Ensure you have 23 | selected the account you want to use before creating the key as the key will be associated to the account you have selected 24 | at the time of creation. 25 | - If you have downloaded your `apikey.json` file from the IBM Cloud UI you may use this command: 26 | `export IC_API_KEY=$(cat ~/apikey.json | jq -r .apikey)` 27 | 6. Ensure you have setup the appropriate IAM service-to-service authorizations to allow a client-to-site VPN server to access a 28 | Certificate Manager. [https://cloud.ibm.com/docs/vpc?topic=vpc-client-to-site-authentication#creating-iam-service-to-service](https://cloud.ibm.com/docs/vpc?topic=vpc-client-to-site-authentication#creating-iam-service-to-service) 29 | 30 | ## Create 31 | 32 | - `ansible-playbook create.yml` 33 | 34 | If you want to use a different region add `-e region=` to the above command. Example: `-e region=br-sao` 35 | 36 | ## Destroy 37 | 38 | 1. `ansible-playbook destroy.yml` 39 | - Note: The client-to-site VPN server (Beta), VPC and subnetwork, and Certificate Manager will not be deleted by default - adjust settings in [ansible settings](group_vars/all.yml) 40 | if you want them deleted. 
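Once `create.yml` has completed, the generated client profile (named `<region>.ovpn`, see `ovpn_config_file` in the [ansible settings](group_vars/all.yml)) can be imported into any OpenVPN-compatible client. A minimal sketch using the open-source OpenVPN CLI (installing the client itself is outside the scope of this playbook):

```bash
# bring up the tunnel with the profile produced for the default jp-tok region
sudo openvpn --config jp-tok.ovpn
```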
41 | -------------------------------------------------------------------------------- /ibmcloud-vpnserver/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | fact_caching = jsonfile 3 | fact_caching_connection = .cache 4 | 5 | [ssh_connection] 6 | pipelining = True -------------------------------------------------------------------------------- /ibmcloud-vpnserver/create.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Preparing a client-to-site VPN server (Beta) on IBM Cloud 3 | hosts: localhost 4 | connection: local 5 | roles: 6 | - ibmcloud-vpc 7 | - ibmcloud-certificates 8 | - ibmcloud-vpn-restapi 9 | environment: 10 | - IC_REGION: "{{ region }}" -------------------------------------------------------------------------------- /ibmcloud-vpnserver/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Destroying a client-to-site VPN server (Beta) on IBM Cloud 3 | hosts: localhost 4 | connection: local 5 | vars: 6 | - mode: "destroy" 7 | roles: 8 | - ibmcloud-vpn-restapi 9 | - ibmcloud-vpc 10 | - ibmcloud-certificates 11 | environment: 12 | - IC_REGION: "{{ region }}" -------------------------------------------------------------------------------- /ibmcloud-vpnserver/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | # Region to deploy 2 | region: "jp-tok" 3 | # region: "br-sao" 4 | # region: "eu-gb" 5 | 6 | certificate_service_name: "CertificateManager-vpnserver" 7 | 8 | # The region in which to deploy the certificate manager. Note that the free plan service 9 | # is not available in all regions. 10 | certificate_service_region: "us-south" 11 | 12 | # The certificate authority you wish to use for your VPN server and client. 13 | ca: 14 | x509_certificate_file: "{{region}}.ca.pem" 15 | x509_key_file: "{{region}}.ca.key" 16 | passphrase: "{{lookup('password', 'ibmcloud.ca.password chars=ascii_letters,hexdigits length=12')}}" 17 | common_name: "VPN Server CA for {{region}}" 18 | 19 | # The server certificate you wish to use for your VPN server. 20 | vpnserver: 21 | x509_certificate_file: "{{region}}.vpnserver.pem" 22 | x509_key_file: "{{region}}.vpnserver.key" 23 | common_name: "VPN Server for {{region}}" 24 | 25 | # The client certificate you wish to use for your VPN client. 26 | vpnclient: 27 | x509_certificate_file: "{{region}}.vpnclient.pem" 28 | x509_key_file: "{{region}}.vpnclient.key" 29 | common_name: "VPN Client for {{region}}" 30 | 31 | 32 | # Name for the VPC 33 | vpc_name: vpn-ansible 34 | 35 | # components for the VPC created objects (VPC, subnetworks, security group, VPN server) 36 | vpn-ansible: 37 | subnets: 38 | - name: "{{ vpc_name }}-subnet-1" 39 | zone: "{{region}}-1" 40 | security_group: 41 | tcp: 42 | - 443 43 | - 80 44 | - 22 45 | vpn: 46 | name: "{{ vpc_name }}-vpnserver" 47 | zone: "{{region}}-1" 48 | client_ip_pool: "10.2.0.0/16" 49 | 50 | # Name for the resulting OpenVPN client configuration file. 51 | ovpn_config_file: "{{region}}.ovpn" 52 | 53 | # If destroy_vpn is enabled then the destroy playbook will delete the client-to-site VPN server (Beta) 54 | # destroy_vpn: True 55 | destroy_vpn: False 56 | 57 | # If destroy_vpc is enabled then the destroy playbook will delete the subnets and the vpc. 58 | # Note that destroy_vpn must also be set to True for the VPC to be successfully deleted. 
59 | # destroy_vpc: True 60 | destroy_vpc: False 61 | 62 | # If destroy_certmgr is enabled then the destroy playbook will delete the Certificate Manager resource 63 | # destroy_certmgr: True 64 | destroy_certmgr: False -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-certificates/defaults/main.yml: -------------------------------------------------------------------------------- 1 | certificate_service_name: "CertificateManager-vpnserver" 2 | certificate_service_region: "{{region}}" 3 | ca: 4 | x509_certificate_file: "ibmcloud.ca.pem" 5 | x509_key_file: "ibmcloud.ca.key" 6 | passphrase: "{{lookup('password', 'ibmcloud.ca.password chars=ascii_letters,hexdigits length=12')}}" 7 | common_name: "VPN Server CA" 8 | vpnserver: 9 | x509_certificate_file: "ibmcloud.vpnserver.pem" 10 | x509_key_file: "ibmcloud.vpnserver.key" 11 | common_name: "VPN Server" 12 | vpnclient: 13 | x509_certificate_file: "ibmcloud.vpnclient.pem" 14 | x509_key_file: "ibmcloud.vpnclient.key" 15 | common_name: "VPN Client" 16 | 17 | destroy_certmgr: false 18 | 19 | mode: "create" -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-certificates/meta/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ibm.cloudcollection 3 | version: 1.49.0 4 | - name: community.crypto 5 | version: 2.15.1 6 | -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-certificates/tasks/create.yml: -------------------------------------------------------------------------------- 1 | 2 | - when: (cms is undefined) or (cms.keys() | length < 1) 3 | block: 4 | - name: provision certificate manager resource instance 5 | ibm.cloudcollection.ibm_resource_instance: 6 | name: "{{ certificate_service_name }}" 7 | location: "{{ certificate_service_region }}" 8 | service: "cloudcerts" 9 | plan: "free" 10 | register: resource_instance_output 11 | 12 | - name: Save certificate manager resource instance as fact 13 | set_fact: 14 | cacheable: True 15 | cms: "{{ resource_instance_output.resource }}" 16 | when: resource_instance_output.rc==0 17 | 18 | - name: Retrieve list of certificates already deployed 19 | ibm.cloudcollection.ibm_certificate_manager_certificates_info: 20 | certificate_manager_instance_id: "{{cms.id}}" 21 | region: "{{ certificate_service_region }}" 22 | register: cms_certificates_info 23 | 24 | - debug: 25 | var: cms_certificates_info 26 | 27 | - name: Save certificate manager resource instance as fact 28 | set_fact: 29 | cacheable: True 30 | cert_dict: "{{ cms_certificates_info.resource.certificates | 31 | items2dict(key_name='name', value_name='cert_id') }}" 32 | when: cms_certificates_info.rc==0 33 | 34 | - debug: 35 | var: cert_dict 36 | 37 | - when: ca.common_name not in cert_dict.keys() 38 | block: 39 | - name: test for private key file 40 | stat: 41 | path: "{{ ca.x509_key_file }}" 42 | register: ca_x509_key_test 43 | - name: Create private key with password protection 44 | community.crypto.openssl_privatekey: 45 | path: "{{ ca.x509_key_file }}" 46 | passphrase: "{{ ca.passphrase }}" 47 | cipher: auto 48 | when: not ca_x509_key_test.stat.exists 49 | - name: test for certificate file 50 | stat: 51 | path: "{{ ca.x509_certificate_file }}" 52 | register: ca_x509_cert_test 53 | - name: Create certificate signing request (CSR) for CA certificate 54 | 
community.crypto.openssl_csr_pipe: 55 | privatekey_path: "{{ ca.x509_key_file }}" 56 | privatekey_passphrase: "{{ ca.passphrase }}" 57 | common_name: "{{ ca.common_name }}" 58 | use_common_name_for_san: false 59 | basic_constraints: 60 | - 'CA:TRUE' 61 | basic_constraints_critical: yes 62 | key_usage: 63 | - keyCertSign 64 | key_usage_critical: true 65 | register: ca_csr 66 | when: not ca_x509_cert_test.stat.exists 67 | 68 | - name: Create self-signed CA certificate from CSR 69 | community.crypto.x509_certificate: 70 | path: "{{ ca.x509_certificate_file }}" 71 | csr_content: "{{ ca_csr.csr }}" 72 | privatekey_path: "{{ ca.x509_key_file }}" 73 | privatekey_passphrase: "{{ ca.passphrase }}" 74 | provider: selfsigned 75 | register: ca_certificate_output 76 | when: not ca_x509_cert_test.stat.exists 77 | 78 | - name: import our CA to {{ certificate_service_name }} 79 | ibm.cloudcollection.ibm_certificate_manager_import: 80 | certificate_manager_instance_id: "{{ cms.id }}" 81 | name: "{{ ca.common_name }}" 82 | region: "{{ certificate_service_region }}" 83 | data: 84 | content: "{{ lookup('file','{{ ca.x509_certificate_file }}') }}" 85 | 86 | - when: vpnserver.common_name not in cert_dict.keys() 87 | block: 88 | - name: test for private key file 89 | stat: 90 | path: "{{ vpnserver.x509_key_file }}" 91 | register: vpnserver_x509_key_test 92 | - name: Create private key for new VPN Server certificate 93 | community.crypto.openssl_privatekey: 94 | path: "{{ vpnserver.x509_key_file }}" 95 | when: not vpnserver_x509_key_test.stat.exists 96 | - name: test for cert file 97 | stat: 98 | path: "{{vpnserver.x509_certificate_file}}" 99 | register: vpnserver_cert_test 100 | - name: Create certificate signing request (CSR) for new VPN server certificate 101 | community.crypto.openssl_csr_pipe: 102 | privatekey_path: "{{ vpnserver.x509_key_file }}" 103 | common_name: "{{ vpnserver.common_name }}" 104 | key_usage: 105 | - digitalSignature 106 | - Key Encipherment 107 | extended_key_usage: 108 | - TLS Web Server Authentication 109 | register: csr 110 | when: not vpnserver_cert_test.stat.exists 111 | - name: Sign certificate with our CA 112 | community.crypto.x509_certificate: 113 | csr_content: "{{ csr.csr }}" 114 | provider: ownca 115 | path: "{{vpnserver.x509_certificate_file}}" 116 | ownca_path: "{{ ca.x509_certificate_file }}" 117 | ownca_privatekey_path: "{{ ca.x509_key_file }}" 118 | ownca_privatekey_passphrase: "{{ ca.passphrase }}" 119 | ownca_not_after: +365d # valid for one year 120 | ownca_not_before: "-1d" # valid since yesterday 121 | when: not vpnserver_cert_test.stat.exists 122 | - name: import our VPN server certificate to {{ certificate_service_name }} 123 | ibm.cloudcollection.ibm_certificate_manager_import: 124 | certificate_manager_instance_id: "{{ cms.id }}" 125 | name: "{{ vpnserver.common_name }}" 126 | region: "{{ certificate_service_region }}" 127 | data: 128 | content: "{{ lookup('file','{{ vpnserver.x509_certificate_file }}') }}" 129 | intermediate: "{{ lookup('file','{{ ca.x509_certificate_file }}') }}" 130 | priv_key: "{{ lookup('file','{{ vpnserver.x509_key_file }}') }}" 131 | - when: vpnclient.common_name not in cert_dict.keys() 132 | block: 133 | - name: test for private key file 134 | stat: 135 | path: "{{ vpnclient.x509_key_file }}" 136 | register: vpnclient_key_test 137 | - name: Create private key for client certificate 138 | community.crypto.openssl_privatekey: 139 | path: "{{ vpnclient.x509_key_file }}" 140 | when: not vpnclient_key_test.stat.exists 141 | - name: test for 
cert file 142 | stat: 143 | path: "{{ vpnclient.x509_certificate_file }}" 144 | register: vpnclient_cert_test 145 | - name: Create certificate signing request (CSR) for new client certificate 146 | community.crypto.openssl_csr_pipe: 147 | privatekey_path: "{{ vpnclient.x509_key_file }}" 148 | common_name: "{{vpnclient.common_name}}" 149 | key_usage: 150 | - digitalSignature 151 | extended_key_usage: 152 | - TLS Web Client Authentication 153 | register: csr 154 | when: not vpnclient_cert_test.stat.exists 155 | - name: Sign client certificate with our CA 156 | community.crypto.x509_certificate: 157 | csr_content: "{{ csr.csr }}" 158 | provider: ownca 159 | path: "{{ vpnclient.x509_certificate_file}}" 160 | ownca_path: "{{ ca.x509_certificate_file }}" 161 | ownca_privatekey_path: "{{ ca.x509_key_file }}" 162 | ownca_privatekey_passphrase: "{{ ca.passphrase }}" 163 | ownca_not_after: +365d # valid for one year 164 | ownca_not_before: "-1d" # valid since yesterday 165 | when: not vpnclient_cert_test.stat.exists 166 | - name: import our VPN client certificate to {{ certificate_service_name }} 167 | ibm.cloudcollection.ibm_certificate_manager_import: 168 | certificate_manager_instance_id: "{{ cms.id }}" 169 | name: "{{ vpnclient.common_name }}" 170 | region: "{{ certificate_service_region }}" 171 | data: 172 | content: "{{ lookup('file',vpnclient.x509_certificate_file) }}" 173 | intermediate: "{{ lookup('file','{{ ca.x509_certificate_file }}') }}" 174 | -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-certificates/tasks/destroy.yml: -------------------------------------------------------------------------------- 1 | 2 | - when: (cms is defined) and (cms.keys() | length > 0) and (destroy_certmgr is true) 3 | block: 4 | - name: destroy certificate manager resource instance 5 | ibm.cloudcollection.ibm_resource_instance: 6 | name: "{{ certificate_service_name }}" 7 | location: "{{ certificate_service_region }}" 8 | service: "cloudcerts" 9 | plan: "free" 10 | state: "absent" 11 | - set_fact: 12 | cacheable: True 13 | cert_dict: {} 14 | - name: Clear certificate manager fact 15 | set_fact: 16 | cacheable: True 17 | cms: {} -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-certificates/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: create.yml 2 | when: mode == "create" 3 | 4 | - include: destroy.yml 5 | when: mode == "destroy" and destroy_certmgr is true -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpc/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Name for the VPC 2 | vpc_name: zlinux-ansible 3 | 4 | # Name for the components for the VPC created objects (VPC, subnetwork, VSI) 5 | zlinux-ansible: 6 | subnets: 7 | - name: "{{ vpc_name }}-subnet-1" 8 | zone: "{{region}}-1" 9 | security_group: 10 | tcp: 11 | - 443 12 | - 80 13 | - 22 14 | 15 | # If destroy_vpc is enabled then the destroy playbook will delete the subnets and the vpc 16 | destroy_vpc: False 17 | 18 | # Size of the created subnetwork 19 | total_ipv4_address_count: 256 20 | 21 | mode: "create" -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpc/meta/requirements.yml: -------------------------------------------------------------------------------- 1 | 
collections: 2 | - name: ibm.cloudcollection 3 | version: 1.49.0 4 | -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpc/tasks/create.yml: -------------------------------------------------------------------------------- 1 | 2 | - when: (vpc is undefined) or (vpc.keys() | length < 1) 3 | block: 4 | - name: Create VPC 5 | ibm.cloudcollection.ibm_is_vpc: 6 | name: "{{ vpc_name }}" 7 | state: available 8 | id: "{{ vpc.id | default(omit) }}" 9 | register: vpc_create_output 10 | 11 | - name: Create security group rules 12 | ibm.cloudcollection.ibm_is_security_group_rule: 13 | state: available 14 | group: "{{ vpc_create_output.resource.default_security_group }}" 15 | direction: inbound 16 | remote: 0.0.0.0/0 17 | tcp: 18 | - port_max: "{{ item }}" 19 | port_min: "{{ item }}" 20 | loop: "{{ lookup('vars',vpc_name).security_group.tcp }}" 21 | 22 | - name: Save VPC as fact 23 | set_fact: 24 | cacheable: True 25 | vpc: "{{ vpc_create_output.resource }}" 26 | 27 | - set_fact: 28 | update_subnet: true 29 | when: (subnet_dict is undefined) or (subnet_dict.keys() | length < 1) 30 | 31 | - when: update_subnet is defined 32 | name: Create VPC Subnets 33 | ibm.cloudcollection.ibm_is_subnet: 34 | name: "{{ item.name }}" 35 | state: available 36 | vpc: "{{ vpc.id }}" 37 | total_ipv4_address_count: "{{ total_ipv4_address_count }}" 38 | zone: "{{ item.zone }}" 39 | register: subnet_create_output 40 | loop: "{{ lookup('vars',vpc_name).subnets }}" 41 | 42 | - name: Save VPC subnets as fact 43 | set_fact: 44 | cacheable: True 45 | subnet_dict: "{{ subnet_dict | default({}) | combine ({item.resource.zone: item.resource.id }) }}" 46 | with_items: "{{ subnet_create_output.results }}" 47 | when: update_subnet is defined -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpc/tasks/destroy.yml: -------------------------------------------------------------------------------- 1 | - name: Remove VPC Subnets 2 | when: (subnet_dict is defined) and (subnet_dict.keys() | length > 0) and (destroy_vpc is true) 3 | block: 4 | - ibm.cloudcollection.ibm_is_subnet: 5 | state: absent 6 | id: "{{ item.value }}" 7 | loop: "{{ subnet_dict|dict2items }}" 8 | 9 | - set_fact: 10 | cacheable: True 11 | subnet_dict: {} 12 | 13 | - name: Remove VPC 14 | when: (vpc is defined) and (vpc.keys() | length > 0) and (destroy_vpc is true) 15 | block: 16 | - ibm.cloudcollection.ibm_is_vpc: 17 | state: absent 18 | id: "{{ vpc.id }}" 19 | 20 | - name: Clear VPC fact 21 | set_fact: 22 | cacheable: True 23 | vpc: {} -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: create.yml 2 | when: mode == "create" 3 | 4 | - include: destroy.yml 5 | when: mode == "destroy" and destroy_vpc is true -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpn-restapi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | ibmcloud_iam_api_endpoint: "https://iam.cloud.ibm.com" 2 | ibmcloud_api_version: "2022-05-25" 3 | destroy_vpn: False 4 | ovpn_config_file: "ibmcloud.ovpn" 5 | 6 | certificate_service_region: "{{region}}" 7 | -------------------------------------------------------------------------------- 
/ibmcloud-vpnserver/roles/ibmcloud-vpn-restapi/meta/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ibm.cloudcollection 3 | version: 1.49.0 4 | -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpn-restapi/tasks/create.yml: -------------------------------------------------------------------------------- 1 | 2 | - when: (vpn_server is undefined) or (vpn_server.keys() | length < 1) 3 | block: 4 | - name: Retrieve list of certificates already deployed 5 | ibm.cloudcollection.ibm_certificate_manager_certificates_info: 6 | certificate_manager_instance_id: "{{cms.id}}" 7 | region: "{{ certificate_service_region }}" 8 | register: cms_certificates_info 9 | 10 | - name: Save certificate manager resource instance as fact 11 | set_fact: 12 | cacheable: True 13 | cert_dict: "{{ cms_certificates_info.resource.certificates | 14 | items2dict(key_name='name', value_name='cert_id') }}" 15 | when: cms_certificates_info.rc==0 16 | - name: "Retrieve IBM Cloud vpc address prefixes" 17 | ibm.cloudcollection.ibm_is_vpc_address_prefixes_info: 18 | vpc: "{{vpc.id}}" 19 | register: get_addresses_output 20 | - debug: 21 | var: get_addresses_output 22 | - name: create vpn gateway 23 | uri: 24 | method: POST 25 | url: "{{ ansible_env.IBMCLOUD_IS_NG_API_ENDPOINT | default('https://'+region+'.iaas.cloud.ibm.com') }}/v1/vpn_servers?version={{ibmcloud_api_version}}&generation=2&maturity=beta" 26 | headers: 27 | Authorization: "Bearer {{ IAM_TOKEN }} " 28 | Content-Type: "application/json" 29 | accept: "application/json" 30 | body_format: json 31 | body: 32 | certificate: 33 | crn: "{{cert_dict[vpnserver.common_name]}}" 34 | client_authentication: 35 | - method: "certificate" 36 | client_ca: 37 | crn: "{{cert_dict[vpnclient.common_name]}}" 38 | client_idle_timeout: 600 39 | client_ip_pool: "{{ lookup('vars',vpc_name).vpn.client_ip_pool }}" 40 | enable_split_tunneling: true 41 | name: "{{ lookup('vars',vpc_name).vpn.name }}" 42 | port: 443 43 | protocol: "tcp" 44 | resource_group: 45 | id: "{{ vpc.resource_group}}" 46 | subnets: 47 | - id: "{{ subnet_dict[lookup('vars',vpc_name).vpn.zone] }}" 48 | status_code: 49 | - 200 50 | - 201 51 | register: vpn_server_create_response 52 | - name: Retrieve vpn gateway information 53 | uri: 54 | method: GET 55 | url: "{{ ansible_env.IBMCLOUD_IS_NG_API_ENDPOINT | default('https://'+region+'.iaas.cloud.ibm.com') }}/v1/vpn_servers/{{vpn_server_create_response.json.id }}?version={{ibmcloud_api_version}}&generation=2&maturity=beta" 56 | headers: 57 | Authorization: "Bearer {{ IAM_TOKEN }} " 58 | Content-Type: "application/json" 59 | accept: "application/json" 60 | register: vpn_server_output 61 | - name: Set address routes for the VPC into the VPN server 62 | uri: 63 | method: POST 64 | url: "{{ ansible_env.IBMCLOUD_IS_NG_API_ENDPOINT | default('https://'+region+'.iaas.cloud.ibm.com') }}/v1/vpn_servers/{{vpn_server_create_response.json.id }}/routes?version={{ibmcloud_api_version}}&generation=2&maturity=beta" 65 | headers: 66 | Authorization: "Bearer {{ IAM_TOKEN }} " 67 | Content-Type: "application/json" 68 | accept: "application/json" 69 | body_format: json 70 | body: 71 | name: "{{item.name}}" 72 | destination: "{{item.cidr}}" 73 | action: "translate" 74 | status_code: 75 | - 200 76 | - 201 77 | loop: "{{get_addresses_output.resource.address_prefixes}}" 78 | - name: Set vpn gateway information as fact 79 | set_fact: 80 | cacheable: True 81 | 
vpn_server: "{{vpn_server_output.json}}" 82 | 83 | - name: "Create the {{ovpn_config_file}} OpenVPN profile" 84 | template: 85 | src: ovpn.j2 86 | dest: "{{ovpn_config_file}}" -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpn-restapi/tasks/destroy.yml: -------------------------------------------------------------------------------- 1 | - when: (vpn_server is defined) and (vpn_server.keys() | length > 0) and (destroy_vpn is true) 2 | block: 3 | - name: delete vpn gateway 4 | uri: 5 | method: DELETE 6 | url: "{{ ansible_env.IBMCLOUD_IS_NG_API_ENDPOINT | default('https://'+region+'.iaas.cloud.ibm.com') }}/v1/vpn_servers/{{vpn_server.id}}?version={{ibmcloud_api_version}}&generation=2&maturity=beta" 7 | headers: 8 | Authorization: "Bearer {{ IAM_TOKEN }} " 9 | Content-Type: "application/json" 10 | accept: "application/json" 11 | status_code: 12 | - 200 13 | - 202 14 | - name: Clear vpn fact 15 | set_fact: 16 | cacheable: True 17 | vpn_server: {} -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpn-restapi/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "Get an IAM token" 2 | uri: 3 | method: POST 4 | url: "{{ibmcloud_iam_api_endpoint}}/identity/token" 5 | body_format: json 6 | body: "grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={{ ansible_env.IC_API_KEY }}" 7 | headers: 8 | Content-Type: "application/x-www-form-urlencoded" 9 | register: iam_resp 10 | 11 | - name: "Set IAM_TOKEN" 12 | set_fact: 13 | IAM_TOKEN: "{{ iam_resp['json']['access_token'] }}" 14 | 15 | - include: create.yml 16 | when: mode == "create" 17 | 18 | - include: destroy.yml 19 | when: mode == "destroy" and destroy_vpn is true -------------------------------------------------------------------------------- /ibmcloud-vpnserver/roles/ibmcloud-vpn-restapi/templates/ovpn.j2: -------------------------------------------------------------------------------- 1 | client 2 | dev tun 3 | proto {{vpn_server.protocol}} 4 | port {{vpn_server.port}} 5 | remote {{vpn_server.hostname}} 6 | resolv-retry infinite 7 | remote-cert-tls server 8 | nobind 9 | 10 | auth SHA256 11 | cipher AES-256-GCM 12 | verb 3 13 | 14 | 15 | {{lookup('file', ca.x509_certificate_file) }} 16 | 17 | 18 | 19 | {{lookup('file', vpnclient.x509_certificate_file) }} 20 | 21 | 22 | 23 | {{lookup('file', vpnclient.x509_key_file) }} 24 | 25 | 26 | reneg-sec 0 27 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /terraform-hpvs/README.md: -------------------------------------------------------------------------------- 1 | ## IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC Examples 2 | 3 | ## Preparation 4 | 5 | 1. Make sure to have the [OpenSSL](https://www.openssl.org/) binary installed (see [details](#openssl)) 6 | 2. Install the [terraform CLI](https://learn.hashicorp.com/tutorials/terraform/install-cli) for your environment 7 | 3. Follow the description for the [IBM Cloud Provider](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs#authentication) to get your API key 8 | 4. 
Follow the description for [HPCR](https://cloud.ibm.com/docs/vpc?topic=vpc-logging-for-hyper-protect-virtual-servers-for-vpc) to setup a logging instance. 9 | 5. Set either environment variables or fill the template file according to the example README 10 | 11 | ## Examples 12 | 13 | Follow the README in the subdirectory of the examples for further instructions: 14 | 15 | - [hello-world](hello-world/README.md) - a minimal hello world example 16 | - [nginx](nginx-hello/README.md) - a minimal hello world example using nginx 17 | 18 | 19 | ## OpenSSL 20 | 21 | The [terraform provider](https://registry.terraform.io/providers/ibm-hyper-protect/hpcr/) leverages the [OpenSSL](https://www.openssl.org/) binary for all cryptographic operations in favour of the [golang crypto](https://pkg.go.dev/crypto) packages. This is because the golang libraries are not [FIPS](https://en.wikipedia.org/wiki/FIPS_140-2) certified whereas there exist certified OpenSSL binaries a customer can select. 22 | 23 | Make sure to have the binaries installed for your platform. **Note:** on some platforms the default binary is actually a [LibreSSL](https://www.libressl.org/) installation, which is **not** compatible. 24 | 25 | Verify your installation via running: 26 | 27 | ```bash 28 | openssl version 29 | ``` 30 | 31 | this should give an output similar to the following: 32 | 33 | ```text 34 | OpenSSL 1.1.1q 5 Jul 2022 35 | ``` 36 | 37 | In case you cannot change the OpenSSL binary in the path, you may override the version used by the [terraform provider](https://registry.terraform.io/providers/ibm-hyper-protect/hpcr/) by setting the `OPENSSL_BIN` environment variable to the absolute path of the correct binary, e.g. 38 | 39 | ```cmd 40 | OPENSSL_BIN="C:\Program Files\OpenSSL-Win64\bin\openssl.exe" 41 | ``` 42 | -------------------------------------------------------------------------------- /terraform-hpvs/attestation/README.md: -------------------------------------------------------------------------------- 1 | ## Attestation Sample 2 | 3 | This sample deploys the [attestation](https://hub.docker.com/_/attestation) example as a [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se). 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 8 | 9 | ### Settings 10 | 11 | Use one of the following options to set you settings: 12 | 13 | #### Template file 14 | 15 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 16 | 2. Fill the values in `my-settings.auto.tfvars` 17 | 18 | #### Environment variables 19 | 20 | Set the following environment variables: 21 | 22 | ```text 23 | IC_API_KEY= 24 | TF_VAR_zone= 25 | TF_VAR_region= 26 | TF_VAR_icl_iam_apikey= 27 | TF_VAR_icl_hostname= 28 | ``` 29 | 30 | ### Run the Example 31 | 32 | Initialize terraform: 33 | 34 | ```bash 35 | terraform init 36 | ``` 37 | 38 | Deploy the example: 39 | 40 | ```bash 41 | terraform apply 42 | ``` 43 | 44 | This will create a sample virtual server instance. Monitor your ICL instance for logs. 
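Besides the log entries, this example also writes the encrypted contract and the decrypted attestation record to your workstation (see the `local_file` resources in `terraform.tf`). A quick way to inspect them after `terraform apply`, assuming `jq` is installed:

```bash
# the decoded attestation checksums and the encrypted contract end up in the build/ folder
jq . build/se-checksums.json
head build/contract.yml
```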
45 | 46 | Destroy the created resources: 47 | 48 | ```bash 49 | terraform destroy 50 | ``` 51 | -------------------------------------------------------------------------------- /terraform-hpvs/attestation/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.0' 2 | services: 3 | busybox: 4 | image: registry.access.redhat.com/ubi8/ubi@sha256:e721f98a49e731f0bd64f8e89c229e1dbb38c46265d92849b3e0bedaf5f81139 5 | command: | 6 | curl -v -X "PUT" "${S3_URL}" -H "Authorization: ${AUTHORIZATION}" -H "Content-Type: text/plain" -d @/var/hyperprotect/se-checksums.txt.enc 7 | volumes: 8 | - /var/hyperprotect/:/var/hyperprotect/:ro 9 | environment: 10 | - AUTHORIZATION 11 | - S3_URL 12 | -------------------------------------------------------------------------------- /terraform-hpvs/attestation/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | ibmcloud_api_key="Your IBM Cloud API key" 2 | region="ca-tor" # Region to deploy to. Options include eu-gb, jp-tok, br-sao, ca-tor, us-east 3 | zone="2" # Zone within region to create the HPVS into. 4 | icl_hostname="Your ICL hostname" 5 | icl_iam_apikey="Your IBM Cloud API key" 6 | -------------------------------------------------------------------------------- /terraform-hpvs/attestation/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.6" 6 | } 7 | 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.1" 11 | } 12 | 13 | random = { 14 | source = "hashicorp/random" 15 | version = ">= 3.4.3" 16 | } 17 | 18 | time = { 19 | source = "hashicorp/time" 20 | version = ">= 0.9.1" 21 | } 22 | 23 | ibm = { 24 | source = "IBM-Cloud/ibm" 25 | version = ">= 1.37.1" 26 | } 27 | } 28 | } 29 | 30 | # make sure to target the correct region and zone 31 | provider "ibm" { 32 | region = var.region 33 | zone = "${var.region}-${var.zone}" 34 | } 35 | 36 | resource "random_uuid" "attestation_uuid" { 37 | } 38 | 39 | locals { 40 | # some reusable tags that identify the resources created by his sample 41 | tags = ["hpcr", "sample", var.prefix] 42 | } 43 | 44 | # the VPC 45 | resource "ibm_is_vpc" "attestation_vpc" { 46 | name = format("%s-vpc", var.prefix) 47 | tags = local.tags 48 | } 49 | 50 | # locate the COS instance 51 | data "ibm_resource_instance" "attestation_cos_instance" { 52 | name = "secure-execution" 53 | location = "global" 54 | service = "cloud-object-storage" 55 | } 56 | 57 | # create a bucket we use to upload the attestation record 58 | resource "ibm_cos_bucket" "attestation_cos_bucket" { 59 | resource_instance_id = data.ibm_resource_instance.attestation_cos_instance.id 60 | bucket_name = format("%s-bucket", var.prefix) 61 | region_location = var.region 62 | storage_class = "standard" 63 | } 64 | 65 | # get the authentication token we use to upload the attestation record 66 | data "ibm_iam_auth_token" "attestation_auth_token" { 67 | } 68 | 69 | # the security group 70 | resource "ibm_is_security_group" "attestation_security_group" { 71 | name = format("%s-security-group", var.prefix) 72 | vpc = ibm_is_vpc.attestation_vpc.id 73 | tags = local.tags 74 | } 75 | 76 | # rule that allows the VSI to make outbound connections, this is required 77 | # to connect to the logDNA instance as well as to docker to pull the image 78 | resource "ibm_is_security_group_rule" 
"attestation_outbound" { 79 | group = ibm_is_security_group.attestation_security_group.id 80 | direction = "outbound" 81 | remote = "0.0.0.0/0" 82 | } 83 | 84 | # the subnet 85 | resource "ibm_is_subnet" "attestation_subnet" { 86 | name = format("%s-subnet", var.prefix) 87 | vpc = ibm_is_vpc.attestation_vpc.id 88 | total_ipv4_address_count = 256 89 | zone = "${var.region}-${var.zone}" 90 | tags = local.tags 91 | } 92 | 93 | # we use a gateway to allow the VSI to connect to the internet to logDNA 94 | # and docker. Without a gateway the VSI would need a floating IP. Without 95 | # either the VSI will not be able to connect to the internet despite 96 | # an outbound rule 97 | resource "ibm_is_public_gateway" "attestation_gateway" { 98 | name = format("%s-gateway", var.prefix) 99 | vpc = ibm_is_vpc.attestation_vpc.id 100 | zone = "${var.region}-${var.zone}" 101 | tags = local.tags 102 | } 103 | 104 | # attach the gateway to the subnet 105 | resource "ibm_is_subnet_public_gateway_attachment" "attestation_gateway_attachment" { 106 | subnet = ibm_is_subnet.attestation_subnet.id 107 | public_gateway = ibm_is_public_gateway.attestation_gateway.id 108 | } 109 | 110 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 111 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 112 | resource "hpcr_tgz" "contract" { 113 | folder = "compose" 114 | } 115 | 116 | locals { 117 | # URL to the attestation object 118 | attestationKey = format("%s.enc", random_uuid.attestation_uuid.result) 119 | attestationURL = format("https://%s/%s/%s", ibm_cos_bucket.attestation_cos_bucket.s3_endpoint_public, urlencode(ibm_cos_bucket.attestation_cos_bucket.bucket_name), urlencode(local.attestationKey)) 120 | # contract in clear text 121 | contract = yamlencode({ 122 | "env" : { 123 | "type" : "env", 124 | "logging" : { 125 | "logRouter" : { 126 | "hostname" : var.icl_hostname, 127 | "iamApiKey" : var.icl_iam_apikey, 128 | } 129 | }, 130 | "env" : { 131 | "AUTHORIZATION" : data.ibm_iam_auth_token.attestation_auth_token.iam_access_token, 132 | "S3_URL" : local.attestationURL 133 | } 134 | }, 135 | "workload" : { 136 | "type" : "workload", 137 | "compose" : { 138 | "archive" : hpcr_tgz.contract.rendered 139 | } 140 | }, 141 | "attestationPublicKey" : tls_private_key.attestation_enc_rsa_key.public_key_pem 142 | }) 143 | } 144 | 145 | # create a key pair for the purpose of encrypting the attestation record 146 | resource "tls_private_key" "attestation_enc_rsa_key" { 147 | algorithm = "RSA" 148 | rsa_bits = 4096 149 | } 150 | 151 | # create a random key pair, because for formal reasons we need to pass an SSH key into the VSI. 
It will not be used, that's why 152 | # it can be random 153 | resource "tls_private_key" "attestation_rsa_key" { 154 | algorithm = "RSA" 155 | rsa_bits = 4096 156 | } 157 | 158 | # we only need this because VPC expects this 159 | resource "ibm_is_ssh_key" "attestation_sshkey" { 160 | name = format("%s-key", var.prefix) 161 | public_key = tls_private_key.attestation_rsa_key.public_key_openssh 162 | tags = local.tags 163 | } 164 | 165 | # locate all public image 166 | data "ibm_is_images" "hyper_protect_images" { 167 | visibility = "public" 168 | status = "available" 169 | } 170 | 171 | # locate the latest hyper protect image 172 | data "hpcr_image" "hyper_protect_image" { 173 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 174 | } 175 | 176 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 177 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 178 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 179 | resource "hpcr_contract_encrypted" "contract" { 180 | contract = local.contract 181 | } 182 | 183 | # construct the VSI 184 | resource "ibm_is_instance" "attestation_vsi" { 185 | name = format("%s-vsi", var.prefix) 186 | image = data.hpcr_image.hyper_protect_image.image 187 | profile = var.profile 188 | keys = [ibm_is_ssh_key.attestation_sshkey.id] 189 | vpc = ibm_is_vpc.attestation_vpc.id 190 | tags = local.tags 191 | zone = "${var.region}-${var.zone}" 192 | 193 | # the user data field carries the encrypted contract, so all information visible at the hypervisor layer is encrypted 194 | user_data = hpcr_contract_encrypted.contract.rendered 195 | 196 | primary_network_interface { 197 | name = "eth0" 198 | subnet = ibm_is_subnet.attestation_subnet.id 199 | security_groups = [ibm_is_security_group.attestation_security_group.id] 200 | } 201 | 202 | } 203 | 204 | 205 | # huhh, this is not nice 206 | resource "time_sleep" "wait_for_attestation" { 207 | depends_on = [ 208 | ibm_is_instance.attestation_vsi 209 | ] 210 | 211 | create_duration = "45s" 212 | } 213 | 214 | data "ibm_cos_bucket_object" "attestation_record" { 215 | bucket_crn = ibm_cos_bucket.attestation_cos_bucket.crn 216 | bucket_location = ibm_cos_bucket.attestation_cos_bucket.region_location 217 | key = local.attestationKey 218 | endpoint_type = "public" 219 | depends_on = [ 220 | time_sleep.wait_for_attestation 221 | ] 222 | } 223 | 224 | data "hpcr_attestation" "attestation_decoded" { 225 | attestation = data.ibm_cos_bucket_object.attestation_record.body 226 | privkey = tls_private_key.attestation_enc_rsa_key.private_key_pem 227 | } 228 | 229 | resource "local_file" "contract" { 230 | content = hpcr_contract_encrypted.contract.rendered 231 | filename = "${path.module}/build/contract.yml" 232 | } 233 | 234 | resource "local_file" "attestation_pub_key" { 235 | content = tls_private_key.attestation_enc_rsa_key.public_key_pem 236 | filename = "${path.module}/build/attestation.pub" 237 | } 238 | 239 | resource "local_file" "attestation_priv_key" { 240 | content = tls_private_key.attestation_enc_rsa_key.private_key_pem_pkcs8 241 | filename = "${path.module}/build/attestation" 242 | } 243 | 244 | resource "local_file" "attestation_record" { 245 | content = jsonencode(data.hpcr_attestation.attestation_decoded.checksums) 246 | filename = "${path.module}/build/se-checksums.json" 247 | } 
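# Illustrative sketch (an optional addition, not required by this sample): the decoded
# attestation checksums could also be exposed as a Terraform output for scripting,
# instead of reading the generated build/se-checksums.json file.
output "attestation_checksums" {
  value = data.hpcr_attestation.attestation_decoded.checksums
}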
-------------------------------------------------------------------------------- /terraform-hpvs/attestation/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = <<-DESC 3 | Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: 4 | https://cloud.ibm.com/iam#/apikeys 5 | DESC 6 | sensitive = true 7 | } 8 | 9 | variable "region" { 10 | type = string 11 | description = "Region to deploy to, e.g. eu-gb" 12 | 13 | validation { 14 | condition = (var.region == "eu-gb" || 15 | var.region == "br-sao" || 16 | var.region == "ca-tor" || 17 | var.region == "jp-tok" || 18 | var.region == "us-east") 19 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok/us-east." 20 | } 21 | } 22 | 23 | variable "zone" { 24 | type = string 25 | default = "2" 26 | description = "Zone to deploy to, e.g. 2." 27 | 28 | validation { 29 | condition = (var.zone == "1" || 30 | var.zone == "2" || 31 | var.zone == "3") 32 | error_message = "Value of zone must be one of 1/2/3." 33 | } 34 | } 35 | 36 | variable "icl_hostname" { 37 | type = string 38 | description = <<-DESC 39 | Host of IBM Cloud Logs. This can be 40 | obtained from cloud logs tab under Logging instances 41 | DESC 42 | } 43 | 44 | variable "icl_iam_apikey" { 45 | type = string 46 | sensitive = true 47 | description = <<-DESC 48 | This can be obtained from Access(IAM) under Manage 49 | DESC 50 | } 51 | 52 | variable "prefix" { 53 | type = string 54 | default = "hpcr-sample-attestation" 55 | description = "Prefix to be attached to name of all generated resources" 56 | } 57 | 58 | variable "profile" { 59 | type = string 60 | default = "bz2e-1x4" 61 | description = <<-DESC 62 | Profile used for the VSI. This has to be a secure execution 63 | profile in the format Xz2e-YxZ, e.g. bz2e-1x4 64 | DESC 65 | } 66 | -------------------------------------------------------------------------------- /terraform-hpvs/contract-expiry/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation with contract expiry example 2 | 3 | This sample creates an encrypted and signed contract with expiry enabled and stores it locally in a file. In addition this example identifies 4 | the latest version of HPCR in the VPC cloud and then downloads the matching encryption certifcicate. 5 | 6 | ### Prerequisite 7 | 8 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 9 | 10 | ### Settings 11 | 12 | #### Prerequisites 13 | 14 | 1. Generate private key using the following commnad: 15 | ```bash 16 | openssl genrsa -out private.pem 4096 17 | ``` 18 | 2. Generate CA private key using the following command: 19 | ```bash 20 | openssl genrsa -out personal_ca.key 2048 21 | ``` 22 | 3. Generate CA certificate using the following command: 23 | ```bash 24 | openssl req -new -x509 -days 365 -key personal_ca.key -out personal_ca.crt 25 | ``` 26 | 27 | Use one of the following options to set your settings: 28 | 29 | #### Template file 30 | 31 | 1. Copy contents of `my-settings.auto.tfvars-template` to `my-settings.auto.tfvars`. 32 | ```bash 33 | cp my-settings.auto.tfvars-template my-settings.auto.tfvars 34 | ``` 35 | 2. Update `my-settings.auto.tfvars` to appropriate values. 
36 | 37 | #### Environment variables 38 | 39 | Set the following environment variables: 40 | 41 | ```text 42 | TF_VAR_icl_iam_apikey="" 43 | TF_VAR_icl_hostname="" 44 | 45 | TF_VAR_hpcr_csr_country="" 46 | TF_VAR_hpcr_csr_state="" 47 | TF_VAR_hpcr_csr_location="" 48 | TF_VAR_hpcr_csr_org="" 49 | TF_VAR_hpcr_csr_unit="" 50 | TF_VAR_hpcr_csr_domain="" 51 | TF_VAR_hpcr_csr_mail="" 52 | 53 | TF_VAR_hpcr_private_key_path="" 54 | TF_VAR_hpcr_contract_expiry_days= 55 | TF_VAR_hpcr_ca_privatekey_path="" 56 | TF_VAR_hpcr_cacert_path="" 57 | ``` 58 | 59 | ### Run the Example 60 | 61 | Initialize terraform: 62 | 63 | ```bash 64 | terraform init 65 | ``` 66 | 67 | Deploy the example: 68 | 69 | ```bash 70 | terraform apply 71 | ``` 72 | 73 | The contract will be persisted in the `build/contract.yml` folder for further use. -------------------------------------------------------------------------------- /terraform-hpvs/contract-expiry/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: docker.io/library/hello-world@sha256:4f53e2564790c8e7856ec08e384732aa38dc43c52f02952483e3f003afbf23db 4 | -------------------------------------------------------------------------------- /terraform-hpvs/contract-expiry/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="" 2 | icl_iam_apikey="" 3 | 4 | hpcr_csr_country="" 5 | hpcr_csr_state="" 6 | hpcr_csr_location="" 7 | hpcr_csr_org="" 8 | hpcr_csr_unit="" 9 | hpcr_csr_domain="" 10 | hpcr_csr_mail="" 11 | 12 | hpcr_private_key_path="" 13 | hpcr_contract_expiry_days= 14 | hpcr_ca_privatekey_path="" 15 | hpcr_cacert_path="" 16 | -------------------------------------------------------------------------------- /terraform-hpvs/contract-expiry/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.5.0" 6 | } 7 | } 8 | } 9 | 10 | resource "hpcr_tgz" "contract" { 11 | folder = "compose" 12 | } 13 | 14 | locals { 15 | # contract in clear text 16 | contract = yamlencode({ 17 | "env" : { 18 | "type" : "env", 19 | "logging" : { 20 | "logRouter" : { 21 | "hostname" : var.icl_hostname, 22 | "iamApiKey" : var.icl_iam_apikey, 23 | }, 24 | }, 25 | }, 26 | "workload" : { 27 | "type" : "workload", 28 | "compose" : { 29 | "archive" : hpcr_tgz.contract.rendered 30 | } 31 | } 32 | }) 33 | 34 | csrParams = { 35 | "country": var.hpcr_csr_country, 36 | "state": var.hpcr_csr_state, 37 | "location": var.hpcr_csr_location, 38 | "org": var.hpcr_csr_org, 39 | "unit": var.hpcr_csr_unit, 40 | "domain": var.hpcr_csr_domain, 41 | "mail": var.hpcr_csr_mail 42 | } 43 | } 44 | 45 | resource "hpcr_contract_encrypted_contract_expiry" "contract" { 46 | contract = local.contract 47 | privkey= file(var.hpcr_private_key_path) 48 | expiry = var.hpcr_contract_expiry_days 49 | cakey = file(var.hpcr_ca_privatekey_path) 50 | cacert = file(var.hpcr_cacert_path) 51 | csrparams = local.csrParams 52 | } 53 | 54 | resource "local_file" "contract" { 55 | content = hpcr_contract_encrypted_contract_expiry.contract.rendered 56 | filename = "${path.module}/build/contract.yml" 57 | } -------------------------------------------------------------------------------- /terraform-hpvs/contract-expiry/variables.tf: -------------------------------------------------------------------------------- 1 | variable 
"hpcr_private_key_path" { 2 | type = string 3 | description = "Path of private key for signature" 4 | } 5 | 6 | variable "hpcr_ca_privatekey_path" { 7 | type = string 8 | description = "Path to CA private key" 9 | } 10 | 11 | variable "hpcr_cacert_path" { 12 | type = string 13 | description = "Path to CA certificate" 14 | } 15 | 16 | variable "hpcr_csr_country" { 17 | type = string 18 | description = "HPCR CSR country" 19 | } 20 | 21 | variable "hpcr_csr_state" { 22 | type = string 23 | description = "HPCR CSR state" 24 | } 25 | 26 | variable "hpcr_csr_location" { 27 | type = string 28 | description = "HPCR CSR location" 29 | } 30 | 31 | variable "hpcr_csr_org" { 32 | type = string 33 | description = "HPCR CSR org" 34 | } 35 | 36 | variable "hpcr_csr_unit" { 37 | type = string 38 | description = "HPCR CSR unit" 39 | } 40 | 41 | variable "hpcr_csr_domain" { 42 | type = string 43 | description = "HPCR CSR domain" 44 | } 45 | 46 | variable "hpcr_csr_mail" { 47 | type = string 48 | description = "HPCR CSR Mail ID" 49 | } 50 | 51 | variable "hpcr_contract_expiry_days" { 52 | type = number 53 | description = "Number of days for contract to expire" 54 | } 55 | 56 | variable "icl_hostname" { 57 | type = string 58 | description = <<-DESC 59 | Host of IBM Cloud Logs. This can be 60 | obtained from cloud logs tab under Logging instances 61 | DESC 62 | } 63 | 64 | variable "icl_iam_apikey" { 65 | type = string 66 | sensitive = true 67 | description = <<-DESC 68 | This can be obtained from Access(IAM) under Manage 69 | DESC 70 | } -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-download-encryption/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation example 2 | 3 | This sample creates an encrypted and signed contract and stores it locally in a file. In addition this example identifies 4 | the latest version of HPCR in the VPC cloud and then downloads the matching encryption certifcicate. 5 | 6 | ### Prerequisite 7 | 8 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 9 | 10 | ### Settings 11 | 12 | Use one of the following options to set you settings: 13 | 14 | #### Template file 15 | 16 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 17 | 2. Fill the values in `my-settings.auto.tfvars` 18 | 19 | #### Environment variables 20 | 21 | Set the following environment variables: 22 | 23 | ```text 24 | TF_VAR_icl_iam_apikey= 25 | TF_VAR_icl_hostname= 26 | ``` 27 | 28 | ### Run the Example 29 | 30 | Initialize terraform: 31 | 32 | ```bash 33 | terraform init 34 | ``` 35 | 36 | Deploy the example: 37 | 38 | ```bash 39 | terraform apply 40 | ``` 41 | 42 | The contract will be persisted in the `build/contract.yml` folder for further use. 
43 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-download-encryption/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: docker.io/library/hello-world@sha256:4f53e2564790c8e7856ec08e384732aa38dc43c52f02952483e3f003afbf23db 4 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-download-encryption/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="Your ICL hostname" 2 | icl_iam_apikey="Your IBM Cloud API key" 3 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-download-encryption/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.12" 6 | } 7 | ibm = { 8 | source = "IBM-Cloud/ibm" 9 | version = ">= 1.37.1" 10 | } 11 | } 12 | } 13 | 14 | # make sure to target the correct region and zone 15 | provider "ibm" { 16 | region = var.region 17 | zone = "${var.region}-${var.zone}" 18 | ibmcloud_api_key = var.ibmcloud_api_key 19 | } 20 | 21 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 22 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 23 | resource "hpcr_tgz" "contract" { 24 | folder = "compose" 25 | } 26 | 27 | # locate all public image 28 | data "ibm_is_images" "hyper_protect_images" { 29 | visibility = "public" 30 | status = "available" 31 | } 32 | 33 | # locate the latest hyper protect image from the list of available images 34 | data "hpcr_image" "hyper_protect_image" { 35 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 36 | } 37 | 38 | # load the certificate for the selected image versions 39 | # in this case we only download the certificate for the selected version of the image 40 | data "hpcr_encryption_certs" "enc_certs" { 41 | versions = [data.hpcr_image.hyper_protect_image.version] 42 | } 43 | 44 | locals { 45 | # contract in clear text 46 | contract = yamlencode({ 47 | "env" : { 48 | "type" : "env", 49 | "logging" : { 50 | "logRouter" : { 51 | "hostname" : var.icl_hostname, 52 | "iamApiKey" : var.icl_iam_apikey, 53 | } 54 | } 55 | }, 56 | "workload" : { 57 | "type" : "workload", 58 | "compose" : { 59 | "archive" : hpcr_tgz.contract.rendered 60 | } 61 | } 62 | }) 63 | } 64 | 65 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 66 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 67 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 
68 | resource "hpcr_contract_encrypted" "contract" { 69 | contract = local.contract 70 | cert = data.hpcr_encryption_certs.enc_certs.certs[data.hpcr_image.hyper_protect_image.version] 71 | } 72 | 73 | resource "local_file" "contract" { 74 | content = hpcr_contract_encrypted.contract.rendered 75 | filename = "${path.module}/build/contract.yml" 76 | } 77 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-download-encryption/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_hostname" { 2 | type = string 3 | description = <<-DESC 4 | Host of IBM Cloud Logs. This can be 5 | obtained from cloud logs tab under Logging instances 6 | DESC 7 | } 8 | 9 | variable "icl_iam_apikey" { 10 | type = string 11 | sensitive = true 12 | description = <<-DESC 13 | This can be obtained from Access(IAM) under Manage 14 | DESC 15 | } 16 | 17 | variable "ibmcloud_api_key" { 18 | description = <<-DESC 19 | Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: 20 | https://cloud.ibm.com/iam#/apikeys 21 | DESC 22 | sensitive = true 23 | } 24 | 25 | variable "region" { 26 | type = string 27 | description = "Region to deploy to, e.g. eu-gb" 28 | 29 | validation { 30 | condition = (var.region == "eu-gb" || 31 | var.region == "br-sao" || 32 | var.region == "ca-tor" || 33 | var.region == "jp-tok" || 34 | var.region == "us-east") 35 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok/us-east." 36 | } 37 | } 38 | 39 | variable "zone" { 40 | type = string 41 | default = "2" 42 | description = "Zone to deploy to, e.g. 2." 43 | 44 | validation { 45 | condition = (var.zone == "1" || 46 | var.zone == "2" || 47 | var.zone == "3") 48 | error_message = "Value of zone must be one of 1/2/3." 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-dynamic-registry/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation example with support for dynamic container registry definition 2 | 3 | This sample creates an encrypted and signed contract and stores it locally in a file. You can later use the contract to provision a HPVS for VPC instance. 4 | The contract will define the container registry and the credentials for pulling your workload container image. 5 | 6 | ### Prerequisite 7 | 8 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 9 | 10 | ### Define your settings 11 | 12 | Define your settings: 13 | - icl_hostname: The host name of your ICL Log instance which you provisioned previously 14 | - icl_iam_apikey: The API key of your Log instance 15 | - registry: The container registry to pull your workload container image from 16 | - pull_username: The container registry username for pulling your workload container image 17 | - pull_password: The container registry password for pulling your workload container image 18 | 19 | The settings are defined in form of Terraform variables. 20 | 21 | Use one of the following options to define the variables: 22 | 23 | #### Define the variables in a template file 24 | 25 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 26 | 2. 
Fill the values in `my-settings.auto.tfvars` 27 | 28 | #### Define environment variables 29 | 30 | Set the following environment variables: 31 | 32 | ```text 33 | TF_VAR_icl_iam_apikey= 34 | TF_VAR_icl_hostname= 35 | TF_VAR_registry= 36 | TF_VAR_pull_username= 37 | TF_VAR_pull_password= 38 | ``` 39 | 40 | ### Define your workload 41 | 42 | Create the file `compose\docker-compose.yml` for your workload. Specify at least the container image digest and use the `${REGISTRY}` variable to reference the container registry defined in your settings, e.g.: 43 | 44 | ``` 45 | services: 46 | helloworld: 47 | image: ${REGISTRY}/hpse-docker-hello-world-s390x@sha256:43c500c5f85fc450060b804851992314778e35cadff03cb63042f593687b7347 48 | ``` 49 | 50 | ### Run the Example 51 | 52 | Initialize terraform: 53 | 54 | ```bash 55 | terraform init 56 | ``` 57 | 58 | Deploy the example: 59 | 60 | ```bash 61 | terraform apply 62 | ``` 63 | 64 | ### Further steps 65 | 66 | The contract will be written to the file `build/contract.yml` and can now be used for e.g. provisining a HPVS for VPC instance. 67 | 68 | Note that you will need to create a public gateway in your VPC before creating the HPVS for VPC instance. This is necessary to allow the HPVS for VPC instance to reach your Log instance through the public gateway. 69 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-dynamic-registry/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: ${REGISTRY}/hpse-docker-hello-world-s390x@sha256:43c500c5f85fc450060b804851992314778e35cadff03cb63042f593687b7347 4 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-dynamic-registry/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="Your ICL hostname" 2 | icl_iam_apikey="Your IBM Cloud API key" 3 | registry="Prefix for the dynamic registry" # e.g. docker.io/library or us.icr.io 4 | pull_username="Username for registry" # Username with read access to the container registry 5 | pull_password="Password for registry" # Password with read access to the container registry 6 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-dynamic-registry/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.1" 6 | } 7 | } 8 | } 9 | 10 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 11 | # to be mounted into containers, environment files etc. 
This is why all of these files get bundled in a tgz file (base64 encoded) 12 | resource "hpcr_tgz" "contract" { 13 | folder = "compose" 14 | } 15 | 16 | locals { 17 | # contract in clear text 18 | contract = yamlencode({ 19 | "env" : { 20 | "type" : "env", 21 | "logging" : { 22 | "logRouter" : { 23 | "hostname" : var.icl_hostname, 24 | "iamApiKey" : var.icl_iam_apikey, 25 | } 26 | }, 27 | "auths" : { 28 | (var.registry) : { 29 | "username" : var.pull_username, 30 | "password" : var.pull_password 31 | } 32 | }, 33 | "env" : { 34 | "REGISTRY" : var.registry 35 | } 36 | }, 37 | "workload" : { 38 | "type" : "workload", 39 | "compose" : { 40 | "archive" : hpcr_tgz.contract.rendered 41 | } 42 | } 43 | }) 44 | } 45 | 46 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 47 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 48 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 49 | resource "hpcr_contract_encrypted" "contract" { 50 | contract = local.contract 51 | } 52 | 53 | resource "local_file" "contract" { 54 | content = hpcr_contract_encrypted.contract.rendered 55 | filename = "${path.module}/build/contract.yml" 56 | } -------------------------------------------------------------------------------- /terraform-hpvs/create-contract-dynamic-registry/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_hostname" { 2 | type = string 3 | description = <<-DESC 4 | Host of IBM Cloud Logs. This can be 5 | obtained from cloud logs tab under Logging instances 6 | DESC 7 | } 8 | 9 | variable "icl_iam_apikey" { 10 | type = string 11 | sensitive = true 12 | description = <<-DESC 13 | This can be obtained from Access(IAM) under Manage 14 | DESC 15 | } 16 | 17 | variable "registry" { 18 | type = string 19 | description = <<-DESC 20 | Prefix of the container registry used to pull the image 21 | DESC 22 | } 23 | 24 | variable "pull_username" { 25 | type = string 26 | description = <<-DESC 27 | Username to pull from the above registry 28 | DESC 29 | } 30 | 31 | variable "pull_password" { 32 | type = string 33 | description = <<-DESC 34 | Password to pull from the above registry 35 | DESC 36 | } 37 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation example 2 | 3 | This sample creates an encrypted and signed contract and stores it locally in a file. 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 8 | 9 | ### Settings 10 | 11 | Use one of the following options to set you settings: 12 | 13 | #### Template file 14 | 15 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 16 | 2. Fill the values in `my-settings.auto.tfvars` 17 | 18 | #### Environment variables 19 | 20 | Set the following environment variables: 21 | 22 | ```text 23 | TF_VAR_icl_iam_apikey= 24 | TF_VAR_icl_hostname= 25 | ``` 26 | 27 | ### Run the Example 28 | 29 | Initialize terraform: 30 | 31 | ```bash 32 | terraform init 33 | ``` 34 | 35 | Deploy the example: 36 | 37 | ```bash 38 | terraform apply 39 | ``` 40 | 41 | The contract will be persisted in the `build/contract.yml` folder for further use. 
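One possible "further use" of the generated `build/contract.yml` is to pass it as `user_data` to a Hyper Protect VSI from a separate Terraform configuration, following the same pattern as the hello-world sample in this repository. The sketch below is illustrative only: the `example_*` resources (VPC, subnet, SSH key) and the `hpcr_image` data source are assumed to be defined as in that sample, and the name, profile and zone values are placeholders.

```hcl
# Illustrative sketch: consume the previously generated contract as user data.
# The example_* resources and the hpcr_image data source are assumed to exist,
# created as in the hello-world sample; name, profile and zone are placeholders.
resource "ibm_is_instance" "prebuilt_contract_vsi" {
  name    = "hpcr-from-prebuilt-contract"
  image   = data.hpcr_image.hyper_protect_image.image
  profile = "bz2e-1x4"
  keys    = [ibm_is_ssh_key.example_sshkey.id]
  vpc     = ibm_is_vpc.example_vpc.id
  zone    = "eu-gb-2"

  # the encrypted contract generated by this sample
  user_data = file("${path.module}/build/contract.yml")

  primary_network_interface {
    name   = "eth0"
    subnet = ibm_is_subnet.example_subnet.id
  }
}
```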
42 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: docker.io/library/hello-world@sha256:4f53e2564790c8e7856ec08e384732aa38dc43c52f02952483e3f003afbf23db 4 | -------------------------------------------------------------------------------- /terraform-hpvs/create-contract/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="Your ICL hostname" 2 | icl_iam_apikey="Your IBM Cloud API key" -------------------------------------------------------------------------------- /terraform-hpvs/create-contract/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.1" 6 | } 7 | } 8 | } 9 | 10 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 11 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 12 | resource "hpcr_tgz" "contract" { 13 | folder = "compose" 14 | } 15 | 16 | locals { 17 | # contract in clear text 18 | contract = yamlencode({ 19 | "env" : { 20 | "type" : "env", 21 | "logging" : { 22 | "logRouter" : { 23 | "hostname" : var.icl_hostname, 24 | "iamApiKey" : var.icl_iam_apikey, 25 | } 26 | } 27 | }, 28 | "workload" : { 29 | "type" : "workload", 30 | "compose" : { 31 | "archive" : hpcr_tgz.contract.rendered 32 | } 33 | } 34 | }) 35 | } 36 | 37 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 38 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 39 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 40 | resource "hpcr_contract_encrypted" "contract" { 41 | contract = local.contract 42 | } 43 | 44 | resource "local_file" "contract" { 45 | content = hpcr_contract_encrypted.contract.rendered 46 | filename = "${path.module}/build/contract.yml" 47 | } -------------------------------------------------------------------------------- /terraform-hpvs/create-contract/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_hostname" { 2 | type = string 3 | description = <<-DESC 4 | Host of IBM Cloud Logs. This can be 5 | obtained from cloud logs tab under Logging instances 6 | DESC 7 | } 8 | 9 | variable "icl_iam_apikey" { 10 | type = string 11 | sensitive = true 12 | description = <<-DESC 13 | This can be obtained from Access(IAM) under Manage 14 | DESC 15 | } 16 | -------------------------------------------------------------------------------- /terraform-hpvs/dynamic-registry/README.md: -------------------------------------------------------------------------------- 1 | ## Dynamic Registry Sample 2 | 3 | This sample deploys the [hello-world](https://hub.docker.com/_/hello-world) example as a [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se). 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 
8 | 9 | ## Use case 10 | 11 | This sample demonstrates how to use a dynamic registry reference. 12 | 13 | ### Explicit Registry Reference 14 | 15 | Typically the docker registry is referenced via the full docker URL in the compose file, for example: 16 | 17 | ```yaml 18 | services: 19 | helloworld: 20 | image: docker.io/library/hello-world@sha256:53f1bbee2f52c39e41682ee1d388285290c5c8a76cc92b42687eecf38e0af3f0 21 | ``` 22 | 23 | Note that `docker.io/library/` is the registry prefix, `hello-world` is the identifier of the OCI image in that registry, and `sha256:53f1bbee2f52c39e41682ee1d388285290c5c8a76cc92b42687eecf38e0af3f0` is the unique identifier of the image version. 24 | 25 | In such a case the party deciding on the registry (and the associated pull credentials) is the workload provider, since both the registry reference and the pull credentials are part of the workload section. 26 | 27 | ### Dynamic Registry Reference 28 | 29 | There are use cases in which the registry is **not known** when the workload section is pre-encrypted, e.g. when the workload provider wants to allow the deployer to use a registry mirror or a private container registry. 30 | 31 | In such a case it is possible to dynamically override the registry as well as the pull credentials. This is a coordinated effort between the workload provider and the deployer. 32 | 33 | **Note:** the templating approach only works for a [compose](https://pages.github.ibm.com/ZaaS/hpse-contract-schema/#allOf_i1_workload_compose) based workload and only for images referenced via a digest (i.e. there is no support for DCT-based workloads). 34 | 35 | #### Workload Provider 36 | 37 | The workload provider marks the registry as dynamic by using a replacement variable in the docker compose file: 38 | 39 | ```yaml 40 | services: 41 | helloworld: 42 | image: ${REGISTRY}/hpse-docker-hello-world-s390x@sha256:43c500c5f85fc450060b804851992314778e35cadff03cb63042f593687b7347 43 | 44 | ``` 45 | 46 | Note that the digest of the image is identical across registries, so the workload provider can pin the desired version of the image by setting the digest, independent of which registry is actually being used. Using such variables in the compose file is a native feature of the [compose specification](https://docs.docker.com/compose/compose-file/#interpolation). 47 | 48 | Now the workload provider can prepare (encrypt) the workload section **without** specifying the pull secrets for that registry. 49 | 50 | #### Env Provider 51 | 52 | The env provider fills in the missing information about the registry and the associated pull secrets. 53 | 54 | The registry is set as an environment variable. Both the env provider and the workload provider can provide pieces of the overall environment, and these are overlaid (with the workload section taking precedence). 55 | 56 | The pull credentials are passed in via an `auths` section in the environment part of the contract. Just like environment variables, these sections are overlaid, with the workload section taking precedence.
57 | 58 | ```yaml 59 | --- 60 | env: 61 | type: env 62 | auths: 63 | de.icr.io: 64 | username: xxx 65 | password: yyy 66 | env: 67 | REGISTRY: de.icr.io 68 | ``` 69 | 70 | -------------------------------------------------------------------------------- /terraform-hpvs/dynamic-registry/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: ${REGISTRY}/hpse-docker-hello-world-s390x@sha256:43c500c5f85fc450060b804851992314778e35cadff03cb63042f593687b7347 4 | 5 | -------------------------------------------------------------------------------- /terraform-hpvs/dynamic-registry/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | ibmcloud_api_key="Your IBM Cloud API key" 2 | region="ca-tor" # Region to deploy to. Options include eu-gb, jp-tok, br-sao, ca-tor, us-east 3 | zone="2" # Zone within region to create the HPVS into. 4 | icl_hostname="Your ICL hostname" 5 | icl_iam_apikey="Your IBM Cloud API key" 6 | registry="Prefix for the dynamic registry" # e.g. docker.io/library 7 | pull_username="Username for registry" # Username with read access to the container registry 8 | pull_password="Password for registry" # Password with read access to the container registry 9 | 10 | -------------------------------------------------------------------------------- /terraform-hpvs/dynamic-registry/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.4" 6 | } 7 | 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.1" 11 | } 12 | 13 | ibm = { 14 | source = "IBM-Cloud/ibm" 15 | version = ">= 1.37.1" 16 | } 17 | } 18 | } 19 | 20 | # make sure to target the correct region and zone 21 | provider "ibm" { 22 | region = var.region 23 | zone = "${var.region}-${var.zone}" 24 | } 25 | 26 | locals { 27 | # some reusable tags that identify the resources created by his sample 28 | tags = ["hpcr", "sample", var.prefix] 29 | } 30 | 31 | # the VPC 32 | resource "ibm_is_vpc" "dynamic_registry_vpc" { 33 | name = format("%s-vpc", var.prefix) 34 | tags = local.tags 35 | } 36 | 37 | # the security group 38 | resource "ibm_is_security_group" "dynamic_registry_security_group" { 39 | name = format("%s-security-group", var.prefix) 40 | vpc = ibm_is_vpc.dynamic_registry_vpc.id 41 | tags = local.tags 42 | } 43 | 44 | # rule that allows the VSI to make outbound connections, this is required 45 | # to connect to the logDNA instance as well as to docker to pull the image 46 | resource "ibm_is_security_group_rule" "dynamic_registry_outbound" { 47 | group = ibm_is_security_group.dynamic_registry_security_group.id 48 | direction = "outbound" 49 | remote = "0.0.0.0/0" 50 | } 51 | 52 | # the subnet 53 | resource "ibm_is_subnet" "dynamic_registry_subnet" { 54 | name = format("%s-subnet", var.prefix) 55 | vpc = ibm_is_vpc.dynamic_registry_vpc.id 56 | total_ipv4_address_count = 256 57 | zone = "${var.region}-${var.zone}" 58 | tags = local.tags 59 | } 60 | 61 | # we use a gateway to allow the VSI to connect to the internet to logDNA 62 | # and docker. Without a gateway the VSI would need a floating IP. 
Without 63 | # either the VSI will not be able to connect to the internet despite 64 | # an outbound rule 65 | resource "ibm_is_public_gateway" "dynamic_registry_gateway" { 66 | name = format("%s-gateway", var.prefix) 67 | vpc = ibm_is_vpc.dynamic_registry_vpc.id 68 | zone = "${var.region}-${var.zone}" 69 | tags = local.tags 70 | } 71 | 72 | # attach the gateway to the subnet 73 | resource "ibm_is_subnet_public_gateway_attachment" "dynamic_registry_gateway_attachment" { 74 | subnet = ibm_is_subnet.dynamic_registry_subnet.id 75 | public_gateway = ibm_is_public_gateway.dynamic_registry_gateway.id 76 | } 77 | 78 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 79 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 80 | resource "hpcr_tgz" "contract" { 81 | folder = "compose" 82 | } 83 | 84 | locals { 85 | # contract in clear text 86 | contract = yamlencode({ 87 | "env" : { 88 | "type" : "env", 89 | "logging" : { 90 | "logRouter" : { 91 | "hostname" : var.icl_hostname, 92 | "iamApiKey" : var.icl_iam_apikey, 93 | } 94 | }, 95 | "auths" : { 96 | (var.registry) : { 97 | "username" : var.pull_username, 98 | "password" : var.pull_password 99 | } 100 | }, 101 | "env" : { 102 | "REGISTRY" : var.registry 103 | } 104 | }, 105 | "workload" : { 106 | "type" : "workload", 107 | "compose" : { 108 | "archive" : hpcr_tgz.contract.rendered 109 | } 110 | } 111 | }) 112 | } 113 | 114 | # create a random key pair, because for formal reasons we need to pass an SSH key into the VSI. It will not be used, that's why 115 | # it can be random 116 | resource "tls_private_key" "dynamic_registry_rsa_key" { 117 | algorithm = "RSA" 118 | rsa_bits = 4096 119 | } 120 | 121 | # we only need this because VPC expects this 122 | resource "ibm_is_ssh_key" "dynamic_registry_sshkey" { 123 | name = format("%s-key", var.prefix) 124 | public_key = tls_private_key.dynamic_registry_rsa_key.public_key_openssh 125 | tags = local.tags 126 | } 127 | 128 | # locate all public image 129 | data "ibm_is_images" "hyper_protect_images" { 130 | visibility = "public" 131 | status = "available" 132 | } 133 | 134 | # locate the latest hyper protect image 135 | data "hpcr_image" "hyper_protect_image" { 136 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 137 | } 138 | 139 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 140 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 141 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 
142 | resource "hpcr_contract_encrypted" "contract" { 143 | contract = local.contract 144 | cert = file(var.certificate) 145 | } 146 | 147 | # construct the VSI 148 | resource "ibm_is_instance" "dynamic_registry_vsi" { 149 | name = format("%s-vsi", var.prefix) 150 | image = data.hpcr_image.hyper_protect_image.image 151 | profile = var.profile 152 | keys = [ibm_is_ssh_key.dynamic_registry_sshkey.id] 153 | vpc = ibm_is_vpc.dynamic_registry_vpc.id 154 | tags = local.tags 155 | zone = "${var.region}-${var.zone}" 156 | 157 | # the user data field carries the encrypted contract, so all information visible at the hypervisor layer is encrypted 158 | user_data = hpcr_contract_encrypted.contract.rendered 159 | 160 | primary_network_interface { 161 | name = "eth0" 162 | subnet = ibm_is_subnet.dynamic_registry_subnet.id 163 | security_groups = [ibm_is_security_group.dynamic_registry_security_group.id] 164 | } 165 | 166 | } 167 | -------------------------------------------------------------------------------- /terraform-hpvs/dynamic-registry/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = <<-DESC 3 | Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: 4 | https://cloud.ibm.com/iam#/apikeys 5 | DESC 6 | sensitive = true 7 | } 8 | 9 | variable "region" { 10 | type = string 11 | description = "Region to deploy to, e.g. eu-gb" 12 | 13 | validation { 14 | condition = (var.region == "eu-gb" || 15 | var.region == "br-sao" || 16 | var.region == "ca-tor" || 17 | var.region == "jp-tok" || 18 | var.region == "us-south" || 19 | var.region == "us-east") 20 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok/us-east." 21 | } 22 | } 23 | 24 | variable "zone" { 25 | type = string 26 | default = "2" 27 | description = "Zone to deploy to, e.g. 2." 28 | 29 | validation { 30 | condition = (var.zone == "1" || 31 | var.zone == "2" || 32 | var.zone == "3") 33 | error_message = "Value of zone must be one of 1/2/3." 34 | } 35 | } 36 | 37 | variable "icl_hostname" { 38 | type = string 39 | description = <<-DESC 40 | Host of IBM Cloud Logs. This can be 41 | obtained from cloud logs tab under Logging instances 42 | DESC 43 | } 44 | 45 | variable "icl_iam_apikey" { 46 | type = string 47 | sensitive = true 48 | description = <<-DESC 49 | This can be obtained from Access(IAM) under Manage 50 | DESC 51 | } 52 | 53 | variable "prefix" { 54 | type = string 55 | default = "hpcr-sample-dynamic-registry" 56 | description = "Prefix to be attached to name of all generated resources" 57 | } 58 | 59 | variable "profile" { 60 | type = string 61 | default = "bz2e-1x4" 62 | description = <<-DESC 63 | Profile used for the VSI. This has to be a secure execution 64 | profile in the format Xz2e-YxZ, e.g. 
bz2e-1x4 65 | DESC 66 | } 67 | 68 | variable "registry" { 69 | type = string 70 | description = <<-DESC 71 | Prefix of the container registry used to pull the image 72 | DESC 73 | } 74 | 75 | variable "pull_username" { 76 | type = string 77 | description = <<-DESC 78 | Username to pull from the above registry 79 | DESC 80 | } 81 | 82 | variable "pull_password" { 83 | type = string 84 | description = <<-DESC 85 | Password to pull from the above registry 86 | DESC 87 | } 88 | 89 | variable "certificate" { 90 | type = string 91 | description = "path to the certificate file" 92 | } -------------------------------------------------------------------------------- /terraform-hpvs/fhe-helayers-sdk/README.md: -------------------------------------------------------------------------------- 1 | # Get Started with IBM HElayers and IBM Hyper Protect Virtual Server for VPC 2 | 3 | This folder contains the code for the tutorial [Get started with IBM HElayers and IBM Hyper Protect Virtual Servers for VPC](https://developer.ibm.com/tutorials/awb-get-started-ibm-helayers-sdk-hyper-protect-virtual-servers-vpc/). Follow this tutorial to deploy IBM HElayers SDK 1.5.2.0 in IBM Hyper Protect Virtual Server for VPC and to combine Fully Homomorphic Encryption and Confidential Computing. 4 | -------------------------------------------------------------------------------- /terraform-hpvs/fhe-helayers-sdk/compose/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | fileserver: 4 | image: icr.io/helayers/helayers-pylab-s390x@sha256:c60a70a480255c854756d3b2ee4bfc292c93cd9ee399f0a7de1454d6bc57664c 5 | restart: always 6 | ports: 7 | - 8888:8888 -------------------------------------------------------------------------------- /terraform-hpvs/fhe-helayers-sdk/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | prefix="fhe-on-hpvs" 2 | ibmcloud_api_key="Your IBM Cloud API key" 3 | region="eu-gb" # Region to deploy to. Options include eu-gb, jp-tok, br-sao, ca-tor, us-east 4 | zone="3" # Zone within region to create the HPVS into. 
5 | icl_hostname="Your ICL hostname" 6 | icl_iam_apikey="Your IBM Cloud API key" -------------------------------------------------------------------------------- /terraform-hpvs/fhe-helayers-sdk/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.6" 6 | } 7 | 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.1" 11 | } 12 | 13 | ibm = { 14 | source = "IBM-Cloud/ibm" 15 | version = ">= 1.37.1" 16 | } 17 | } 18 | } 19 | 20 | # make sure to target the correct region and zone 21 | provider "ibm" { 22 | region = var.region 23 | zone = "${var.region}-${var.zone}" 24 | ibmcloud_api_key = var.ibmcloud_api_key 25 | } 26 | 27 | locals { 28 | # some reusable tags that identify the resources created by his sample 29 | tags = ["hpcr", "sample", var.prefix] 30 | } 31 | 32 | # the VPC 33 | resource "ibm_is_vpc" "fhe_hpvs_vpc" { 34 | name = format("%s-vpc", var.prefix) 35 | tags = local.tags 36 | } 37 | 38 | # the security group 39 | resource "ibm_is_security_group" "fhe_hpvs_security_group" { 40 | name = format("%s-security-group", var.prefix) 41 | vpc = ibm_is_vpc.fhe_hpvs_vpc.id 42 | tags = local.tags 43 | } 44 | 45 | # rule that allows the VSI to make outbound connections, this is required 46 | # to connect to the logDNA instance as well as to docker to pull the image 47 | resource "ibm_is_security_group_rule" "fhe_hpvs_outbound" { 48 | group = ibm_is_security_group.fhe_hpvs_security_group.id 49 | direction = "outbound" 50 | remote = "0.0.0.0/0" 51 | } 52 | 53 | # rule that allows the VSI to accept inbound connections, this is required 54 | resource "ibm_is_security_group_rule" "fhe_hpvs_inbound" { 55 | group = ibm_is_security_group.fhe_hpvs_security_group.id 56 | direction = "inbound" 57 | remote = "0.0.0.0/0" 58 | } 59 | 60 | # the subnet 61 | resource "ibm_is_subnet" "fhe_hpvs_subnet" { 62 | name = format("%s-subnet", var.prefix) 63 | vpc = ibm_is_vpc.fhe_hpvs_vpc.id 64 | total_ipv4_address_count = 256 65 | zone = "${var.region}-${var.zone}" 66 | tags = local.tags 67 | } 68 | 69 | # we use a gateway to allow the VSI to connect to the internet to logDNA 70 | # and docker. Without a gateway the VSI would need a floating IP. Without 71 | # either the VSI will not be able to connect to the internet despite 72 | # an outbound rule 73 | resource "ibm_is_public_gateway" "fhe_hpvs_gateway" { 74 | name = format("%s-gateway", var.prefix) 75 | vpc = ibm_is_vpc.fhe_hpvs_vpc.id 76 | zone = "${var.region}-${var.zone}" 77 | tags = local.tags 78 | } 79 | 80 | # attach the gateway to the subnet 81 | resource "ibm_is_subnet_public_gateway_attachment" "fhe_hpvs_gateway_attachment" { 82 | subnet = ibm_is_subnet.fhe_hpvs_subnet.id 83 | public_gateway = ibm_is_public_gateway.fhe_hpvs_gateway.id 84 | } 85 | 86 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 87 | # to be mounted into containers, environment files etc. 
This is why all of these files get bundled in a tgz file (base64 encoded) 88 | resource "hpcr_tgz" "contract" { 89 | folder = "compose" 90 | } 91 | 92 | locals { 93 | # contract in clear text 94 | contract = yamlencode({ 95 | "env" : { 96 | "type" : "env", 97 | "logging" : { 98 | "logRouter" : { 99 | "hostname" : var.icl_hostname, 100 | "iamApiKey" : var.icl_iam_apikey, 101 | } 102 | } 103 | }, 104 | "workload" : { 105 | "type" : "workload", 106 | "compose" : { 107 | "archive" : hpcr_tgz.contract.rendered 108 | } 109 | } 110 | }) 111 | } 112 | 113 | # create a random key pair, because for formal reasons we need to pass an SSH key into the VSI. It will not be used, that's why 114 | # it can be random 115 | resource "tls_private_key" "fhe_hpvs_rsa_key" { 116 | algorithm = "RSA" 117 | rsa_bits = 4096 118 | } 119 | 120 | # we only need this because VPC expects this 121 | resource "ibm_is_ssh_key" "fhe_hpvs_sshkey" { 122 | name = format("%s-key", var.prefix) 123 | public_key = tls_private_key.fhe_hpvs_rsa_key.public_key_openssh 124 | tags = local.tags 125 | } 126 | 127 | # locate all public image 128 | data "ibm_is_images" "hyper_protect_images" { 129 | visibility = "public" 130 | status = "available" 131 | } 132 | 133 | # locate the latest hyper protect image 134 | data "hpcr_image" "hyper_protect_image" { 135 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 136 | } 137 | 138 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 139 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 140 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 141 | resource "hpcr_contract_encrypted" "contract" { 142 | contract = local.contract 143 | } 144 | 145 | # construct the VSI 146 | resource "ibm_is_instance" "fhe_hpvs_vsi" { 147 | name = format("%s-vsi", var.prefix) 148 | image = data.hpcr_image.hyper_protect_image.image 149 | profile = var.profile 150 | keys = [ibm_is_ssh_key.fhe_hpvs_sshkey.id] 151 | vpc = ibm_is_vpc.fhe_hpvs_vpc.id 152 | tags = local.tags 153 | zone = "${var.region}-${var.zone}" 154 | 155 | # the user data field carries the encrypted contract, so all information visible at the hypervisor layer is encrypted 156 | user_data = hpcr_contract_encrypted.contract.rendered 157 | 158 | primary_network_interface { 159 | name = "eth0" 160 | subnet = ibm_is_subnet.fhe_hpvs_subnet.id 161 | security_groups = [ibm_is_security_group.fhe_hpvs_security_group.id] 162 | } 163 | } 164 | 165 | resource "ibm_is_floating_ip" "fhe_hpvs_vsi_floatingip" { 166 | name = ibm_is_instance.fhe_hpvs_vsi.name 167 | target = ibm_is_instance.fhe_hpvs_vsi.primary_network_interface[0].id 168 | } 169 | 170 | output "hpvs_vsi_floating_ip" { 171 | value = ibm_is_floating_ip.fhe_hpvs_vsi_floatingip.address 172 | } 173 | 174 | resource "local_file" "contract" { 175 | content = hpcr_contract_encrypted.contract.rendered 176 | filename = "${path.module}/build/contract.yml" 177 | } 178 | -------------------------------------------------------------------------------- /terraform-hpvs/fhe-helayers-sdk/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = <<-DESC 3 | Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: 4 | https://cloud.ibm.com/iam#/apikeys 5 | DESC 6 | sensitive = true 7 | } 8 | 9 | variable "region" { 10 | type = string 
11 | description = "Region to deploy to, e.g. eu-gb" 12 | 13 | validation { 14 | condition = (var.region == "eu-gb" || 15 | var.region == "br-sao" || 16 | var.region == "ca-tor" || 17 | var.region == "jp-tok" || 18 | var.region == "us-east") 19 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok/us-east." 20 | } 21 | } 22 | 23 | variable "zone" { 24 | type = string 25 | default = "2" 26 | description = "Zone to deploy to, e.g. 2." 27 | 28 | validation { 29 | condition = (var.zone == "1" || 30 | var.zone == "2" || 31 | var.zone == "3") 32 | error_message = "Value of zone must be one of 1/2/3." 33 | } 34 | } 35 | 36 | variable "icl_hostname" { 37 | type = string 38 | description = <<-DESC 39 | Host of IBM Cloud Logs. This can be 40 | obtained from cloud logs tab under Logging instances 41 | DESC 42 | } 43 | 44 | variable "icl_iam_apikey" { 45 | type = string 46 | sensitive = true 47 | description = <<-DESC 48 | This can be obtained from Access(IAM) under Manage 49 | DESC 50 | } 51 | 52 | variable "prefix" { 53 | type = string 54 | default = "fhe-on-hpvs" 55 | description = "Prefix to be attached to name of all generated resources" 56 | } 57 | 58 | variable "profile" { 59 | type = string 60 | default = "bz2e-1x4" 61 | description = <<-DESC 62 | Profile used for the VSI. This has to be a secure execution 63 | profile in the format Xz2e-YxZ, e.g. bz2e-1x4 64 | DESC 65 | } 66 | -------------------------------------------------------------------------------- /terraform-hpvs/hello-world/README.md: -------------------------------------------------------------------------------- 1 | ## Hello-World Sample 2 | 3 | This sample deploys the [hello-world](https://hub.docker.com/_/hello-world) example as a [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se). 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 8 | 9 | ### Settings 10 | 11 | Use one of the following options to set you settings: 12 | 13 | #### Template file 14 | 15 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 16 | 2. Fill the values in `my-settings.auto.tfvars` 17 | 18 | #### Environment variables 19 | 20 | Set the following environment variables: 21 | 22 | ```text 23 | IC_API_KEY= 24 | TF_VAR_zone= 25 | TF_VAR_region= 26 | TF_VAR_icl_iam_apikey= 27 | TF_VAR_icl_hostname= 28 | ``` 29 | 30 | ### Run the Example 31 | 32 | Initialize terraform: 33 | 34 | ```bash 35 | terraform init 36 | ``` 37 | 38 | Deploy the example: 39 | 40 | ```bash 41 | terraform apply 42 | ``` 43 | 44 | This will create a sample virtual server instance. Monitor your ICL instance for logs. 45 | 46 | Destroy the created resources: 47 | 48 | ```bash 49 | terraform destroy 50 | ``` 51 | -------------------------------------------------------------------------------- /terraform-hpvs/hello-world/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: docker.io/library/hello-world@sha256:4f53e2564790c8e7856ec08e384732aa38dc43c52f02952483e3f003afbf23db 4 | -------------------------------------------------------------------------------- /terraform-hpvs/hello-world/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | ibmcloud_api_key="Your IBM Cloud API key" 2 | region="ca-tor" # Region to deploy to. 
Options include eu-gb, jp-tok, br-sao, ca-tor, us-east 3 | zone="2" # Zone within region to create the HPVS into. 4 | icl_hostname="Your ICL hostname" 5 | icl_iam_apikey="Your IBM Cloud API key" 6 | -------------------------------------------------------------------------------- /terraform-hpvs/hello-world/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.4" 6 | } 7 | 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.1" 11 | } 12 | 13 | ibm = { 14 | source = "IBM-Cloud/ibm" 15 | version = ">= 1.37.1" 16 | } 17 | } 18 | } 19 | 20 | # make sure to target the correct region and zone 21 | provider "ibm" { 22 | region = var.region 23 | zone = "${var.region}-${var.zone}" 24 | } 25 | 26 | locals { 27 | # some reusable tags that identify the resources created by his sample 28 | tags = ["hpcr", "sample", var.prefix] 29 | } 30 | 31 | # the VPC 32 | resource "ibm_is_vpc" "hello_world_vpc" { 33 | name = format("%s-vpc", var.prefix) 34 | tags = local.tags 35 | } 36 | 37 | # the security group 38 | resource "ibm_is_security_group" "hello_world_security_group" { 39 | name = format("%s-security-group", var.prefix) 40 | vpc = ibm_is_vpc.hello_world_vpc.id 41 | tags = local.tags 42 | } 43 | 44 | # rule that allows the VSI to make outbound connections, this is required 45 | # to connect to the logDNA instance as well as to docker to pull the image 46 | resource "ibm_is_security_group_rule" "hello_world_outbound" { 47 | group = ibm_is_security_group.hello_world_security_group.id 48 | direction = "outbound" 49 | remote = "0.0.0.0/0" 50 | } 51 | 52 | # the subnet 53 | resource "ibm_is_subnet" "hello_world_subnet" { 54 | name = format("%s-subnet", var.prefix) 55 | vpc = ibm_is_vpc.hello_world_vpc.id 56 | total_ipv4_address_count = 256 57 | zone = "${var.region}-${var.zone}" 58 | tags = local.tags 59 | } 60 | 61 | # we use a gateway to allow the VSI to connect to the internet to logDNA 62 | # and docker. Without a gateway the VSI would need a floating IP. Without 63 | # either the VSI will not be able to connect to the internet despite 64 | # an outbound rule 65 | resource "ibm_is_public_gateway" "hello_world_gateway" { 66 | name = format("%s-gateway", var.prefix) 67 | vpc = ibm_is_vpc.hello_world_vpc.id 68 | zone = "${var.region}-${var.zone}" 69 | tags = local.tags 70 | } 71 | 72 | # attach the gateway to the subnet 73 | resource "ibm_is_subnet_public_gateway_attachment" "hello_world_gateway_attachment" { 74 | subnet = ibm_is_subnet.hello_world_subnet.id 75 | public_gateway = ibm_is_public_gateway.hello_world_gateway.id 76 | } 77 | 78 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 79 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 80 | resource "hpcr_tgz" "contract" { 81 | folder = "compose" 82 | } 83 | 84 | locals { 85 | # contract in clear text 86 | contract = yamlencode({ 87 | "env" : { 88 | "type" : "env", 89 | "logging" : { 90 | "logRouter" : { 91 | "hostname" : var.icl_hostname, 92 | "iamApiKey" : var.icl_iam_apikey, 93 | } 94 | } 95 | }, 96 | "workload" : { 97 | "type" : "workload", 98 | "compose" : { 99 | "archive" : hpcr_tgz.contract.rendered 100 | } 101 | } 102 | }) 103 | } 104 | 105 | # create a random key pair, because for formal reasons we need to pass an SSH key into the VSI. 
It will not be used, that's why 106 | # it can be random 107 | resource "tls_private_key" "hello_world_rsa_key" { 108 | algorithm = "RSA" 109 | rsa_bits = 4096 110 | } 111 | 112 | # we only need this because VPC expects this 113 | resource "ibm_is_ssh_key" "hello_world_sshkey" { 114 | name = format("%s-key", var.prefix) 115 | public_key = tls_private_key.hello_world_rsa_key.public_key_openssh 116 | tags = local.tags 117 | } 118 | 119 | # locate all public image 120 | data "ibm_is_images" "hyper_protect_images" { 121 | visibility = "public" 122 | status = "available" 123 | } 124 | 125 | # locate the latest hyper protect image 126 | data "hpcr_image" "hyper_protect_image" { 127 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 128 | } 129 | 130 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 131 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 132 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 133 | resource "hpcr_contract_encrypted" "contract" { 134 | contract = local.contract 135 | } 136 | 137 | # construct the VSI 138 | resource "ibm_is_instance" "hello_world_vsi" { 139 | name = format("%s-vsi", var.prefix) 140 | image = data.hpcr_image.hyper_protect_image.image 141 | profile = var.profile 142 | keys = [ibm_is_ssh_key.hello_world_sshkey.id] 143 | vpc = ibm_is_vpc.hello_world_vpc.id 144 | tags = local.tags 145 | zone = "${var.region}-${var.zone}" 146 | 147 | # the user data field carries the encrypted contract, so all information visible at the hypervisor layer is encrypted 148 | user_data = hpcr_contract_encrypted.contract.rendered 149 | 150 | primary_network_interface { 151 | name = "eth0" 152 | subnet = ibm_is_subnet.hello_world_subnet.id 153 | security_groups = [ibm_is_security_group.hello_world_security_group.id] 154 | } 155 | 156 | } 157 | -------------------------------------------------------------------------------- /terraform-hpvs/hello-world/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = <<-DESC 3 | Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: 4 | https://cloud.ibm.com/iam#/apikeys 5 | DESC 6 | sensitive = true 7 | } 8 | 9 | variable "region" { 10 | type = string 11 | description = "Region to deploy to, e.g. eu-gb" 12 | 13 | validation { 14 | condition = (var.region == "eu-gb" || 15 | var.region == "br-sao" || 16 | var.region == "ca-tor" || 17 | var.region == "jp-tok" || 18 | var.region == "us-east") 19 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok/us-east." 20 | } 21 | } 22 | 23 | variable "zone" { 24 | type = string 25 | default = "2" 26 | description = "Zone to deploy to, e.g. 2." 27 | 28 | validation { 29 | condition = (var.zone == "1" || 30 | var.zone == "2" || 31 | var.zone == "3") 32 | error_message = "Value of zone must be one of 1/2/3." 33 | } 34 | } 35 | 36 | variable "icl_hostname" { 37 | type = string 38 | description = <<-DESC 39 | Host of IBM Cloud Logs. 
This can be 40 | obtained from cloud logs tab under Logging instances 41 | DESC 42 | } 43 | 44 | variable "icl_iam_apikey" { 45 | type = string 46 | sensitive = true 47 | description = <<-DESC 48 | This can be obtained from Access(IAM) under Manage 49 | DESC 50 | } 51 | 52 | variable "prefix" { 53 | type = string 54 | default = "hpcr-sample-hello-world" 55 | description = "Prefix to be attached to name of all generated resources" 56 | } 57 | 58 | variable "profile" { 59 | type = string 60 | default = "bz2e-1x4" 61 | description = <<-DESC 62 | Profile used for the VSI. This has to be a secure execution 63 | profile in the format Xz2e-YxZ, e.g. bz2e-1x4 64 | DESC 65 | } 66 | -------------------------------------------------------------------------------- /terraform-hpvs/ibm-cloud-logging/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation example 2 | 3 | This sample creates an encrypted and signed contract and stores it locally in a file. 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](../README.md). Make sure to set up an IBM Cloud Logs instance. 8 | 9 | ### Settings 10 | 11 | Use one of the following options to set your settings: 12 | 13 | #### Template file 14 | 15 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 16 | 2. Fill the values in `my-settings.auto.tfvars` 17 | 18 | #### Environment variables 19 | 20 | Set the following environment variables: 21 | 22 | ```text 23 | TF_VAR_icl_iam_apikey= 24 | TF_VAR_icl_hostname= 25 | ``` 26 | 27 | ### Run the Example 28 | 29 | Initialize terraform: 30 | 31 | ```bash 32 | terraform init 33 | ``` 34 | 35 | Deploy the example: 36 | 37 | ```bash 38 | terraform apply 39 | ``` 40 | 41 | The contract will be persisted in the `build/contract.yml` file for further use. 42 | -------------------------------------------------------------------------------- /terraform-hpvs/ibm-cloud-logging/compose/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: docker.io/library/hello-world@sha256:4f53e2564790c8e7856ec08e384732aa38dc43c52f02952483e3f003afbf23db 4 | -------------------------------------------------------------------------------- /terraform-hpvs/ibm-cloud-logging/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_iam_apikey="Your IBM Cloud Logs IAM API Key" 2 | icl_hostname="Your IBM Cloud Logs Hostname" 3 | -------------------------------------------------------------------------------- /terraform-hpvs/ibm-cloud-logging/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.8.0" 6 | } 7 | } 8 | } 9 | 10 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 11 | # to be mounted into containers, environment files etc.
This is why all of these files get bundled in a tgz file (base64 encoded) 12 | resource "hpcr_tgz" "contract" { 13 | folder = "compose" 14 | } 15 | 16 | locals { 17 | # contract in clear text 18 | contract = yamlencode({ 19 | "env" : { 20 | "type" : "env", 21 | "logging" : { 22 | "logRouter" : { 23 | "hostname" : var.icl_hostname, 24 | "iamApiKey" : var.icl_iam_apikey 25 | } 26 | } 27 | }, 28 | "workload" : { 29 | "type" : "workload", 30 | "compose" : { 31 | "archive" : hpcr_tgz.contract.rendered 32 | } 33 | } 34 | }) 35 | } 36 | 37 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 38 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 39 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 40 | resource "hpcr_contract_encrypted" "contract" { 41 | contract = local.contract 42 | } 43 | 44 | resource "local_file" "contract" { 45 | content = hpcr_contract_encrypted.contract.rendered 46 | filename = "${path.module}/build/contract.yml" 47 | } -------------------------------------------------------------------------------- /terraform-hpvs/ibm-cloud-logging/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_iam_apikey" { 2 | type = string 3 | description = "IAM Key of IBM Cloud Logs" 4 | } 5 | 6 | variable "icl_hostname" { 7 | type = string 8 | sensitive = true 9 | description = "Hostname of IBM Cloud Logs Instance" 10 | } -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/README.md: -------------------------------------------------------------------------------- 1 | ## Encrypted Log Messages Example 2 | 3 | In this project we investigate how a deployer of a [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se) can selectively encrypt log messages that are produced by a container. Per default log messages are sent via TLS to a logging backend but appear in clear text in that backend. We recommend to not include any sensitive information in these log message for that reason. 4 | 5 | Our solution assumes the following: 6 | 7 | - sensitive data is encrypted via hybrid encryption, e.g. by using the public part of an asymmetric key and a random passphrase per encryption step 8 | - encryption requires an explicit coding step in the container that produces the log 9 | - the result of the encryption step is a string that can be downloaded from the logging backend and decrypted using the private key that is in the hand of the deployer 10 | 11 | Implementation decisions: 12 | 13 | - we auto-generate the public/private keypair as part of this example. This is not required, the keypair could also be created out of band 14 | - we use the `hyper-protect-basic` token approach to implement hybrid encryption, because other parts of the [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se) offering also use that schema. This is however an arbitrary decision, you may decide to use a different approach, e.g. using [gpg](https://www.gnupg.org/), the fundamental workflow stays the same. 15 | - the docker container requires the `openssl` binary to implement the encryption step, ideally the binary should be part of the docker container. 
However, in order to be able to use the off-the-shelf docker [ubuntu](https://hub.docker.com/_/ubuntu) image, we install the `openssl` binary at container startup time. This is **not suggested** for production use. 16 | 17 | Implementation outline: 18 | 19 | This sample deploys a container as an [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se). 20 | 21 | The deployment will auto-create a private/public key-pair. The public part will be added to the [contract](https://cloud.ibm.com/docs/vpc?topic=vpc-about-contract_se) and then mounted into the container. The container in turn uses it to **encrypt selected logging messages**. These logging messages will appear in their encrypted version on the logging backend. 22 | 23 | The deployer can now read the encrypted logs from the logging backend and decrypt them via the provided private key. 24 | 25 | FAQ: 26 | 27 | - *How is the public key embedded into the contract?* The public key is stored as a file in the same folder as the `docker-compose.yml` file; you may also use a subdirectory. The contract preparation step generates a `tgz` file out of this folder and embeds it as `base64` in the contract document. The server will untar this data, so the public key resides in the server-side file system next to the compose file. From there it can be referenced via a `volumes` field and mounted into the container. 28 | 29 | - *What is the encryption/decryption algorithm?* The `hyper-protect-basic` encryption algorithm is described as part of the [attestation](https://cloud.ibm.com/docs/vpc?topic=vpc-about-attestation) documentation. For convenience this example provides the [encrypt-basic.sh](./compose/bin/encrypt-basic.sh) script to encrypt records and the [decrypt-basic.sh](./support/decrypt-basic.sh) script to decrypt the records. 30 | 31 | ### Prerequisite 32 | 33 | Prepare your environment (also refer to the [generic README](../README.md)). Make sure to set up an IBM Cloud Logs instance. 34 | 35 | ### Settings 36 | 37 | Use one of the following options to set your settings: 38 | 39 | #### Environment variables 40 | 41 | Set the following environment variables: 42 | 43 | ```text 44 | IC_API_KEY= 45 | TF_VAR_zone= 46 | TF_VAR_region= 47 | TF_VAR_icl_iam_apikey= 48 | TF_VAR_icl_hostname= 49 | TF_VAR_artifactory_user= 50 | TF_VAR_artifactory_key= 51 | ``` 52 | 53 | ### Run the Example 54 | 55 | Initialize terraform: 56 | 57 | ```bash 58 | terraform init 59 | ``` 60 | 61 | Deploy the example: 62 | 63 | ```bash 64 | terraform apply 65 | ``` 66 | 67 | This will create a sample virtual server instance. Monitor your ICL instance for logs.
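You can also sanity-check the hybrid encryption round trip locally, without going through ICL. This is a hedged sketch: it assumes `openssl` is installed on your workstation and that the key pair generated by this example is present at the paths shown (adjust the paths if Terraform wrote `key.pub`/`key.priv` elsewhere on your machine):

```bash
# encrypt a test record with the public key, then decrypt it again with the private key
echo "sensitive test record" \
  | bash compose/bin/encrypt-basic.sh compose/key.pub \
  | bash support/decrypt-basic.sh build/key.priv
```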
68 | 69 | Destroy the created resources: 70 | 71 | ```bash 72 | terraform destroy 73 | ``` 74 | 75 | The encrypted message can be decrypted by running the [decrypt-basic.sh](./support/decrypt-basic.sh) command: 76 | 77 | ```bash 78 | echo hyper-protect-basic.rdf...EqM | decrypt-basic.sh build/key.priv 79 | ``` 80 | -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/compose/bin/encrypt-basic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Usage: 4 | # se-encrypt-basic.sh 5 | 6 | usage() { 7 | echo "Usage: $(basename "$0") " 8 | exit 1 9 | } 10 | 11 | # read clear-text from STDIN 12 | test "$#" -eq 1 || usage 13 | key_file="$1" 14 | cleartext_file="$(mktemp)" 15 | trap 'rm "$cleartext_file"' EXIT 16 | cat > "$cleartext_file" 17 | 18 | set -eu -o pipefail 19 | 20 | # create random password 32 Byte 21 | password="$(openssl rand 32 | base64 -w0)" 22 | 23 | # encrypt password with public rsa key 24 | password_enc="$( 25 | echo -n "$password" | base64 -d | openssl rsautl \ 26 | -encrypt \ 27 | -inkey "$key_file" \ 28 | -pubin \ 29 | | base64 -w0)" 30 | 31 | # encrypt cleartext-file with random password 32 | cleartext_enc="$( 33 | echo -n "$password" | base64 -d | openssl enc \ 34 | -aes-256-cbc \ 35 | -pbkdf2 \ 36 | -pass stdin \ 37 | -in "$cleartext_file" \ 38 | | base64 -w0)" 39 | 40 | echo "hyper-protect-basic.${password_enc}.${cleartext_enc}" 41 | -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/compose/bin/example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | apt-get update 5 | apt-get install -y openssl 6 | 7 | echo "unencrypted message" 8 | echo "encrypted message" | /bin/bash /var/logging/bin/encrypt-basic.sh /var/logging/logging.pub 9 | -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | logging: 3 | image: docker.io/library/ubuntu@sha256:20fa2d7bb4de7723f542be5923b06c4d704370f0390e4ae9e1c833c8785644c1 4 | volumes: 5 | - ./key.pub:/var/logging/logging.pub:ro 6 | - ./bin/:/var/logging/bin/:ro 7 | command: bash /var/logging/bin/example.sh -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/support/decrypt-basic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | key_file=$1 4 | 5 | TMPDIR=$(mktemp -d) 6 | 7 | data_file=${TMPDIR}/data 8 | cat > "$data_file" 9 | 10 | password_enc=${TMPDIR}/password_enc 11 | ciphertext_enc=${TMPDIR}/ciphertext_enc 12 | 13 | cut -d. -f 2 "$data_file" | base64 -d > "$password_enc" 14 | cut -d. 
-f 3 "$data_file" | base64 -d > "$ciphertext_enc" 15 | 16 | openssl rsautl -decrypt -inkey "${key_file}" -in $password_enc | openssl aes-256-cbc -d -pbkdf2 -in $ciphertext_enc -pass stdin 17 | 18 | rm -rf "$TMPDIR" -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.4" 6 | } 7 | 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.1" 11 | } 12 | 13 | ibm = { 14 | source = "IBM-Cloud/ibm" 15 | version = ">= 1.37.1" 16 | } 17 | } 18 | } 19 | 20 | # make sure to target the correct region and zone 21 | provider "ibm" { 22 | region = var.region 23 | zone = "${var.region}-${var.zone}" 24 | } 25 | 26 | # create the encyption key 27 | resource "tls_private_key" "log_encryption_logging_key" { 28 | algorithm = "RSA" 29 | rsa_bits = 4096 30 | } 31 | 32 | resource "local_file" "log_encryption_logging_private_key" { 33 | content = tls_private_key.log_encryption_logging_key.private_key_pem 34 | filename = "../build/key.priv" 35 | } 36 | 37 | resource "local_file" "log_encryption_logging_public_key" { 38 | content = tls_private_key.log_encryption_logging_key.public_key_pem 39 | filename = "./compose/key.pub" 40 | } 41 | 42 | locals { 43 | # some reusable tags that identify the resources created by his sample 44 | tags = ["hpcr", "sample", var.prefix] 45 | } 46 | 47 | # the VPC 48 | resource "ibm_is_vpc" "log_encryption_vpc" { 49 | name = format("%s-vpc", var.prefix) 50 | tags = local.tags 51 | } 52 | 53 | # the security group 54 | resource "ibm_is_security_group" "log_encryption_security_group" { 55 | name = format("%s-security-group", var.prefix) 56 | vpc = ibm_is_vpc.log_encryption_vpc.id 57 | tags = local.tags 58 | } 59 | 60 | # rule that allows the VSI to make outbound connections, this is required 61 | # to connect to the logDNA instance as well as to docker to pull the image 62 | resource "ibm_is_security_group_rule" "log_encryption_outbound" { 63 | group = ibm_is_security_group.log_encryption_security_group.id 64 | direction = "outbound" 65 | remote = "0.0.0.0/0" 66 | } 67 | 68 | # the subnet 69 | resource "ibm_is_subnet" "log_encryption_subnet" { 70 | name = format("%s-subnet", var.prefix) 71 | vpc = ibm_is_vpc.log_encryption_vpc.id 72 | total_ipv4_address_count = 256 73 | zone = "${var.region}-${var.zone}" 74 | tags = local.tags 75 | } 76 | 77 | # we use a gateway to allow the VSI to connect to the internet to logDNA 78 | # and docker. Without a gateway the VSI would need a floating IP. Without 79 | # either the VSI will not be able to connect to the internet despite 80 | # an outbound rule 81 | resource "ibm_is_public_gateway" "log_encryption_gateway" { 82 | name = format("%s-gateway", var.prefix) 83 | vpc = ibm_is_vpc.log_encryption_vpc.id 84 | zone = "${var.region}-${var.zone}" 85 | tags = local.tags 86 | } 87 | 88 | # attach the gateway to the subnet 89 | resource "ibm_is_subnet_public_gateway_attachment" "log_encryption_gateway_attachment" { 90 | subnet = ibm_is_subnet.log_encryption_subnet.id 91 | public_gateway = ibm_is_public_gateway.log_encryption_gateway.id 92 | } 93 | 94 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 95 | # to be mounted into containers, environment files etc. 
This is why all of these files get bundled in a tgz file (base64 encoded) 96 | resource "hpcr_tgz" "contract" { 97 | folder = "compose" 98 | depends_on = [local_file.log_encryption_logging_public_key] 99 | } 100 | 101 | locals { 102 | # contract in clear text 103 | contract = yamlencode({ 104 | "env" : { 105 | "type" : "env", 106 | "logging" : { 107 | "logRouter" : { 108 | "hostname" : var.icl_hostname, 109 | "iamApiKey" : var.icl_iam_apikey, 110 | } 111 | } 112 | }, 113 | "workload" : { 114 | "type" : "workload", 115 | "compose" : { 116 | "archive" : hpcr_tgz.contract.rendered 117 | }, 118 | } 119 | }) 120 | } 121 | 122 | # create a random key pair, because for formal reasons we need to pass an SSH key into the VSI. It will not be used, that's why 123 | # it can be random 124 | resource "tls_private_key" "log_encryption_rsa_key" { 125 | algorithm = "RSA" 126 | rsa_bits = 4096 127 | } 128 | 129 | # we only need this because VPC expects this 130 | resource "ibm_is_ssh_key" "log_encryption_sshkey" { 131 | name = format("%s-key", var.prefix) 132 | public_key = tls_private_key.log_encryption_rsa_key.public_key_openssh 133 | tags = local.tags 134 | } 135 | 136 | # locate all public image 137 | data "ibm_is_images" "hyper_protect_images" { 138 | visibility = "public" 139 | status = "available" 140 | } 141 | 142 | # locate the latest hyper protect image 143 | data "hpcr_image" "hyper_protect_image" { 144 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 145 | } 146 | 147 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 148 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 149 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 150 | resource "hpcr_contract_encrypted" "contract" { 151 | contract = local.contract 152 | } 153 | 154 | # construct the VSI 155 | resource "ibm_is_instance" "log_encryption_vsi" { 156 | name = format("%s-vsi", var.prefix) 157 | image = data.hpcr_image.hyper_protect_image.image 158 | profile = var.profile 159 | keys = [ibm_is_ssh_key.log_encryption_sshkey.id] 160 | vpc = ibm_is_vpc.log_encryption_vpc.id 161 | tags = local.tags 162 | zone = "${var.region}-${var.zone}" 163 | 164 | # the user data field carries the encrypted contract, so all information visible at the hypervisor layer is encrypted 165 | user_data = hpcr_contract_encrypted.contract.rendered 166 | 167 | primary_network_interface { 168 | name = "eth0" 169 | subnet = ibm_is_subnet.log_encryption_subnet.id 170 | security_groups = [ibm_is_security_group.log_encryption_security_group.id] 171 | } 172 | 173 | } 174 | -------------------------------------------------------------------------------- /terraform-hpvs/log-encryption/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | description = "Region to deploy to, e.g. eu-gb." 4 | } 5 | 6 | variable "zone" { 7 | type = string 8 | description = "Zone to deploy to, e.g. 2." 9 | } 10 | 11 | variable "icl_hostname" { 12 | type = string 13 | description = <<-DESC 14 | Host of IBM Cloud Logs. 
This can be 15 | obtained from cloud logs tab under Logging instances 16 | DESC 17 | } 18 | 19 | variable "icl_iam_apikey" { 20 | type = string 21 | sensitive = true 22 | description = <<-DESC 23 | This can be obtained from Access(IAM) under Manage 24 | DESC 25 | } 26 | 27 | variable "prefix" { 28 | type = string 29 | description = "Prefix for all generated resources. Make sure to have a custom image with that name." 30 | default = "log-encryption" 31 | } 32 | 33 | variable "profile" { 34 | type = string 35 | description = "Profile used for the VSI, this has to be a secure execution profile in the format Xz2e-YxZ, e.g. bz2e-1x4." 36 | default = "bz2e-1x4" 37 | } 38 | -------------------------------------------------------------------------------- /terraform-hpvs/mongodb/README.md: -------------------------------------------------------------------------------- 1 | ## MongoDB - Running s390x version of MongoDB 2 | 3 | This sample deploys 3 instances of [mongodb](https://hub.docker.com/r/s390x/mongo/) on [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se) across 3 availability zones of a given region. 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](https://github.com/ibm-hyper-protect/linuxone-vsi-automation-samples/blob/master/terraform-hpvs/README.md). Make sure to set up an IBM Cloud Logs instance. 8 | 9 | ### Settings 10 | 11 | Use one of the following methods to apply the settings: 12 | 13 | #### Method_A: Using a template file 14 | 15 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 16 | 2. Fill the values in `my-settings.auto.tfvars` 17 | 18 | #### Method_B: Setting the environment variables 19 | 20 | Set the following environment variables: 21 | 22 | ```text 23 | IC_API_KEY= 24 | TF_VAR_zone= 25 | TF_VAR_region= 26 | TF_VAR_icl_iam_apikey= 27 | TF_VAR_icl_hostname= 28 | TF_VAR_mongo_user= 29 | TF_VAR_mongo_password= 30 | ``` 31 | 32 | ### Run the Example 33 | 34 | - Initialize terraform: 35 | 36 | ```bash 37 | terraform init 38 | ``` 39 | 40 | - Deploy the mongodb container: 41 | 42 | ```bash 43 | terraform apply 44 | ``` 45 | 46 | This will create 3 Hyper Protect Virtual Server for VPC instances across 3 availability zones of the given region and print the public IP address of each VSI as an output to the console. You can use clients such as `mongosh` or `MongoDB Compass` to connect to the database. 47 | 48 | Before using this setup as a cluster, log in to any of the MongoDB instances and set up the replica set, for example with `mongosh` as sketched below.
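A hedged `mongosh` sketch (substitute one of the floating IP addresses printed by `terraform apply`):

```bash
# open a session against one of the MongoDB instances
mongosh "mongodb://xx.xx.xx.xx:27017"
```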
49 | 50 | ```text 51 | mongodb://:27017 52 | 53 | Note: This establishes a session to MongoDB instance 54 | ``` 55 | Once inside the instance, execute below commands at `test>` prompt 56 | ``` 57 | test> rs.show 58 | 59 | test> rs.initiate({ _id: " rs.status() 66 | 67 | This command should give an array with 3 instances of MongoDB having 1 PRIMARY and 2 SECONDARY 68 | ``` 69 | Example: 70 | ``` 71 | test> rs.show 72 | 73 | test> rs.initiate({ _id: "replicaSet01", members: [ {_id: 0, host: "xx.xx.xx.xx:27017"}, {_id: 1, host: "yy.yy.yy.yy:27017"}, {_id: 2, host: "zz.zz.zz.zz.118:27017"} ] }) 74 | { 75 | ok: 1, 76 | '$clusterTime': { 77 | clusterTime: Timestamp({ t: 1663579880, i: 1 }), 78 | signature: { 79 | hash: Binary(Buffer.from("0000000000000000000000000000000000000000", "hex"), 0), 80 | keyId: Long("0") 81 | } 82 | }, 83 | operationTime: Timestamp({ t: 1663579880, i: 1 }) 84 | } 85 | 86 | test> rs.status() 87 | { 88 | set: 'replicaSet01', 89 | date: ISODate("2022-09-19T09:31:25.003Z"), 90 | myState: 2, 91 | term: Long("0"), 92 | ..... 93 | ...... 94 | members: [ 95 | { 96 | _id: 0, 97 | name: 'xx.xx.xx.xx:27017', 98 | health: 1, 99 | state: 2, 100 | stateStr: 'SECONDARY', 101 | ... 102 | ... 103 | }, 104 | { 105 | _id: 1, 106 | name: 'yy.yy.yy.yy:27017', 107 | health: 1, 108 | state: 1, 109 | stateStr: 'PRIMARY', 110 | ... 111 | ... 112 | }, 113 | { 114 | _id: 2, 115 | name: 'zz.zz.zz.zz:27017', 116 | health: 1, 117 | state: 2, 118 | stateStr: 'SECONDARY', 119 | }``` 120 | 121 | TEST: You could reboot the `PRIMARY` node and see that the cluster will have a new `PRIMARY` elected. 122 | 123 | After setting up the cluster, exit and then login to the cluster by passing-in `IP:port` of all the 3 instances as given below: 124 | 125 | ``` 126 | mongosh mongodb://xx.xx.xx.xx:27017,yy.yy.yy.yy:27017,zz.zz.zz.zz:27017 127 | ``` 128 | - Destroy the created resources: 129 | 130 | ```bash 131 | terraform destroy 132 | ``` 133 | 134 | **TODO**: Currently the auth is disabled while setting up replication. This repo will be updated once there is support for enabling authentication. 135 | -------------------------------------------------------------------------------- /terraform-hpvs/mongodb/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | mongo: 3 | image: docker.io/s390x/mongo@sha256:2fcffe9edbf6334c159f565e15d870dc7681cd9dc527cce3d59a457333925777 4 | ports: 5 | - "27017:27017" 6 | command: mongod --replSet ${MONGO_REPLICA_SET_NAME} --bind_ip_all 7 | # TODO : Enable when keyFile support is added 8 | environment: 9 | #- MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME:-mongouser} 10 | #- MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD:-mongouser} 11 | - MONGO_REPLICA_SET_NAME=${MONGO_REPLICA_SET_NAME:-replicaSet01} 12 | -------------------------------------------------------------------------------- /terraform-hpvs/mongodb/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | ibmcloud_api_key = "Your IBM Cloud API key" 2 | region="ca-tor" # Region to deploy to. Options include eu-gb, jp-tok, br-sao, ca-tor 3 | zone=1 # Zone within region to create the HPVS into. 
4 | icl_hostname="Your ICL hostname" 5 | icl_iam_apikey="Your IBM Cloud API key" 6 | mongo_user="UserID that is used to login to MongoDB instance" 7 | mongo_password="Password that is used to login to MongoDB instance" 8 | # Any other variable you want to set (see variables.tf) 9 | -------------------------------------------------------------------------------- /terraform-hpvs/mongodb/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.4" 6 | } 7 | 8 | tls = { 9 | source = "hashicorp/tls" 10 | version = ">= 4.0.1" 11 | } 12 | 13 | ibm = { 14 | source = "IBM-Cloud/ibm" 15 | version = ">= 1.37.1" 16 | } 17 | } 18 | } 19 | 20 | # Make sure to target the correct region and zone 21 | provider "ibm" { 22 | region = var.region 23 | ibmcloud_api_key = var.ibmcloud_api_key 24 | } 25 | 26 | locals { 27 | # some reusable tags that identify the resources created 28 | tags = ["mongodb", var.prefix] 29 | } 30 | 31 | # The VPC 32 | resource "ibm_is_vpc" "mongodb_vpc" { 33 | name = format("%s-vpc", var.prefix) 34 | tags = local.tags 35 | } 36 | 37 | # The security group 38 | resource "ibm_is_security_group" "mongodb_security_group" { 39 | name = format("%s-security-group", var.prefix) 40 | vpc = ibm_is_vpc.mongodb_vpc.id 41 | tags = local.tags 42 | } 43 | 44 | # Rule that allows the VSI to make outbound connections, this is required 45 | # to connect to the logDNA instance as well as to docker to pull the image 46 | resource "ibm_is_security_group_rule" "mongodb_outbound" { 47 | group = ibm_is_security_group.mongodb_security_group.id 48 | direction = "outbound" 49 | remote = "0.0.0.0/0" 50 | } 51 | 52 | # Rule that allows inbound traffic to the mongo server 53 | resource "ibm_is_security_group_rule" "mongodb_inbound" { 54 | group = ibm_is_security_group.mongodb_security_group.id 55 | direction = "inbound" 56 | remote = "0.0.0.0/0" 57 | tcp { 58 | port_min = 27017 59 | port_max = 27017 60 | } 61 | } 62 | 63 | # The subnet 64 | resource "ibm_is_subnet" "mongodb_subnet" { 65 | for_each = toset(["1", "2", "3"]) 66 | name = format("%s-subnet-%s", var.prefix, each.key) 67 | vpc = ibm_is_vpc.mongodb_vpc.id 68 | total_ipv4_address_count = 256 69 | zone = "${var.region}-${each.key}" 70 | tags = local.tags 71 | } 72 | 73 | # Archive of the folder containing docker-compose file. This folder could create 74 | # additional resources such as files to be mounted into containers, environment 75 | # files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 76 | resource "hpcr_tgz" "contract" { 77 | folder = "compose" 78 | } 79 | 80 | locals { 81 | # contract in clear text 82 | contract = yamlencode({ 83 | "env" : { 84 | "type" : "env", 85 | "logging" : { 86 | "logRouter" : { 87 | "hostname" : var.icl_hostname, 88 | "iamApiKey" : var.icl_iam_apikey, 89 | } 90 | }, 91 | "env" : { 92 | "MONGO_INITDB_ROOT_USERNAME" : var.mongo_user, 93 | "MONGO_INITDB_ROOT_PASSWORD" : var.mongo_password, 94 | "MONGO_REPLICA_SET_NAME" : var.mongo_replica_set_name, 95 | } 96 | }, 97 | "workload" : { 98 | "type" : "workload", 99 | "compose" : { 100 | "archive" : hpcr_tgz.contract.rendered 101 | } 102 | } 103 | }) 104 | } 105 | 106 | # Create a random key pair. For formal reasons, we need to pass an SSH key 107 | # into the VSI. 
It will not be used, that's why it can be random 108 | resource "tls_private_key" "mongodb_rsa_key" { 109 | algorithm = "RSA" 110 | rsa_bits = 4096 111 | } 112 | 113 | # We only need this because VPC expects this 114 | resource "ibm_is_ssh_key" "mongodb_sshkey" { 115 | name = format("%s-key", var.prefix) 116 | public_key = tls_private_key.mongodb_rsa_key.public_key_openssh 117 | tags = local.tags 118 | } 119 | 120 | # locate all public image 121 | data "ibm_is_images" "hyper_protect_images" { 122 | visibility = "public" 123 | status = "available" 124 | } 125 | 126 | # locate the latest hyper protect image 127 | data "hpcr_image" "hyper_protect_image" { 128 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 129 | } 130 | 131 | # In this step, we encrypt the fields of the contract and sign the env and workload field. 132 | # The certificate to execute the encryption is built into the provider and matches the 133 | # latest hyper protect image. If required, it can be overridden. 134 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 135 | resource "hpcr_contract_encrypted" "contract" { 136 | contract = local.contract 137 | } 138 | 139 | # Construct the VSI 140 | resource "ibm_is_instance" "mongodb_vsi" { 141 | for_each = toset(["1", "2", "3"]) 142 | name = format("%s-vsi-%s", var.prefix, each.key) 143 | image = data.hpcr_image.hyper_protect_image.image 144 | profile = var.profile 145 | keys = [ibm_is_ssh_key.mongodb_sshkey.id] 146 | vpc = ibm_is_vpc.mongodb_vpc.id 147 | tags = local.tags 148 | zone = "${var.region}-${each.key}" 149 | 150 | # user data field carries the encrypted contract, so all information visible at 151 | # the hypervisor layer is encrypted 152 | user_data = hpcr_contract_encrypted.contract.rendered 153 | 154 | primary_network_interface { 155 | name = "eth0" 156 | subnet = ibm_is_subnet.mongodb_subnet[each.key].id 157 | security_groups = [ibm_is_security_group.mongodb_security_group.id] 158 | } 159 | } 160 | 161 | # Attach a floating IP since VSI needs to push logs to logDNA server 162 | resource "ibm_is_floating_ip" "mongodb_floating_ip" { 163 | for_each = toset(["1", "2", "3"]) 164 | name = format("%s-floating-ip-%s", var.prefix, each.key) 165 | target = ibm_is_instance.mongodb_vsi[each.key].primary_network_interface[0].id 166 | tags = local.tags 167 | } 168 | 169 | # Log the floating IP for convenience 170 | output "ip_vsi_1" { 171 | value = resource.ibm_is_floating_ip.mongodb_floating_ip[1].address 172 | description = "The public IP address of the VSI_1" 173 | } 174 | 175 | # Log the floating IP for convenience 176 | output "ip_vsi_2" { 177 | value = resource.ibm_is_floating_ip.mongodb_floating_ip[2].address 178 | description = "The public IP address of the VSI_2" 179 | } 180 | 181 | # Log the floating IP for convenience 182 | output "ip_vsi_3" { 183 | value = resource.ibm_is_floating_ip.mongodb_floating_ip[3].address 184 | description = "The public IP address of the VSI_3" 185 | } 186 | -------------------------------------------------------------------------------- /terraform-hpvs/mongodb/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = <<-DESC 3 | Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: 4 | https://cloud.ibm.com/iam#/apikeys 5 | DESC 6 | } 7 | 8 | variable "region" { 9 | type = string 10 | description = "Region to deploy to, e.g. 
eu-gb" 11 | 12 | validation { 13 | condition = (var.region == "eu-gb" || 14 | var.region == "br-sao" || 15 | var.region == "ca-tor" || 16 | var.region == "jp-tok") 17 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok." 18 | } 19 | } 20 | 21 | variable "icl_hostname" { 22 | type = string 23 | description = <<-DESC 24 | Host of IBM Cloud Logs. This can be 25 | obtained from cloud logs tab under Logging instances 26 | DESC 27 | } 28 | 29 | variable "icl_iam_apikey" { 30 | type = string 31 | sensitive = true 32 | description = <<-DESC 33 | This can be obtained from Access(IAM) under Manage 34 | DESC 35 | } 36 | 37 | variable "prefix" { 38 | type = string 39 | default = "s390x-mongodb" 40 | description = "Prefix to be attached to name of all generated resources" 41 | } 42 | 43 | variable "profile" { 44 | type = string 45 | default = "bz2e-1x4" 46 | description = <<-DESC 47 | Profile used for the VSI. This has to be a secure execution 48 | profile in the format Xz2e-YxZ, e.g. bz2e-1x4 49 | DESC 50 | } 51 | 52 | variable "mongo_user" { 53 | type = string 54 | default = "mongouser" 55 | description = "UserID that is used to login to MongoDB instance" 56 | } 57 | 58 | variable "mongo_password" { 59 | type = string 60 | default = "mongouser" 61 | description = "Password that is used to login to MongoDB instance" 62 | } 63 | 64 | variable "mongo_replica_set_name" { 65 | type = string 66 | default = "replicaSet01" 67 | description = "Replica set name that is used to configure MongoDB" 68 | } 69 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/README.md: -------------------------------------------------------------------------------- 1 | ## Hello-World Sample 2 | 3 | This sample deploys the [nginx-hello](https://hub.docker.com/r/nginxdemos/hello/) example as a [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se) or IBM Secure Execution Virtual Machine for an on-premise libvirt host on z15 or z16. 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](../README.md). Make sure to setup IBM Cloud Logs Instance. 8 | 9 | ### Prerequisite (on-premise only) 10 | 11 | Please install the following package. 12 | - Mac: [cdrtools](https://formulae.brew.sh/formula/cdrtools) 13 | - Linux: genisoimage 14 | - Windows: [cdrtfe](https://cdrtfe.sourceforge.io/) 15 | 16 | Please copy your ssh public key (the content of `~/.ssh/id_rsa.pub` typically) to a file `authorized_keys` in your on-premise libvirt host because [terraform connects `libvirtd` in your host through the ssh protocol](https://wiki.libvirt.org/page/SSHSetup). 17 | 18 | ### Settings 19 | 20 | Use one of the following options to set you settings under the `cloud` or `onprem` directory: 21 | 22 | #### Template file 23 | 24 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 25 | 2. 
Fill the values in `my-settings.auto.tfvars` 26 | 27 | #### Environment variables 28 | 29 | Set the following environment variables: 30 | 31 | Under the `cloud` directory 32 | ```text 33 | IC_API_KEY= 34 | TF_VAR_zone= 35 | TF_VAR_region= 36 | TF_VAR_icl_iam_apikey= 37 | TF_VAR_icl_hostname= 38 | ``` 39 | 40 | Under the `onprem` directory 41 | ```text 42 | TF_VAR_libvirt_host= 43 | TF_VAR_libvirt_user= 44 | TF_VAR_vsi_image= 45 | TF_VAR_ssh_private_key_path= 46 | TF_VAR_icl_iam_apikey= 47 | TF_VAR_icl_hostname= 48 | ``` 49 | 50 | ### Run the Example 51 | 52 | #### Deploy the example on IBM Cloud 53 | 54 | Go to the `cloud` directory: 55 | 56 | ```bash 57 | cd cloud 58 | ``` 59 | 60 | Initialize terraform: 61 | 62 | ```bash 63 | terraform init 64 | ``` 65 | 66 | Deploy the example: 67 | 68 | ```bash 69 | terraform apply 70 | ``` 71 | 72 | This will create a sample virtual server instance and prints the public IP address of your VSI as an output to the console. 73 | 74 | #### Register a libvirt hook for port forwarding to your on-premise host 75 | 76 | If you want to access the example (nginx) though a port forwarding, please copy the content of `onprem/port-forward.sh` to `/etc/libvirt/hooks/qemu` in your host, and then run the following command in your host as a root: 77 | 78 | ```bash 79 | systemctl restart libvirtd.service 80 | ``` 81 | 82 | #### Deploy the example on your on-premise host 83 | 84 | Go to the `onprem` directory: 85 | 86 | ```bash 87 | cd onprem 88 | ``` 89 | 90 | Register your host as a known host 91 | 92 | ```bash 93 | ./add_known_host.sh 94 | ``` 95 | 96 | Initialize terraform: 97 | 98 | ```bash 99 | terraform init 100 | ``` 101 | 102 | Deploy the example: 103 | 104 | ```bash 105 | terraform apply 106 | ``` 107 | 108 | This will create a secure-execution virtual machine on your on-premise host. If you registered a libvirt hook for port forwarding to your host, you can use the IP address of the host to access the example. 109 | 110 | Note: If the `terraform apply` is failing with the following error. Add `` to the `domain_update.xsl` file. The [domain_update.xsl](https://github.com/ibm-hyper-protect/linuxone-vsi-automation-samples/blob/master/terraform-hpvs/nginx-hello/onprem/domain_update.xsl) has been updated with this change. Refer the file for more details. 111 | 112 | ``` 113 | Error: error defining libvirt domain: unsupported configuration: machine type 's390-ccw-virtio-noble' does not support ACPI 114 | ``` 115 | 116 | #### Test if the example works 117 | 118 | Use your browser to access: 119 | 120 | ```text 121 | http:// 122 | ``` 123 | 124 | This will show a sceen like this: 125 | 126 | ![nginx](images/nginx.jpg) 127 | 128 | Destroy the created resources: 129 | 130 | ```bash 131 | terraform destroy 132 | ``` 133 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/cloud/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | ibmcloud_api_key="Your IBM Cloud API key" 2 | region="ca-tor" # Region to deploy to. Options include eu-gb, jp-tok, br-sao, ca-tor, us-east 3 | zone="2" # Zone within region to create the HPVS into. 
4 | icl_hostname="Your ICL hostname" 5 | icl_iam_apikey="Your IBM Cloud API key" 6 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/cloud/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | tls = { 4 | source = "hashicorp/tls" 5 | version = ">= 4.0.1" 6 | } 7 | 8 | ibm = { 9 | source = "IBM-Cloud/ibm" 10 | version = ">= 1.37.1" 11 | } 12 | hpcr = { 13 | source = "ibm-hyper-protect/hpcr" 14 | version = ">= 0.1.4" 15 | } 16 | } 17 | } 18 | 19 | module "user_data" { 20 | source = "../user_data" 21 | icl_hostname = var.icl_hostname 22 | icl_iam_apikey = var.icl_iam_apikey 23 | } 24 | 25 | # make sure to target the correct region and zone 26 | provider "ibm" { 27 | region = var.region 28 | zone = "${var.region}-${var.zone}" 29 | } 30 | 31 | locals { 32 | # some reusable tags that identify the resources created by his sample 33 | tags = ["hpcr", "sample", var.prefix] 34 | } 35 | 36 | # the VPC 37 | resource "ibm_is_vpc" "hello_world_vpc" { 38 | name = format("%s-vpc", var.prefix) 39 | tags = local.tags 40 | } 41 | 42 | # the security group 43 | resource "ibm_is_security_group" "hello_world_security_group" { 44 | name = format("%s-security-group", var.prefix) 45 | vpc = ibm_is_vpc.hello_world_vpc.id 46 | tags = local.tags 47 | } 48 | 49 | # rule that allows the VSI to make outbound connections, this is required 50 | # to connect to the logDNA instance as well as to docker to pull the image 51 | resource "ibm_is_security_group_rule" "hello_world_outbound" { 52 | group = ibm_is_security_group.hello_world_security_group.id 53 | direction = "outbound" 54 | remote = "0.0.0.0/0" 55 | } 56 | 57 | # rule that allows inbound traffic to the nginx server 58 | resource "ibm_is_security_group_rule" "hello_world_inbound" { 59 | group = ibm_is_security_group.hello_world_security_group.id 60 | direction = "inbound" 61 | remote = "0.0.0.0/0" 62 | tcp { 63 | port_min = 80 64 | port_max = 80 65 | } 66 | } 67 | 68 | # the subnet 69 | resource "ibm_is_subnet" "hello_world_subnet" { 70 | name = format("%s-subnet", var.prefix) 71 | vpc = ibm_is_vpc.hello_world_vpc.id 72 | total_ipv4_address_count = 256 73 | zone = "${var.region}-${var.zone}" 74 | tags = local.tags 75 | } 76 | 77 | # create a random key pair, because for formal reasons we need to pass an SSH key into the VSI. 
It will not be used, that's why 78 | # it can be random 79 | resource "tls_private_key" "hello_world_rsa_key" { 80 | algorithm = "RSA" 81 | rsa_bits = 4096 82 | } 83 | 84 | # we only need this because VPC expects this 85 | resource "ibm_is_ssh_key" "hello_world_sshkey" { 86 | name = format("%s-key", var.prefix) 87 | public_key = tls_private_key.hello_world_rsa_key.public_key_openssh 88 | tags = local.tags 89 | } 90 | 91 | # locate all public image 92 | data "ibm_is_images" "hyper_protect_images" { 93 | visibility = "public" 94 | status = "available" 95 | } 96 | 97 | # locate the latest hyper protect image 98 | data "hpcr_image" "hyper_protect_image" { 99 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 100 | } 101 | 102 | # construct the VSI 103 | resource "ibm_is_instance" "hello_world_vsi" { 104 | name = format("%s-vsi", var.prefix) 105 | image = data.hpcr_image.hyper_protect_image.image 106 | profile = var.profile 107 | keys = [ibm_is_ssh_key.hello_world_sshkey.id] 108 | vpc = ibm_is_vpc.hello_world_vpc.id 109 | tags = local.tags 110 | zone = "${var.region}-${var.zone}" 111 | 112 | # the user data field carries the encrypted contract, so all information visible at the hypervisor layer is encrypted 113 | user_data = module.user_data.user_data 114 | 115 | primary_network_interface { 116 | name = "eth0" 117 | subnet = ibm_is_subnet.hello_world_subnet.id 118 | security_groups = [ibm_is_security_group.hello_world_security_group.id] 119 | } 120 | 121 | } 122 | 123 | # attach a floating IP since we would like to access the embedded server via the internet 124 | resource "ibm_is_floating_ip" "hello_world_floating_ip" { 125 | name = format("%s-floating-ip", var.prefix) 126 | target = ibm_is_instance.hello_world_vsi.primary_network_interface[0].id 127 | tags = local.tags 128 | } 129 | 130 | # log the floating IP for convenience 131 | output "ip" { 132 | value = resource.ibm_is_floating_ip.hello_world_floating_ip.address 133 | description = "The public IP address of the VSI" 134 | } 135 | 136 | # output the contract as a plain text (debugging purpose) 137 | resource "local_file" "user_data_plain" { 138 | content = module.user_data.user_data_plan 139 | filename = "user-data-plain" 140 | } 141 | 142 | # output the contract (encrypted) 143 | resource "local_file" "user_data" { 144 | content = module.user_data.user_data 145 | filename = "user-data" 146 | } -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/cloud/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | description = "Region to deploy to, e.g. eu-gb" 4 | 5 | validation { 6 | condition = ( var.region == "eu-gb" || 7 | var.region == "br-sao" || 8 | var.region == "ca-tor" || 9 | var.region == "jp-tok" || 10 | var.region == "us-east" ) 11 | error_message = "Value of region must be one of eu-gb/br-sao/ca-tor/jp-tok/us-east." 12 | } 13 | } 14 | 15 | variable "zone" { 16 | type = string 17 | default = "2" 18 | description = "Zone to deploy to, e.g. 2." 19 | 20 | validation { 21 | condition = ( var.zone == "1" || 22 | var.zone == "2" || 23 | var.zone == "3") 24 | error_message = "Value of zone must be one of 1/2/3." 
25 | } 26 | } 27 | 28 | variable "prefix" { 29 | type = string 30 | default = "hpcr-sample-nginx-hello" 31 | description = "Prefix to be attached to name of all generated resources" 32 | } 33 | 34 | variable "profile" { 35 | type = string 36 | default = "bz2e-1x4" 37 | description = <<-DESC 38 | Profile used for the VSI. This has to be a secure execution 39 | profile in the format Xz2e-YxZ, e.g. bz2e-1x4 40 | DESC 41 | } 42 | 43 | variable "icl_hostname" { 44 | type = string 45 | description = <<-DESC 46 | Host of IBM Cloud Logs. This can be 47 | obtained from cloud logs tab under Logging instances 48 | DESC 49 | } 50 | 51 | variable "icl_iam_apikey" { 52 | type = string 53 | sensitive = true 54 | description = <<-DESC 55 | This can be obtained from Access(IAM) under Manage 56 | DESC 57 | } -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/images/nginx.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-hyper-protect/linuxone-vsi-automation-samples/fe5c0518b8b335894eb642b196e558c05a8e8a7a/terraform-hpvs/nginx-hello/images/nginx.jpg -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/onprem/add_known_host.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LIBVIRT_HOST=${1} 4 | 5 | echo Adding ${LIBVIRT_HOST} to known_hosts 6 | ssh-keyscan -H ${LIBVIRT_HOST} >> ~/.ssh/known_hosts 2> /dev/null 7 | echo "" 8 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/onprem/domain_update.xsl: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | on 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/onprem/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | libvirt_host="Your libvirt host name" 2 | libvirt_user="User name authorized by a SSH server in your libvirt host" 3 | vsi_image="Path to your VSI image" 4 | icl_hostname="Your ICL hostname" 5 | icl_iam_apikey="Your IBM Cloud API key" 6 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/onprem/port-forward.sh: -------------------------------------------------------------------------------- 1 | if [ "${1}" = "hpcr-sample-nginx-hello-domain" ]; then 2 | 3 | GUEST_IP=192.168.122.170 4 | GUEST_PORT=80 5 | HOST_PORT=80 6 | 7 | echo qemu-hook 8 | if [ "${2}" = "stopped" ] || [ "${2}" = "reconnect" ]; then 9 | /sbin/iptables -D LIBVIRT_INP -m state --state NEW -m tcp -p tcp --dport ${HOST_PORT} -j ACCEPT 10 | 11 | /sbin/iptables -D FORWARD -o virbr0 -p tcp -d $GUEST_IP --dport $GUEST_PORT -j ACCEPT 12 | /sbin/iptables -D FORWARD -p tcp -d $GUEST_IP -j LOG --log-level debug --log-prefix FORWARD 13 | 14 | /sbin/iptables -D LIBVIRT_FWI -o virbr0 -p tcp -d $GUEST_IP --dport $GUEST_PORT -j ACCEPT 15 | /sbin/iptables -D LIBVIRT_FWI -o virbr0 -d 192.168.122.0/24 -m state --state NEW -j ACCEPT 16 | 17 | /sbin/iptables -t nat -D PREROUTING -p tcp --dport $HOST_PORT -j DNAT --to $GUEST_IP:$GUEST_PORT 18 | /sbin/iptables -t nat -D PREROUTING -p tcp --dport 
$HOST_PORT -j LOG --log-level debug --log-prefix PREROUTING 19 | 20 | /sbin/iptables -t nat -D LIBVIRT_PRT -p tcp --dport $HOST_PORT -j MASQUERADE -s $GUEST_IP -d $GUEST_IP 21 | /sbin/iptables -t nat -D LIBVIRT_PRT -p tcp --dport $HOST_PORT -s $GUEST_IP -d $GUEST_IP -j LOG --log-level debug --log-prefix LIBVIRT_PRT 22 | fi 23 | if [ "${2}" = "start" ] || [ "${2}" = "reconnect" ]; then 24 | 25 | /sbin/iptables -I LIBVIRT_INP -m state --state NEW -m tcp -p tcp --dport ${HOST_PORT} -j ACCEPT 26 | 27 | /sbin/iptables -I FORWARD -o virbr0 -p tcp -d $GUEST_IP --dport $GUEST_PORT -j ACCEPT 28 | /sbin/iptables -I FORWARD -p tcp -d $GUEST_IP -j LOG --log-level debug --log-prefix FORWARD 29 | 30 | /sbin/iptables -t nat -I PREROUTING -p tcp --dport $HOST_PORT -j DNAT --to $GUEST_IP:$GUEST_PORT 31 | /sbin/iptables -t nat -I PREROUTING -p tcp --dport $HOST_PORT -j LOG --log-level debug --log-prefix PREROUTING 32 | fi 33 | fi -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/onprem/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | } 6 | } 7 | } 8 | 9 | provider "libvirt" { 10 | uri = format("qemu+ssh://%s@%s/system?keyfile=%s&sshauth=privkey", var.libvirt_user, var.libvirt_host, urlencode(pathexpand(var.ssh_private_key_path))) 11 | } 12 | 13 | module "user_data" { 14 | source = "../user_data" 15 | icl_hostname = var.icl_hostname 16 | icl_iam_apikey = var.icl_iam_apikey 17 | } 18 | 19 | # output the contract as a plain text (debugging purpose) 20 | resource "local_file" "user_data_plain" { 21 | content = module.user_data.user_data_plan 22 | filename = "../build/cloud-init/user-data-plain" 23 | } 24 | 25 | resource "local_file" "meta_data" { 26 | content = yamlencode({ 27 | "local-hostname": var.hostname 28 | }) 29 | filename = "../build/cloud-init/meta-data" 30 | } 31 | 32 | resource "local_file" "vendor_data" { 33 | content = < 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 0 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/user_data/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | helloworld: 3 | image: docker.io/nginxdemos/hello@sha256:dedfbe85183df66f3fdc99accf53e1b2171908dffd4d6556603ba4810b1fce6e 4 | ports: 5 | - "80:80" 6 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/user_data/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="Your ICL hostname" 2 | icl_iam_apikey="Your IBM Cloud API key" 3 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/user_data/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.4" 6 | } 7 | } 8 | } 9 | 10 | # archive of the folder containing docker-compose file. This folder could create additional resources such as files 11 | # to be mounted into containers, environment files etc. 
This is why all of these files get bundled in a tgz file (base64 encoded) 12 | resource "hpcr_tgz" "contract" { 13 | folder = "${path.module}/compose" 14 | } 15 | 16 | locals { 17 | # contract in clear text 18 | contract = yamlencode({ 19 | "env" : { 20 | "type" : "env", 21 | "logging" : { 22 | "logRouter" : { 23 | "hostname" : var.icl_hostname, 24 | "iamApiKey" : var.icl_iam_apikey, 25 | } 26 | } 27 | }, 28 | "workload" : { 29 | "type" : "workload", 30 | "compose" : { 31 | "archive" : hpcr_tgz.contract.rendered 32 | } 33 | } 34 | }) 35 | } 36 | 37 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 38 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 39 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 40 | resource "hpcr_contract_encrypted" "contract" { 41 | contract = local.contract 42 | } 43 | 44 | output "user_data" { 45 | value = resource.hpcr_contract_encrypted.contract.rendered 46 | } 47 | 48 | output "user_data_plan" { 49 | value = local.contract 50 | } 51 | -------------------------------------------------------------------------------- /terraform-hpvs/nginx-hello/user_data/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_hostname" { 2 | type = string 3 | description = <<-DESC 4 | Host of IBM Cloud Logs. This can be 5 | obtained from cloud logs tab under Logging instances 6 | DESC 7 | } 8 | 9 | variable "icl_iam_apikey" { 10 | type = string 11 | sensitive = true 12 | description = <<-DESC 13 | This can be obtained from Access(IAM) under Manage 14 | DESC 15 | } 16 | -------------------------------------------------------------------------------- /terraform-hpvs/postgresql-cluster/README.md: -------------------------------------------------------------------------------- 1 | ## Postgres - Running s390x version of Postgres 2 | 3 | This deploys a simple [postgres](https://hub.docker.com/r/s390x/postgres/) cluster on [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se). This will create a master instance and two slave instances. The postgres cluster only implements the function of master-slave backup. Slave instance backed up master instance. 4 | 5 | ### Prerequisite 6 | 7 | Prepare your environment according to [these steps](https://github.com/ibm-hyper-protect/linuxone-vsi-automation-samples/blob/master/terraform-hpvs/README.md). Make sure to setup IBM Cloud Logs Instance. 
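Once the cluster is running (see the steps below), you can check from the master that both slave instances are streaming. This is a hedged sketch that uses standard PostgreSQL tooling rather than anything shipped with this example; substitute the master VSI IP printed by Terraform:

```bash
# list connected replicas and their replication state on the master
psql -h ${master_vsi_ip} -p 5432 -U postgres \
  -c "SELECT client_addr, state FROM pg_stat_replication;"
```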
8 | 9 | ### Settings 10 | 11 | Set the following environment variables: 12 | 13 | ```text 14 | export IBMCLOUD_API_ENDPOINT=https://test.cloud.ibm.com 15 | export IBMCLOUD_IAM_API_ENDPOINT=https://iam.test.cloud.ibm.com 16 | export IBMCLOUD_IS_NG_API_ENDPOINT=https://us-south-*.cloud.ibm.com/v1 #set the value 17 | export IBMCLOUD_IS_API_ENDPOINT=https://us-south-*.cloud.ibm.com #set the value 18 | ``` 19 | 20 | Set the default vaule of variables.tf: 21 | 22 | ```text 23 | "ibmcloud_api_key" 24 | "region" 25 | "zone_master" 26 | "zone_slave_1" 27 | "zone_slave_2" 28 | "icl_iam_apikey" 29 | "icl_hostname" 30 | "prefix" 31 | "profile" 32 | "ssh_public_key" 33 | ``` 34 | 35 | Create a new folder `compose_slave`: 36 | 37 | ```text 38 | mkdir compose_slave 39 | ``` 40 | 41 | ### Run the Example 42 | 43 | - Initialize terraform: 44 | 45 | ```bash 46 | terraform init 47 | ``` 48 | 49 | - Deploy the postgres container 50 | 51 | ```bash 52 | terraform apply 53 | ``` 54 | 55 | This will create a master instance and two slave instances of Hyper Protect Virtual Server for VPC instances in given regions and prints the public IP address of the VSI as an output to the console. You could use clients `psql` to connect to the database. 56 | 57 | ```bash 58 | psql -h ${vsi_ip} -p 5432 -U postgres 59 | ``` 60 | 61 | - Destroy the created resources: 62 | 63 | ```bash 64 | terraform destroy 65 | ``` 66 | -------------------------------------------------------------------------------- /terraform-hpvs/postgresql-cluster/compose_master/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | postgresql: 3 | image: docker.io/library/postgres:12@sha256:d5433064852277f3187a591e02780d377c253f69bfbe3ca66c4cf3d58be83996 4 | ports: 5 | - "5432:5432" 6 | restart: always 7 | environment: 8 | - POSTGRES_HOST_AUTH_METHOD=trust 9 | command: 10 | - /bin/bash 11 | - -c 12 | - | 13 | docker-entrypoint.sh postgres & 14 | sleep 2 15 | su postgres -c "psql -c \"CREATE ROLE replica WITH REPLICATION PASSWORD 'testpassword' LOGIN;\"" 16 | su postgres -c "psql -c 'create database database_for_demo;'" 17 | su postgres -c "psql -c '\c database_for_demo;' -c 'CREATE TABLE TABLE_FOR_DEMO(ID INT PRIMARY KEY NOT NULL, NUMBER INT NOT NULL);'" 18 | su postgres -c "psql -c '\c database_for_demo;' -c 'INSERT INTO TABLE_FOR_DEMO (ID,NUMBER) VALUES (1, 1234567);'" 19 | su postgres -c "psql -c '\c database_for_demo;' -c 'INSERT INTO TABLE_FOR_DEMO (ID,NUMBER) VALUES (2, 7654321);'" 20 | sed -i '$$a host replication replica all trust' /var/lib/postgresql/data/pg_hba.conf 21 | kill %1 22 | wait 23 | docker-entrypoint.sh postgres -------------------------------------------------------------------------------- /terraform-hpvs/postgresql-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = "Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: https://cloud.ibm.com/iam#/apikeys" 3 | default = "*********" 4 | } 5 | 6 | variable "region" { 7 | type = string 8 | description = "Region to deploy to, e.g. eu-gb." 9 | default = "us-south" 10 | } 11 | 12 | variable "zone_master" { 13 | type = string 14 | description = "Zone to deploy master to, e.g. 1." 15 | default = "1" 16 | } 17 | 18 | variable "zone_slave_1" { 19 | type = string 20 | description = "Zone to deploy slave_1 to, e.g. 2." 
21 | default = "2" 22 | } 23 | 24 | variable "zone_slave_2" { 25 | type = string 26 | description = "Zone to deploy slave_2 to, e.g. 3." 27 | default = "3" 28 | } 29 | 30 | variable "icl_hostname" { 31 | type = string 32 | description = <<-DESC 33 | Host of IBM Cloud Logs. This can be 34 | obtained from cloud logs tab under Logging instances 35 | DESC 36 | } 37 | 38 | variable "icl_iam_apikey" { 39 | type = string 40 | sensitive = true 41 | description = <<-DESC 42 | This can be obtained from Access(IAM) under Manage 43 | DESC 44 | } 45 | 46 | variable "prefix" { 47 | type = string 48 | description = "Prefix for all generated resources. Make sure to have a custom image with that name." 49 | default = "zvsi-sample-postgres" 50 | } 51 | 52 | variable "profile" { 53 | type = string 54 | description = "Profile used for the VSI, this is a profile for normal zlinux VSI , the profile in the format Xz2-YxZ, e.g. bz2-1x4." 55 | default = "bz2e-1x4" 56 | } 57 | 58 | variable "ssh_public_key" { 59 | default = "~/.ssh/id_rsa.pub" 60 | } 61 | -------------------------------------------------------------------------------- /terraform-hpvs/postgresql/README.md: -------------------------------------------------------------------------------- 1 | ## Postgres - Running s390x version of Postgres 2 | ​ 3 | This sample deploys one instance of [postgres](https://hub.docker.com/r/s390x/postgres/) on [IBM Cloud Hyper Protect Virtual Server for IBM Cloud VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-about-se) in a given region. 4 | ​ 5 | ### Prerequisite 6 | ​ 7 | Prepare your environment according to [these steps](https://github.com/ibm-hyper-protect/linuxone-vsi-automation-samples/blob/master/terraform-hpvs/README.md). Make sure to setup IBM Cloud Logs Instance. 8 | ​ 9 | ### Settings 10 | ​ 11 | Set the default vaule of variables.tf: 12 | ​ 13 | ```text 14 | "ibmcloud_api_key" 15 | "region" 16 | "zone" 17 | "icl_iam_apikey" 18 | "icl_hostname" 19 | "prefix" 20 | "profile" 21 | "ssh_public_key" 22 | ``` 23 | ​ 24 | ### Run the Example 25 | ​ 26 | - Initialize terraform: 27 | ​ 28 | ```bash 29 | terraform init 30 | ``` 31 | ​ 32 | - Deploy the postgres container 33 | ​ 34 | ```bash 35 | terraform apply 36 | ``` 37 | ​ 38 | This will create one instances of Hyper Protect Virtual Server for VPC instances in a given region and prints the public IP address of the VSI as an output to the console. You could use clients `psql` to connect to the database. 
39 | ​ 40 | ```bash 41 | psql -h ${vsi_ip} -p 5432 -U postgres 42 | ``` 43 | ​ 44 | - Destroy the created resources: 45 | ​ 46 | ```bash 47 | terraform destroy 48 | ``` 49 | -------------------------------------------------------------------------------- /terraform-hpvs/postgresql/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | postgresql: 3 | image: docker.io/library/postgres:12@sha256:429c57f9ae8f379601df6df7448b0c2c8df4da23d7ba2cb57fee8da8f262eda0 4 | ports: 5 | - "5432:5432" 6 | environment: 7 | - POSTGRES_HOST_AUTH_METHOD=trust -------------------------------------------------------------------------------- /terraform-hpvs/postgresql/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | ibm = { 4 | source = "IBM-Cloud/ibm" 5 | version = ">= 1.37.1" 6 | } 7 | hpcr = { 8 | source = "ibm-hyper-protect/hpcr" 9 | version = ">= 0.1.4" 10 | } 11 | } 12 | } 13 | # make sure to target the correct region and zone 14 | provider "ibm" { 15 | ibmcloud_api_key = var.ibmcloud_api_key 16 | region = var.region 17 | zone = "${var.region}-${var.zone}" 18 | } 19 | locals { 20 | # some reusable tags that identify the resources created by his sample 21 | tags = ["zvsi", "sample", var.prefix] 22 | } 23 | # the VPC 24 | resource "ibm_is_vpc" "postgres_vpc" { 25 | name = format("%s-vpc", var.prefix) 26 | tags = local.tags 27 | } 28 | # the security group 29 | resource "ibm_is_security_group" "postgres_security_group" { 30 | name = format("%s-security-group", var.prefix) 31 | vpc = ibm_is_vpc.postgres_vpc.id 32 | tags = local.tags 33 | } 34 | resource "ibm_is_security_group_rule" "postgres_outbound" { 35 | group = ibm_is_security_group.postgres_security_group.id 36 | direction = "outbound" 37 | remote = "0.0.0.0/0" 38 | } 39 | # Rule that allows inbound traffic to the postgres server 40 | # to connect to the logDNA instance as well as to docker to pull the image 41 | resource "ibm_is_security_group_rule" "postgres_inbound" { 42 | group = ibm_is_security_group.postgres_security_group.id 43 | direction = "inbound" 44 | remote = "0.0.0.0/0" 45 | tcp { 46 | port_min = 5432 47 | port_max = 5432 48 | } 49 | } 50 | # the subnet 51 | resource "ibm_is_subnet" "postgres_subnet" { 52 | name = format("%s-subnet", var.prefix) 53 | vpc = ibm_is_vpc.postgres_vpc.id 54 | total_ipv4_address_count = 256 55 | zone = "${var.region}-${var.zone}" 56 | tags = local.tags 57 | } 58 | resource "hpcr_tgz" "contract" { 59 | folder = "compose" 60 | } 61 | locals { 62 | # contract in clear text 63 | contract = yamlencode({ 64 | "env" : { 65 | "type" : "env", 66 | "logging" : { 67 | "logRouter" : { 68 | "hostname" : var.icl_hostname, 69 | "iamApiKey" : var.icl_iam_apikey, 70 | } 71 | }, 72 | }, 73 | "workload" : { 74 | "type" : "workload", 75 | "compose" : { 76 | "archive" : hpcr_tgz.contract.rendered 77 | } 78 | } 79 | }) 80 | } 81 | resource "ibm_is_ssh_key" "postgres_sshkey" { 82 | name = "key-terr" 83 | public_key = file(var.ssh_public_key) 84 | tags = local.tags 85 | } 86 | # locate all public image 87 | data "ibm_is_images" "hyper_protect_images" { 88 | visibility = "public" 89 | status = "available" 90 | } 91 | 92 | # locate the latest hyper protect image 93 | data "hpcr_image" "hyper_protect_image" { 94 | images = jsonencode(data.ibm_is_images.hyper_protect_images.images) 95 | } 96 | resource "hpcr_contract_encrypted" "contract" { 97 | contract = local.contract 98 | } 99 
| # construct the VSI 100 | resource "ibm_is_instance" "postgres_vsi" { 101 | name = format("%s-vsi", var.prefix) 102 | image = data.hpcr_image.hyper_protect_image.image 103 | profile = var.profile 104 | keys = ["${ibm_is_ssh_key.postgres_sshkey.id}"] 105 | vpc = ibm_is_vpc.postgres_vpc.id 106 | tags = local.tags 107 | zone = "${var.region}-${var.zone}" 108 | user_data = hpcr_contract_encrypted.contract.rendered 109 | primary_network_interface { 110 | name = "eth0" 111 | subnet = ibm_is_subnet.postgres_subnet.id 112 | security_groups = [ibm_is_security_group.postgres_security_group.id] 113 | } 114 | } 115 | resource "ibm_is_floating_ip" "postgres_fip" { 116 | name = format("%s-vsi", var.prefix) 117 | target = ibm_is_instance.postgres_vsi.primary_network_interface[0].id 118 | } 119 | output "sshcommand" { 120 | value = resource.ibm_is_floating_ip.postgres_fip.address 121 | description = "The public IP address of the VSI" 122 | } -------------------------------------------------------------------------------- /terraform-hpvs/postgresql/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = "Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: https://cloud.ibm.com/iam#/apikeys" 3 | default = "********" 4 | } 5 | variable "region" { 6 | type = string 7 | description = "Region to deploy to, e.g. eu-gb." 8 | default = "us-south" 9 | } 10 | variable "zone" { 11 | type = string 12 | description = "Zone to deploy to, e.g. 2." 13 | default = "2" 14 | } 15 | variable "icl_hostname" { 16 | type = string 17 | description = <<-DESC 18 | Host of IBM Cloud Logs. This can be 19 | obtained from cloud logs tab under Logging instances 20 | DESC 21 | } 22 | 23 | variable "icl_iam_apikey" { 24 | type = string 25 | sensitive = true 26 | description = <<-DESC 27 | This can be obtained from Access(IAM) under Manage 28 | DESC 29 | } 30 | variable "prefix" { 31 | type = string 32 | description = "Prefix for all generated resources. Make sure to have a custom image with that name." 33 | default = "zvsi-sample-postgres" 34 | } 35 | variable "profile" { 36 | type = string 37 | description = "Profile used for the VSI, this is a profile for normal zlinux VSI , the profile in the format Xz2-YxZ, e.g. bz2-1x4." 38 | default = "bz2e-1x4" 39 | } 40 | variable "ssh_public_key" { 41 | default = "~/.ssh/id_rsa.pub" 42 | } 43 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-daytrader/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation example for the Daytrader sample application 2 | 3 | This sample creates an encrypted and signed contract and stores it locally in a file. You can later use the contract to provision a HPVS for VPC instance. 4 | The contract will define the container image, the container registry and the credentials for pulling your workload container image. 5 | 6 | ### Build the daytrader sample application 7 | On LinuxONE, e.g. a virtual server for VPC with s390x architecture, build the container image for the DayTrader sample application. 8 | 9 | To do so, clone or [download](https://github.com/OpenLiberty/sample.daytrader8/archive/master.zip) this [repository](https://github.com/OpenLiberty/sample.daytrader8/). 10 | 11 | From inside the sample.daytrader8 directory, build the application with the following commands: 12 | ``` 13 | mvn clean package 14 | docker build . 
-t daytrader:s390x 15 | ``` 16 | 17 | Then tag and push the resulting container image to your container registry. 18 | 19 | ### Prerequisite 20 | 21 | Prepare your local environment according to [these steps](../README.md). Make sure to set up an IBM Cloud Logs instance. 22 | 23 | ### Define your settings 24 | 25 | Define your settings: 26 | - icl_hostname: The host name of your ICL Log instance which you provisioned previously 27 | - icl_iam_apikey: The API key of your Log instance 28 | - registry: The container registry where the workload container image is pulled from, e.g. `us.icr.io` 29 | - pull_username: The container registry username for pulling your workload container image 30 | - pull_password: The container registry password for pulling your workload container image 31 | 32 | The settings are defined in the form of Terraform variables. 33 | 34 | Define the variables in a template file: 35 | 36 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 37 | 2. Fill the values in `my-settings.auto.tfvars` 38 | 39 | ### Define your workload 40 | 41 | Create the file `compose/pod.yml` for your workload. Adapt the value for `image` to reference your container registry and your container image including the digest, e.g.: 42 | 43 | ``` 44 | apiVersion: v1 45 | kind: Pod 46 | metadata: 47 | name: daytrader 48 | spec: 49 | containers: 50 | - name: daytrader 51 | image: us.icr.io/sample/daytrader@sha256:5f4f20aee41e27858a8ed320faed6c2eb8b62dd4bf3e1737f54575a756c7a5da 52 | ports: 53 | - containerPort: 9080 54 | hostPort: 9080 55 | protocol: tcp 56 | ``` 57 | 58 | ### Create the contract 59 | 60 | ```bash 61 | terraform init 62 | terraform apply 63 | ``` 64 | 65 | ### Further steps 66 | 67 | The contract will be written to the file `build/contract.yml` and can now be used, for example, to provision an HPVS for VPC instance. 68 | 69 | Note that you will need to create a public gateway in your VPC before creating the HPVS for VPC instance. This is necessary to allow the HPVS for VPC instance to reach your Log instance through the public gateway. Also assign a floating IP to your HPVS for VPC instance. 70 | 71 | Once the instance is started, you can access the application at `http://<floating-ip>:9080/daytrader`, where `<floating-ip>` is the floating IP you assigned to the instance. 72 | 73 | After provisioning the HPVS for VPC instance you can use JMeter to test your daytrader application. To do so follow [these instructions](https://github.com/OpenLiberty/sample.daytrader8/blob/main/README_LOAD_TEST.md). 74 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-daytrader/compose/pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: daytrader 5 | spec: 6 | containers: 7 | - name: daytrader10 8 | image: us.icr.io/sample/daytrader@sha256:5f4f20aee41e27858a8ed320faed6c2eb8b62dd4bf3e1737f54575a756c7a5da 9 | ports: 10 | - containerPort: 9080 11 | hostPort: 9080 12 | protocol: tcp 13 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-daytrader/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="Your ICL hostname" 2 | icl_iam_apikey="Your IBM Cloud API key" 3 | registry="Prefix for the dynamic registry" # e.g.
docker.io/library or us.icr.io 4 | pull_username="Username for registry" # Username with read access to the container registry 5 | pull_password="Password for registry" # Password with read access to the container registry 6 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-daytrader/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.6" 6 | } 7 | } 8 | } 9 | 10 | # archive of the folder containing the pod.yml file. This folder could create additional resources such as files 11 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 12 | resource "hpcr_tgz" "contract" { 13 | folder = "compose" 14 | } 15 | 16 | locals { 17 | # contract in clear text 18 | contract = yamlencode({ 19 | "env" : { 20 | "type" : "env", 21 | "logging" : { 22 | "logRouter" : { 23 | "hostname" : var.icl_hostname, 24 | "iamApiKey" : var.icl_iam_apikey, 25 | } 26 | }, 27 | "auths" : { 28 | (var.registry) : { 29 | "username" : var.pull_username, 30 | "password" : var.pull_password 31 | } 32 | }, 33 | "env" : { 34 | "REGISTRY" : var.registry 35 | } 36 | }, 37 | "workload" : { 38 | "type" : "workload", 39 | "play" : { 40 | "archive" : hpcr_tgz.contract.rendered 41 | } 42 | } 43 | }) 44 | } 45 | 46 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 47 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 48 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 49 | resource "hpcr_contract_encrypted" "contract" { 50 | contract = local.contract 51 | } 52 | 53 | resource "local_file" "contract" { 54 | content = hpcr_contract_encrypted.contract.rendered 55 | filename = "${path.module}/build/contract.yml" 56 | } -------------------------------------------------------------------------------- /terraform-hpvs/sample-daytrader/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_hostname" { 2 | type = string 3 | description = <<-DESC 4 | Host of IBM Cloud Logs. This can be 5 | obtained from cloud logs tab under Logging instances 6 | DESC 7 | } 8 | 9 | variable "icl_iam_apikey" { 10 | type = string 11 | sensitive = true 12 | description = <<-DESC 13 | This can be obtained from Access(IAM) under Manage 14 | DESC 15 | } 16 | 17 | variable "registry" { 18 | type = string 19 | description = <<-DESC 20 | Prefix of the container registry used to pull the image 21 | DESC 22 | } 23 | 24 | variable "pull_username" { 25 | type = string 26 | description = <<-DESC 27 | Username to pull from the above registry 28 | DESC 29 | } 30 | 31 | variable "pull_password" { 32 | type = string 33 | description = <<-DESC 34 | Password to pull from the above registry 35 | DESC 36 | } 37 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-paynow/README.md: -------------------------------------------------------------------------------- 1 | ## Contract generation example for the PayNow sample application 2 | 3 | This sample creates an encrypted and signed contract and stores it locally in a file. You can later use the contract to provision a HPVS for VPC instance. 
4 | The contract will define the container image, the container registry and the credentials for pulling your workload container image, as well as a server certificate and server key. 5 | 6 | For more information, see this [tutorial](https://cloud.ibm.com/docs/vpc?topic=vpc-financial-transaction-confidential-computing-on-hyper-protect-virtual-server-for-vpc) and the [PayNow sample application](https://github.com/ibm-hyper-protect/paynow-website). 7 | 8 | ### Prerequisite 9 | 10 | Prepare your local environment according to [these steps](../README.md). Make sure to set up an IBM Cloud Logs instance. 11 | 12 | ### Define your settings 13 | 14 | In the file `compose/pod.yml`, adapt the value for `image` to reference your container registry and your container image including the digest. 15 | 16 | Define your settings: 17 | - icl_hostname: The host name of your ICL Log instance which you provisioned previously 18 | - icl_iam_apikey: The API key of your Log instance 19 | - registry: The container registry where the workload container image is pulled from, e.g. `us.icr.io` 20 | - pull_username: The container registry username for pulling your workload container image 21 | - pull_password: The container registry password for pulling your workload container image 22 | - server_cert: The base64-encoded SSL server certificate 23 | - server_key: The base64-encoded SSL server key 24 | 25 | The settings are defined in the form of Terraform variables in a template file: 26 | 27 | 1. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 28 | 2. Fill the values in `my-settings.auto.tfvars` 29 | 30 | ### Create the contract 31 | 32 | ```bash 33 | terraform init 34 | terraform apply 35 | ``` 36 | 37 | ### Further steps 38 | 39 | The contract will be written to the file `build/contract.yml` and can now be used, for example, to provision an HPVS for VPC instance. 40 | 41 | Note that you will need to create a public gateway in your VPC before creating the HPVS for VPC instance. This is necessary to allow the HPVS for VPC instance to reach your Log instance through the public gateway. Also assign a floating IP to your HPVS for VPC instance. 42 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-paynow/compose/pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: paynow 5 | spec: 6 | containers: 7 | - name: main 8 | image: ghcr.io/ibm-hyper-protect/paynow-website@sha256:ddba7d52d058f46b184d67783e8c55999a8d439a1eb11d3d5314cd787a928bc3 9 | ports: 10 | - containerPort: 8080 11 | hostPort: 8080 12 | protocol: tcp 13 | - containerPort: 8443 14 | hostPort: 8443 15 | protocol: tcp 16 | envFrom: 17 | - configMapRef: 18 | name: contract.config.map 19 | optional: false 20 | volumeMounts: 21 | - name: hyperprotect 22 | mountPath: /var/hyperprotect/ 23 | restartPolicy: Never 24 | volumes: 25 | - name: hyperprotect 26 | hostPath: 27 | path: /var/hyperprotect/ 28 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-paynow/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | icl_hostname="Your ICL hostname" 2 | icl_iam_apikey="Your IBM Cloud API key" 3 | registry="Prefix for the dynamic registry" # e.g.
docker.io/library or us.icr.io 4 | pull_username="Username for registry" # Username with read access to the container registry 5 | pull_password="Password for registry" # Password with read access to the container registry 6 | server_cert="Base64-encoded server certificate" 7 | server_key="Base64-encoded server key" 8 | -------------------------------------------------------------------------------- /terraform-hpvs/sample-paynow/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hpcr = { 4 | source = "ibm-hyper-protect/hpcr" 5 | version = ">= 0.1.6" 6 | } 7 | } 8 | } 9 | 10 | # archive of the folder containing the pod.yml file. This folder could create additional resources such as files 11 | # to be mounted into containers, environment files etc. This is why all of these files get bundled in a tgz file (base64 encoded) 12 | resource "hpcr_tgz" "contract" { 13 | folder = "compose" 14 | } 15 | 16 | locals { 17 | # contract in clear text 18 | contract = yamlencode({ 19 | "env" : { 20 | "type" : "env", 21 | "logging" : { 22 | "logRouter" : { 23 | "hostname" : var.icl_hostname, 24 | "iamApiKey" : var.icl_iam_apikey, 25 | } 26 | }, 27 | "auths" : { 28 | (var.registry) : { 29 | "username" : var.pull_username, 30 | "password" : var.pull_password 31 | } 32 | }, 33 | "env" : { 34 | "REGISTRY" : var.registry, 35 | "CERT": var.server_cert, 36 | "KEY": var.server_key 37 | } 38 | }, 39 | "workload" : { 40 | "type" : "workload", 41 | "play" : { 42 | "archive" : hpcr_tgz.contract.rendered 43 | } 44 | } 45 | }) 46 | } 47 | 48 | # In this step we encrypt the fields of the contract and sign the env and workload field. The certificate to execute the 49 | # encryption it built into the provider and matches the latest HPCR image. If required it can be overridden. 50 | # We use a temporary, random keypair to execute the signature. This could also be overriden. 51 | resource "hpcr_contract_encrypted" "contract" { 52 | contract = local.contract 53 | } 54 | 55 | resource "local_file" "contract" { 56 | content = hpcr_contract_encrypted.contract.rendered 57 | filename = "${path.module}/build/contract.yml" 58 | } -------------------------------------------------------------------------------- /terraform-hpvs/sample-paynow/variables.tf: -------------------------------------------------------------------------------- 1 | variable "icl_hostname" { 2 | type = string 3 | description = <<-DESC 4 | Host of IBM Cloud Logs. 
This can be 5 | obtained from cloud logs tab under Logging instances 6 | DESC 7 | } 8 | 9 | variable "icl_iam_apikey" { 10 | type = string 11 | sensitive = true 12 | description = <<-DESC 13 | This can be obtained from Access(IAM) under Manage 14 | DESC 15 | } 16 | 17 | variable "registry" { 18 | type = string 19 | description = <<-DESC 20 | Prefix of the container registry used to pull the image 21 | DESC 22 | } 23 | 24 | variable "pull_username" { 25 | type = string 26 | description = <<-DESC 27 | Username to pull from the above registry 28 | DESC 29 | } 30 | 31 | variable "pull_password" { 32 | type = string 33 | description = <<-DESC 34 | Password to pull from the above registry 35 | DESC 36 | } 37 | 38 | variable "server_cert" { 39 | type = string 40 | description = <<-DESC 41 | Base64-encoded server certificate 42 | DESC 43 | } 44 | 45 | variable "server_key" { 46 | type = string 47 | description = <<-DESC 48 | Base64-encoded server key 49 | DESC 50 | } 51 | -------------------------------------------------------------------------------- /terraform-single/README.md: -------------------------------------------------------------------------------- 1 | # Deploys a single zVSI with Terraform 2 | 3 | ## Preparations 4 | 5 | 1. [Install Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) 6 | 2. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 7 | 3. Adjust [my-settings.auto.tfvars](my-settings.auto.tfvars) 8 | - set `ibmcloud_api_key=<your IBM Cloud API key>` 9 | - this will likely require a paying account 10 | - you can create an API key by visiting the [IBM Cloud API keys page](https://cloud.ibm.com/iam/apikeys). Ensure you have 11 | selected the account you want to use before creating the key as the key will be associated with the account you have selected 12 | at the time of creation. 13 | - If you have downloaded your `apikey.json` file from the IBM Cloud UI you may use this command: 14 | `export IC_API_KEY=$(cat ~/apikey.json | jq -r .apikey)` 15 | - set `os_type=zos` to deploy a z/OS instance 16 | - set `region=` to use a different region. Example `region=br-sao` 17 | 4. `terraform init` 18 | 19 | ## Create 20 | 21 | 1. `terraform apply` 22 | 23 | ## Destroy 24 | 25 | 1. `terraform destroy` -------------------------------------------------------------------------------- /terraform-single/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | full_zone = "${var.region}-${var.zone}" 3 | profile = var.profile != null ? var.profile : (var.os_type=="zos" ? "mz2-2x16" : "bz2-2x8") 4 | image_name = var.image_name != null ? var.image_name : (var.os_type=="zos" ?
".*zos.*" : ".*ubuntu.*s390x.*") 5 | } -------------------------------------------------------------------------------- /terraform-single/main.tf: -------------------------------------------------------------------------------- 1 | # Create a VPC 2 | resource "ibm_is_vpc" "testacc_vpc" { 3 | name = var.vpc 4 | } 5 | 6 | # ssh key 7 | resource "ibm_is_ssh_key" "testacc_sshkey" { 8 | name = var.ssh_public_key_name 9 | public_key = file(var.ssh_public_key) 10 | } 11 | 12 | # subnetwork 13 | resource "ibm_is_subnet" "testacc_subnet" { 14 | name = var.subnetwork_name 15 | vpc = ibm_is_vpc.testacc_vpc.id 16 | zone = local.full_zone 17 | total_ipv4_address_count = var.total_ipv4_address_count 18 | } 19 | 20 | # security group 21 | resource "ibm_is_security_group" "testacc_security_group" { 22 | name = var.vsi_name 23 | vpc = ibm_is_vpc.testacc_vpc.id 24 | } 25 | 26 | # rule that allows the VSI to make outbound connections, this is required 27 | # to connect to the logDNA instance as well as to docker to pull the image 28 | resource "ibm_is_security_group_rule" "testacc_security_group_rule_outbound" { 29 | group = ibm_is_security_group.testacc_security_group.id 30 | direction = "outbound" 31 | remote = "0.0.0.0/0" 32 | } 33 | 34 | # Configure Security Group Rule to open SSH 35 | resource "ibm_is_security_group_rule" "testacc_security_group_rule_ssh" { 36 | group = ibm_is_security_group.testacc_security_group.id 37 | direction = "inbound" 38 | remote = "0.0.0.0/0" 39 | tcp { 40 | port_min = 22 41 | port_max = 22 42 | } 43 | } 44 | 45 | # Configure Security Group Rule to open DB2 46 | resource "ibm_is_security_group_rule" "testacc_security_group_rule_db2" { 47 | group = ibm_is_security_group.testacc_security_group.id 48 | direction = "inbound" 49 | remote = "0.0.0.0/0" 50 | tcp { 51 | port_min = 8100 52 | port_max = 8100 53 | } 54 | } 55 | 56 | # Images 57 | data "ibm_is_images" "vpc_images" { 58 | } 59 | locals { 60 | image = [for image in data.ibm_is_images.vpc_images.images : image if length(regexall(local.image_name, image.name)) > 0][0] 61 | } 62 | 63 | # vsi 64 | resource "ibm_is_instance" "testacc_vsi" { 65 | name = var.vsi_name 66 | image = local.image.id 67 | profile = local.profile 68 | 69 | primary_network_interface { 70 | subnet = ibm_is_subnet.testacc_subnet.id 71 | security_groups = [ibm_is_security_group.testacc_security_group.id] 72 | } 73 | 74 | vpc = ibm_is_vpc.testacc_vpc.id 75 | zone = "${var.region}-${var.zone}" 76 | keys = [ibm_is_ssh_key.testacc_sshkey.id] 77 | } 78 | 79 | # Floating IP 80 | resource "ibm_is_floating_ip" "testacc_floatingip" { 81 | name = var.vsi_name 82 | target = ibm_is_instance.testacc_vsi.primary_network_interface[0].id 83 | } 84 | -------------------------------------------------------------------------------- /terraform-single/my-settings.auto.tfvars-template: -------------------------------------------------------------------------------- 1 | ibmcloud_api_key = "Your IBM Cloud API key" 2 | # os_type="zos" # Set if you want to create a z/OS 3 | # ssh_public_key_name = "ibmcloud" 4 | # ssh_public_key = "/path/to/ibmcloud.pub" 5 | # region="ca-tor" # Required for z/OS. 
Options are eu-gb, jp-tok, br-sao, ca-tor 6 | # image_name = ".*zos.*dev-test.*" # Required for z/OS 7 | # Any other variable you want to set (see variables.tf) 8 | -------------------------------------------------------------------------------- /terraform-single/outputs.tf: -------------------------------------------------------------------------------- 1 | # log the floating IP for convenience 2 | output "ip" { 3 | value = resource.ibm_is_floating_ip.testacc_floatingip.address 4 | description = "The public IP address of the VSI" 5 | } 6 | -------------------------------------------------------------------------------- /terraform-single/provider.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = "Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: https://cloud.ibm.com/iam#/apikeys" 3 | } 4 | 5 | terraform { 6 | required_providers { 7 | ibm = { 8 | source = "IBM-Cloud/ibm" 9 | version = ">= 1.43.0" 10 | } 11 | } 12 | } 13 | 14 | # Configure the IBM Provider 15 | provider "ibm" { 16 | ibmcloud_api_key = var.ibmcloud_api_key 17 | region = var.region 18 | } -------------------------------------------------------------------------------- /terraform-single/variables.tf: -------------------------------------------------------------------------------- 1 | variable "os_type" { 2 | default = "zlinux" 3 | } 4 | variable "region" { 5 | default = "jp-tok" 6 | } 7 | 8 | variable "zone" { 9 | default = "1" 10 | } 11 | 12 | variable "vpc" { 13 | default = "terraform-test" 14 | } 15 | 16 | variable "ssh_public_key_name" { 17 | default = "terraform-test" 18 | } 19 | 20 | variable "ssh_public_key" { 21 | default = "~/.ssh/id_rsa.pub" 22 | } 23 | 24 | variable "subnetwork_name" { 25 | default = "terraform-test" 26 | } 27 | 28 | variable "total_ipv4_address_count" { 29 | default = 256 30 | } 31 | 32 | variable "vsi_name" { 33 | default = "terraform-test" 34 | } 35 | 36 | variable "image_name" { 37 | # Regular expresions allowed 38 | default = null # Default depends on os_type - see locals.tf 39 | } 40 | 41 | variable "profile" { 42 | default = null # Default depends on os_type - see locals.tf 43 | } 44 | 45 | variable "security_group_name" { 46 | default = "terraform-test" 47 | } -------------------------------------------------------------------------------- /terraform-single/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | } -------------------------------------------------------------------------------- /terraform-vpnserver/README.md: -------------------------------------------------------------------------------- 1 | # Deploys a VPN Server with Terraform 2 | 3 | This is a sample Terraform configuration for generating a client-to-site VPN server on IBM Cloud. This creates a single subnetwork VPC 4 | and deploys a client-to-site VPN server into it. This configuration produces certificates for use by the VPN server and client and stores them in an IBM Cloud Secrets Manager instance (free plan). It does not establish separate, by userid, login credentials, so anyone with the resulting certificate file will be able to 5 | establish a VPN to the environment. Thanks to now archived [Helpers for Secrets Manager](https://github.com/we-work-in-the-cloud/terraform-ibm-secrets-manager) for samples on how to import certificates. 
Once the IBM Terraform provider has been enriched to import certificates directly, it should be used instead of this project. 6 | 7 | ## Preparations 8 | 9 | 1. [Install Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) 10 | 2. `cp my-settings.auto.tfvars-template my-settings.auto.tfvars` 11 | 3. Adjust [my-settings.auto.tfvars](my-settings.auto.tfvars) 12 | - set `ibmcloud_api_key=<your IBM Cloud API key>` 13 | - this will likely require a paying account 14 | - you can create an API key by visiting the [IBM Cloud API keys page](https://cloud.ibm.com/iam/apikeys). Ensure you have 15 | selected the account you want to use before creating the key as the key will be associated with the account you have selected 16 | at the time of creation. 17 | - If you have downloaded your `apikey.json` file from the IBM Cloud UI you may use this command: 18 | `export IC_API_KEY=$(cat ~/apikey.json | jq -r .apikey)` 19 | - set `region=` to use a different region. Example `region=br-sao` 20 | 4. Make sure your IAM policies allow the IBM Cloud VPN Server to read Secrets Manager resources. [https://cloud.ibm.com/docs/vpc?topic=vpc-client-to-site-authentication#creating-iam-service-to-service](https://cloud.ibm.com/docs/vpc?topic=vpc-client-to-site-authentication#creating-iam-service-to-service) 21 | 5. `terraform init` 22 | 23 | ## Create 24 | 25 | You must run the create in two phases as the URL of the Secrets Manager must be set once the resource is created and cannot be determined beforehand. 26 | 27 | 1. `terraform apply -target=module.phase1` 28 | 2. `terraform apply` 29 | 30 | ## Destroy 31 | 32 | 1. `terraform destroy` 33 | -------------------------------------------------------------------------------- /terraform-vpnserver/main.tf: -------------------------------------------------------------------------------- 1 | 2 | module "phase1" { 3 | 4 | source = "./phase1" 5 | 6 | region = var.region 7 | ibmcloud_api_key = var.ibmcloud_api_key 8 | cert_service_name = var.cert_service_name 9 | vpc = var.vpc 10 | } 11 | 12 | module "phase2" { 13 | 14 | source = "./phase2" 15 | 16 | cert_service_name = var.cert_service_name 17 | region = var.region 18 | ibmcloud_api_key = var.ibmcloud_api_key 19 | ca_cert_common_name = var.ca_cert_common_name 20 | vpnserver_cert_common_name = var.vpnserver_cert_common_name 21 | vpnclient_cert_common_name = var.vpnclient_cert_common_name 22 | zone = var.zone 23 | subnetwork_name = var.subnetwork_name 24 | total_ipv4_address_count = var.total_ipv4_address_count 25 | security_group_name = var.security_group_name 26 | vpn_port = var.vpn_port 27 | vpnserver_name = var.vpnserver_name 28 | vpnserver_client_ip_pool = var.vpnserver_client_ip_pool 29 | vpc_address_prefixes_map = module.phase1.vpc_address_prefixes_map 30 | vpc_guid = module.phase1.vpc_guid 31 | secrets_manager_guid = module.phase1.secrets_manager_guid 32 | } 33 | 34 | 35 | resource "local_file" "ovpn" { 36 | filename = "${var.region}.ovpn" 37 | content = "client\ndev tun\nproto ${module.phase2.vpn_protocol}\nport ${var.vpn_port}\nremote ${module.phase2.vpn_hostname}\nresolv-retry infinite\nremote-cert-tls server\nnobind\n\nauth SHA256\ncipher AES-256-GCM\nverb 3\nreneg-sec 0\n\n${module.phase2.vpn_ca_cert_content}\n\n${module.phase2.vpn_client_cert_content}\n\n${module.phase2.vpn_client_key_content}" 38 | } -------------------------------------------------------------------------------- /terraform-vpnserver/my-settings.auto.tfvars-template: --------------------------------------------------------------------------------
1 | ibmcloud_api_key = "Your IBM Cloud API key" 2 | region="us-east" # Options are eu-gb, jp-tok, br-sao, ca-tor, us-east 3 | 4 | # Any other variable you want to set (see variables.tf) 5 | -------------------------------------------------------------------------------- /terraform-vpnserver/phase1/main.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | resource "ibm_resource_instance" "secrets_mgr" { 4 | name = var.cert_service_name 5 | location = var.region 6 | plan = "trial" 7 | service = "secrets-manager" 8 | } 9 | 10 | # Create a VPC 11 | resource "ibm_is_vpc" "vpc" { 12 | name = var.vpc 13 | } 14 | 15 | data "ibm_is_vpc_address_prefixes" "vpc_addresses" { 16 | vpc = ibm_is_vpc.vpc.id 17 | } 18 | 19 | output "secrets_manager_guid" { 20 | value = ibm_resource_instance.secrets_mgr.guid 21 | } 22 | 23 | output "vpc_guid" { 24 | value = ibm_is_vpc.vpc.id 25 | } 26 | 27 | output "vpc_address_prefixes_map" { 28 | value = tomap({ for addrs in data.ibm_is_vpc_address_prefixes.vpc_addresses.address_prefixes: addrs.id => addrs }) 29 | } -------------------------------------------------------------------------------- /terraform-vpnserver/phase1/provider.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = "Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: https://cloud.ibm.com/iam#/apikeys" 3 | } 4 | 5 | terraform { 6 | required_providers { 7 | ibm = { 8 | source = "IBM-Cloud/ibm" 9 | version = ">= 1.45.0" 10 | } 11 | } 12 | } 13 | 14 | # Configure the IBM Provider 15 | provider "ibm" { 16 | ibmcloud_api_key = var.ibmcloud_api_key 17 | region = var.region 18 | } -------------------------------------------------------------------------------- /terraform-vpnserver/phase1/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | } 4 | 5 | variable "cert_service_name" { 6 | type = string 7 | } 8 | 9 | variable "vpc" { 10 | type = string 11 | } 12 | -------------------------------------------------------------------------------- /terraform-vpnserver/phase2/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | full_vpnclient_cert_common_name = "${var.region} ${var.vpnclient_cert_common_name}" 3 | full_ca_cert_common_name = "${var.region} ${var.ca_cert_common_name}" 4 | full_vpnserver_cert_common_name = "${var.region} ${var.vpnserver_cert_common_name}" 5 | rest_endpoint = "https://${var.secrets_manager_guid}.${var.region}.secrets-manager.appdomain.cloud" 6 | full_zone = "${var.region}-${var.zone}" 7 | vpn_protocol = "udp" 8 | } -------------------------------------------------------------------------------- /terraform-vpnserver/phase2/main.tf: -------------------------------------------------------------------------------- 1 | 2 | data "ibm_iam_auth_token" "tokendata" {} 3 | 4 | provider "restapi" { 5 | uri = local.rest_endpoint 6 | debug = true 7 | write_returns_object = true 8 | headers = { 9 | Authorization = data.ibm_iam_auth_token.tokendata.iam_access_token 10 | } 11 | } 12 | 13 | # Create private key for certificate authority 14 | resource "tls_private_key" "ca_private_key" { 15 | algorithm = "RSA" 16 | rsa_bits = "2048" 17 | } 18 | 19 | # Create private key for vpn server 20 | resource "tls_private_key" "vpnserver_private_key" { 21 | algorithm = "RSA" 22 | rsa_bits = "2048" 23 | } 24 | 25 | # Create private key 
for vpn client 26 | resource "tls_private_key" "vpnclient_private_key" { 27 | algorithm = "RSA" 28 | rsa_bits = "2048" 29 | } 30 | 31 | # Create CA certificate 32 | resource "tls_self_signed_cert" "ca_cert" { 33 | private_key_pem = tls_private_key.ca_private_key.private_key_pem 34 | allowed_uses = [ 35 | "cert_signing" 36 | ] 37 | validity_period_hours = 8766*5 # 5 years 38 | subject { 39 | common_name = local.full_ca_cert_common_name 40 | } 41 | is_ca_certificate = true 42 | } 43 | 44 | # Create certificate request for VPN Server 45 | resource "tls_cert_request" "vpnserver_csr" { 46 | private_key_pem = tls_private_key.vpnserver_private_key.private_key_pem 47 | 48 | subject{ 49 | common_name = local.full_vpnserver_cert_common_name 50 | } 51 | 52 | } 53 | 54 | # Create certificate request for VPN Client 55 | resource "tls_cert_request" "vpnclient_csr" { 56 | private_key_pem = tls_private_key.vpnclient_private_key.private_key_pem 57 | 58 | subject{ 59 | common_name = local.full_vpnclient_cert_common_name 60 | } 61 | 62 | } 63 | 64 | # Sign the certificate request for VPN Server 65 | resource "tls_locally_signed_cert" "vpnserver_cert" { 66 | cert_request_pem = tls_cert_request.vpnserver_csr.cert_request_pem 67 | ca_private_key_pem = tls_self_signed_cert.ca_cert.private_key_pem 68 | ca_cert_pem = tls_self_signed_cert.ca_cert.cert_pem 69 | 70 | validity_period_hours = 8766*1 # 1 year 71 | 72 | allowed_uses = [ 73 | "key_encipherment", 74 | "digital_signature", 75 | "server_auth", 76 | ] 77 | } 78 | 79 | # Sign the certificate request for VPN client 80 | resource "tls_locally_signed_cert" "vpnclient_cert" { 81 | cert_request_pem = tls_cert_request.vpnclient_csr.cert_request_pem 82 | ca_private_key_pem = tls_self_signed_cert.ca_cert.private_key_pem 83 | ca_cert_pem = tls_self_signed_cert.ca_cert.cert_pem 84 | 85 | validity_period_hours = 8766*1 # 1 year 86 | 87 | allowed_uses = [ 88 | "key_encipherment", 89 | "digital_signature", 90 | "client_auth", 91 | ] 92 | } 93 | 94 | resource "restapi_object" "my_secret_group" { 95 | path = "/api/v1/secret_groups" 96 | data = jsonencode({ 97 | metadata = { 98 | collection_type = "application/vnd.ibm.secrets-manager.secret.group+json" 99 | collection_total = 1 100 | } 101 | resources = [{ 102 | name = "${var.cert_service_name}-secret-group" 103 | description = "Secrets group for ${var.cert_service_name}" 104 | }] 105 | }) 106 | id_attribute = "resources/0/id" 107 | debug = true 108 | } 109 | 110 | resource "restapi_object" "vpn_ca_cert" { 111 | path = "/api/v1/secrets/imported_cert" 112 | data = jsonencode({ 113 | metadata = { 114 | collection_type = "application/vnd.ibm.secrets-manager.secret+json" 115 | collection_total = 1 116 | } 117 | resources = [{ 118 | name = "${var.region}-imported-ca-cert" 119 | description = "${local.full_ca_cert_common_name}" 120 | secret_group_id = restapi_object.my_secret_group.id 121 | certificate = tls_self_signed_cert.ca_cert.cert_pem 122 | private_key = tls_self_signed_cert.ca_cert.private_key_pem 123 | intermediate = null 124 | }] 125 | }) 126 | id_attribute = "resources/0/id" 127 | debug = true 128 | } 129 | 130 | resource "restapi_object" "vpnserver_cert" { 131 | path = "/api/v1/secrets/imported_cert" 132 | data = jsonencode({ 133 | metadata = { 134 | collection_type = "application/vnd.ibm.secrets-manager.secret+json" 135 | collection_total = 1 136 | } 137 | resources = [{ 138 | name = "${var.region}-imported-vpnserver-cert" 139 | description = "${local.full_vpnserver_cert_common_name}" 140 | secret_group_id = 
restapi_object.my_secret_group.id 141 | certificate = tls_locally_signed_cert.vpnserver_cert.cert_pem 142 | private_key = tls_private_key.vpnserver_private_key.private_key_pem 143 | intermediate = tls_self_signed_cert.ca_cert.cert_pem 144 | }] 145 | }) 146 | id_attribute = "resources/0/id" 147 | debug = true 148 | } 149 | 150 | resource "restapi_object" "vpnclient_cert" { 151 | path = "/api/v1/secrets/imported_cert" 152 | data = jsonencode({ 153 | metadata = { 154 | collection_type = "application/vnd.ibm.secrets-manager.secret+json" 155 | collection_total = 1 156 | } 157 | resources = [{ 158 | name = "${var.region}-imported-vpnclient-cert" 159 | description = "${local.full_vpnclient_cert_common_name}" 160 | secret_group_id = restapi_object.my_secret_group.id 161 | certificate = tls_locally_signed_cert.vpnclient_cert.cert_pem 162 | private_key = tls_private_key.vpnclient_private_key.private_key_pem 163 | intermediate = tls_self_signed_cert.ca_cert.cert_pem 164 | }] 165 | }) 166 | id_attribute = "resources/0/id" 167 | debug = true 168 | } 169 | 170 | 171 | 172 | # subnetwork 173 | resource "ibm_is_subnet" "vpc_subnet" { 174 | name = var.subnetwork_name 175 | vpc = var.vpc_guid 176 | zone = local.full_zone 177 | total_ipv4_address_count = var.total_ipv4_address_count 178 | } 179 | 180 | # security group 181 | resource "ibm_is_security_group" "vpnserver_security_group" { 182 | name = var.security_group_name 183 | vpc = var.vpc_guid 184 | } 185 | 186 | # Configure Security Group Rule to open the VPN port 187 | resource "ibm_is_security_group_rule" "vpnserver_security_group_rule_vpn" { 188 | group = ibm_is_security_group.vpnserver_security_group.id 189 | direction = "inbound" 190 | remote = "0.0.0.0/0" 191 | udp { 192 | port_min = var.vpn_port 193 | port_max = var.vpn_port 194 | } 195 | } 196 | 197 | data "ibm_secrets_manager_secret" "vpnserver_secret" { 198 | instance_id = var.secrets_manager_guid 199 | secret_type = "imported_cert" 200 | secret_id = restapi_object.vpnserver_cert.id 201 | } 202 | 203 | data "ibm_secrets_manager_secret" "vpnclient_ca_secret" { 204 | instance_id = var.secrets_manager_guid 205 | secret_type = "imported_cert" 206 | secret_id = restapi_object.vpnclient_cert.id 207 | } 208 | 209 | 210 | resource "ibm_is_vpn_server" "vpn_server" { 211 | certificate_crn = data.ibm_secrets_manager_secret.vpnserver_secret.crn 212 | client_authentication { 213 | method = "certificate" 214 | client_ca_crn = data.ibm_secrets_manager_secret.vpnclient_ca_secret.crn 215 | } 216 | client_ip_pool = var.vpnserver_client_ip_pool 217 | enable_split_tunneling = true 218 | name = var.vpnserver_name 219 | port = var.vpn_port 220 | protocol = local.vpn_protocol 221 | subnets = [ibm_is_subnet.vpc_subnet.id] 222 | security_groups = [ibm_is_security_group.vpnserver_security_group.id] 223 | } 224 | 225 | 226 | resource "ibm_is_vpn_server_route" "server_routes" { 227 | for_each = var.vpc_address_prefixes_map 228 | vpn_server = ibm_is_vpn_server.vpn_server.id 229 | destination = each.value.cidr 230 | action = "translate" 231 | } 232 | 233 | output "vpn_protocol" { 234 | value = local.vpn_protocol 235 | } 236 | 237 | output "vpn_hostname" { 238 | value = ibm_is_vpn_server.vpn_server.hostname 239 | } 240 | 241 | output "vpn_ca_cert_content" { 242 | value = tls_self_signed_cert.ca_cert.cert_pem 243 | } 244 | 245 | output "vpn_client_cert_content" { 246 | value = tls_locally_signed_cert.vpnclient_cert.cert_pem 247 | } 248 | 249 | output "vpn_client_key_content" { 250 | value = 
tls_private_key.vpnclient_private_key.private_key_pem 251 | } -------------------------------------------------------------------------------- /terraform-vpnserver/phase2/provider.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | description = "Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: https://cloud.ibm.com/iam#/apikeys" 3 | } 4 | 5 | terraform { 6 | required_providers { 7 | restapi = { 8 | source = "Mastercard/restapi" 9 | version = ">= 1.17" 10 | } 11 | 12 | ibm = { 13 | source = "IBM-Cloud/ibm" 14 | version = ">= 1.45.0" 15 | } 16 | } 17 | } 18 | 19 | # Configure the IBM Provider 20 | provider "ibm" { 21 | ibmcloud_api_key = var.ibmcloud_api_key 22 | region = var.region 23 | } -------------------------------------------------------------------------------- /terraform-vpnserver/phase2/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cert_service_name" { 2 | type = string 3 | } 4 | 5 | variable "region" { 6 | type = string 7 | } 8 | 9 | variable "secrets_manager_guid" { 10 | type = string 11 | } 12 | 13 | variable "ca_cert_common_name" { 14 | type = string 15 | } 16 | 17 | variable "vpnserver_cert_common_name" { 18 | type = string 19 | } 20 | 21 | variable "vpnclient_cert_common_name" { 22 | type = string 23 | } 24 | 25 | variable "zone" { 26 | type = string 27 | } 28 | 29 | variable "subnetwork_name" { 30 | type = string 31 | } 32 | 33 | variable "total_ipv4_address_count" { 34 | type = number 35 | } 36 | 37 | variable "security_group_name" { 38 | type = string 39 | } 40 | 41 | variable "vpn_port" { 42 | type = number 43 | } 44 | 45 | variable "vpnserver_name" { 46 | type = string 47 | } 48 | 49 | variable "vpnserver_client_ip_pool" { 50 | type = string 51 | } 52 | 53 | variable "vpc_guid" { 54 | type = string 55 | } 56 | 57 | variable "vpc_address_prefixes_map" { 58 | type = map 59 | } -------------------------------------------------------------------------------- /terraform-vpnserver/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ibmcloud_api_key" { 2 | type = string 3 | description = "Enter your IBM Cloud API Key, you can get your IBM Cloud API key using: https://cloud.ibm.com/iam#/apikeys" 4 | } 5 | 6 | variable "region" { 7 | type = string 8 | default = "us-east" 9 | } 10 | 11 | variable "zone" { 12 | type = string 13 | default = "1" 14 | } 15 | 16 | variable "vpc" { 17 | type = string 18 | default = "terraform-test" 19 | } 20 | 21 | variable "subnetwork_name" { 22 | type = string 23 | default = "terraform-test" 24 | } 25 | 26 | variable "total_ipv4_address_count" { 27 | default = 256 28 | } 29 | 30 | variable "security_group_name" { 31 | type = string 32 | default = "terraform-test" 33 | } 34 | 35 | variable "vpn_port" { 36 | default = 1194 37 | } 38 | 39 | variable "cert_service_name" { 40 | type = string 41 | default = "SecretsManager-vpnserver" 42 | } 43 | 44 | variable "vpnserver_name" { 45 | type = string 46 | default = "terraform-test-vpnserver" 47 | } 48 | 49 | variable "vpnserver_client_ip_pool" { 50 | type = string 51 | default = "10.2.0.0/16" 52 | } 53 | 54 | variable "ca_cert_common_name" { 55 | type = string 56 | default = "VPN Server CA" 57 | } 58 | 59 | variable "vpnserver_cert_common_name" { 60 | type = string 61 | default = "VPN Server" 62 | } 63 | 64 | variable "vpnclient_cert_common_name" { 65 | type = string 66 | default = "VPN Client" 67 | 
} 68 | --------------------------------------------------------------------------------