├── LICENSE.txt
├── README.md
├── deployment.gif
├── env-vars.sample
├── main.tf
├── modules
│   ├── ceph-client
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── variables.tf
│   ├── ceph-deployer
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── variables.tf
│   ├── ceph-mds
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── variables.tf
│   ├── ceph-monitor
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── variables.tf
│   ├── ceph-osd
│   │   ├── main.tf
│   │   ├── output.tf
│   │   ├── storage
│   │   │   ├── main.tf
│   │   │   └── variables.tf
│   │   └── variables.tf
│   ├── network.full
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── variables.tf
│   └── network
│       ├── main.tf
│       ├── output.tf
│       └── variables.tf
├── outputs.tf
├── provider.tf
├── scripts
│   ├── add_to_etc_hosts.sh
│   ├── add_to_known_hosts.sh
│   ├── ceph.config
│   ├── ceph_client_setup.sh
│   ├── ceph_deploy_client.sh
│   ├── ceph_deploy_mds.sh
│   ├── ceph_deploy_osd.sh
│   ├── ceph_firewall_setup.sh
│   ├── ceph_new_cluster.sh
│   ├── ceph_yum_repo
│   ├── delay.sh
│   ├── install_ceph_deploy.sh
│   ├── install_ssh_key.sh
│   ├── vm_init.sh
│   ├── vm_post_setup.sh
│   ├── vm_pre_setup.sh
│   ├── vm_setup.sh
│   └── yum_repo_setup.sh
├── variables.ex1
└── variables.ex2

/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
2 |
3 | This software is dual-licensed to you under the Universal Permissive License (UPL) and Apache License 2.0. See below for license terms. You may choose either license, or both.
4 | ____________________________
5 | The Universal Permissive License (UPL), Version 1.0
6 | Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
7 |
8 | Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this software, associated documentation and/or data (collectively the "Software"), free of charge and under any and all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or (ii) the Larger Works (as defined below), to deal in both
9 |
10 | (a) the Software, and
11 | (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software (each a "Larger Work" to which the Software is contributed by such licensors),
12 |
13 | without restriction, including without limitation the rights to copy, create derivative works of, display, perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
14 |
15 | This license is subject to the following condition:
16 |
17 | The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must be included in all copies or substantial portions of the Software.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 |
21 | The Apache Software License, Version 2.0
22 | Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
23 | 24 | Licensed under the Apache License, Version 2.0 (the "License"); You may not use this product except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. A copy of the license is also reproduced below. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 25 | 26 | Apache License 27 | 28 | Version 2.0, January 2004 29 | 30 | http://www.apache.org/licenses/ 31 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 32 | 1. Definitions. 33 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 34 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 35 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 36 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 37 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 38 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 39 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 40 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 41 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 42 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 43 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 44 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 45 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: 46 | You must give any other recipients of the Work or Derivative Works a copy of this License; and 47 | You must cause any modified files to carry prominent notices stating that You changed the files; and 48 | You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 49 | If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 50 | 51 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 52 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 53 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 54 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 55 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 56 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 57 | END OF TERMS AND CONDITIONS 58 | 59 | APPENDIX: How to apply the Apache License to your work. 
60 |
61 | To apply the Apache License to your work, attach the following
62 | boilerplate notice, with the fields enclosed by brackets "[]"
63 | replaced with your own identifying information. (Don't include
64 | the brackets!) The text should be enclosed in the appropriate
65 | comment syntax for the file format. We also recommend that a
66 | file or class name and description of purpose be included on the
67 | same "printed page" as the copyright notice for easier
68 | identification within third-party archives.
69 |
70 | Copyright [yyyy] [name of copyright owner]
71 |
72 | Licensed under the Apache License, Version 2.0 (the "License");
73 | you may not use this file except in compliance with the License.
74 | You may obtain a copy of the License at
75 |
76 | http://www.apache.org/licenses/LICENSE-2.0
77 |
78 | Unless required by applicable law or agreed to in writing, software
79 | distributed under the License is distributed on an "AS IS" BASIS,
80 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
81 | See the License for the specific language governing permissions and
82 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [terraform]: https://terraform.io
2 | [oracle linux]: https://www.oracle.com/linux/index.html
3 | [ceph]: https://ceph.com/
4 | [ceph rel note]: https://docs.oracle.com/cd/E52668_01/E66514/E66514.pdf
5 | [ceph 3.0 rel note]: https://blogs.oracle.com/linux/announcing-release-3-of-ceph-storage-for-oracle-linux
6 | [OCI]: https://cloud.oracle.com/cloud-infrastructure
7 | [oci provider]: https://github.com/oracle/terraform-provider-oci/releases
8 | [SSH key pair]: https://docs.us-phoenix-1.oraclecloud.com/Content/GSG/Tasks/creatingkeys.htm
9 | [API signing]: https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm
10 | [yum terraform]: http://public-yum.oracle.com/repo/OracleLinux/OL7/developer/x86_64/getPackage/terraform-0.11.3-1.el7.x86_64.rpm
11 | [yum oci provider]: http://public-yum.oracle.com/repo/OracleLinux/OL7/developer/x86_64/getPackage/terraform-provider-oci-2.0.7-1.el7.x86_64.rpm
12 | [ocids and keys]: https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm
13 |
14 | # Terraform Installer for Ceph in Oracle Cloud Infrastructure
15 | # Version: 1.1
16 |
17 | ## About
18 |
19 | The scripts in this repository allow you to streamline and/or replicate your Ceph deployment in Oracle Cloud Infrastructure (OCI).
20 |
21 | [Ceph][ceph] is an open source distributed storage system designed for performance, reliability and scalability.
22 | It provides interfaces for object, block, and file-level storage.
23 | Ceph is now widely used and fully supported on [Oracle Linux][oracle linux], as described in the [Release 3 of Ceph Storage for Oracle Linux][ceph 3.0 rel note] announcement.
24 |
25 | [Terraform][terraform] is Open Source Software (OSS) for building, changing, and versioning Cloud infrastructure safely and efficiently.
26 | The [Terraform Provider for OCI][oci provider] allows one to create the necessary infrastructure resources and configure them in OCI.
27 |
28 | The Terraform Installer for Ceph provides Terraform scripts for installing Ceph Clusters in [Oracle Cloud Infrastructure][OCI] (OCI).
29 | It consists of a set of Terraform scripts and modules, bash scripts, and example configurations that can
30 | be used to provision and configure the resources needed to run a Ceph Storage Cluster on OCI.
31 |
32 | ## Ceph Cluster Configuration Overview
33 |
34 | A typical Ceph Cluster includes multiple virtual or bare metal machines, referred to as nodes, each serving one (or more) of the following roles:
35 | - Deployer - for installing Ceph on all other nodes
36 | - Monitor - for maintaining the maps of the cluster state and authentication - typically 3 or 5 (an odd number) for high availability
37 | - Manager - for keeping track of the cluster state and exposing the cluster information
38 | - Object Storage Daemons (OSDs) - for storing and handling data - typically many of them for redundancy
39 | - Metadata Server (MDS) - for storing metadata on behalf of the Ceph Filesystem
40 |
41 | Deploying Ceph involves creating infrastructure resources (e.g., compute, network, storage), setting them up for Ceph installation, installing various packages on all machines,
42 | and finally configuring and deploying the cluster. This requires a fair bit of knowledge about OCI and Ceph. Carrying out the entire process manually is tedious and error-prone.
43 |
44 | However, by using the scripts in this repository, you can create the necessary infrastructure resources and deploy a Ceph cluster using those resources.
45 | The behavior of the scripts is controlled by various configuration files. By changing the variables defined in these files, you can control what resources are created in OCI
46 | (e.g., the number and type of VMs to create for various Ceph nodes), how the Ceph Cluster is configured (e.g., the level of replication), etc.
47 |
48 |
49 | ## Creating a Ceph Cluster
50 |
51 | This README guides you through the following steps to create the cluster shown in the picture below:
52 |
53 | - Install Terraform
54 | - Download the scripts
55 | - Setup for access to OCI
56 | - Customize the Ceph Cluster
57 | - Execute the scripts
58 | - Login to a Ceph Admin node and check the status of the Cluster
59 |
60 | ![](./deployment.gif)
61 |
62 | ### Prerequisites
63 | Designate a machine to run Terraform. This machine needs to have the credentials (e.g., the .pem key files under ~/.oci) to access your tenancy in OCI.
64 |
65 | This machine should also have an RSA key pair generated for the Linux user on this machine.
66 | The public key is supplied during the creation of the compute nodes to allow the Linux user to perform password-less SSH logins to the newly created VMs.
67 |
68 | If the key pair does not already exist, generate it using:
69 | ```
70 | $ ssh-keygen -t rsa
71 | ```
72 | ### Install Terraform
73 | Add and/or enable the following yum repository:
74 | ```
75 | [ol7_developer]
76 | name=Oracle Linux $releasever Development Packages ($basearch)
77 | baseurl=https://yum.oracle.com/repo/OracleLinux/OL7/developer/$basearch/
78 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
79 | gpgcheck=1
80 | enabled=1
81 | ```
82 | Then execute:
83 | ```
84 | $ yum install terraform
85 | $ yum install terraform-provider-oci
86 | ```
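As a quick sanity check, you can print the installed version. The version string below is illustrative; the actual value depends on what the repository ships at the time you install:
```
$ terraform version
Terraform v0.11.3
```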
87 | ### Download Scripts
88 | Download the Terraform Ceph scripts from this repository.
89 | ```
90 | $ git clone https://github.com/oracle/terraform-ceph-installer.git
91 | $ cd terraform-ceph-installer
92 | ```
93 | ### Setup for access to your OCI tenant
94 | Copy and edit the sample environment file to fill in the information particular to your tenancy, which includes the tenancy, region, user id, and the credentials to access OCI.
95 | This is the same information you would require to use the OCI CLI. Please refer to [Required Keys and OCIDs][ocids and keys] to find the OCIDs and generate the keys needed.
96 |
97 | Source it to export the variables in the file.
98 | ```
99 | $ cp env-vars.sample env-vars
100 | $ vi env-vars
101 | $ . env-vars
102 | ```
103 |
104 | ### Customize for your OCI tenant
105 | Create a copy (with a .tf extension) of one of the given examples and modify it to fit your needs and your environment.
106 | The example in variables.ex1 assumes that you have an existing VCN and subnets in your tenancy and that
107 | you deploy the cluster using those subnets.
108 | ```
109 | $ cp variables.ex1 variables.tf
110 | ```
111 | You will definitely need to edit the following variables:
112 | - existing_vcn_id - The OCID of the existing VCN. You can find it by logging into your tenancy using a browser.
113 | - existing_subnet_ids - The list of OCIDs for the existing subnets. You can find them by logging into your tenancy using a browser.
114 | You may also need to edit the following variables:
115 | - instance_os - The full name of the operating system that you want on all nodes. You can find it by logging into your tenancy using a browser. Use the latest available one, and follow the same format included in the example.
116 | - instance_shapes - The shapes for the compute resources. The default is VM.Standard1.2 for all nodes. You can use different shapes for different types of nodes, i.e., deployer, monitor, osd, mds, and client.
117 | - create_volume - The default is true, which means a block storage volume will be created for each OSD. If you choose a shape with NVMe drives for the OSDs, change it to false.
118 | A sketch of what these settings might look like is shown below.
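For illustration only, the edited settings might end up looking like the following sketch. The OCIDs and the image name are placeholders, not real values, and the sketch assumes the example file defines these settings as Terraform variables with `default` values, which is how `main.tf` consumes them (`var.existing_vcn_id`, etc.):
```
# Hypothetical values - substitute the OCIDs and image name from your own tenancy.
variable "existing_vcn_id" {
  default = "ocid1.vcn.oc1.phx.aaaaaaaa...example"
}

variable "existing_subnet_ids" {
  type    = "list"
  default = [
    "ocid1.subnet.oc1.phx.aaaaaaaa...ad1",
    "ocid1.subnet.oc1.phx.aaaaaaaa...ad2",
  ]
}

variable "instance_os" {
  # Use the latest image name available in your tenancy
  default = "Oracle-Linux-7.4-2018.01.10-0"
}

variable "create_volume" {
  # Set to "false" for OSD shapes with local NVMe drives
  default = "true"
}
```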
119 | ### Execute Scripts
120 | ```
121 | # Initialize your Terraform configuration including the modules
122 | $ terraform init
123 | # See what Terraform will do before actually doing it
124 | $ terraform plan
125 | # Provision resources and configure the Ceph cluster on OCI
126 | $ terraform apply
127 | ```
128 |
129 |
130 | Upon successful completion, the scripts will print out the names and IP addresses of all the compute nodes, which can then be used to access the nodes.
131 | The tail end of the output will look like the following:
132 |
133 | ```
134 | Outputs:
135 | ceph_client_hostname = [
136 | test-ceph-client
137 | ]
138 | ceph_client_ip = [
139 | 100.100.45.31
140 | ]
141 | ceph_deployer_hostname = test-ceph-deployer
142 | ceph_deployer_ip = 100.100.45.30
143 | ceph_mds_hostname_list = [
144 | test-ceph-mds-0
145 | ]
146 | ceph_mds_ip_list = [
147 | 100.100.45.49
148 | ]
149 | ceph_monitor_hostname_list = [
150 | test-ceph-monitor-0,
151 | test-ceph-monitor-1
152 | ]
153 | ceph_monitor_ip_list = [
154 | 100.100.45.33,
155 | 100.100.46.30
156 | ]
157 | ceph_osd_hostname_list = [
158 | test-ceph-osd-0,
159 | test-ceph-osd-1,
160 | test-ceph-osd-2,
161 | test-ceph-osd-3
162 | ]
163 | ceph_osd_ip_list = [
164 | 100.100.45.33,
165 | 100.100.45.32,
166 | 100.100.46.31,
167 | 100.100.48.30
168 | ]
169 | ```
170 |
171 |
172 | If you need to list the IP addresses again in the future, enter:
173 | ```
174 | $ terraform show
175 | ```
176 |
177 |
178 | ### Check the status of the Cluster
179 | Log in to one of the nodes using an IP address from the output above, then query Ceph:
180 | ```bash
181 | $ ssh -l opc <node_ip_address>
182 | $ ceph status
183 | $ df -h
184 | ```
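Beyond `ceph status`, a few other standard Ceph commands are useful for a first sanity check. These are illustrative; run them on the client or on any node where the admin keyring has been installed:
```bash
# Overall health summary (HEALTH_OK when the cluster is clean)
$ ceph health
# How the OSDs map onto hosts, and whether each one is up/in
$ ceph osd tree
# Cluster-wide and per-pool capacity usage
$ ceph df
```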
185 |
186 | ## Known issues and limitations
187 | * The scripts don't check for the validity of the input variables or any inconsistencies among them.
188 | Terraform will execute but fail if the compute shape or image doesn't exist in your environment, or if
189 | the specified subnet id for a compute node doesn't belong to the specified availability domain for the same node.
190 | It is your responsibility to make sure the inputs are valid and consistent with one another.
191 | * Uppercase letters on network resource names may cause problems.
192 |
193 |
--------------------------------------------------------------------------------
/deployment.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/terraform-ceph-installer/2308800c6d1ac7b7abcb8935ee24245f2e95b2fa/deployment.gif
--------------------------------------------------------------------------------
/env-vars.sample:
--------------------------------------------------------------------------------
1 |
2 | ### Authentication
3 | export TF_VAR_tenancy_ocid=
4 | export TF_VAR_user_ocid=
5 | export TF_VAR_fingerprint=
6 | export TF_VAR_private_key_path=
7 | # Example: export TF_VAR_private_key_path=/root/.oraclebmc/bmcs_api_key.pem
8 | export TF_VAR_private_key_password=
9 | export TF_VAR_region=
10 | #Example: export TF_VAR_region=us-phoenix-1
11 | #Example: export TF_VAR_region=us-ashburn-1
12 |
13 | ### Compartment/Region
14 | export TF_VAR_compartment_ocid=
15 |
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
1 |
2 | #-------------------------------------------------------------------------------------------
3 | # Create and/or Identify the Network / Sub-networks
4 | #-------------------------------------------------------------------------------------------
5 | module "ceph_network" {
6 | source = "modules/network"
7 | tenancy_ocid = "${var.tenancy_ocid}"
8 | compartment_id = "${var.compartment_ocid}"
9 | create_new_vcn = "${var.create_new_vcn}"
10 | existing_vcn_id = "${var.existing_vcn_id}"
11 | vcn_cidr = "${var.vcn_cidr}"
12 | vcn_name = "${var.vcn_name}"
13 | create_new_subnets = "${var.create_new_subnets}"
14 | new_subnet_count = "${var.new_subnet_count}"
15 | existing_subnet_ids = "${var.existing_subnet_ids}"
16 | availability_domain_index_list = "${var.availability_domain_index_list_for_subnets}"
17 | gateway_name = "${var.gateway_name}"
18 | route_table_name = "${var.route_table_name}"
19 | subnet_name_prefix = "${var.subnet_name_prefix}"
20 | subnet_cidr_blocks = "${var.subnet_cidr_blocks}"
21 | }
22 |
23 | #-------------------------------------------------------------------------------------------
24 | # Create and Setup the Ceph Deployer
25 | #-------------------------------------------------------------------------------------------
26 | module "ceph_deployer" {
27 | source = "modules/ceph-deployer/"
28 | tenancy_ocid = "${var.tenancy_ocid}"
29 | compartment_ocid = "${var.compartment_ocid}"
30 | instance_os = "${var.instance_os}"
31 | availability_domain_index = "${var.availability_domain_index_for_deployer[0]}"
32 | hostname = "${var.deployer_hostname}"
33 | shape = "${var.instance_shapes["deployer"]}"
34 | subnet_id = "${element(module.ceph_network.subnet_id_list, var.availability_domain_index_for_deployer[0]-1)}"
35 | ssh_public_key_file = "${var.ssh_public_key_file}"
36 | ssh_private_key_file = "${var.ssh_private_key_file}"
37 | ssh_username = "${var.ssh_username}"
38 | instance_create_timeout = "${var.instance_create_timeout}"
39 | scripts_src_directory = "${var.scripts_src_directory}"
40 | scripts_dst_directory = "${var.scripts_dst_directory}"
41 | }
42 |
43 | #-------------------------------------------------------------------------------------------
44 | # Create and Setup the Ceph Monitors
45 | #-------------------------------------------------------------------------------------------
46 | module "ceph_monitors" {
47 | source = "modules/ceph-monitor/"
48 | tenancy_ocid = "${var.tenancy_ocid}"
49 | compartment_ocid = "${var.compartment_ocid}"
50 | instance_count = "${var.monitor_instance_count}"
51 | instance_os = "${var.instance_os}"
52 | availability_domain_index_list = "${var.availability_domain_index_list_for_monitors}"
53 | hostname_prefix = "${var.monitor_hostname_prefix}"
54 | shape = "${var.instance_shapes["monitor"]}"
55 | subnet_id_list = "${module.ceph_network.subnet_id_list}"
56 | ssh_public_key_file = "${var.ssh_public_key_file}"
57 | ssh_private_key_file = "${var.ssh_private_key_file}"
58 | ssh_username = "${var.ssh_username}"
59 | instance_create_timeout = "${var.instance_create_timeout}"
60 | ceph_deployer_ip = "${module.ceph_deployer.ip}"
61 | scripts_src_directory = "${var.scripts_src_directory}"
62 | scripts_dst_directory = "${var.scripts_dst_directory}"
63 | deployer_deploy = "${module.ceph_deployer.deploy}"
64 | }
65 |
66 | #-------------------------------------------------------------------------------------------
67 | # Create and Setup the Ceph OSDs
68 | #-------------------------------------------------------------------------------------------
69 | module "ceph_osds" {
70 | source = "modules/ceph-osd/"
71 | tenancy_ocid = "${var.tenancy_ocid}"
72 | compartment_ocid = "${var.compartment_ocid}"
73 | instance_count = "${var.osd_instance_count}"
74 | instance_os = "${var.instance_os}"
75 | availability_domain_index_list = "${var.availability_domain_index_list_for_osds}"
76 | hostname_prefix = "${var.osd_hostname_prefix}"
77 | shape = "${var.instance_shapes["osd"]}"
78 | subnet_id_list = "${module.ceph_network.subnet_id_list}"
79 | ssh_public_key_file = "${var.ssh_public_key_file}"
80 | ssh_private_key_file = "${var.ssh_private_key_file}"
81 | ssh_username = "${var.ssh_username}"
82 | instance_create_timeout = "${var.instance_create_timeout}"
83 | ceph_deployer_ip
= "${module.ceph_deployer.ip}" 84 | create_volume = "${var.create_volume}" 85 | volume_name_prefix = "${var.volume_name_prefix}" 86 | volume_size_in_gbs = "${var.volume_size_in_gbs}" 87 | volume_attachment_type = "${var.volume_attachment_type}" 88 | scripts_src_directory = "${var.scripts_src_directory}" 89 | scripts_dst_directory = "${var.scripts_dst_directory}" 90 | block_device_for_ceph = "${var.block_device_for_ceph}" 91 | deployer_deploy = "${module.ceph_deployer.deploy}" 92 | new_cluster= "${module.ceph_monitors.new_cluster}" 93 | } 94 | 95 | #------------------------------------------------------------------------------------------- 96 | # Create and Setup the Ceph MDSs 97 | #------------------------------------------------------------------------------------------- 98 | module "ceph_mds" { 99 | source = "modules/ceph-mds/" 100 | instance_count = "${var.mds_instance_count}" 101 | tenancy_ocid = "${var.tenancy_ocid}" 102 | compartment_ocid = "${var.compartment_ocid}" 103 | instance_os = "${var.instance_os}" 104 | availability_domain_index_list = "${var.availability_domain_index_list_for_mds}" 105 | hostname_prefix= "${var.mds_hostname_prefix}" 106 | shape = "${var.instance_shapes["mds"]}" 107 | subnet_id_list = "${module.ceph_network.subnet_id_list}" 108 | ssh_public_key_file = "${var.ssh_public_key_file}" 109 | ssh_private_key_file = "${var.ssh_private_key_file}" 110 | ssh_username = "${var.ssh_username}" 111 | instance_create_timeout = "${var.instance_create_timeout}" 112 | ceph_deployer_ip = "${module.ceph_deployer.ip}" 113 | scripts_src_directory = "${var.scripts_src_directory}" 114 | scripts_dst_directory = "${var.scripts_dst_directory}" 115 | deployer_deploy = "${module.ceph_deployer.deploy}" 116 | new_cluster = "${module.ceph_monitors.new_cluster}" 117 | } 118 | 119 | #------------------------------------------------------------------------------------------- 120 | # Create and Setup the Ceph Client 121 | #------------------------------------------------------------------------------------------- 122 | module "ceph_client" { 123 | source = "modules/ceph-client/" 124 | num_client = "${var.create_client}" 125 | tenancy_ocid = "${var.tenancy_ocid}" 126 | compartment_ocid = "${var.compartment_ocid}" 127 | instance_os = "${var.instance_os}" 128 | availability_domain_index = "${var.availability_domain_index_list_for_client[0]}" 129 | hostname = "${var.client_hostname}" 130 | shape = "${var.instance_shapes["client"]}" 131 | subnet_id = "${element(module.ceph_network.subnet_id_list, var.availability_domain_index_list_for_client[0] - 1)}" 132 | ssh_public_key_file = "${var.ssh_public_key_file}" 133 | ssh_private_key_file = "${var.ssh_private_key_file}" 134 | ssh_username = "${var.ssh_username}" 135 | instance_create_timeout = "${var.instance_create_timeout}" 136 | ceph_deployer_ip = "${module.ceph_deployer.ip}" 137 | scripts_src_directory = "${var.scripts_src_directory}" 138 | scripts_dst_directory = "${var.scripts_dst_directory}" 139 | deployer_deploy = "${module.ceph_deployer.deploy}" 140 | new_cluster = "${module.ceph_monitors.new_cluster}" 141 | osd_deploy = "${module.ceph_osds.deploy}" 142 | } 143 | -------------------------------------------------------------------------------- /modules/ceph-client/main.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------ 2 | # Get a list of Availability Domains 3 | 
#------------------------------------------------------------------------------------ 4 | 5 | data "oci_identity_availability_domains" "ADs" { 6 | compartment_id = "${var.tenancy_ocid}" 7 | } 8 | 9 | #------------------------------------------------------------------------------------ 10 | # Get the OCID of the OS image to use 11 | #------------------------------------------------------------------------------------ 12 | data "oci_core_images" "image_ocid" { 13 | compartment_id = "${var.compartment_ocid}" 14 | display_name = "${var.instance_os}" 15 | } 16 | 17 | #------------------------------------------------------------------------------------ 18 | # Create Ceph Client Instance(s) 19 | #------------------------------------------------------------------------------------ 20 | resource "oci_core_instance" "instance" { 21 | count = "${var.num_client}" 22 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain_index - 1],"name")}" 23 | compartment_id = "${var.compartment_ocid}" 24 | display_name = "${var.hostname}" 25 | hostname_label = "${var.hostname}" 26 | shape = "${var.shape}" 27 | subnet_id = "${var.subnet_id}" 28 | source_details { 29 | source_type = "image" 30 | source_id = "${lookup(data.oci_core_images.image_ocid.images[0], "id")}" 31 | } 32 | metadata { 33 | ssh_authorized_keys = "${file(var.ssh_public_key_file)}" 34 | } 35 | connection { 36 | host = "${self.private_ip}" 37 | type = "ssh" 38 | user = "${var.ssh_username}" 39 | private_key = "${file(var.ssh_private_key_file)}" 40 | } 41 | provisioner "remote-exec" { 42 | inline = [ 43 | " mkdir ~/${var.scripts_dst_directory}", 44 | ] 45 | } 46 | provisioner "file" { 47 | source = "${var.scripts_src_directory}/ceph.config" 48 | destination = "~/${var.scripts_dst_directory}/ceph.config" 49 | } 50 | provisioner "file" { 51 | source = "${var.scripts_src_directory}/vm_init.sh" 52 | destination = "~/${var.scripts_dst_directory}/vm_init.sh" 53 | } 54 | provisioner "file" { 55 | source = "${var.scripts_src_directory}/vm_pre_setup.sh" 56 | destination = "~/${var.scripts_dst_directory}/vm_pre_setup.sh" 57 | } 58 | provisioner "file" { 59 | source = "${var.scripts_src_directory}/vm_setup.sh" 60 | destination = "~/${var.scripts_dst_directory}/vm_setup.sh" 61 | } 62 | provisioner "file" { 63 | source = "${var.scripts_src_directory}/vm_post_setup.sh" 64 | destination = "~/${var.scripts_dst_directory}/vm_post_setup.sh" 65 | } 66 | provisioner "file" { 67 | source = "${var.scripts_src_directory}/yum_repo_setup.sh" 68 | destination = "~/${var.scripts_dst_directory}/yum_repo_setup.sh" 69 | } 70 | provisioner "file" { 71 | source = "${var.scripts_src_directory}/ceph_yum_repo" 72 | destination = "~/${var.scripts_dst_directory}/ceph_yum_repo" 73 | } 74 | provisioner "file" { 75 | source = "${var.scripts_src_directory}/ceph_firewall_setup.sh" 76 | destination = "~/${var.scripts_dst_directory}/ceph_firewall_setup.sh" 77 | } 78 | provisioner "file" { 79 | source = "${var.scripts_src_directory}/ceph_client_setup.sh" 80 | destination = "~/${var.scripts_dst_directory}/ceph_client_setup.sh" 81 | } 82 | timeouts { 83 | create = "${var.instance_create_timeout}" 84 | } 85 | } 86 | 87 | #------------------------------------------------------------------------------------ 88 | # Initialize the VM 89 | #------------------------------------------------------------------------------------ 90 | resource "null_resource" "vm_init" { 91 | depends_on = ["oci_core_instance.instance"] 92 | count = 
"${var.num_client}" 93 | provisioner "remote-exec" { 94 | connection { 95 | agent = false 96 | timeout = "30m" 97 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 98 | user = "${var.ssh_username}" 99 | private_key = "${file(var.ssh_private_key_file)}" 100 | } 101 | inline = [ 102 | "cd ${var.scripts_dst_directory}", 103 | "chmod +x vm_init.sh", 104 | "./vm_init.sh client" 105 | ] 106 | } 107 | } 108 | 109 | #------------------------------------------------------------------------------------ 110 | # Setup the VM. 111 | # Setup involves: 112 | # 1. Pre Setup 113 | # 2. Waiting (adding delay for the duration specified in the ceph.config file) 114 | # 3. Setup 115 | # 4. Waiting (adding delay for the duration specified in the ceph.config file) 116 | # 5. Post Setup 117 | #------------------------------------------------------------------------------------ 118 | resource "null_resource" "vm_pre_setup" { 119 | depends_on = ["null_resource.vm_init"] 120 | count = "${var.num_client}" 121 | provisioner "remote-exec" { 122 | connection { 123 | agent = false 124 | timeout = "30m" 125 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 126 | user = "${var.ssh_username}" 127 | private_key = "${file(var.ssh_private_key_file)}" 128 | } 129 | inline = [ 130 | "cd ${var.scripts_dst_directory}", 131 | "chmod +x vm_pre_setup.sh", 132 | "./vm_pre_setup.sh client" 133 | ] 134 | } 135 | } 136 | 137 | resource "null_resource" "delay_before" { 138 | count = "${var.num_client}" 139 | provisioner "local-exec" { 140 | command = "cd ${var.scripts_src_directory}; ./delay.sh before_setup" 141 | } 142 | triggers = { 143 | "before" = "${element(null_resource.vm_pre_setup.*.id, count.index)}" 144 | 145 | } 146 | } 147 | 148 | resource "null_resource" "vm_setup" { 149 | depends_on = ["null_resource.delay_before"] 150 | count = "${var.num_client}" 151 | provisioner "remote-exec" { 152 | connection { 153 | agent = false 154 | timeout = "30m" 155 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 156 | user = "${var.ssh_username}" 157 | private_key = "${file(var.ssh_private_key_file)}" 158 | } 159 | inline = [ 160 | "cd ${var.scripts_dst_directory}", 161 | "chmod +x vm_setup.sh", 162 | "./vm_setup.sh client" 163 | ] 164 | } 165 | } 166 | 167 | resource "null_resource" "delay_after" { 168 | count = "${var.num_client}" 169 | provisioner "local-exec" { 170 | command = "cd ${var.scripts_src_directory}; ./delay.sh after_setup" 171 | } 172 | triggers = { 173 | "before" = "${element(null_resource.vm_setup.*.id, count.index)}" 174 | } 175 | } 176 | 177 | resource "null_resource" "vm_post_setup" { 178 | depends_on = ["null_resource.delay_after"] 179 | count = "${var.num_client}" 180 | provisioner "remote-exec" { 181 | connection { 182 | agent = false 183 | timeout = "30m" 184 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 185 | user = "${var.ssh_username}" 186 | private_key = "${file(var.ssh_private_key_file)}" 187 | } 188 | inline = [ 189 | "cd ${var.scripts_dst_directory}", 190 | "chmod +x vm_post_setup.sh", 191 | "./vm_post_setup.sh client" 192 | ] 193 | } 194 | } 195 | 196 | #------------------------------------------------------------------------------------ 197 | # Setup Ceph Client Instances 198 | #------------------------------------------------------------------------------------ 199 | resource "null_resource" "setup" { 200 | depends_on = ["null_resource.vm_post_setup"] 201 | count = "${var.num_client}" 202 | provisioner 
"remote-exec" { 203 | connection { 204 | agent = false 205 | timeout = "30m" 206 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 207 | user = "${var.ssh_username}" 208 | private_key = "${file(var.ssh_private_key_file)}" 209 | } 210 | inline = [ 211 | "cd ${var.scripts_dst_directory}", 212 | "chmod +x yum_repo_setup.sh", 213 | "./yum_repo_setup.sh", 214 | "chmod +x ceph_firewall_setup.sh", 215 | "./ceph_firewall_setup.sh client" 216 | ] 217 | } 218 | } 219 | 220 | #------------------------------------------------------------------------------------ 221 | # Passwordless SSH Setup (from deployer to OSDs) 222 | # - Get the ssh key from the Ceph Deployer Instance and install on OSDs 223 | #------------------------------------------------------------------------------------ 224 | resource "null_resource" "wait_for_deployer_deploy" { 225 | depends_on = ["null_resource.setup"] 226 | count = "${var.num_client}" 227 | provisioner "local-exec" { 228 | command = "echo 'Waited for Deployer Setup (${var.deployer_deploy}) to complete'" 229 | } 230 | } 231 | 232 | resource "null_resource" "copy_key" { 233 | count = "${var.num_client}" 234 | depends_on = ["null_resource.setup", "null_resource.wait_for_deployer_deploy"] 235 | provisioner "local-exec" { 236 | command = "${var.scripts_src_directory}/install_ssh_key.sh ${var.ceph_deployer_ip} ${oci_core_instance.instance.private_ip}" 237 | } 238 | } 239 | 240 | resource "null_resource" "add_to_deployer_known_hosts" { 241 | count = "${var.num_client}" 242 | depends_on = ["null_resource.copy_key"] 243 | provisioner "remote-exec" { 244 | connection { 245 | agent = false 246 | timeout = "30m" 247 | host = "${var.ceph_deployer_ip}" 248 | user = "${var.ssh_username}" 249 | private_key = "${file(var.ssh_private_key_file)}" 250 | } 251 | inline = [ 252 | "cd ${var.scripts_dst_directory}", 253 | "./add_to_etc_hosts.sh ${oci_core_instance.instance.private_ip} ${oci_core_instance.instance.hostname_label}", 254 | "./add_to_known_hosts.sh ${oci_core_instance.instance.private_ip} ${oci_core_instance.instance.hostname_label}", 255 | ] 256 | } 257 | } 258 | 259 | #------------------------------------------------------------------------------------ 260 | # Deploy Ceph which includes installing the packages, making it an admin node, 261 | # install keyfiles, make them readable, etc. 262 | #------------------------------------------------------------------------------------ 263 | resource "null_resource" "wait_for_cluster_create" { 264 | count = "${var.num_client}" 265 | provisioner "local-exec" { 266 | command = "echo 'Waited for create new cluster ${var.new_cluster} creation'" 267 | } 268 | } 269 | 270 | resource "null_resource" "deploy" { 271 | count = "${var.num_client}" 272 | depends_on = ["null_resource.add_to_deployer_known_hosts", "null_resource.wait_for_cluster_create"] 273 | provisioner "remote-exec" { 274 | connection { 275 | agent = false 276 | timeout = "30m" 277 | host = "${var.ceph_deployer_ip}" 278 | user = "${var.ssh_username}" 279 | private_key = "${file(var.ssh_private_key_file)}" 280 | } 281 | inline = [ 282 | "cd ${var.scripts_dst_directory}", 283 | "chmod +x ceph_deploy_client.sh", 284 | "./ceph_deploy_client.sh ${join(" ", oci_core_instance.instance.*.hostname_label)}" 285 | ] 286 | } 287 | } 288 | 289 | #------------------------------------------------------------------------------------ 290 | # Setup Clinet with block device, file Systems etc. 
291 | #------------------------------------------------------------------------------------
292 | resource "null_resource" "wait_for_osd_deploy" {
293 | count = "${var.num_client}"
294 | provisioner "local-exec" {
295 | command = "echo 'Waited for OSD deployment ${var.osd_deploy} to complete'"
296 | }
297 | }
298 |
299 | resource "null_resource" "client_setup" {
300 | depends_on = [ "null_resource.deploy", "null_resource.wait_for_osd_deploy" ]
301 | count = "${var.num_client}"
302 | provisioner "remote-exec" {
303 | connection {
304 | agent = false
305 | timeout = "30m"
306 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}"
307 | user = "${var.ssh_username}"
308 | private_key = "${file(var.ssh_private_key_file)}"
309 | }
310 | inline = [
311 | "cd ${var.scripts_dst_directory}",
312 | "chmod +x ceph_client_setup.sh",
313 | "./ceph_client_setup.sh ${var.ssh_username}"
314 | ]
315 | }
316 | }
317 |
--------------------------------------------------------------------------------
/modules/ceph-client/output.tf:
--------------------------------------------------------------------------------
1 |
2 | output "ip" {
3 | value = "${oci_core_instance.instance.*.private_ip}"
4 | }
5 |
6 | output "hostname" {
7 | value = "${oci_core_instance.instance.*.hostname_label}"
8 | }
9 |
--------------------------------------------------------------------------------
/modules/ceph-client/variables.tf:
--------------------------------------------------------------------------------
1 | #===============================================================
2 | # Module Inputs
3 | #===============================================================
4 |
5 | variable "tenancy_ocid" {
6 | description = "The OCI tenancy id"
7 | }
8 |
9 | variable "compartment_ocid" {
10 | description = "The OCI compartment id"
11 | }
12 |
13 | variable "instance_os" {
14 | description = "The Name of the Operating System for the client"
15 | }
16 |
17 | variable "num_client" {
18 | description = "The number of clients to create. For now it can only be 0 or 1."
19 | }
20 |
21 | variable "availability_domain_index" {
22 | description = "The availability domain where the client will be created"
23 | }
24 |
25 | variable "hostname" {
26 | description = "The name of the instance"
27 | }
28 |
29 | variable "shape" {
30 | description = "The compute shape of the instances"
31 | }
32 |
33 | variable "subnet_id" {
34 | description = "The subnet where the instance will be created"
35 | }
36 |
37 | variable "ssh_public_key_file" {
38 | description = "The public key that will be installed on to the new instance(s) for ssh login"
39 | }
40 |
41 | variable "ssh_private_key_file" {
42 | description = "The private key for ssh login to the new instance(s)"
43 | }
44 |
45 | variable "ssh_username" {
46 | description = "The username for ssh login to the instance(s)"
47 | }
48 |
49 | variable "ceph_deployer_ip" {
50 | description = "The IP of the Ceph deployer node"
51 | }
52 |
53 | variable "instance_create_timeout" {
54 | description = "The timeout value for instance creation"
55 | }
56 |
57 | variable "scripts_src_directory" {
58 | description = "Path to the directory where the scripts and config files are"
59 | }
60 |
61 | variable "scripts_dst_directory" {
62 | description = "Path to the directory where the scripts and config files will be copied to"
63 | }
64 |
65 | variable "deployer_deploy" {
66 | description = "A synchronization primitive for letting TF know that the deployment for the deployer is complete"
67 | }
68 |
69 | variable "new_cluster" {
70 | description = "A synchronization primitive for letting TF know that the creation of the new cluster is complete"
71 | }
72 |
73 | variable "osd_deploy" {
74 | description = "A synchronization primitive for letting TF know that the OSD deployment is complete"
75 | }
76 |
--------------------------------------------------------------------------------
/modules/ceph-deployer/main.tf:
--------------------------------------------------------------------------------
1 | #------------------------------------------------------------------------------------
2 | # Get a list of Availability Domains
3 | #------------------------------------------------------------------------------------
4 | data "oci_identity_availability_domains" "ADs" {
5 | compartment_id = "${var.tenancy_ocid}"
6 | }
7 |
8 | #------------------------------------------------------------------------------------
9 | # Get the OCID of the OS image to use
10 | #------------------------------------------------------------------------------------
11 | data "oci_core_images" "image_ocid" {
12 | compartment_id = "${var.compartment_ocid}"
13 | display_name = "${var.instance_os}"
14 | }
15 |
16 | #------------------------------------------------------------------------------------
17 | # Create the Ceph Deployer Instance
18 | #------------------------------------------------------------------------------------
19 | resource "oci_core_instance" "instance" {
20 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain_index - 1],"name")}"
21 | compartment_id = "${var.compartment_ocid}"
22 | display_name = "${var.hostname}"
23 | hostname_label = "${var.hostname}"
24 | shape = "${var.shape}"
25 | subnet_id = "${var.subnet_id}"
26 | source_details {
27 | source_type = "image"
28 | source_id = "${lookup(data.oci_core_images.image_ocid.images[0], "id")}"
29 | }
30 | metadata {
31 | ssh_authorized_keys = "${file(var.ssh_public_key_file)}"
32 | }
33 | connection {
34 | host = "${self.private_ip}"
35
| type = "ssh" 36 | user = "${var.ssh_username}" 37 | private_key = "${file(var.ssh_private_key_file)}" 38 | } 39 | provisioner "remote-exec" { 40 | inline = [ 41 | " mkdir ~/${var.scripts_dst_directory}", 42 | ] 43 | } 44 | provisioner "file" { 45 | source = "${var.scripts_src_directory}/ceph.config" 46 | destination = "~/${var.scripts_dst_directory}/ceph.config" 47 | } 48 | provisioner "file" { 49 | source = "${var.scripts_src_directory}/vm_init.sh" 50 | destination = "~/${var.scripts_dst_directory}/vm_init.sh" 51 | } 52 | provisioner "file" { 53 | source = "${var.scripts_src_directory}/vm_pre_setup.sh" 54 | destination = "~/${var.scripts_dst_directory}/vm_pre_setup.sh" 55 | } 56 | provisioner "file" { 57 | source = "${var.scripts_src_directory}/vm_setup.sh" 58 | destination = "~/${var.scripts_dst_directory}/vm_setup.sh" 59 | } 60 | provisioner "file" { 61 | source = "${var.scripts_src_directory}/vm_post_setup.sh" 62 | destination = "~/${var.scripts_dst_directory}/vm_post_setup.sh" 63 | } 64 | provisioner "file" { 65 | source = "${var.scripts_src_directory}/add_to_known_hosts.sh" 66 | destination = "~/${var.scripts_dst_directory}/add_to_known_hosts.sh" 67 | } 68 | provisioner "file" { 69 | source = "${var.scripts_src_directory}/add_to_etc_hosts.sh" 70 | destination = "~/${var.scripts_dst_directory}/add_to_etc_hosts.sh" 71 | } 72 | provisioner "file" { 73 | source = "${var.scripts_src_directory}/install_ssh_key.sh" 74 | destination = "~/${var.scripts_dst_directory}/install_ssh_key.sh" 75 | } 76 | provisioner "file" { 77 | source = "${var.scripts_src_directory}/yum_repo_setup.sh" 78 | destination = "~/${var.scripts_dst_directory}/yum_repo_setup.sh" 79 | } 80 | provisioner "file" { 81 | source = "${var.scripts_src_directory}/ceph_yum_repo" 82 | destination = "~/${var.scripts_dst_directory}/ceph_yum_repo" 83 | } 84 | provisioner "file" { 85 | source = "${var.scripts_src_directory}/ceph_firewall_setup.sh" 86 | destination = "~/${var.scripts_dst_directory}/ceph_firewall_setup.sh" 87 | } 88 | provisioner "file" { 89 | source = "${var.scripts_src_directory}/install_ceph_deploy.sh" 90 | destination = "~/${var.scripts_dst_directory}/install_ceph_deploy.sh" 91 | } 92 | provisioner "file" { 93 | source = "${var.scripts_src_directory}/ceph_new_cluster.sh" 94 | destination = "~/${var.scripts_dst_directory}/ceph_new_cluster.sh" 95 | } 96 | provisioner "file" { 97 | source = "${var.scripts_src_directory}/ceph_deploy_osd.sh" 98 | destination = "~/${var.scripts_dst_directory}/ceph_deploy_osd.sh" 99 | } 100 | provisioner "file" { 101 | source = "${var.scripts_src_directory}/ceph_deploy_mds.sh" 102 | destination = "~/${var.scripts_dst_directory}/ceph_deploy_mds.sh" 103 | } 104 | provisioner "file" { 105 | source = "${var.scripts_src_directory}/ceph_deploy_client.sh" 106 | destination = "~/${var.scripts_dst_directory}/ceph_deploy_client.sh" 107 | } 108 | timeouts { 109 | create = "${var.instance_create_timeout}" 110 | } 111 | } 112 | 113 | #------------------------------------------------------------------------------------ 114 | # Initialize the VM 115 | #------------------------------------------------------------------------------------ 116 | resource "null_resource" "vm_init" { 117 | depends_on = ["oci_core_instance.instance"] 118 | provisioner "remote-exec" { 119 | connection { 120 | agent = false 121 | timeout = "30m" 122 | host = "${oci_core_instance.instance.private_ip}" 123 | user = "${var.ssh_username}" 124 | private_key = "${file(var.ssh_private_key_file)}" 125 | } 126 | inline = [ 127 | "chmod +x 
~/${var.scripts_dst_directory}/add_to_etc_hosts.sh", 128 | "chmod +x ~/${var.scripts_dst_directory}/add_to_known_hosts.sh", 129 | "chmod +x ~/${var.scripts_dst_directory}/install_ssh_key.sh", 130 | "chmod +x ~/${var.scripts_dst_directory}/install_ceph_deploy.sh", 131 | "chmod +x ~/${var.scripts_dst_directory}/ceph_new_cluster.sh", 132 | "chmod +x ~/${var.scripts_dst_directory}/ceph_deploy_osd.sh", 133 | "chmod +x ~/${var.scripts_dst_directory}/ceph_deploy_mds.sh", 134 | "chmod +x ~/${var.scripts_dst_directory}/ceph_deploy_client.sh", 135 | "cd ${var.scripts_dst_directory}", 136 | "chmod +x vm_init.sh", 137 | "./vm_init.sh deployer" 138 | ] 139 | } 140 | } 141 | 142 | #------------------------------------------------------------------------------------ 143 | # Setup the VM. 144 | # Setup involves: 145 | # 1. Pre Setup 146 | # 2. Waiting (adding delay for the duration specified in the ceph.config file) 147 | # 3. Setup 148 | # 4. Waiting (adding delay for the duration specified in the ceph.config file) 149 | # 5. Post Setup 150 | #------------------------------------------------------------------------------------ 151 | resource "null_resource" "vm_pre_setup" { 152 | depends_on = ["null_resource.vm_init"] 153 | provisioner "remote-exec" { 154 | connection { 155 | agent = false 156 | timeout = "30m" 157 | host = "${oci_core_instance.instance.private_ip}" 158 | user = "${var.ssh_username}" 159 | private_key = "${file(var.ssh_private_key_file)}" 160 | } 161 | inline = [ 162 | "cd ${var.scripts_dst_directory}", 163 | "chmod +x vm_pre_setup.sh", 164 | "./vm_pre_setup.sh deployer" 165 | ] 166 | } 167 | } 168 | 169 | resource "null_resource" "delay_before" { 170 | provisioner "local-exec" { 171 | command = "cd ${var.scripts_src_directory}; ./delay.sh before_setup" 172 | } 173 | triggers = { 174 | "before" = "${null_resource.vm_pre_setup.id}" 175 | } 176 | } 177 | 178 | resource "null_resource" "vm_setup" { 179 | depends_on = ["null_resource.delay_before"] 180 | provisioner "remote-exec" { 181 | connection { 182 | agent = false 183 | timeout = "30m" 184 | host = "${oci_core_instance.instance.private_ip}" 185 | user = "${var.ssh_username}" 186 | private_key = "${file(var.ssh_private_key_file)}" 187 | } 188 | inline = [ 189 | "cd ${var.scripts_dst_directory}", 190 | "chmod +x vm_setup.sh", 191 | "./vm_setup.sh deployer" 192 | ] 193 | } 194 | } 195 | 196 | resource "null_resource" "delay_after" { 197 | provisioner "local-exec" { 198 | command = "cd ${var.scripts_src_directory}; ./delay.sh after_setup" 199 | } 200 | triggers = { 201 | "before" = "${null_resource.vm_setup.id}" 202 | } 203 | } 204 | 205 | resource "null_resource" "vm_post_setup" { 206 | depends_on = ["null_resource.delay_after"] 207 | provisioner "remote-exec" { 208 | connection { 209 | agent = false 210 | timeout = "30m" 211 | host = "${oci_core_instance.instance.private_ip}" 212 | user = "${var.ssh_username}" 213 | private_key = "${file(var.ssh_private_key_file)}" 214 | } 215 | inline = [ 216 | "cd ${var.scripts_dst_directory}", 217 | "chmod +x vm_post_setup.sh", 218 | "./vm_post_setup.sh deployer" 219 | ] 220 | } 221 | } 222 | 223 | #------------------------------------------------------------------------------------ 224 | # Setup Ceph Deployer Instance 225 | #------------------------------------------------------------------------------------ 226 | resource "null_resource" "setup" { 227 | depends_on = ["null_resource.vm_post_setup"] 228 | provisioner "remote-exec" { 229 | connection { 230 | agent = false 231 | timeout = "30m" 232 | 
host = "${oci_core_instance.instance.private_ip}" 233 | user = "${var.ssh_username}" 234 | private_key = "${file(var.ssh_private_key_file)}" 235 | } 236 | inline = [ 237 | "rm -rf ~/.ssh/id_rsa", 238 | "ssh-keygen -t rsa -q -P '' -f ~/.ssh/id_rsa", 239 | "cd ${var.scripts_dst_directory}", 240 | "chmod +x yum_repo_setup.sh", 241 | "./yum_repo_setup.sh", 242 | "chmod +x ceph_firewall_setup.sh", 243 | "./ceph_firewall_setup.sh deployer" 244 | ] 245 | } 246 | } 247 | 248 | #------------------------------------------------------------------------------------ 249 | # Deploy the Ceph Deployer Instance 250 | #------------------------------------------------------------------------------------ 251 | resource "null_resource" "deploy" { 252 | depends_on = ["null_resource.setup"] 253 | provisioner "remote-exec" { 254 | connection { 255 | agent = false 256 | timeout = "30m" 257 | host = "${oci_core_instance.instance.private_ip}" 258 | user = "${var.ssh_username}" 259 | private_key = "${file(var.ssh_private_key_file)}" 260 | } 261 | inline = [ 262 | "cd ${var.scripts_dst_directory}", 263 | "chmod +x install_ceph_deploy.sh", 264 | "./install_ceph_deploy.sh" 265 | ] 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /modules/ceph-deployer/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "ip" { 3 | value = "${oci_core_instance.instance.private_ip}" 4 | } 5 | 6 | output "hostname" { 7 | value = "${oci_core_instance.instance.hostname_label}" 8 | } 9 | 10 | output "deploy" { 11 | value = "${null_resource.deploy.id}" 12 | } 13 | -------------------------------------------------------------------------------- /modules/ceph-deployer/variables.tf: -------------------------------------------------------------------------------- 1 | #=============================================================== 2 | # Module Inputs 3 | #=============================================================== 4 | 5 | variable "tenancy_ocid" { 6 | description = "The OCI tenancy id" 7 | } 8 | 9 | variable "compartment_ocid" { 10 | description = "The OCI compartment id" 11 | } 12 | 13 | variable "instance_os" { 14 | description = "The Name of the Operating System for the instance" 15 | } 16 | 17 | variable "availability_domain_index" { 18 | description = "The availability domain where the deployer will be created" 19 | } 20 | 21 | variable "hostname" { 22 | description = "The the name of the client" 23 | } 24 | 25 | variable "shape" { 26 | description = "The compute shape of the instance(s)" 27 | } 28 | 29 | variable "subnet_id" { 30 | description = "The subnet where the deployer will be created" 31 | } 32 | 33 | variable "ssh_public_key_file" { 34 | description = "The public key that will be installed on to the new instance(s) for ssh login" 35 | } 36 | 37 | variable "ssh_private_key_file" { 38 | description = "The private key that for ssh login to the new instance(s)" 39 | } 40 | 41 | variable "ssh_username" { 42 | 43 | description = "The username for ssh login to the instance(s)" 44 | } 45 | 46 | variable "instance_create_timeout" { 47 | description = "The timeout value for instance creation" 48 | } 49 | 50 | variable "scripts_src_directory" { 51 | description = "Path to the directory where the scripts and config files are" 52 | } 53 | 54 | variable "scripts_dst_directory" { 55 | description = "Path to the directory where the scripts and config files will be copied to" 56 | } 57 | 
-------------------------------------------------------------------------------- /modules/ceph-mds/main.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------ 2 | # Get a list of Availability Domains 3 | #------------------------------------------------------------------------------------ 4 | data "oci_identity_availability_domains" "ADs" { 5 | compartment_id = "${var.tenancy_ocid}" 6 | } 7 | 8 | #------------------------------------------------------------------------------------ 9 | # Get the OCID of the OS image to use 10 | #------------------------------------------------------------------------------------ 11 | data "oci_core_images" "image_ocid" { 12 | compartment_id = "${var.compartment_ocid}" 13 | display_name = "${var.instance_os}" 14 | } 15 | 16 | #------------------------------------------------------------------------------------ 17 | # Create Ceph MDS Instance(s) 18 | #------------------------------------------------------------------------------------ 19 | resource "oci_core_instance" "instance" { 20 | count = "${var.instance_count}" 21 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain_index_list[count.index] - 1],"name")}" 22 | compartment_id = "${var.compartment_ocid}" 23 | display_name = "${var.hostname_prefix}-${count.index}" 24 | hostname_label = "${var.hostname_prefix}-${count.index}" 25 | shape = "${var.shape}" 26 | subnet_id = "${var.subnet_id_list[var.availability_domain_index_list[count.index] - 1]}" 27 | source_details { 28 | source_type = "image" 29 | source_id = "${lookup(data.oci_core_images.image_ocid.images[0], "id")}" 30 | } 31 | metadata { 32 | ssh_authorized_keys = "${file(var.ssh_public_key_file)}" 33 | } 34 | connection { 35 | host = "${self.private_ip}" 36 | type = "ssh" 37 | user = "${var.ssh_username}" 38 | private_key = "${file(var.ssh_private_key_file)}" 39 | } 40 | provisioner "remote-exec" { 41 | inline = [ 42 | " mkdir ~/${var.scripts_dst_directory}", 43 | ] 44 | } 45 | provisioner "file" { 46 | source = "${var.scripts_src_directory}/ceph.config" 47 | destination = "~/${var.scripts_dst_directory}/ceph.config" 48 | } 49 | provisioner "file" { 50 | source = "${var.scripts_src_directory}/vm_init.sh" 51 | destination = "~/${var.scripts_dst_directory}/vm_init.sh" 52 | } 53 | provisioner "file" { 54 | source = "${var.scripts_src_directory}/vm_pre_setup.sh" 55 | destination = "~/${var.scripts_dst_directory}/vm_pre_setup.sh" 56 | } 57 | provisioner "file" { 58 | source = "${var.scripts_src_directory}/vm_setup.sh" 59 | destination = "~/${var.scripts_dst_directory}/vm_setup.sh" 60 | } 61 | provisioner "file" { 62 | source = "${var.scripts_src_directory}/vm_post_setup.sh" 63 | destination = "~/${var.scripts_dst_directory}/vm_post_setup.sh" 64 | } 65 | provisioner "file" { 66 | source = "${var.scripts_src_directory}/yum_repo_setup.sh" 67 | destination = "~/${var.scripts_dst_directory}/yum_repo_setup.sh" 68 | } 69 | provisioner "file" { 70 | source = "${var.scripts_src_directory}/ceph_yum_repo" 71 | destination = "~/${var.scripts_dst_directory}/ceph_yum_repo" 72 | } 73 | provisioner "file" { 74 | source = "${var.scripts_src_directory}/ceph_firewall_setup.sh" 75 | destination = "~/${var.scripts_dst_directory}/ceph_firewall_setup.sh" 76 | } 77 | timeouts { 78 | create = "${var.instance_create_timeout}" 79 | } 80 | } 81 | 82 | 
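#------------------------------------------------------------------------------------
# The eight file provisioners above upload the scripts one at a time. Terraform's
# file provisioner can also upload a directory: with a trailing slash on source, the
# directory's contents are copied into destination (which the remote-exec mkdir above
# already creates). A single-provisioner variant is sketched below; it is not used by
# this repository, and it would also copy scripts this node never runs:
#
# provisioner "file" {
#   source      = "${var.scripts_src_directory}/"
#   destination = "~/${var.scripts_dst_directory}"
# }
#------------------------------------------------------------------------------------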
#------------------------------------------------------------------------------------ 83 | # Initialize the VM instances 84 | #------------------------------------------------------------------------------------ 85 | resource "null_resource" "vm_init" { 86 | depends_on = ["oci_core_instance.instance"] 87 | count = "${var.instance_count}" 88 | provisioner "remote-exec" { 89 | connection { 90 | agent = false 91 | timeout = "30m" 92 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 93 | user = "${var.ssh_username}" 94 | private_key = "${file(var.ssh_private_key_file)}" 95 | } 96 | inline = [ 97 | "cd ${var.scripts_dst_directory}", 98 | "chmod +x vm_init.sh", 99 | "./vm_init.sh mds" 100 | ] 101 | } 102 | } 103 | 104 | #------------------------------------------------------------------------------------ 105 | # Setup the VM. 106 | # Setup involves: 107 | # 1. Pre Setup 108 | # 2. Waiting (adding delay for the duration specified in the ceph.config file) 109 | # 3. Setup 110 | # 4. Waiting (adding delay for the duration specified in the ceph.config file) 111 | # 5. Post Setup 112 | #------------------------------------------------------------------------------------ 113 | resource "null_resource" "vm_pre_setup" { 114 | depends_on = ["null_resource.vm_init"] 115 | count = "${var.instance_count}" 116 | provisioner "remote-exec" { 117 | connection { 118 | agent = false 119 | timeout = "30m" 120 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 121 | user = "${var.ssh_username}" 122 | private_key = "${file(var.ssh_private_key_file)}" 123 | } 124 | inline = [ 125 | "cd ${var.scripts_dst_directory}", 126 | "chmod +x vm_pre_setup.sh", 127 | "./vm_pre_setup.sh mds" 128 | ] 129 | } 130 | } 131 | 132 | resource "null_resource" "delay_before" { 133 | count = "${var.instance_count}" 134 | provisioner "local-exec" { 135 | command = "cd ${var.scripts_src_directory}; ./delay.sh before_setup" 136 | } 137 | triggers = { 138 | "before" = "${element(null_resource.vm_pre_setup.*.id, count.index)}" 139 | } 140 | } 141 | 142 | resource "null_resource" "vm_setup" { 143 | depends_on = ["null_resource.delay_before"] 144 | count = "${var.instance_count}" 145 | provisioner "remote-exec" { 146 | connection { 147 | agent = false 148 | timeout = "30m" 149 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 150 | user = "${var.ssh_username}" 151 | private_key = "${file(var.ssh_private_key_file)}" 152 | } 153 | inline = [ 154 | "cd ${var.scripts_dst_directory}", 155 | "chmod +x vm_setup.sh", 156 | "./vm_setup.sh mds" 157 | ] 158 | } 159 | } 160 | 161 | resource "null_resource" "delay_after" { 162 | count = "${var.instance_count}" 163 | provisioner "local-exec" { 164 | command = "cd ${var.scripts_src_directory}; ./delay.sh after_setup" 165 | } 166 | triggers = { 167 | "before" = "${element(null_resource.vm_setup.*.id, count.index)}" 168 | } 169 | } 170 | 171 | resource "null_resource" "vm_post_setup" { 172 | depends_on = ["null_resource.delay_after"] 173 | count = "${var.instance_count}" 174 | provisioner "remote-exec" { 175 | connection { 176 | agent = false 177 | timeout = "30m" 178 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 179 | user = "${var.ssh_username}" 180 | private_key = "${file(var.ssh_private_key_file)}" 181 | } 182 | inline = [ 183 | "cd ${var.scripts_dst_directory}", 184 | "chmod +x vm_post_setup.sh", 185 | "./vm_post_setup.sh mds" 186 | ] 187 | } 188 | } 189 | 190 | 
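#------------------------------------------------------------------------------------
# How the ordering above works: the delay resources carry no depends_on. Instead,
# interpolating the upstream resource's id inside "triggers" both creates the implicit
# dependency edge and re-creates the delay (re-running delay.sh) whenever the upstream
# step is re-created. Reduced to a sketch, with a plain sleep standing in for delay.sh
# (which reads its duration from ceph.config):
#
# resource "null_resource" "wait" {
#   triggers = {
#     "before" = "${null_resource.some_step.id}"
#   }
#   provisioner "local-exec" {
#     command = "sleep 60"
#   }
# }
#------------------------------------------------------------------------------------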
#------------------------------------------------------------------------------------ 191 | # Setup Ceph MDS Instances 192 | #------------------------------------------------------------------------------------ 193 | resource "null_resource" "setup" { 194 | depends_on = ["null_resource.vm_post_setup"] 195 | count = "${var.instance_count}" 196 | provisioner "remote-exec" { 197 | connection { 198 | agent = false 199 | timeout = "30m" 200 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 201 | user = "${var.ssh_username}" 202 | private_key = "${file(var.ssh_private_key_file)}" 203 | } 204 | inline = [ 205 | "cd ${var.scripts_dst_directory}", 206 | "chmod +x yum_repo_setup.sh", 207 | "./yum_repo_setup.sh", 208 | "chmod +x ceph_firewall_setup.sh", 209 | "./ceph_firewall_setup.sh mds" 210 | ] 211 | } 212 | } 213 | 214 | #------------------------------------------------------------------------------------ 215 | # Passwordless SSH Setup (from deployer to MDS instances) 216 | # - Get the ssh key from the Ceph Deployer Instance and install on the MDS instances 217 | #------------------------------------------------------------------------------------ 218 | resource "null_resource" "wait_for_deployer_deploy" { 219 | depends_on = ["null_resource.setup"] 220 | count = "${var.instance_count}" 221 | provisioner "local-exec" { 222 | command = "echo 'Waited for Deployer Setup (${var.deployer_deploy}) to complete'" 223 | } 224 | } 225 | 226 | resource "null_resource" "copy_key" { 227 | count = "${var.instance_count}" 228 | depends_on = ["null_resource.setup", "null_resource.wait_for_deployer_deploy"] 229 | provisioner "local-exec" { 230 | command = "${var.scripts_src_directory}/install_ssh_key.sh ${var.ceph_deployer_ip} ${element(oci_core_instance.instance.*.private_ip, count.index)}" 231 | } 232 | } 233 | 234 | resource "null_resource" "add_to_deployer_known_hosts" { 235 | count = "${var.instance_count}" 236 | depends_on = ["null_resource.copy_key"] 237 | provisioner "remote-exec" { 238 | connection { 239 | agent = false 240 | timeout = "30m" 241 | host = "${var.ceph_deployer_ip}" 242 | user = "${var.ssh_username}" 243 | private_key = "${file(var.ssh_private_key_file)}" 244 | } 245 | inline = [ 246 | "cd ${var.scripts_dst_directory}", 247 | "./add_to_etc_hosts.sh ${element(oci_core_instance.instance.*.private_ip, count.index)} ${element(oci_core_instance.instance.*.hostname_label, count.index)}", 248 | "./add_to_known_hosts.sh ${element(oci_core_instance.instance.*.private_ip, count.index)} ${element(oci_core_instance.instance.*.hostname_label, count.index)}" 249 | ] 250 | } 251 | } 252 | 253 | #------------------------------------------------------------------------------------ 254 | # Deploy the package and configure from the ceph deployer 255 | #------------------------------------------------------------------------------------ 256 | resource "null_resource" "wait_for_cluster_create" { 257 | count = "${var.instance_count}" 258 | provisioner "local-exec" { 259 | command = "echo 'Waited for new cluster creation (${var.new_cluster}) to complete'" 260 | } 261 | } 262 | 263 | resource "null_resource" "deploy" { 264 | count = "${var.instance_count}" 265 | depends_on = ["null_resource.add_to_deployer_known_hosts", "null_resource.wait_for_cluster_create"] 266 | provisioner "remote-exec" { 267 | connection { 268 | agent = false 269 | timeout = "30m" 270 | host = "${var.ceph_deployer_ip}" 271 | user = "${var.ssh_username}" 272 | private_key = "${file(var.ssh_private_key_file)}" 273 | } 274 | inline = [ 275 | "cd ${var.scripts_dst_directory}", 276 | "./ceph_deploy_mds.sh ${join(" ", oci_core_instance.instance.*.hostname_label)}" 277
| ] 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /modules/ceph-mds/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "ip_list" { 3 | value = "${oci_core_instance.instance.*.private_ip}" 4 | } 5 | 6 | output "hostname_list" { 7 | value = "${oci_core_instance.instance.*.hostname_label}" 8 | } 9 | -------------------------------------------------------------------------------- /modules/ceph-mds/variables.tf: -------------------------------------------------------------------------------- 1 | #=============================================================== 2 | # Module Inputs 3 | #=============================================================== 4 | 5 | variable "tenancy_ocid" { 6 | description = "The OCI tenancy id" 7 | } 8 | 9 | variable "compartment_ocid" { 10 | description = "The OCI compartment id" 11 | } 12 | 13 | variable "instance_os" { 14 | description = "The Name of the Operating System for the instance" 15 | } 16 | 17 | variable "instance_count" { 18 | description = "The number of instances to create" 19 | } 20 | 21 | variable "availability_domain_index_list" { 22 | description = "The availability domains where the instances will be created (as a list of indexes)" 23 | type = "list" 24 | } 25 | 26 | variable "hostname_prefix" { 27 | description = "The prefix to the instance names" 28 | } 29 | 30 | variable "shape" { 31 | description = "The compute shape of the instances" 32 | } 33 | 34 | variable "subnet_id_list" { 35 | description = "The subnets where the instances will be created" 36 | type = "list" 37 | } 38 | 39 | variable "ssh_public_key_file" { 40 | description = "The public key that will be installed on to the new instance(s) for ssh login" 41 | } 42 | 43 | variable "ssh_private_key_file" { 44 | description = "The private key for ssh login to the new instance(s)" 45 | } 46 | 47 | variable "ssh_username" { 48 | description = "The username for ssh login to the instance(s)" 49 | } 50 | 51 | variable "ceph_deployer_ip" { 52 | description = "The IP of the Ceph deployer node" 53 | } 54 | 55 | variable "instance_create_timeout" { 56 | description = "The timeout value for instance creation" 57 | } 58 | 59 | variable "scripts_src_directory" { 60 | description = "Path to the directory where the scripts and config files are" 61 | } 62 | 63 | variable "scripts_dst_directory" { 64 | description = "Path to the directory where the scripts and config files will be copied to" 65 | } 66 | 67 | variable "deployer_deploy" { 68 | description = "A synchronization primitive for letting TF know that the deployment for the deployer is complete" 69 | } 70 | 71 | variable "new_cluster" { 72 | description = "A synchronization primitive for letting TF know that the creation of the new cluster is complete" 73 | } 74 | -------------------------------------------------------------------------------- /modules/ceph-monitor/main.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------ 2 | # Get a list of Availability Domains 3 | #------------------------------------------------------------------------------------ 4 | data "oci_identity_availability_domains" "ADs" { 5 | compartment_id = "${var.tenancy_ocid}" 6 | } 7 | 8 | #------------------------------------------------------------------------------------ 9 | # Get the OCID of the OS image to use 10 |
#------------------------------------------------------------------------------------ 11 | data "oci_core_images" "image_ocid" { 12 | compartment_id = "${var.compartment_ocid}" 13 | display_name = "${var.instance_os}" 14 | } 15 | 16 | #------------------------------------------------------------------------------------ 17 | # Create Ceph Monitor Server Instances 18 | #------------------------------------------------------------------------------------ 19 | resource "oci_core_instance" "instance" { 20 | count = "${var.instance_count}" 21 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain_index_list[count.index] - 1],"name")}" 22 | compartment_id = "${var.compartment_ocid}" 23 | display_name = "${var.hostname_prefix}-${count.index}" 24 | hostname_label = "${var.hostname_prefix}-${count.index}" 25 | shape = "${var.shape}" 26 | subnet_id = "${var.subnet_id_list[var.availability_domain_index_list[count.index] - 1]}" 27 | source_details { 28 | source_type = "image" 29 | source_id = "${lookup(data.oci_core_images.image_ocid.images[0], "id")}" 30 | } 31 | metadata { 32 | ssh_authorized_keys = "${file(var.ssh_public_key_file)}" 33 | } 34 | connection { 35 | host = "${self.private_ip}" 36 | type = "ssh" 37 | user = "${var.ssh_username}" 38 | private_key = "${file(var.ssh_private_key_file)}" 39 | } 40 | provisioner "remote-exec" { 41 | inline = [ 42 | " mkdir ~/${var.scripts_dst_directory}", 43 | ] 44 | } 45 | provisioner "file" { 46 | source = "${var.scripts_src_directory}/ceph.config" 47 | destination = "~/${var.scripts_dst_directory}/ceph.config" 48 | } 49 | provisioner "file" { 50 | source = "${var.scripts_src_directory}/vm_init.sh" 51 | destination = "~/${var.scripts_dst_directory}/vm_init.sh" 52 | } 53 | provisioner "file" { 54 | source = "${var.scripts_src_directory}/vm_pre_setup.sh" 55 | destination = "~/${var.scripts_dst_directory}/vm_pre_setup.sh" 56 | } 57 | provisioner "file" { 58 | source = "${var.scripts_src_directory}/vm_setup.sh" 59 | destination = "~/${var.scripts_dst_directory}/vm_setup.sh" 60 | } 61 | provisioner "file" { 62 | source = "${var.scripts_src_directory}/vm_post_setup.sh" 63 | destination = "~/${var.scripts_dst_directory}/vm_post_setup.sh" 64 | } 65 | provisioner "file" { 66 | source = "${var.scripts_src_directory}/yum_repo_setup.sh" 67 | destination = "~/${var.scripts_dst_directory}/yum_repo_setup.sh" 68 | } 69 | provisioner "file" { 70 | source = "${var.scripts_src_directory}/ceph_yum_repo" 71 | destination = "~/${var.scripts_dst_directory}/ceph_yum_repo" 72 | } 73 | provisioner "file" { 74 | source = "${var.scripts_src_directory}/ceph_firewall_setup.sh" 75 | destination = "~/${var.scripts_dst_directory}/ceph_firewall_setup.sh" 76 | } 77 | timeouts { 78 | create = "${var.instance_create_timeout}" 79 | } 80 | } 81 | 82 | #------------------------------------------------------------------------------------ 83 | # Initialize the VM 84 | #------------------------------------------------------------------------------------ 85 | resource "null_resource" "vm_init" { 86 | depends_on = ["oci_core_instance.instance"] 87 | count = "${var.instance_count}" 88 | provisioner "remote-exec" { 89 | connection { 90 | agent = false 91 | timeout = "30m" 92 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 93 | user = "${var.ssh_username}" 94 | private_key = "${file(var.ssh_private_key_file)}" 95 | } 96 | inline = [ 97 | "cd ${var.scripts_dst_directory}", 98 | "chmod +x vm_init.sh", 99 | 
"./vm_init.sh monitor" 100 | ] 101 | } 102 | } 103 | 104 | #------------------------------------------------------------------------------------ 105 | # Setup the VM. 106 | # Setup involves: 107 | # 1. Pre Setup 108 | # 2. Waiting (adding delay for the duration specified in the ceph.config file) 109 | # 3. Setup 110 | # 4. Waiting (adding delay for the duration specified in the ceph.config file) 111 | # 5. Post Setup 112 | #------------------------------------------------------------------------------------ 113 | resource "null_resource" "vm_pre_setup" { 114 | depends_on = ["null_resource.vm_init"] 115 | count = "${var.instance_count}" 116 | provisioner "remote-exec" { 117 | connection { 118 | agent = false 119 | timeout = "30m" 120 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 121 | user = "${var.ssh_username}" 122 | private_key = "${file(var.ssh_private_key_file)}" 123 | } 124 | inline = [ 125 | "cd ${var.scripts_dst_directory}", 126 | "chmod +x vm_pre_setup.sh", 127 | "./vm_pre_setup.sh monitor" 128 | ] 129 | } 130 | } 131 | 132 | resource "null_resource" "delay_before" { 133 | count = "${var.instance_count}" 134 | provisioner "local-exec" { 135 | command = "cd ${var.scripts_src_directory}; ./delay.sh before_setup" 136 | } 137 | triggers = { 138 | "before" = "${element(null_resource.vm_pre_setup.*.id, count.index)}" 139 | } 140 | } 141 | 142 | resource "null_resource" "vm_setup" { 143 | depends_on = ["null_resource.delay_before"] 144 | count = "${var.instance_count}" 145 | provisioner "remote-exec" { 146 | connection { 147 | agent = false 148 | timeout = "30m" 149 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 150 | user = "${var.ssh_username}" 151 | private_key = "${file(var.ssh_private_key_file)}" 152 | } 153 | inline = [ 154 | "cd ${var.scripts_dst_directory}", 155 | "chmod +x vm_setup.sh", 156 | "./vm_setup.sh monitor" 157 | ] 158 | } 159 | } 160 | 161 | resource "null_resource" "delay_after" { 162 | count = "${var.instance_count}" 163 | provisioner "local-exec" { 164 | command = "cd ${var.scripts_src_directory}; ./delay.sh after_setup" 165 | } 166 | triggers = { 167 | "before" = "${element(null_resource.vm_setup.*.id, count.index)}" 168 | } 169 | } 170 | 171 | resource "null_resource" "vm_post_setup" { 172 | depends_on = ["null_resource.delay_after"] 173 | count = "${var.instance_count}" 174 | provisioner "remote-exec" { 175 | connection { 176 | agent = false 177 | timeout = "30m" 178 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 179 | user = "${var.ssh_username}" 180 | private_key = "${file(var.ssh_private_key_file)}" 181 | } 182 | inline = [ 183 | "cd ${var.scripts_dst_directory}", 184 | "chmod +x vm_post_setup.sh", 185 | "./vm_post_setup.sh monitor" 186 | ] 187 | } 188 | } 189 | 190 | #------------------------------------------------------------------------------------ 191 | # Setup Ceph Monitor Instances 192 | #------------------------------------------------------------------------------------ 193 | resource "null_resource" "setup" { 194 | depends_on = ["null_resource.vm_post_setup"] 195 | count = "${var.instance_count}" 196 | provisioner "remote-exec" { 197 | connection { 198 | agent = false 199 | timeout = "30m" 200 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 201 | user = "${var.ssh_username}" 202 | private_key = "${file(var.ssh_private_key_file)}" 203 | } 204 | inline = [ 205 | "cd ${var.scripts_dst_directory}", 206 | "chmod +x yum_repo_setup.sh", 207 | 
"./yum_repo_setup.sh", 208 | "chmod +x ceph_firewall_setup.sh", 209 | "./ceph_firewall_setup.sh monitor" 210 | ] 211 | } 212 | } 213 | 214 | #------------------------------------------------------------------------------------ 215 | # Passwordless SSH Setup 216 | # - Get the ssh key from the Ceph Deployer Instance and install on the Monitors 217 | #------------------------------------------------------------------------------------ 218 | resource "null_resource" "wait_for_deployer_deploy" { 219 | depends_on = ["null_resource.setup"] 220 | provisioner "local-exec" { 221 | command = "echo 'Waited for Deployer Ceph Deployment (${var.deployer_deploy}) to complete'" 222 | } 223 | } 224 | 225 | resource "null_resource" "copy_key" { 226 | depends_on = ["null_resource.setup", "null_resource.wait_for_deployer_deploy"] 227 | count = "${var.instance_count}" 228 | provisioner "local-exec" { 229 | command = "${var.scripts_src_directory}/install_ssh_key.sh ${var.ceph_deployer_ip} ${element(oci_core_instance.instance.*.private_ip, count.index)}" 230 | } 231 | } 232 | 233 | resource "null_resource" "add_to_deployer_known_hosts" { 234 | depends_on = ["null_resource.copy_key"] 235 | count = "${var.instance_count}" 236 | provisioner "remote-exec" { 237 | connection { 238 | agent = false 239 | timeout = "30m" 240 | host = "${var.ceph_deployer_ip}" 241 | user = "${var.ssh_username}" 242 | private_key = "${file(var.ssh_private_key_file)}" 243 | } 244 | inline = [ 245 | "cd ${var.scripts_dst_directory}", 246 | "./add_to_etc_hosts.sh ${element(oci_core_instance.instance.*.private_ip, count.index)} ${element(oci_core_instance.instance.*.hostname_label, count.index)}", 247 | "./add_to_known_hosts.sh ${element(oci_core_instance.instance.*.private_ip, count.index)} ${element(oci_core_instance.instance.*.hostname_label, count.index)}", 248 | ] 249 | } 250 | } 251 | 252 | #------------------------------------------------------------------------------------ 253 | # Create a new cluster 254 | #------------------------------------------------------------------------------------ 255 | resource "null_resource" "create_new_cluster" { 256 | depends_on = ["null_resource.add_to_deployer_known_hosts", "null_resource.wait_for_deployer_deploy"] 257 | provisioner "remote-exec" { 258 | connection { 259 | agent = false 260 | timeout = "30m" 261 | host = "${var.ceph_deployer_ip}" 262 | user = "${var.ssh_username}" 263 | private_key = "${file(var.ssh_private_key_file)}" 264 | } 265 | inline = [ 266 | "cd ${var.scripts_dst_directory}", 267 | "./ceph_new_cluster.sh ${join(" ", oci_core_instance.instance.*.hostname_label)}" 268 | ] 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /modules/ceph-monitor/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "ip_list" { 3 | value = "${oci_core_instance.instance.*.private_ip}" 4 | } 5 | 6 | output "hostname_list" { 7 | value = "${oci_core_instance.instance.*.hostname_label}" 8 | } 9 | 10 | output "new_cluster" { 11 | value = "${null_resource.create_new_cluster.id}" 12 | } 13 | 14 | -------------------------------------------------------------------------------- /modules/ceph-monitor/variables.tf: -------------------------------------------------------------------------------- 1 | #=============================================================== 2 | # Module Inputs 3 | #=============================================================== 4 | 5 | variable "tenancy_ocid" { 6 | description = "The 
OCI tenancy id" 7 | } 8 | 9 | variable "compartment_ocid" { 10 | description = "The OCI compartment id" 11 | } 12 | 13 | variable "instance_os" { 14 | description = "The Name of the Operating System for all Monitors" 15 | } 16 | 17 | variable "instance_count" { 18 | description = "The Number of Monitors to create" 19 | } 20 | 21 | variable "availability_domain_index_list" { type = "list" 22 | description = "The availability domains where the Monitors will be created (as a list of indexes)" 23 | } 24 | 25 | variable "hostname_prefix" { 26 | description = "The prefix to the name of the instances. The name will be appended by an hyphen, followed by an integer starting at 0" 27 | } 28 | 29 | variable "shape" { 30 | description = "The compute shape of the instances" 31 | } 32 | 33 | variable "subnet_id_list" { type = "list" 34 | description = "The subnets where the instances will be created (as a list of subnet ids)" 35 | } 36 | 37 | variable "ssh_public_key_file" { 38 | description = "The public key that will be installed on to the new instance(s) for ssh login" 39 | } 40 | 41 | variable "ssh_private_key_file" { 42 | description = "The private key for ssh login to the new instance(s)" 43 | } 44 | 45 | variable "ssh_username" { 46 | description = "The username for ssh login to the instance(s)" 47 | } 48 | 49 | variable "ceph_deployer_ip" { 50 | description = "The IP of the Ceph deployer node" 51 | } 52 | 53 | variable "instance_create_timeout" { 54 | description = "The timeout value for instance creation" 55 | } 56 | 57 | variable "scripts_src_directory" { 58 | description = "Path to the directory where the scripts and config files are" 59 | } 60 | 61 | variable "scripts_dst_directory" { 62 | description = "Path to the directory where the scripts and config files will be copied to" 63 | } 64 | 65 | variable "deployer_deploy" { 66 | description = "A synchronization primitive for letting TF know that the deployer has completed Ceph deploy" 67 | } 68 | -------------------------------------------------------------------------------- /modules/ceph-osd/main.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------ 2 | # Get a list of Availability Domains 3 | #------------------------------------------------------------------------------------ 4 | data "oci_identity_availability_domains" "ADs" { 5 | compartment_id = "${var.tenancy_ocid}" 6 | } 7 | 8 | #------------------------------------------------------------------------------------ 9 | # Get the OCID of the OS image to use 10 | #------------------------------------------------------------------------------------ 11 | data "oci_core_images" "image_ocid" { 12 | compartment_id = "${var.compartment_ocid}" 13 | display_name = "${var.instance_os}" 14 | } 15 | 16 | #------------------------------------------------------------------------------------ 17 | # Create Ceph OSD Server Instances 18 | #------------------------------------------------------------------------------------ 19 | resource "oci_core_instance" "instance" { 20 | count = "${var.instance_count}" 21 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain_index_list[count.index] - 1],"name")}" 22 | compartment_id = "${var.compartment_ocid}" 23 | display_name = "${var.hostname_prefix}-${count.index}" 24 | hostname_label = "${var.hostname_prefix}-${count.index}" 25 | shape = "${var.shape}" 26 | subnet_id =
"${var.subnet_id_list[var.availability_domain_index_list[count.index] - 1]}" 27 | source_details { 28 | source_type = "image" 29 | source_id = "${lookup(data.oci_core_images.image_ocid.images[0], "id")}" 30 | } 31 | metadata { 32 | ssh_authorized_keys = "${file(var.ssh_public_key_file)}" 33 | } 34 | connection { 35 | host = "${self.private_ip}" 36 | type = "ssh" 37 | user = "${var.ssh_username}" 38 | private_key = "${file(var.ssh_private_key_file)}" 39 | } 40 | provisioner "remote-exec" { 41 | inline = [ 42 | " mkdir ~/${var.scripts_dst_directory}", 43 | ] 44 | } 45 | provisioner "file" { 46 | source = "${var.scripts_src_directory}/ceph.config" 47 | destination = "~/${var.scripts_dst_directory}/ceph.config" 48 | } 49 | provisioner "file" { 50 | source = "${var.scripts_src_directory}/vm_init.sh" 51 | destination = "~/${var.scripts_dst_directory}/vm_init.sh" 52 | } 53 | provisioner "file" { 54 | source = "${var.scripts_src_directory}/vm_pre_setup.sh" 55 | destination = "~/${var.scripts_dst_directory}/vm_pre_setup.sh" 56 | } 57 | provisioner "file" { 58 | source = "${var.scripts_src_directory}/vm_setup.sh" 59 | destination = "~/${var.scripts_dst_directory}/vm_setup.sh" 60 | } 61 | provisioner "file" { 62 | source = "${var.scripts_src_directory}/vm_post_setup.sh" 63 | destination = "~/${var.scripts_dst_directory}/vm_post_setup.sh" 64 | } 65 | provisioner "file" { 66 | source = "${var.scripts_src_directory}/yum_repo_setup.sh" 67 | destination = "~/${var.scripts_dst_directory}/yum_repo_setup.sh" 68 | } 69 | provisioner "file" { 70 | source = "${var.scripts_src_directory}/ceph_yum_repo" 71 | destination = "~/${var.scripts_dst_directory}/ceph_yum_repo" 72 | } 73 | provisioner "file" { 74 | source = "${var.scripts_src_directory}/ceph_firewall_setup.sh" 75 | destination = "~/${var.scripts_dst_directory}/ceph_firewall_setup.sh" 76 | } 77 | timeouts { 78 | create = "${var.instance_create_timeout}" 79 | } 80 | } 81 | 82 | #----------------------------------------------------------------------------------- 83 | # Create Storage for the Ceph OSD Instances 84 | #------------------------------------------------------------------------------------ 85 | module "storage" { 86 | source = "./storage" 87 | instance_count = "${var.instance_count * var.create_volume}" 88 | instance_id = "${oci_core_instance.instance.*.id}" 89 | compartment_id = "${var.compartment_ocid}" 90 | availability_domain = "${data.oci_identity_availability_domains.ADs.availability_domains}" 91 | availability_domain_index = "${var.availability_domain_index_list}" 92 | volume_name_prefix = "${var.volume_name_prefix}" 93 | volume_size_in_gbs = "${var.volume_size_in_gbs}" 94 | volume_attachment_type = "${var.volume_attachment_type}" 95 | host_addresses = "${oci_core_instance.instance.*.private_ip}" 96 | ssh_private_key = "${file(var.ssh_private_key_file)}" 97 | } 98 | 99 | #------------------------------------------------------------------------------------ 100 | # Initialize the VM 101 | #------------------------------------------------------------------------------------ 102 | resource "null_resource" "vm_init" { 103 | depends_on = ["oci_core_instance.instance"] 104 | count = "${var.instance_count}" 105 | provisioner "remote-exec" { 106 | connection { 107 | agent = false 108 | timeout = "30m" 109 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 110 | user = "${var.ssh_username}" 111 | private_key = "${file(var.ssh_private_key_file)}" 112 | } 113 | inline = [ 114 | "cd ${var.scripts_dst_directory}", 115 | "chmod +x 
vm_init.sh", 116 | "./vm_init.sh osd" 117 | ] 118 | } 119 | } 120 | 121 | #------------------------------------------------------------------------------------ 122 | # Setup the VM. 123 | # Setup involves: 124 | # 1. Pre Setup 125 | # 2. Waiting (adding delay for the duration specified in the ceph.config file) 126 | # 3. Setup 127 | # 4. Waiting (adding delay for the duration specified in the ceph.config file) 128 | # 5. Post Setup 129 | #------------------------------------------------------------------------------------ 130 | resource "null_resource" "vm_pre_setup" { 131 | depends_on = ["null_resource.vm_init"] 132 | count = "${var.instance_count}" 133 | provisioner "remote-exec" { 134 | connection { 135 | agent = false 136 | timeout = "30m" 137 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 138 | user = "${var.ssh_username}" 139 | private_key = "${file(var.ssh_private_key_file)}" 140 | } 141 | inline = [ 142 | "cd ${var.scripts_dst_directory}", 143 | "chmod +x vm_pre_setup.sh", 144 | "./vm_pre_setup.sh osd" 145 | ] 146 | } 147 | } 148 | 149 | resource "null_resource" "delay_before" { 150 | count = "${var.instance_count}" 151 | provisioner "local-exec" { 152 | command = "cd ${var.scripts_src_directory}; ./delay.sh before_setup" 153 | } 154 | triggers = { 155 | "before" = "${element(null_resource.vm_pre_setup.*.id, count.index)}" 156 | } 157 | } 158 | 159 | resource "null_resource" "vm_setup" { 160 | depends_on = ["null_resource.delay_before"] 161 | count = "${var.instance_count}" 162 | provisioner "remote-exec" { 163 | connection { 164 | agent = false 165 | timeout = "30m" 166 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 167 | user = "${var.ssh_username}" 168 | private_key = "${file(var.ssh_private_key_file)}" 169 | } 170 | inline = [ 171 | "cd ${var.scripts_dst_directory}", 172 | "chmod +x vm_setup.sh", 173 | "./vm_setup.sh osd" 174 | ] 175 | } 176 | } 177 | 178 | resource "null_resource" "delay_after" { 179 | count = "${var.instance_count}" 180 | provisioner "local-exec" { 181 | command = "cd ${var.scripts_src_directory}; ./delay.sh after_setup" 182 | } 183 | triggers = { 184 | "before" = "${element(null_resource.vm_setup.*.id, count.index)}" 185 | } 186 | } 187 | 188 | resource "null_resource" "vm_post_setup" { 189 | depends_on = ["null_resource.delay_after"] 190 | count = "${var.instance_count}" 191 | provisioner "remote-exec" { 192 | connection { 193 | agent = false 194 | timeout = "30m" 195 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 196 | user = "${var.ssh_username}" 197 | private_key = "${file(var.ssh_private_key_file)}" 198 | } 199 | inline = [ 200 | "cd ${var.scripts_dst_directory}", 201 | "chmod +x vm_post_setup.sh", 202 | "./vm_post_setup.sh osd" 203 | ] 204 | } 205 | } 206 | #------------------------------------------------------------------------------------ 207 | # Setup Ceph OSD Instances 208 | #------------------------------------------------------------------------------------ 209 | resource "null_resource" "setup" { 210 | depends_on = ["null_resource.vm_post_setup"] 211 | count = "${var.instance_count}" 212 | provisioner "remote-exec" { 213 | connection { 214 | agent = false 215 | timeout = "30m" 216 | host = "${element(oci_core_instance.instance.*.private_ip, count.index)}" 217 | user = "${var.ssh_username}" 218 | private_key = "${file(var.ssh_private_key_file)}" 219 | } 220 | inline = [ 221 | "cd ${var.scripts_dst_directory}", 222 | "chmod +x yum_repo_setup.sh", 223 | 
"./yum_repo_setup.sh", 224 | "chmod +x ceph_firewall_setup.sh", 225 | "./ceph_firewall_setup.sh osd" 226 | ] 227 | } 228 | } 229 | 230 | #------------------------------------------------------------------------------------ 231 | # Passwordless SSH Setup (from deployer to OSDs) 232 | # - Get the ssh key from the Ceph Deployer Instance and install on OSDs 233 | #------------------------------------------------------------------------------------ 234 | resource "null_resource" "wait_for_deployer_deploy" { 235 | depends_on = ["null_resource.setup"] 236 | provisioner "local-exec" { 237 | command = "echo 'Waited for Deployer Deploy(${var.deployer_deploy}) to complete'" 238 | } 239 | } 240 | 241 | resource "null_resource" "copy_key" { 242 | depends_on = ["null_resource.setup", "null_resource.wait_for_deployer_deploy"] 243 | count = "${var.instance_count}" 244 | provisioner "local-exec" { 245 | command = "${var.scripts_src_directory}/install_ssh_key.sh ${var.ceph_deployer_ip} ${element(oci_core_instance.instance.*.private_ip, count.index)}" 246 | } 247 | } 248 | 249 | resource "null_resource" "add_to_deployer_known_hosts" { 250 | depends_on = ["null_resource.copy_key"] 251 | count = "${var.instance_count}" 252 | provisioner "remote-exec" { 253 | connection { 254 | agent = false 255 | timeout = "30m" 256 | host = "${var.ceph_deployer_ip}" 257 | user = "${var.ssh_username}" 258 | private_key = "${file(var.ssh_private_key_file)}" 259 | } 260 | inline = [ 261 | "cd ${var.scripts_dst_directory}", 262 | "./add_to_etc_hosts.sh ${element(oci_core_instance.instance.*.private_ip, count.index)} ${element(oci_core_instance.instance.*.hostname_label, count.index)}", 263 | "./add_to_known_hosts.sh ${element(oci_core_instance.instance.*.private_ip, count.index)} ${element(oci_core_instance.instance.*.hostname_label, count.index)}" 264 | ] 265 | } 266 | } 267 | 268 | #------------------------------------------------------------------------------------ 269 | # Deploy ceph on OSDs 270 | #------------------------------------------------------------------------------------ 271 | resource "null_resource" "wait_for_cluster_create" { 272 | provisioner "local-exec" { 273 | command = "echo 'Waited for create new cluster ${var.new_cluster} to complete'" 274 | } 275 | } 276 | 277 | resource "null_resource" "deploy" { 278 | depends_on = ["null_resource.add_to_deployer_known_hosts", "null_resource.wait_for_cluster_create"] 279 | provisioner "remote-exec" { 280 | connection { 281 | agent = false 282 | timeout = "30m" 283 | host = "${var.ceph_deployer_ip}" 284 | user = "${var.ssh_username}" 285 | private_key = "${file(var.ssh_private_key_file)}" 286 | } 287 | inline = [ 288 | "cd ${var.scripts_dst_directory}", 289 | "./ceph_deploy_osd.sh ${element(var.block_device_for_ceph, var.create_volume)} ${join(" ", oci_core_instance.instance.*.hostname_label)}" 290 | ] 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /modules/ceph-osd/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "ip_list" { 3 | value = "${oci_core_instance.instance.*.private_ip}" 4 | } 5 | 6 | output "hostname_list" { 7 | value = "${oci_core_instance.instance.*.hostname_label}" 8 | } 9 | 10 | output "deploy" { 11 | value = "${null_resource.deploy.id}" 12 | } 13 | -------------------------------------------------------------------------------- /modules/ceph-osd/storage/main.tf: -------------------------------------------------------------------------------- 
1 | 2 | resource "oci_core_volume" "vol" { 3 | count = "${var.instance_count}" 4 | availability_domain = "${lookup(var.availability_domain[var.availability_domain_index[count.index] - 1], "name")}" 5 | compartment_id = "${var.compartment_id}" 6 | display_name = "${var.volume_name_prefix}-${count.index}" 7 | size_in_gbs = "${var.volume_size_in_gbs}" 8 | } 9 | 10 | resource "oci_core_volume_attachment" "attach" { 11 | depends_on = [ "oci_core_volume.vol" ] 12 | count = "${var.instance_count}" 13 | compartment_id = "${var.compartment_id}" 14 | attachment_type = "${var.volume_attachment_type}" 15 | instance_id = "${element(var.instance_id, count.index)}" 16 | volume_id = "${element(oci_core_volume.vol.*.id, count.index)}" 17 | } 18 | 19 | resource "null_resource" "iscsi-setup" { 20 | depends_on = ["oci_core_volume.vol", "oci_core_volume_attachment.attach"] 21 | count = "${var.instance_count}" 22 | provisioner "remote-exec" { 23 | connection { 24 | agent = false 25 | timeout = "30m" 26 | host = "${element(var.host_addresses, count.index)}" 27 | user = "${var.username}" 28 | private_key = "${var.ssh_private_key}" 29 | } 30 | inline = [ 31 | "sudo iscsiadm -m node -o new -T ${element(oci_core_volume_attachment.attach.*.iqn, count.index)} -p ${element(oci_core_volume_attachment.attach.*.ipv4, count.index)}:${element(oci_core_volume_attachment.attach.*.port, count.index)}", 32 | "sudo iscsiadm -m node -o update -T ${element(oci_core_volume_attachment.attach.*.iqn, count.index)} -n node.startup -v automatic", 33 | "echo sudo iscsiadm -m node -T ${element(oci_core_volume_attachment.attach.*.iqn, count.index)} -p ${element(oci_core_volume_attachment.attach.*.ipv4, count.index)}:${element(oci_core_volume_attachment.attach.*.port, count.index)} -l >> ~/.bashrc" 34 | ] 35 | } 36 | } 37 | 38 | -------------------------------------------------------------------------------- /modules/ceph-osd/storage/variables.tf: -------------------------------------------------------------------------------- 1 | #=============================================================== 2 | # Module Inputs 3 | #=============================================================== 4 | 5 | variable "compartment_id" { 6 | description = "The OCI compartment id" 7 | } 8 | 9 | variable "instance_id" { 10 | description = "The list of compute instance ids to which volume will be attached" 11 | type = "list" 12 | } 13 | 14 | variable "instance_count" { 15 | description = "The number of compute instances; this many volumes will be created" 16 | default = "1" 17 | } 18 | 19 | variable "username" { 20 | description = "The username for login to the instance(s)" 21 | default = "opc" 22 | } 23 | 24 | variable "ssh_private_key" { 25 | description = "The ssh private key for login to the instance(s)" 26 | } 27 | 28 | variable "availability_domain" { 29 | description = "The list of availability domains" 30 | type = "list" 31 | } 32 | 33 | variable "availability_domain_index" { 34 | description = "The index for the availability domain in the list; this is where the volume will be created" 35 | type = "list" 36 | } 37 | 38 | variable "volume_name_prefix" { 39 | description = "The prefix to the name of the volume. The name will be appended by an hyphen, followed by an integer starting at 0" 40 | } 41 | 42 | variable "volume_size_in_gbs" { 43 | description = "The size of the volume in GB" 44 | default = "1024" 45 | } 46 | 47 | variable "volume_attachment_type" { 48 | description = "The type of attachment to the instance.
Currently, iscsi is the only option" 49 | default = "iscsi" 50 | } 51 | 52 | variable "host_addresses" { 53 | description = "The IP addresses of the host instances" 54 | type = "list" 55 | } 56 | 57 | -------------------------------------------------------------------------------- /modules/ceph-osd/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | #=============================================================== 3 | # Module Inputs 4 | #=============================================================== 5 | 6 | variable "tenancy_ocid" { 7 | description = "The OCI tenancy id" 8 | } 9 | 10 | variable "compartment_ocid" { 11 | description = "The OCI compartment id" 12 | } 13 | 14 | variable "instance_os" { 15 | description = "The Name of the Operating System for all OSDs" 16 | } 17 | 18 | variable "instance_count" { 19 | description = "The Number of OSDs to create" 20 | } 21 | 22 | variable "availability_domain_index_list" { 23 | description = "The availability domains where the OSDs will be created (as a list of indexes)" 24 | type = "list" 25 | } 26 | 27 | variable "hostname_prefix" { 28 | description = "The prefix to the name of the instances. The name will be appended by an hyphen, followed by an integer starting at 0" 29 | } 30 | 31 | variable "shape" { 32 | description = "The compute shape of the instances" 33 | } 34 | 35 | variable "subnet_id_list" { 36 | description = "The subnets where the OSDs will be created (as a list of subnet ids)" 37 | type = "list" 38 | } 39 | 40 | variable "ssh_public_key_file" { 41 | description = "The public key that will be installed on to the new instance(s) for ssh login" 42 | } 43 | 44 | variable "ssh_private_key_file" { 45 | description = "The private key for ssh login to the new instance(s)" 46 | } 47 | 48 | variable "ssh_username" { 49 | description = "The username for ssh login to the instance(s)" 50 | } 51 | 52 | variable "ceph_deployer_ip" { 53 | description = "The IP of the Ceph deployer node" 54 | } 55 | 56 | variable "instance_create_timeout" { 57 | description = "The timeout value for instance creation" 58 | } 59 | 60 | variable "create_volume" { 61 | description = "Controls whether or not to create block storage volumes. Volumes are created if the value is set to true." 62 | default = false 63 | } 64 | 65 | variable "volume_name_prefix" { 66 | description = "The prefix to the name of the volumes. The name will be appended by an hyphen, followed by an integer starting at 0" 67 | } 68 | 69 | variable "volume_size_in_gbs" { 70 | description = "The size of the volume in GB" 71 | } 72 | 73 | variable "volume_attachment_type" { 74 | description = "The type of attachment to the instance. Currently, iscsi is the only option" 75 | } 76 | 77 | variable "scripts_src_directory" { 78 | description = "Path to the directory where the scripts and config files are" 79 | } 80 | 81 | variable "scripts_dst_directory" { 82 | description = "Path to the directory where the scripts and config files will be copied to" 83 | } 84 | 85 | variable "block_device_for_ceph" { 86 | description = "A list of the names of the block devices that can be used for Ceph.
Currently we use two values: 'sdb' for block storage and 'nvme0n1' for instances with NVMe SSDs" 87 | type = "list" 88 | } 89 | 90 | variable "deployer_deploy" { 91 | description = "A synchronization primitive for letting TF know that the deployment for the deployer is complete" 92 | } 93 | 94 | variable "new_cluster" { 95 | description = "A synchronization primitive for letting TF know that the creation of the new cluster is complete" 96 | } 97 | -------------------------------------------------------------------------------- /modules/network.full/main.tf: -------------------------------------------------------------------------------- 1 | 2 | #------------------------------------------------------------------------------- 3 | # If both var.create_new_subnets AND var.create_new_vcn are set to true 4 | # 1. Create a new VCN 5 | # 2. Create a Gateway for that VCN 6 | #------------------------------------------------------------------------------- 7 | # Create a New VCN 8 | resource "oci_core_virtual_network" "vcn" { 9 | count = "${min(var.create_new_vcn, var.create_new_subnets)}" 10 | cidr_block = "${var.vcn_cidr}" 11 | compartment_id = "${var.compartment_id}" 12 | display_name = "${var.vcn_name}" 13 | dns_label = "${var.vcn_name}" 14 | } 15 | 16 | # Create a New Gateway 17 | resource "oci_core_internet_gateway" "gateway" { 18 | count = "${min(var.create_new_vcn, var.create_new_subnets)}" 19 | compartment_id = "${var.compartment_id}" 20 | display_name = "${var.gateway_name}" 21 | vcn_id = "${oci_core_virtual_network.vcn.id}" 22 | } 23 | 24 | locals { 25 | vcn_id = "${element(concat(oci_core_virtual_network.vcn.*.id, var.existing_vcn_id), 0)}" 26 | } 27 | 28 | #------------------------------------------------------------------------------- 29 | # Get the ID of the Internet Gateway to be used for the rest of the deployment 30 | #------------------------------------------------------------------------------- 31 | # When a new VCN was created 32 | data "oci_core_internet_gateways" "igw_new" { 33 | count = "${min(var.create_new_vcn, var.create_new_subnets)}" 34 | depends_on = [ "oci_core_internet_gateway.gateway" ] 35 | compartment_id = "${var.compartment_id}" 36 | vcn_id = "${oci_core_virtual_network.vcn.id}" 37 | } 38 | 39 | # When an existing VCN is used 40 | data "oci_core_internet_gateways" "igw_existing" { 41 | count = "${1 - min(var.create_new_vcn, var.create_new_subnets)}" 42 | compartment_id = "${var.compartment_id}" 43 | vcn_id = "${var.existing_vcn_id[0]}" 44 | } 45 | 46 | locals { 47 | gateway_list_of_list = "${concat(data.oci_core_internet_gateways.igw_new.*.gateways, data.oci_core_internet_gateways.igw_existing.*.gateways)}" 48 | gateway_list = "${local.gateway_list_of_list[0]}" 49 | gateway_id = "${lookup(local.gateway_list[0], "id")}" 50 | } 51 | 52 | #------------------------------------------------------------------------------- 53 | # Get the DHCP Options to be used for the rest of the deployment 54 | #------------------------------------------------------------------------------- 55 | data "oci_core_dhcp_options" "dhcp_opt" { 56 | compartment_id = "${var.compartment_id}" 57 | vcn_id = "${local.vcn_id}" 58 | } 59 | 60 | locals { 61 | dhcp_options_id = "${lookup(data.oci_core_dhcp_options.dhcp_opt.options[0], "id")}" 62 | } 63 | 64 | #------------------------------------------------------------------------------- 65 | # If var.create_new_subnets is set to true 66 | # 1. Create a Route Table 67 | # 2.
Create a Security List 68 | #------------------------------------------------------------------------------- 69 | resource "oci_core_route_table" "route_table" { 70 | count = "${var.create_new_subnets}" 71 | depends_on = [ "oci_core_internet_gateway.gateway" ] 72 | compartment_id = "${var.compartment_id}" 73 | vcn_id = "${local.vcn_id}" 74 | display_name = "${var.route_table_name}" 75 | route_rules { 76 | cidr_block = "0.0.0.0/0" 77 | network_entity_id = "${local.gateway_id}" 78 | } 79 | } 80 | 81 | resource "oci_core_security_list" "security_list" { 82 | count = "${var.create_new_subnets}" 83 | compartment_id = "${var.compartment_id}" 84 | display_name = "Security List" 85 | vcn_id = "${local.vcn_id}" 86 | egress_security_rules = [{ 87 | destination = "0.0.0.0/0" 88 | protocol = "all" 89 | }] 90 | ingress_security_rules { 91 | protocol = "6" // tcp 92 | source = "0.0.0.0/0" 93 | stateless = false 94 | tcp_options { 95 | "min" = 22 //ssh 96 | "max" = 22 97 | } 98 | } 99 | ingress_security_rules { 100 | protocol = "6" // tcp 101 | source = "0.0.0.0/0" 102 | stateless = false 103 | tcp_options { 104 | "min" = 80 //http 105 | "max" = 80 106 | } 107 | } 108 | ingress_security_rules { 109 | protocol = "6" // tcp 110 | source = "0.0.0.0/0" 111 | stateless = false 112 | tcp_options { 113 | "min" = 443 //https 114 | "max" = 443 115 | } 116 | } 117 | ingress_security_rules { 118 | protocol = "6" // tcp 119 | source = "10.0.0.0/16" 120 | stateless = false 121 | tcp_options { 122 | "min" = 6789 //ceph-monitors 123 | "max" = 6789 124 | } 125 | } 126 | ingress_security_rules { 127 | protocol = "6" // tcp 128 | source = "10.0.0.0/16" 129 | stateless = false 130 | tcp_options { 131 | "min" = 6800 //ceph-servers 132 | "max" = 7300 133 | } 134 | } 135 | ingress_security_rules { 136 | protocol = "6" // tcp 137 | source = "10.0.0.0/16" 138 | stateless = false 139 | tcp_options { 140 | "min" = 7480 //ceph-servers 141 | "max" = 7480 142 | } 143 | } 144 | ingress_security_rules { 145 | protocol = "6" // tcp 146 | source = "10.0.0.0/16" 147 | stateless = false 148 | tcp_options { 149 | "min" = 9000 //ceph-servers 150 | "max" = 9000 151 | } 152 | } 153 | } 154 | 155 | #------------------------------------------------------------------------------- 156 | # Get a list of Availability Domains 157 | #------------------------------------------------------------------------------- 158 | data "oci_identity_availability_domains" "ADs" { 159 | compartment_id = "${var.tenancy_ocid}" 160 | } 161 | 162 | #------------------------------------------------------------------------------- 163 | # If var.create_new_subnet is set to true 164 | # 1. 
Create the Subnets 165 | #------------------------------------------------------------------------------- 166 | resource "oci_core_subnet" "subnets" { 167 | count = "${var.new_subnet_count}" 168 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[element(var.availability_domain_index_list, count.index) - 1],"name")}" 169 | cidr_block = "${element(var.subnet_cidr_blocks, count.index)}" 170 | display_name = "${var.subnet_name_prefix}${count.index}" 171 | compartment_id = "${var.compartment_id}" 172 | vcn_id = "${local.vcn_id}" 173 | route_table_id = "${oci_core_route_table.route_table.id}" 174 | security_list_ids = [ "${oci_core_security_list.security_list.*.id}" ] 175 | dhcp_options_id = "${local.dhcp_options_id}" 176 | dns_label = "${var.subnet_name_prefix}${count.index}" 177 | } 178 | -------------------------------------------------------------------------------- /modules/network.full/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "vcn_id" { 3 | value = "${local.vcn_id}" 4 | } 5 | 6 | output "gateway_id" { 7 | value = "${local.gateway_id}" 8 | } 9 | 10 | output "dhcp_options_id" { 11 | value = "${local.dhcp_options_id}" 12 | } 13 | 14 | output "route_table_id" { 15 | value = "${oci_core_route_table.route_table.*.id}" 16 | } 17 | 18 | output "security_list_id" { 19 | value = "${oci_core_security_list.security_list.*.id}" 20 | } 21 | 22 | output "subnet_id_list" { 23 | value = [ "${concat(oci_core_subnet.subnets.*.id, var.existing_subnet_ids)}" ] 24 | } 25 | -------------------------------------------------------------------------------- /modules/network.full/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | #=============================================================== 3 | # Module Inputs 4 | #=============================================================== 5 | 6 | variable "tenancy_ocid" { 7 | description = "The OCI tenancy id" 8 | } 9 | 10 | variable "compartment_id" { 11 | description = "The OCI compartment id" 12 | } 13 | 14 | variable "create_new_vcn" { 15 | description = "If true, a New VCN and Gateway will be created; If false, an existing VCN ID (in the same region) must be provided via the variable existing_vcn_id" 16 | default = false 17 | } 18 | 19 | variable existing_vcn_id { 20 | description = "If create_new_vcn is false, provide the id of an existing VCN to use" 21 | default = [ "" ] 22 | } 23 | 24 | variable "vcn_cidr" { 25 | description = "The CIDR for the new VCN (if created)" 26 | default = "10.0.0.0/16" 27 | } 28 | 29 | variable "vcn_name" { 30 | description = "The name for the new VCN (if created)" 31 | default = "cephvcn" 32 | } 33 | 34 | variable "gateway_name" { 35 | description = "The name of the gateway for the new VCN (if created)" 36 | default = "cephgw" 37 | } 38 | 39 | variable "route_table_name" { 40 | description = "The name of the route table for the new VCN (if created)" 41 | default = "cephrt" 42 | } 43 | 44 | variable "create_new_subnets" { 45 | description = "If true, new subnets will be created; If false, existing subnet ids must be provided via the variable existing_subnet_ids" 46 | default = false 47 | } 48 | 49 | variable "new_subnet_count" { 50 | description = "The number of subnets to create" 51 | default = "0" 52 | } 53 | 54 | variable existing_subnet_ids { 55 | description = "If create_new_subnets is false, provide the list of ids of existing subnets to use" 56 | default = [ "" ] 57 | }
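#---------------------------------------------------------------------------------------
# The VCN and gateway resources in main.tf use count = min(create_new_vcn,
# create_new_subnets), and pre-0.12 Terraform coerces booleans to "1"/"0" in
# interpolation, so a new VCN is only created when BOTH flags are true. A hypothetical
# terraform.tfvars fragment for a fully new network (all values are illustrative):
#
# create_new_vcn     = true
# create_new_subnets = true
# new_subnet_count   = "3"
#
# ...and for reusing an existing network:
#
# create_new_vcn      = false
# create_new_subnets  = false
# existing_vcn_id     = ["ocid1.vcn.oc1..."]
# existing_subnet_ids = ["ocid1.subnet.oc1...", "ocid1.subnet.oc1...", "ocid1.subnet.oc1..."]
#---------------------------------------------------------------------------------------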
58 | 59 | variable "availability_domain_index_list" { 60 | description = "Specifies the availability domain indexes for the subnets" 61 | default = ["1", "2", "3"] 62 | } 63 | 64 | variable "subnet_name_prefix" { 65 | description = "The prefix for the subnet names" 66 | default = "cephSub" 67 | } 68 | 69 | variable "subnet_cidr_blocks" { 70 | description = "The CIDR for the new subnets (if created)" 71 | default = [ "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24" ] 72 | } 73 | -------------------------------------------------------------------------------- /modules/network/main.tf: -------------------------------------------------------------------------------- 1 | 2 | #------------------------------------------------------------------------------- 3 | # If both var.create_new_subnets AND var.create_new_vcn are set to true 4 | # 1. Create a new VCN 5 | # 2. Create a Gateway for that VCN (the gateway resources are commented out in this module) 6 | #------------------------------------------------------------------------------- 7 | # Create a New VCN 8 | resource "oci_core_virtual_network" "vcn" { 9 | count = "${min(var.create_new_vcn, var.create_new_subnets)}" 10 | cidr_block = "${var.vcn_cidr}" 11 | compartment_id = "${var.compartment_id}" 12 | display_name = "${var.vcn_name}" 13 | dns_label = "${var.vcn_name}" 14 | } 15 | 16 | # Create a New Gateway 17 | #resource "oci_core_internet_gateway" "gateway" { 18 | # count = "${min(var.create_new_vcn, var.create_new_subnets)}" 19 | # compartment_id = "${var.compartment_id}" 20 | # display_name = "${var.gateway_name}" 21 | # vcn_id = "${oci_core_virtual_network.vcn.id}" 22 | #} 23 | 24 | locals { 25 | vcn_id = "${element(concat(oci_core_virtual_network.vcn.*.id, var.existing_vcn_id), 0)}" 26 | } 27 | 28 | #------------------------------------------------------------------------------- 29 | # Get the ID of the Internet Gateway to be used for the rest of the deployment 30 | #------------------------------------------------------------------------------- 31 | # When a new VCN was created 32 | #data "oci_core_internet_gateways" "igw_new" { 33 | # count = "${min(var.create_new_vcn, var.create_new_subnets)}" 34 | # depends_on = [ "oci_core_internet_gateway.gateway" ] 35 | # compartment_id = "${var.compartment_id}" 36 | # vcn_id = "${oci_core_virtual_network.vcn.id}" 37 | #} 38 | # 39 | ## When an existing VCN is used 40 | #data "oci_core_internet_gateways" "igw_existing" { 41 | # count = "${1 - min(var.create_new_vcn, var.create_new_subnets)}" 42 | # compartment_id = "${var.compartment_id}" 43 | # vcn_id = "${var.existing_vcn_id[0]}" 44 | #} 45 | # 46 | #locals { 47 | # gateway_list_of_list = "${concat(data.oci_core_internet_gateways.igw_new.*.gateways, data.oci_core_internet_gateways.igw_existing.*.gateways)}" 48 | # gateway_list = [ "${local.gateway_list_of_list[0]}" ] 49 | # gateway_id = "${lookup(local.gateway_list[0], "id")}" 50 | #} 51 | # 52 | #------------------------------------------------------------------------------- 53 | # Get the DHCP Options to be used for the rest of the deployment 54 | #------------------------------------------------------------------------------- 55 | data "oci_core_dhcp_options" "dhcp_opt" { 56 | compartment_id = "${var.compartment_id}" 57 | vcn_id = "${local.vcn_id}" 58 | } 59 | 60 | locals { 61 | # dhcp_options_id = "${lookup(data.oci_core_dhcp_options.dhcp_opt.options[0], "id")}" 62 | dhcp_options_id = "" 63 | } 64 | 65 | #------------------------------------------------------------------------------- 66 | # If var.create_new_subnets is set to true 67 | # 1.
Create a Route Table 68 | # 2. Create a Security List 69 | #------------------------------------------------------------------------------- 70 | #resource "oci_core_route_table" "route_table" { 71 | # count = "${var.create_new_subnets}" 72 | # depends_on = [ "oci_core_internet_gateway.gateway" ] 73 | # compartment_id = "${var.compartment_id}" 74 | # vcn_id = "${local.vcn_id}" 75 | # display_name = "${var.route_table_name}" 76 | # route_rules { 77 | # cidr_block = "0.0.0.0/0" 78 | # network_entity_id = "${local.gateway_id}" 79 | # } 80 | #} 81 | 82 | data "oci_core_route_tables" "route_table" { 83 | compartment_id = "${var.compartment_id}" 84 | vcn_id = "${local.vcn_id}" 85 | } 86 | 87 | locals { 88 | #route_table_id = "${lookup(data.oci_core_route_tables.route_table.route_tables[0], "id")}" 89 | route_table_id = "" 90 | } 91 | 92 | 93 | resource "oci_core_security_list" "security_list" { 94 | count = "${var.create_new_subnets}" 95 | compartment_id = "${var.compartment_id}" 96 | display_name = "Security List" 97 | vcn_id = "${local.vcn_id}" 98 | egress_security_rules = [{ 99 | destination = "0.0.0.0/0" 100 | protocol = "all" 101 | }] 102 | ingress_security_rules { 103 | protocol = "6" // tcp 104 | source = "0.0.0.0/0" 105 | stateless = false 106 | tcp_options { 107 | "min" = 22 //ssh 108 | "max" = 22 109 | } 110 | } 111 | ingress_security_rules { 112 | protocol = "6" // tcp 113 | source = "0.0.0.0/0" 114 | stateless = false 115 | tcp_options { 116 | "min" = 80 //http 117 | "max" = 80 118 | } 119 | } 120 | ingress_security_rules { 121 | protocol = "6" // tcp 122 | source = "0.0.0.0/0" 123 | stateless = false 124 | tcp_options { 125 | "min" = 443 //https 126 | "max" = 443 127 | } 128 | } 129 | ingress_security_rules { 130 | protocol = "6" // tcp 131 | source = "10.0.0.0/16" 132 | stateless = false 133 | tcp_options { 134 | "min" = 6789 //ceph-monitors 135 | "max" = 6789 136 | } 137 | } 138 | ingress_security_rules { 139 | protocol = "6" // tcp 140 | source = "10.0.0.0/16" 141 | stateless = false 142 | tcp_options { 143 | "min" = 6800 //ceph-servers 144 | "max" = 7300 145 | } 146 | } 147 | ingress_security_rules { 148 | protocol = "6" // tcp 149 | source = "10.0.0.0/16" 150 | stateless = false 151 | tcp_options { 152 | "min" = 7480 //ceph-servers 153 | "max" = 7480 154 | } 155 | } 156 | ingress_security_rules { 157 | protocol = "6" // tcp 158 | source = "10.0.0.0/16" 159 | stateless = false 160 | tcp_options { 161 | "min" = 9000 //ceph-servers 162 | "max" = 9000 163 | } 164 | } 165 | } 166 | 167 | #------------------------------------------------------------------------------- 168 | # Get a list of Availability Domains 169 | #------------------------------------------------------------------------------- 170 | data "oci_identity_availability_domains" "ADs" { 171 | compartment_id = "${var.tenancy_ocid}" 172 | } 173 | 174 | #------------------------------------------------------------------------------- 175 | # If var.create_new_subnet is set to true 176 | # 1. 
Create the Subnets
177 | #-------------------------------------------------------------------------------
178 | resource "oci_core_subnet" "subnets" {
179 | count = "${var.new_subnet_count}"
180 | availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[element(var.availability_domain_index_list, count.index) - 1],"name")}"
181 | cidr_block = "${element(var.subnet_cidr_blocks, count.index)}"
182 | display_name = "${var.subnet_name_prefix}${count.index}"
183 | compartment_id = "${var.compartment_id}"
184 | vcn_id = "${local.vcn_id}"
185 | route_table_id = "${local.route_table_id}"
186 | security_list_ids = [ "${oci_core_security_list.security_list.*.id}" ]
187 | dhcp_options_id = "${local.dhcp_options_id}"
188 | dns_label = "${var.subnet_name_prefix}${count.index}"
189 | }
190 |
-------------------------------------------------------------------------------- /modules/network/output.tf: --------------------------------------------------------------------------------
1 |
2 | output "vcn_id" {
3 | value = "${local.vcn_id}"
4 | }
5 |
6 | output "dhcp_options_id" {
7 | value = "${local.dhcp_options_id}"
8 | }
9 |
10 | output "security_list_id" {
11 | value = "${oci_core_security_list.security_list.*.id}"
12 | }
13 |
14 | output "subnet_id_list" {
15 | value = [ "${concat(oci_core_subnet.subnets.*.id, var.existing_subnet_ids)}" ]
16 | }
17 |
-------------------------------------------------------------------------------- /modules/network/variables.tf: --------------------------------------------------------------------------------
1 |
2 | #===============================================================
3 | # Module Inputs
4 | #===============================================================
5 |
6 | variable "tenancy_ocid" {
7 | description = "The OCI tenancy id"
8 | }
9 |
10 | variable "compartment_id" {
11 | description = "The OCI compartment id"
12 | }
13 |
14 | variable "create_new_vcn" {
15 | description = "If true, a New VCN and Gateway will be created; If false, an existing VCN ID (in the same region) must be provided via the variable existing_vcn_id"
16 | default = false
17 | }
18 |
19 | variable existing_vcn_id {
20 | description = "If create_new_vcn is false, provide the id of an existing VCN to use"
21 | default = [ "" ]
22 | }
23 |
24 | variable "vcn_cidr" {
25 | description = "The CIDR for the new VCN (if created)"
26 | default = "10.0.0.0/16"
27 | }
28 |
29 | variable "vcn_name" {
30 | description = "The name for the new VCN (if created)"
31 | default = "cephvcn"
32 | }
33 |
34 | variable "gateway_name" {
35 | description = "The name of the gateway for the new VCN (if created)"
36 | default = "cephgw"
37 | }
38 |
39 | variable "route_table_name" {
40 | description = "The name of the route table for the new VCN (if created)"
41 | default = "cephrt"
42 | }
43 |
44 | variable "create_new_subnets" {
45 | description = "If true, new subnets will be created; If false, existing subnet ids must be provided via the variable existing_subnet_ids"
46 | default = false
47 | }
48 |
49 | variable "new_subnet_count" {
50 | description = "The number of subnets to create"
51 | default = "0"
52 | }
53 |
54 | variable existing_subnet_ids {
55 | description = "If create_new_subnets is false, provide the list of ids of existing subnets to use"
56 | default = [ "" ]
57 | }
58 |
59 | variable "availability_domain_index_list" {
60 | description = "Specifies the availability domain indexes for the subnets"
61 | default = ["1", "2", "3"]
62 | }
63 |
64 | variable
"subnet_name_prefix" { 65 | description = "The prefix for the subnet names" 66 | default = "cephSub" 67 | } 68 | 69 | variable "subnet_cidr_blocks" { 70 | description = "The CIDR for the new subnets(if created)" 71 | default = [ "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24" ] 72 | } 73 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "vcn_id" { 3 | value = "${module.ceph_network.vcn_id}" 4 | } 5 | 6 | output "subnet_id_list" { 7 | value = "${module.ceph_network.subnet_id_list}" 8 | } 9 | 10 | output "ceph_deployer_ip" { 11 | value = "${module.ceph_deployer.ip}" 12 | } 13 | 14 | output "ceph_deployer_hostname" { 15 | value = "${module.ceph_deployer.hostname}" 16 | } 17 | 18 | output "ceph_monitor_ip_list" { 19 | value = "${module.ceph_monitors.ip_list}" 20 | } 21 | 22 | output "ceph_monitor_hostname_list" { 23 | value = "${module.ceph_monitors.hostname_list}" 24 | } 25 | 26 | output "ceph_osd_ip_list" { 27 | value = "${module.ceph_osds.ip_list}" 28 | } 29 | 30 | output "ceph_osd_hostname_list" { 31 | value = "${module.ceph_osds.hostname_list}" 32 | } 33 | 34 | output "ceph_mds_ip_list" { 35 | value = "${module.ceph_mds.ip_list}" 36 | } 37 | 38 | output "ceph_mds_hostname_list" { 39 | value = "${module.ceph_mds.hostname_list}" 40 | } 41 | 42 | output "ceph_client_ip" { 43 | value = "${module.ceph_client.ip}" 44 | } 45 | 46 | output "ceph_client_hostname" { 47 | value = "${module.ceph_client.hostname}" 48 | } 49 | -------------------------------------------------------------------------------- /provider.tf: -------------------------------------------------------------------------------- 1 | 2 | provider "oci" { 3 | tenancy_ocid = "${var.tenancy_ocid}" 4 | user_ocid = "${var.user_ocid}" 5 | fingerprint = "${var.fingerprint}" 6 | private_key_path = "${var.private_key_path}" 7 | private_key_password = "${var.private_key_password}" 8 | region = "${var.region}" 9 | disable_auto_retries = "true" 10 | } 11 | -------------------------------------------------------------------------------- /scripts/add_to_etc_hosts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | outfile=/tmp/terraform_ceph_install.out 4 | 5 | if [ -f ceph.config ]; then 6 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config) 7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config) 8 | if [ "$do_vm_setup" != "yes" ]; then 9 | echo Ceph VM Setup is not done | tee -a $outfile 10 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile 11 | exit 12 | fi 13 | fi 14 | 15 | ( 16 | flock 200 17 | echo "Adding: $1 $2 to /etc/hosts" 18 | sudo sed -i "/$2/d" /etc/hosts 19 | sudo sh -c "echo $1 $2 >> /etc/hosts" 20 | ) 200>.tf_script_etchost_lock 21 | -------------------------------------------------------------------------------- /scripts/add_to_known_hosts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | outfile=/tmp/terraform_ceph_install.out 4 | 5 | if [ -f ceph.config ]; then 6 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config) 7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config) 8 | if [ "$do_vm_setup" != "yes" ]; then 9 | echo Ceph VM Setup is not done | tee -a $outfile 10 | echo Skipping ... 
\[ At host: $(hostname) \] $0 $* | tee -a $outfile
11 | exit
12 | fi
13 | fi
14 |
15 | (
16 | flock 200
17 |
18 | while [[ $# -gt 0 ]]
19 | do
20 | echo "Adding: $1 to ~/.ssh/known_hosts"
21 | ssh-keygen -R $1
22 | ssh-keyscan $1 | grep ecdsa-sha2 >> ~/.ssh/known_hosts
23 | shift
24 | done
25 | ) 200>.tf_script_knownhost_lock
26 |
-------------------------------------------------------------------------------- /scripts/ceph.config: --------------------------------------------------------------------------------
1 |
2 | #### Don't put comments on the same line as variables. ####
3 |
4 | # To skip the vm setup before installing Ceph, use 'no', otherwise use 'yes'
5 | do_vm_setup=yes
6 |
7 | # The amount of delay before and after vm setup
8 | delay_sec_before_vm_setup=0
9 | delay_sec_after_vm_setup=0
10 |
11 | # To skip the Ceph installation use 'no', otherwise use 'yes'
12 | do_ceph_install=yes
13 |
14 | # To skip setting up rbd or cephfs at the Ceph Client, use 'no', otherwise use 'yes'
15 | do_client_rbd_setup=yes
16 | do_client_cephfs_setup=no
17 |
18 | # The output file
19 | outputfile_name=/tmp/terraform_ceph_install.out
20 |
21 | # Cluster parameters
22 | num_object_replica=3
23 |
24 | #For Jewel only
25 | rbd_default_features=3
26 |
27 | # Variables for creating the osd, rbd and mountpoint on the client
28 | pool_name=mypool
29 | pool_page_num=128
30 | pool_pgp_num=128
31 | rbd_name=myrbd
32 | rbd_size=400G
33 | filesystem_mount_point=/var/myceph
34 |
35 | # Variables for creating the filesystem
36 | fs_data_pool_name=cephfs_data
37 | fs_data_pool_size=64
38 | fs_metadata_pool_name=cephfs_metadata
39 | fs_metadata_pool_size=16
40 | ceph_fs_mount_point=/mnt/mycephfs
41 |
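Every script below reads ceph.config with the same anchored pattern, awk -F= '/^key/{print $2}', which is why the header warns against putting comments on a value line: anything after the "=" would become part of the value. A minimal sketch of checking values exactly the way the scripts do (the keys are real; the echo is only illustrative):

  # Read settings the same way the consuming scripts do
  pool_name=$(awk -F= '/^pool_name/{print $2}' ceph.config)
  rbd_size=$(awk -F= '/^rbd_size/{print $2}' ceph.config)
  echo "client setup will create a ${rbd_size} image in pool ${pool_name}"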
-------------------------------------------------------------------------------- /scripts/ceph_client_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 | pool_name=rbd
5 | pool_page_num=128
6 | pool_pgp_num=128
7 | rbd_name=vol01
8 | rbd_size=4096
9 | filesystem_mount_point=/var/vol01
10 | # Ceph FS related
11 | fs_data_pool_name=cephfs_data
12 | fs_data_pool_size=16
13 | fs_metadata_pool_name=cephfs_metadata
14 | fs_metadata_pool_size=16
15 | ceph_fs_mount_point=/mnt/mycephfs
16 |
17 | print_usage()
18 | {
19 | echo ""
20 | echo "Usage: $0 <monitor-ip>"
21 | echo " <monitor-ip> - the IP address of one of the monitors"
22 | echo ""
23 | exit
24 | }
25 |
26 | if [ $# -lt 1 ];then
27 | print_usage
28 | fi
29 |
30 | echo Executing $0 $* | tee -a $outfile
31 |
32 | if [ -f ceph.config ]; then
33 | do_client_rbd_setup=$(awk -F= '/^do_client_rbd_setup/{print $2}' ceph.config)
34 | do_client_cephfs_setup=$(awk -F= '/^do_client_cephfs_setup/{print $2}' ceph.config)
35 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
36 | pool_name=$(awk -F= '/^pool_name/{print $2}' ceph.config)
37 | pool_page_num=$(awk -F= '/^pool_page_num/{print $2}' ceph.config)
38 | pool_pgp_num=$(awk -F= '/^pool_pgp_num/{print $2}' ceph.config)
39 | rbd_name=$(awk -F= '/^rbd_name/{print $2}' ceph.config)
40 | rbd_size=$(awk -F= '/^rbd_size/{print $2}' ceph.config)
41 | filesystem_mount_point=$(awk -F= '/^filesystem_mount_point/{print $2}' ceph.config)
42 | fs_data_pool_name=$(awk -F= '/^fs_data_pool_name/{print $2}' ceph.config)
43 | fs_data_pool_size=$(awk -F= '/^fs_data_pool_size/{print $2}' ceph.config)
44 | fs_metadata_pool_name=$(awk -F= '/^fs_metadata_pool_name/{print $2}' ceph.config)
45 | fs_metadata_pool_size=$(awk -F= '/^fs_metadata_pool_size/{print $2}' ceph.config)
46 | ceph_fs_mount_point=$(awk -F= '/^ceph_fs_mount_point/{print $2}' ceph.config)
47 | fi
48 |
49 | echo Executing $0 | tee -a $outfile
50 |
51 | ceph_version=`ceph -v | cut -d " " -f 3,3`
52 | ceph_major_version=`echo $ceph_version | cut -d. -f 1,1`
53 | kernel_version=`uname -r | cut -d "-" -f 1,1 | cut -d "." -f 3-3`
54 |
55 | if [ "$do_client_rbd_setup" == "yes" ]; then
56 | device_name="/dev/rbd/$pool_name/$rbd_name"
57 | if [ $ceph_major_version -le 10 ]; then
58 | ceph osd pool create $pool_name $pool_page_num $pool_pgp_num | tee -a $outfile
59 | rbd create --size $rbd_size --pool $pool_name $rbd_name | tee -a $outfile
60 | sudo rbd map $rbd_name --pool $pool_name | tee -a $outfile
61 | sudo mkfs.ext4 -m0 $device_name | tee -a $outfile
62 | sudo mkdir $filesystem_mount_point
63 | sudo mount $device_name $filesystem_mount_point
64 | else
65 | ceph osd crush tunables optimal
66 | ceph osd pool create $pool_name $pool_page_num $pool_pgp_num | tee -a $outfile
67 | ceph osd pool application enable $pool_name rbd
68 | rbd pool init $pool_name
69 | rbd create $rbd_name --size $rbd_size --pool $pool_name --image-feature layering | tee -a $outfile
70 | rbd feature disable $pool_name/$rbd_name object-map fast-diff deep-flatten
71 | sudo rbd map $rbd_name --pool $pool_name | tee -a $outfile
72 | sudo mkfs.ext4 -m0 $device_name | tee -a $outfile
73 | sudo mkdir $filesystem_mount_point
74 | sudo mount $device_name $filesystem_mount_point
75 | fi
76 | else
77 | echo Skipping RBD Setup \[ At host: $(hostname) \] $0 | tee -a $outfile
78 | fi
79 |
80 |
81 | if [ "$do_client_cephfs_setup" == "yes" ]; then
82 | ceph osd pool create $fs_data_pool_name $fs_data_pool_size
83 | ceph osd pool create $fs_metadata_pool_name $fs_metadata_pool_size
84 | ceph fs new cephfs $fs_metadata_pool_name $fs_data_pool_name
85 | awk '/key/{print $3}' /etc/ceph/ceph.client.admin.keyring | sudo tee /etc/ceph/admin.secret
86 | sudo mkdir $ceph_fs_mount_point
87 | monitor_ip=`grep mon_host /etc/ceph/ceph.conf | awk '{print $NF}' | awk -F, '{print $1}'`
88 | sudo mount -t ceph $monitor_ip:6789:/ $ceph_fs_mount_point -o name=admin,secretfile=/etc/ceph/admin.secret
89 | fi
90 |
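If the RBD branch ran with the defaults from scripts/ceph.config, the result can be checked from the client node; a quick verification sketch, assuming those defaults (pool mypool, image myrbd, mount point /var/myceph):

  rbd showmapped      # the image should be mapped to a /dev/rbdN device
  df -h /var/myceph   # the ext4 filesystem created above should be mounted
  ceph -s             # overall cluster health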
-------------------------------------------------------------------------------- /scripts/ceph_deploy_client.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 |
5 | if [ -f ceph.config ]; then
6 | do_ceph_install=$(awk -F= '/^do_ceph_install/{print $2}' ceph.config)
7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
8 | if [ "$do_ceph_install" != "yes" ]; then
9 | echo Ceph installation is not done | tee -a $outfile
10 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
11 | exit
12 | fi
13 | fi
14 |
15 | print_usage()
16 | {
17 | echo ""
18 | echo "Usage: $0 <hostname1> [ <....> ]"
19 | echo " <outfile> - to contain the output of ceph deploy commands"
20 | echo " <hostname1> - the first hostname for client(s)"
21 | echo ""
22 | exit
23 | }
24 |
25 | if [ $# -lt 1 ];then
26 | print_usage
27 | fi
28 |
29 | echo Executing $0 $* | tee -a $outfile
30 |
31 | client1_hostname=$1
32 | hostname_list=$*
33 |
34 | cd ceph-deploy
35 | ceph-deploy install $hostname_list | tee -a $outfile
36 | ceph-deploy admin $hostname_list | tee -a $outfile
37 |
38 | ceph_version=`ceph -v | cut -d " " -f 3,3`
39 | ceph_major_version=`echo $ceph_version | cut -d. -f 1,1`
40 |
41 |
42 | for h in $hostname_list
43 | do
44 | if [ $ceph_major_version -le 10 ]; then
45 | ssh -l opc $h sudo chmod +r /etc/ceph/ceph.client.admin.keyring | tee -a $outfile
46 | else
47 | ssh -l opc $h sudo chmod +r /etc/ceph/ceph.client.admin.keyring | tee -a $outfile
48 | fi
49 | done
50 |
-------------------------------------------------------------------------------- /scripts/ceph_deploy_mds.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 |
5 | if [ -f ceph.config ]; then
6 | do_ceph_install=$(awk -F= '/^do_ceph_install/{print $2}' ceph.config)
7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
8 | if [ "$do_ceph_install" != "yes" ]; then
9 | echo Ceph installation is not done | tee -a $outfile
10 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
11 | exit
12 | fi
13 | fi
14 |
15 | print_usage()
16 | {
17 | echo ""
18 | echo "Usage: $0 <hostname1> [ <....> ]"
19 | echo " <outfile> - to contain the output of ceph deploy commands"
20 | echo " <hostname1> - the first hostname for mds node(s)"
21 | echo ""
22 | exit
23 | }
24 |
25 | if [ $# -lt 1 ];then
26 | print_usage
27 | fi
28 |
29 | echo Executing $0 $* | tee -a $outfile
30 |
31 | hostname_list=$*
32 |
33 | cd ceph-deploy
34 | ceph-deploy install $hostname_list | tee -a $outfile
35 | ceph-deploy config push $hostname_list | tee -a $outfile
36 | ceph-deploy --overwrite-conf mds create $hostname_list | tee -a $outfile
37 |
-------------------------------------------------------------------------------- /scripts/ceph_deploy_osd.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 |
5 | if [ -f ceph.config ]; then
6 | do_ceph_install=$(awk -F= '/^do_ceph_install/{print $2}' ceph.config)
7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
8 | if [ "$do_ceph_install" != "yes" ]; then
9 | echo Ceph installation is not done | tee -a $outfile
10 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
11 | exit
12 | fi
13 | fi
14 |
15 | print_usage()
16 | {
17 | echo ""
18 | echo "Usage: $0 <device-name> <hostname1> [ <....> ]"
19 | echo " <hostname1> - the first hostname for osd(s)"
20 | echo ""
21 | exit
22 | }
23 |
24 | if [ $# -lt 2 ];then
25 | print_usage
26 | fi
27 |
28 | echo Executing $0 $* | tee -a $outfile
29 |
30 | device_name=$1
31 | shift
32 | hostname_list=$*
33 |
34 |
35 | cd ceph-deploy
36 | ceph-deploy install $hostname_list | tee -a $outfile
37 |
38 | ceph_version=`ceph -v | cut -d " " -f 3,3`
39 | ceph_major_version=`echo $ceph_version | cut -d. -f 1,1`
40 |
41 | for h in $hostname_list
42 | do
43 | if [ $ceph_major_version -le 10 ]; then
44 | ceph-deploy osd create --zap-disk --fs-type xfs $h:$device_name | tee -a $outfile
45 | else
46 | ceph-deploy osd create --data /dev/$device_name $h | tee -a $outfile
47 | fi
48 | done
49 |
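Note that the device argument comes first and is a bare device name rather than a path; on Luminous and later the script prepends /dev/ itself. A hypothetical invocation from the deployer node (the hostnames follow the default osd_hostname_prefix; pick sdb when block volumes are attached, nvme0n1 on DenseIO/HighIO shapes):

  ./ceph_deploy_osd.sh sdb test-ceph-osd-0 test-ceph-osd-1 test-ceph-osd-2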
-------------------------------------------------------------------------------- /scripts/ceph_firewall_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 |
5 | if [ -f ceph.config ]; then
6 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
8 | if [ "$do_vm_setup" != "yes" ]; then
9 | echo VM Setup is not done | tee -a $outfile
10 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
11 | exit
12 | fi
13 | fi
14 |
15 | print_usage()
16 | {
17 | echo ""
18 | echo "Usage: $0 <node-type>"
19 | echo " <node-type> - the type of ceph node: osd, monitor, deployer, mds, or client"
20 | echo ""
21 | exit
22 | }
23 |
24 | if [ $# -lt 1 ];then
25 | print_usage
26 | fi
27 |
28 | echo Executing $0 $* | tee -a $outfile
29 |
30 | type=$1
31 | echo "Setting up firewall for $type:" $(hostname) | tee -a $outfile
32 |
33 | #ceph_version=`ceph -v | cut -d " " -f 3,3`
34 | #ceph_major_version=`echo $ceph_version | cut -d. -f 1,1`
35 | #ceph_major_version=10
36 |
37 | if [ "$type" == "osd" ]; then
38 | sudo systemctl stop firewalld | tee -a $outfile
39 | sudo systemctl disable firewalld | tee -a $outfile
40 |
41 | # echo sudo firewall-cmd --zone=public --add-service=ceph --permanent | tee -a $outfile
42 | # sudo firewall-cmd --zone=public --add-service=ceph --permanent
43 | # if [ $ceph_major_version -le 10 ]; then
44 | # echo sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent | tee -a $outfile
45 | # sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
46 | # else
47 | # echo sudo firewall-cmd --zone=public --add-service=ceph --permanent | tee -a $outfile
48 | # sudo firewall-cmd --zone=public --add-service=ceph --permanent
49 | # sudo firewall-cmd --zone=public --add-service=ceph-mon --permanent
50 | # fi
51 | fi
52 |
53 | if [ "$type" == "monitor" ]; then
54 | sudo systemctl stop firewalld | tee -a $outfile
55 | sudo systemctl disable firewalld | tee -a $outfile
56 | # echo sudo firewall-cmd --zone=public --add-service=ceph-mon --permanent | tee -a $outfile
57 | # sudo firewall-cmd --zone=public --add-service=ceph-mon --permanent
58 | # if [ $ceph_major_version -le 10 ]; then
59 | # echo sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent | tee -a $outfile
60 | # sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent | tee -a $outfile
61 | # else
62 | # echo sudo firewall-cmd --zone=public --add-service=ceph-mon --permanent | tee -a $outfile
63 | # sudo firewall-cmd --zone=public --add-service=ceph --permanent
64 | # sudo firewall-cmd --zone=public --add-service=ceph-mon --permanent
65 | # fi
66 | fi
67 |
68 | if [ "$type" == "mds" ]; then
69 | sudo systemctl stop firewalld | tee -a $outfile
70 | sudo systemctl disable firewalld | tee -a $outfile
71 | # echo sudo firewall-cmd --zone=public --add-service=ceph --permanent | tee -a $outfile
72 | # sudo firewall-cmd --zone=public --add-service=ceph --permanent
73 | fi
74 |
75 | if [ "$type" == "client" ]; then
76 | sudo systemctl stop firewalld | tee -a $outfile
77 | sudo systemctl disable firewalld | tee -a $outfile
78 | # echo sudo firewall-cmd --zone=public --add-service=ceph --permanent | tee -a $outfile
79 | # sudo firewall-cmd --zone=public --add-service=ceph --permanent
80 | fi
81 |
82 | if [ "$type" == "deployer" ]; then
83 | sudo systemctl stop firewalld | tee -a $outfile
84 | sudo systemctl disable firewalld | tee -a $outfile
85 | # echo sudo firewall-cmd --zone=public --add-service=ceph --permanent | tee -a $outfile
86 | # sudo firewall-cmd --zone=public --add-service=ceph --permanent
87 | fi
88 |
89 |
90 | #echo sudo firewall-cmd --reload | tee -a $outfile
91 | #sudo firewall-cmd --reload
92 | #echo sudo systemctl restart firewalld.service
93 | #sudo systemctl restart firewalld.service
94 |
95 | echo "Done .... $0" | tee -a $outfile
96 |
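As written, every branch of the script simply stops and disables firewalld; the commented-out commands preserve the intended per-role rules. To keep the firewall running instead, a monitor node, for example, would need roughly the following (taken from the commented-out rules above):

  sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent
  sudo firewall-cmd --reload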
$0" | tee -a $outfile 96 | -------------------------------------------------------------------------------- /scripts/ceph_new_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | outfile=/tmp/terraform_ceph_install.out 4 | num_object_replica=3 5 | rbd_default_features=3 6 | pool_page_num=128 7 | pool_pgp_num=128 8 | 9 | if [ -f ceph.config ]; then 10 | do_ceph_install=$(awk -F= '/^do_ceph_install/{print $2}' ceph.config) 11 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config) 12 | num_object_replica=$(awk -F= '/^num_object_replica/{print $2}' ceph.config) 13 | rbd_default_features=$(awk -F= '/^rbd_default_features/{print $2}' ceph.config) 14 | pool_page_num=$(awk -F= '/^pool_page_num/{print $2}' ceph.config) 15 | pool_pgp_num=$(awk -F= '/^pool_pgp_num/{print $2}' ceph.config) 16 | if [ "$do_ceph_install" != "yes" ]; then 17 | echo Ceph installation is not done | tee -a $outfile 18 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile 19 | exit 20 | fi 21 | fi 22 | 23 | print_usage() 24 | { 25 | echo "" 26 | echo "Usage: $0 [ <....> ]" 27 | echo " - to contain the output of ceph deploy commands" 28 | echo " - the first hostname for monitor(s)" 29 | echo "" 30 | exit 31 | } 32 | 33 | if [ $# -lt 1 ];then 34 | print_usage 35 | fi 36 | 37 | echo Executing $0 $* | tee -a $outfile 38 | 39 | monitor1_hostname=$1 40 | hostname_list=$* 41 | 42 | mkdir ceph-deploy 43 | cd ceph-deploy 44 | ceph-deploy new $hostname_list | tee -a $outfile 45 | ceph-deploy install $hostname_list | tee -a $outfile 46 | 47 | ceph_version=`ceph -v | cut -d " " -f 3,3` 48 | ceph_major_version=`echo $ceph_version | cut -d. -f 1,1` 49 | 50 | if [ $ceph_major_version -le 10 ]; then 51 | echo osd pool default size = $num_object_replica | tee -a ceph.conf 52 | echo rbd default features = $rbd_default_features | tee -a ceph.conf 53 | ceph-deploy mon create-initial | tee -a $outfile 54 | ceph-deploy mon create $hostname_list | tee -a $outfile 55 | ceph-deploy gatherkeys $monitor1_hostname | tee -a $outfile 56 | for h in $hostname_list 57 | do 58 | ssh -l opc $h sudo chmod +r /etc/ceph/ceph.client.admin.keyring | tee -a $outfile 59 | done 60 | else 61 | echo osd pool default size = $num_object_replica | tee -a ceph.conf 62 | echo mon_allow_pool_delete = true | tee -a ceph.conf 63 | ceph-deploy --overwrite-conf mon create-initial | tee -a $outfile 64 | ceph-deploy --overwrite-conf admin $(hostname) | tee -a $outfile 65 | sudo chmod +r /etc/ceph/ceph.client.admin.keyring | tee -a $outfile 66 | ceph-deploy mgr create $(hostname) | tee -a $outfile 67 | fi 68 | -------------------------------------------------------------------------------- /scripts/ceph_yum_repo: -------------------------------------------------------------------------------- 1 | 2 | [my_custom_repo_for_ceph] 3 | name=Ceph Storage for Oracle Linux Release 3.0 - Oracle Linux 7.5 or later ($basearch) 4 | baseurl=http://xyz.myorg.com/ceph/luminous/OL7/x86_64/ 5 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle 6 | gpgcheck=1 7 | enabled=0 8 | 9 | -------------------------------------------------------------------------------- /scripts/delay.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #------------------------------------------------------------------------------- 4 | # Adds the necessary delays before or after a VM setup 5 | # It could be necessary, for example, if the VM is rebooted as part of the setup 6 | 
-------------------------------------------------------------------------------- /scripts/delay.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #-------------------------------------------------------------------------------
4 | # Adds the necessary delays before or after a VM setup
5 | # It could be necessary, for example, if the VM is rebooted as part of the setup
6 | #-------------------------------------------------------------------------------
7 |
8 | outfile=/tmp/terraform-setup.out
9 |
10 | if [ -f ceph.config ]; then
11 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
12 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
13 | delay_sec_before_vm_setup=$(awk -F= '/^delay_sec_before_vm_setup/{print $2}' ceph.config)
14 | delay_sec_after_vm_setup=$(awk -F= '/^delay_sec_after_vm_setup/{print $2}' ceph.config)
15 | if [ "$do_vm_setup" != "yes" ]; then
16 | echo Skipping the execution of delay.sh \[ At host: $(hostname) \] $0 $* | tee -a $outfile
17 | exit
18 | fi
19 | fi
20 |
21 | print_usage()
22 | {
23 | echo ""
24 | echo "Usage: $0 <when>"
25 | echo " <when> - before_setup | after_setup"
26 | echo ""
27 | exit
28 | }
29 |
30 | if [ $# -lt 1 ];then
31 | print_usage
32 | fi
33 |
34 | when=$1
35 |
36 | if [ "$when" = "before_setup" ]; then
37 | echo "Sleeping for $delay_sec_before_vm_setup seconds"
38 | sleep $delay_sec_before_vm_setup
39 | fi
40 |
41 | if [ "$when" = "after_setup" ]; then
42 | echo "Sleeping for $delay_sec_after_vm_setup seconds"
43 | sleep $delay_sec_after_vm_setup
44 | fi
45 |
-------------------------------------------------------------------------------- /scripts/install_ceph_deploy.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 |
5 | if [ -f ceph.config ]; then
6 | do_ceph_install=$(awk -F= '/^do_ceph_install/{print $2}' ceph.config)
7 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
8 | if [ "$do_ceph_install" != "yes" ]; then
9 | echo Ceph installation is not done | tee -a $outfile
10 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
11 | exit
12 | fi
13 | fi
14 |
15 | echo Executing $0 $* | tee -a $outfile
16 |
17 | sudo yum -y install ceph-deploy | tee -a $outfile
18 | ceph-deploy install $(hostname)
19 |
20 | ceph_version=`ceph -v | cut -d " " -f 3,3`
21 | ceph_major_version=`echo $ceph_version | cut -d. -f 1,1`
22 |
23 | echo "Installing Ceph version: $ceph_version" | tee -a $outfile
24 |
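The deployer node needs both the ceph-deploy tool and a local Ceph install for the version checks that the other scripts perform. A quick sanity check after this script has run (both are standard CLI entry points):

  ceph-deploy --version
  ceph -v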
-------------------------------------------------------------------------------- /scripts/install_ssh_key.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ -f ceph.config ]; then
4 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
5 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
6 | if [ "$do_vm_setup" != "yes" ]; then
7 | echo Ceph VM Setup is not done | tee -a $outfile
8 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
9 | exit
10 | fi
11 | fi
12 |
13 | ssh $1 -o "StrictHostKeyChecking no" -l opc 'cat ~/.ssh/id_rsa.pub' | ssh $2 -o "StrictHostKeyChecking no" -l opc 'cat >> .ssh/authorized_keys'
14 | sleep 5
15 |
-------------------------------------------------------------------------------- /scripts/vm_init.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #------------------------------------------------------------------------
4 | # This file allows for the initialization of the newly created VMs
5 | # If the VM resides within a custom environment, it may need custom setups
6 | # Some commented out examples are shown below
7 | #------------------------------------------------------------------------
8 |
9 | outfile=/tmp/terraform_ceph_install.out
10 |
11 | if [ -f ceph.config ]; then
12 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
13 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
14 | if [ "$do_vm_setup" != "yes" ]; then
15 | echo VM Setup is not done | tee -a $outfile
16 | echo Skipping ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
17 | exit
18 | fi
19 | fi
20 |
21 | print_usage()
22 | {
23 | echo ""
24 | echo "Usage: $0 <node-type>"
25 | echo " <node-type> - type of node (deployer|osd|monitor|mds|client)"
26 | echo ""
27 | exit
28 | }
29 |
30 | if [ $# -lt 1 ];then
31 | print_usage
32 | fi
33 |
34 | type=$1
35 | echo "Setting up VM for $type:" $(hostname) | tee -a $outfile
36 |
37 | #------------------------------------------------------------------------
38 | # To setup the DNS via the /etc/resolv.conf file
39 | #------------------------------------------------------------------------
40 | #grep -v "^search" /etc/resolv.conf | grep -v "^nameserver" > /tmp/etc.resolve.conf
41 | #echo "search us.oracle.com" >> /tmp/etc.resolve.conf
42 | #echo "nameserver nn.nn.nn.nn" >> /tmp/etc.resolve.conf
43 | #sudo cp -f /tmp/etc.resolve.conf /etc/resolv.conf
44 | #rm -f /tmp/etc.resolve.conf
45 |
46 |
47 | #------------------------------------------------------------------------
48 | # To setup the proxy servers
49 | #------------------------------------------------------------------------
50 | #echo "export http_proxy=http://my-proxy.us.oracle.com:80" >> ~/.bashrc
51 | #echo "export https_proxy=http://my-proxy.us.oracle.com:80" >> ~/.bashrc
52 | #echo "set -o vi" >> ~/.bashrc
53 |
54 |
55 | #------------------------------------------------------------------------
56 | # To maintain the proxy environments when doing a sudo
57 | # Add a line to the /etc/sudoers file
58 | #------------------------------------------------------------------------
59 | #sudo cp /etc/sudoers /etc/sudoers.orig
60 | #sudo sed '/Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY"/a Defaults env_keep += "ftp_proxy http_proxy https_proxy no_proxy"' /etc/sudoers > /tmp/etc.sudoers.modified
61 | #sudo cp /tmp/etc.sudoers.modified /etc/sudoers
62 |
63 | #------------------------------------------------------------------------
64 | # To enter permissive mode for SELinux
65 | #------------------------------------------------------------------------
66 | #sudo setenforce 0
67 |
-------------------------------------------------------------------------------- /scripts/vm_post_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #------------------------------------------------------------------------
4 | # This is part of three
setup files that allow for setting up newly
5 | # created VMs after the VM is created and initialized.
6 | # The three files are:
7 | # 1. vm_pre_setup
8 | # 2. vm_setup, and
9 | # 3. vm_post_setup (this file)
10 | # These files can be used, for example, to install packages, update the
11 | # OS or change the kernel etc.
12 | #------------------------------------------------------------------------
13 |
14 | outfile=/tmp/terraform_ceph_install.out
15 |
16 | if [ -f ceph.config ]; then
17 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
18 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
19 | if [ "$do_vm_setup" != "yes" ]; then
20 | echo VM Setup is not done | tee -a $outfile
21 | echo Skipping vm_post_setup... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
22 | exit
23 | fi
24 | fi
25 |
26 | print_usage()
27 | {
28 | echo ""
29 | echo "Usage: $0 <node-type>"
30 | echo " <node-type> - type of node (deployer|osd|monitor|mds|client)"
31 | echo ""
32 | exit
33 | }
34 |
35 | if [ $# -lt 1 ];then
36 | print_usage
37 | fi
38 |
39 | type=$1
40 | echo "Executing vm_post_setup for $type:" $(hostname) | tee -a $outfile
41 |
-------------------------------------------------------------------------------- /scripts/vm_pre_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #------------------------------------------------------------------------
4 | # This is part of three setup files that allow for setting up newly
5 | # created VMs after the VM is created and initialized.
6 | # The three files are:
7 | # 1. vm_pre_setup (this file)
8 | # 2. vm_setup, and
9 | # 3. vm_post_setup
10 | # These files can be used, for example, to install packages, update the
11 | # OS or change the kernel etc.
12 | #------------------------------------------------------------------------
13 |
14 | outfile=/tmp/terraform_ceph_install.out
15 |
16 | if [ -f ceph.config ]; then
17 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
18 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
19 | if [ "$do_vm_setup" != "yes" ]; then
20 | echo VM Setup is not done | tee -a $outfile
21 | echo Skipping vm pre setup ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
22 | exit
23 | fi
24 | fi
25 |
26 | print_usage()
27 | {
28 | echo ""
29 | echo "Usage: $0 <node-type>"
30 | echo " <node-type> - type of node (deployer|osd|monitor|mds|client)"
31 | echo ""
32 | exit
33 | }
34 |
35 | if [ $# -lt 1 ];then
36 | print_usage
37 | fi
38 |
39 | type=$1
40 | echo "Executing vm_pre_setup for $type:" $(hostname) | tee -a $outfile
41 |
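As shipped, all three hooks only validate their argument and log; site-specific steps go between the argument check and the final echo. A hypothetical addition to vm_setup.sh, for example, to update the OS before Ceph is installed (the package names are purely illustrative):

  sudo yum -y update | tee -a $outfile
  sudo yum -y install vim-enhanced sysstat | tee -a $outfile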
-------------------------------------------------------------------------------- /scripts/vm_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #------------------------------------------------------------------------
4 | # This is part of three setup files that allow for setting up newly
5 | # created VMs after the VM is created and initialized.
6 | # The three files are:
7 | # 1. vm_pre_setup
8 | # 2. vm_setup (this file), and
9 | # 3. vm_post_setup
10 | # These files can be used, for example, to install packages, update the
11 | # OS or change the kernel etc.
12 | #------------------------------------------------------------------------
13 |
14 | outfile=/tmp/terraform_ceph_install.out
15 |
16 | if [ -f ceph.config ]; then
17 | do_vm_setup=$(awk -F= '/^do_vm_setup/{print $2}' ceph.config)
18 | outfile=$(awk -F= '/^outputfile_name/{print $2}' ceph.config)
19 | if [ "$do_vm_setup" != "yes" ]; then
20 | echo VM Setup is not done | tee -a $outfile
21 | echo Skipping vm setup ... \[ At host: $(hostname) \] $0 $* | tee -a $outfile
22 | exit
23 | fi
24 | fi
25 |
26 | print_usage()
27 | {
28 | echo ""
29 | echo "Usage: $0 <node-type>"
30 | echo " <node-type> - type of node (deployer|osd|monitor|mds|client)"
31 | echo ""
32 | exit
33 | }
34 |
35 | if [ $# -lt 1 ];then
36 | print_usage
37 | fi
38 |
39 | type=$1
40 | echo "Executing vm_setup for $type:" $(hostname) | tee -a $outfile
41 |
-------------------------------------------------------------------------------- /scripts/yum_repo_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | outfile=/tmp/terraform_ceph_install.out
4 |
5 | # ---------------------------------------------------------------
6 | # Check to see if "ceph_yum_repo" has an entry that is enabled
7 | # If enabled, copy it to /etc/yum.repos.d/, disable the ol7_ceph repository, and then yum install
8 | # If not enabled, enable the ol7_ceph repository and then yum install
9 | # ---------------------------------------------------------------
10 | if [ -f ceph_yum_repo ]; then
11 | is_enabled=`grep enabled ceph_yum_repo | grep 1`
12 | if [ "X$is_enabled" != "X" ]; then
13 | echo "Custom repo is enabled" | tee -a $outfile
14 | sudo yum-config-manager --disable ol7_ceph ol7_software_collections ol7_developer_EPEL | tee -a $outfile
15 | sudo yum-config-manager --enable ol7_latest ol7_optional_latest ol7_addons | tee -a $outfile
16 | sudo cp ceph_yum_repo /etc/yum.repos.d/ceph.repo
17 | else
18 | echo "Custom repo is disabled" | tee -a $outfile
19 | sudo yum-config-manager --disable ol7_software_collections ol7_developer_EPEL | tee -a $outfile
20 | sudo yum-config-manager --enable ol7_ceph ol7_latest ol7_optional_latest ol7_addons | tee -a $outfile
21 | fi
22 | else
23 | echo "Custom repo doesn't exist" | tee -a $outfile
24 | sudo yum-config-manager --disable ol7_software_collections ol7_developer_EPEL | tee -a $outfile
25 | sudo yum-config-manager --enable ol7_ceph ol7_latest ol7_optional_latest ol7_addons | tee -a $outfile
26 | fi
27 |
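Whichever branch runs, the resulting repository selection can be confirmed on a node before ceph-deploy is invoked; a minimal check:

  yum repolist enabled | grep -i ceph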
description = "The private key that for ssh login to the new instance(s)" 30 | default = "/root/.ssh/id_rsa" 31 | } 32 | 33 | #--------------------------------------------------------------- 34 | # Note: 35 | # Shapes like VM.Standard1.1 VM.Standard1.4 BM.Standard1.36 will need block storage 36 | # Shapes like VM.DenseIO1.4 BM.HighIO1.36 comes with NVMe SSD 37 | #--------------------------------------------------------------- 38 | variable "instance_shapes" { 39 | description = "The shapes of the instances. You can choose different shapes for deployer, monitor, osd, and client. But all instances of one type, e.g. monitor, will be the same." 40 | default = { 41 | "deployer" = "VM.Standard1.2" 42 | "monitor" = "VM.Standard1.2" 43 | "osd" = "VM.Standard1.2" 44 | "mds" = "VM.Standard1.2" 45 | "client" = "VM.Standard1.2" 46 | } 47 | } 48 | 49 | variable "create_volume" { 50 | description = "Controls whether or not to create a block storage for OSDs. Create if the value is set to true." 51 | default = true 52 | } 53 | 54 | # Note: Specifiy the full display name of the OS. Just the name (e.g., Linux) and version (e.g., 7.4) doesn't guarantee a particular image that will be compatible with the instance 55 | variable "instance_os" { 56 | description = "The Name of the Operating System for all instances" 57 | default = "Oracle-Linux-7.5-2018.07.20-0" 58 | } 59 | 60 | variable "instance_create_timeout" { 61 | description = "The timeout value for instance creation" 62 | default = "60m" 63 | } 64 | 65 | variable "scripts_src_directory" { 66 | description = "The path to the directory where are scripts and config files are." 67 | default = "scripts" 68 | } 69 | 70 | variable "scripts_dst_directory" { 71 | description = "The path to the directory where are scripts and config files will be copied." 72 | default = "terraform-scripts" 73 | } 74 | 75 | variable "ssh_username" { 76 | description = "The username for loging in via ssh to the VM. This is set by OCI." 77 | default = "opc" 78 | } 79 | 80 | #--------------------------------------------------------------- 81 | # Network related variables used in this deployment 82 | #--------------------------------------------------------------- 83 | variable "create_new_vcn" { 84 | description = "If true, a New VCN and Gateway will be created; If false, an existing VCN ID (in the same region) must be provided via the variable existing_vcn_id" 85 | default = false 86 | } 87 | 88 | variable "existing_vcn_id" { 89 | description = "If create_new_vcn is false, provide the id of an existing VCN to use" 90 | default = [ "ocid1.vcn.oc1.iad.aaa..........................." 
]
91 | }
92 |
93 | variable "vcn_cidr" {
94 | description = "The CIDR for the new VCN (if created)"
95 | default = "10.0.0.0/16"
96 | }
97 |
98 | variable "vcn_name" {
99 | description = "The name for the new VCN (if created)"
100 | default = "cephvcn"
101 | }
102 |
103 | variable "create_new_subnets" {
104 | description = "If true, new subnets will be created; If false, existing subnet ids must be provided via the variable existing_subnet_ids"
105 | default = false
106 | }
107 |
108 | variable "new_subnet_count" {
109 | description = "The number of subnets to create"
110 | default = 0
111 | }
112 |
113 | variable "existing_subnet_ids" {
114 | description = "If create_new_subnets is false, provide the list of ids of existing subnets to use"
115 | default = [ "ocid1.subnet.oc1.iad.......................................",
116 | "ocid1.subnet.oc1.iad.......................................",
117 | "ocid1.subnet.oc1.iad......................................." ]
118 | }
119 |
120 | variable "availability_domain_index_list_for_subnets" {
121 | description = "Specifies the availability domain indexes for the subnets"
122 | default = ["1", "2", "3"]
123 | }
124 |
125 | variable "gateway_name" {
126 | description = "The name of the gateway for the new VCN (if created)"
127 | default = "cephgw"
128 | }
129 |
130 | variable "route_table_name" {
131 | description = "The name of the route table for the new VCN (if created)"
132 | default = "cephrt"
133 | }
134 |
135 | variable "subnet_name_prefix" {
136 | description = "The prefix for the subnet names"
137 | default = "cephsub"
138 | }
139 |
140 | variable "subnet_cidr_blocks" {
141 | description = "The CIDR for the new subnets (if created)"
142 | default = [ "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24" ]
143 | }
144 |
145 | #---------------------------------------------------------------
146 | # Ceph Deployer related variables used in this deployment
147 | #---------------------------------------------------------------
148 | variable "availability_domain_index_for_deployer" {
149 | description = "The availability domain where the deployer will be created"
150 | default = [ "1" ]
151 | }
152 |
153 | variable "deployer_hostname" {
154 | description = "The name of the deployer instances"
155 | default = "test-ceph-deployer"
156 | }
157 |
158 | #---------------------------------------------------------------
159 | # Ceph Monitor related variables used in this deployment
160 | #---------------------------------------------------------------
161 | variable "monitor_instance_count" {
162 | description = "The number of monitors to create"
163 | default = "2"
164 | }
165 |
166 | variable "availability_domain_index_list_for_monitors" {
167 | description = "The list for availability domains where the monitors will be created.
The size of the list has to be at least equal to the number of monitor instances"
168 | default = [ "1", "2", "3" ]
169 | }
170 |
171 | variable "monitor_hostname_prefix" {
172 | description = "The prefix for the name of the monitor instances"
173 | default = "test-ceph-monitor"
174 | }
175 |
176 | #---------------------------------------------------------------
177 | # Ceph OSD related variables used in this deployment
178 | #---------------------------------------------------------------
179 | variable "osd_instance_count" {
180 | description = "The number of OSDs to create"
181 | default = "4"
182 | }
183 |
184 | variable "availability_domain_index_list_for_osds" {
185 | description = "The availability domains where the OSDs will be created (as a list of indexes)"
186 | default = [ "1", "2", "3", "1" ]
187 | }
188 |
189 | variable "osd_hostname_prefix" {
190 | description = "The prefix to the name of the OSD instances. The name will be appended with a hyphen, followed by an integer starting at 0"
191 | default = "test-ceph-osd"
192 | }
193 |
194 | variable "volume_name_prefix" {
195 | description = "The prefix for the name of storage block volumes"
196 | default = "ceph-volume"
197 | }
198 |
199 | variable "volume_size_in_gbs" {
200 | description = "The size of storage block volumes"
201 | default = "50"
202 | }
203 |
204 | variable "volume_attachment_type" {
205 | description = "The type of volume attachments"
206 | default = "iscsi"
207 | }
208 |
209 | variable "block_device_for_ceph" {
210 | description = "The name of the block device to be used by Ceph. If volumes are created, it will be sdb. Otherwise nvme0n1"
211 | default = [ "nvme0n1", "sdb" ]
212 | }
213 |
214 | #---------------------------------------------------------------
215 | # Variables used in Ceph MDS deployment
216 | #---------------------------------------------------------------
217 | variable "mds_instance_count" {
218 | description = "The number of MDSs to create"
219 | default = "1"
220 | }
221 |
222 | variable "availability_domain_index_list_for_mds" {
223 | description = "The availability domains where the MDS instances will be created"
224 | default = [ "2", "3", "1" ]
225 | }
226 |
227 | variable "mds_hostname_prefix" {
228 | description = "The prefix for the name of the MDS instances"
229 | default = "test-ceph-mds"
230 | }
231 |
232 | #---------------------------------------------------------------
233 | # Variables used in this Ceph Client deployment
234 | #---------------------------------------------------------------
235 | variable "create_client" {
236 | description = "Whether to create a client or not."
237 | default = true
238 | }
239 |
240 | variable "availability_domain_index_list_for_client" {
241 | description = "The availability domain where the client will be created"
242 | default = [ "1" ]
243 | }
244 |
245 | variable "client_hostname" {
246 | description = "The name of the client instance"
247 | default = "test-ceph-client"
248 | }
249 |
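variables.ex1 above targets VM shapes with attached block volumes, while variables.ex2 below targets a BM.HighIO OSD shape with local NVMe. A sketch of putting one of the examples to use, assuming the env-vars.sample workflow from the repository root (any other file names here are placeholders):

  cp variables.ex1 variables.tf
  cp env-vars.sample env-vars    # then fill in tenancy/user OCIDs, key paths, region
  source env-vars
  terraform init && terraform plan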
-------------------------------------------------------------------------------- /variables.ex2: --------------------------------------------------------------------------------
1 |
2 | #---------------------------------------------------------------
3 | # Environment Specific Variables
4 | # - typically defined in files like env-var
5 | #---------------------------------------------------------------
6 | variable "tenancy_ocid" { }
7 |
8 | variable "user_ocid" { }
9 |
10 | variable "fingerprint" { }
11 |
12 | variable "private_key_path" { }
13 |
14 | variable "private_key_password" { }
15 |
16 | variable "region" { }
17 |
18 | variable "compartment_ocid" { }
19 |
20 | #---------------------------------------------------------------
21 | # Variables used in this deployment - Common
22 | #---------------------------------------------------------------
23 | variable "ssh_public_key_file" {
24 | description = "The public key that will be installed on to the new instance(s) for ssh login"
25 | default = "/root/.ssh/id_rsa.pub"
26 | }
27 |
28 | variable "ssh_private_key_file" {
29 | description = "The private key used for ssh login to the new instance(s)"
30 | default = "/root/.ssh/id_rsa"
31 | }
32 |
33 | #---------------------------------------------------------------
34 | # Note:
35 | # Shapes like VM.Standard1.1 VM.Standard1.4 BM.Standard1.36 will need block storage
36 | # Shapes like VM.DenseIO1.4 BM.HighIO1.36 come with NVMe SSD
37 | #---------------------------------------------------------------
38 | variable "instance_shapes" {
39 | description = "The shapes of the instances. You can choose different shapes for deployer, monitor, osd, and client. But all instances of one type, e.g. monitor, will be the same."
40 | default = {
41 | "deployer" = "VM.Standard1.2"
42 | "monitor" = "VM.Standard1.2"
43 | "osd" = "BM.HighIO1.36"
44 | "client" = "VM.Standard1.2"
45 | }
46 | }
47 |
48 | variable "create_volume" {
49 | description = "Controls whether or not to create a block storage for OSDs. Create if the value is set to true."
50 | default = false
51 | }
52 |
53 | # Note: Specify the full display name of the OS. Just the name (e.g., Linux) and version (e.g., 7.4) doesn't guarantee a particular image that will be compatible with the instance
54 | variable "instance_os" {
55 | description = "The Name of the Operating System for all instances"
56 | default = "Oracle-Linux-7.4-2018.02.21-1"
57 | }
58 |
59 | variable "instance_create_timeout" {
60 | description = "The timeout value for instance creation"
61 | default = "60m"
62 | }
63 |
64 | variable "scripts_src_directory" {
65 | description = "The path to the directory where the scripts and config files are."
66 | default = "scripts"
67 | }
68 |
69 | variable "scripts_dst_directory" {
70 | description = "The path to the directory where the scripts and config files will be copied."
71 | default = "terraform-scripts"
72 | }
73 |
74 | variable "ssh_username" {
75 | description = "The username for logging in via ssh to the VM. This is set by OCI."
76 | default = "opc"
77 | }
78 |
79 | #---------------------------------------------------------------
80 | # Network related variables used in this deployment
81 | #---------------------------------------------------------------
82 | variable "network_module" {
83 | description = "The network module to use. If you want to create a network with its own internet gateway, use network.gateway otherwise network.nogateway"
84 | default = "network.gateway"
85 | }
86 |
87 | variable "create_new_vcn" {
88 | description = "If true, a New VCN and Gateway will be created; If false, an existing VCN ID (in the same region) must be provided via the variable existing_vcn_id"
89 | default = true
90 | }
91 |
92 | variable "existing_vcn_id" {
93 | description = "If create_new_vcn is false, provide the id of an existing VCN to use"
94 | default = [ "ocid1.vcn.oc1.iad.aaa..........................." ]
95 | }
96 |
97 | variable "vcn_cidr" {
98 | description = "The CIDR for the new VCN (if created)"
99 | default = "10.0.0.0/16"
100 | }
101 |
102 | variable "vcn_name" {
103 | description = "The name for the new VCN (if created)"
104 | default = "cephvcn"
105 | }
106 |
107 | variable "create_new_subnets" {
108 | description = "If true, new subnets will be created; If false, existing subnet ids must be provided via the variable existing_subnet_ids"
109 | default = true
110 | }
111 |
112 | variable "new_subnet_count" {
113 | description = "The number of subnets to create"
114 | default = 3
115 | }
116 |
117 | variable "existing_subnet_ids" {
118 | description = "If create_new_subnets is false, provide the list of ids of existing subnets to use"
119 | default = [ "ocid1.subnet.oc1.iad.......................................",
120 | "ocid1.subnet.oc1.iad.......................................",
121 | "ocid1.subnet.oc1.iad......................................."
]
122 | }
123 |
124 | variable "availability_domain_index_list_for_subnets" {
125 | description = "Specifies the availability domain indexes for the subnets"
126 | default = ["1", "2", "3"]
127 | }
128 |
129 | variable "gateway_name" {
130 | description = "The name of the gateway for the new VCN (if created)"
131 | default = "cephgw"
132 | }
133 |
134 | variable "route_table_name" {
135 | description = "The name of the route table for the new VCN (if created)"
136 | default = "cephrt"
137 | }
138 |
139 | variable "subnet_name_prefix" {
140 | description = "The prefix for the subnet names"
141 | default = "cephsub"
142 | }
143 |
144 | variable "subnet_cidr_blocks" {
145 | description = "The CIDR for the new subnets (if created)"
146 | default = [ "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24" ]
147 | }
148 |
149 | #---------------------------------------------------------------
150 | # Ceph Deployer related variables used in this deployment
151 | #---------------------------------------------------------------
152 | variable "availability_domain_index_for_deployer" {
153 | description = "The availability domain where the deployer will be created"
154 | default = [ "1" ]
155 | }
156 |
157 | variable "deployer_hostname" {
158 | description = "The name of the deployer instances"
159 | default = "test-ceph-deployer"
160 | }
161 |
162 | #---------------------------------------------------------------
163 | # Ceph Monitor related variables used in this deployment
164 | #---------------------------------------------------------------
165 | variable "monitor_instance_count" {
166 | description = "The number of monitors to create"
167 | default = "2"
168 | }
169 |
170 | variable "availability_domain_index_list_for_monitors" {
171 | description = "The list for availability domains where the monitors will be created. The size of the list has to be at least equal to the number of monitor instances"
172 | default = [ "1", "2", "3" ]
173 | }
174 |
175 | variable "monitor_hostname_prefix" {
176 | description = "The prefix for the name of the monitor instances"
177 | default = "test-ceph-monitor"
178 | }
179 |
180 | #---------------------------------------------------------------
181 | # Ceph OSD related variables used in this deployment
182 | #---------------------------------------------------------------
183 | variable "osd_instance_count" {
184 | description = "The number of OSDs to create"
185 | default = "3"
186 | }
187 |
188 | variable "availability_domain_index_list_for_osds" {
189 | description = "The availability domains where the OSDs will be created (as a list of indexes)"
190 | default = [ "1", "2", "3", "1" ]
191 | }
192 |
193 | variable "osd_hostname_prefix" {
194 | description = "The prefix to the name of the OSD instances. The name will be appended with a hyphen, followed by an integer starting at 0"
195 | default = "test-ceph-osd"
196 | }
197 |
198 | variable "volume_name_prefix" {
199 | description = "The prefix for the name of storage block volumes"
200 | default = "ceph-volume"
201 | }
202 |
203 | variable "volume_size_in_gbs" {
204 | description = "The size of storage block volumes"
205 | default = "50"
206 | }
207 |
208 | variable "volume_attachment_type" {
209 | description = "The type of volume attachments"
210 | default = "iscsi"
211 | }
212 |
213 | variable "block_device_for_ceph" {
214 | description = "The name of the block device to be used by Ceph. If volumes are created, it will be sdb.
Otherwise nvme0n1"
215 | default = [ "nvme0n1", "sdb" ]
216 | }
217 |
218 | #---------------------------------------------------------------
219 | # Variables used in Ceph MDS deployment
220 | #---------------------------------------------------------------
221 | variable "mds_instance_count" {
222 | description = "The number of MDSs to create"
223 | default = "1"
224 | }
225 |
226 | variable "availability_domain_index_list_for_mds" {
227 | description = "The availability domains where the MDS instances will be created"
228 | default = [ "1", "2", "3" ]
229 | }
230 |
231 | variable "mds_hostname_prefix" {
232 | description = "The prefix for the name of the MDS instances"
233 | default = "test-ceph-mds"
234 | }
235 |
236 | #---------------------------------------------------------------
237 | # Variables used in this Ceph Client deployment
238 | #---------------------------------------------------------------
239 | variable "create_client" {
240 | description = "Whether to create a client or not."
241 | default = true
242 | }
243 |
244 | variable "availability_domain_index_list_for_client" {
245 | description = "The availability domain where the client will be created"
246 | default = [ "1" ]
247 | }
248 |
249 | variable "client_hostname" {
250 | description = "The name of the client instance"
251 | default = "test-ceph-client"
252 | }
253 |
--------------------------------------------------------------------------------
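With a variables file and the environment in place, the deployment itself is the standard Terraform cycle, and the outputs defined in outputs.tf report where everything landed; a closing sketch:

  terraform apply
  terraform output ceph_deployer_ip
  terraform output ceph_monitor_ip_list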