├── .gitignore ├── Makefile ├── README.md ├── bin ├── resume.sh └── suspend.sh ├── examples ├── nip │ ├── Makefile │ ├── main.tf │ ├── output.tf │ ├── terraform.tfvars.example │ └── variables.tf ├── ocp │ └── terraform.tfvars.example ├── openshift-domain │ ├── main.tf │ └── variables.tf ├── openshift-infra │ ├── Makefile │ ├── main.tf │ └── variables.tf ├── openshift-network │ ├── main.tf │ └── variables.tf └── openshift │ ├── main.tf │ └── variables.tf ├── main.tf ├── modules ├── domain │ ├── README.md │ ├── certificates.tf │ ├── output.tf │ ├── public_lb.tf │ ├── public_record.tf │ ├── public_zone.tf │ └── variables.tf ├── infra │ ├── ami.tf │ ├── bastion.tf │ ├── bastion_eip.tf │ ├── bastion_role.tf │ ├── bastion_sg.tf │ ├── bastion_userdata.tf │ ├── compute_node.tf │ ├── compute_node_role.tf │ ├── ec2_role.tf │ ├── instance_market_options.tf │ ├── key_pair.tf │ ├── master.tf │ ├── master_domain.tf │ ├── master_lb.tf │ ├── master_listener.tf │ ├── master_role.tf │ ├── master_sg.tf │ ├── master_userdata.tf │ ├── node_sg.tf │ ├── output.tf │ ├── public_lb.tf │ ├── public_listener.tf │ ├── public_sg.tf │ ├── resources │ │ ├── bastion-init.yml │ │ └── master-init.yml │ ├── variables.tf │ └── vpc.tf ├── network │ ├── README.md │ ├── az.tf │ ├── output.tf │ ├── private_network.tf │ ├── provider.tf │ ├── public_network.tf │ ├── variables.tf │ └── vpc.tf └── openshift │ ├── bastion_config.tf │ ├── bastion_repos.tf │ ├── inventory.tf │ ├── main.tf │ ├── node_config.tf │ ├── openshift_applier.tf │ ├── output.tf │ ├── public_certificate.tf │ ├── region.tf │ ├── resources │ ├── bastion-config-playbook.yaml │ ├── bastion-config.sh │ ├── bastion-repos.sh │ ├── deploy-cluster.sh │ ├── node-config-playbook.yaml │ ├── node-config.sh │ ├── openshift-applier.sh │ ├── openshift-applier │ │ └── openshift-policies │ │ │ ├── config.yml │ │ │ └── private │ │ │ └── config.yml │ └── template-inventory.yaml │ └── variables.tf ├── output.tf ├── variables.tf └── vars └── .gitignore /.gitignore: -------------------------------------------------------------------------------- 1 | **/backend.tf 2 | .terraform/ 3 | work/ 4 | *.tfstate 5 | *.tfstate.* 6 | .envrc 7 | *.tfvars 8 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: key sshspec ssh console 2 | 3 | key: 4 | @terraform output platform_private_key > /tmp/.openshift-`terraform output platform_name`.key 5 | @chmod 600 /tmp/.openshift-`terraform output platform_name`.key 6 | 7 | sshspec: 8 | @terraform output bastion_ssh_spec 9 | 10 | ssh: key 11 | @ssh `terraform output bastion_ssh_spec` -i /tmp/.openshift-`terraform output platform_name`.key 12 | 13 | console: 14 | @open `terraform output master_public_url` 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Terraform OpenShift Container Platform Module 2 | 3 | Builds the OpenShift reference architecture on AWS. 4 | 5 | It supports both OCP and OKD. 6 | 7 | ## Prerequisites 8 | 9 | If you want to use a custom domain for your OpenShift platform, 10 | 11 | * You need to create a [public Route53 zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html) for your platform domain. If your master domain is `mycluster.example.com`, a public Route53 zone named `mycluster.example.com` is required. A minimal sketch of pre-creating that zone with Terraform is shown below.
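  The zone name here simply mirrors the example above:

  ```hcl
  # Pre-step managed outside this module: the public hosted zone that
  # modules/domain later looks up via a data source (see modules/domain/public_zone.tf).
  resource "aws_route53_zone" "platform" {
    name = "mycluster.example.com" # must match the platform_domain variable
  }
  ```

  Also make sure the domain is delegated to this zone's name servers at your registrar; the Let's Encrypt DNS challenge in `modules/domain` requires the records to be publicly resolvable.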
12 | 13 | When you use a nip.io wildcard domain, you don't have to prepare any public DNS settings. See [an example](/examples/nip). 14 | 15 | If you are building an OCP cluster, 16 | 17 | * you need to know the subscription pool ID for OCP. 18 | * you need access to the Gold Images of Red Hat Atomic Host through the Red Hat Cloud Access program. See also https://access.redhat.com/articles/2962171 19 | 20 | ## Creating a cluster 21 | 22 | ### Set Terraform variables for the OpenShift cluster 23 | 24 | OCP: [OCP examples](/examples/ocp/terraform.tfvars.example) 25 | 26 | OKD: [OKD examples](/examples/nip/terraform.tfvars.example) 27 | 28 | ### Start building the cluster 29 | 30 | ```bash 31 | terraform plan -var-file=xxx.tfvars 32 | terraform apply -var-file=xxx.tfvars 33 | ``` 34 | 35 | ### Access your cluster's admin console 36 | 37 | Once your cluster has launched successfully, you can access its admin console: 38 | 39 | `make console` 40 | 41 | ## Tips 42 | 43 | ### SSH to Bastion 44 | 45 | ``` 46 | make ssh 47 | ``` 48 | 49 | ### Suspend / Resume instances 50 | 51 | For on-demand instances only (spot instances cannot be stopped and restarted). 52 | 53 | ```bash 54 | # suspend 55 | ./bin/suspend.sh 56 | 57 | # resume 58 | ./bin/resume.sh 59 | ``` 60 | -------------------------------------------------------------------------------- /bin/resume.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | asg_names="$(terraform output platform_name)-bastion $(terraform output platform_name)-master $(terraform output platform_name)-compute" 4 | 5 | for asg_name in $asg_names; do 6 | 7 | compute_ids=$(aws ec2 describe-instances --filters "Name=tag:aws:autoscaling:groupName,Values=${asg_name}" --query "Reservations[].Instances[].[InstanceId]" --output text) 8 | 9 | for compute_id in $compute_ids; do 10 | echo "returning to healthy: $compute_id : $asg_name" 11 | 12 | aws ec2 start-instances --instance-ids $compute_id 13 | aws autoscaling set-instance-health --instance-id $compute_id --health-status Healthy 14 | done 15 | 16 | aws autoscaling resume-processes --auto-scaling-group-name ${asg_name} --scaling-processes Terminate 17 | done 18 | -------------------------------------------------------------------------------- /bin/suspend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | asg_names="$(terraform output platform_name)-bastion $(terraform output platform_name)-master $(terraform output platform_name)-compute" 4 | 5 | for asg_name in $asg_names; do 6 | aws autoscaling suspend-processes --auto-scaling-group-name ${asg_name} --scaling-processes Terminate 7 | 8 | compute_ids=$(aws ec2 describe-instances --filters "Name=tag:aws:autoscaling:groupName,Values=${asg_name}" --query "Reservations[].Instances[].[InstanceId]" --output text) 9 | 10 | if [ -n "${compute_ids}" ]; then 11 | aws ec2 stop-instances --instance-ids $compute_ids 12 | fi 13 | done 14 | -------------------------------------------------------------------------------- /examples/nip/Makefile: -------------------------------------------------------------------------------- 1 | ../../Makefile -------------------------------------------------------------------------------- /examples/nip/main.tf: -------------------------------------------------------------------------------- 1 | module "network" { 2 | source = "../../modules/network" 3 | platform_name = "${var.platform_name}" 4 | } 5 | 6 | module "infra" { 7 | source = "../../modules/infra" 8 |
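  # The arguments below wire the network module's outputs (VPC and subnet IDs)
  # into the infra module; see modules/network/output.tf for where they are defined.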
9 | platform_name = "${var.platform_name}" 10 | use_community = "${var.use_community}" 11 | 12 | platform_vpc_id = "${module.network.platform_vpc_id}" 13 | public_subnet_ids = ["${module.network.public_subnet_ids}"] 14 | private_subnet_ids = ["${module.network.private_subnet_ids}"] 15 | 16 | operator_cidrs = ["${var.operator_cidrs}"] 17 | public_cidrs = ["${var.public_cidrs}"] 18 | 19 | use_spot = "${var.use_spot}" 20 | 21 | master_count = "${var.master_count}" 22 | master_instance_type = "${var.master_instance_type}" 23 | compute_node_count = "${var.compute_node_count}" 24 | compute_node_instance_type = "${var.compute_node_instance_type}" 25 | } 26 | 27 | module "openshift" { 28 | source = "../../modules/openshift" 29 | 30 | platform_name = "${var.platform_name}" 31 | use_community = "${var.use_community}" 32 | 33 | bastion_ssh_user = "${module.infra.bastion_ssh_user}" 34 | bastion_endpoint = "${module.infra.bastion_endpoint}" 35 | platform_private_key = "${module.infra.platform_private_key}" 36 | rhn_username = "${var.rhn_username}" 37 | rhn_password = "${var.rhn_password}" 38 | rh_subscription_pool_id = "${var.rh_subscription_pool_id}" 39 | 40 | master_domain = "${module.infra.master_domain}" 41 | platform_domain = "${element(module.infra.platform_public_ip_set, 0)}.nip.io" 42 | } 43 | -------------------------------------------------------------------------------- /examples/nip/output.tf: -------------------------------------------------------------------------------- 1 | ../../output.tf -------------------------------------------------------------------------------- /examples/nip/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | platform_name = "okd" 2 | 3 | # Use OKD 4 | use_community = true 5 | 6 | # Use nip.io (default). Master: https://xx.xx.xx.xx.nip.io:8443/ 7 | # platform_domain = "" 8 | 9 | master_count = 1 10 | 11 | master_instance_type = "m4.large" 12 | 13 | compute_node_count = 1 14 | 15 | compute_node_instance_type = "m4.large" 16 | 17 | operator_cidrs = ["0.0.0.0/0"] 18 | 19 | public_cidrs = ["0.0.0.0/0"] 20 | 21 | use_spot = true 22 | -------------------------------------------------------------------------------- /examples/nip/variables.tf: -------------------------------------------------------------------------------- 1 | ../../variables.tf -------------------------------------------------------------------------------- /examples/ocp/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | # The master URL can be accessed from: 2 | operator_cidrs = ["0.0.0.0/0"] 3 | 4 | # Services hosted in the cluster can be accessed from: 5 | public_cidrs = ["0.0.0.0/0"] 6 | 7 | # Use spot instances 8 | use_spot = true 9 | 10 | # Master instance count 11 | master_count = 1 12 | 13 | # Master instance type 14 | master_instance_type = "m4.xlarge" 15 | 16 | # Compute node count 17 | compute_node_count = 3 18 | 19 | # Compute instance type 20 | compute_node_instance_type = "m4.large" 21 | 22 | # Platform name 23 | platform_name = "mycluster" 24 | 25 | # If you want to use OKD, set this to true 26 | use_community = false 27 | 28 | # Platform domain. Master: https://<platform_domain>:8443/
29 | platform_domain = "mycluster.example.com" 30 | 31 | # For Let's Encrypt: domain administrator's email address: 32 | platform_domain_administrator_email = "admin@example.com" 33 | 34 | # Red Hat Network username: 35 | rhn_username = "admin@example.com" 36 | 37 | # Red Hat Network password: 38 | rhn_password = "xxxx" 39 | 40 | # OpenShift subscription's pool ID 41 | rh_subscription_pool_id = "xxxxxx" 42 | -------------------------------------------------------------------------------- /examples/openshift-domain/main.tf: -------------------------------------------------------------------------------- 1 | module "openshift_domain" { 2 | source = "../../modules/domain" 3 | platform_name = "${var.platform_name}" 4 | platform_domain = "${var.platform_domain}" 5 | platform_domain_administrator_email = "${var.platform_domain_administrator_email}" 6 | public_lb_arn = "${var.public_lb_arn}" 7 | } 8 | -------------------------------------------------------------------------------- /examples/openshift-domain/variables.tf: -------------------------------------------------------------------------------- 1 | ../../modules/domain/variables.tf -------------------------------------------------------------------------------- /examples/openshift-infra/Makefile: -------------------------------------------------------------------------------- 1 | sshspec: 2 | @echo `terraform output -module openshift_infra bastion_ssh_user`@`terraform output -module openshift_infra bastion_endpoint` 3 | 4 | key: 5 | @terraform output -module openshift_infra platform_private_key > /tmp/.$(TF_VAR_platform_name).infra.key 6 | @chmod 600 /tmp/.$(TF_VAR_platform_name).infra.key 7 | 8 | ssh: key 9 | @ssh `make sshspec` -i /tmp/.$(TF_VAR_platform_name).infra.key 10 | -------------------------------------------------------------------------------- /examples/openshift-infra/main.tf: -------------------------------------------------------------------------------- 1 | module "openshift_infra" { 2 | source = "../../modules/infra" 3 | 4 | platform_name = "${var.platform_name}" 5 | 6 | platform_vpc_id = "${var.platform_vpc_id}" 7 | public_subnet_ids = ["${var.public_subnet_ids}"] 8 | private_subnet_ids = ["${var.private_subnet_ids}"] 9 | 10 | operator_cidrs = ["0.0.0.0/0"] 11 | 12 | use_spot = true 13 | 14 | master_count = "${var.master_count}" 15 | } 16 | -------------------------------------------------------------------------------- /examples/openshift-infra/variables.tf: -------------------------------------------------------------------------------- 1 | ../../modules/infra/variables.tf -------------------------------------------------------------------------------- /examples/openshift-network/main.tf: -------------------------------------------------------------------------------- 1 | module "openshift_network" { 2 | source = "../../modules/network" 3 | platform_name = "${var.platform_name}" 4 | } 5 | -------------------------------------------------------------------------------- /examples/openshift-network/variables.tf: -------------------------------------------------------------------------------- 1 | ../../modules/network/variables.tf -------------------------------------------------------------------------------- /examples/openshift/main.tf: -------------------------------------------------------------------------------- 1 | module "openshift" { 2 | source = "../../modules/openshift" 3 | 4 | platform_name = "${var.platform_name}" 5 | bastion_ssh_user = "${var.bastion_ssh_user}" 6 | bastion_endpoint = "${var.bastion_endpoint}"
"${var.bastion_endpoint}" 7 | platform_private_key = "${var.platform_private_key}" 8 | rhn_username = "${var.rhn_username}" 9 | rhn_password = "${var.rhn_password}" 10 | rh_subscription_pool_id = "${var.rh_subscription_pool_id}" 11 | master_domain = "${var.master_domain}" 12 | platform_domain = "${var.platform_domain}" 13 | public_certificate_pem = "${var.public_certificate_pem}" 14 | public_certificate_key = "${var.public_certificate_key}" 15 | public_certificate_intermediate_pem = "${var.public_certificate_intermediate_pem}" 16 | } 17 | -------------------------------------------------------------------------------- /examples/openshift/variables.tf: -------------------------------------------------------------------------------- 1 | ../../modules/openshift/variables.tf -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | module "network" { 2 | source = "modules/network" 3 | platform_name = "${var.platform_name}" 4 | availability_zones = "${var.availability_zones}" 5 | } 6 | 7 | module "infra" { 8 | source = "modules/infra" 9 | 10 | platform_name = "${var.platform_name}" 11 | use_community = "${var.use_community}" 12 | 13 | platform_vpc_id = "${module.network.platform_vpc_id}" 14 | public_subnet_ids = ["${module.network.public_subnet_ids}"] 15 | private_subnet_ids = ["${module.network.private_subnet_ids}"] 16 | 17 | operator_cidrs = ["${var.operator_cidrs}"] 18 | public_cidrs = ["${var.public_cidrs}"] 19 | 20 | use_spot = "${var.use_spot}" 21 | 22 | master_count = "${var.master_count}" 23 | master_instance_type = "${var.master_instance_type}" 24 | compute_node_count = "${var.compute_node_count}" 25 | compute_node_instance_type = "${var.compute_node_instance_type}" 26 | } 27 | 28 | module "domain" { 29 | source = "modules/domain" 30 | 31 | platform_name = "${var.platform_name}" 32 | platform_domain = "${var.platform_domain}" 33 | platform_domain_administrator_email = "${var.platform_domain_administrator_email}" 34 | public_lb_arn = "${module.infra.public_lb_arn}" 35 | } 36 | 37 | module "openshift" { 38 | source = "modules/openshift" 39 | 40 | platform_name = "${var.platform_name}" 41 | use_community = "${var.use_community}" 42 | 43 | bastion_ssh_user = "${module.infra.bastion_ssh_user}" 44 | bastion_endpoint = "${module.infra.bastion_endpoint}" 45 | platform_private_key = "${module.infra.platform_private_key}" 46 | rhn_username = "${var.rhn_username}" 47 | rhn_password = "${var.rhn_password}" 48 | rh_subscription_pool_id = "${var.rh_subscription_pool_id}" 49 | 50 | master_domain = "${module.infra.master_domain}" 51 | platform_domain = "${var.platform_domain}" 52 | public_certificate_pem = "${module.domain.public_certificate_pem}" 53 | public_certificate_key = "${module.domain.public_certificate_key}" 54 | public_certificate_intermediate_pem = "${module.domain.public_certificate_intermediate_pem}" 55 | 56 | identity_providers = "${var.identity_providers}" 57 | 58 | google_client_id = "${var.google_client_id}" 59 | google_client_secret = "${var.google_client_secret}" 60 | google_client_domain = "${var.google_client_domain}" 61 | } 62 | -------------------------------------------------------------------------------- /modules/domain/README.md: -------------------------------------------------------------------------------- 1 | # Terraform OpenShift Module / Domain 2 | 3 | Builds some domain settings for OpenShift platform. 
-------------------------------------------------------------------------------- /modules/domain/certificates.tf: -------------------------------------------------------------------------------- 1 | provider "acme" { 2 | server_url = "https://acme-v02.api.letsencrypt.org/directory" 3 | } 4 | 5 | resource "tls_private_key" "platform_domain_administrator" { 6 | algorithm = "RSA" 7 | } 8 | 9 | resource "acme_registration" "platform_domain_administrator" { 10 | account_key_pem = "${tls_private_key.platform_domain_administrator.private_key_pem}" 11 | email_address = "${var.platform_domain_administrator_email}" 12 | } 13 | 14 | resource "tls_private_key" "platform_domain_csr" { 15 | algorithm = "RSA" 16 | } 17 | 18 | resource "tls_cert_request" "platform_domain" { 19 | key_algorithm = "RSA" 20 | private_key_pem = "${tls_private_key.platform_domain_csr.private_key_pem}" 21 | 22 | dns_names = ["${var.platform_domain}"] 23 | 24 | subject { 25 | common_name = "*.${var.platform_domain}" 26 | } 27 | } 28 | 29 | resource "acme_certificate" "platform_domain" { 30 | account_key_pem = "${acme_registration.platform_domain_administrator.account_key_pem}" 31 | certificate_request_pem = "${tls_cert_request.platform_domain.cert_request_pem}" 32 | 33 | dns_challenge { 34 | provider = "route53" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /modules/domain/output.tf: -------------------------------------------------------------------------------- 1 | output "public_certificate_pem" { 2 | value = "${element(concat(acme_certificate.platform_domain.*.certificate_pem, list("")), 0)}" 3 | } 4 | 5 | output "public_certificate_key" { 6 | value = "${element(concat(tls_private_key.platform_domain_csr.*.private_key_pem, list("")), 0)}" 7 | } 8 | 9 | output "public_certificate_intermediate_pem" { 10 | value = "${element(concat(acme_certificate.platform_domain.*.issuer_pem, list("")), 0)}" 11 | } 12 | -------------------------------------------------------------------------------- /modules/domain/public_lb.tf: -------------------------------------------------------------------------------- 1 | data "aws_lb" "public" { 2 | arn = "${var.public_lb_arn}" 3 | } 4 | -------------------------------------------------------------------------------- /modules/domain/public_record.tf: -------------------------------------------------------------------------------- 1 | resource "aws_route53_record" "master_public" { 2 | zone_id = "${data.aws_route53_zone.public.zone_id}" 3 | name = "${var.platform_domain}" 4 | type = "A" 5 | 6 | alias { 7 | name = "${data.aws_lb.public.dns_name}" 8 | zone_id = "${data.aws_lb.public.zone_id}" 9 | evaluate_target_health = false 10 | } 11 | } 12 | 13 | resource "aws_route53_record" "public" { 14 | zone_id = "${data.aws_route53_zone.public.zone_id}" 15 | name = "*.${var.platform_domain}" 16 | type = "A" 17 | 18 | alias { 19 | name = "${data.aws_lb.public.dns_name}" 20 | zone_id = "${data.aws_lb.public.zone_id}" 21 | evaluate_target_health = false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /modules/domain/public_zone.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "public" { 2 | name = "${var.platform_domain}." 
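  # The trailing dot makes the zone name fully qualified, matching the form in
  # which Route53 stores hosted zone names.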
3 | } 4 | -------------------------------------------------------------------------------- /modules/domain/variables.tf: -------------------------------------------------------------------------------- 1 | variable "platform_name" {} 2 | 3 | variable "platform_domain" {} 4 | 5 | variable "platform_domain_administrator_email" {} 6 | 7 | variable "public_lb_arn" {} 8 | -------------------------------------------------------------------------------- /modules/infra/ami.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | base_image_owners = "${var.use_community ? "679593333241" : "309956199498"}" 4 | base_image_name = "${var.use_community ? "CentOS Linux 7 x86_64 HVM EBS *" : "RHEL-7.5_HVM_GA-????????-x86_64-*-Access2-*"}" 5 | base_image_id = "${data.aws_ami.base_image.image_id}" 6 | base_image_root_device_name = "${data.aws_ami.base_image.root_device_name}" 7 | } 8 | 9 | data "aws_ami" "base_image" { 10 | most_recent = true 11 | 12 | owners = ["${local.base_image_owners}"] 13 | 14 | filter { 15 | name = "architecture" 16 | values = ["x86_64"] 17 | } 18 | 19 | filter { 20 | name = "root-device-type" 21 | values = ["ebs"] 22 | } 23 | 24 | filter { 25 | name = "name" 26 | values = ["${local.base_image_name}"] 27 | } 28 | 29 | filter { 30 | name = "virtualization-type" 31 | values = ["hvm"] 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /modules/infra/bastion.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | bastion_ssh_user = "${(var.use_community) ? "centos" : "ec2-user"}" 3 | } 4 | 5 | resource "aws_launch_template" "bastion" { 6 | name_prefix = "${var.platform_name}-bastion-" 7 | 8 | block_device_mappings { 9 | device_name = "${local.base_image_root_device_name}" 10 | 11 | ebs { 12 | volume_size = 32 13 | } 14 | } 15 | 16 | image_id = "${local.base_image_id}" 17 | 18 | instance_market_options = "${local.spot_type[var.use_spot ? 
"enabled" : "disabled"]}" 19 | 20 | instance_type = "m4.large" 21 | 22 | iam_instance_profile { 23 | arn = "${aws_iam_instance_profile.bastion.arn}" 24 | } 25 | 26 | key_name = "${aws_key_pair.platform.id}" 27 | 28 | tag_specifications { 29 | resource_type = "instance" 30 | 31 | tags = "${map( 32 | "kubernetes.io/cluster/${var.platform_name}", "owned", 33 | "Name", "${var.platform_name}-bastion", 34 | "Role", "bastion" 35 | )}" 36 | } 37 | 38 | user_data = "${base64encode(data.template_file.bastion_init.rendered)}" 39 | 40 | vpc_security_group_ids = ["${aws_security_group.bastion.id}"] 41 | } 42 | 43 | resource "aws_autoscaling_group" "bastion" { 44 | name = "${var.platform_name}-bastion" 45 | vpc_zone_identifier = ["${var.public_subnet_ids}"] 46 | desired_capacity = 1 47 | max_size = 1 48 | min_size = 1 49 | 50 | launch_template = { 51 | id = "${aws_launch_template.bastion.id}" 52 | version = "$$Latest" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /modules/infra/bastion_eip.tf: -------------------------------------------------------------------------------- 1 | resource "aws_eip" "bastion" { 2 | vpc = true 3 | 4 | tags = "${map( 5 | "kubernetes.io/cluster/${var.platform_name}", "owned", 6 | "Name", "${var.platform_name}-bastion", 7 | "Role", "bastion" 8 | )}" 9 | } 10 | -------------------------------------------------------------------------------- /modules/infra/bastion_role.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "bastion" { 2 | statement { 3 | actions = [ 4 | "ssm:DescribeAssociation", 5 | "ssm:GetDeployablePatchSnapshotForInstance", 6 | "ssm:GetDocument", 7 | "ssm:GetManifest", 8 | "ssm:GetParameters", 9 | "ssm:ListAssociations", 10 | "ssm:ListInstanceAssociations", 11 | "ssm:PutInventory", 12 | "ssm:PutComplianceItems", 13 | "ssm:PutConfigurePackageResult", 14 | "ssm:UpdateAssociationStatus", 15 | "ssm:UpdateInstanceAssociationStatus", 16 | "ssm:UpdateInstanceInformation", 17 | ] 18 | 19 | effect = "Allow" 20 | resources = ["*"] 21 | } 22 | 23 | statement { 24 | actions = [ 25 | "ec2messages:AcknowledgeMessage", 26 | "ec2messages:DeleteMessage", 27 | "ec2messages:FailMessage", 28 | "ec2messages:GetEndpoint", 29 | "ec2messages:GetMessages", 30 | "ec2messages:SendReply", 31 | ] 32 | 33 | effect = "Allow" 34 | resources = ["*"] 35 | } 36 | 37 | statement { 38 | actions = [ 39 | "cloudwatch:PutMetricData", 40 | ] 41 | 42 | effect = "Allow" 43 | resources = ["*"] 44 | } 45 | 46 | statement { 47 | actions = [ 48 | "ec2:Describe*", 49 | ] 50 | 51 | effect = "Allow" 52 | resources = ["*"] 53 | } 54 | 55 | statement { 56 | actions = [ 57 | "ec2:AllocateAddress", 58 | "ec2:AssociateAddress", 59 | ] 60 | 61 | effect = "Allow" 62 | resources = ["*"] 63 | } 64 | 65 | statement { 66 | actions = [ 67 | "ec2:AttachVolume", 68 | "ec2:AuthorizeSecurityGroupIngress", 69 | "ec2:CopyImage", 70 | "ec2:CreateImage", 71 | "ec2:CreateKeypair", 72 | "ec2:CreateSecurityGroup", 73 | "ec2:CreateSnapshot", 74 | "ec2:CreateTags", 75 | "ec2:CreateVolume", 76 | "ec2:DeleteKeyPair", 77 | "ec2:DeleteSecurityGroup", 78 | "ec2:DeleteSnapshot", 79 | "ec2:DeleteVolume", 80 | "ec2:DeregisterImage", 81 | "ec2:DescribeImageAttribute", 82 | "ec2:DescribeImages", 83 | "ec2:DescribeInstances", 84 | "ec2:DescribeRegions", 85 | "ec2:DescribeSecurityGroups", 86 | "ec2:DescribeSnapshots", 87 | "ec2:DescribeSubnets", 88 | "ec2:DescribeTags", 89 | "ec2:DescribeVolumes", 90 | "ec2:DetachVolume", 91 | 
"ec2:GetPasswordData", 92 | "ec2:ModifyImageAttribute", 93 | "ec2:ModifyInstanceAttribute", 94 | "ec2:ModifySnapshotAttribute", 95 | "ec2:RegisterImage", 96 | "ec2:RunInstances", 97 | "ec2:StopInstances", 98 | "ec2:TerminateInstances", 99 | ] 100 | 101 | effect = "Allow" 102 | resources = ["*"] 103 | } 104 | 105 | statement { 106 | actions = [ 107 | "ds:CreateComputer", 108 | "ds:DescribeDirectories", 109 | ] 110 | 111 | effect = "Allow" 112 | resources = ["*"] 113 | } 114 | 115 | statement { 116 | actions = [ 117 | "logs:CreateLogGroup", 118 | "logs:CreateLogStream", 119 | "logs:DescribeLogGroups", 120 | "logs:DescribeLogStreams", 121 | "logs:PutLogEvents", 122 | ] 123 | 124 | effect = "Allow" 125 | resources = ["*"] 126 | } 127 | 128 | statement { 129 | actions = [ 130 | "s3:PutObject", 131 | "s3:GetObject", 132 | "s3:AbortMultipartUpload", 133 | "s3:ListMultipartUploadParts", 134 | "s3:ListBucket", 135 | "s3:ListBucketMultipartUploads", 136 | ] 137 | 138 | effect = "Allow" 139 | resources = ["*"] 140 | } 141 | } 142 | 143 | resource "aws_iam_role" "bastion" { 144 | name = "${var.platform_name}-bastion-role" 145 | assume_role_policy = "${data.aws_iam_policy_document.ec2.json}" 146 | } 147 | 148 | resource "aws_iam_role_policy" "bastion" { 149 | name = "${var.platform_name}-bastion-policy" 150 | role = "${aws_iam_role.bastion.id}" 151 | policy = "${data.aws_iam_policy_document.bastion.json}" 152 | } 153 | 154 | resource "aws_iam_instance_profile" "bastion" { 155 | name = "${var.platform_name}-bastion-profile" 156 | role = "${aws_iam_role.bastion.name}" 157 | } 158 | -------------------------------------------------------------------------------- /modules/infra/bastion_sg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "bastion" { 2 | name = "${var.platform_name}-bastion" 3 | description = "Bastion group for ${var.platform_name}" 4 | 5 | ingress { 6 | from_port = 22 7 | to_port = 22 8 | protocol = "tcp" 9 | cidr_blocks = ["${var.operator_cidrs}"] 10 | } 11 | 12 | egress { 13 | from_port = 0 14 | to_port = 0 15 | protocol = "-1" 16 | cidr_blocks = ["0.0.0.0/0"] 17 | } 18 | 19 | tags = "${map( 20 | "kubernetes.io/cluster/${var.platform_name}", "owned", 21 | "Name", "${var.platform_name}-bastion", 22 | "Role", "bastion" 23 | )}" 24 | 25 | vpc_id = "${data.aws_vpc.platform.id}" 26 | } 27 | -------------------------------------------------------------------------------- /modules/infra/bastion_userdata.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "bastion_init" { 2 | template = "${file("${path.module}/resources/bastion-init.yml")}" 3 | 4 | vars { 5 | platform_name = "${var.platform_name}" 6 | bastion_ssh_user = "${local.bastion_ssh_user}" 7 | platform_id_rsa = "${base64encode(data.tls_public_key.platform.private_key_pem)}" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /modules/infra/compute_node.tf: -------------------------------------------------------------------------------- 1 | resource "aws_launch_template" "compute_node" { 2 | name_prefix = "${var.platform_name}-compute-node-" 3 | 4 | block_device_mappings { 5 | device_name = "${local.base_image_root_device_name}" 6 | 7 | ebs { 8 | volume_size = 100 9 | } 10 | } 11 | 12 | image_id = "${local.base_image_id}" 13 | 14 | instance_market_options = "${local.spot_type[var.use_spot ? 
"enabled" : "disabled"]}" 15 | 16 | instance_type = "${var.compute_node_instance_type}" 17 | 18 | iam_instance_profile { 19 | arn = "${aws_iam_instance_profile.compute_node.arn}" 20 | } 21 | 22 | key_name = "${aws_key_pair.platform.id}" 23 | 24 | tag_specifications { 25 | resource_type = "instance" 26 | 27 | tags = "${map( 28 | "kubernetes.io/cluster/${var.platform_name}", "owned", 29 | "Name", "${var.platform_name}-compute_node", 30 | "Role", "node", 31 | "openshift_node_group_name", "node-config-compute" 32 | )}" 33 | } 34 | 35 | vpc_security_group_ids = ["${aws_security_group.node.id}"] 36 | } 37 | 38 | resource "aws_autoscaling_group" "compute_node" { 39 | name = "${var.platform_name}-compute" 40 | vpc_zone_identifier = ["${var.private_subnet_ids}"] 41 | desired_capacity = "${var.compute_node_count}" 42 | max_size = "${var.compute_node_count}" 43 | min_size = "${var.compute_node_count}" 44 | 45 | # TODO workaround 46 | launch_template = { 47 | id = "${aws_launch_template.compute_node.id}" 48 | version = "$$Latest" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /modules/infra/compute_node_role.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "compute_node" { 2 | statement { 3 | actions = [ 4 | "ec2:*", 5 | "ec2:AttachVolume", 6 | "ssm:GetDocument", 7 | "ec2:DetachVolume", 8 | "elasticloadbalancing:*", 9 | ] 10 | 11 | effect = "Allow" 12 | resources = ["*"] 13 | } 14 | } 15 | 16 | resource "aws_iam_role" "compute_node" { 17 | name = "${var.platform_name}-compute-node-role" 18 | assume_role_policy = "${data.aws_iam_policy_document.ec2.json}" 19 | } 20 | 21 | resource "aws_iam_role_policy" "compute_node" { 22 | name = "${var.platform_name}-compute-node-policy" 23 | role = "${aws_iam_role.compute_node.id}" 24 | policy = "${data.aws_iam_policy_document.compute_node.json}" 25 | } 26 | 27 | resource "aws_iam_instance_profile" "compute_node" { 28 | name = "${var.platform_name}-compute-profile" 29 | role = "${aws_iam_role.compute_node.name}" 30 | } 31 | -------------------------------------------------------------------------------- /modules/infra/ec2_role.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "ec2" { 2 | statement { 3 | effect = "Allow" 4 | 5 | actions = ["sts:AssumeRole"] 6 | 7 | principals { 8 | type = "Service" 9 | identifiers = ["ec2.amazonaws.com"] 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /modules/infra/instance_market_options.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | spot_type = { 3 | enabled = [{ 4 | market_type = "spot" 5 | }] 6 | 7 | disabled = [] 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /modules/infra/key_pair.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "platform" { 2 | algorithm = "RSA" 3 | rsa_bits = "4096" 4 | } 5 | 6 | data "tls_public_key" "platform" { 7 | private_key_pem = "${tls_private_key.platform.private_key_pem}" 8 | } 9 | 10 | resource "aws_key_pair" "platform" { 11 | key_name = "${var.platform_name}" 12 | public_key = "${data.tls_public_key.platform.public_key_openssh}" 13 | } 14 | -------------------------------------------------------------------------------- /modules/infra/master.tf: 
-------------------------------------------------------------------------------- 1 | locals { 2 | master_security_groups = ["${aws_security_group.node.id}", "${aws_security_group.master_public.id}"] 3 | master_infra_security_groups = ["${aws_security_group.node.id}", "${aws_security_group.master_public.id}", "${aws_security_group.public.id}"] 4 | } 5 | 6 | resource "aws_launch_template" "master" { 7 | name_prefix = "${var.platform_name}-master-" 8 | 9 | block_device_mappings { 10 | device_name = "${local.base_image_root_device_name}" 11 | 12 | ebs { 13 | volume_size = 100 14 | } 15 | } 16 | 17 | image_id = "${local.base_image_id}" 18 | 19 | instance_market_options = "${local.spot_type[var.use_spot ? "enabled" : "disabled"]}" 20 | 21 | instance_type = "${var.master_instance_type}" 22 | 23 | iam_instance_profile { 24 | arn = "${aws_iam_instance_profile.master.arn}" 25 | } 26 | 27 | key_name = "${aws_key_pair.platform.id}" 28 | 29 | tag_specifications { 30 | resource_type = "instance" 31 | 32 | tags = "${map( 33 | "kubernetes.io/cluster/${var.platform_name}", "owned", 34 | "Name", "${var.platform_name}-master", 35 | "Role", "master,node", 36 | "openshift_node_group_name", "${var.infra_node_count > 0 ? "node-config-master" : "node-config-master-infra"}" 37 | )}" 38 | } 39 | 40 | user_data = "${base64encode(data.template_file.master.rendered)}" 41 | 42 | vpc_security_group_ids = ["${split(",", var.infra_node_count > 0 ? join(",", local.master_security_groups) : join(",", local.master_infra_security_groups))}"] 43 | } 44 | 45 | locals { 46 | master_target_groups = ["${aws_lb_target_group.master_public.arn}"] 47 | master_infra_target_groups = ["${aws_lb_target_group.master_public.arn}", "${aws_lb_target_group.http.arn}", "${aws_lb_target_group.https.arn}"] 48 | } 49 | 50 | resource "aws_autoscaling_group" "master" { 51 | name = "${var.platform_name}-master" 52 | vpc_zone_identifier = ["${var.private_subnet_ids}"] 53 | desired_capacity = "${var.master_count}" 54 | max_size = "${var.master_count}" 55 | min_size = "${var.master_count}" 56 | 57 | # TODO workaround 58 | target_group_arns = ["${split(",", var.infra_node_count > 0 ? 
join(",", local.master_target_groups) : join(",", local.master_infra_target_groups))}"] 59 | load_balancers = ["${aws_elb.master.name}"] 60 | 61 | launch_template = { 62 | id = "${aws_launch_template.master.id}" 63 | version = "$$Latest" 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /modules/infra/master_domain.tf: -------------------------------------------------------------------------------- 1 | resource "aws_route53_zone" "private" { 2 | name = "${var.platform_name}.internal" 3 | 4 | vpc { 5 | vpc_id = "${data.aws_vpc.platform.id}" 6 | } 7 | 8 | tags = "${map( 9 | "kubernetes.io/cluster/${var.platform_name}", "owned" 10 | )}" 11 | } 12 | 13 | resource "aws_route53_record" "master" { 14 | zone_id = "${aws_route53_zone.private.zone_id}" 15 | name = "master.${var.platform_name}.internal" 16 | type = "A" 17 | 18 | alias { 19 | name = "${aws_elb.master.dns_name}" 20 | zone_id = "${aws_elb.master.zone_id}" 21 | evaluate_target_health = false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /modules/infra/master_lb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_elb" "master" { 2 | name = "${var.platform_name}-master" 3 | subnets = ["${var.private_subnet_ids}"] 4 | internal = true 5 | 6 | security_groups = ["${aws_security_group.node.id}"] 7 | 8 | listener { 9 | instance_port = 8443 10 | instance_protocol = "tcp" 11 | lb_port = 8443 12 | lb_protocol = "tcp" 13 | } 14 | 15 | health_check { 16 | healthy_threshold = 2 17 | unhealthy_threshold = 2 18 | timeout = 3 19 | target = "TCP:8443" 20 | interval = 30 21 | } 22 | 23 | cross_zone_load_balancing = true 24 | idle_timeout = 180 25 | connection_draining = true 26 | connection_draining_timeout = 180 27 | 28 | tags = "${map( 29 | "kubernetes.io/cluster/${var.platform_name}", "owned", 30 | "Name", "${var.platform_name}-master")}" 31 | } 32 | -------------------------------------------------------------------------------- /modules/infra/master_listener.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb_target_group" "master_public" { 2 | name = "${var.platform_name}-master-public" 3 | port = 8443 4 | protocol = "TCP" 5 | vpc_id = "${data.aws_vpc.platform.id}" 6 | deregistration_delay = 180 7 | 8 | health_check { 9 | interval = 30 10 | port = "traffic-port" 11 | protocol = "TCP" 12 | healthy_threshold = 3 13 | unhealthy_threshold = 3 14 | } 15 | } 16 | 17 | resource "aws_lb_listener" "master_public" { 18 | load_balancer_arn = "${aws_lb.public.arn}" 19 | port = 8443 20 | protocol = "TCP" 21 | 22 | default_action { 23 | target_group_arn = "${aws_lb_target_group.master_public.arn}" 24 | type = "forward" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /modules/infra/master_role.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "master" { 2 | statement { 3 | actions = [ 4 | "ec2:*", 5 | "ec2:AttachVolume", 6 | "ssm:GetDocument", 7 | "ec2:DetachVolume", 8 | "elasticloadbalancing:*", 9 | 10 | # letsencrypt 11 | "route53:*", 12 | 13 | "route53domains:*", 14 | "cloudfront:ListDistributions", 15 | "elasticloadbalancing:DescribeLoadBalancers", 16 | "elasticbeanstalk:DescribeEnvironments", 17 | "s3:ListBucket", 18 | "s3:GetBucketLocation", 19 | "s3:GetBucketWebsite", 20 | "ec2:DescribeVpcs", 21 | "ec2:DescribeRegions", 22 | 
"sns:ListTopics", 23 | "sns:ListSubscriptionsByTopic", 24 | "cloudwatch:DescribeAlarms", 25 | "cloudwatch:GetMetricStatistics", 26 | ] 27 | 28 | effect = "Allow" 29 | resources = ["*"] 30 | } 31 | } 32 | 33 | resource "aws_iam_role" "master" { 34 | name = "${var.platform_name}-master-role" 35 | assume_role_policy = "${data.aws_iam_policy_document.ec2.json}" 36 | } 37 | 38 | resource "aws_iam_role_policy" "master" { 39 | name = "${var.platform_name}-master-policy" 40 | role = "${aws_iam_role.master.id}" 41 | policy = "${data.aws_iam_policy_document.master.json}" 42 | } 43 | 44 | resource "aws_iam_instance_profile" "master" { 45 | name = "${var.platform_name}-master-profile" 46 | role = "${aws_iam_role.master.name}" 47 | } 48 | -------------------------------------------------------------------------------- /modules/infra/master_sg.tf: -------------------------------------------------------------------------------- 1 | # FOR Master Public LB 2 | resource "aws_security_group" "master_public" { 3 | name = "${var.platform_name}-master-public" 4 | description = "Master public group for ${var.platform_name}" 5 | 6 | ingress { 7 | from_port = 8443 8 | to_port = 8443 9 | protocol = "tcp" 10 | cidr_blocks = ["${var.operator_cidrs}"] 11 | } 12 | 13 | egress { 14 | from_port = 0 15 | to_port = 0 16 | protocol = "-1" 17 | cidr_blocks = ["0.0.0.0/0"] 18 | ipv6_cidr_blocks = ["::/0"] 19 | } 20 | 21 | tags = "${map( 22 | "kubernetes.io/cluster/${var.platform_name}", "owned", 23 | "Name", "${var.platform_name}-master-public" 24 | )}" 25 | 26 | vpc_id = "${data.aws_vpc.platform.id}" 27 | } 28 | -------------------------------------------------------------------------------- /modules/infra/master_userdata.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "master" { 2 | template = "${file("${path.module}/resources/master-init.yml")}" 3 | 4 | vars { 5 | platform_name = "${var.platform_name}" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /modules/infra/node_sg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "node" { 2 | name = "${var.platform_name}-node" 3 | description = "Cluster node group for ${var.platform_name}" 4 | 5 | ingress { 6 | from_port = 0 7 | to_port = 0 8 | protocol = "-1" 9 | security_groups = ["${aws_security_group.bastion.id}"] 10 | } 11 | 12 | ingress { 13 | from_port = 0 14 | to_port = 0 15 | protocol = "-1" 16 | self = true 17 | } 18 | 19 | egress { 20 | from_port = 0 21 | to_port = 0 22 | protocol = "-1" 23 | cidr_blocks = ["0.0.0.0/0"] 24 | ipv6_cidr_blocks = ["::/0"] 25 | } 26 | 27 | tags = "${map( 28 | "kubernetes.io/cluster/${var.platform_name}", "owned", 29 | "Name", "${var.platform_name}-node", 30 | "Role", "node", 31 | )}" 32 | 33 | vpc_id = "${data.aws_vpc.platform.id}" 34 | } 35 | -------------------------------------------------------------------------------- /modules/infra/output.tf: -------------------------------------------------------------------------------- 1 | output "bastion_ssh_user" { 2 | value = "${local.bastion_ssh_user}" 3 | } 4 | 5 | output "bastion_endpoint" { 6 | value = "${aws_eip.bastion.public_ip}" 7 | } 8 | 9 | output "platform_private_key" { 10 | sensitive = true 11 | value = "${data.tls_public_key.platform.private_key_pem}" 12 | } 13 | 14 | output "public_lb_arn" { 15 | value = "${aws_lb.public.arn}" 16 | } 17 | 18 | output "master_domain" { 19 | value = 
"${aws_route53_record.master.name}" 20 | } 21 | 22 | output "platform_public_ip_set" { 23 | value = "${data.dns_a_record_set.platform_public_ip_set.addrs}" 24 | } 25 | -------------------------------------------------------------------------------- /modules/infra/public_lb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "public" { 2 | name = "${var.platform_name}-public" 3 | internal = false 4 | load_balancer_type = "network" 5 | subnets = ["${var.public_subnet_ids}"] 6 | enable_cross_zone_load_balancing = true 7 | 8 | tags = "${map( 9 | "kubernetes.io/cluster/${var.platform_name}", "owned", 10 | "Name", "${var.platform_name}-public" 11 | )}" 12 | } 13 | 14 | data "dns_a_record_set" "platform_public_ip_set" { 15 | host = "${aws_lb.public.dns_name}" 16 | } 17 | -------------------------------------------------------------------------------- /modules/infra/public_listener.tf: -------------------------------------------------------------------------------- 1 | # http 2 | resource "aws_lb_target_group" "http" { 3 | name = "${var.platform_name}-http" 4 | port = 80 5 | protocol = "TCP" 6 | vpc_id = "${data.aws_vpc.platform.id}" 7 | deregistration_delay = 180 8 | 9 | health_check { 10 | interval = 30 11 | port = "traffic-port" 12 | protocol = "TCP" 13 | healthy_threshold = 3 14 | unhealthy_threshold = 3 15 | } 16 | } 17 | 18 | resource "aws_lb_listener" "http" { 19 | load_balancer_arn = "${aws_lb.public.arn}" 20 | port = 80 21 | protocol = "TCP" 22 | 23 | default_action { 24 | target_group_arn = "${aws_lb_target_group.http.arn}" 25 | type = "forward" 26 | } 27 | } 28 | 29 | # https 30 | resource "aws_lb_target_group" "https" { 31 | name = "${var.platform_name}-https" 32 | port = 443 33 | protocol = "TCP" 34 | vpc_id = "${data.aws_vpc.platform.id}" 35 | deregistration_delay = 180 36 | 37 | health_check { 38 | interval = 30 39 | port = "traffic-port" 40 | protocol = "TCP" 41 | healthy_threshold = 3 42 | unhealthy_threshold = 3 43 | } 44 | } 45 | 46 | resource "aws_lb_listener" "https" { 47 | load_balancer_arn = "${aws_lb.public.arn}" 48 | port = 443 49 | protocol = "TCP" 50 | 51 | default_action { 52 | target_group_arn = "${aws_lb_target_group.https.arn}" 53 | type = "forward" 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /modules/infra/public_sg.tf: -------------------------------------------------------------------------------- 1 | # FOR Public LB 2 | resource "aws_security_group" "public" { 3 | name = "${var.platform_name}-public" 4 | description = "Public group for ${var.platform_name}" 5 | 6 | ingress { 7 | from_port = 80 8 | to_port = 80 9 | protocol = "tcp" 10 | cidr_blocks = ["${var.public_cidrs}"] 11 | } 12 | 13 | ingress { 14 | from_port = 443 15 | to_port = 443 16 | protocol = "tcp" 17 | cidr_blocks = ["${var.public_cidrs}"] 18 | } 19 | 20 | egress { 21 | from_port = 0 22 | to_port = 0 23 | protocol = "-1" 24 | cidr_blocks = ["0.0.0.0/0"] 25 | ipv6_cidr_blocks = ["::/0"] 26 | } 27 | 28 | tags = "${map( 29 | "kubernetes.io/cluster/${var.platform_name}", "owned", 30 | "Name", "${var.platform_name}-public" 31 | )}" 32 | 33 | vpc_id = "${data.aws_vpc.platform.id}" 34 | } 35 | -------------------------------------------------------------------------------- /modules/infra/resources/bastion-init.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | output: { all: "| tee -a /var/log/cloud-init-output.log" } 3 | write_files: 4 | - 
content: ${platform_id_rsa} 5 | encoding: base64 6 | path: /root/platform_id_rsa 7 | permissions: '0600' 8 | - content: | 9 | #!/bin/bash -ex 10 | REGION=`curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq .region | tr -d '"'` 11 | export AWS_DEFAULT_REGION=$${REGION} 12 | INSTANCE_ID=`curl http://169.254.169.254/latest/meta-data/instance-id` 13 | for ALLOC_ID in `aws ec2 describe-addresses --filter "Name=domain,Values=vpc" "Name=tag:Role,Values=bastion" "Name=tag:kubernetes.io/cluster/${platform_name},Values=owned" | /usr/local/bin/jq '.Addresses[].AllocationId' | cut -d '"' -f 2` 14 | do 15 | aws ec2 associate-address --instance-id $${INSTANCE_ID} --allocation-id $${ALLOC_ID} --no-allow-reassociation 16 | STATUS=$$? 17 | if [ 0 = $${STATUS} ] ; then 18 | exit 0 19 | fi 20 | done 21 | path: /root/allocate-eip.sh 22 | permissions: '0700' 23 | - content: | 24 | #!/bin/bash -ex 25 | while [ ! -f /root/.provisioning-finished ] 26 | do 27 | echo -n "#" 28 | sleep 1 29 | done 30 | path: /root/ensure-provisioned.sh 31 | permissions: '0777' 32 | 33 | runcmd: 34 | - curl -o /usr/local/bin/jq -L https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && chmod +x /usr/local/bin/jq 35 | - curl -kL https://bootstrap.pypa.io/get-pip.py | python 36 | - pip install awscli --upgrade 37 | - pip install boto3 --upgrade 38 | - pip install boto --upgrade 39 | - sh /root/allocate-eip.sh 40 | - mkdir -p /home/${bastion_ssh_user}/.ssh 41 | - cp /root/platform_id_rsa /home/${bastion_ssh_user}/.ssh/id_rsa 42 | - chown ${bastion_ssh_user}:${bastion_ssh_user} /home/${bastion_ssh_user}/.ssh/id_rsa 43 | - touch /root/.provisioning-finished && chmod 644 /root/.provisioning-finished 44 | -------------------------------------------------------------------------------- /modules/infra/resources/master-init.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | output: { all: "| tee -a /var/log/cloud-init-output.log" } 3 | runcmd: 4 | - curl -o /usr/local/bin/jq -L https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && chmod +x /usr/local/bin/jq 5 | - curl -kL https://bootstrap.pypa.io/get-pip.py | python 6 | - pip install awscli --upgrade 7 | - touch /root/.provisioning-finished && chmod 644 /root/.provisioning-finished 8 | -------------------------------------------------------------------------------- /modules/infra/variables.tf: -------------------------------------------------------------------------------- 1 | variable "platform_name" {} 2 | 3 | variable "platform_vpc_id" {} 4 | 5 | variable "public_subnet_ids" { 6 | type = "list" 7 | default = [] 8 | } 9 | 10 | variable "private_subnet_ids" { 11 | type = "list" 12 | default = [] 13 | } 14 | 15 | variable "operator_cidrs" { 16 | type = "list" 17 | default = ["0.0.0.0/0"] 18 | } 19 | 20 | variable "public_cidrs" { 21 | type = "list" 22 | default = ["0.0.0.0/0"] 23 | } 24 | 25 | variable "use_spot" { 26 | default = false 27 | } 28 | 29 | variable "use_community" { 30 | default = false 31 | } 32 | 33 | variable "master_count" { 34 | default = 1 35 | } 36 | 37 | variable "infra_node_count" { 38 | default = 0 39 | } 40 | 41 | variable "compute_node_count" { 42 | default = 3 43 | } 44 | 45 | variable "master_instance_type" { 46 | default = "m4.xlarge" 47 | } 48 | 49 | variable "infra_node_instance_type" { 50 | default = "m4.large" 51 | } 52 | 53 | variable "compute_node_instance_type" { 54 | default = "m4.large" 55 | } 56 |
-------------------------------------------------------------------------------- /modules/infra/vpc.tf: -------------------------------------------------------------------------------- 1 | data "aws_vpc" "platform" { 2 | id = "${var.platform_vpc_id}" 3 | } 4 | -------------------------------------------------------------------------------- /modules/network/README.md: -------------------------------------------------------------------------------- 1 | # Terraform OpenShift Module / Network 2 | 3 | Builds the network for the OpenShift platform: 4 | 5 | * VPC 6 | * Subnets 7 | * Route Tables 8 | * Gateways -------------------------------------------------------------------------------- /modules/network/az.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" { 2 | state = "available" 3 | } 4 | 5 | locals { 6 | availability_zones = ["${split(",", length(var.availability_zones) > 0 ? join(",", var.availability_zones) : join(",", data.aws_availability_zones.available.names))}"] 7 | } 8 | -------------------------------------------------------------------------------- /modules/network/output.tf: -------------------------------------------------------------------------------- 1 | output "platform_vpc_id" { 2 | value = "${aws_vpc.platform.id}" 3 | } 4 | 5 | output "public_subnet_ids" { 6 | value = ["${aws_subnet.public.*.id}"] 7 | } 8 | 9 | output "private_subnet_ids" { 10 | value = ["${aws_subnet.private.*.id}"] 11 | } 12 | -------------------------------------------------------------------------------- /modules/network/private_network.tf: -------------------------------------------------------------------------------- 1 | # Private subnet: for instances / internal lb 2 | 3 | # Outbound access goes through the shared internet gateway (this module creates no NAT gateway) 4 | locals { 5 | private_subnet_count = "${length(local.availability_zones)}" 6 | } 7 | 8 | resource "aws_subnet" "private" { 9 | count = "${local.private_subnet_count}" 10 | vpc_id = "${aws_vpc.platform.id}" 11 | availability_zone = "${element(local.availability_zones, count.index)}" 12 | cidr_block = "${cidrsubnet(aws_vpc.platform.cidr_block, ceil(log(local.private_subnet_count + local.public_subnet_count, 2)), count.index)}" 13 | 14 | map_public_ip_on_launch = true 15 | 16 | tags = "${map( 17 | "kubernetes.io/cluster/${var.platform_name}", "owned", 18 | "Name", "${var.platform_name}-private-${count.index}" 19 | )}" 20 | } 21 | 22 | resource "aws_route_table" "private" { 23 | vpc_id = "${aws_vpc.platform.id}" 24 | 25 | tags = "${map( 26 | "kubernetes.io/cluster/${var.platform_name}", "owned", 27 | "Name", "${var.platform_name}-private-rt" 28 | )}" 29 | } 30 | 31 | # Add an egress route to the private route table 32 | 33 | resource "aws_route" "private_internet" { 34 | route_table_id = "${aws_route_table.private.id}" 35 | destination_cidr_block = "0.0.0.0/0" 36 | gateway_id = "${aws_internet_gateway.public_gw.id}" 37 | depends_on = ["aws_route_table.public"] 38 | } 39 | 40 | # Associate the private route table with each private subnet 41 | resource "aws_route_table_association" "private" { 42 | count = "${local.private_subnet_count}" 43 | subnet_id = "${element(aws_subnet.private.*.id, count.index)}" 44 | route_table_id = "${aws_route_table.private.id}" 45 | } 46 | -------------------------------------------------------------------------------- /modules/network/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | } 3 | --------------------------------------------------------------------------------
/modules/network/public_network.tf: -------------------------------------------------------------------------------- 1 | # Public subnet: for router LB 2 | 3 | locals { 4 | public_subnet_count = "${length(local.availability_zones)}" 5 | } 6 | 7 | resource "aws_subnet" "public" { 8 | count = "${local.public_subnet_count}" 9 | availability_zone = "${element(local.availability_zones, count.index)}" 10 | vpc_id = "${aws_vpc.platform.id}" 11 | cidr_block = "${cidrsubnet(aws_vpc.platform.cidr_block, ceil(log(local.private_subnet_count + local.public_subnet_count, 2)), local.private_subnet_count + count.index)}" 12 | map_public_ip_on_launch = true 13 | 14 | tags = "${map( 15 | "kubernetes.io/cluster/${var.platform_name}", "owned", 16 | "Name", "${var.platform_name}-public-${count.index}" 17 | )}" 18 | } 19 | 20 | # Public access to the router 21 | resource "aws_internet_gateway" "public_gw" { 22 | vpc_id = "${aws_vpc.platform.id}" 23 | 24 | tags = "${map( 25 | "kubernetes.io/cluster/${var.platform_name}", "owned", 26 | "Name", "${var.platform_name}-public-gw" 27 | )}" 28 | } 29 | 30 | # Public route table: attach Internet gw for internet access. 31 | 32 | resource "aws_route_table" "public" { 33 | vpc_id = "${aws_vpc.platform.id}" 34 | 35 | tags = "${map( 36 | "kubernetes.io/cluster/${var.platform_name}", "owned", 37 | "Name", "${var.platform_name}-public-rt" 38 | )}" 39 | } 40 | 41 | resource "aws_route" "public_internet" { 42 | route_table_id = "${aws_route_table.public.id}" 43 | destination_cidr_block = "0.0.0.0/0" 44 | gateway_id = "${aws_internet_gateway.public_gw.id}" 45 | depends_on = ["aws_route_table.public"] 46 | } 47 | 48 | resource "aws_route_table_association" "public" { 49 | count = "${local.public_subnet_count}" 50 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}" 51 | route_table_id = "${aws_route_table.public.id}" 52 | } 53 | -------------------------------------------------------------------------------- /modules/network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "platform_name" {} 2 | 3 | variable "platform_cidr" { 4 | default = "10.0.0.0/16" 5 | } 6 | 7 | variable "availability_zones" { 8 | default = [] 9 | } 10 | -------------------------------------------------------------------------------- /modules/network/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "platform" { 2 | cidr_block = "${var.platform_cidr}" 3 | enable_dns_hostnames = true 4 | assign_generated_ipv6_cidr_block = true 5 | 6 | tags = "${map( 7 | "kubernetes.io/cluster/${var.platform_name}", "owned", 8 | "Name", "${var.platform_name}" 9 | )}" 10 | } 11 | -------------------------------------------------------------------------------- /modules/openshift/bastion_config.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "bastion_config_playbook" { 2 | template = "${file("${path.module}/resources/bastion-config-playbook.yaml")}" 3 | 4 | vars { 5 | openshift_major_version = "${var.openshift_major_version}" 6 | } 7 | } 8 | 9 | data "template_file" "bastion_config" { 10 | template = "${file("${path.module}/resources/bastion-config.sh")}" 11 | } 12 | 13 | resource "null_resource" "bastion_config" { 14 | provisioner "file" { 15 | content = "${data.template_file.bastion_config_playbook.rendered}" 16 | destination = "~/bastion-config-playbook.yaml" 17 | } 18 | 19 | provisioner "file" { 20 | content = 
"${data.template_file.bastion_config.rendered}" 21 | destination = "~/bastion-config.sh" 22 | } 23 | 24 | provisioner "remote-exec" { 25 | inline = [ 26 | "chmod +x ~/bastion-config.sh", 27 | "sh ~/bastion-config.sh", 28 | ] 29 | } 30 | 31 | connection { 32 | type = "ssh" 33 | user = "${var.bastion_ssh_user}" 34 | private_key = "${var.platform_private_key}" 35 | host = "${var.bastion_endpoint}" 36 | } 37 | 38 | triggers { 39 | playbook = "${data.template_file.bastion_config_playbook.rendered}" 40 | } 41 | 42 | depends_on = ["null_resource.bastion_repos"] 43 | } 44 | -------------------------------------------------------------------------------- /modules/openshift/bastion_repos.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "bastion_repos" { 2 | template = "${file("${path.module}/resources/bastion-repos.sh")}" 3 | 4 | vars { 5 | platform_name = "${var.platform_name}" 6 | rhn_username = "${var.rhn_username}" 7 | rhn_password = "${var.rhn_password}" 8 | rh_subscription_pool_id = "${var.rh_subscription_pool_id}" 9 | openshift_major_version = "${var.openshift_major_version}" 10 | } 11 | } 12 | 13 | resource "null_resource" "bastion_repos" { 14 | provisioner "file" { 15 | content = "${data.template_file.bastion_repos.rendered}" 16 | destination = "~/bastion-repos.sh" 17 | } 18 | 19 | provisioner "remote-exec" { 20 | inline = [ 21 | "chmod +x ~/bastion-repos.sh", 22 | "export USE_COMMUNITY=${var.use_community ? "true" : ""}", 23 | "sh ~/bastion-repos.sh", 24 | ] 25 | } 26 | 27 | connection { 28 | type = "ssh" 29 | user = "${var.bastion_ssh_user}" 30 | private_key = "${var.platform_private_key}" 31 | host = "${var.bastion_endpoint}" 32 | } 33 | 34 | triggers { 35 | script = "${data.template_file.bastion_repos.rendered}" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /modules/openshift/inventory.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "template_inventory" { 2 | template = "${file("${path.module}/resources/template-inventory.yaml")}" 3 | 4 | vars { 5 | platform_name = "${var.platform_name}" 6 | ansible_user = "${var.bastion_ssh_user}" 7 | rhn_username = "${var.rhn_username}" 8 | rhn_password = "${var.rhn_password}" 9 | rh_subscription_pool_id = "${var.rh_subscription_pool_id}" 10 | platform_domain = "${var.platform_domain}" 11 | master_domain = "${var.master_domain}" 12 | openshift_deployment_type = "${var.use_community ? "origin" : "openshift-enterprise"}" 13 | openshift_major_version = "${var.openshift_major_version}" 14 | openshift_repos_enable_testing = "${var.use_community ? "true" : "false"}" 15 | named_certificate = "${(var.public_certificate_pem == "") ? 
/modules/openshift/inventory.tf:
--------------------------------------------------------------------------------
data "template_file" "template_inventory" {
  template = "${file("${path.module}/resources/template-inventory.yaml")}"

  vars {
    platform_name                   = "${var.platform_name}"
    ansible_user                    = "${var.bastion_ssh_user}"
    rhn_username                    = "${var.rhn_username}"
    rhn_password                    = "${var.rhn_password}"
    rh_subscription_pool_id         = "${var.rh_subscription_pool_id}"
    platform_domain                 = "${var.platform_domain}"
    master_domain                   = "${var.master_domain}"
    openshift_deployment_type       = "${var.use_community ? "origin" : "openshift-enterprise"}"
    openshift_major_version         = "${var.openshift_major_version}"
    openshift_repos_enable_testing  = "${var.use_community ? "true" : "false"}"
    named_certificate               = "${(var.public_certificate_pem == "") ? false : true}"
    use_allow_all_identity_provider = "${contains(var.identity_providers, "AllowAllIdentityProvider")}"
    use_google_identity_provider    = "${contains(var.identity_providers, "GoogleIdentityProvider")}"
    google_client_id                = "${var.google_client_id}"
    google_client_secret            = "${var.google_client_secret}"
    google_client_domain            = "${var.google_client_domain}"
  }
}

resource "null_resource" "template_inventory" {
  provisioner "file" {
    content     = "${data.template_file.template_inventory.rendered}"
    destination = "~/template-inventory.yaml"
  }

  connection {
    type        = "ssh"
    user        = "${var.bastion_ssh_user}"
    private_key = "${var.platform_private_key}"
    host        = "${var.bastion_endpoint}"
  }

  triggers {
    template_inventory = "${data.template_file.template_inventory.rendered}"
  }
}
--------------------------------------------------------------------------------
/modules/openshift/main.tf:
--------------------------------------------------------------------------------
data "template_file" "deploy_cluster" {
  template = "${file("${path.module}/resources/deploy-cluster.sh")}"

  vars {
    platform_name           = "${var.platform_name}"
    platform_aws_region     = "${data.aws_region.current.name}"
    openshift_major_version = "${var.openshift_major_version}"
  }
}

resource "null_resource" "main" {
  provisioner "file" {
    content     = "${data.template_file.deploy_cluster.rendered}"
    destination = "~/deploy-cluster.sh"
  }

  provisioner "remote-exec" {
    inline = [
      "chmod +x ~/deploy-cluster.sh",
      "tmux new-session -d -s deploycluster ~/deploy-cluster.sh",
      "sleep 1", # https://stackoverflow.com/questions/36207752/how-can-i-start-a-remote-service-using-terraform-provisioning
    ]
  }

  connection {
    type        = "ssh"
    user        = "${var.bastion_ssh_user}"
    private_key = "${var.platform_private_key}"
    host        = "${var.bastion_endpoint}"
  }

  triggers {
    inventory = "${data.template_file.template_inventory.rendered}"
    installer = "${data.template_file.deploy_cluster.rendered}"
  }

  depends_on = [
    "null_resource.node_config",
    "null_resource.public_certificate",
    "null_resource.template_inventory",
    "null_resource.openshift_applier",
  ]
}
--------------------------------------------------------------------------------
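The installer above is launched in a detached tmux session, so `terraform apply` returns before the cluster deployment actually finishes. A sketch of following the installer's progress after SSHing to the bastion (the `deploycluster` session name comes from the resource above):

```bash
# Attach to the detached installer session and watch the Ansible output
tmux attach-session -t deploycluster

# Or take a non-interactive snapshot of the most recent output
tmux capture-pane -p -t deploycluster | tail -n 20
```
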
/modules/openshift/node_config.tf:
--------------------------------------------------------------------------------
data "template_file" "node_config_playbook" {
  template = "${file("${path.module}/resources/node-config-playbook.yaml")}"

  vars {
    openshift_major_version = "${var.openshift_major_version}"
    rhn_username            = "${var.rhn_username}"
    rhn_password            = "${var.rhn_password}"
    rh_subscription_pool_id = "${var.rh_subscription_pool_id}"
  }
}

data "template_file" "node_config" {
  template = "${file("${path.module}/resources/node-config.sh")}"

  vars {
    platform_name       = "${var.platform_name}"
    platform_aws_region = "${data.aws_region.current.name}"
  }
}

resource "null_resource" "node_config" {
  provisioner "file" {
    content     = "${data.template_file.node_config_playbook.rendered}"
    destination = "~/node-config-playbook.yaml"
  }

  provisioner "file" {
    content     = "${data.template_file.node_config.rendered}"
    destination = "~/node-config.sh"
  }

  provisioner "remote-exec" {
    inline = [
      "chmod +x ~/node-config.sh",
      "sh ~/node-config.sh",
    ]
  }

  connection {
    type        = "ssh"
    user        = "${var.bastion_ssh_user}"
    private_key = "${var.platform_private_key}"
    host        = "${var.bastion_endpoint}"
  }

  triggers {
    inventory = "${data.template_file.node_config_playbook.rendered}"
  }

  depends_on = ["null_resource.bastion_config", "null_resource.template_inventory"]
}
--------------------------------------------------------------------------------
/modules/openshift/openshift_applier.tf:
--------------------------------------------------------------------------------
data "template_file" "openshift_applier" {
  template = "${file("${path.module}/resources/openshift-applier.sh")}"

  vars {
    platform_name       = "${var.platform_name}"
    platform_aws_region = "${data.aws_region.current.name}"
  }
}

# Uploads the applier playbooks and script; deploy-cluster.sh runs the
# applier playbook after the cluster install.
resource "null_resource" "openshift_applier" {
  provisioner "file" {
    source      = "${path.module}/resources/openshift-applier"
    destination = "~"
  }

  provisioner "file" {
    content     = "${data.template_file.openshift_applier.rendered}"
    destination = "~/openshift-applier.sh"
  }

  connection {
    type        = "ssh"
    user        = "${var.bastion_ssh_user}"
    private_key = "${var.platform_private_key}"
    host        = "${var.bastion_endpoint}"
  }

  triggers {
    script = "${data.template_file.openshift_applier.rendered}"
  }
}
--------------------------------------------------------------------------------
/modules/openshift/output.tf:
--------------------------------------------------------------------------------
output "master_public_url" {
  value = "https://${var.platform_domain}:8443"
}
--------------------------------------------------------------------------------
/modules/openshift/public_certificate.tf:
--------------------------------------------------------------------------------
# Uploads the public certificate files, or "dummy" placeholders when unset,
# so the inventory can always reference fixed paths.
resource "null_resource" "public_certificate" {
  provisioner "file" {
    content     = "${var.public_certificate_pem == "" ? "dummy" : var.public_certificate_pem}"
    destination = "~/public_certificate.pem"
  }

  provisioner "file" {
    content     = "${var.public_certificate_key == "" ? "dummy" : var.public_certificate_key}"
    destination = "~/public_certificate.key"
  }

  provisioner "file" {
    content     = "${var.public_certificate_intermediate_pem == "" ? "dummy" : var.public_certificate_intermediate_pem}"
    destination = "~/public_certificate_intermediate.pem"
  }

  connection {
    type        = "ssh"
    user        = "${var.bastion_ssh_user}"
    private_key = "${var.platform_private_key}"
    host        = "${var.bastion_endpoint}"
  }
}
--------------------------------------------------------------------------------
/modules/openshift/region.tf:
--------------------------------------------------------------------------------
data "aws_region" "current" {}
--------------------------------------------------------------------------------
/modules/openshift/resources/bastion-config-playbook.yaml:
--------------------------------------------------------------------------------
- hosts: all
  become: true
  tasks:
    - name: ensure a list of packages for OpenShift install
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - vim
          - git
          - tmux
    - name: ensure a list of packages for OCP
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - openshift-ansible
          - atomic-openshift-clients
      when: ansible_distribution in ['RedHat']
    - name: ensure a list of packages for OKD
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - pyOpenSSL
          - python-lxml
          - java-1.8.0-openjdk-headless
          - patch
      when: ansible_distribution in ['CentOS']
    - name: install openshift-ansible from git repo
      git:
        repo: 'https://github.com/openshift/openshift-ansible.git'
        dest: /usr/share/ansible/openshift-ansible
        version: release-${openshift_major_version}
      when: ansible_distribution in ['CentOS']
    - name: download ocinventory
      get_url:
        url: https://github.com/literalice/openshift-inventory-utils/releases/download/v0.2/ocinventory_unix
        dest: /usr/local/bin/ocinventory
        mode: 0711
--------------------------------------------------------------------------------
/modules/openshift/resources/bastion-config.sh:
--------------------------------------------------------------------------------
#!/bin/bash

sudo yum -y install ansible
ansible-playbook -i localhost, -c local ~/bastion-config-playbook.yaml
--------------------------------------------------------------------------------
/modules/openshift/resources/bastion-repos.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Tip: on a registered host, a subscription pool id can be looked up with
# e.g. `sudo subscription-manager list --available --matches 'OpenShift*'`.
if [ -z "$USE_COMMUNITY" ]; then
  echo "It's an OCP cluster"
  sudo subscription-manager register --username=${rhn_username} --password=${rhn_password}
  sudo subscription-manager attach --pool=${rh_subscription_pool_id}
  sudo subscription-manager repos --disable="*"
  sudo subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms" --enable="rhel-7-server-ansible-2.6-rpms"
  sudo subscription-manager repos --enable="rhel-7-server-ose-${openshift_major_version}-rpms"
else
  echo "It's an OKD cluster"
  sudo yum -y install centos-release-ansible26
fi
--------------------------------------------------------------------------------
/modules/openshift/resources/deploy-cluster.sh:
--------------------------------------------------------------------------------
#!/bin/bash

export LANG=C
export ANSIBLE_HOST_KEY_CHECKING=False
export ANSIBLE_FORKS=5
export ANSIBLE_PIPELINING=True

export AWS_REGION="${platform_aws_region}"

ocinventory -cluster "${platform_name}" -inventory $HOME/template-inventory.yaml > $HOME/inventory.yaml

ansible-playbook -i $HOME/inventory.yaml $HOME/node-config-playbook.yaml || { echo "Error registering repos" ; exit 1 ; }

cd /usr/share/ansible/openshift-ansible
ansible-playbook -i $HOME/inventory.yaml playbooks/prerequisites.yml || { echo "Error on prerequisites" ; exit 1 ; }
ansible-playbook -i $HOME/inventory.yaml playbooks/deploy_cluster.yml || { echo "Error deploying cluster" ; exit 1 ; }

cd ~
ansible-playbook -i $HOME/inventory.yaml $HOME/openshift-applier/openshift-policies/config.yml || { echo "Error running applier" ; exit 1 ; }
--------------------------------------------------------------------------------
/modules/openshift/resources/node-config-playbook.yaml:
--------------------------------------------------------------------------------
- hosts: all
  become: true
  tasks:
    - name: Register RHN
      redhat_subscription:
        username: ${rhn_username}
        password: ${rhn_password}
        pool_ids: ${rh_subscription_pool_id}
        state: present
      when: ansible_distribution in ['RedHat']
    - name: Disable all RHSM repositories
      rhsm_repository:
        name: '*'
        state: disabled
      when: ansible_distribution in ['RedHat']
    - name: Enable only the repositories required for the OpenShift install
      rhsm_repository:
        name: "{{ item }}"
        state: enabled
      register: rhsm_repository
      with_items:
        - rhel-7-server-rpms
        - rhel-7-server-extras-rpms
        - rhel-7-server-ansible-2.6-rpms
        - rhel-7-server-ose-${openshift_major_version}-rpms
      when: ansible_distribution in ['RedHat']
    - name: disable obsoletes handling for yum # https://github.com/openshift/origin/issues/20653
      lineinfile:
        dest: '/etc/yum.conf'
        state: present
        regexp: '^obsoletes=1$'
        line: 'obsoletes=0'
      when: ansible_distribution in ['CentOS']
    - name: ensure a list of packages for OKD install
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - NetworkManager
          - docker-1.13.1
      when: ansible_distribution in ['CentOS']
    - name: enable NetworkManager
      systemd:
        name: NetworkManager
        state: started
        enabled: yes
      when: ansible_distribution in ['CentOS']
    - name: enable Docker
      systemd:
        name: docker
        state: started
        enabled: yes
      when: ansible_distribution in ['CentOS']
--------------------------------------------------------------------------------
/modules/openshift/resources/node-config.sh:
--------------------------------------------------------------------------------
#!/bin/bash

export LANG=C
export ANSIBLE_HOST_KEY_CHECKING=False
export ANSIBLE_FORKS=5
export ANSIBLE_PIPELINING=True

export AWS_REGION="${platform_aws_region}"

ocinventory -cluster "${platform_name}" -inventory $HOME/template-inventory.yaml > $HOME/inventory.yaml

ansible-playbook -i $HOME/inventory.yaml $HOME/node-config-playbook.yaml || { echo "Error registering repos" ; exit 1 ; }
--------------------------------------------------------------------------------
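If one of the phases above fails, it is usually faster to re-run that phase from the bastion than to re-apply Terraform. A sketch, assuming the rendered inventory is already in `$HOME/inventory.yaml`; the master hostname in the final check is a placeholder:

```bash
# Re-run the failed phase from the bastion
cd /usr/share/ansible/openshift-ansible
ansible-playbook -i $HOME/inventory.yaml playbooks/deploy_cluster.yml

# Once it completes, confirm from a master that every node registered
ssh master-0 sudo oc get nodes
```
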
/modules/openshift/resources/openshift-applier.sh:
--------------------------------------------------------------------------------
#!/bin/bash

export LANG=C
export ANSIBLE_HOST_KEY_CHECKING=False
export ANSIBLE_FORKS=5
export ANSIBLE_PIPELINING=True

export AWS_REGION="${platform_aws_region}"

ocinventory -cluster "${platform_name}" -inventory $HOME/template-inventory.yaml > $HOME/inventory.yaml

ansible-playbook -i $HOME/inventory.yaml openshift-applier/openshift-policies/config.yml || { echo "Error running applier" ; exit 1 ; }
--------------------------------------------------------------------------------
/modules/openshift/resources/openshift-applier/openshift-policies/config.yml:
--------------------------------------------------------------------------------
# playbooks/openshift-component_name/config.yml
---
- import_playbook: /usr/share/ansible/openshift-ansible/playbooks/init/main.yml

- import_playbook: private/config.yml
--------------------------------------------------------------------------------
/modules/openshift/resources/openshift-applier/openshift-policies/private/config.yml:
--------------------------------------------------------------------------------
- name: OpenShift Users Installation
  hosts: oo_first_master
  tasks:
    # Grants cluster-admin to each listed user; the CLI equivalent is
    # "oc adm policy add-cluster-role-to-user cluster-admin <user>".
    - name: cluster-admin
      oc_adm_policy_user:
        user: "{{ item }}"
        resource_kind: cluster-role
        resource_name: cluster-admin
        state: present
      with_items:
        - "{{ openshift_cluster_admin_users }}"
--------------------------------------------------------------------------------
/modules/openshift/resources/template-inventory.yaml:
--------------------------------------------------------------------------------
# Note: the conditional prefixes below render as "#" when a feature is
# disabled, so those lines arrive in the final inventory commented out.
OSEv3:
  children:
    masters:
      hosts:
    etcd:
      hosts:
    nodes:
      hosts:
  vars:
    timeout: 60
    ansible_user: ${ansible_user}
    ansible_ssh_user: ${ansible_user}
    ansible_become: true
    openshift_deployment_type: ${openshift_deployment_type}
    ${openshift_deployment_type == "openshift-enterprise" ? "#" : ""} openshift_additional_repos:
    ${openshift_deployment_type == "openshift-enterprise" ? "#" : ""}   - {id: 'centos-okd-ci', name: 'centos-okd-ci', baseurl: 'https://rpms.svc.ci.openshift.org/openshift-origin-v3.11', gpgcheck: '0', enabled: '1'}
    openshift_release: "${openshift_major_version}"
    openshift_repos_enable_testing: ${openshift_repos_enable_testing}
    openshift_master_identity_providers:
    ${use_allow_all_identity_provider == "true" ? "" : "#"}   - name: 'all'
    ${use_allow_all_identity_provider == "true" ? "" : "#"}     login: true
    ${use_allow_all_identity_provider == "true" ? "" : "#"}     challenge: true
    ${use_allow_all_identity_provider == "true" ? "" : "#"}     kind: 'AllowAllPasswordIdentityProvider'
    ${use_google_identity_provider == "true" ? "" : "#"}   - name: google
    ${use_google_identity_provider == "true" ? "" : "#"}     challenge: false
    ${use_google_identity_provider == "true" ? "" : "#"}     login: true
    ${use_google_identity_provider == "true" ? "" : "#"}     mappingMethod: claim
    ${use_google_identity_provider == "true" ? "" : "#"}     kind: GoogleIdentityProvider
    ${use_google_identity_provider == "true" ? "" : "#"}     clientID: "${google_client_id}"
    ${use_google_identity_provider == "true" ? "" : "#"}     clientSecret: "${google_client_secret}"
    ${use_google_identity_provider == "true" ? "" : "#"}     hostedDomain: "${google_client_domain}"
    os_sdn_network_plugin_name: 'redhat/openshift-ovs-networkpolicy'
    openshift_disable_check: 'disk_availability,memory_availability,docker_image_availability'
    openshift_master_cluster_hostname: ${master_domain}
    openshift_master_cluster_public_hostname: ${platform_domain}
    openshift_master_default_subdomain: ${platform_domain}
    openshift_master_bootstrap_auto_approve: true
    openshift_cloudprovider_kind: aws
    openshift_clusterid: ${platform_name}
    openshift_logging_install_logging: true
    openshift_logging_es_memory_limit: 1024M
    openshift_logging_es_nodeselector:
      node-role.kubernetes.io/infra: "true"
    openshift_certificate_expiry_warning_days: 30
    openshift_master_admission_plugin_config: '{ "MutatingAdmissionWebhook": { "configuration": { "apiVersion": "apiserver.config.k8s.io/v1alpha1", "kubeConfigFile": "/etc/origin/master/admin.kubeconfig", "kind": "WebhookAdmission" } }, "ValidatingAdmissionWebhook": { "configuration": { "apiVersion": "apiserver.config.k8s.io/v1alpha1", "kubeConfigFile": "/etc/origin/master/admin.kubeconfig", "kind": "WebhookAdmission" } } }'
    openshift_cluster_admin_users:
      - admin
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"} oreg_auth_user: "${rhn_username}"
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"} oreg_auth_password: "${rhn_password}"
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"} openshift_additional_registry_credentials:
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"}   - host: registry.connect.redhat.com
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"}     user: "${rhn_username}"
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"}     password: "${rhn_password}"
    ${openshift_deployment_type == "openshift-enterprise" ? "" : "#"}     test_image: mongodb/enterprise-operator:0.3.2
    openshift_master_overwrite_named_certificates: ${named_certificate}
    ${named_certificate ? "" : "#"} openshift_master_named_certificates:
    ${named_certificate ? "" : "#"}   - certfile: '/home/${ansible_user}/public_certificate.pem'
    ${named_certificate ? "" : "#"}     keyfile: '/home/${ansible_user}/public_certificate.key'
    ${named_certificate ? "" : "#"}     cafile: '/home/${ansible_user}/public_certificate_intermediate.pem'
    ${named_certificate ? "" : "#"}     names: ['${platform_domain}']
    ${named_certificate ? "" : "#"} openshift_hosted_router_certificate:
    ${named_certificate ? "" : "#"}   certfile: '/home/${ansible_user}/public_certificate.pem'
    ${named_certificate ? "" : "#"}   keyfile: '/home/${ansible_user}/public_certificate.key'
    ${named_certificate ? "" : "#"}   cafile: '/home/${ansible_user}/public_certificate_intermediate.pem'
--------------------------------------------------------------------------------
/modules/openshift/variables.tf:
--------------------------------------------------------------------------------
variable "platform_name" {}

variable "identity_providers" {
  type        = "list"
  description = "The identity providers to enable (AllowAllIdentityProvider, GoogleIdentityProvider)"
  default = [
    "AllowAllIdentityProvider"
  ]
}

variable "google_client_id" {
  type        = "string"
  description = "The Google client id used by the GoogleIdentityProvider"
  default     = ""
}

variable "google_client_secret" {
  type        = "string"
  description = "The client secret used by the GoogleIdentityProvider"
  default     = ""
}

variable "google_client_domain" {
  type        = "string"
  description = "The domain used by the GoogleIdentityProvider"
  default     = ""
}

variable "rh_subscription_pool_id" {
  description = "Red Hat subscription pool id for OpenShift Container Platform"
  default     = ""
}

variable "rhn_username" {
  description = "Red Hat Network username used to register the OpenShift Container Platform cluster's systems"
  default     = ""
}

variable "rhn_password" {
  description = "Red Hat Network password used to register the OpenShift Container Platform cluster's systems"
  default     = ""
}

variable "bastion_ssh_user" {}

variable "bastion_endpoint" {}

variable "platform_private_key" {}

variable "openshift_major_version" {
  default = "3.11"
}

variable "use_community" {
  default = false
}

variable "master_domain" {}

variable "platform_domain" {}

variable "public_certificate_pem" {
  default = ""
}

variable "public_certificate_key" {
  default = ""
}

variable "public_certificate_intermediate_pem" {
  default = ""
}
--------------------------------------------------------------------------------
/output.tf:
--------------------------------------------------------------------------------
output "platform_name" {
  value       = "${var.platform_name}"
  description = "The platform name"
}

output "master_public_url" {
  value       = "${module.openshift.master_public_url}"
  description = "OpenShift master URL"
}

output "bastion_ssh_spec" {
  value       = "${module.infra.bastion_ssh_user}@${module.infra.bastion_endpoint}"
  description = "Bastion SSH login spec, usable as 'ssh `terraform output bastion_ssh_spec`'"
}

output "platform_private_key" {
  sensitive   = true
  value       = "${module.infra.platform_private_key}"
  description = "Private SSH key for the cluster instances"
}
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
variable "platform_name" {
  description = "The name of the cluster, used for tagging resources"
}

variable "availability_zones" {
  type    = "list"
  default = []
}

variable "operator_cidrs" {
  type        = "list"
  default     = ["0.0.0.0/0"]
  description = "CIDRs from which the master API may be accessed"
}

variable "public_cidrs" {
  type        = "list"
  default     = ["0.0.0.0/0"]
  description = "CIDRs from which the public may access services served by the cluster"
}

variable "use_spot" {
  default = false
}

variable "master_count" {
  default = 1
}

variable "compute_node_count" {
  default = 3
}

variable "master_instance_type" {
  default = "m4.xlarge"
}

variable "compute_node_instance_type" {
  default = "m4.large"
}

variable "use_community" {
  description = "Set to true to install OKD instead of OCP."
  default     = false
}

variable "rh_subscription_pool_id" {
  description = "Red Hat subscription pool id for OpenShift Container Platform"
  default     = ""
}

variable "rhn_username" {
  description = "Red Hat Network username used to register the OpenShift Container Platform cluster's systems"
  default     = ""
}

variable "rhn_password" {
  description = "Red Hat Network password used to register the OpenShift Container Platform cluster's systems"
  default     = ""
}

# Domains

variable "platform_domain" {
  description = "Public DNS subdomain for access to services served in the cluster"
  default     = ""
}

variable "platform_domain_administrator_email" {
  default = ""
}

variable "identity_providers" {
  type        = "list"
  description = "The identity providers to enable (AllowAllIdentityProvider, GoogleIdentityProvider)"
  default = [
    "AllowAllIdentityProvider"
  ]
}

variable "google_client_id" {
  type        = "string"
  description = "The Google client id used by the GoogleIdentityProvider"
  default     = ""
}

variable "google_client_secret" {
  type        = "string"
  description = "The client secret used by the GoogleIdentityProvider"
  default     = ""
}

variable "google_client_domain" {
  type        = "string"
  description = "The domain used by the GoogleIdentityProvider"
  default     = ""
}
--------------------------------------------------------------------------------
/vars/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/literalice/terraform-aws-openshift/87f674cce663925c6aa561da398f8d48b36dba2f/vars/.gitignore
--------------------------------------------------------------------------------
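As a closing example tied to the identity-provider variables above, one way to enable the Google identity provider is a tfvars file kept under `vars/`; the file name and all values here are placeholders:

```bash
# Write a tfvars file and apply it (hypothetical name and values)
cat > vars/mycluster.tfvars <<'EOF'
platform_name        = "mycluster"
identity_providers   = ["GoogleIdentityProvider"]
google_client_id     = "1234567890-example.apps.googleusercontent.com"
google_client_secret = "changeme"
google_client_domain = "example.com"
EOF

terraform plan -var-file=vars/mycluster.tfvars
terraform apply -var-file=vars/mycluster.tfvars
```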