├── README.md
├── aws_la_cloudplayground_multiple_workers_version
│   ├── README.md
│   ├── alb_acm.tf
│   ├── ansible.cfg
│   ├── ansible_templates
│   │   ├── cred-privkey.j2
│   │   ├── install_jenkins.yaml
│   │   ├── install_worker.yaml
│   │   ├── inventory_aws
│   │   │   ├── README.md
│   │   │   └── tf_aws_ec2.yml
│   │   ├── jenkins_auth
│   │   └── node.j2
│   ├── aws_get_cp_hostedzone
│   ├── backend.tf
│   ├── dns.tf
│   ├── instances.tf
│   ├── network_setup.tf
│   ├── outputs.tf
│   ├── providers.tf
│   └── variables.tf
├── aws_la_cloudplayground_version
│   ├── README.md
│   ├── alb_acm.tf
│   ├── ansible.cfg
│   ├── ansible_templates
│   │   ├── cred-privkey.j2
│   │   ├── install_jenkins.yaml
│   │   ├── install_worker.yaml
│   │   ├── jenkins_auth
│   │   └── node.j2
│   ├── aws_get_cp_hostedzone
│   ├── dns.tf
│   ├── instances.tf
│   ├── network_setup.tf
│   ├── null_provisioners.tf
│   ├── outputs.tf
│   └── variables.tf
├── course_lesson_resources
│   ├── Lesson_AnsiblePlaybooks_Syntax
│   │   └── sample.yml
│   ├── Lesson_AppDeploy_Part1
│   │   ├── instances.tf
│   │   └── ssm_resource_path_string
│   ├── Lesson_AppDeploy_Part2
│   │   └── instances.tf
│   ├── Lesson_AppDeploy_Part3
│   │   └── instances.tf
│   ├── Lesson_ConfigMgmt_Provisioners
│   │   ├── ansible_modification.cfg
│   │   ├── jenkins-master-sample.yml
│   │   ├── jenkins-worker-sample.yml
│   │   ├── master_provisioner.tf
│   │   └── worker_provisioner.tf
│   ├── Lesson_LoadBalance_ALB_EC2
│   │   ├── alb.tf
│   │   └── jenkins-master-sample.yml
│   ├── Lesson_NetworkSetup_Part1
│   │   └── networks.tf
│   ├── Lesson_NetworkSetup_Part2
│   │   └── network_pt2.tf
│   ├── Lesson_NetworkSetup_Part3
│   │   └── network_pt3.tf
│   └── Lesson_PuttingBehindDNS_ALB_HTTPS
│       ├── acm.tf
│       ├── alb.tf
│       ├── dns.tf
│       └── dns_old.tf
├── iam_policies
│   ├── terraform_deployment_iam_policy.json
│   └── terraform_deployment_lax_iam_policy.json
├── lab_deploying_dns_acm
│   ├── README.md
│   ├── alb_acm.tf
│   ├── aws_get_cp_hostedzone
│   ├── dns.tf
│   ├── import_resources.tf
│   ├── outputs.tf
│   └── variables.tf
├── lab_jenkins_master_worker
│   ├── README.md
│   ├── alb.tf
│   ├── ansible.cfg
│   ├── ansible_templates
│   │   ├── cred-privkey.j2
│   │   ├── gen_ssh_key.yaml
│   │   ├── install_jenkins.yaml
│   │   ├── install_worker.yaml
│   │   ├── inventory_aws
│   │   │   └── tf_aws_ec2.yml
│   │   ├── jenkins_auth
│   │   └── node.j2
│   ├── instances.tf
│   ├── network_setup.tf
│   ├── outputs.tf
│   └── variables.tf
├── lab_network_vpc_peering
│   ├── network_setup.tf
│   ├── outputs.tf
│   └── variables.tf
└── terraform_v13_compatible_code
    ├── README.md
    ├── alb_acm.tf
    ├── ansible.cfg
    ├── ansible_templates
    │   ├── cred-privkey.j2
    │   ├── install_jenkins.yaml
    │   ├── install_worker.yaml
    │   ├── inventory_aws
    │   │   ├── README.md
    │   │   └── tf_aws_ec2.yml
    │   ├── jenkins_auth
    │   └── node.j2
    ├── aws_get_cp_hostedzone
    ├── backend.tf
    ├── dns.tf
    ├── instances.tf
    ├── network_setup.tf
    ├── outputs.tf
    ├── providers.tf
    └── variables.tf
/README.md:
--------------------------------------------------------------------------------
1 | # Deploying to AWS with Terraform and Ansible Repo
2 |
 3 | This repo contains the code used and taught in the A Cloud Guru course named above.
4 |
 5 | This course was originally created before Terraform 0.13 was released; however, I have updated the code to work with version 0.13 in the `terraform_v13_compatible_code` folder.
6 |
7 |
8 | ------
9 |
 10 | For following along using Terraform 0.12, refer to the `aws_la_cloudplayground_multiple_workers_version` folder.
 11 |
 12 | Again, for following along using Terraform 0.13, refer to the `terraform_v13_compatible_code` folder.
13 |
 14 | Although the folder naming convention in this repository should make sense for the most part, it is primarily meant to be referred to through the lessons on the A Cloud Guru website.
15 |
16 |
17 | For queries and feedback please reach out to ACG support: https://help.acloud.guru
18 |
19 | ## Warning
 20 | 1. Parts of this repository expect users to obtain a Route53 domain name, which is available with an ACG Playground tier subscription.
21 | 2. Following along and deploying resources in AWS as taught by this course WILL incur charges!!! Be sure to destroy any infrastructure that you do not need.
22 |
23 | ---
24 |
25 |
26 | Copyright 2020 A CLOUD GURU
27 |
28 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
29 |
30 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
31 |
32 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_multiple_workers_version/README.md:
--------------------------------------------------------------------------------
 1 |
 2 | ## Setup Requirements
 3 |
4 | ```
 5 | 1. Terraform binary >= 0.12.x # wget -c https://releases.hashicorp.com/terraform/0.12.28/terraform_0.12.28_linux_amd64.zip
 6 | 2. Python 3 & pip need to be installed on all nodes (on most modern Linux systems they're available by default) # yum -y install python3-pip
7 | 3. Ansible (install via pip) # pip3 install ansible --user
8 | 4. AWS CLI (install via pip) # pip3 install awscli --user
9 | 5. jq (install via package manager) - OPTIONAL # yum -y install jq
10 | ```
11 |
 12 | `This project has been tested on macOS (Mojave) and CentOS 7. The author provides no guarantees for other operating systems,
 13 | although the steps are generic enough that, with little or no tweaking, this should work
 14 | on a range of OS's which support the five requirements above.`
15 |
 16 | ## Notes and Instructions
 17 |
18 | *For Terraform Part*
19 | ```
20 | The regional AWS providers are defined in providers.tf
 21 | The Terraform configuration and backend are defined in backend.tf.
22 |
23 |
 24 | If you want to read and understand the deployment in sequence, read through the templates in the following order:
 25 | 1. network_setup.tf
 26 | 2. instances.tf --> local-exec provisioners in this template kick off Ansible playbooks in ansible_templates/
27 | 3. alb_acm.tf
28 | 4. dns.tf
29 | ```
30 | *S3 Backend*
31 | ```
 32 | This project requires an S3 backend for storing the Terraform state file, therefore in the terraform block in the backend.tf file you'll need to plug in an actual bucket name before you can run "terraform init".
 33 | Please also note that the "terraform" block does not allow usage of variables, so values HAVE to be hardcoded.
34 | ```
35 | Sample command for bucket creation via CLI:
36 | ```
37 | aws s3api create-bucket --bucket
38 | ```
39 |
40 | Example
41 | ```
42 | aws s3api create-bucket --bucket myawesomebucketthatmayormaynotexistalready
43 | ```
44 |
 45 | ## Supplementary files
 46 |
47 | ```
48 | 1. ansible.cfg #A modified Ansible default config file with SSH host key checking and warnings disabled
49 | 2. aws_get_cp_hostedzone #An AWS CLI command for fetching your hosted zone for DNS part of this project
50 | 3. null_provisioners.tf #For setting up and deleting Ansible inventory files
51 | 4. variables.tf #Defines variables and default values for them for the TF templates
52 | 5. outputs.tf #Defines the outputs presented at successful completion of execution of TF apply.
53 | ```
 54 |
 55 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_multiple_workers_version/ansible_templates/inventory_aws/README.md:
--------------------------------------------------------------------------------
 3 |
4 | ```
5 | 1. This configuration file is to be used in conjunction with enabling the dynamic inventory module in ansible.cfg file
6 | 2. Official documentation can be found here: https://docs.ansible.com/ansible/latest/collections/amazon/aws/aws_ec2_inventory.html
7 | ```
8 |
 9 | ## Notes and Instructions
 10 |
11 | *How it works*
 12 | The aws_ec2 inventory module is enabled as shown in the lesson "Configuring Terraform Provisioners and Config Management via Ansible". Once enabled, the module uses this YAML configuration file, together with the preconfigured AWS credentials, to poll for EC2 instances in the region(s) listed here. So instead of hard-coding IP addresses in static inventory files, Ansible can get the IP addresses for EC2 instances on the fly using the tag names that we assigned to those instances when creating them via Terraform. How the module maps those tags to groups is also defined in this configuration file.
13 |
14 |
--------------------------------------------------------------------------------
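
A minimal sketch of the end-to-end flow described in the project README earlier in this folder, assuming the `default` AWS profile and a placeholder bucket name of your choosing:

```
# Create the S3 bucket referenced in backend.tf (us-east-1 needs no LocationConstraint)
aws s3api create-bucket --bucket my-terraform-state-bucket-placeholder

# Hardcode that bucket name in backend.tf, then initialise and deploy
terraform init
terraform plan
terraform apply

# Resources deployed by this project incur charges -- destroy them when done
terraform destroy
```
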
/aws_la_cloudplayground_multiple_workers_version/ansible_templates/inventory_aws/tf_aws_ec2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # aws ec2 ansible dynamic inventory plugin
3 | plugin: aws_ec2
4 |
5 | regions:
6 | - us-east-1
7 | - us-west-2
8 | # - us-east-2
9 | # set strict to False
10 | # if True this will make invalid entries
11 | # a fatal error
12 | strict: False
13 |
14 | keyed_groups:
 15 | # each aws ec2 instance has its own instance tags. create
16 | # a tag variable from those tags for ansible to use.
17 | # if an EC2 tag:Name is acloudguru_machine, it'll be converted to the
18 | # Ansible tag variable name as follows: tag_Name_acloudguru_machine
19 | # which can then be passed as a variable value for the host via -e flag
20 | - key: tags
21 | prefix: tag
22 | #
23 | # the following keyed groups are from the aws url:
24 | # https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
 25 | # below are some of the variables that can be used.
26 | # an example for instance_type:
27 | # aws_instance_type_t2_micro
28 | - key: architecture
29 | prefix: arch
30 | - key: tags.Applications
31 | separator: ''
32 | - key: instance_type
33 | prefix: aws_instance_type
34 | - key: placement.region
35 | prefix: aws_region
36 | - key: image_id
37 | prefix: aws_image
38 | - key: hypervisor
39 | prefix: aws_hypervisor
40 | - key: 'security_groups|json_query("[].group_id")'
41 | prefix: 'security_groups'
42 |
43 | hostnames:
44 | # a list in order of precedence for hostname variables.
45 | #
46 | - ip-address
47 | - dns-name
48 | - tag:Name
49 |
--------------------------------------------------------------------------------
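
To sanity-check the dynamic inventory file above (a sketch, assuming boto3 and AWS credentials are configured and the aws_ec2 plugin is enabled in ansible.cfg as described in the README):

```
# Show the hosts and keyed groups (tag_Name_*, aws_instance_type_*, aws_region_*, ...) the plugin builds
ansible-inventory -i ansible_templates/inventory_aws/tf_aws_ec2.yml --graph

# Ad-hoc ping of the Jenkins master via its Terraform-assigned Name tag
# (the group name tag_Name_jenkins_master_tf is an assumption derived from the tag in instances.tf)
ansible -i ansible_templates/inventory_aws/tf_aws_ec2.yml tag_Name_jenkins_master_tf -u ec2-user -m ping
```
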
/aws_la_cloudplayground_multiple_workers_version/ansible_templates/jenkins_auth:
--------------------------------------------------------------------------------
1 | admin:password
2 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_multiple_workers_version/ansible_templates/node.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | {{ ipv4 }}
4 |
5 | /home/ec2-user
6 | 1
7 | EXCLUSIVE
8 |
9 |
10 | {{ ipv4 }}
11 | 22
12 | JenkinsCreds-{{ ipv4 }}
13 | 60
14 | 10
15 | 15
16 |
17 | true
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_multiple_workers_version/aws_get_cp_hostedzone:
--------------------------------------------------------------------------------
1 | aws route53 list-hosted-zones | jq -r .HostedZones[].Name | egrep "cmcloud*"
2 |
--------------------------------------------------------------------------------
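
One way to feed the hosted zone returned by the command above into the `dns-name` variable that dns.tf expects (a sketch; the grep pattern assumes an ACG playground zone, as in the file itself):

```
# Capture the playground hosted zone name and hand it to Terraform
ZONE_NAME=$(aws route53 list-hosted-zones | jq -r '.HostedZones[].Name' | egrep "cmcloud*")
terraform apply -var "dns-name=${ZONE_NAME}"
```
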
/aws_la_cloudplayground_multiple_workers_version/backend.tf:
--------------------------------------------------------------------------------
 1 | #Set S3 backend for persisting TF state file remotely, ensure the bucket already exists
 2 | # and that the AWS user being used by TF has read/write perms
3 | terraform {
4 | required_version = ">=0.12.0"
5 | required_providers {
6 | aws = ">=3.0.0"
7 | }
8 | backend "s3" {
9 | region = "us-east-1"
10 | profile = "default"
11 | key = "terraformstatefile"
12 | bucket = ""
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
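
The terraform block above cannot interpolate variables, so the bucket is normally hardcoded; as a hedged alternative, Terraform's partial backend configuration can supply it at init time instead (the bucket name below is a placeholder):

```
# Supply the backend bucket without editing backend.tf
terraform init -backend-config="bucket=my-terraform-state-bucket-placeholder"
```
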
/aws_la_cloudplayground_multiple_workers_version/dns.tf:
--------------------------------------------------------------------------------
1 | #DNS Configuration
 2 | #Get the already publicly configured Hosted Zone on Route53 - MUST EXIST, check variables.tf for dns-name
3 | data "aws_route53_zone" "dns" {
4 | provider = aws.region-master
5 | name = var.dns-name
6 | }
7 |
8 | #Create record in hosted zone for ACM Certificate Domain verification
9 | resource "aws_route53_record" "cert_validation" {
10 | provider = aws.region-master
11 | for_each = {
12 | for val in aws_acm_certificate.jenkins-lb-https.domain_validation_options : val.domain_name => {
13 | name = val.resource_record_name
14 | record = val.resource_record_value
15 | type = val.resource_record_type
16 | }
17 | }
18 | name = each.value.name
19 | records = [each.value.record]
20 | ttl = 60
21 | type = each.value.type
22 | zone_id = data.aws_route53_zone.dns.zone_id
23 |
24 | }
25 |
26 | #Create Alias record towards ALB from Route53
27 | resource "aws_route53_record" "jenkins" {
28 | provider = aws.region-master
29 | zone_id = data.aws_route53_zone.dns.zone_id
30 | name = join(".", ["jenkins", data.aws_route53_zone.dns.name])
31 | type = "A"
32 | alias {
33 | name = aws_lb.application-lb.dns_name
34 | zone_id = aws_lb.application-lb.zone_id
35 | evaluate_target_health = true
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_multiple_workers_version/instances.tf:
--------------------------------------------------------------------------------
1 | #Get Linux AMI ID using SSM Parameter endpoint in us-east-1
2 | data "aws_ssm_parameter" "linuxAmi" {
3 | provider = aws.region-master
4 | name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
5 | }
6 |
7 | #Get Linux AMI ID using SSM Parameter endpoint in us-west-2
8 | data "aws_ssm_parameter" "linuxAmiOregon" {
9 | provider = aws.region-worker
10 | name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
11 | }
12 |
13 | #Create key-pair for logging into EC2 in us-east-1
14 | resource "aws_key_pair" "master-key" {
15 | provider = aws.region-master
16 | key_name = "jenkins"
17 | public_key = file("~/.ssh/id_rsa.pub")
18 | }
19 |
20 | #Create key-pair for logging into EC2 in us-west-2
21 | resource "aws_key_pair" "worker-key" {
22 | provider = aws.region-worker
23 | key_name = "jenkins"
24 | public_key = file("~/.ssh/id_rsa.pub")
25 | }
26 |
27 | #Create and bootstrap EC2 in us-east-1
28 | resource "aws_instance" "jenkins-master" {
29 | provider = aws.region-master
30 | ami = data.aws_ssm_parameter.linuxAmi.value
31 | instance_type = var.instance-type
32 | key_name = aws_key_pair.master-key.key_name
33 | associate_public_ip_address = true
34 | vpc_security_group_ids = [aws_security_group.jenkins-sg.id]
35 | subnet_id = aws_subnet.subnet_1.id
36 | provisioner "local-exec" {
37 | command = < instance.public_ip
18 | }
19 | }
20 | output "Jenkins-Worker-Private-IPs" {
21 | value = {
22 | for instance in aws_instance.jenkins-worker-oregon :
23 | instance.id => instance.private_ip
24 | }
25 | }
26 |
27 | output "url" {
28 | value = aws_route53_record.jenkins.fqdn
29 | }
30 |
--------------------------------------------------------------------------------
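
After a successful apply, the outputs shown above can be read back and used for a quick smoke test (a sketch; on Terraform 0.12, `terraform output` prints string values unquoted):

```
# Read individual outputs
terraform output url
terraform output Jenkins-Worker-Public-IPs

# Expect the Jenkins login page to answer behind the ALB
curl -ILs "https://$(terraform output url)" | head -n 1
```
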
/aws_la_cloudplayground_multiple_workers_version/providers.tf:
--------------------------------------------------------------------------------
1 | #Defining multiple providers using "alias" parameter
2 | provider "aws" {
3 | profile = var.profile
4 | region = var.region-master
5 | alias = "region-master"
6 | }
7 |
8 | provider "aws" {
9 | profile = var.profile
10 | region = var.region-worker
11 | alias = "region-worker"
12 | }
13 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_multiple_workers_version/variables.tf:
--------------------------------------------------------------------------------
1 | variable "external_ip" {
2 | type = string
3 | default = "0.0.0.0/0"
4 | }
5 |
6 | variable "instance-type" {
7 | type = string
8 | default = "t3.micro"
9 | # validation {
10 | # condition = can(regex("[^t2]", var.instance-type))
11 | # error_message = "Instance type cannot be anything other than t2 or t3 type and also not t3a.micro."
12 | # }
13 | }
14 |
15 | variable "dns-name" {
16 | type = string
17 | default = "" # e.g "cmcloudlab1234.info."
18 | }
19 |
20 | variable "profile" {
21 | type = string
22 | default = "default"
23 | }
24 |
25 | variable "region-master" {
26 | type = string
27 | default = "us-east-1"
28 | }
29 |
30 | variable "region-worker" {
31 | type = string
32 | default = "us-west-2"
33 | }
34 |
35 | #How many Jenkins workers to spin up
36 | variable "workers-count" {
37 | type = number
38 | default = 1
39 | }
40 |
--------------------------------------------------------------------------------
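
Any of the defaults above can be overridden at plan/apply time; for example, to run three workers against a playground zone (the zone name below is the example from the variable comment):

```
terraform apply -var "workers-count=3" -var "dns-name=cmcloudlab1234.info."
```
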
/aws_la_cloudplayground_version/README.md:
--------------------------------------------------------------------------------
 1 |
 2 | ## Setup Requirements
 3 |
4 | ```
 5 | 1. Terraform binary >= 0.12.24
 6 | 2. Python needs to be installed on all nodes (on most modern Linux systems it's available by default)
7 | 3. Ansible (install via pip)
8 | 4. AWS CLI (install via pip)
9 | 5. jq (install via package manager) - OPTIONAL
10 | ```
11 |
 12 | `This project has been tested on macOS (Mojave) and CentOS 7. The author provides no guarantees for other operating systems,
 13 | although the steps are generic enough that, with little or no tweaking, this should work
 14 | on a range of OS's which support the five requirements above.`
15 |
 16 | ## Notes and Instructions
 17 |
18 | *For Terraform Part*
19 | ```
 20 | If you want to read and understand the deployment in sequence, read through the templates in the following order:
 21 | 1. network_setup.tf
 22 | 2. instances.tf --> local-exec provisioners in this template kick off Ansible playbooks in ansible_templates/
23 | 3. alb_acm.tf
24 | 4. dns.tf
25 | ```
26 |
 27 | Important note: In *instances.tf*, be sure to plug in a valid state-file bucket name and key in the _terraform_ block, otherwise *terraform init* will fail.
28 |
 29 | ## Supplementary files
 30 |
31 | ```
32 | 1. ansible.cfg #A modified Ansible default config file with SSH host key checking and warnings disabled
33 | 2. aws_get_cp_hostedzone #An AWS CLI command for fetching your hosted zone for DNS part of this project
34 | 3. null_provisioners.tf #For setting up and deleting Ansible inventory files
35 | 4. variables.tf #Defines variables and default values for them for the TF templates
36 | 5. outputs.tf #Defines the outputs presented at successful completion of execution of TF apply.
37 | ```
38 |
 39 | ## Ansible playbooks
 40 |
41 | ```
 42 | 1. cred-privkey.j2 #Jinja template for creating Jenkins credentials via a Jenkins API call (populates private key)
43 | 2. install_jenkins.yaml #Playbook for Jenkins Master
44 | 3. install_worker.yaml #Playbook for Jenkins Worker
 45 | 4. node.j2 #Jinja template for registering the worker node with the Jenkins Master via the Jenkins CLI (populates IP)
46 | 5. jenkins_auth #Provides the file with preset credentials for our Jenkins Master
47 | ```
48 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/alb_acm.tf:
--------------------------------------------------------------------------------
1 | #ACM CONFIGURATION
2 | resource "aws_acm_certificate" "jenkins-lb-https" {
3 | provider = aws.region-master
4 | domain_name = join(".", ["jenkins", data.aws_route53_zone.dns.name])
5 | validation_method = "DNS"
6 | tags = {
7 | Name = "Jenkins-ACM"
8 | }
9 |
10 | }
11 |
12 | resource "aws_acm_certificate_validation" "cert" {
13 | provider = aws.region-master
14 | certificate_arn = aws_acm_certificate.jenkins-lb-https.arn
15 | validation_record_fqdns = [aws_route53_record.cert_validation.fqdn]
16 | }
17 |
18 | ####ACM CONFIG END
19 |
20 |
21 | resource "aws_lb" "application-lb" {
22 | provider = aws.region-master
23 | name = "jenkins-lb"
24 | internal = false
25 | load_balancer_type = "application"
26 | security_groups = [aws_security_group.lb-sg.id]
27 | subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id]
28 | tags = {
29 | Name = "Jenkins-LB"
30 | }
31 | }
32 |
33 | resource "aws_lb_target_group" "app-lb-tg" {
34 | provider = aws.region-master
35 | name = "app-lb-tg"
36 | port = 8080
37 | target_type = "instance"
38 | vpc_id = aws_vpc.vpc_master.id
39 | protocol = "HTTP"
40 | health_check {
41 | enabled = true
42 | interval = 10
43 | path = "/login"
44 | port = 8080
45 | protocol = "HTTP"
46 | matcher = "200-299"
47 | }
48 | tags = {
49 | Name = "jenkins-target-group"
50 | }
51 | }
52 |
53 | resource "aws_lb_listener" "jenkins-listener" {
54 | provider = aws.region-master
55 | load_balancer_arn = aws_lb.application-lb.arn
56 | ssl_policy = "ELBSecurityPolicy-2016-08"
57 | port = "443"
58 | protocol = "HTTPS"
59 | certificate_arn = aws_acm_certificate.jenkins-lb-https.arn
60 | default_action {
61 | type = "forward"
62 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
63 | }
64 | }
65 |
66 | resource "aws_lb_listener" "jenkins-listener-http" {
67 | provider = aws.region-master
68 | load_balancer_arn = aws_lb.application-lb.arn
69 | port = "80"
70 | protocol = "HTTP"
71 | default_action {
72 | type = "redirect"
73 | redirect {
74 | port = "443"
75 | protocol = "HTTPS"
76 | status_code = "HTTP_301"
77 | }
78 | }
79 | }
80 |
81 | resource "aws_lb_target_group_attachment" "jenkins-master-attach" {
82 | provider = aws.region-master
83 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
84 | target_id = aws_instance.jenkins-master.id
85 | port = 8080
86 | }
87 |
--------------------------------------------------------------------------------
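
The second listener above redirects plain HTTP to HTTPS; one way to confirm the 301 once the stack is up (a sketch, assuming a `url` output like the one in the multiple-workers version):

```
# Expect "HTTP/1.1 301 Moved Permanently" and a Location: https://... header from the ALB
curl -sI "http://$(terraform output url)" | egrep -i "^(HTTP|Location)"
```
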
/aws_la_cloudplayground_version/ansible.cfg:
--------------------------------------------------------------------------------
1 | # Example config file for ansible -- https://ansible.com/
2 | # =======================================================
3 |
4 | # Nearly all parameters can be overridden in ansible-playbook
5 | # or with command line flags. Ansible will read ANSIBLE_CONFIG,
6 | # ansible.cfg in the current working directory, .ansible.cfg in
7 | # the home directory, or /etc/ansible/ansible.cfg, whichever it
8 | # finds first
9 |
10 | # For a full list of available options, run ansible-config list or see the
11 | # documentation: https://docs.ansible.com/ansible/latest/reference_appendices/config.html.
12 |
13 | [defaults]
14 | #inventory = /etc/ansible/hosts
15 | #library = ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
16 | #module_utils = ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
17 | #remote_tmp = ~/.ansible/tmp
18 | #local_tmp = ~/.ansible/tmp
19 | #forks = 5
20 | #poll_interval = 0.001
21 | #ask_pass = False
22 | #transport = smart
23 |
24 | # Plays will gather facts by default, which contain information about
25 | # the remote system.
26 | #
27 | # smart - gather by default, but don't regather if already gathered
28 | # implicit - gather by default, turn off with gather_facts: False
29 | # explicit - do not gather by default, must say gather_facts: True
30 | #gathering = implicit
31 |
32 | # This only affects the gathering done by a play's gather_facts directive,
33 | # by default gathering retrieves all facts subsets
34 | # all - gather all subsets
35 | # network - gather min and network facts
36 | # hardware - gather hardware facts (longest facts to retrieve)
37 | # virtual - gather min and virtual facts
38 | # facter - import facts from facter
39 | # ohai - import facts from ohai
40 | # You can combine them using comma (ex: network,virtual)
41 | # You can negate them using ! (ex: !hardware,!facter,!ohai)
42 | # A minimal set of facts is always gathered.
43 | #
44 | #gather_subset = all
45 |
46 | # some hardware related facts are collected
47 | # with a maximum timeout of 10 seconds. This
48 | # option lets you increase or decrease that
49 | # timeout to something more suitable for the
50 | # environment.
51 | #
52 | #gather_timeout = 10
53 |
54 | # Ansible facts are available inside the ansible_facts.* dictionary
55 | # namespace. This setting maintains the behaviour which was the default prior
56 | # to 2.5, duplicating these variables into the main namespace, each with a
57 | # prefix of 'ansible_'.
58 | # This variable is set to True by default for backwards compatibility. It
59 | # will be changed to a default of 'False' in a future release.
60 | #
61 | #inject_facts_as_vars = True
62 |
63 | # Paths to search for collections, colon separated
64 | # collections_paths = ~/.ansible/collections:/usr/share/ansible/collections
65 |
66 | # Paths to search for roles, colon separated
67 | #roles_path = ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
68 |
69 | # Host key checking is enabled by default
70 | host_key_checking = False
71 |
72 | # You can only have one 'stdout' callback type enabled at a time. The default
73 | # is 'default'. The 'yaml' or 'debug' stdout callback plugins are easier to read.
74 | #
75 | #stdout_callback = default
76 | #stdout_callback = yaml
77 | #stdout_callback = debug
78 |
79 |
80 | # Ansible ships with some plugins that require whitelisting,
81 | # this is done to avoid running all of a type by default.
82 | # These setting lists those that you want enabled for your system.
83 | # Custom plugins should not need this unless plugin author disables them
84 | # by default.
85 | #
86 | # Enable callback plugins, they can output to stdout but cannot be 'stdout' type.
87 | #callback_whitelist = timer, mail
88 |
89 | # Determine whether includes in tasks and handlers are "static" by
90 | # default. As of 2.0, includes are dynamic by default. Setting these
91 | # values to True will make includes behave more like they did in the
92 | # 1.x versions.
93 | #
94 | #task_includes_static = False
95 | #handler_includes_static = False
96 |
97 | # Controls if a missing handler for a notification event is an error or a warning
98 | #error_on_missing_handler = True
99 |
100 | # Default timeout for connection plugins
101 | #timeout = 10
102 |
103 | # Default user to use for playbooks if user is not specified
104 | # Uses the connection plugin's default, normally the user currently executing Ansible,
105 | # unless a different user is specified here.
106 | #
107 | #remote_user = root
108 |
109 | # Logging is off by default unless this path is defined.
110 | #log_path = /var/log/ansible.log
111 |
112 | # Default module to use when running ad-hoc commands
113 | #module_name = command
114 |
115 | # Use this shell for commands executed under sudo.
116 | # you may need to change this to /bin/bash in rare instances
117 | # if sudo is constrained.
118 | #
119 | #executable = /bin/sh
120 |
121 | # By default, variables from roles will be visible in the global variable
122 | # scope. To prevent this, set the following option to True, and only
123 | # tasks and handlers within the role will see the variables there
124 | #
125 | #private_role_vars = False
126 |
127 | # List any Jinja2 extensions to enable here.
128 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
129 |
130 | # If set, always use this private key file for authentication, same as
131 | # if passing --private-key to ansible or ansible-playbook
132 | #
133 | #private_key_file = /path/to/file
134 |
135 | # If set, configures the path to the Vault password file as an alternative to
136 | # specifying --vault-password-file on the command line. This can also be
137 | # an executable script that returns the vault password to stdout.
138 | #
139 | #vault_password_file = /path/to/vault_password_file
140 |
141 | # Format of string {{ ansible_managed }} available within Jinja2
142 | # templates indicates to users editing templates files will be replaced.
143 | # replacing {file}, {host} and {uid} and strftime codes with proper values.
144 | #
145 | #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
146 |
147 | # {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
148 | # in some situations so the default is a static string:
149 | #
150 | #ansible_managed = Ansible managed
151 |
152 | # By default, ansible-playbook will display "Skipping [host]" if it determines a task
153 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
154 | # messages. NOTE: the task header will still be shown regardless of whether or not the
155 | # task is skipped.
156 | #
157 | #display_skipped_hosts = True
158 |
159 | # By default, if a task in a playbook does not include a name: field then
160 | # ansible-playbook will construct a header that includes the task's action but
161 | # not the task's args. This is a security feature because ansible cannot know
162 | # if the *module* considers an argument to be no_log at the time that the
163 | # header is printed. If your environment doesn't have a problem securing
164 | # stdout from ansible-playbook (or you have manually specified no_log in your
165 | # playbook on all of the tasks where you have secret information) then you can
166 | # safely set this to True to get more informative messages.
167 | #
168 | #display_args_to_stdout = False
169 |
170 | # Ansible will raise errors when attempting to dereference
171 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line
172 | # to change this behavior.
173 | #
174 | #error_on_undefined_vars = False
175 |
176 | # Ansible may display warnings based on the configuration of the
177 | # system running ansible itself. This may include warnings about 3rd party packages or
178 | # other conditions that should be resolved if possible.
179 | # To disable these warnings, set the following value to False:
180 | #
181 | system_warnings = False
182 |
183 | # Ansible may display deprecation warnings for language
184 | # features that should no longer be used and will be removed in future versions.
185 | # To disable these warnings, set the following value to False:
186 | #
187 | deprecation_warnings = False
188 |
189 | # Ansible can optionally warn when usage of the shell and
190 | # command module appear to be simplified by using a default Ansible module
191 | # instead. These warnings can be silenced by adjusting the following
192 | # setting or adding warn=yes or warn=no to the end of the command line
193 | # parameter string. This will for example suggest using the git module
194 | # instead of shelling out to the git command.
195 | #
196 | command_warnings = False
197 |
198 |
199 | # set plugin path directories here, separate with colons
200 | #action_plugins = /usr/share/ansible/plugins/action
201 | #become_plugins = /usr/share/ansible/plugins/become
202 | #cache_plugins = /usr/share/ansible/plugins/cache
203 | #callback_plugins = /usr/share/ansible/plugins/callback
204 | #connection_plugins = /usr/share/ansible/plugins/connection
205 | #lookup_plugins = /usr/share/ansible/plugins/lookup
206 | #inventory_plugins = /usr/share/ansible/plugins/inventory
207 | #vars_plugins = /usr/share/ansible/plugins/vars
208 | #filter_plugins = /usr/share/ansible/plugins/filter
209 | #test_plugins = /usr/share/ansible/plugins/test
210 | #terminal_plugins = /usr/share/ansible/plugins/terminal
211 | #strategy_plugins = /usr/share/ansible/plugins/strategy
212 |
213 |
214 | # Ansible will use the 'linear' strategy but you may want to try another one.
215 | #strategy = linear
216 |
217 | # By default, callbacks are not loaded for /bin/ansible. Enable this if you
218 | # want, for example, a notification or logging callback to also apply to
219 | # /bin/ansible runs
220 | #
221 | #bin_ansible_callbacks = False
222 |
223 |
224 | # Don't like cows? that's unfortunate.
225 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
226 | #nocows = 1
227 |
228 | # Set which cowsay stencil you'd like to use by default. When set to 'random',
229 | # a random stencil will be selected for each task. The selection will be filtered
230 | # against the `cow_whitelist` option below.
231 | #
232 | #cow_selection = default
233 | #cow_selection = random
234 |
235 | # When using the 'random' option for cowsay, stencils will be restricted to this list.
236 | # it should be formatted as a comma-separated list with no spaces between names.
237 | # NOTE: line continuations here are for formatting purposes only, as the INI parser
238 | # in python does not support them.
239 | #
240 | #cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
241 | # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
242 | # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
243 |
244 | # Don't like colors either?
245 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
246 | #
247 | #nocolor = 1
248 |
249 | # If set to a persistent type (not 'memory', for example 'redis') fact values
250 | # from previous runs in Ansible will be stored. This may be useful when
251 | # wanting to use, for example, IP information from one group of servers
252 | # without having to talk to them in the same playbook run to get their
253 | # current IP information.
254 | #
255 | #fact_caching = memory
256 |
257 | # This option tells Ansible where to cache facts. The value is plugin dependent.
258 | # For the jsonfile plugin, it should be a path to a local directory.
259 | # For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0
260 | #
261 | #fact_caching_connection=/tmp
262 |
263 | # retry files
264 | # When a playbook fails a .retry file can be created that will be placed in ~/
265 | # You can enable this feature by setting retry_files_enabled to True
266 | # and you can change the location of the files by setting retry_files_save_path
267 | #
268 | #retry_files_enabled = False
269 | #retry_files_save_path = ~/.ansible-retry
270 |
271 | # prevents logging of task data, off by default
272 | #no_log = False
273 |
274 | # prevents logging of tasks, but only on the targets, data is still logged on the master/controller
275 | #no_target_syslog = False
276 |
277 | # Controls whether Ansible will raise an error or warning if a task has no
278 | # choice but to create world readable temporary files to execute a module on
279 | # the remote machine. This option is False by default for security. Users may
280 | # turn this on to have behaviour more like Ansible prior to 2.1.x. See
281 | # https://docs.ansible.com/ansible/latest/user_guide/become.html#becoming-an-unprivileged-user
282 | # for more secure ways to fix this than enabling this option.
283 | #
284 | #allow_world_readable_tmpfiles = False
285 |
286 | # Controls what compression method is used for new-style ansible modules when
287 | # they are sent to the remote system. The compression types depend on having
288 | # support compiled into both the controller's python and the client's python.
289 | # The names should match with the python Zipfile compression types:
290 | # * ZIP_STORED (no compression. available everywhere)
291 | # * ZIP_DEFLATED (uses zlib, the default)
292 | # These values may be set per host via the ansible_module_compression inventory variable.
293 | #
294 | #module_compression = 'ZIP_DEFLATED'
295 |
296 | # This controls the cutoff point (in bytes) on --diff for files
297 | # set to 0 for unlimited (RAM may suffer!).
298 | #
299 | #max_diff_size = 104448
300 |
301 | # Controls showing custom stats at the end, off by default
302 | #show_custom_stats = False
303 |
304 | # Controls which files to ignore when using a directory as inventory with
305 | # possibly multiple sources (both static and dynamic)
306 | #
307 | #inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
308 |
309 | # This family of modules use an alternative execution path optimized for network appliances
310 | # only update this setting if you know how this works, otherwise it can break module execution
311 | #
312 | #network_group_modules=eos, nxos, ios, iosxr, junos, vyos
313 |
314 | # When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
315 | # a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
316 | # jinja2 templating language which will be run through the templating engine.
317 | # ENABLING THIS COULD BE A SECURITY RISK
318 | #
319 | #allow_unsafe_lookups = False
320 |
321 | # set default errors for all plays
322 | #any_errors_fatal = False
323 |
324 |
325 | [inventory]
326 | # List of enabled inventory plugins and the order in which they are used.
327 | #enable_plugins = host_list, script, auto, yaml, ini, toml
328 |
329 | # Ignore these extensions when parsing a directory as inventory source
330 | #ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
331 |
332 | # ignore files matching these patterns when parsing a directory as inventory source
333 | #ignore_patterns=
334 |
335 | # If 'True' unparsed inventory sources become fatal errors, otherwise they are warnings.
336 | #unparsed_is_failed = False
337 |
338 |
339 | [privilege_escalation]
340 | #become = False
341 | #become_method = sudo
342 | #become_ask_pass = False
343 |
344 |
345 | ## Connection Plugins ##
346 |
347 | # Settings for each connection plugin go under a section titled '[[plugin_name]_connection]'
348 | # To view available connection plugins, run ansible-doc -t connection -l
349 | # To view available options for a connection plugin, run ansible-doc -t connection [plugin_name]
350 | # https://docs.ansible.com/ansible/latest/plugins/connection.html
351 |
352 | [paramiko_connection]
353 | # uncomment this line to cause the paramiko connection plugin to not record new host
354 | # keys encountered. Increases performance on new host additions. Setting works independently of the
355 | # host key checking setting above.
356 | #record_host_keys=False
357 |
358 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
359 | # line to disable this behaviour.
360 | #pty = False
361 |
362 | # paramiko will default to looking for SSH keys initially when trying to
363 | # authenticate to remote devices. This is a problem for some network devices
364 | # that close the connection after a key failure. Uncomment this line to
365 | # disable the Paramiko look for keys function
366 | #look_for_keys = False
367 |
368 | # When using persistent connections with Paramiko, the connection runs in a
369 | # background process. If the host doesn't already have a valid SSH key, by
370 | # default Ansible will prompt to add the host key. This will cause connections
371 | # running in background processes to fail. Uncomment this line to have
372 | # Paramiko automatically add host keys.
373 | #host_key_auto_add = True
374 |
375 |
376 | [ssh_connection]
377 | # ssh arguments to use
378 | # Leaving off ControlPersist will result in poor performance, so use
379 | # paramiko on older platforms rather than removing it, -C controls compression use
380 | #ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
381 |
382 | # The base directory for the ControlPath sockets.
383 | # This is the "%(directory)s" in the control_path option
384 | #
385 | # Example:
386 | # control_path_dir = /tmp/.ansible/cp
387 | #control_path_dir = ~/.ansible/cp
388 |
389 | # The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
390 | # port and username (empty string in the config). The hash mitigates a common problem users
391 | # found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
392 | # In those cases, a "too long for Unix domain socket" ssh error would occur.
393 | #
394 | # Example:
395 | # control_path = %(directory)s/%%C
396 | #control_path =
397 |
398 | # Enabling pipelining reduces the number of SSH operations required to
399 | # execute a module on the remote server. This can result in a significant
400 | # performance improvement when enabled, however when using "sudo:" you must
401 | # first disable 'requiretty' in /etc/sudoers
402 | #
403 | # By default, this option is disabled to preserve compatibility with
404 | # sudoers configurations that have requiretty (the default on many distros).
405 | #
406 | #pipelining = False
407 |
408 | # Control the mechanism for transferring files (old)
409 | # * smart = try sftp and then try scp [default]
410 | # * True = use scp only
411 | # * False = use sftp only
412 | #scp_if_ssh = smart
413 |
414 | # Control the mechanism for transferring files (new)
415 | # If set, this will override the scp_if_ssh option
416 | # * sftp = use sftp to transfer files
417 | # * scp = use scp to transfer files
418 | # * piped = use 'dd' over SSH to transfer files
419 | # * smart = try sftp, scp, and piped, in that order [default]
420 | #transfer_method = smart
421 |
422 | # If False, sftp will not use batch mode to transfer files. This may cause some
423 | # types of file transfer failures impossible to catch however, and should
424 | # only be disabled if your sftp version has problems with batch mode
425 | #sftp_batch_mode = False
426 |
427 | # The -tt argument is passed to ssh when pipelining is not enabled because sudo
428 | # requires a tty by default.
429 | #usetty = True
430 |
431 | # Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
432 | # For each retry attempt, there is an exponential backoff,
433 | # so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
434 | #retries = 3
435 |
436 |
437 | [persistent_connection]
438 | # Configures the persistent connection timeout value in seconds. This value is
439 | # how long the persistent connection will remain idle before it is destroyed.
440 | # If the connection doesn't receive a request before the timeout value
441 | # expires, the connection is shutdown. The default value is 30 seconds.
442 | #connect_timeout = 30
443 |
444 | # The command timeout value defines the amount of time to wait for a command
445 | # or RPC call before timing out. The value for the command timeout must
446 | # be less than the value of the persistent connection idle timeout (connect_timeout)
447 | # The default value is 30 second.
448 | #command_timeout = 30
449 |
450 |
451 | ## Become Plugins ##
452 |
453 | # Settings for become plugins go under a section named '[[plugin_name]_become_plugin]'
454 | # To view available become plugins, run ansible-doc -t become -l
455 | # To view available options for a specific plugin, run ansible-doc -t become [plugin_name]
456 | # https://docs.ansible.com/ansible/latest/plugins/become.html
457 |
458 | [sudo_become_plugin]
459 | #flags = -H -S -n
460 | #user = root
461 |
462 |
463 | [selinux]
464 | # file systems that require special treatment when dealing with security context
465 | # the default behaviour that copies the existing context or uses the user default
466 | # needs to be changed to use the file system dependent context.
467 | #special_context_filesystems=fuse,nfs,vboxsf,ramfs,9p,vfat
468 |
469 | # Set this to True to allow libvirt_lxc connections to work without SELinux.
470 | #libvirt_lxc_noseclabel = False
471 |
472 |
473 | [colors]
474 | #highlight = white
475 | #verbose = blue
476 | #warn = bright purple
477 | #error = red
478 | #debug = dark gray
479 | #deprecate = purple
480 | #skip = cyan
481 | #unreachable = red
482 | #ok = green
483 | #changed = yellow
484 | #diff_add = green
485 | #diff_remove = red
486 | #diff_lines = cyan
487 |
488 |
489 | [diff]
490 | # Always print diff when running ( same as always running with -D/--diff )
491 | #always = False
492 |
493 | # Set how many context lines to show in diff
494 | #context = 3
495 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/ansible_templates/cred-privkey.j2:
--------------------------------------------------------------------------------
1 |
2 | GLOBAL
3 | JenkinsCreds-{{ ipv4 }}
4 | Credentials for Workers
5 | ec2-user
6 |
7 |
8 | {{ priv_key }}
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/ansible_templates/install_jenkins.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: jenkins-master
3 | become: yes
4 | become_user: root
5 | tasks:
6 | - name: install dependencies
7 | yum:
8 | name: "{{ package }} "
9 | state: present
10 | vars:
11 | package:
12 | - wget
13 | - java-1.8.0-openjdk-devel
14 | - git
15 |
16 | - block:
17 | - name: clone repo
18 | git:
19 | #repo: https://github.com/linuxacademy/content-cje-pipelines.git
20 | repo: https://github.com/moosakhalid/jenkins-la-course.git
21 | dest: /tmp/jenkins
22 |
23 | - name: Setup Jenkins Repo
24 | get_url:
25 | url: https://pkg.jenkins.io/redhat/jenkins.repo
26 | dest: /etc/yum.repos.d/jenkins.repo
27 |
28 | - name: Import Jenkins Repo GPG key
29 | rpm_key:
30 | state: present
31 | key: https://pkg.jenkins.io/redhat/jenkins.io.key
32 |
33 | - name: install Jenkins
34 | yum:
35 | name: jenkins
36 | state: present
37 |
38 | - name: Temporarily Stop Jenkins
39 | service:
40 | name: jenkins
41 | state: stopped
42 |
43 | - name: Modify user shell
44 | user:
45 | name: jenkins
46 | shell: /bin/bash
47 |
48 |
49 | - name: Delete Jenkins default dir
50 | file:
51 | state: absent
52 | path: /var/lib/jenkins
53 |
54 | - name: Copy over cloned Jenkins /var/lib/jenkins
55 | copy:
56 | remote_src: yes
57 | src: /tmp/jenkins/jenkins
58 | dest: /var/lib
59 |
60 |
61 | - name: Restore jenkins user ownership on /var/lib/jenkins
62 | file:
63 | path: /var/lib/jenkins
64 | state: directory
65 | recurse: yes
66 | owner: jenkins
67 |
68 | - name: Start Jenkins
69 | service:
70 | name: jenkins
71 | state: started
72 | enabled: yes
73 |
74 | - name: Wait until Jenkins is up
75 | shell: result_first=1; while [[ $result_first != 0 ]]; do if [[ `grep 'Jenkins is fully up and running' /var/log/jenkins/jenkins.log` ]];then result_first=0;else sleep 4;fi;done
76 | register: result
77 | until: result.rc == 0
78 |
79 | rescue:
80 | - name: Delete Inventory file
81 | file:
82 | path: ansible_templates/inventory
83 | state: absent
84 | delegate_to: localhost
85 |
--------------------------------------------------------------------------------
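
This playbook is normally kicked off by the local-exec provisioner in instances.tf; it can be re-run by hand against the static inventory file that provisioner writes, using the same command the provisioner uses:

```
ansible-playbook -i ansible_templates/inventory ansible_templates/install_jenkins.yaml
```
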
/aws_la_cloudplayground_version/ansible_templates/install_worker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: worker
3 | become: yes
4 | tasks:
5 | - block:
6 | - name: Generate SSH Keypair
7 | openssh_keypair:
8 | path: /home/ec2-user/.ssh/id_rsa
9 | type: rsa
10 | size: 2048
11 | group: ec2-user
12 | owner: ec2-user
13 |
14 | - name: Add own key to authorized_keys file
15 | shell: "cat /home/ec2-user/.ssh/id_rsa.pub >> /home/ec2-user/.ssh/authorized_keys && chmod 600 /home/ec2-user/.ssh/authorized_keys"
16 |
17 | - name: Copy over Jenkins Worker Node creation payload xml
18 | vars:
19 | ipv4: "{{ ansible_default_ipv4.address }}"
20 | template:
21 | src: node.j2
22 | dest: /home/ec2-user/node.xml
23 | owner: ec2-user
24 | mode: '0644'
25 | - name: Read generated private key id_rsa
26 | slurp:
27 | src: /home/ec2-user/.ssh/id_rsa
28 | register: pkey
29 | - debug:
30 | msg: "{{ pkey['content'] | b64decode }}"
31 | - name: Copy over creds.xml and create Jenkins credential
32 | vars:
33 | priv_key: "{{ pkey['content'] | b64decode }}"
34 | ipv4: "{{ ansible_default_ipv4.address }}"
35 | template:
36 | src: cred-privkey.j2
37 | dest: /home/ec2-user/creds.xml
38 |
39 | - name: install dependencies
40 | yum:
41 | name: "{{ package }} "
42 | vars:
43 | package:
44 | - wget
45 | - java-1.8.0-openjdk-devel
46 | - git
47 |
48 |
49 | - name: Get Jenkins CLI jar file
50 | vars:
51 | master: "{{ master_ip }}"
52 | get_url:
53 | url: "http://{{ master }}:8080/jnlpJars/jenkins-cli.jar"
54 | dest: /home/ec2-user/jenkins-cli.jar
55 | force: yes
56 | register: download
57 | ignore_errors: yes
58 | until: '"{{ download.status_code }}" == "200"'
59 | retries: 5
60 | delay: 5
61 |
62 | - name: Place Jenkins Auth file
63 | copy:
64 | src: jenkins_auth
65 | dest: /home/ec2-user/
66 | owner: ec2-user
67 | mode: '0600'
68 |
69 | - name: Create Jenkins Credential
70 | vars:
71 | master: "{{ master_ip }}"
72 | shell: "cat /home/ec2-user/creds.xml | java -jar /home/ec2-user/jenkins-cli.jar -auth @/home/ec2-user/jenkins_auth -s http://{{master}}:8080 create-credentials-by-xml system::system::jenkins _"
73 |
74 | - name: Register node as worker
75 | vars:
76 | master: "{{ master_ip }}"
77 | shell: 'cat /home/ec2-user/node.xml | java -jar /home/ec2-user/jenkins-cli.jar -auth @/home/ec2-user/jenkins_auth -s http://{{ master }}:8080 create-node'
78 |
79 | rescue:
80 | - name:
81 | file:
82 | path: ansible_templates/inventory_worker
83 | state: absent
84 | delegate_to: localhost
85 |
86 |
87 |
--------------------------------------------------------------------------------
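
The worker playbook expects `master_ip` (and the provisioner also passes `worker_priv_ip`) as extra vars; re-running it by hand mirrors the local-exec command in instances.tf. The IP addresses below are placeholders:

```
# Substitute the master's private IP, the worker's private IP, and the worker's public IP (for -l)
ansible-playbook --extra-vars 'master_ip=10.0.1.10 worker_priv_ip=192.168.1.10' \
  -i ansible_templates/inventory_worker -l 203.0.113.25 \
  ansible_templates/install_worker.yaml
```
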
/aws_la_cloudplayground_version/ansible_templates/jenkins_auth:
--------------------------------------------------------------------------------
1 | admin:password
2 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/ansible_templates/node.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | {{ ipv4 }}
4 |
5 | /home/ec2-user
6 | 1
7 | EXCLUSIVE
8 |
9 |
10 | {{ ipv4 }}
11 | 22
12 | JenkinsCreds-{{ ipv4 }}
13 | 60
14 | 10
15 | 15
16 |
17 | true
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/aws_get_cp_hostedzone:
--------------------------------------------------------------------------------
1 | aws route53 list-hosted-zones | jq -r .HostedZones[].Name | egrep "cmcloud*"
2 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/dns.tf:
--------------------------------------------------------------------------------
1 | #DNS Configuration
 2 | #Get the already publicly configured Hosted Zone on Route53 - MUST EXIST, check variables.tf for dns-name
3 | data "aws_route53_zone" "dns" {
4 | provider = aws.region-master
5 | name = var.dns-name
6 | }
7 |
8 | #Create record in hosted zone for ACM Certificate Domain verification
9 | resource "aws_route53_record" "cert_validation" {
10 | provider = aws.region-master
11 | name = aws_acm_certificate.jenkins-lb-https.domain_validation_options.0.resource_record_name
12 | type = aws_acm_certificate.jenkins-lb-https.domain_validation_options.0.resource_record_type
13 | zone_id = data.aws_route53_zone.dns.zone_id
14 | records = [aws_acm_certificate.jenkins-lb-https.domain_validation_options.0.resource_record_value]
15 | ttl = 60
16 | }
17 |
18 | #Create Alias record towards ALB from Route53
19 | resource "aws_route53_record" "jenkins" {
20 | provider = aws.region-master
21 | zone_id = data.aws_route53_zone.dns.zone_id
22 | name = join(".", ["jenkins", data.aws_route53_zone.dns.name])
23 | type = "A"
24 | alias {
25 | name = aws_lb.application-lb.dns_name
26 | zone_id = aws_lb.application-lb.zone_id
27 | evaluate_target_health = true
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/instances.tf:
--------------------------------------------------------------------------------
 1 | #Set S3 backend for persisting TF state file remotely, ensure the bucket already exists
 2 | # and that the AWS user being used by TF has read/write perms
3 | terraform {
4 | backend "s3" {
5 | region = "us-east-1"
6 | profile = "default"
7 | key = "terraform-state-file/mystatefile.tfstate"
8 | bucket = "> ansible_templates/inventory
54 | ${self.public_ip} ansible_user=ec2-user
55 | EOF
56 | EOD
57 | }
58 |
59 | provisioner "local-exec" {
60 | command = "aws --profile ${var.profile} ec2 wait instance-status-ok --region ${var.region-master} --instance-ids ${self.id} && ansible-playbook -i ansible_templates/inventory ansible_templates/install_jenkins.yaml"
61 | }
62 | tags = {
63 | Name = "jenkins-master-tf"
64 | }
65 | }
66 |
67 | #Create EC2 in us-west-2
68 | resource "aws_instance" "jenkins-worker-oregon" {
69 | provider = aws.region-worker
70 | count = var.workers-count
71 | ami = data.aws_ssm_parameter.linuxAmiOregon.value
72 | instance_type = var.instance-type
73 | key_name = aws_key_pair.worker-key.key_name
74 | associate_public_ip_address = true
75 | vpc_security_group_ids = [aws_security_group.jenkins-sg-oregon.id]
76 | subnet_id = aws_subnet.subnet_1_oregon.id
77 | provisioner "local-exec" {
78 | command = <> ansible_templates/inventory_worker
80 | ${self.public_ip} ansible_user=ec2-user
81 | EOF
82 | EOD
83 | }
84 | provisioner "local-exec" {
85 | when = destroy
86 | command = "sed -i '/${self.public_ip}/d' ansible_templates/inventory_worker &> /dev/null || echo"
87 | }
88 | provisioner "local-exec" {
89 | command = "aws --profile ${var.profile} ec2 wait instance-status-ok --region ${var.region-worker} --instance-ids ${self.id} && ansible-playbook --extra-vars 'master_ip=${aws_instance.jenkins-master.private_ip} worker_priv_ip=${self.private_ip}' -i ansible_templates/inventory_worker -l ${self.public_ip} ansible_templates/install_worker.yaml"
90 | }
91 | tags = {
92 | Name = join("-", ["jenkins-worker-tf", count.index + 1])
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/network_setup.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | profile = var.profile
3 | region = var.region-master
4 | alias = "region-master"
5 | }
6 |
7 | provider "aws" {
8 | profile = var.profile
9 | region = var.region-worker
10 | alias = "region-worker"
11 | }
12 |
13 | #Create VPC in us-east-1
14 | resource "aws_vpc" "vpc_master" {
15 | provider = aws.region-master
16 | cidr_block = "10.0.0.0/16"
17 | enable_dns_support = true
18 | enable_dns_hostnames = true
19 | tags = {
20 | Name = "master-vpc-jenkins"
21 | }
22 |
23 | }
24 |
25 | #Create VPC in us-west-2
26 | resource "aws_vpc" "vpc_master_oregon" {
27 | provider = aws.region-worker
28 | cidr_block = "192.168.0.0/16"
29 | enable_dns_support = true
30 | enable_dns_hostnames = true
31 | tags = {
32 | Name = "worker-vpc-jenkins"
33 | }
34 |
35 | }
36 |
37 | #Initiate Peering connection request from us-east-1
38 | resource "aws_vpc_peering_connection" "uswest1-uswest2" {
39 | provider = aws.region-master
40 | peer_vpc_id = aws_vpc.vpc_master_oregon.id
41 | vpc_id = aws_vpc.vpc_master.id
42 | #auto_accept = true
43 | peer_region = var.region-worker
44 |
45 | }
46 |
47 | #Create IGW in us-east-1
48 | resource "aws_internet_gateway" "igw" {
49 | provider = aws.region-master
50 | vpc_id = aws_vpc.vpc_master.id
51 | }
52 |
53 | #Create IGW in us-west-2
54 | resource "aws_internet_gateway" "igw-oregon" {
55 | provider = aws.region-worker
56 | vpc_id = aws_vpc.vpc_master_oregon.id
57 | }
58 |
59 | #Accept VPC peering request in us-west-2 from us-east-1
60 | resource "aws_vpc_peering_connection_accepter" "accept_peering" {
61 | provider = aws.region-worker
62 | vpc_peering_connection_id = aws_vpc_peering_connection.uswest1-uswest2.id
63 | auto_accept = true
64 | }
65 |
66 | #Create route table in us-east-1
67 | resource "aws_route_table" "internet_route" {
68 | provider = aws.region-master
69 | vpc_id = aws_vpc.vpc_master.id
70 | route {
71 | cidr_block = "0.0.0.0/0"
72 | gateway_id = aws_internet_gateway.igw.id
73 | }
74 | }
75 |
76 | #Get all available AZ's in VPC for master region
77 | data "aws_availability_zones" "azs" {
78 | provider = aws.region-master
79 | state = "available"
80 | }
81 |
82 | #Create subnet # 1 in us-east-1
83 | resource "aws_subnet" "subnet_1" {
84 | provider = aws.region-master
85 | availability_zone = element(data.aws_availability_zones.azs.names, 0)
86 | vpc_id = aws_vpc.vpc_master.id
87 | cidr_block = "10.0.1.0/24"
88 | }
89 |
90 | #Create subnet #2 in us-east-1
91 | resource "aws_subnet" "subnet_2" {
92 | provider = aws.region-master
93 | vpc_id = aws_vpc.vpc_master.id
94 | availability_zone = element(data.aws_availability_zones.azs.names, 1)
95 | cidr_block = "10.0.2.0/24"
96 | }
97 |
98 |
99 | #Create subnet in us-west-2
100 | resource "aws_subnet" "subnet_1_oregon" {
101 | provider = aws.region-worker
102 | vpc_id = aws_vpc.vpc_master_oregon.id
103 | cidr_block = "192.168.1.0/24"
104 | }
105 |
106 | #Create route table in us-west-2
107 | resource "aws_route_table" "internet_route_oregon" {
108 | provider = aws.region-worker
109 | vpc_id = aws_vpc.vpc_master_oregon.id
110 | route {
111 | cidr_block = "0.0.0.0/0"
112 | gateway_id = aws_internet_gateway.igw-oregon.id
113 | }
114 | }
115 |
116 | #Create association between route table and subnet_1 in us-east-1
117 | resource "aws_route_table_association" "internet_association" {
118 | provider = aws.region-master
119 | subnet_id = aws_subnet.subnet_1.id
120 | route_table_id = aws_route_table.internet_route.id
121 | }
122 |
123 | #Create association between route table and subnet_1_oregon in us-west-2
124 | resource "aws_route_table_association" "internet_association_oregon" {
125 | provider = aws.region-worker
126 | subnet_id = aws_subnet.subnet_1_oregon.id
127 | route_table_id = aws_route_table.internet_route_oregon.id
128 | }
129 |
130 |
131 | #Create SG for allowing TCP/8080 from * and TCP/22 from your IP in us-east-1
132 | resource "aws_security_group" "jenkins-sg" {
133 | provider = aws.region-master
134 | name = "jenkins-sg"
135 | description = "Allow TCP/8080 & TCP/22"
136 | vpc_id = aws_vpc.vpc_master.id
137 | ingress {
138 | description = "Allow 22 from our public IP"
139 | from_port = 22
140 | to_port = 22
141 | protocol = "tcp"
142 | cidr_blocks = [var.external_ip]
143 | }
144 | ingress {
145 | description = "allow anyone on port 8080"
146 | from_port = 8080
147 | to_port = 8080
148 | protocol = "tcp"
149 | cidr_blocks = ["0.0.0.0/0"]
150 | }
151 | ingress {
152 | description = "allow traffic from us-west-2"
153 | from_port = 0
154 | to_port = 0
155 | protocol = "-1"
156 | cidr_blocks = ["192.168.1.0/24"]
157 | }
158 | egress {
159 | from_port = 0
160 | to_port = 0
161 | protocol = "-1"
162 | cidr_blocks = ["0.0.0.0/0"]
163 | }
164 | }
165 |
166 | #Create SG for LB, only TCP/80,TCP/443 and access to jenkins-sg
167 | resource "aws_security_group" "lb-sg" {
168 | provider = aws.region-master
169 | name = "lb-sg"
170 | description = "Allow 443 and traffic to Jenkins SG"
171 | vpc_id = aws_vpc.vpc_master.id
172 | ingress {
173 | description = "Allow 443 from anywhere"
174 | from_port = 443
175 | to_port = 443
176 | protocol = "tcp"
177 | cidr_blocks = ["0.0.0.0/0"]
178 | }
179 | ingress {
180 | description = "Allow 80 from anywhere for redirection"
181 | from_port = 80
182 | to_port = 80
183 | protocol = "tcp"
184 | cidr_blocks = ["0.0.0.0/0"]
185 | }
186 | ingress {
187 | description = "Allow traffic to jenkins-sg"
188 | from_port = 0
189 | to_port = 0
190 | protocol = "tcp"
191 | security_groups = [aws_security_group.jenkins-sg.id]
192 | }
193 | egress {
194 | from_port = 0
195 | to_port = 0
196 | protocol = "-1"
197 | cidr_blocks = ["0.0.0.0/0"]
198 | }
199 | }
200 |
201 | #Create SG for allowing TCP/22 from your IP in us-west-2
202 | resource "aws_security_group" "jenkins-sg-oregon" {
203 | provider = aws.region-worker
204 |
205 | name = "jenkins-sg-oregon"
206 | description = "Allow TCP/8080 & TCP/22"
207 | vpc_id = aws_vpc.vpc_master_oregon.id
208 | ingress {
209 | description = "Allow 22 from our public IP"
210 | from_port = 22
211 | to_port = 22
212 | protocol = "tcp"
213 | cidr_blocks = [var.external_ip]
214 | }
215 | ingress {
216 | description = "Allow traffic from us-east-1"
217 | from_port = 0
218 | to_port = 0
219 | protocol = "-1"
220 | cidr_blocks = ["10.0.1.0/24"]
221 | }
222 | egress {
223 | from_port = 0
224 | to_port = 0
225 | protocol = "-1"
226 | cidr_blocks = ["0.0.0.0/0"]
227 | }
228 | }
229 |
230 | #Create route in us-east-1 subnet for comms with peered VPC
231 | resource "aws_route" "to_us-west-2-peered" {
232 | provider = aws.region-master
233 | route_table_id = aws_route_table.internet_route.id
234 | destination_cidr_block = "192.168.1.0/24"
235 | vpc_peering_connection_id = aws_vpc_peering_connection.uswest1-uswest2.id
236 |
237 | }
238 | #Create route in us-west-2 subnet for comms with peered VPC
239 | resource "aws_route" "to_us-east-1-peered" {
240 | provider = aws.region-worker
241 | route_table_id = aws_route_table.internet_route_oregon.id
242 | destination_cidr_block = "10.0.1.0/24"
243 | vpc_peering_connection_id = aws_vpc_peering_connection.uswest1-uswest2.id
244 |
245 | }
246 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/null_provisioners.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "inventory_master" {
2 | triggers = {
3 | my_value1 = join(",", [aws_key_pair.master-key.key_name])
4 | }
5 | provisioner "local-exec" {
6 | command = <<EOD
7 | cat <<EOF > ansible_templates/inventory
8 | localhost ansible_connection=local
9 | [jenkins-master]
10 | EOF
11 | EOD
12 | }
13 | }
14 |
15 | resource "null_resource" "inventory_worker" {
16 | triggers = {
17 | my_value2 = join(",", [aws_key_pair.worker-key.key_name])
18 | }
19 | provisioner "local-exec" {
20 | command = <<EOD
21 | cat <<EOF > ansible_templates/inventory_worker
22 | localhost ansible_connection=local
23 | [worker]
24 | EOF
25 | EOD
26 | }
27 | }
28 |
29 | resource "null_resource" "del_inventory_master" {
30 | triggers = {
31 | my_value1 = join(",", [aws_key_pair.master-key.key_name])
32 | }
33 | provisioner "local-exec" {
34 | when = destroy
35 | command = "rm -f ansible_templates/inventory"
36 | }
37 | }
38 |
39 | resource "null_resource" "del_inventory_worker" {
40 | triggers = {
41 | my_value2 = join(",", [aws_key_pair.worker-key.key_name])
42 | }
43 | provisioner "local-exec" {
44 | when = destroy
45 | command = "rm -f ansible_templates/inventory_worker"
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/outputs.tf:
--------------------------------------------------------------------------------
1 | output "amiId-us-east-1" {
2 | value = data.aws_ssm_parameter.linuxAmi.value
3 | }
4 |
5 | output "amiId-us-west-2" {
6 | value = data.aws_ssm_parameter.linuxAmiOregon.value
7 | }
8 | output "publicIp-us-east-1" {
9 | value = aws_instance.jenkins-master.public_ip
10 | }
11 | output "privateIp-us-east-1" {
12 | value = aws_instance.jenkins-master.private_ip
13 | }
14 | output "publicIps-us-west-2" {
15 | value = {
16 | for instance in aws_instance.jenkins-worker-oregon :
17 | instance.id => instance.public_ip
18 | }
19 | }
20 | output "privateIps-us-west-2" {
21 | value = {
22 | for instance in aws_instance.jenkins-worker-oregon :
23 | instance.id => instance.private_ip
24 | }
25 | }
26 |
27 | output "url" {
28 | value = aws_route53_record.jenkins.fqdn
29 | }
30 |
--------------------------------------------------------------------------------
/aws_la_cloudplayground_version/variables.tf:
--------------------------------------------------------------------------------
1 | variable "external_ip" {
2 | type = string
3 | default = "0.0.0.0/0"
4 | }
5 |
6 | variable "instance-type" {
7 | type = string
8 | default = "t3.micro"
9 | # validation {
10 | # condition = can(regex("[^t2]", var.instance-type))
11 | # error_message = "Instance type cannot be anything other than t2 or t3 type and also not t3a.micro."
12 | # }
13 | }
14 |
15 | variable "dns-name" {
16 | type = string
17 | default = "" #example cmcloudlab1234.info.
18 | }
59 |
60 |
61 |
--------------------------------------------------------------------------------
/course_lesson_resources/Lesson_ConfigMgmt_Provisioners/ansible_modification.cfg:
--------------------------------------------------------------------------------
1 | #Add these lines under the [defaults] section in ansible.cfg
2 | #This configuration assumes that the EC2 Dynamic Inventory Config file is at the path below
3 |
4 | inventory = ./ansible_templates/inventory_aws/tf_aws_ec2.yml
5 | enable_plugins = aws_ec2
6 | interpreter_python = auto_silent
7 |
--------------------------------------------------------------------------------
/course_lesson_resources/Lesson_ConfigMgmt_Provisioners/jenkins-master-sample.yml:
--------------------------------------------------------------------------------
1 | #Ansible Jenkins Master, sample playbook - jenkins-master-sample.yml
2 | ---
3 | - hosts: "{{ passed_in_hosts }}"
4 | become: yes
5 | remote_user: ec2-user
6 | become_user: root
7 | tasks:
8 | - name: install Git client
9 | yum:
10 | name: git
11 | state: present
12 |
--------------------------------------------------------------------------------
/course_lesson_resources/Lesson_ConfigMgmt_Provisioners/jenkins-worker-sample.yml:
--------------------------------------------------------------------------------
1 | #Ansible Jenkins Worker, sample playbook - jenkins-worker-sample.yml
2 | ---
3 | - hosts: "{{ passed_in_hosts }}"
4 | become: yes
5 | remote_user: ec2-user
6 | become_user: root
7 | tasks:
8 | - name: install jq, JSON parser
9 | yum:
10 | name: jq
11 | state: present
12 |
--------------------------------------------------------------------------------
/course_lesson_resources/Lesson_ConfigMgmt_Provisioners/master_provisioner.tf:
--------------------------------------------------------------------------------
1 | #The code below is ONLY the provisioner block which needs to be
2 | #inserted inside the resource block for Jenkins EC2 master Terraform
3 | #Jenkins Master Provisioner:
4 |
5 | provisioner "local-exec" {
6 | command = < {
13 | name = val.resource_record_name
14 | record = val.resource_record_value
15 | type = val.resource_record_type
16 | }
17 | }
18 | name = each.value.name
19 | records = [each.value.record]
20 | ttl = 60
21 | type = each.value.type
22 | zone_id = data.aws_route53_zone.dns.zone_id
23 | }
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/course_lesson_resources/Lesson_PuttingBehindDNS_ALB_HTTPS/dns_old.tf:
--------------------------------------------------------------------------------
1 | #DNS Configuration
2 |
3 | variable "dns-name" {
4 | type = string
5 | default = "" #example cmcloudlab1234.info.
6 | }
7 |
8 | #Get already publicly configured Hosted Zone on Route53 - MUST EXIST
9 | data "aws_route53_zone" "dns" {
10 | provider = aws.region-master
11 | name = var.dns-name
12 | }
13 |
14 |
15 | #dns.tf
16 | #Create Alias record towards ALB from Route53
17 | resource "aws_route53_record" "jenkins" {
18 | provider = aws.region-master
19 | zone_id = data.aws_route53_zone.dns.zone_id
20 | name = join(".", ["jenkins", data.aws_route53_zone.dns.name])
21 | type = "A"
22 | alias {
23 | name = aws_lb.application-lb.dns_name
24 | zone_id = aws_lb.application-lb.zone_id
25 | evaluate_target_health = true
26 | }
27 | }
28 |
29 | #Add to outputs.tf for better segregation
30 |
31 | output "url" {
32 | value = aws_route53_record.jenkins.fqdn
33 | }
--------------------------------------------------------------------------------
/iam_policies/terraform_deployment_iam_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "CustomPolicyForACGAWSTFCourse",
6 | "Action": [
7 | "ec2:Describe*",
8 | "ec2:Get*",
9 | "ec2:AcceptVpcPeeringConnection",
10 | "ec2:AttachInternetGateway",
11 | "ec2:AssociateRouteTable",
12 | "ec2:AuthorizeSecurityGroupEgress",
13 | "ec2:AuthorizeSecurityGroupIngress",
14 | "ec2:CreateInternetGateway",
15 | "ec2:CreateNetworkAcl",
16 | "ec2:CreateNetworkAclEntry",
17 | "ec2:CreateRoute",
18 | "ec2:CreateRouteTable",
19 | "ec2:CreateSecurityGroup",
20 | "ec2:CreateSubnet",
21 | "ec2:CreateTags",
22 | "ec2:CreateVpc",
23 | "ec2:CreateVpcPeeringConnection",
24 | "ec2:DeleteNetworkAcl",
25 | "ec2:DeleteNetworkAclEntry",
26 | "ec2:DeleteRoute",
27 | "ec2:DeleteRouteTable",
28 | "ec2:DeleteSecurityGroup",
29 | "ec2:DeleteSubnet",
30 | "ec2:DeleteTags",
31 | "ec2:DeleteVpc",
32 | "ec2:DeleteVpcPeeringConnection",
33 | "ec2:DetachInternetGateway",
34 | "ec2:DisassociateRouteTable",
35 | "ec2:DisassociateSubnetCidrBlock",
36 | "ec2:CreateKeyPair",
37 | "ec2:DeleteKeyPair",
38 | "ec2:DeleteInternetGateway",
39 | "ec2:ImportKeyPair",
40 | "ec2:ModifySubnetAttribute",
41 | "ec2:ModifyVpcAttribute",
42 | "ec2:ModifyVpcPeeringConnectionOptions",
43 | "ec2:RejectVpcPeeringConnection",
44 | "ec2:ReplaceNetworkAclAssociation",
45 | "ec2:ReplaceNetworkAclEntry",
46 | "ec2:ReplaceRoute",
47 | "ec2:ReplaceRouteTableAssociation",
48 | "ec2:RevokeSecurityGroupEgress",
49 | "ec2:RevokeSecurityGroupIngress",
50 | "ec2:RunInstances",
51 | "ec2:TerminateInstances",
52 | "ec2:UpdateSecurityGroupRuleDescriptionsEgress",
53 | "ec2:UpdateSecurityGroupRuleDescriptionsIngress",
54 | "acm:*",
55 | "elasticloadbalancing:AddListenerCertificates",
56 | "elasticloadbalancing:AddTags",
57 | "elasticloadbalancing:CreateListener",
58 | "elasticloadbalancing:CreateLoadBalancer",
59 | "elasticloadbalancing:CreateRule",
60 | "elasticloadbalancing:CreateTargetGroup",
61 | "elasticloadbalancing:DeleteListener",
62 | "elasticloadbalancing:DeleteLoadBalancer",
63 | "elasticloadbalancing:DeleteRule",
64 | "elasticloadbalancing:DeleteTargetGroup",
65 | "elasticloadbalancing:DeregisterTargets",
66 | "elasticloadbalancing:DescribeListenerCertificates",
67 | "elasticloadbalancing:DescribeListeners",
68 | "elasticloadbalancing:DescribeLoadBalancerAttributes",
69 | "elasticloadbalancing:DescribeLoadBalancers",
70 | "elasticloadbalancing:DescribeRules",
71 | "elasticloadbalancing:DescribeSSLPolicies",
72 | "elasticloadbalancing:DescribeTags",
73 | "elasticloadbalancing:DescribeTargetGroupAttributes",
74 | "elasticloadbalancing:DescribeTargetGroups",
75 | "elasticloadbalancing:DescribeTargetHealth",
76 | "elasticloadbalancing:ModifyListener",
77 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
78 | "elasticloadbalancing:ModifyRule",
79 | "elasticloadbalancing:ModifyTargetGroup",
80 | "elasticloadbalancing:ModifyTargetGroupAttributes",
81 | "elasticloadbalancing:RegisterTargets",
82 | "elasticloadbalancing:RemoveListenerCertificates",
83 | "elasticloadbalancing:RemoveTags",
84 | "elasticloadbalancing:SetSecurityGroups",
85 | "elasticloadbalancing:SetSubnets",
86 | "route53:Get*",
87 | "route53:List*",
88 | "route53:ChangeResourceRecordSets",
89 | "ssm:Describe*",
90 | "ssm:GetParameter",
91 | "ssm:GetParameters",
92 | "ssm:GetParametersByPath",
93 | "s3:CreateBucket",
94 | "s3:DeleteBucket",
95 | "s3:DeleteObject",
96 | "s3:GetBucketLocation",
97 | "s3:GetObject",
98 | "s3:HeadBucket",
99 | "s3:ListBucket",
100 | "s3:PutObject"
101 | ],
102 | "Effect": "Allow",
103 | "Resource": "*"
104 | }
105 | ]
106 | }
107 |
--------------------------------------------------------------------------------
/iam_policies/terraform_deployment_lax_iam_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [{
4 | "Sid": "CustomLaxPolicyForACGAWSTFCourse",
5 | "Action": [
6 | "ec2:*",
7 | "acm:*",
8 | "elasticloadbalancing:*",
9 | "ssm:Describe*",
10 | "ssm:Get*",
11 | "s3:*"
12 | ],
13 | "Effect": "Allow",
14 | "Resource": "*"
15 | }]
16 | }
17 |
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/README.md:
--------------------------------------------------------------------------------
1 | # Setup Notes and instructions
2 |
3 |
4 | ```
5 | 1. These templates are to be used only in a pre-configured A Cloud Guru hands-on lab environment
6 | for the Hands-on lab:
7 | "Creating Route53 records(Alias) to route traffic to an ALB using Terraform"
8 | Trying to deploy or set it up anywhere else would fail as there's info from bootstrapped files
9 | that this lab needs.
10 |
11 | 2. Once you SSH into TerraformController node as cloud_user, look for file resource_ids.txt,
12 | plug in the values for TF data variables from it into template import_resources.tf
13 |
14 | 3. Get the publicly hosted domain name(ending with a dot) by executing command in file
15 | aws_get_cp_hostedzone on the same TerraformController node and plug it into dns-name
16 | variable in variables.tf
17 | ```
18 |
19 | Execute terraform init, fmt, validate, plan and then finally apply!
20 |
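21 | A minimal sketch of that command sequence, run from within this lab folder (assuming the lab's AWS credentials are already configured):
22 |
23 | ```
24 | terraform init
25 | terraform fmt
26 | terraform validate
27 | terraform plan
28 | terraform apply
29 | ```
30 |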
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/alb_acm.tf:
--------------------------------------------------------------------------------
1 | #ACM CONFIGURATION
2 | resource "aws_acm_certificate" "aws-ssl-cert" {
3 | provider = aws.region-master
4 | domain_name = join(".", [var.site-name, data.aws_route53_zone.dns.name])
5 | validation_method = "DNS"
6 | tags = {
7 | Name = "Webservers-ACM"
8 | }
9 |
10 | }
11 |
12 | #Validates ACM issued certificate via Route53
13 | resource "aws_acm_certificate_validation" "cert" {
14 | provider = aws.region-master
15 | certificate_arn = aws_acm_certificate.aws-ssl-cert.arn
16 | for_each = aws_route53_record.cert_validation
17 | validation_record_fqdns = [aws_route53_record.cert_validation[each.key].fqdn]
18 | }
19 |
20 | ####ACM CONFIG END
21 |
22 |
23 | resource "aws_lb" "application-lb" {
24 | provider = aws.region-master
25 | name = "webservers-lb"
26 | internal = false
27 | load_balancer_type = "application"
28 | security_groups = [data.aws_security_group.security_group_lb.id]
29 | subnets = [data.aws_subnet.subnet_1.id, data.aws_subnet.subnet_2.id]
30 | tags = {
31 | Name = "Webservers-LB"
32 | }
33 | }
34 |
35 | resource "aws_lb_target_group" "app-lb-tg" {
36 | provider = aws.region-master
37 | name = "app-lb-tg"
38 | port = 80
39 | target_type = "instance"
40 | vpc_id = data.aws_vpc.vpc_master.id
41 | protocol = "HTTP"
42 | health_check {
43 | enabled = true
44 | interval = 10
45 | path = "/index.html"
46 | port = 80
47 | protocol = "HTTP"
48 | matcher = "200-399"
49 | }
50 | tags = {
51 | Name = "webserver-target-group"
52 | }
53 | }
54 |
55 | resource "aws_lb_listener" "lb-https-listener" {
56 | provider = aws.region-master
57 | load_balancer_arn = aws_lb.application-lb.arn
58 | ssl_policy = "ELBSecurityPolicy-2016-08"
59 | port = "443"
60 | protocol = "HTTPS"
61 | certificate_arn = aws_acm_certificate.aws-ssl-cert.arn
62 | default_action {
63 | type = "forward"
64 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
65 | }
66 | }
67 |
68 | resource "aws_lb_listener" "lb-http-listener" {
69 | provider = aws.region-master
70 | load_balancer_arn = aws_lb.application-lb.arn
71 | port = "80"
72 | protocol = "HTTP"
73 | default_action {
74 | type = "redirect"
75 | redirect {
76 | port = "443"
77 | protocol = "HTTPS"
78 | status_code = "HTTP_301"
79 | }
80 | }
81 | }
82 |
83 | resource "aws_lb_target_group_attachment" "apache1-attach" {
84 | provider = aws.region-master
85 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
86 | target_id = data.aws_instance.apache1.id
87 | port = 80
88 | }
89 |
90 | resource "aws_lb_target_group_attachment" "apache2-attach" {
91 | provider = aws.region-master
92 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
93 | target_id = data.aws_instance.apache2.id
94 | port = 80
95 | }
96 |
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/aws_get_cp_hostedzone:
--------------------------------------------------------------------------------
1 | aws route53 list-hosted-zones | jq -r .HostedZones[].Name | egrep "cmcloud*"
2 |
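3 | # Example output (hypothetical zone name, note the trailing dot): cmcloudlab1234.info.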
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/dns.tf:
--------------------------------------------------------------------------------
1 | #DNS Configuration
2 | #Get already publicly configured Hosted Zone on Route53 - MUST EXIST, check variables.tf for dns-name
3 | data "aws_route53_zone" "dns" {
4 | provider = aws.region-master
5 | name = var.dns-name
6 | }
7 |
8 | #Create record in hosted zone for ACM Certificate Domain verification
9 | resource "aws_route53_record" "cert_validation" {
10 | provider = aws.region-master
11 | for_each = {
12 | for val in aws_acm_certificate.aws-ssl-cert.domain_validation_options : val.domain_name => {
13 | name = val.resource_record_name
14 | record = val.resource_record_value
15 | type = val.resource_record_type
16 | }
17 | }
18 | name = each.value.name
19 | records = [each.value.record]
20 | ttl = 60
21 | type = each.value.type
22 | zone_id = data.aws_route53_zone.dns.zone_id
23 |
24 | }
25 |
26 | #Create Alias record towards ALB from Route53
27 | resource "aws_route53_record" "webservers" {
28 | provider = aws.region-master
29 | zone_id = data.aws_route53_zone.dns.zone_id
30 | name = join(".", [var.site-name, data.aws_route53_zone.dns.name])
31 | type = "A"
32 | alias {
33 | name = aws_lb.application-lb.dns_name
34 | zone_id = aws_lb.application-lb.zone_id
35 | evaluate_target_health = true
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/import_resources.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | profile = var.profile
3 | region = var.region-master
4 | alias = "region-master"
5 | }
6 |
7 | data "aws_vpc" "vpc_master" {
8 | provider = aws.region-master
9 |
10 | id = ""
11 | }
12 |
13 | data "aws_security_group" "security_group_lb" {
14 | provider = aws.region-master
15 | id = ""
16 | }
17 |
18 | data "aws_subnet" "subnet_1" {
19 | provider = aws.region-master
20 |
21 | id = ""
22 | }
23 |
24 | data "aws_subnet" "subnet_2" {
25 | provider = aws.region-master
26 |
27 | id = ""
28 | }
29 | data "aws_instance" "apache1" {
30 | provider = aws.region-master
31 | instance_id = ""
32 | }
33 | data "aws_instance" "apache2" {
34 | provider = aws.region-master
35 | instance_id = ""
36 | }
37 |
38 |
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/outputs.tf:
--------------------------------------------------------------------------------
1 | output "url" {
2 | value = aws_route53_record.webservers.fqdn
3 | }
4 |
5 | output "Apache1" {
6 | value = data.aws_instance.apache1.public_ip
7 | }
8 | output "Apache2" {
9 | value = data.aws_instance.apache2.public_ip
10 | }
11 |
12 | output "Application-LB-URL" {
13 | value = aws_lb.application-lb.dns_name
14 | }
15 |
--------------------------------------------------------------------------------
/lab_deploying_dns_acm/variables.tf:
--------------------------------------------------------------------------------
1 | variable "external_ip" {
2 | type = string
3 | default = "0.0.0.0/0"
4 | }
5 |
6 | variable "instance-type" {
7 | type = string
8 | default = "t3.micro"
9 | }
10 |
11 | variable "site-name" {
12 | type = string
13 | default = "mysite"
14 | }
15 | variable "dns-name" {
16 | type = string
17 | default = "" # e.g "cmcloudlab1234.info."
18 | }
19 |
20 | variable "profile" {
21 | type = string
22 | default = "default"
23 | }
24 |
25 | variable "region-master" {
26 | type = string
27 | default = "us-east-1"
28 | }
29 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/README.md:
--------------------------------------------------------------------------------
1 | # Setup notes
2 |
3 | ```
4 | 1. Please run the playbook "gen_ssh_key.yaml" ("ansible-playbook ansible_templates/gen_ssh_key.yaml")
5 | to generate the SSH keypair (on the local system) used by the TF templates and Ansible for connecting to
6 | remote EC2 instances.
7 |
8 | 2. If you still want to initialize the directory via "terraform init", then use the "-backend=false" flag,
9 | like so: "terraform init -backend=false"
10 | ```
11 |
12 | ## GENERAL NOTE:
13 |
14 | ```
15 | The templates in this folder are meant to be used in the ACG Hands-on lab; however, they should
16 | work in any AWS account, given that the above conditions are met. That said, this has not been tested
17 | extensively and the author gives no guarantees for it.
18 | ```
19 |
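20 | A minimal sketch of those commands, run from within this lab folder (the init flag applies only if you initialize the directory yourself, per item 2 above):
21 |
22 | ```
23 | ansible-playbook ansible_templates/gen_ssh_key.yaml
24 | terraform init -backend=false
25 | ```
26 |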
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/alb.tf:
--------------------------------------------------------------------------------
1 | resource "aws_lb" "application-lb" {
2 | provider = aws.region-master
3 | name = "jenkins-lb"
4 | internal = false
5 | load_balancer_type = "application"
6 | security_groups = [aws_security_group.lb-sg.id]
7 | subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id]
8 | tags = {
9 | Name = "Jenkins-LB"
10 | }
11 | }
12 |
13 | resource "aws_lb_target_group" "app-lb-tg" {
14 | provider = aws.region-master
15 | name = "app-lb-tg"
16 | port = 8080
17 | target_type = "instance"
18 | vpc_id = aws_vpc.vpc_useast.id
19 | protocol = "HTTP"
20 | health_check {
21 | enabled = true
22 | interval = 10
23 | path = "/login"
24 | port = 8080
25 | protocol = "HTTP"
26 | matcher = "200-299"
27 | }
28 | tags = {
29 | Name = "jenkins-target-group"
30 | }
31 | }
32 |
33 | resource "aws_lb_listener" "jenkins-listener-http" {
34 | provider = aws.region-master
35 | load_balancer_arn = aws_lb.application-lb.arn
36 | port = "80"
37 | protocol = "HTTP"
38 | default_action {
39 | type = "forward"
40 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
41 | }
42 | }
43 |
44 | resource "aws_lb_target_group_attachment" "jenkins-master-attach" {
45 | provider = aws.region-master
46 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
47 | target_id = aws_instance.jenkins-master.id
48 | port = 8080
49 | }
50 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible.cfg:
--------------------------------------------------------------------------------
1 | # Example config file for ansible -- https://ansible.com/
2 | # =======================================================
3 |
4 | # Nearly all parameters can be overridden in ansible-playbook
5 | # or with command line flags. Ansible will read ANSIBLE_CONFIG,
6 | # ansible.cfg in the current working directory, .ansible.cfg in
7 | # the home directory, or /etc/ansible/ansible.cfg, whichever it
8 | # finds first
9 |
10 | # For a full list of available options, run ansible-config list or see the
11 | # documentation: https://docs.ansible.com/ansible/latest/reference_appendices/config.html.
12 |
13 | [defaults]
14 | inventory = ./ansible_templates/inventory_aws/tf_aws_ec2.yml
15 | enable_plugins = aws_ec2
16 | interpreter_python = auto_silent
17 | #library = ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
18 | #module_utils = ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
19 | #remote_tmp = ~/.ansible/tmp
20 | #local_tmp = ~/.ansible/tmp
21 | #forks = 5
22 | #poll_interval = 0.001
23 | #ask_pass = False
24 | #transport = smart
25 | # Plays will gather facts by default, which contain information about
26 | # the remote system.
27 | #
28 | # smart - gather by default, but don't regather if already gathered
29 | # implicit - gather by default, turn off with gather_facts: False
30 | # explicit - do not gather by default, must say gather_facts: True
31 | #gathering = implicit
32 |
33 | # This only affects the gathering done by a play's gather_facts directive,
34 | # by default gathering retrieves all facts subsets
35 | # all - gather all subsets
36 | # network - gather min and network facts
37 | # hardware - gather hardware facts (longest facts to retrieve)
38 | # virtual - gather min and virtual facts
39 | # facter - import facts from facter
40 | # ohai - import facts from ohai
41 | # You can combine them using comma (ex: network,virtual)
42 | # You can negate them using ! (ex: !hardware,!facter,!ohai)
43 | # A minimal set of facts is always gathered.
44 | #
45 | #gather_subset = all
46 |
47 | # some hardware related facts are collected
48 | # with a maximum timeout of 10 seconds. This
49 | # option lets you increase or decrease that
50 | # timeout to something more suitable for the
51 | # environment.
52 | #
53 | #gather_timeout = 10
54 |
55 | # Ansible facts are available inside the ansible_facts.* dictionary
56 | # namespace. This setting maintains the behaviour which was the default prior
57 | # to 2.5, duplicating these variables into the main namespace, each with a
58 | # prefix of 'ansible_'.
59 | # This variable is set to True by default for backwards compatibility. It
60 | # will be changed to a default of 'False' in a future release.
61 | #
62 | #inject_facts_as_vars = True
63 |
64 | # Paths to search for collections, colon separated
65 | # collections_paths = ~/.ansible/collections:/usr/share/ansible/collections
66 |
67 | # Paths to search for roles, colon separated
68 | #roles_path = ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
69 |
70 | # Host key checking is enabled by default
71 | host_key_checking = False
72 |
73 | # You can only have one 'stdout' callback type enabled at a time. The default
74 | # is 'default'. The 'yaml' or 'debug' stdout callback plugins are easier to read.
75 | #
76 | #stdout_callback = default
77 | #stdout_callback = yaml
78 | #stdout_callback = debug
79 |
80 |
81 | # Ansible ships with some plugins that require whitelisting,
82 | # this is done to avoid running all of a type by default.
83 | # These setting lists those that you want enabled for your system.
84 | # Custom plugins should not need this unless plugin author disables them
85 | # by default.
86 | #
87 | # Enable callback plugins, they can output to stdout but cannot be 'stdout' type.
88 | #callback_whitelist = timer, mail
89 |
90 | # Determine whether includes in tasks and handlers are "static" by
91 | # default. As of 2.0, includes are dynamic by default. Setting these
92 | # values to True will make includes behave more like they did in the
93 | # 1.x versions.
94 | #
95 | #task_includes_static = False
96 | #handler_includes_static = False
97 |
98 | # Controls if a missing handler for a notification event is an error or a warning
99 | #error_on_missing_handler = True
100 |
101 | # Default timeout for connection plugins
102 | #timeout = 10
103 |
104 | # Default user to use for playbooks if user is not specified
105 | # Uses the connection plugin's default, normally the user currently executing Ansible,
106 | # unless a different user is specified here.
107 | #
108 | #remote_user = root
109 |
110 | # Logging is off by default unless this path is defined.
111 | #log_path = /var/log/ansible.log
112 |
113 | # Default module to use when running ad-hoc commands
114 | #module_name = command
115 |
116 | # Use this shell for commands executed under sudo.
117 | # you may need to change this to /bin/bash in rare instances
118 | # if sudo is constrained.
119 | #
120 | #executable = /bin/sh
121 |
122 | # By default, variables from roles will be visible in the global variable
123 | # scope. To prevent this, set the following option to True, and only
124 | # tasks and handlers within the role will see the variables there
125 | #
126 | #private_role_vars = False
127 |
128 | # List any Jinja2 extensions to enable here.
129 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
130 |
131 | # If set, always use this private key file for authentication, same as
132 | # if passing --private-key to ansible or ansible-playbook
133 | #
134 | #private_key_file = /path/to/file
135 |
136 | # If set, configures the path to the Vault password file as an alternative to
137 | # specifying --vault-password-file on the command line. This can also be
138 | # an executable script that returns the vault password to stdout.
139 | #
140 | #vault_password_file = /path/to/vault_password_file
141 |
142 | # Format of string {{ ansible_managed }} available within Jinja2
143 | # templates indicates to users editing templates files will be replaced.
144 | # replacing {file}, {host} and {uid} and strftime codes with proper values.
145 | #
146 | #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
147 |
148 | # {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
149 | # in some situations so the default is a static string:
150 | #
151 | #ansible_managed = Ansible managed
152 |
153 | # By default, ansible-playbook will display "Skipping [host]" if it determines a task
154 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
155 | # messages. NOTE: the task header will still be shown regardless of whether or not the
156 | # task is skipped.
157 | #
158 | #display_skipped_hosts = True
159 |
160 | # By default, if a task in a playbook does not include a name: field then
161 | # ansible-playbook will construct a header that includes the task's action but
162 | # not the task's args. This is a security feature because ansible cannot know
163 | # if the *module* considers an argument to be no_log at the time that the
164 | # header is printed. If your environment doesn't have a problem securing
165 | # stdout from ansible-playbook (or you have manually specified no_log in your
166 | # playbook on all of the tasks where you have secret information) then you can
167 | # safely set this to True to get more informative messages.
168 | #
169 | #display_args_to_stdout = False
170 |
171 | # Ansible will raise errors when attempting to dereference
172 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line
173 | # to change this behavior.
174 | #
175 | #error_on_undefined_vars = False
176 |
177 | # Ansible may display warnings based on the configuration of the
178 | # system running ansible itself. This may include warnings about 3rd party packages or
179 | # other conditions that should be resolved if possible.
180 | # To disable these warnings, set the following value to False:
181 | #
182 | system_warnings = False
183 |
184 | # Ansible may display deprecation warnings for language
185 | # features that should no longer be used and will be removed in future versions.
186 | # To disable these warnings, set the following value to False:
187 | #
188 | deprecation_warnings = False
189 |
190 | # Ansible can optionally warn when usage of the shell and
191 | # command module appear to be simplified by using a default Ansible module
192 | # instead. These warnings can be silenced by adjusting the following
193 | # setting or adding warn=yes or warn=no to the end of the command line
194 | # parameter string. This will for example suggest using the git module
195 | # instead of shelling out to the git command.
196 | #
197 | command_warnings = False
198 |
199 |
200 | # set plugin path directories here, separate with colons
201 | #action_plugins = /usr/share/ansible/plugins/action
202 | #become_plugins = /usr/share/ansible/plugins/become
203 | #cache_plugins = /usr/share/ansible/plugins/cache
204 | #callback_plugins = /usr/share/ansible/plugins/callback
205 | #connection_plugins = /usr/share/ansible/plugins/connection
206 | #lookup_plugins = /usr/share/ansible/plugins/lookup
207 | #inventory_plugins = /usr/share/ansible/plugins/inventory
208 | #vars_plugins = /usr/share/ansible/plugins/vars
209 | #filter_plugins = /usr/share/ansible/plugins/filter
210 | #test_plugins = /usr/share/ansible/plugins/test
211 | #terminal_plugins = /usr/share/ansible/plugins/terminal
212 | #strategy_plugins = /usr/share/ansible/plugins/strategy
213 |
214 |
215 | # Ansible will use the 'linear' strategy but you may want to try another one.
216 | #strategy = linear
217 |
218 | # By default, callbacks are not loaded for /bin/ansible. Enable this if you
219 | # want, for example, a notification or logging callback to also apply to
220 | # /bin/ansible runs
221 | #
222 | #bin_ansible_callbacks = False
223 |
224 |
225 | # Don't like cows? that's unfortunate.
226 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
227 | #nocows = 1
228 |
229 | # Set which cowsay stencil you'd like to use by default. When set to 'random',
230 | # a random stencil will be selected for each task. The selection will be filtered
231 | # against the `cow_whitelist` option below.
232 | #
233 | #cow_selection = default
234 | #cow_selection = random
235 |
236 | # When using the 'random' option for cowsay, stencils will be restricted to this list.
237 | # it should be formatted as a comma-separated list with no spaces between names.
238 | # NOTE: line continuations here are for formatting purposes only, as the INI parser
239 | # in python does not support them.
240 | #
241 | #cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
242 | # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
243 | # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
244 |
245 | # Don't like colors either?
246 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
247 | #
248 | #nocolor = 1
249 |
250 | # If set to a persistent type (not 'memory', for example 'redis') fact values
251 | # from previous runs in Ansible will be stored. This may be useful when
252 | # wanting to use, for example, IP information from one group of servers
253 | # without having to talk to them in the same playbook run to get their
254 | # current IP information.
255 | #
256 | #fact_caching = memory
257 |
258 | # This option tells Ansible where to cache facts. The value is plugin dependent.
259 | # For the jsonfile plugin, it should be a path to a local directory.
260 | # For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0
261 | #
262 | #fact_caching_connection=/tmp
263 |
264 | # retry files
265 | # When a playbook fails a .retry file can be created that will be placed in ~/
266 | # You can enable this feature by setting retry_files_enabled to True
267 | # and you can change the location of the files by setting retry_files_save_path
268 | #
269 | #retry_files_enabled = False
270 | #retry_files_save_path = ~/.ansible-retry
271 |
272 | # prevents logging of task data, off by default
273 | #no_log = False
274 |
275 | # prevents logging of tasks, but only on the targets, data is still logged on the master/controller
276 | #no_target_syslog = False
277 |
278 | # Controls whether Ansible will raise an error or warning if a task has no
279 | # choice but to create world readable temporary files to execute a module on
280 | # the remote machine. This option is False by default for security. Users may
281 | # turn this on to have behaviour more like Ansible prior to 2.1.x. See
282 | # https://docs.ansible.com/ansible/latest/user_guide/become.html#becoming-an-unprivileged-user
283 | # for more secure ways to fix this than enabling this option.
284 | #
285 | #allow_world_readable_tmpfiles = False
286 |
287 | # Controls what compression method is used for new-style ansible modules when
288 | # they are sent to the remote system. The compression types depend on having
289 | # support compiled into both the controller's python and the client's python.
290 | # The names should match with the python Zipfile compression types:
291 | # * ZIP_STORED (no compression. available everywhere)
292 | # * ZIP_DEFLATED (uses zlib, the default)
293 | # These values may be set per host via the ansible_module_compression inventory variable.
294 | #
295 | #module_compression = 'ZIP_DEFLATED'
296 |
297 | # This controls the cutoff point (in bytes) on --diff for files
298 | # set to 0 for unlimited (RAM may suffer!).
299 | #
300 | #max_diff_size = 104448
301 |
302 | # Controls showing custom stats at the end, off by default
303 | #show_custom_stats = False
304 |
305 | # Controls which files to ignore when using a directory as inventory with
306 | # possibly multiple sources (both static and dynamic)
307 | #
308 | #inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
309 |
310 | # This family of modules use an alternative execution path optimized for network appliances
311 | # only update this setting if you know how this works, otherwise it can break module execution
312 | #
313 | #network_group_modules=eos, nxos, ios, iosxr, junos, vyos
314 |
315 | # When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
316 | # a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
317 | # jinja2 templating language which will be run through the templating engine.
318 | # ENABLING THIS COULD BE A SECURITY RISK
319 | #
320 | #allow_unsafe_lookups = False
321 |
322 | # set default errors for all plays
323 | #any_errors_fatal = False
324 |
325 |
326 | [inventory]
327 | # List of enabled inventory plugins and the order in which they are used.
328 | #enable_plugins = host_list, script, auto, yaml, ini, toml
329 |
330 | # Ignore these extensions when parsing a directory as inventory source
331 | #ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
332 |
333 | # ignore files matching these patterns when parsing a directory as inventory source
334 | #ignore_patterns=
335 |
336 | # If 'True' unparsed inventory sources become fatal errors, otherwise they are warnings.
337 | #unparsed_is_failed = False
338 |
339 |
340 | [privilege_escalation]
341 | #become = False
342 | #become_method = sudo
343 | #become_ask_pass = False
344 |
345 |
346 | ## Connection Plugins ##
347 |
348 | # Settings for each connection plugin go under a section titled '[[plugin_name]_connection]'
349 | # To view available connection plugins, run ansible-doc -t connection -l
350 | # To view available options for a connection plugin, run ansible-doc -t connection [plugin_name]
351 | # https://docs.ansible.com/ansible/latest/plugins/connection.html
352 |
353 | [paramiko_connection]
354 | # uncomment this line to cause the paramiko connection plugin to not record new host
355 | # keys encountered. Increases performance on new host additions. Setting works independently of the
356 | # host key checking setting above.
357 | #record_host_keys=False
358 |
359 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
360 | # line to disable this behaviour.
361 | #pty = False
362 |
363 | # paramiko will default to looking for SSH keys initially when trying to
364 | # authenticate to remote devices. This is a problem for some network devices
365 | # that close the connection after a key failure. Uncomment this line to
366 | # disable the Paramiko look for keys function
367 | #look_for_keys = False
368 |
369 | # When using persistent connections with Paramiko, the connection runs in a
370 | # background process. If the host doesn't already have a valid SSH key, by
371 | # default Ansible will prompt to add the host key. This will cause connections
372 | # running in background processes to fail. Uncomment this line to have
373 | # Paramiko automatically add host keys.
374 | #host_key_auto_add = True
375 |
376 |
377 | [ssh_connection]
378 | # ssh arguments to use
379 | # Leaving off ControlPersist will result in poor performance, so use
380 | # paramiko on older platforms rather than removing it, -C controls compression use
381 | #ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
382 |
383 | # The base directory for the ControlPath sockets.
384 | # This is the "%(directory)s" in the control_path option
385 | #
386 | # Example:
387 | # control_path_dir = /tmp/.ansible/cp
388 | #control_path_dir = ~/.ansible/cp
389 |
390 | # The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
391 | # port and username (empty string in the config). The hash mitigates a common problem users
392 | # found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
393 | # In those cases, a "too long for Unix domain socket" ssh error would occur.
394 | #
395 | # Example:
396 | # control_path = %(directory)s/%%C
397 | #control_path =
398 |
399 | # Enabling pipelining reduces the number of SSH operations required to
400 | # execute a module on the remote server. This can result in a significant
401 | # performance improvement when enabled, however when using "sudo:" you must
402 | # first disable 'requiretty' in /etc/sudoers
403 | #
404 | # By default, this option is disabled to preserve compatibility with
405 | # sudoers configurations that have requiretty (the default on many distros).
406 | #
407 | #pipelining = False
408 |
409 | # Control the mechanism for transferring files (old)
410 | # * smart = try sftp and then try scp [default]
411 | # * True = use scp only
412 | # * False = use sftp only
413 | #scp_if_ssh = smart
414 |
415 | # Control the mechanism for transferring files (new)
416 | # If set, this will override the scp_if_ssh option
417 | # * sftp = use sftp to transfer files
418 | # * scp = use scp to transfer files
419 | # * piped = use 'dd' over SSH to transfer files
420 | # * smart = try sftp, scp, and piped, in that order [default]
421 | #transfer_method = smart
422 |
423 | # If False, sftp will not use batch mode to transfer files. This may cause some
424 | # types of file transfer failures impossible to catch however, and should
425 | # only be disabled if your sftp version has problems with batch mode
426 | #sftp_batch_mode = False
427 |
428 | # The -tt argument is passed to ssh when pipelining is not enabled because sudo
429 | # requires a tty by default.
430 | #usetty = True
431 |
432 | # Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
433 | # For each retry attempt, there is an exponential backoff,
434 | # so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
435 | #retries = 3
436 |
437 |
438 | [persistent_connection]
439 | # Configures the persistent connection timeout value in seconds. This value is
440 | # how long the persistent connection will remain idle before it is destroyed.
441 | # If the connection doesn't receive a request before the timeout value
442 | # expires, the connection is shutdown. The default value is 30 seconds.
443 | #connect_timeout = 30
444 |
445 | # The command timeout value defines the amount of time to wait for a command
446 | # or RPC call before timing out. The value for the command timeout must
447 | # be less than the value of the persistent connection idle timeout (connect_timeout)
448 | # The default value is 30 second.
449 | #command_timeout = 30
450 |
451 |
452 | ## Become Plugins ##
453 |
454 | # Settings for become plugins go under a section named '[[plugin_name]_become_plugin]'
455 | # To view available become plugins, run ansible-doc -t become -l
456 | # To view available options for a specific plugin, run ansible-doc -t become [plugin_name]
457 | # https://docs.ansible.com/ansible/latest/plugins/become.html
458 |
459 | [sudo_become_plugin]
460 | #flags = -H -S -n
461 | #user = root
462 |
463 |
464 | [selinux]
465 | # file systems that require special treatment when dealing with security context
466 | # the default behaviour that copies the existing context or uses the user default
467 | # needs to be changed to use the file system dependent context.
468 | #special_context_filesystems=fuse,nfs,vboxsf,ramfs,9p,vfat
469 |
470 | # Set this to True to allow libvirt_lxc connections to work without SELinux.
471 | #libvirt_lxc_noseclabel = False
472 |
473 |
474 | [colors]
475 | #highlight = white
476 | #verbose = blue
477 | #warn = bright purple
478 | #error = red
479 | #debug = dark gray
480 | #deprecate = purple
481 | #skip = cyan
482 | #unreachable = red
483 | #ok = green
484 | #changed = yellow
485 | #diff_add = green
486 | #diff_remove = red
487 | #diff_lines = cyan
488 |
489 |
490 | [diff]
491 | # Always print diff when running ( same as always running with -D/--diff )
492 | #always = False
493 |
494 | # Set how many context lines to show in diff
495 | #context = 3
496 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/cred-privkey.j2:
--------------------------------------------------------------------------------
1 |
2 | GLOBAL
3 | JenkinsCreds-{{ ipv4 }}
4 | Credentials for Workers
5 | ec2-user
6 |
7 |
8 | {{ priv_key }}
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/gen_ssh_key.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | tasks:
4 | - name: Generate an SSH keypair for use in Terraform templates
5 | openssh_keypair:
6 | path: ~/.ssh/id_rsa
7 | state: present
8 | type: rsa
9 | size: 2048
10 | force: no
11 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/install_jenkins.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: "{{ passed_in_hosts }}"
3 | become: yes
4 | remote_user: ec2-user
5 | become_user: root
6 | tasks:
7 | - name: install dependencies
8 | yum:
9 | name: "{{ package }} "
10 | state: present
11 | vars:
12 | package:
13 | - wget
14 | - java-1.8.0-openjdk-devel
15 | - git
16 |
17 | - name: clone repo
18 | git:
19 | repo: https://github.com/linuxacademy/content-terraform-jenkins.git
20 | dest: /tmp/jenkins
21 |
22 | - name: Setup Jenkins Repo
23 | get_url:
24 | url: https://pkg.jenkins.io/redhat/jenkins.repo
25 | dest: /etc/yum.repos.d/jenkins.repo
26 |
27 | - name: Import Jenkins Repo GPG key
28 | rpm_key:
29 | state: present
30 | key: https://pkg.jenkins.io/redhat/jenkins.io.key
31 |
32 | - name: install Jenkins
33 | yum:
34 | name: jenkins
35 | state: present
36 |
37 | - name: Temporarily Stop Jenkins
38 | service:
39 | name: jenkins
40 | state: stopped
41 |
42 | - name: Delete Jenkins default dir
43 | file:
44 | state: absent
45 | path: /var/lib/jenkins
46 |
47 | - name: Copy over cloned Jenkins data to /var/lib/jenkins
48 | copy:
49 | remote_src: yes
50 | src: /tmp/jenkins/jenkins
51 | dest: /var/lib
52 |
53 |
54 | - name: Restore jenkins user ownership on /var/lib/jenkins
55 | file:
56 | path: /var/lib/jenkins
57 | state: directory
58 | recurse: yes
59 | owner: jenkins
60 |
61 | - name: Start Jenkins
62 | service:
63 | name: jenkins
64 | state: started
65 | enabled: yes
66 |
67 | - name: Wait until Jenkins is up
68 | shell: result_first=1; while [[ $result_first != 0 ]]; do if [[ `grep 'Jenkins is fully up and running' /var/log/jenkins/jenkins.log` ]];then result_first=0;else sleep 4;fi;done
69 | register: result
70 | until: result.rc == 0
71 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/install_worker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: "{{ passed_in_hosts }}"
3 | become: yes
4 | remote_user: ec2-user
5 | tasks:
6 | - name: Generate SSH Keypair
7 | openssh_keypair:
8 | path: /home/ec2-user/.ssh/id_rsa
9 | type: rsa
10 | size: 2048
11 | group: ec2-user
12 | owner: ec2-user
13 |
14 | - name: Add own key to authorized_keys file
15 | shell: "cat /home/ec2-user/.ssh/id_rsa.pub >> /home/ec2-user/.ssh/authorized_keys && chmod 600 /home/ec2-user/.ssh/authorized_keys"
16 |
17 | - name: Copy over Jenkins Worker Node creation payload xml
18 | vars:
19 | ipv4: "{{ ansible_default_ipv4.address }}"
20 | template:
21 | src: node.j2
22 | dest: /home/ec2-user/node.xml
23 | owner: ec2-user
24 | mode: '0644'
25 |
26 | - name: Read generated private key id_rsa
27 | slurp:
28 | src: /home/ec2-user/.ssh/id_rsa
29 | register: pkey
30 |
31 | - name: Copy over creds.xml and create Jenkins credential
32 | vars:
33 | priv_key: "{{ pkey['content'] | b64decode }}"
34 | ipv4: "{{ ansible_default_ipv4.address }}"
35 | template:
36 | src: cred-privkey.j2
37 | dest: /home/ec2-user/creds.xml
38 |
39 | - name: install dependencies
40 | yum:
41 | name: "{{ package }} "
42 | vars:
43 | package:
44 | - wget
45 | - java-1.8.0-openjdk-devel
46 | - git
47 |
48 | - name: Get Jenkins CLI jar file
49 | vars:
50 | master: "{{ master_ip }}"
51 | get_url:
52 | url: "http://{{ master }}:8080/jnlpJars/jenkins-cli.jar"
53 | dest: /home/ec2-user/jenkins-cli.jar
54 | force: yes
55 | register: download
56 | ignore_errors: yes
57 | until: download.status_code|default(200) == 200
58 | retries: 5
59 | delay: 5
60 |
61 | - name: Allow execute perm to jenkins-cli.jar
62 | file:
63 | path: /home/ec2-user/jenkins-cli.jar
64 | owner: ec2-user
65 | group: ec2-user
66 | mode: '0700'
67 |
68 | - name: Place Jenkins Auth file
69 | copy:
70 | src: jenkins_auth
71 | dest: /home/ec2-user/
72 | owner: ec2-user
73 | mode: '0600'
74 |
75 | - name: Create Jenkins Credential
76 | vars:
77 | master: "{{ master_ip }}"
78 | shell: "cat /home/ec2-user/creds.xml | java -jar /home/ec2-user/jenkins-cli.jar -auth @/home/ec2-user/jenkins_auth -s http://{{master}}:8080 create-credentials-by-xml system::system::jenkins _"
79 |
80 | - name: Register node as worker
81 | vars:
82 | master: "{{ master_ip }}"
83 | shell: 'cat /home/ec2-user/node.xml | java -jar /home/ec2-user/jenkins-cli.jar -auth @/home/ec2-user/jenkins_auth -s http://{{ master }}:8080 create-node'
84 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/inventory_aws/tf_aws_ec2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # aws ec2 ansible dynamic inventory plugin
3 | plugin: aws_ec2
4 |
5 | regions:
6 | - us-east-1
7 | - us-west-2
8 | # - us-east-2
9 | # set strict to False
10 | # if True this will make invalid entries
11 | # a fatal error
12 | strict: False
13 |
14 | keyed_groups:
15 | # each aws ec2 instance has its own instance tags. create
16 | # a tag variable from those tags for ansible to use.
17 | # if an EC2 tag:Name is acloudguru_machine, it'll be converted to the
18 | # Ansible tag variable name as follows: tag_Name_acloudguru_machine
19 | # which can then be passed as a variable value for the host via -e flag
20 | - key: tags
21 | prefix: tag
22 | #
23 | # the following keyed groups are from the aws url:
24 | # https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
25 | # below are some of the variables that can be used.
26 | # an example for instance_type:
27 | # aws_instance_type_t2_micro
28 | - key: architecture
29 | prefix: arch
30 | - key: tags.Applications
31 | separator: ''
32 | - key: instance_type
33 | prefix: aws_instance_type
34 | - key: placement.region
35 | prefix: aws_region
36 | - key: image_id
37 | prefix: aws_image
38 | - key: hypervisor
39 | prefix: aws_hypervisor
40 | - key: 'security_groups|json_query("[].group_id")'
41 | prefix: 'security_groups'
42 |
43 | hostnames:
44 | # a list in order of precedence for hostname variables.
45 | #
46 | - ip-address
47 | - dns-name
48 | - tag:Name
49 |
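50 | # Hypothetical usage of such a keyed group from the CLI, e.g.:
51 | #   ansible-playbook -e 'passed_in_hosts=tag_Name_acloudguru_machine' your_playbook.yaml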
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/jenkins_auth:
--------------------------------------------------------------------------------
1 | admin:password
2 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/ansible_templates/node.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | {{ ipv4 }}
4 |
5 | /home/ec2-user
6 | 1
7 | EXCLUSIVE
8 |
9 |
10 | {{ ipv4 }}
11 | 22
12 | JenkinsCreds-{{ ipv4 }}
13 | 60
14 | 10
15 | 15
16 |
17 | true
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/instances.tf:
--------------------------------------------------------------------------------
1 | #Get Linux AMI ID using SSM Parameter endpoint in us-east-1
2 | data "aws_ssm_parameter" "JenkinsMasterAmi" {
3 | provider = aws.region-master
4 | name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
5 | }
6 |
7 | #Get Linux AMI ID using SSM Parameter endpoint in us-west-2
8 | data "aws_ssm_parameter" "JenkinsWorkerAmi" {
9 | provider = aws.region-worker
10 | name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
11 | }
12 |
13 | #Create key-pair for logging into EC2 in us-east-1
14 | resource "aws_key_pair" "master-key" {
15 | provider = aws.region-master
16 | key_name = "jenkins"
17 | public_key = file("~/.ssh/id_rsa.pub")
18 | }
19 |
20 | #Create key-pair for logging into EC2 in us-west-2
21 | resource "aws_key_pair" "worker-key" {
22 | provider = aws.region-worker
23 | key_name = "jenkins"
24 | public_key = file("~/.ssh/id_rsa.pub")
25 | }
26 |
27 | #Create and bootstrap EC2 in us-east-1
28 | resource "aws_instance" "jenkins-master" {
29 | provider = aws.region-master
30 | ami = data.aws_ssm_parameter.JenkinsMasterAmi.value
31 | instance_type = var.instance-type
32 | key_name = aws_key_pair.master-key.key_name
33 | associate_public_ip_address = true
34 | vpc_security_group_ids = [aws_security_group.jenkins-sg.id]
35 | subnet_id = aws_subnet.subnet_1.id
36 | provisioner "local-exec" {
37 | command = < instance.public_ip
15 | }
16 | }
17 | output "Jenkins-Worker-Private-IPs" {
18 | value = {
19 | for instance in aws_instance.jenkins-worker-oregon :
20 | instance.id => instance.private_ip
21 | }
22 | }
23 |
24 | output "Loadbalancer-DNS-URL" {
25 | value = aws_lb.application-lb.dns_name
26 | }
27 |
--------------------------------------------------------------------------------
/lab_jenkins_master_worker/variables.tf:
--------------------------------------------------------------------------------
1 | variable "external_ip" {
2 | type = string
3 | default = "0.0.0.0/0"
4 | }
5 |
6 | variable "instance-type" {
7 | type = string
8 | default = "t3.micro"
9 | }
10 |
11 | variable "profile" {
12 | type = string
13 | default = "default"
14 | }
15 |
16 | variable "region-master" {
17 | type = string
18 | default = "us-east-1"
19 | }
20 |
21 | variable "region-worker" {
22 | type = string
23 | default = "us-west-2"
24 | }
25 |
26 | #How many Jenkins workers to spin up
27 | variable "workers-count" {
28 | type = number
29 | default = 1
30 | }
31 |
--------------------------------------------------------------------------------
/lab_network_vpc_peering/network_setup.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | profile = var.profile
3 | region = var.region-master
4 | alias = "region-master"
5 | }
6 |
7 | provider "aws" {
8 | profile = var.profile
9 | region = var.region-worker
10 | alias = "region-worker"
11 | }
12 |
13 | #Create VPC in us-east-1
14 | resource "aws_vpc" "vpc_useast" {
15 | provider = aws.region-master
16 | cidr_block = "10.0.0.0/16"
17 | enable_dns_support = true
18 | enable_dns_hostnames = true
19 | tags = {
20 | Name = "master-vpc-jenkins"
21 | }
22 |
23 | }
24 |
25 | #Create VPC in us-west-2
26 | resource "aws_vpc" "vpc_uswest" {
27 | provider = aws.region-worker
28 | cidr_block = "192.168.0.0/16"
29 | enable_dns_support = true
30 | enable_dns_hostnames = true
31 | tags = {
32 | Name = "worker-vpc-jenkins"
33 | }
34 |
35 | }
36 |
37 | #Initiate Peering connection request from us-east-1
38 | resource "aws_vpc_peering_connection" "useast1-uswest-2" {
39 | provider = aws.region-master
40 | peer_vpc_id = aws_vpc.vpc_uswest.id
41 | vpc_id = aws_vpc.vpc_useast.id
42 | #auto_accept = true
43 | peer_region = var.region-worker
44 |
45 | }
46 |
47 | #Create IGW in us-east-1
48 | resource "aws_internet_gateway" "igw" {
49 | provider = aws.region-master
50 | vpc_id = aws_vpc.vpc_useast.id
51 | }
52 |
53 | #Create IGW in us-west-2
54 | resource "aws_internet_gateway" "igw-oregon" {
55 | provider = aws.region-worker
56 | vpc_id = aws_vpc.vpc_uswest.id
57 | }
58 |
59 | #Accept VPC peering request in us-west-2 from us-east-1
60 | resource "aws_vpc_peering_connection_accepter" "accept_peering" {
61 | provider = aws.region-worker
62 | vpc_peering_connection_id = aws_vpc_peering_connection.useast1-uswest-2.id
63 | auto_accept = true
64 | }
65 |
66 | #Create route table in us-east-1
67 | resource "aws_route_table" "internet_route" {
68 | provider = aws.region-master
69 | vpc_id = aws_vpc.vpc_useast.id
70 | route {
71 | cidr_block = "0.0.0.0/0"
72 | gateway_id = aws_internet_gateway.igw.id
73 | }
74 | route {
75 | cidr_block = "192.168.1.0/24"
76 | vpc_peering_connection_id = aws_vpc_peering_connection.useast1-uswest-2.id
77 | }
78 | lifecycle {
79 | ignore_changes = all
80 | }
81 | tags = {
82 | Name = "Master-Region-RT"
83 | }
84 | }
85 |
86 | #Overwrite default route table of VPC(Master) with our route table entries
87 | resource "aws_main_route_table_association" "set-master-default-rt-assoc" {
88 | provider = aws.region-master
89 | vpc_id = aws_vpc.vpc_useast.id
90 | route_table_id = aws_route_table.internet_route.id
91 | }
92 | #Get all available AZs in the master region
93 | data "aws_availability_zones" "azs" {
94 | provider = aws.region-master
95 | state = "available"
96 | }
97 |
98 | #Create subnet # 1 in us-east-1
99 | resource "aws_subnet" "subnet_1" {
100 | provider = aws.region-master
101 | availability_zone = element(data.aws_availability_zones.azs.names, 0)
102 | vpc_id = aws_vpc.vpc_useast.id
103 | cidr_block = "10.0.1.0/24"
104 | }
105 |
106 | #Create subnet #2 in us-east-1
107 | resource "aws_subnet" "subnet_2" {
108 | provider = aws.region-master
109 | vpc_id = aws_vpc.vpc_useast.id
110 | availability_zone = element(data.aws_availability_zones.azs.names, 1)
111 | cidr_block = "10.0.2.0/24"
112 | }
113 |
114 |
115 | #Create subnet in us-west-2
116 | resource "aws_subnet" "subnet_1_oregon" {
117 | provider = aws.region-worker
118 | vpc_id = aws_vpc.vpc_uswest.id
119 | cidr_block = "192.168.1.0/24"
120 | }
121 |
122 | #Create route table in us-west-2
123 | resource "aws_route_table" "internet_route_oregon" {
124 | provider = aws.region-worker
125 | vpc_id = aws_vpc.vpc_uswest.id
126 | route {
127 | cidr_block = "0.0.0.0/0"
128 | gateway_id = aws_internet_gateway.igw-oregon.id
129 | }
130 | route {
131 | cidr_block = "10.0.1.0/24"
132 | vpc_peering_connection_id = aws_vpc_peering_connection.useast1-uswest-2.id
133 | }
134 | lifecycle {
135 | ignore_changes = all
136 | }
137 | tags = {
138 | Name = "Worker-Region-RT"
139 | }
140 | }
141 |
142 | #Overwrite default route table of VPC(Worker) with our route table entries
143 | resource "aws_main_route_table_association" "set-worker-default-rt-assoc" {
144 | provider = aws.region-worker
145 | vpc_id = aws_vpc.vpc_uswest.id
146 | route_table_id = aws_route_table.internet_route_oregon.id
147 | }
148 |
149 |
150 | #Create SG for allowing TCP/8080 from * and TCP/22 from your IP in us-east-1
151 | resource "aws_security_group" "jenkins-sg" {
152 | provider = aws.region-master
153 | name = "jenkins-sg"
154 | description = "Allow TCP/8080 & TCP/22"
155 | vpc_id = aws_vpc.vpc_useast.id
156 | ingress {
157 | description = "Allow 22 from our public IP"
158 | from_port = 22
159 | to_port = 22
160 | protocol = "tcp"
161 | cidr_blocks = [var.external_ip]
162 | }
163 | ingress {
164 | description = "allow anyone on port 8080"
165 | from_port = 8080
166 | to_port = 8080
167 | protocol = "tcp"
168 | cidr_blocks = ["0.0.0.0/0"]
169 | }
170 | ingress {
171 | description = "allow traffic from us-west-2"
172 | from_port = 0
173 | to_port = 0
174 | protocol = "-1"
175 | cidr_blocks = ["192.168.1.0/24"]
176 | }
177 | egress {
178 | from_port = 0
179 | to_port = 0
180 | protocol = "-1"
181 | cidr_blocks = ["0.0.0.0/0"]
182 | }
183 | }
184 |
185 | #Create SG for LB, only TCP/80,TCP/443 and access to jenkins-sg
186 | resource "aws_security_group" "lb-sg" {
187 | provider = aws.region-master
188 | name = "lb-sg"
189 | description = "Allow 443 and traffic to Jenkins SG"
190 | vpc_id = aws_vpc.vpc_useast.id
191 | ingress {
192 | description = "Allow 443 from anywhere"
193 | from_port = 443
194 | to_port = 443
195 | protocol = "tcp"
196 | cidr_blocks = ["0.0.0.0/0"]
197 | }
198 | ingress {
199 | description = "Allow 80 from anywhere for redirection"
200 | from_port = 80
201 | to_port = 80
202 | protocol = "tcp"
203 | cidr_blocks = ["0.0.0.0/0"]
204 | }
205 | ingress {
206 | description = "Allow traffic to jenkins-sg"
207 | from_port = 0
208 | to_port = 0
209 | protocol = "tcp"
210 | security_groups = [aws_security_group.jenkins-sg.id]
211 | }
212 | egress {
213 | from_port = 0
214 | to_port = 0
215 | protocol = "-1"
216 | cidr_blocks = ["0.0.0.0/0"]
217 | }
218 | }
219 |
220 | #Create SG for allowing TCP/22 from your IP in us-west-2
221 | resource "aws_security_group" "jenkins-sg-oregon" {
222 | provider = aws.region-worker
223 |
224 | name = "jenkins-sg-oregon"
225 | description = "Allow TCP/8080 & TCP/22"
226 | vpc_id = aws_vpc.vpc_uswest.id
227 | ingress {
228 | description = "Allow 22 from our public IP"
229 | from_port = 22
230 | to_port = 22
231 | protocol = "tcp"
232 | cidr_blocks = [var.external_ip]
233 | }
234 | ingress {
235 | description = "Allow traffic from us-east-1"
236 | from_port = 0
237 | to_port = 0
238 | protocol = "-1"
239 | cidr_blocks = ["10.0.1.0/24"]
240 | }
241 | egress {
242 | from_port = 0
243 | to_port = 0
244 | protocol = "-1"
245 | cidr_blocks = ["0.0.0.0/0"]
246 | }
247 | }
248 |
--------------------------------------------------------------------------------
/lab_network_vpc_peering/outputs.tf:
--------------------------------------------------------------------------------
1 | output "VPC-ID-US-EAST-1" {
2 | value = aws_vpc.vpc_useast.id
3 | }
4 |
5 | output "VPC-ID-US-WEST-2" {
6 | value = aws_vpc.vpc_uswest.id
7 | }
8 |
9 | output "PEERING-CONNECTION-ID" {
10 | value = aws_vpc_peering_connection.useast1-uswest-2.id
11 | }
12 |
--------------------------------------------------------------------------------
/lab_network_vpc_peering/variables.tf:
--------------------------------------------------------------------------------
1 | variable "external_ip" {
2 | type = string
3 | default = "0.0.0.0/0"
4 | }
5 |
6 | variable "profile" {
7 | type = string
8 | default = "default"
9 | }
10 |
11 | variable "region-master" {
12 | type = string
13 | default = "us-east-1"
14 | }
15 |
16 | variable "region-worker" {
17 | type = string
18 | default = "us-west-2"
19 | }
20 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Setup Requirements
3 |
4 | NOTE: For users following along in their own environments as opposed to the ACG provided environments, please install python's `boto3` module before proceeding.
5 |
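If `boto3` isn't already present, a typical install (a minimal sketch, assuming the `pip3` listed in the requirements below) looks like:

```
pip3 install boto3 --user
```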
6 |
7 | ```
8 | 1. Terraform binary >= 0.13.x # wget -c https://releases.hashicorp.com/terraform/0.13.0/terraform_0.13.0_linux_amd64.zip
9 | 2. Python3 & PIP need to be installed on all nodes (on most modern Linux systems they're available by default) # yum -y install python3-pip
10 | 3. Ansible (install via pip) # pip3 install ansible --user
11 | 4. AWS CLI (install via pip) # pip3 install awscli --user
12 | 5. jq (install via package manager) - OPTIONAL # yum -y install jq
13 | ```
14 |
15 | `This project has been tested on macOS (Mojave) and CentOS 7. The author provides no guarantees that it works on other OSes,
16 | although the steps are generic enough that, with little or no tweaking, it should work
17 | on a range of OSes that satisfy the five requirements above.`
18 |
19 | ## Notes and Instructions
20 |
21 | *For Terraform Part*
22 | ```
23 | The regional AWS providers are defined in providers.tf.
24 | The Terraform configuration and backend are defined in backend.tf.
25 |
26 |
27 | If you want to read and understand the deployment in sequence, read through the templates in the following order:
28 | 1. network_setup.tf
29 | 2. instances.tf --> the local-exec provisioners in this template kick off the Ansible playbooks in ansible_templates/ (see the sketch after this block)
30 | 3. alb_acm.tf
31 | 4. dns.tf
32 | ```
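The playbook invocations themselves live inside the heredocs of the `local-exec` provisioners in instances.tf. As a rough, illustrative sketch of that pattern only (the playbook path is real, but the tag-based host group shown here is a placeholder, not the verbatim course command):

```
ansible-playbook ansible_templates/install_jenkins.yaml \
  -e passed_in_hosts=tag_Name_<your-instance-Name-tag>
```

The `passed_in_hosts` variable is what the playbooks' `hosts:` line expects, and the `tag_Name_*` group names come from the aws_ec2 dynamic inventory configuration described under ansible_templates/inventory_aws/.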
33 | *S3 Backend*
34 | ```
35 | This project requires an S3 backend for storing the Terraform state file, so in the terraform block in backend.tf you'll need to plug in an actual bucket name before you can run "terraform init".
36 | Please also note that the "terraform" block does not allow the use of variables, so values HAVE to be hardcoded.
37 | ```
38 | Sample command for bucket creation via CLI:
39 | ```
40 | aws s3api create-bucket --bucket <bucket-name>
41 | ```
42 |
43 | Example
44 | ```
45 | aws s3api create-bucket --bucket myawesomebucketthatmayormaynotexistalready
46 | ```
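Once the bucket exists, plug its name into the hardcoded `bucket` value in backend.tf and then initialize and apply as usual. A minimal sketch, reusing the example bucket name above:

```
# backend.tf: bucket = "myawesomebucketthatmayormaynotexistalready"
terraform init
terraform plan
terraform apply
```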
47 |
48 | ## Supplementary files
49 |
50 | ```
51 | 1. ansible.cfg #A modified Ansible default config file with SSH host key checking and warnings disabled
52 | 2. aws_get_cp_hostedzone #An AWS CLI command for fetching your hosted zone for DNS part of this project
53 | 3. null_provisioners.tf #For setting up and deleting Ansible inventory files
54 | 4. variables.tf #Defines variables and default values for them for the TF templates
55 | 5. outputs.tf #Defines the outputs presented upon successful completion of a TF apply.
56 | ```
57 |
58 | ## Ansible playbooks
59 |
60 | ```
61 | 1. cred-privkey.j2 #Jinja template for creating Jenkins credentials via a Jenkins API call (populates the private key)
62 | 2. install_jenkins.yaml #Playbook for Jenkins Master
63 | 3. install_worker.yaml #Playbook for Jenkins Worker
64 | 4. node.j2 #Jinja template for registering a worker node with the Jenkins Master via the Jenkins CLI (populates the IP)
65 | 5. jenkins_auth #Provides the file with preset credentials for our Jenkins Master (see the usage note below)
66 | ```
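As a usage note on `jenkins_auth`: it simply holds the `admin:password` pair that install_worker.yaml passes to the Jenkins CLI. The CLI calls it issues look roughly like this (the master IP is filled in by the playbook at run time):

```
java -jar jenkins-cli.jar -auth @jenkins_auth -s http://<master-ip>:8080 create-node < node.xml
```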
67 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/alb_acm.tf:
--------------------------------------------------------------------------------
1 | #ACM CONFIGURATION
2 | #Creates ACM-issued certificate and requests validation via DNS (Route53)
3 | resource "aws_acm_certificate" "jenkins-lb-https" {
4 | provider = aws.region-master
5 | domain_name = join(".", ["jenkins", data.aws_route53_zone.dns.name])
6 | validation_method = "DNS"
7 | tags = {
8 | Name = "Jenkins-ACM"
9 | }
10 |
11 | }
12 |
13 | #Validates ACM issued certificate via Route53
14 | resource "aws_acm_certificate_validation" "cert" {
15 | provider = aws.region-master
16 | certificate_arn = aws_acm_certificate.jenkins-lb-https.arn
17 | for_each = aws_route53_record.cert_validation
18 | validation_record_fqdns = [aws_route53_record.cert_validation[each.key].fqdn]
19 | }
20 |
21 | ####ACM CONFIG END
22 |
23 |
24 | resource "aws_lb" "application-lb" {
25 | provider = aws.region-master
26 | name = "jenkins-lb"
27 | internal = false
28 | load_balancer_type = "application"
29 | security_groups = [aws_security_group.lb-sg.id]
30 | subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id]
31 | tags = {
32 | Name = "Jenkins-LB"
33 | }
34 | }
35 |
36 | resource "aws_lb_target_group" "app-lb-tg" {
37 | provider = aws.region-master
38 | name = "app-lb-tg"
39 | port = 8080
40 | target_type = "instance"
41 | vpc_id = aws_vpc.vpc_master.id
42 | protocol = "HTTP"
43 | health_check {
44 | enabled = true
45 | interval = 10
46 | path = "/login"
47 | port = 8080
48 | protocol = "HTTP"
49 | matcher = "200-299"
50 | }
51 | tags = {
52 | Name = "jenkins-target-group"
53 | }
54 | }
55 |
56 | resource "aws_lb_listener" "jenkins-listener" {
57 | provider = aws.region-master
58 | load_balancer_arn = aws_lb.application-lb.arn
59 | ssl_policy = "ELBSecurityPolicy-2016-08"
60 | port = "443"
61 | protocol = "HTTPS"
62 | certificate_arn = aws_acm_certificate.jenkins-lb-https.arn
63 | default_action {
64 | type = "forward"
65 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
66 | }
67 | }
68 |
69 | resource "aws_lb_listener" "jenkins-listener-http" {
70 | provider = aws.region-master
71 | load_balancer_arn = aws_lb.application-lb.arn
72 | port = "80"
73 | protocol = "HTTP"
74 | default_action {
75 | type = "redirect"
76 | redirect {
77 | port = "443"
78 | protocol = "HTTPS"
79 | status_code = "HTTP_301"
80 | }
81 | }
82 | }
83 |
84 | resource "aws_lb_target_group_attachment" "jenkins-master-attach" {
85 | provider = aws.region-master
86 | target_group_arn = aws_lb_target_group.app-lb-tg.arn
87 | target_id = aws_instance.jenkins-master.id
88 | port = 8080
89 | }
90 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible.cfg:
--------------------------------------------------------------------------------
1 | # Example config file for ansible -- https://ansible.com/
2 | # =======================================================
3 |
4 | # Nearly all parameters can be overridden in ansible-playbook
5 | # or with command line flags. Ansible will read ANSIBLE_CONFIG,
6 | # ansible.cfg in the current working directory, .ansible.cfg in
7 | # the home directory, or /etc/ansible/ansible.cfg, whichever it
8 | # finds first
9 |
10 | # For a full list of available options, run ansible-config list or see the
11 | # documentation: https://docs.ansible.com/ansible/latest/reference_appendices/config.html.
12 |
13 | [defaults]
14 | inventory = ./ansible_templates/inventory_aws/tf_aws_ec2.yml
15 | enable_plugins = aws_ec2
16 | interpreter_python = auto_silent
17 | #library = ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
18 | #module_utils = ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
19 | #remote_tmp = ~/.ansible/tmp
20 | #local_tmp = ~/.ansible/tmp
21 | #forks = 5
22 | #poll_interval = 0.001
23 | #ask_pass = False
24 | #transport = smart
25 | # Plays will gather facts by default, which contain information about
26 | # the remote system.
27 | #
28 | # smart - gather by default, but don't regather if already gathered
29 | # implicit - gather by default, turn off with gather_facts: False
30 | # explicit - do not gather by default, must say gather_facts: True
31 | #gathering = implicit
32 |
33 | # This only affects the gathering done by a play's gather_facts directive,
34 | # by default gathering retrieves all facts subsets
35 | # all - gather all subsets
36 | # network - gather min and network facts
37 | # hardware - gather hardware facts (longest facts to retrieve)
38 | # virtual - gather min and virtual facts
39 | # facter - import facts from facter
40 | # ohai - import facts from ohai
41 | # You can combine them using comma (ex: network,virtual)
42 | # You can negate them using ! (ex: !hardware,!facter,!ohai)
43 | # A minimal set of facts is always gathered.
44 | #
45 | #gather_subset = all
46 |
47 | # some hardware related facts are collected
48 | # with a maximum timeout of 10 seconds. This
49 | # option lets you increase or decrease that
50 | # timeout to something more suitable for the
51 | # environment.
52 | #
53 | #gather_timeout = 10
54 |
55 | # Ansible facts are available inside the ansible_facts.* dictionary
56 | # namespace. This setting maintains the behaviour which was the default prior
57 | # to 2.5, duplicating these variables into the main namespace, each with a
58 | # prefix of 'ansible_'.
59 | # This variable is set to True by default for backwards compatibility. It
60 | # will be changed to a default of 'False' in a future release.
61 | #
62 | #inject_facts_as_vars = True
63 |
64 | # Paths to search for collections, colon separated
65 | # collections_paths = ~/.ansible/collections:/usr/share/ansible/collections
66 |
67 | # Paths to search for roles, colon separated
68 | #roles_path = ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
69 |
70 | # Host key checking is enabled by default
71 | host_key_checking = False
72 |
73 | # You can only have one 'stdout' callback type enabled at a time. The default
74 | # is 'default'. The 'yaml' or 'debug' stdout callback plugins are easier to read.
75 | #
76 | #stdout_callback = default
77 | #stdout_callback = yaml
78 | #stdout_callback = debug
79 |
80 |
81 | # Ansible ships with some plugins that require whitelisting,
82 | # this is done to avoid running all of a type by default.
83 | # These settings list those that you want enabled for your system.
84 | # Custom plugins should not need this unless plugin author disables them
85 | # by default.
86 | #
87 | # Enable callback plugins, they can output to stdout but cannot be 'stdout' type.
88 | #callback_whitelist = timer, mail
89 |
90 | # Determine whether includes in tasks and handlers are "static" by
91 | # default. As of 2.0, includes are dynamic by default. Setting these
92 | # values to True will make includes behave more like they did in the
93 | # 1.x versions.
94 | #
95 | #task_includes_static = False
96 | #handler_includes_static = False
97 |
98 | # Controls if a missing handler for a notification event is an error or a warning
99 | #error_on_missing_handler = True
100 |
101 | # Default timeout for connection plugins
102 | #timeout = 10
103 |
104 | # Default user to use for playbooks if user is not specified
105 | # Uses the connection plugin's default, normally the user currently executing Ansible,
106 | # unless a different user is specified here.
107 | #
108 | #remote_user = root
109 |
110 | # Logging is off by default unless this path is defined.
111 | #log_path = /var/log/ansible.log
112 |
113 | # Default module to use when running ad-hoc commands
114 | #module_name = command
115 |
116 | # Use this shell for commands executed under sudo.
117 | # you may need to change this to /bin/bash in rare instances
118 | # if sudo is constrained.
119 | #
120 | #executable = /bin/sh
121 |
122 | # By default, variables from roles will be visible in the global variable
123 | # scope. To prevent this, set the following option to True, and only
124 | # tasks and handlers within the role will see the variables there
125 | #
126 | #private_role_vars = False
127 |
128 | # List any Jinja2 extensions to enable here.
129 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
130 |
131 | # If set, always use this private key file for authentication, same as
132 | # if passing --private-key to ansible or ansible-playbook
133 | #
134 | #private_key_file = /path/to/file
135 |
136 | # If set, configures the path to the Vault password file as an alternative to
137 | # specifying --vault-password-file on the command line. This can also be
138 | # an executable script that returns the vault password to stdout.
139 | #
140 | #vault_password_file = /path/to/vault_password_file
141 |
142 | # Format of string {{ ansible_managed }} available within Jinja2
143 | # templates indicates to users editing templates files will be replaced.
144 | # replacing {file}, {host} and {uid} and strftime codes with proper values.
145 | #
146 | #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
147 |
148 | # {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
149 | # in some situations so the default is a static string:
150 | #
151 | #ansible_managed = Ansible managed
152 |
153 | # By default, ansible-playbook will display "Skipping [host]" if it determines a task
154 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
155 | # messages. NOTE: the task header will still be shown regardless of whether or not the
156 | # task is skipped.
157 | #
158 | #display_skipped_hosts = True
159 |
160 | # By default, if a task in a playbook does not include a name: field then
161 | # ansible-playbook will construct a header that includes the task's action but
162 | # not the task's args. This is a security feature because ansible cannot know
163 | # if the *module* considers an argument to be no_log at the time that the
164 | # header is printed. If your environment doesn't have a problem securing
165 | # stdout from ansible-playbook (or you have manually specified no_log in your
166 | # playbook on all of the tasks where you have secret information) then you can
167 | # safely set this to True to get more informative messages.
168 | #
169 | #display_args_to_stdout = False
170 |
171 | # Ansible will raise errors when attempting to dereference
172 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line
173 | # to change this behavior.
174 | #
175 | #error_on_undefined_vars = False
176 |
177 | # Ansible may display warnings based on the configuration of the
178 | # system running ansible itself. This may include warnings about 3rd party packages or
179 | # other conditions that should be resolved if possible.
180 | # To disable these warnings, set the following value to False:
181 | #
182 | system_warnings = False
183 |
184 | # Ansible may display deprecation warnings for language
185 | # features that should no longer be used and will be removed in future versions.
186 | # To disable these warnings, set the following value to False:
187 | #
188 | deprecation_warnings = False
189 |
190 | # Ansible can optionally warn when usage of the shell and
191 | # command module appear to be simplified by using a default Ansible module
192 | # instead. These warnings can be silenced by adjusting the following
193 | # setting or adding warn=yes or warn=no to the end of the command line
194 | # parameter string. This will for example suggest using the git module
195 | # instead of shelling out to the git command.
196 | #
197 | command_warnings = False
198 |
199 |
200 | # set plugin path directories here, separate with colons
201 | #action_plugins = /usr/share/ansible/plugins/action
202 | #become_plugins = /usr/share/ansible/plugins/become
203 | #cache_plugins = /usr/share/ansible/plugins/cache
204 | #callback_plugins = /usr/share/ansible/plugins/callback
205 | #connection_plugins = /usr/share/ansible/plugins/connection
206 | #lookup_plugins = /usr/share/ansible/plugins/lookup
207 | #inventory_plugins = /usr/share/ansible/plugins/inventory
208 | #vars_plugins = /usr/share/ansible/plugins/vars
209 | #filter_plugins = /usr/share/ansible/plugins/filter
210 | #test_plugins = /usr/share/ansible/plugins/test
211 | #terminal_plugins = /usr/share/ansible/plugins/terminal
212 | #strategy_plugins = /usr/share/ansible/plugins/strategy
213 |
214 |
215 | # Ansible will use the 'linear' strategy but you may want to try another one.
216 | #strategy = linear
217 |
218 | # By default, callbacks are not loaded for /bin/ansible. Enable this if you
219 | # want, for example, a notification or logging callback to also apply to
220 | # /bin/ansible runs
221 | #
222 | #bin_ansible_callbacks = False
223 |
224 |
225 | # Don't like cows? that's unfortunate.
226 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
227 | #nocows = 1
228 |
229 | # Set which cowsay stencil you'd like to use by default. When set to 'random',
230 | # a random stencil will be selected for each task. The selection will be filtered
231 | # against the `cow_whitelist` option below.
232 | #
233 | #cow_selection = default
234 | #cow_selection = random
235 |
236 | # When using the 'random' option for cowsay, stencils will be restricted to this list.
237 | # it should be formatted as a comma-separated list with no spaces between names.
238 | # NOTE: line continuations here are for formatting purposes only, as the INI parser
239 | # in python does not support them.
240 | #
241 | #cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
242 | # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
243 | # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
244 |
245 | # Don't like colors either?
246 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
247 | #
248 | #nocolor = 1
249 |
250 | # If set to a persistent type (not 'memory', for example 'redis') fact values
251 | # from previous runs in Ansible will be stored. This may be useful when
252 | # wanting to use, for example, IP information from one group of servers
253 | # without having to talk to them in the same playbook run to get their
254 | # current IP information.
255 | #
256 | #fact_caching = memory
257 |
258 | # This option tells Ansible where to cache facts. The value is plugin dependent.
259 | # For the jsonfile plugin, it should be a path to a local directory.
260 | # For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0
261 | #
262 | #fact_caching_connection=/tmp
263 |
264 | # retry files
265 | # When a playbook fails a .retry file can be created that will be placed in ~/
266 | # You can enable this feature by setting retry_files_enabled to True
267 | # and you can change the location of the files by setting retry_files_save_path
268 | #
269 | #retry_files_enabled = False
270 | #retry_files_save_path = ~/.ansible-retry
271 |
272 | # prevents logging of task data, off by default
273 | #no_log = False
274 |
275 | # prevents logging of tasks, but only on the targets, data is still logged on the master/controller
276 | #no_target_syslog = False
277 |
278 | # Controls whether Ansible will raise an error or warning if a task has no
279 | # choice but to create world readable temporary files to execute a module on
280 | # the remote machine. This option is False by default for security. Users may
281 | # turn this on to have behaviour more like Ansible prior to 2.1.x. See
282 | # https://docs.ansible.com/ansible/latest/user_guide/become.html#becoming-an-unprivileged-user
283 | # for more secure ways to fix this than enabling this option.
284 | #
285 | #allow_world_readable_tmpfiles = False
286 |
287 | # Controls what compression method is used for new-style ansible modules when
288 | # they are sent to the remote system. The compression types depend on having
289 | # support compiled into both the controller's python and the client's python.
290 | # The names should match with the python Zipfile compression types:
291 | # * ZIP_STORED (no compression. available everywhere)
292 | # * ZIP_DEFLATED (uses zlib, the default)
293 | # These values may be set per host via the ansible_module_compression inventory variable.
294 | #
295 | #module_compression = 'ZIP_DEFLATED'
296 |
297 | # This controls the cutoff point (in bytes) on --diff for files
298 | # set to 0 for unlimited (RAM may suffer!).
299 | #
300 | #max_diff_size = 104448
301 |
302 | # Controls showing custom stats at the end, off by default
303 | #show_custom_stats = False
304 |
305 | # Controls which files to ignore when using a directory as inventory with
306 | # possibly multiple sources (both static and dynamic)
307 | #
308 | #inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
309 |
310 | # This family of modules use an alternative execution path optimized for network appliances
311 | # only update this setting if you know how this works, otherwise it can break module execution
312 | #
313 | #network_group_modules=eos, nxos, ios, iosxr, junos, vyos
314 |
315 | # When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
316 | # a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
317 | # jinja2 templating language which will be run through the templating engine.
318 | # ENABLING THIS COULD BE A SECURITY RISK
319 | #
320 | #allow_unsafe_lookups = False
321 |
322 | # set default errors for all plays
323 | #any_errors_fatal = False
324 |
325 |
326 | [inventory]
327 | # List of enabled inventory plugins and the order in which they are used.
328 | #enable_plugins = host_list, script, auto, yaml, ini, toml
329 |
330 | # Ignore these extensions when parsing a directory as inventory source
331 | #ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
332 |
333 | # ignore files matching these patterns when parsing a directory as inventory source
334 | #ignore_patterns=
335 |
336 | # If 'True' unparsed inventory sources become fatal errors, otherwise they are warnings.
337 | #unparsed_is_failed = False
338 |
339 |
340 | [privilege_escalation]
341 | #become = False
342 | #become_method = sudo
343 | #become_ask_pass = False
344 |
345 |
346 | ## Connection Plugins ##
347 |
348 | # Settings for each connection plugin go under a section titled '[[plugin_name]_connection]'
349 | # To view available connection plugins, run ansible-doc -t connection -l
350 | # To view available options for a connection plugin, run ansible-doc -t connection [plugin_name]
351 | # https://docs.ansible.com/ansible/latest/plugins/connection.html
352 |
353 | [paramiko_connection]
354 | # uncomment this line to cause the paramiko connection plugin to not record new host
355 | # keys encountered. Increases performance on new host additions. Setting works independently of the
356 | # host key checking setting above.
357 | #record_host_keys=False
358 |
359 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
360 | # line to disable this behaviour.
361 | #pty = False
362 |
363 | # paramiko will default to looking for SSH keys initially when trying to
364 | # authenticate to remote devices. This is a problem for some network devices
365 | # that close the connection after a key failure. Uncomment this line to
366 | # disable the Paramiko look for keys function
367 | #look_for_keys = False
368 |
369 | # When using persistent connections with Paramiko, the connection runs in a
370 | # background process. If the host doesn't already have a valid SSH key, by
371 | # default Ansible will prompt to add the host key. This will cause connections
372 | # running in background processes to fail. Uncomment this line to have
373 | # Paramiko automatically add host keys.
374 | #host_key_auto_add = True
375 |
376 |
377 | [ssh_connection]
378 | # ssh arguments to use
379 | # Leaving off ControlPersist will result in poor performance, so use
380 | # paramiko on older platforms rather than removing it, -C controls compression use
381 | #ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
382 |
383 | # The base directory for the ControlPath sockets.
384 | # This is the "%(directory)s" in the control_path option
385 | #
386 | # Example:
387 | # control_path_dir = /tmp/.ansible/cp
388 | #control_path_dir = ~/.ansible/cp
389 |
390 | # The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
391 | # port and username (empty string in the config). The hash mitigates a common problem users
392 | # found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
393 | # In those cases, a "too long for Unix domain socket" ssh error would occur.
394 | #
395 | # Example:
396 | # control_path = %(directory)s/%%C
397 | #control_path =
398 |
399 | # Enabling pipelining reduces the number of SSH operations required to
400 | # execute a module on the remote server. This can result in a significant
401 | # performance improvement when enabled, however when using "sudo:" you must
402 | # first disable 'requiretty' in /etc/sudoers
403 | #
404 | # By default, this option is disabled to preserve compatibility with
405 | # sudoers configurations that have requiretty (the default on many distros).
406 | #
407 | #pipelining = False
408 |
409 | # Control the mechanism for transferring files (old)
410 | # * smart = try sftp and then try scp [default]
411 | # * True = use scp only
412 | # * False = use sftp only
413 | #scp_if_ssh = smart
414 |
415 | # Control the mechanism for transferring files (new)
416 | # If set, this will override the scp_if_ssh option
417 | # * sftp = use sftp to transfer files
418 | # * scp = use scp to transfer files
419 | # * piped = use 'dd' over SSH to transfer files
420 | # * smart = try sftp, scp, and piped, in that order [default]
421 | #transfer_method = smart
422 |
423 | # If False, sftp will not use batch mode to transfer files. This may cause some
424 | # types of file transfer failures impossible to catch however, and should
425 | # only be disabled if your sftp version has problems with batch mode
426 | #sftp_batch_mode = False
427 |
428 | # The -tt argument is passed to ssh when pipelining is not enabled because sudo
429 | # requires a tty by default.
430 | #usetty = True
431 |
432 | # Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
433 | # For each retry attempt, there is an exponential backoff,
434 | # so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
435 | #retries = 3
436 |
437 |
438 | [persistent_connection]
439 | # Configures the persistent connection timeout value in seconds. This value is
440 | # how long the persistent connection will remain idle before it is destroyed.
441 | # If the connection doesn't receive a request before the timeout value
442 | # expires, the connection is shutdown. The default value is 30 seconds.
443 | #connect_timeout = 30
444 |
445 | # The command timeout value defines the amount of time to wait for a command
446 | # or RPC call before timing out. The value for the command timeout must
447 | # be less than the value of the persistent connection idle timeout (connect_timeout)
448 | # The default value is 30 second.
449 | #command_timeout = 30
450 |
451 |
452 | ## Become Plugins ##
453 |
454 | # Settings for become plugins go under a section named '[[plugin_name]_become_plugin]'
455 | # To view available become plugins, run ansible-doc -t become -l
456 | # To view available options for a specific plugin, run ansible-doc -t become [plugin_name]
457 | # https://docs.ansible.com/ansible/latest/plugins/become.html
458 |
459 | [sudo_become_plugin]
460 | #flags = -H -S -n
461 | #user = root
462 |
463 |
464 | [selinux]
465 | # file systems that require special treatment when dealing with security context
466 | # the default behaviour that copies the existing context or uses the user default
467 | # needs to be changed to use the file system dependent context.
468 | #special_context_filesystems=fuse,nfs,vboxsf,ramfs,9p,vfat
469 |
470 | # Set this to True to allow libvirt_lxc connections to work without SELinux.
471 | #libvirt_lxc_noseclabel = False
472 |
473 |
474 | [colors]
475 | #highlight = white
476 | #verbose = blue
477 | #warn = bright purple
478 | #error = red
479 | #debug = dark gray
480 | #deprecate = purple
481 | #skip = cyan
482 | #unreachable = red
483 | #ok = green
484 | #changed = yellow
485 | #diff_add = green
486 | #diff_remove = red
487 | #diff_lines = cyan
488 |
489 |
490 | [diff]
491 | # Always print diff when running ( same as always running with -D/--diff )
492 | #always = False
493 |
494 | # Set how many context lines to show in diff
495 | #context = 3
496 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/cred-privkey.j2:
--------------------------------------------------------------------------------
1 |
2 | GLOBAL
3 | JenkinsCreds-{{ ipv4 }}
4 | Credentials for Workers
5 | ec2-user
6 |
7 |
8 | {{ priv_key }}
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/install_jenkins.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: "{{ passed_in_hosts }}"
3 | become: yes
4 | remote_user: ec2-user
5 | become_user: root
6 | tasks:
7 | - name: install dependencies
8 | yum:
9 | name: "{{ package }} "
10 | state: present
11 | vars:
12 | package:
13 | - wget
14 | - java-1.8.0-openjdk-devel
15 | - git
16 |
17 | - name: clone repo
18 | git:
19 | repo: https://github.com/linuxacademy/content-terraform-jenkins.git
20 | dest: /tmp/jenkins
21 |
22 | - name: Setup Jenkins Repo
23 | get_url:
24 | url: https://pkg.jenkins.io/redhat/jenkins.repo
25 | dest: /etc/yum.repos.d/jenkins.repo
26 |
27 | - name: Import Jenkins Repo GPG key
28 | rpm_key:
29 | state: present
30 | key: https://pkg.jenkins.io/redhat/jenkins.io.key
31 |
32 | - name: install Jenkins
33 | yum:
34 | name: jenkins
35 | state: present
36 |
37 | - name: Temporarily Stop Jenkins
38 | service:
39 | name: jenkins
40 | state: stopped
41 |
42 | - name: Modify user shell
43 | user:
44 | name: jenkins
45 | shell: /bin/bash
46 |
47 |
48 | - name: Delete Jenkins default dir
49 | file:
50 | state: absent
51 | path: /var/lib/jenkins
52 |
53 | - name: Copy over cloned Jenkins /var/lib/jenkins
54 | copy:
55 | remote_src: yes
56 | src: /tmp/jenkins/jenkins
57 | dest: /var/lib
58 |
59 |
60 | - name: Restore jenkins user ownership on /var/lib/jenkins
61 | file:
62 | path: /var/lib/jenkins
63 | state: directory
64 | recurse: yes
65 | owner: jenkins
66 |
67 | - name: Start Jenkins
68 | service:
69 | name: jenkins
70 | state: started
71 | enabled: yes
72 |
73 | - name: Wait until Jenkins is up
74 | shell: result_first=1; while [[ $result_first != 0 ]]; do if [[ `grep 'Jenkins is fully up and running' /var/log/jenkins/jenkins.log` ]];then result_first=0;else sleep 4;fi;done
75 | register: result
76 | until: result.rc == 0
77 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/install_worker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: "{{ passed_in_hosts }}"
3 | become: yes
4 | remote_user: ec2-user
5 | tasks:
6 | - name: Generate SSH Keypair
7 | openssh_keypair:
8 | path: /home/ec2-user/.ssh/id_rsa
9 | type: rsa
10 | size: 2048
11 | group: ec2-user
12 | owner: ec2-user
13 |
14 | - name: Add own key to authorized_keys file
15 | shell: "cat /home/ec2-user/.ssh/id_rsa.pub >> /home/ec2-user/.ssh/authorized_keys && chmod 600 /home/ec2-user/.ssh/authorized_keys"
16 |
17 | - name: Copy over Jenkins Worker Node creation payload xml
18 | vars:
19 | ipv4: "{{ ansible_default_ipv4.address }}"
20 | template:
21 | src: node.j2
22 | dest: /home/ec2-user/node.xml
23 | owner: ec2-user
24 | mode: '0644'
25 |
26 | - name: Read generated private key id_rsa
27 | slurp:
28 | src: /home/ec2-user/.ssh/id_rsa
29 | register: pkey
30 |
31 | - name: Copy over creds.xml and create Jenkins credential
32 | vars:
33 | priv_key: "{{ pkey['content'] | b64decode }}"
34 | ipv4: "{{ ansible_default_ipv4.address }}"
35 | template:
36 | src: cred-privkey.j2
37 | dest: /home/ec2-user/creds.xml
38 |
39 | - name: install dependencies
40 | yum:
41 | name: "{{ package }} "
42 | vars:
43 | package:
44 | - wget
45 | - java-1.8.0-openjdk-devel
46 | - git
47 |
48 | - name: Get Jenkins CLI jar file
49 | vars:
50 | master: "{{ master_ip }}"
51 | get_url:
52 | url: "http://{{ master }}:8080/jnlpJars/jenkins-cli.jar"
53 | dest: /home/ec2-user/jenkins-cli.jar
54 | force: yes
55 | register: download
56 | ignore_errors: yes
57 | until: download.status_code|default(200) == 200
58 | retries: 5
59 | delay: 5
60 |
61 | - name: Allow execute perm to jenkins-cli.jar
62 | file:
63 | path: /home/ec2-user/jenkins-cli.jar
64 | owner: ec2-user
65 | group: ec2-user
66 | mode: '0777'
67 |
68 | - name: Place Jenkins Auth file
69 | copy:
70 | src: jenkins_auth
71 | dest: /home/ec2-user/
72 | owner: ec2-user
73 | mode: '0600'
74 |
75 | - name: Create Jenkins Credential
76 | vars:
77 | master: "{{ master_ip }}"
78 | shell: "cat /home/ec2-user/creds.xml | java -jar /home/ec2-user/jenkins-cli.jar -auth @/home/ec2-user/jenkins_auth -s http://{{master}}:8080 create-credentials-by-xml system::system::jenkins _"
79 |
80 | - name: Register node as worker
81 | vars:
82 | master: "{{ master_ip }}"
83 | shell: 'cat /home/ec2-user/node.xml | java -jar /home/ec2-user/jenkins-cli.jar -auth @/home/ec2-user/jenkins_auth -s http://{{ master }}:8080 create-node'
84 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/inventory_aws/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Dynamic Ansible Inventory querying configuration
3 |
4 | ```
5 | 1. This configuration file is to be used in conjunction with enabling the dynamic inventory plugin in the ansible.cfg file
6 | 2. Official documentation can be found here: https://docs.ansible.com/ansible/latest/collections/amazon/aws/aws_ec2_inventory.html
7 | ```
8 |
9 | ## Notes and Instructions
10 |
11 | *How it works*
12 | The aws_ec2 inventory plugin is enabled as shown in the lesson "Configuring Terraform Provisioners and Config Management via Ansible". Once enabled, the plugin uses this YAML configuration file, together with your preconfigured AWS credentials, to poll for EC2 instances in the region(s) listed here. So instead of hard-coding IP addresses in static inventory files, Ansible can discover the IP addresses of EC2 instances on the fly using the tag names we assigned to those instances when creating them via Terraform. And how does the plugin get those tags? That's also defined in this configuration file (see the keyed_groups section in tf_aws_ec2.yml).
13 |
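A quick way to see what the plugin resolves (assuming your AWS credentials are configured and you run from the project root, where ansible.cfg points at this file) is to graph the inventory and inspect the generated `tag_*` / `aws_*` groups:

```
ansible-inventory -i ansible_templates/inventory_aws/tf_aws_ec2.yml --graph
```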
14 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/inventory_aws/tf_aws_ec2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # aws ec2 ansible dynamic inventory plugin
3 | plugin: aws_ec2
4 |
5 | regions:
6 | - us-east-1
7 | - us-west-2
8 | - us-east-2
9 | - eu-west-1
10 | # set strict to False
11 | # if True this will make invalid entries
12 | # a fatal error
13 | strict: False
14 |
15 | keyed_groups:
16 |   # each aws ec2 instance has its own instance tags. create
17 | # a tag variable from those tags for ansible to use.
18 | # if an EC2 tag:Name is acloudguru_machine, it'll be converted to the
19 | # Ansible tag variable name as follows: tag_Name_acloudguru_machine
20 | # which can then be passed as a variable value for the host via -e flag
21 | - key: tags
22 | prefix: tag
23 | #
24 | # the following keyed groups are from the aws url:
25 | # https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
26 | # below are some of the variables that can be used.
27 | # an example for instance_type:
28 | # aws_instance_type_t2_micro
29 | - key: architecture
30 | prefix: arch
31 | - key: tags.Applications
32 | separator: ''
33 | - key: instance_type
34 | prefix: aws_instance_type
35 | - key: placement.region
36 | prefix: aws_region
37 | - key: image_id
38 | prefix: aws_image
39 | - key: hypervisor
40 | prefix: aws_hypervisor
41 | - key: 'security_groups|json_query("[].group_id")'
42 | prefix: 'security_groups'
43 |
44 | hostnames:
45 | # a list in order of precedence for hostname variables.
46 | #
47 | - ip-address
48 | - dns-name
49 | - tag:Name
50 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/jenkins_auth:
--------------------------------------------------------------------------------
1 | admin:password
2 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/ansible_templates/node.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 | {{ ipv4 }}
4 |
5 | /home/ec2-user
6 | 1
7 | EXCLUSIVE
8 |
9 |
10 | {{ ipv4 }}
11 | 22
12 | JenkinsCreds-{{ ipv4 }}
13 | 60
14 | 10
15 | 15
16 |
17 | true
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/aws_get_cp_hostedzone:
--------------------------------------------------------------------------------
1 | aws route53 list-hosted-zones | jq -r .HostedZones[].Name | egrep "cmcloud*"
2 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/backend.tf:
--------------------------------------------------------------------------------
1 | #Set S3 backend for persisting TF state file remotely, ensure bucket already exists
2 | # And that AWS user being used by TF has read/write perms
3 | terraform {
4 | required_version = ">=0.12.0"
5 | required_providers {
6 | aws = ">=3.0.0"
7 | }
8 | backend "s3" {
9 | region = "us-east-1"
10 | profile = "default"
11 | key = "terraformstatefile"
12 | bucket = ""
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/dns.tf:
--------------------------------------------------------------------------------
1 | #DNS Configuration
2 | #Get the already publicly configured Hosted Zone on Route53 - MUST EXIST, check variables.tf for dns-name
3 | data "aws_route53_zone" "dns" {
4 | provider = aws.region-master
5 | name = var.dns-name
6 | }
7 |
8 | #Create record in hosted zone for ACM Certificate Domain verification
9 | resource "aws_route53_record" "cert_validation" {
10 | provider = aws.region-master
11 | for_each = {
12 | for val in aws_acm_certificate.jenkins-lb-https.domain_validation_options : val.domain_name => {
13 | name = val.resource_record_name
14 | record = val.resource_record_value
15 | type = val.resource_record_type
16 | }
17 | }
18 | name = each.value.name
19 | records = [each.value.record]
20 | ttl = 60
21 | type = each.value.type
22 | zone_id = data.aws_route53_zone.dns.zone_id
23 |
24 | }
25 |
26 | #Create Alias record towards ALB from Route53
27 | resource "aws_route53_record" "jenkins" {
28 | provider = aws.region-master
29 | zone_id = data.aws_route53_zone.dns.zone_id
30 | name = join(".", ["jenkins", data.aws_route53_zone.dns.name])
31 | type = "A"
32 | alias {
33 | name = aws_lb.application-lb.dns_name
34 | zone_id = aws_lb.application-lb.zone_id
35 | evaluate_target_health = true
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/instances.tf:
--------------------------------------------------------------------------------
1 | #Get Linux AMI ID using SSM Parameter endpoint in us-east-1
2 | data "aws_ssm_parameter" "linuxAmi" {
3 | provider = aws.region-master
4 | name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
5 | }
6 |
7 | #Get Linux AMI ID using SSM Parameter endpoint in us-west-2
8 | data "aws_ssm_parameter" "linuxAmiOregon" {
9 | provider = aws.region-worker
10 | name = "/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2"
11 | }
12 |
13 | #Create key-pair for logging into EC2 in us-east-1
14 | resource "aws_key_pair" "master-key" {
15 | provider = aws.region-master
16 | key_name = "jenkins-master"
17 | public_key = file("~/.ssh/id_rsa.pub")
18 | }
19 |
20 | #Create key-pair for logging into EC2 in us-west-2
21 | resource "aws_key_pair" "worker-key" {
22 | provider = aws.region-worker
23 | key_name = "jenkins-worker"
24 | public_key = file("~/.ssh/id_rsa.pub")
25 | }
26 |
27 | #Create and bootstrap EC2 in us-east-1
28 | resource "aws_instance" "jenkins-master" {
29 | provider = aws.region-master
30 | ami = data.aws_ssm_parameter.linuxAmi.value
31 | instance_type = var.instance-type
32 | key_name = aws_key_pair.master-key.key_name
33 | associate_public_ip_address = true
34 | vpc_security_group_ids = [aws_security_group.jenkins-sg.id]
35 | subnet_id = aws_subnet.subnet_1.id
36 | provisioner "local-exec" {
37 | command = < instance.public_ip
18 | }
19 | }
20 | output "Jenkins-Worker-Private-IPs" {
21 | value = {
22 | for instance in aws_instance.jenkins-worker-oregon :
23 | instance.id => instance.private_ip
24 | }
25 | }
26 |
27 | output "url" {
28 | value = aws_route53_record.jenkins.fqdn
29 | }
30 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/providers.tf:
--------------------------------------------------------------------------------
1 | #Defining multiple providers using "alias" parameter
2 | provider "aws" {
3 | profile = var.profile
4 | region = var.region-master
5 | alias = "region-master"
6 | }
7 |
8 | provider "aws" {
9 | profile = var.profile
10 | region = var.region-worker
11 | alias = "region-worker"
12 | }
13 |
--------------------------------------------------------------------------------
/terraform_v13_compatible_code/variables.tf:
--------------------------------------------------------------------------------
1 | variable "external_ip" {
2 | type = string
3 | default = "0.0.0.0/0"
4 | }
5 |
6 | variable "instance-type" {
7 | type = string
8 | default = "t3.micro"
9 | # validation {
10 | # condition = can(regex("[^t2]", var.instance-type))
11 | # error_message = "Instance type cannot be anything other than t2 or t3 type and also not t3a.micro."
12 | # }
13 | }
14 |
15 | variable "dns-name" {
16 | type = string
17 | default = "" # e.g "cmcloudlab1234.info."
18 | }
19 |
20 | variable "profile" {
21 | type = string
22 | default = "default"
23 | }
24 |
25 | variable "region-master" {
26 | type = string
27 | default = "us-east-1"
28 | }
29 |
30 | variable "region-worker" {
31 | type = string
32 | default = "us-west-2"
33 | }
34 |
35 | #How many Jenkins workers to spin up
36 | variable "workers-count" {
37 | type = number
38 | default = 1
39 | }
40 |
--------------------------------------------------------------------------------