├── .gitattributes
├── .gitignore
├── Chapter 2
│   ├── CloudFormation
│   │   ├── iam_user_policy.json
│   │   ├── main.json
│   │   └── parameters.json
│   └── Terraform
│       ├── iam_user_policy.json
│       ├── outputs.tf
│       ├── resources.tf
│       ├── terraform.tfvars
│       └── variables.tf
├── Chapter 3
│   ├── CodeCommit
│   │   └── salt
│   │       ├── minion.d
│   │       │   └── masterless.conf
│   │       ├── pillars
│   │       │   ├── top.sls
│   │       │   └── users.sls
│   │       └── states
│   │           ├── nginx
│   │           │   ├── files
│   │           │   │   └── default.conf
│   │           │   └── init.sls
│   │           ├── php-fpm
│   │           │   └── init.sls
│   │           ├── phptest
│   │           │   └── init.sls
│   │           ├── top.sls
│   │           └── users
│   │               ├── files
│   │               │   └── veselin.pub
│   │               └── init.sls
│   └── Terraform
│       ├── iam_user_policy.json
│       ├── outputs.tf
│       ├── resources.tf
│       ├── terraform.tfvars
│       └── variables.tf
├── Chapter 4
│   ├── CodeCommit
│   │   ├── demo-app
│   │   │   ├── .gitignore
│   │   │   ├── Jenkinsfile
│   │   │   ├── src
│   │   │   │   └── index.php
│   │   │   └── tests
│   │   │       └── indexTest.php
│   │   └── salt
│   │       ├── minion.d
│   │       │   └── masterless.conf
│   │       ├── pillars
│   │       │   ├── nginx.sls
│   │       │   ├── top.sls
│   │       │   └── users.sls
│   │       └── states
│   │           ├── docker
│   │           │   └── init.sls
│   │           ├── jenkins
│   │           │   └── init.sls
│   │           ├── nginx
│   │           │   ├── files
│   │           │   │   └── jenkins.conf
│   │           │   └── init.sls
│   │           ├── top.sls
│   │           ├── users
│   │           │   ├── files
│   │           │   │   └── veselin.pub
│   │           │   └── init.sls
│   │           └── yum-s3
│   │               ├── files
│   │               │   ├── cob.conf
│   │               │   ├── cob.py
│   │               │   └── s3.repo
│   │               └── init.sls
│   └── Terraform
│       ├── iam_user_policy.json
│       ├── outputs.tf
│       ├── resources.tf
│       ├── terraform.tfvars
│       └── variables.tf
├── Chapter 5
│   ├── CodeCommit
│   │   ├── demo-app-cdelivery
│   │   │   ├── Jenkinsfile
│   │   │   ├── packer
│   │   │   │   ├── demo-app.json
│   │   │   │   ├── demo-app_userdata.sh
│   │   │   │   └── demo-app_vars.json
│   │   │   └── serverspec
│   │   │       ├── .rspec
│   │   │       ├── Rakefile
│   │   │       └── spec
│   │   │           ├── localhost
│   │   │           │   └── demo-app_spec.rb
│   │   │           └── spec_helper.rb
│   │   ├── demo-app
│   │   │   ├── .gitignore
│   │   │   ├── Jenkinsfile
│   │   │   ├── src
│   │   │   │   └── index.php
│   │   │   └── tests
│   │   │       └── indexTest.php
│   │   └── salt
│   │       ├── minion.d
│   │       │   └── masterless.conf
│   │       ├── pillars
│   │       │   ├── nginx.sls
│   │       │   ├── top.sls
│   │       │   └── users.sls
│   │       └── states
│   │           ├── demo-app
│   │           │   └── init.sls
│   │           ├── docker
│   │           │   └── init.sls
│   │           ├── jenkins
│   │           │   └── init.sls
│   │           ├── nginx
│   │           │   ├── demo-app.sls
│   │           │   ├── files
│   │           │   │   ├── demo-app.conf
│   │           │   │   └── jenkins.conf
│   │           │   ├── init.sls
│   │           │   └── jenkins.sls
│   │           ├── packer
│   │           │   └── init.sls
│   │           ├── php-fpm
│   │           │   └── init.sls
│   │           ├── top.sls
│   │           ├── users
│   │           │   ├── files
│   │           │   │   └── veselin.pub
│   │           │   └── init.sls
│   │           └── yum-s3
│   │               ├── files
│   │               │   ├── cob.conf
│   │               │   ├── cob.py
│   │               │   └── s3.repo
│   │               └── init.sls
│   └── Terraform
│       ├── iam_user_policy.json
│       ├── outputs.tf
│       ├── resources.tf
│       ├── terraform.tfvars
│       └── variables.tf
├── Chapter 6
│   ├── CodeCommit
│   │   ├── demo-app-cdelivery
│   │   │   ├── Jenkinsfile
│   │   │   ├── packer
│   │   │   │   ├── demo-app.json
│   │   │   │   ├── demo-app_userdata.sh
│   │   │   │   └── demo-app_vars.json
│   │   │   └── serverspec
│   │   │       ├── .rspec
│   │   │       ├── Rakefile
│   │   │       └── spec
│   │   │           ├── localhost
│   │   │           │   └── demo-app_spec.rb
│   │   │           └── spec_helper.rb
│   │   ├── demo-app-cdeployment
│   │   │   ├── Jenkinsfile
│   │   │   └── cdeployment.sh
│   │   ├── demo-app
│   │   │   ├── .gitignore
│   │   │   ├── Jenkinsfile
│   │   │   ├── src
│   │   │   │   └── index.php
│   │   │   └── tests
│   │   │       └── indexTest.php
│   │   └── salt
│   │       ├── minion.d
│   │       │   └── masterless.conf
│   │       ├── pillars
│   │       │   ├── nginx.sls
│   │       │   ├── top.sls
│   │       │   └── users.sls
│   │       └── states
│   │           ├── demo-app
│   │           │   └── init.sls
│   │           ├── docker
│   │           │   └── init.sls
│   │           ├── jenkins
│   │           │   └── init.sls
│   │           ├── nginx
│   │           │   ├── demo-app.sls
│   │           │   ├── files
│   │           │   │   ├── demo-app.conf
│   │           │   │   └── jenkins.conf
│   │           │   ├── init.sls
│   │           │   └── jenkins.sls
│   │           ├── packer
│   │           │   └── init.sls
│   │           ├── php-fpm
│   │           │   └── init.sls
│   │           ├── top.sls
│   │           ├── users
│   │           │   ├── files
│   │           │   │   └── veselin.pub
│   │           │   └── init.sls
│   │           └── yum-s3
│   │               ├── files
│   │               │   ├── cob.conf
│   │               │   ├── cob.py
│   │               │   └── s3.repo
│   │               └── init.sls
│   └── Terraform
│       ├── iam_user_policy.json
│       ├── outputs.tf
│       ├── resources.tf
│       ├── terraform.tfvars
│       └── variables.tf
├── Chapter 7
│   ├── elk
│   │   ├── etc
│   │   │   ├── elasticsearch
│   │   │   │   └── elasticsearch.yml
│   │   │   ├── filebeat
│   │   │   │   └── filebeat.yml
│   │   │   └── logstash
│   │   │       └── conf.d
│   │   │           └── main.conf
│   │   └── opt
│   │       └── logstash
│   │           └── patterns
│   │               └── nginx
│   ├── promjenkins
│   │   └── opt
│   │       └── prometheus
│   │           ├── alertmanager
│   │           │   └── alertmanager.yml
│   │           ├── executor
│   │           │   └── executor.sh
│   │           └── server
│   │               ├── prometheus.yml
│   │               └── rules
│   │                   ├── disk.rules
│   │                   └── keepalive.rules
│   └── webserver
│       └── user_data.sh
├── LICENSE
└── README.md
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
4 | # Custom for Visual Studio
5 | *.cs diff=csharp
6 |
7 | # Standard to msysgit
8 | *.doc diff=astextplain
9 | *.DOC diff=astextplain
10 | *.docx diff=astextplain
11 | *.DOCX diff=astextplain
12 | *.dot diff=astextplain
13 | *.DOT diff=astextplain
14 | *.pdf diff=astextplain
15 | *.PDF diff=astextplain
16 | *.rtf diff=astextplain
17 | *.RTF diff=astextplain
18 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Windows image file caches
2 | Thumbs.db
3 | ehthumbs.db
4 |
5 | # Folder config file
6 | Desktop.ini
7 |
8 | # Recycle Bin used on file shares
9 | $RECYCLE.BIN/
10 |
11 | # Windows Installer files
12 | *.cab
13 | *.msi
14 | *.msm
15 | *.msp
16 |
17 | # Windows shortcuts
18 | *.lnk
19 |
20 | # =========================
21 | # Operating System Files
22 | # =========================
23 |
24 | # OSX
25 | # =========================
26 |
27 | .DS_Store
28 | .AppleDouble
29 | .LSOverride
30 |
31 | # Thumbnails
32 | ._*
33 |
34 | # Files that might appear in the root of a volume
35 | .DocumentRevisions-V100
36 | .fseventsd
37 | .Spotlight-V100
38 | .TemporaryItems
39 | .Trashes
40 | .VolumeIcon.icns
41 |
42 | # Directories potentially created on remote AFP share
43 | .AppleDB
44 | .AppleDesktop
45 | Network Trash Folder
46 | Temporary Items
47 | .apdisk
48 |
--------------------------------------------------------------------------------
/Chapter 2/CloudFormation/iam_user_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "Action": [
7 | "cloudformation:CancelUpdateStack",
8 | "cloudformation:ContinueUpdateRollback",
9 | "cloudformation:Create*",
10 | "cloudformation:Describe*",
11 | "cloudformation:EstimateTemplateCost",
12 | "cloudformation:ExecuteChangeSet",
13 | "cloudformation:Get*",
14 | "cloudformation:List*",
15 | "cloudformation:PreviewStackUpdate",
16 | "cloudformation:SetStackPolicy",
17 | "cloudformation:SignalResource",
18 | "cloudformation:UpdateStack",
19 | "cloudformation:ValidateTemplate",
20 | "autoscaling:CreateAutoScalingGroup",
21 | "autoscaling:CreateLaunchConfiguration",
22 | "autoscaling:DeleteLaunchConfiguration",
23 | "autoscaling:Describe*",
24 | "autoscaling:UpdateAutoScalingGroup",
25 | "ec2:AllocateAddress",
26 | "ec2:AssociateAddress",
27 | "ec2:AssociateRouteTable",
28 | "ec2:AttachInternetGateway",
29 | "ec2:AuthorizeSecurityGroupEgress",
30 | "ec2:AuthorizeSecurityGroupIngress",
31 | "ec2:CreateInternetGateway",
32 | "ec2:CreateNatGateway",
33 | "ec2:CreateRoute",
34 | "ec2:CreateRouteTable",
35 | "ec2:CreateSecurityGroup",
36 | "ec2:CreateSubnet",
37 | "ec2:CreateTags",
38 | "ec2:CreateVpc",
39 | "ec2:Describe*",
40 | "ec2:Modify*",
41 | "ec2:RevokeSecurityGroupEgress",
42 | "elasticloadbalancing:CreateLoadBalancer",
43 | "elasticloadbalancing:CreateLoadBalancerListeners",
44 | "elasticloadbalancing:Describe*",
45 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
46 | "elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
47 | "rds:CreateDBInstance",
48 | "rds:CreateDBSubnetGroup",
49 | "rds:Describe*"
50 | ],
51 | "Resource": [
52 | "*"
53 | ]
54 | }
55 | ]
56 | }
57 |
--------------------------------------------------------------------------------
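
The policy above is intended for the IAM user that will drive CloudFormation. A rough sketch of attaching it as an inline policy with the AWS CLI (the user name cfn-user is only an example):

    aws iam put-user-policy \
        --user-name cfn-user \
        --policy-name cloudformation-provisioning \
        --policy-document file://iam_user_policy.json

    aws iam list-user-policies --user-name cfn-user   # verify it is attached
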
/Chapter 2/CloudFormation/main.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion" : "2010-09-09",
3 | "Description" : "Provisions EC2, ELB, ASG and RDS resources",
4 | "Parameters" : {
5 | "vpcCidr" : {
6 | "Description" : "VPC CIDR",
7 | "Type" : "String"
8 | },
9 | "vpcName" : {
10 | "Description" : "VPC name",
11 | "Type" : "String"
12 | },
13 | "awsAvailabilityZones" : {
14 | "Description" : "List of AZs",
15 | "Type" : "CommaDelimitedList"
16 | },
17 | "publicCidr" : {
18 | "Description" : "List of public subnet CIDRs",
19 | "Type" : "CommaDelimitedList"
20 | },
21 | "privateCidr" : {
22 | "Description" : "List of private subnet CIDRs",
23 | "Type" : "CommaDelimitedList"
24 | },
25 | "rdsIdentifier" : {
26 | "Description" : "RDS instance identifier",
27 | "Type" : "String"
28 | },
29 | "rdsStorageSize" : {
30 | "Description" : "Storage size in GB",
31 | "Type" : "Number",
32 | "MinValue" : "5"
33 | },
34 | "rdsStorageType" : {
35 | "Description" : "Storage type: standard/gp2",
36 | "Type" : "String",
37 | "AllowedValues" : ["standard", "gp2"]
38 | },
39 | "rdsEngine" : {
40 | "Description" : "RDS type",
41 | "Type" : "String",
42 | "AllowedValues" : ["postgres", "mysql"]
43 | },
44 | "rdsEngineVersion" : {
45 | "Description" : "RDS engine version",
46 | "Type" : "String"
47 | },
48 | "rdsInstanceClass" : {
49 | "Description" : "RDS instance class",
50 | "Type" : "String",
51 | "AllowedValues" : ["db.t2.micro", "db.t2.small", "db.t2.medium"]
52 | },
53 | "rdsUsername" : {
54 | "Description" : "RDS username",
55 | "Type" : "String"
56 | },
57 | "rdsPassword" : {
58 | "Description" : "RDS password",
59 | "Type" : "String",
60 | "NoEcho" : "true"
61 | },
62 | "rdsPort" : {
63 | "Description" : "RDS port number",
64 | "Type" : "Number"
65 | },
66 | "autoscalingGroupMinsize" : {
67 | "Description" : "Min size of the ASG",
68 | "Type" : "Number"
69 | },
70 | "autoscalingGroupMaxsize" : {
71 | "Description" : "Max size of the ASG",
72 | "Type" : "Number"
73 | },
74 | "autoscalingGroupInstanceType" : {
75 | "Description" : "EC2 instance type",
76 | "Type" : "String",
77 | "AllowedValues" : ["t2.nano", "t2.micro", "t2.small"]
78 | },
79 | "autoscalingGroupKeyname" : {
80 | "Description" : "EC2 ssh key name",
81 | "Type" : "AWS::EC2::KeyPair::KeyName"
82 | },
83 | "autoscalingGroupImageId" : {
84 | "Description" : "EC2 AMI ID",
85 | "Type" : "AWS::EC2::Image::Id"
86 | }
87 | },
88 | "Resources" : {
89 | "vpc" : {
90 | "Type" : "AWS::EC2::VPC",
91 | "Properties" : {
92 | "CidrBlock" : { "Ref" : "vpcCidr" },
93 | "EnableDnsSupport" : "true",
94 | "EnableDnsHostnames" : "true",
95 | "Tags" : [ { "Key" : "Name", "Value" : { "Ref" : "vpcName" } } ]
96 | }
97 | },
98 | "publicSubnet1" : {
99 | "Type" : "AWS::EC2::Subnet",
100 | "Properties" : {
101 | "AvailabilityZone" : { "Fn::Select" : [ "0", {"Ref" : "awsAvailabilityZones"} ] },
102 | "CidrBlock" : { "Fn::Select" : [ "0", {"Ref" : "publicCidr"} ] },
103 | "MapPublicIpOnLaunch" : "true",
104 | "Tags" : [ { "Key" : "Name", "Value" : "Public" } ],
105 | "VpcId" : { "Ref" : "vpc" }
106 | }
107 | },
108 | "publicSubnet2" : {
109 | "Type" : "AWS::EC2::Subnet",
110 | "Properties" : {
111 | "AvailabilityZone" : { "Fn::Select" : [ "1", {"Ref" : "awsAvailabilityZones"} ] },
112 | "CidrBlock" : { "Fn::Select" : [ "1", {"Ref" : "publicCidr"} ] },
113 | "MapPublicIpOnLaunch" : "true",
114 | "Tags" : [ { "Key" : "Name", "Value" : "Public" } ],
115 | "VpcId" : { "Ref" : "vpc" }
116 | }
117 | },
118 | "privateSubnet1" : {
119 | "Type" : "AWS::EC2::Subnet",
120 | "Properties" : {
121 | "AvailabilityZone" : { "Fn::Select" : [ "0", {"Ref" : "awsAvailabilityZones"} ] },
122 | "CidrBlock" : { "Fn::Select" : [ "0", {"Ref" : "privateCidr"} ] },
123 | "MapPublicIpOnLaunch" : "false",
124 | "Tags" : [ { "Key" : "Name", "Value" : "Private" } ],
125 | "VpcId" : { "Ref" : "vpc" }
126 | }
127 | },
128 | "privateSubnet2" : {
129 | "Type" : "AWS::EC2::Subnet",
130 | "Properties" : {
131 | "AvailabilityZone" : { "Fn::Select" : [ "1", {"Ref" : "awsAvailabilityZones"} ] },
132 | "CidrBlock" : { "Fn::Select" : [ "1", {"Ref" : "privateCidr"} ] },
133 | "MapPublicIpOnLaunch" : "false",
134 | "Tags" : [ { "Key" : "Name", "Value" : "Private" } ],
135 | "VpcId" : { "Ref" : "vpc" }
136 | }
137 | },
138 | "internetGateway" : {
139 | "Type" : "AWS::EC2::InternetGateway",
140 | "Properties" : {
141 | "Tags" : [ { "Key" : "Name", "Value" : { "Fn::Join" : [ " - ", [ { "Ref" : "vpcName" }, "IGW" ] ] } } ]
142 | }
143 | },
144 | "internetGatewayAttachment" : {
145 | "Type" : "AWS::EC2::VPCGatewayAttachment",
146 | "Properties" : {
147 | "InternetGatewayId" : { "Ref" : "internetGateway" },
148 | "VpcId" : { "Ref" : "vpc" }
149 | }
150 | },
151 | "natEip" : {
152 | "Type" : "AWS::EC2::EIP",
153 | "Properties" : {
154 | "Domain" : "vpc"
155 | }
156 | },
157 | "natGateway" : {
158 | "Type" : "AWS::EC2::NatGateway",
159 | "Properties" : {
160 | "AllocationId" : { "Fn::GetAtt" : ["natEip", "AllocationId"]},
161 | "SubnetId" : { "Ref" : "publicSubnet1" }
162 | },
163 | "DependsOn" : "internetGatewayAttachment"
164 | },
165 | "publicRouteTable" : {
166 | "Type" : "AWS::EC2::RouteTable",
167 | "Properties" : {
168 | "VpcId" : { "Ref" : "vpc" },
169 | "Tags" : [ { "Key" : "Name", "Value" : "Public" } ]
170 | }
171 | },
172 | "publicRouteTableRoute" : {
173 | "Type" : "AWS::EC2::Route",
174 | "Properties" : {
175 | "DestinationCidrBlock" : "0.0.0.0/0",
176 | "GatewayId" : { "Ref" : "internetGateway" },
177 | "RouteTableId" : { "Ref" : "publicRouteTable" }
178 | }
179 | },
180 | "privateRouteTable" : {
181 | "Type" : "AWS::EC2::RouteTable",
182 | "Properties" : {
183 | "VpcId" : { "Ref" : "vpc" },
184 | "Tags" : [ { "Key" : "Name", "Value" : "Private" } ]
185 | }
186 | },
187 | "privateRouteTableRoute" : {
188 | "Type" : "AWS::EC2::Route",
189 | "Properties" : {
190 | "DestinationCidrBlock" : "0.0.0.0/0",
191 | "NatGatewayId" : { "Ref" : "natGateway" },
192 | "RouteTableId" : { "Ref" : "privateRouteTable" }
193 | }
194 | },
195 | "publicSubnet1Association" : {
196 | "Type" : "AWS::EC2::SubnetRouteTableAssociation",
197 | "Properties" : {
198 | "RouteTableId" : { "Ref" : "publicRouteTable" },
199 | "SubnetId" : { "Ref" : "publicSubnet1" }
200 | }
201 | },
202 | "publicSubnet2Association" : {
203 | "Type" : "AWS::EC2::SubnetRouteTableAssociation",
204 | "Properties" : {
205 | "RouteTableId" : { "Ref" : "publicRouteTable" },
206 | "SubnetId" : { "Ref" : "publicSubnet2" }
207 | }
208 | },
209 | "privateSubnet1Association" : {
210 | "Type" : "AWS::EC2::SubnetRouteTableAssociation",
211 | "Properties" : {
212 | "RouteTableId" : { "Ref" : "privateRouteTable" },
213 | "SubnetId" : { "Ref" : "privateSubnet1" }
214 | }
215 | },
216 | "privateSubnet2Association" : {
217 | "Type" : "AWS::EC2::SubnetRouteTableAssociation",
218 | "Properties" : {
219 | "RouteTableId" : { "Ref" : "privateRouteTable" },
220 | "SubnetId" : { "Ref" : "privateSubnet2" }
221 | }
222 | },
223 | "rdsSecurityGroup" : {
224 | "Type" : "AWS::EC2::SecurityGroup",
225 | "Properties" : {
226 | "SecurityGroupIngress" : [
227 | {
228 | "ToPort" : { "Ref" : "rdsPort" },
229 | "FromPort" : { "Ref" : "rdsPort" },
230 | "IpProtocol" : "tcp",
231 | "SourceSecurityGroupId" : { "Ref" : "ec2SecurityGroup" }
232 | }
233 | ],
234 | "VpcId" : { "Ref" : "vpc" },
235 | "GroupDescription" : "RDS Security Group",
236 | "Tags" : [ { "Key" : "Name", "Value" : { "Ref" : "rdsIdentifier" } } ]
237 | }
238 | },
239 | "rdsSubnetGroup" : {
240 | "Type" : "AWS::RDS::DBSubnetGroup",
241 | "Properties" : {
242 | "SubnetIds" : [ { "Ref" : "privateSubnet1" }, { "Ref" : "privateSubnet2" } ],
243 | "DBSubnetGroupDescription" : "CloudFormation RDS subnet group",
244 | "Tags" : [ { "Key" : "Name", "Value" : { "Ref" : "rdsIdentifier" } } ]
245 | }
246 | },
247 | "rdsInstance" : {
248 | "Type" : "AWS::RDS::DBInstance",
249 | "Properties" : {
250 | "DBInstanceIdentifier" : { "Ref" : "rdsIdentifier" },
251 | "DBInstanceClass" : { "Ref" : "rdsInstanceClass" },
252 | "DBSubnetGroupName" : { "Ref" : "rdsSubnetGroup" },
253 | "Engine" : { "Ref" : "rdsEngine" },
254 | "EngineVersion" : { "Ref" : "rdsEngineVersion" },
255 | "MasterUserPassword" : { "Ref" : "rdsPassword" },
256 | "MasterUsername" : { "Ref" : "rdsUsername" },
257 | "StorageType" : { "Ref" : "rdsStorageType" },
258 | "AllocatedStorage" : { "Ref" : "rdsStorageSize" },
259 | "VPCSecurityGroups" : [ { "Ref" : "rdsSecurityGroup" } ],
260 | "Tags" : [ { "Key" : "Name", "Value" : { "Ref" : "rdsIdentifier" } } ]
261 | }
262 | },
263 | "elbSecurityGroup" : {
264 | "Type" : "AWS::EC2::SecurityGroup",
265 | "Properties" : {
266 | "SecurityGroupIngress" : [ { "ToPort" : "80", "FromPort" : "80", "IpProtocol" : "tcp", "CidrIp" : "0.0.0.0/0" } ],
267 | "VpcId" : { "Ref" : "vpc" },
268 | "GroupDescription" : "ELB Security Group",
269 | "Tags" : [ { "Key" : "Name", "Value" : "cloudformation-elb"} ]
270 | }
271 | },
272 | "elbInstance" : {
273 | "Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
274 | "Properties" : {
275 | "LoadBalancerName" : "cloudformation-elb",
276 | "Listeners" : [ { "InstancePort" : "80", "InstanceProtocol" : "HTTP", "LoadBalancerPort" : "80", "Protocol" : "HTTP" } ],
277 | "SecurityGroups" : [ { "Ref" : "elbSecurityGroup" } ],
278 | "Subnets" : [ { "Ref" : "publicSubnet1" }, { "Ref" : "publicSubnet2" } ],
279 | "Tags" : [ { "Key" : "Name", "Value" : "cloudformation-elb" } ]
280 | }
281 | },
282 | "ec2SecurityGroup" : {
283 | "Type" : "AWS::EC2::SecurityGroup",
284 | "Properties" : {
285 | "SecurityGroupIngress" : [
286 | {
287 | "ToPort" : "80",
288 | "FromPort" : "80",
289 | "IpProtocol" : "tcp",
290 | "SourceSecurityGroupId" : { "Ref" : "elbSecurityGroup" }
291 | }
292 | ],
293 | "VpcId" : { "Ref" : "vpc" },
294 | "GroupDescription" : "Ec2 Security Group",
295 | "Tags" : [ { "Key" : "Name", "Value" : "cloudformation-ec2" } ]
296 | }
297 | },
298 | "launchConfiguration" : {
299 | "Type" : "AWS::AutoScaling::LaunchConfiguration",
300 | "Properties" : {
301 | "ImageId" : { "Ref": "autoscalingGroupImageId" },
302 | "InstanceType" : { "Ref" : "autoscalingGroupInstanceType" },
303 | "KeyName" : { "Ref" : "autoscalingGroupKeyname" },
304 | "SecurityGroups" : [ { "Ref" : "ec2SecurityGroup" } ],
305 | "UserData" : {
306 | "Fn::Base64" : {
307 | "Fn::Join" : [
308 | "\n",
309 | [
310 | "#!/bin/bash",
311 | "set -euf -o pipefail",
312 | "exec 1> >(logger -s -t $(basename $0)) 2>&1",
313 | "yum -y install nginx; chkconfig nginx on; service nginx start"
314 | ]
315 | ]
316 | }
317 | }
318 | }
319 | },
320 | "autoScalingGroup" : {
321 | "Type" : "AWS::AutoScaling::AutoScalingGroup",
322 | "Properties" : {
323 | "LaunchConfigurationName" : { "Ref" : "launchConfiguration" },
324 | "DesiredCapacity" : "1",
325 | "MinSize" : "1",
326 | "MaxSize" : "1",
327 | "LoadBalancerNames" : [ { "Ref" : "elbInstance" } ],
328 | "VPCZoneIdentifier" : [ { "Ref" : "privateSubnet1" }, { "Ref" : "privateSubnet2" } ],
329 | "Tags" : [ { "Key" : "Name", "Value" : "cloudformation-asg", "PropagateAtLaunch" : "true" } ]
330 | },
331 | "DependsOn" : "rdsInstance"
332 | }
333 | },
334 | "Outputs" : {
335 | "vpcId" : {
336 | "Description" : "VPC ID",
337 | "Value" : { "Ref" : "vpc" }
338 | },
339 | "natEip" : {
340 | "Description" : "NAT IP address",
341 | "Value" : { "Ref" : "natEip" }
342 | },
343 | "elbDns" : {
344 | "Description" : "ELB DNS",
345 | "Value" : { "Fn::GetAtt" : [ "elbInstance", "DNSName" ] }
346 | }
347 | }
348 | }
349 |
--------------------------------------------------------------------------------
/Chapter 2/CloudFormation/parameters.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "ParameterKey": "vpcCidr",
4 | "ParameterValue": "10.0.0.0/16"
5 | },
6 | {
7 | "ParameterKey": "vpcName",
8 | "ParameterValue": "CloudFormation"
9 | },
10 | {
11 | "ParameterKey": "awsAvailabilityZones",
12 | "ParameterValue": "us-east-1b,us-east-1c"
13 | },
14 | {
15 | "ParameterKey": "publicCidr",
16 | "ParameterValue": "10.0.1.0/24,10.0.3.0/24"
17 | },
18 | {
19 | "ParameterKey": "privateCidr",
20 | "ParameterValue": "10.0.2.0/24,10.0.4.0/24"
21 | },
22 | {
23 | "ParameterKey": "rdsIdentifier",
24 | "ParameterValue": "cloudformation"
25 | },
26 | {
27 | "ParameterKey": "rdsStorageSize",
28 | "ParameterValue": "5"
29 | },
30 | {
31 | "ParameterKey": "rdsStorageType",
32 | "ParameterValue": "gp2"
33 | },
34 | {
35 | "ParameterKey": "rdsEngine",
36 | "ParameterValue": "postgres"
37 | },
38 | {
39 | "ParameterKey": "rdsEngineVersion",
40 | "ParameterValue": "9.5.2"
41 | },
42 | {
43 | "ParameterKey": "rdsInstanceClass",
44 | "ParameterValue": "db.t2.micro"
45 | },
46 | {
47 | "ParameterKey": "rdsUsername",
48 | "ParameterValue": "root"
49 | },
50 | {
51 | "ParameterKey": "rdsPassword",
52 | "ParameterValue": "YouShouldChangeThisHerePassword"
53 | },
54 | {
55 | "ParameterKey": "rdsPort",
56 | "ParameterValue": "5432"
57 | },
58 | {
59 | "ParameterKey": "autoscalingGroupMinsize",
60 | "ParameterValue": "1"
61 | },
62 | {
63 | "ParameterKey": "autoscalingGroupMaxsize",
64 | "ParameterValue": "1"
65 | },
66 | {
67 | "ParameterKey": "autoscalingGroupInstanceType",
68 | "ParameterValue": "t2.nano"
69 | },
70 | {
71 | "ParameterKey": "autoscalingGroupKeyname",
72 | "ParameterValue": "cloudformation"
73 | },
74 | {
75 | "ParameterKey": "autoscalingGroupImageId",
76 | "ParameterValue": "ami-08111162"
77 | }
78 | ]
79 |
--------------------------------------------------------------------------------
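
With main.json and parameters.json side by side, the stack can be launched from the AWS CLI; a sketch (the stack name cloudformation is only an example):

    aws cloudformation validate-template --template-body file://main.json

    aws cloudformation create-stack \
        --stack-name cloudformation \
        --template-body file://main.json \
        --parameters file://parameters.json

    aws cloudformation describe-stacks --stack-name cloudformation   # watch progress and outputs
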
/Chapter 2/Terraform/iam_user_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "Stmt1461764665000",
6 | "Effect": "Allow",
7 | "Action": [
8 | "autoscaling:CreateAutoScalingGroup",
9 | "autoscaling:CreateLaunchConfiguration",
10 | "autoscaling:DeleteLaunchConfiguration",
11 | "autoscaling:Describe*",
12 | "autoscaling:UpdateAutoScalingGroup",
13 | "ec2:AllocateAddress",
14 | "ec2:AssociateAddress",
15 | "ec2:AssociateRouteTable",
16 | "ec2:AttachInternetGateway",
17 | "ec2:AuthorizeSecurityGroupEgress",
18 | "ec2:AuthorizeSecurityGroupIngress",
19 | "ec2:CreateInternetGateway",
20 | "ec2:CreateNatGateway",
21 | "ec2:CreateRoute",
22 | "ec2:CreateRouteTable",
23 | "ec2:CreateSecurityGroup",
24 | "ec2:CreateSubnet",
25 | "ec2:CreateTags",
26 | "ec2:CreateVpc",
27 | "ec2:Describe*",
28 | "ec2:ModifySubnetAttribute",
29 | "ec2:RevokeSecurityGroupEgress",
30 | "elasticloadbalancing:AddTags",
31 | "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
32 | "elasticloadbalancing:AttachLoadBalancerToSubnets",
33 | "elasticloadbalancing:CreateLoadBalancer",
34 | "elasticloadbalancing:CreateLoadBalancerListeners",
35 | "elasticloadbalancing:Describe*",
36 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
37 | "rds:CreateDBInstance",
38 | "rds:CreateDBSubnetGroup",
39 | "rds:Describe*"
40 | ],
41 | "Resource": [
42 | "*"
43 | ]
44 | }
45 | ]
46 | }
47 |
--------------------------------------------------------------------------------
/Chapter 2/Terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "VPC ID" {
2 | value = "${aws_vpc.terraform-vpc.id}"
3 | }
4 |
5 | output "NAT EIP" {
6 | value = "${aws_nat_gateway.terraform-nat.public_ip}"
7 | }
8 |
9 | output "ELB URI" {
10 | value = "${aws_elb.terraform-elb.dns_name}"
11 | }
12 |
13 | output "RDS Endpoint" {
14 | value = "${aws_db_instance.terraform.endpoint}"
15 | }
16 |
--------------------------------------------------------------------------------
/Chapter 2/Terraform/resources.tf:
--------------------------------------------------------------------------------
1 | # Set a Provider
2 | provider "aws" {
3 | region = "${var.aws-region}"
4 | }
5 |
6 | ### VPC ###
7 |
8 | # Create a VPC
9 | resource "aws_vpc" "terraform-vpc" {
10 | cidr_block = "${var.vpc-cidr}"
11 |
12 | tags {
13 | Name = "${var.vpc-name}"
14 | }
15 | }
16 |
17 | # Create an Internet Gateway
18 | resource "aws_internet_gateway" "terraform-igw" {
19 | vpc_id = "${aws_vpc.terraform-vpc.id}"
20 | }
21 |
22 | # Create NAT
23 | resource "aws_eip" "nat-eip" {
24 | vpc = true
25 | }
26 |
27 | resource "aws_nat_gateway" "terraform-nat" {
28 | allocation_id = "${aws_eip.nat-eip.id}"
29 | subnet_id = "${aws_subnet.public-1.id}"
30 | depends_on = ["aws_internet_gateway.terraform-igw"]
31 | }
32 |
33 | # Create public and private route tables
34 | resource "aws_route_table" "public" {
35 | vpc_id = "${aws_vpc.terraform-vpc.id}"
36 | route {
37 | cidr_block = "0.0.0.0/0"
38 | gateway_id = "${aws_internet_gateway.terraform-igw.id}"
39 | }
40 |
41 | tags {
42 | Name = "Public"
43 | }
44 | }
45 |
46 | resource "aws_route_table" "private" {
47 | vpc_id = "${aws_vpc.terraform-vpc.id}"
48 | route {
49 | cidr_block = "0.0.0.0/0"
50 | nat_gateway_id = "${aws_nat_gateway.terraform-nat.id}"
51 | }
52 |
53 | tags {
54 | Name = "Private"
55 | }
56 | }
57 |
58 | # Create and associate public subnets with a route table
59 | resource "aws_subnet" "public-1" {
60 | vpc_id = "${aws_vpc.terraform-vpc.id}"
61 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 1)}"
62 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
63 | map_public_ip_on_launch = true
64 |
65 | tags {
66 | Name = "Public"
67 | }
68 | }
69 |
70 | resource "aws_route_table_association" "public-1" {
71 | subnet_id = "${aws_subnet.public-1.id}"
72 | route_table_id = "${aws_route_table.public.id}"
73 | }
74 |
75 | resource "aws_subnet" "public-2" {
76 | vpc_id = "${aws_vpc.terraform-vpc.id}"
77 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 3)}"
78 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index + 1)}"
79 | map_public_ip_on_launch = true
80 |
81 | tags {
82 | Name = "Public"
83 | }
84 | }
85 |
86 | resource "aws_route_table_association" "public-2" {
87 | subnet_id = "${aws_subnet.public-2.id}"
88 | route_table_id = "${aws_route_table.public.id}"
89 | }
90 |
91 | # Create and associate private subnets with a route table
92 | resource "aws_subnet" "private-1" {
93 | vpc_id = "${aws_vpc.terraform-vpc.id}"
94 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 2)}"
95 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
96 | map_public_ip_on_launch = false
97 |
98 | tags {
99 | Name = "Private"
100 | }
101 | }
102 |
103 | resource "aws_route_table_association" "private-1" {
104 | subnet_id = "${aws_subnet.private-1.id}"
105 | route_table_id = "${aws_route_table.private.id}"
106 | }
107 |
108 | resource "aws_subnet" "private-2" {
109 | vpc_id = "${aws_vpc.terraform-vpc.id}"
110 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 4)}"
111 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index + 1)}"
112 | map_public_ip_on_launch = false
113 |
114 | tags {
115 | Name = "Private"
116 | }
117 | }
118 |
119 | resource "aws_route_table_association" "private-2" {
120 | subnet_id = "${aws_subnet.private-2.id}"
121 | route_table_id = "${aws_route_table.private.id}"
122 | }
123 |
124 |
125 | ### RDS ###
126 |
127 | resource "aws_security_group" "terraform-rds" {
128 | name = "terraform-rds"
129 | description = "RDS security group"
130 | vpc_id = "${aws_vpc.terraform-vpc.id}"
131 |
132 | ingress {
133 | from_port = "${var.rds-port}"
134 | to_port = "${var.rds-port}"
135 | protocol = "tcp"
136 | security_groups = ["${aws_security_group.terraform-ec2.id}"]
137 | }
138 |
139 | egress {
140 | from_port = 0
141 | to_port = 0
142 | protocol = "-1"
143 | cidr_blocks = ["0.0.0.0/0"]
144 | }
145 | }
146 |
147 | resource "aws_db_subnet_group" "rds" {
148 | name = "rds_subnet_group"
149 | description = "RDS subnet group"
150 | subnet_ids = ["${aws_subnet.private-1.id}", "${aws_subnet.private-2.id}"]
151 | }
152 |
153 | resource "aws_db_instance" "terraform" {
154 | identifier = "${var.rds-identifier}"
155 | allocated_storage = "${var.rds-storage-size}"
156 | storage_type= "${var.rds-storage-type}"
157 | engine = "${var.rds-engine}"
158 | engine_version = "${var.rds-engine-version}"
159 | instance_class = "${var.rds-instance-class}"
160 | username = "${var.rds-username}"
161 | password = "${var.rds-password}"
162 | port = "${var.rds-port}"
163 | vpc_security_group_ids = ["${aws_security_group.terraform-rds.id}"]
164 | db_subnet_group_name = "${aws_db_subnet_group.rds.id}"
165 | }
166 |
167 |
168 | ### ELB ###
169 |
170 | resource "aws_security_group" "terraform-elb" {
171 | name = "terraform-elb"
172 | description = "ELB security group"
173 | vpc_id = "${aws_vpc.terraform-vpc.id}"
174 |
175 | ingress {
176 | from_port = "80"
177 | to_port = "80"
178 | protocol = "tcp"
179 | cidr_blocks = ["0.0.0.0/0"]
180 | }
181 |
182 | egress {
183 | from_port = 0
184 | to_port = 0
185 | protocol = "-1"
186 | cidr_blocks = ["0.0.0.0/0"]
187 | }
188 |
189 | }
190 |
191 | resource "aws_elb" "terraform-elb" {
192 | name = "terraform-elb"
193 | security_groups = ["${aws_security_group.terraform-elb.id}"]
194 | subnets = ["${aws_subnet.public-1.id}", "${aws_subnet.public-2.id}"]
195 |
196 | listener {
197 | instance_port = 80
198 | instance_protocol = "http"
199 | lb_port = 80
200 | lb_protocol = "http"
201 | }
202 |
203 | tags {
204 | Name = "terraform-elb"
205 | }
206 | }
207 |
208 |
209 | ### EC2 ###
210 |
211 | resource "aws_security_group" "terraform-ec2" {
212 | name = "terraform-ec2"
213 | description = "ec2 instance security group"
214 | vpc_id = "${aws_vpc.terraform-vpc.id}"
215 |
216 | ingress {
217 | from_port = "80"
218 | to_port = "80"
219 | protocol = "tcp"
220 | security_groups = ["${aws_security_group.terraform-elb.id}"]
221 | }
222 |
223 | egress {
224 | from_port = 0
225 | to_port = 0
226 | protocol = "-1"
227 | cidr_blocks = ["0.0.0.0/0"]
228 | }
229 |
230 | }
231 |
232 | resource "aws_launch_configuration" "terraform-lcfg" {
233 | image_id = "${var.autoscaling-group-image-id}"
234 | instance_type = "${var.autoscaling-group-instance-type}"
235 | key_name = "${var.autoscaling-group-key-name}"
236 | security_groups = ["${aws_security_group.terraform-ec2.id}"]
237 | user_data = "#!/bin/bash \n set -euf -o pipefail \n exec 1> >(logger -s -t $(basename $0)) 2>&1 \n yum -y install nginx; chkconfig nginx on; service nginx start"
238 |
239 | lifecycle {
240 | create_before_destroy = true
241 | }
242 | }
243 |
244 | resource "aws_autoscaling_group" "terraform-asg" {
245 | name = "terraform"
246 | launch_configuration = "${aws_launch_configuration.terraform-lcfg.id}"
247 | vpc_zone_identifier = ["${aws_subnet.private-1.id}", "${aws_subnet.private-2.id}"]
248 | min_size = "${var.autoscaling-group-minsize}"
249 | max_size = "${var.autoscaling-group-maxsize}"
250 | load_balancers = ["${aws_elb.terraform-elb.name}"]
251 | depends_on = ["aws_db_instance.terraform"]
252 |
253 | tag {
254 | key = "Name"
255 | value = "terraform"
256 | propagate_at_launch = true
257 | }
258 | }
259 |
--------------------------------------------------------------------------------
/Chapter 2/Terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | autoscaling-group-image-id = "ami-08111162"
2 | autoscaling-group-instance-type = "t2.nano"
3 | autoscaling-group-key-name = "terraform"
4 | autoscaling-group-maxsize = "1"
5 | autoscaling-group-minsize = "1"
6 | aws-availability-zones = "us-east-1b,us-east-1c"
7 | aws-region = "us-east-1"
8 | rds-engine = "postgres"
9 | rds-engine-version = "9.5.2"
10 | rds-identifier = "terraform-rds"
11 | rds-instance-class = "db.t2.micro"
12 | rds-port = "5432"
13 | rds-storage-size = "5"
14 | rds-storage-type = "gp2"
15 | rds-username = "dbroot"
16 | rds-password = "donotusethispassword"
17 | vpc-cidr = "10.0.0.0/16"
18 | vpc-name = "Terraform"
19 |
--------------------------------------------------------------------------------
/Chapter 2/Terraform/variables.tf:
--------------------------------------------------------------------------------
1 | ### VPC ###
2 | variable "aws-region" {
3 | type = "string"
4 | description = "AWS region"
5 | }
6 | variable "aws-availability-zones" {
7 | type = "string"
8 | description = "AWS zones"
9 | }
10 | variable "vpc-cidr" {
11 | type = "string"
12 | description = "VPC CIDR"
13 | }
14 | variable "vpc-name" {
15 | type = "string"
16 | description = "VPC name"
17 | }
18 |
19 | ### RDS ###
20 | variable "rds-identifier" {
21 | type = "string"
22 | description = "RDS instance identifier"
23 | }
24 | variable "rds-storage-size" {
25 | type = "string"
26 | description = "Storage size in GB"
27 | }
28 | variable "rds-storage-type" {
29 | type = "string"
30 | description = "Storage type"
31 | }
32 | variable "rds-engine" {
33 | type = "string"
34 | description = "RDS type"
35 | }
36 | variable "rds-engine-version" {
37 | type = "string"
38 | description = "RDS version"
39 | }
40 | variable "rds-instance-class" {
41 | type = "string"
42 | description = "RDS instance class"
43 | }
44 | variable "rds-username" {
45 | type = "string"
46 | description = "RDS username"
47 | }
48 | variable "rds-password" {
49 | type = "string"
50 | description = "RDS password"
51 | }
52 | variable "rds-port" {
53 | type = "string"
54 | description = "RDS port number"
55 | }
56 |
57 | ### EC2 ###
58 | variable "autoscaling-group-minsize" {
59 | type = "string"
60 | description = "Min size of the ASG"
61 | }
62 | variable "autoscaling-group-maxsize" {
63 | type = "string"
64 | description = "Max size of the ASG"
65 | }
66 | variable "autoscaling-group-image-id" {
67 | type="string"
68 | description = "EC2 AMI identifier"
69 | }
70 | variable "autoscaling-group-instance-type" {
71 | type = "string"
72 | description = "EC2 instance type"
73 | }
74 | variable "autoscaling-group-key-name" {
75 | type = "string"
76 | description = "EC2 ssh key name"
77 | }
78 |
--------------------------------------------------------------------------------
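
Putting the Terraform files above to work follows the usual plan/apply cycle; a sketch, assuming AWS credentials are exported in the environment:

    export AWS_ACCESS_KEY_ID="..."
    export AWS_SECRET_ACCESS_KEY="..."

    terraform plan      # preview the VPC, ELB, ASG and RDS resources
    terraform apply     # create them; terraform.tfvars is picked up automatically
    terraform show      # inspect the resulting state
    terraform destroy   # tear everything down when finished
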
/Chapter 3/CodeCommit/salt/minion.d/masterless.conf:
--------------------------------------------------------------------------------
1 | file_client: local
2 | file_roots:
3 | base:
4 | - /srv/salt/states
5 | pillar_roots:
6 | base:
7 | - /srv/salt/pillars
8 |
--------------------------------------------------------------------------------
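
This file switches the minion into masterless mode, serving states and pillars from the local filesystem. Assuming the repository is cloned to /srv/salt, as the Chapter 3 user data does, a run can be triggered by hand with:

    cp -r /srv/salt/minion.d /etc/salt/   # put the masterless config in place
    salt-call state.apply                 # apply the highstate locally
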
/Chapter 3/CodeCommit/salt/pillars/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 |
--------------------------------------------------------------------------------
/Chapter 3/CodeCommit/salt/pillars/users.sls:
--------------------------------------------------------------------------------
1 | users:
2 | veselin:
3 | uid: 5001
4 | password: '$1$wZ0gQOOo$HEN/gDGS85dEZM7QZVlFz/'
5 |
--------------------------------------------------------------------------------
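
The password value is a crypt hash rather than plain text. One way to generate a compatible $1$ (MD5-crypt) hash for your own pillar, sketched with OpenSSL:

    openssl passwd -1 'MySecretPassword'   # prints a $1$... hash for the password field
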
/Chapter 3/CodeCommit/salt/states/nginx/files/default.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name *.amazonaws.com;
4 |
5 | root /var/www/html;
6 | index index.php index.html index.htm;
7 |
8 | location / {
9 | try_files $uri $uri/ =404;
10 | }
11 |
12 | location ~ \.php$ {
13 | try_files $uri =404;
14 | fastcgi_split_path_info ^(.+\.php)(/.+)$;
15 | fastcgi_pass 127.0.0.1:9000;
16 | fastcgi_index index.php;
17 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
18 | include fastcgi_params;
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
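
After the accompanying nginx state deploys this vhost to /etc/nginx/conf.d/, the configuration can also be checked and reloaded by hand, for example:

    nginx -t               # syntax-check the configuration
    service nginx reload   # pick up the new vhost
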
/Chapter 3/CodeCommit/salt/states/nginx/init.sls:
--------------------------------------------------------------------------------
1 | nginx:
2 | pkg.installed: []
3 |
4 | service.running:
5 | - enable: True
6 | - reload: True
7 | - require:
8 | - pkg: nginx
9 |
10 | /etc/nginx/conf.d/default.conf:
11 | file.managed:
12 | - source: salt://nginx/files/default.conf
13 | - require:
14 | - pkg: nginx
15 | - require_in:
16 | - service: nginx
17 | - watch_in:
18 | - service: nginx
19 |
--------------------------------------------------------------------------------
/Chapter 3/CodeCommit/salt/states/php-fpm/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | php-fpm:
5 | pkg.installed:
6 | - name: php-fpm
7 | - require:
8 | - pkg: nginx
9 |
10 | service.running:
11 | - name: php-fpm
12 | - enable: True
13 | - reload: True
14 | - require_in:
15 | - service: nginx
16 |
17 | php-fpm_www.conf_1:
18 | file.replace:
19 | - name: /etc/php-fpm.d/www.conf
20 | - pattern: ^user = apache$
21 | - repl: user = nginx
22 | - require:
23 | - pkg: php-fpm
24 | - require_in:
25 | - service: php-fpm
26 | - watch_in:
27 | - service: php-fpm
28 |
29 | php-fpm_www.conf_2:
30 | file.replace:
31 | - name: /etc/php-fpm.d/www.conf
32 | - pattern: ^group = apache$
33 | - repl: group = nginx
34 | - require:
35 | - pkg: php-fpm
36 | - require_in:
37 | - service: php-fpm
38 | - watch_in:
39 | - service: php-fpm
40 |
--------------------------------------------------------------------------------
/Chapter 3/CodeCommit/salt/states/phptest/init.sls:
--------------------------------------------------------------------------------
1 | {% set public_ipv4 = salt['cmd.shell']('ec2-metadata --public-ipv4 | awk \'{ print $2 }\'') %}
2 | {% set grains_ipv4 = salt['grains.get']('ipv4:0') %}
3 | {% set grains_os = salt['grains.get']('os') %}
4 | {% set grains_osmajorrelease = salt['grains.get']('osmajorrelease') %}
5 | {% set grains_num_cpus = salt['grains.get']('num_cpus') %}
6 | {% set grains_cpu_model = salt['grains.get']('cpu_model') %}
7 | {% set grains_mem_total = salt['grains.get']('mem_total') %}
8 |
9 | phptest:
10 | file.managed:
11 | - name: /var/www/html/index.php
12 | - makedirs: True
13 | - contents: |
14 | <?php echo 'Hello from {{ grains_ipv4 }}/{{ public_ipv4 }} running PHP ' . phpversion() . ' on {{ grains_os }} {{ grains_osmajorrelease }}.
15 | I come with {{ grains_num_cpus }} x {{ grains_cpu_model }} and {{ grains_mem_total }} MB of memory.';
16 | phpinfo(INFO_LICENSE);
17 | ?>
18 |
--------------------------------------------------------------------------------
/Chapter 3/CodeCommit/salt/states/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - nginx
5 | - php-fpm
6 | - phptest
7 |
--------------------------------------------------------------------------------
/Chapter 3/CodeCommit/salt/states/users/files/veselin.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EA...
2 |
--------------------------------------------------------------------------------
/Chapter 3/CodeCommit/salt/states/users/init.sls:
--------------------------------------------------------------------------------
1 | veselin:
2 | user.present:
3 | - fullname: Veselin Kantsev
4 | - uid: {{ salt['pillar.get']('users:veselin:uid') }}
5 | - password: {{ salt['pillar.get']('users:veselin:password') }}
6 | - groups:
7 | - wheel
8 |
9 | ssh_auth.present:
10 | - user: veselin
11 | - source: salt://users/files/veselin.pub
12 | - require:
13 | - user: veselin
14 |
15 | sudoers:
16 | file.managed:
17 | - name: /etc/sudoers.d/wheel
18 | - contents: '%wheel ALL=(ALL) ALL'
19 |
--------------------------------------------------------------------------------
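
The uid and password here come from the users pillar shown earlier; the lookups can be verified on the instance before a full run, for example:

    salt-call pillar.get users:veselin:uid
    salt-call pillar.get users:veselin:password
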
/Chapter 3/Terraform/iam_user_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "Stmt1461764665000",
6 | "Effect": "Allow",
7 | "Action": [
8 | "autoscaling:CreateAutoScalingGroup",
9 | "autoscaling:CreateLaunchConfiguration",
10 | "autoscaling:DeleteLaunchConfiguration",
11 | "autoscaling:Describe*",
12 | "autoscaling:UpdateAutoScalingGroup",
13 | "ec2:AllocateAddress",
14 | "ec2:AssociateAddress",
15 | "ec2:AssociateRouteTable",
16 | "ec2:AttachInternetGateway",
17 | "ec2:AuthorizeSecurityGroupEgress",
18 | "ec2:AuthorizeSecurityGroupIngress",
19 | "ec2:CreateInternetGateway",
20 | "ec2:CreateRoute",
21 | "ec2:CreateRouteTable",
22 | "ec2:CreateSecurityGroup",
23 | "ec2:CreateSubnet",
24 | "ec2:CreateTags",
25 | "ec2:CreateVpc",
26 | "ec2:Describe*",
27 | "ec2:ModifySubnetAttribute",
28 | "ec2:RevokeSecurityGroupEgress",
29 | "elasticloadbalancing:AddTags",
30 | "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
31 | "elasticloadbalancing:AttachLoadBalancerToSubnets",
32 | "elasticloadbalancing:CreateLoadBalancer",
33 | "elasticloadbalancing:CreateLoadBalancerListeners",
34 | "elasticloadbalancing:Describe*",
35 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
36 | "iam:AddRoleToInstanceProfile",
37 | "iam:CreateInstanceProfile",
38 | "iam:CreateRole",
39 | "iam:Get*",
40 | "iam:PassRole",
41 | "iam:PutRolePolicy"
42 | ],
43 | "Resource": [
44 | "*"
45 | ]
46 | }
47 | ]
48 | }
49 |
--------------------------------------------------------------------------------
/Chapter 3/Terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "VPC ID" {
2 | value = "${aws_vpc.terraform-vpc.id}"
3 | }
4 |
5 | output "ELB URI" {
6 | value = "${aws_elb.terraform-elb.dns_name}"
7 | }
8 |
--------------------------------------------------------------------------------
/Chapter 3/Terraform/resources.tf:
--------------------------------------------------------------------------------
1 | # Set a Provider
2 | provider "aws" {
3 | region = "${var.aws-region}"
4 | }
5 |
6 | ### VPC ###
7 |
8 | # Create a VPC
9 | resource "aws_vpc" "terraform-vpc" {
10 | cidr_block = "${var.vpc-cidr}"
11 |
12 | tags {
13 | Name = "${var.vpc-name}"
14 | }
15 | }
16 |
17 | # Create an Internet Gateway
18 | resource "aws_internet_gateway" "terraform-igw" {
19 | vpc_id = "${aws_vpc.terraform-vpc.id}"
20 | }
21 |
22 | # Create public route tables
23 | resource "aws_route_table" "public" {
24 | vpc_id = "${aws_vpc.terraform-vpc.id}"
25 | route {
26 | cidr_block = "0.0.0.0/0"
27 | gateway_id = "${aws_internet_gateway.terraform-igw.id}"
28 | }
29 |
30 | tags {
31 | Name = "Public"
32 | }
33 | }
34 |
35 | # Create and associate public subnets with a route table
36 | resource "aws_subnet" "public-1" {
37 | vpc_id = "${aws_vpc.terraform-vpc.id}"
38 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 1)}"
39 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
40 | map_public_ip_on_launch = true
41 |
42 | tags {
43 | Name = "Public"
44 | }
45 | }
46 |
47 | resource "aws_route_table_association" "public-1" {
48 | subnet_id = "${aws_subnet.public-1.id}"
49 | route_table_id = "${aws_route_table.public.id}"
50 | }
51 |
52 | resource "aws_subnet" "public-2" {
53 | vpc_id = "${aws_vpc.terraform-vpc.id}"
54 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 3)}"
55 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index + 1)}"
56 | map_public_ip_on_launch = true
57 |
58 | tags {
59 | Name = "Public"
60 | }
61 | }
62 |
63 | resource "aws_route_table_association" "public-2" {
64 | subnet_id = "${aws_subnet.public-2.id}"
65 | route_table_id = "${aws_route_table.public.id}"
66 | }
67 |
68 |
69 | ### ELB ###
70 |
71 | resource "aws_security_group" "terraform-elb" {
72 | name = "terraform-elb"
73 | description = "ELB security group"
74 | vpc_id = "${aws_vpc.terraform-vpc.id}"
75 |
76 | ingress {
77 | from_port = "80"
78 | to_port = "80"
79 | protocol = "tcp"
80 | cidr_blocks = ["0.0.0.0/0"]
81 | }
82 |
83 | egress {
84 | from_port = 0
85 | to_port = 0
86 | protocol = "-1"
87 | cidr_blocks = ["0.0.0.0/0"]
88 | }
89 |
90 | }
91 |
92 | resource "aws_elb" "terraform-elb" {
93 | name = "terraform-elb"
94 | security_groups = ["${aws_security_group.terraform-elb.id}"]
95 | subnets = ["${aws_subnet.public-1.id}", "${aws_subnet.public-2.id}"]
96 | cross_zone_load_balancing = true
97 |
98 | listener {
99 | instance_port = 80
100 | instance_protocol = "http"
101 | lb_port = 80
102 | lb_protocol = "http"
103 | }
104 |
105 | tags {
106 | Name = "terraform-elb"
107 | }
108 | }
109 |
110 |
111 | ### EC2 ###
112 |
113 | resource "aws_security_group" "terraform-ec2" {
114 | name = "terraform-ec2"
115 | description = "ec2 instance security group"
116 | vpc_id = "${aws_vpc.terraform-vpc.id}"
117 |
118 | ingress {
119 | from_port = "22"
120 | to_port = "22"
121 | protocol = "tcp"
122 | cidr_blocks = ["0.0.0.0/0"]
123 | }
124 |
125 | ingress {
126 | from_port = "80"
127 | to_port = "80"
128 | protocol = "tcp"
129 | security_groups = ["${aws_security_group.terraform-elb.id}"]
130 | }
131 |
132 | egress {
133 | from_port = 0
134 | to_port = 0
135 | protocol = "-1"
136 | cidr_blocks = ["0.0.0.0/0"]
137 | }
138 |
139 | }
140 |
141 |
142 | resource "aws_iam_role" "terraform-role" {
143 | name = "terraform-role"
144 | path = "/"
145 | assume_role_policy = < >(logger -s -t $(basename $0)) 2>&1
198 | # Install Git and set CodeCommit connection settings
199 | # (required for access via IAM roles)
200 | yum -y install git
201 | git config --system credential.helper '!aws codecommit credential-helper $@'
202 | git config --system credential.UseHttpPath true
203 | # Clone the Salt repository
204 | git clone https://git-codecommit.us-east-1.amazonaws.com/v1/repos/salt /srv/salt; chmod 700 /srv/salt
205 | # Install SaltStack
206 | yum -y install https://repo.saltstack.com/yum/amazon/salt-amzn-repo-latest-1.ami.noarch.rpm
207 | yum clean expire-cache; yum -y install salt-minion; chkconfig salt-minion off
208 | # Put custom minion config in place (for enabling masterless mode)
209 | cp -r /srv/salt/minion.d /etc/salt/
210 | # Trigger a full Salt run
211 | salt-call state.apply
212 | EOF
213 |
214 | lifecycle {
215 | create_before_destroy = true
216 | }
217 | }
218 |
219 | resource "aws_autoscaling_group" "terraform-asg" {
220 | name = "terraform"
221 | launch_configuration = "${aws_launch_configuration.terraform-lcfg.id}"
222 | vpc_zone_identifier = ["${aws_subnet.public-1.id}", "${aws_subnet.public-2.id}"]
223 | min_size = "${var.autoscaling-group-minsize}"
224 | max_size = "${var.autoscaling-group-maxsize}"
225 | load_balancers = ["${aws_elb.terraform-elb.name}"]
226 |
227 | tag {
228 | key = "Name"
229 | value = "terraform"
230 | propagate_at_launch = true
231 | }
232 | }
233 |
--------------------------------------------------------------------------------
/Chapter 3/Terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | autoscaling-group-image-id = "ami-08111162"
2 | autoscaling-group-instance-type = "t2.nano"
3 | autoscaling-group-key-name = "terraform"
4 | autoscaling-group-maxsize = "1"
5 | autoscaling-group-minsize = "1"
6 | aws-availability-zones = "us-east-1b,us-east-1c"
7 | aws-region = "us-east-1"
8 | vpc-cidr = "10.0.0.0/16"
9 | vpc-name = "Terraform"
10 |
--------------------------------------------------------------------------------
/Chapter 3/Terraform/variables.tf:
--------------------------------------------------------------------------------
1 | ### VPC ###
2 | variable "aws-region" {
3 | type = "string"
4 | description = "AWS region"
5 | }
6 | variable "aws-availability-zones" {
7 | type = "string"
8 | description = "AWS zones"
9 | }
10 | variable "vpc-cidr" {
11 | type = "string"
12 | description = "VPC CIDR"
13 | }
14 | variable "vpc-name" {
15 | type = "string"
16 | description = "VPC name"
17 | }
18 |
19 | ### EC2 ###
20 | variable "autoscaling-group-minsize" {
21 | type = "string"
22 | description = "Min size of the ASG"
23 | }
24 | variable "autoscaling-group-maxsize" {
25 | type = "string"
26 | description = "Max size of the ASG"
27 | }
28 | variable "autoscaling-group-image-id" {
29 | type="string"
30 | description = "EC2 AMI identifier"
31 | }
32 | variable "autoscaling-group-instance-type" {
33 | type = "string"
34 | description = "EC2 instance type"
35 | }
36 | variable "autoscaling-group-key-name" {
37 | type = "string"
38 | description = "EC2 ssh key name"
39 | }
40 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/demo-app/.gitignore:
--------------------------------------------------------------------------------
1 | rpm/
2 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/demo-app/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 |
3 | node {
4 |
5 | stage "Checkout Git repo"
6 | checkout scm
7 | stage "Run tests"
8 | sh "docker run -v \$(pwd):/app --rm phpunit/phpunit tests/"
9 | stage "Build RPM"
10 | sh "[ -d ./rpm ] || mkdir ./rpm"
11 | sh "docker run -v \$(pwd)/src:/data/demo-app -v \$(pwd)/rpm:/data/rpm --rm tenzer/fpm fpm -s dir -t rpm -n demo-app -v \$(git rev-parse --short HEAD) --description \"Demo PHP app\" --directories /var/www/demo-app --package /data/rpm/demo-app-\$(git rev-parse --short HEAD).rpm /data/demo-app=/var/www/"
12 | stage "Update YUM repo"
13 | sh "[ -d ~/repo/rpm/demo-app/ ] || mkdir -p ~/repo/rpm/demo-app/"
14 | sh "mv ./rpm/*.rpm ~/repo/rpm/demo-app/"
15 | sh "createrepo ~/repo/"
16 | sh "aws s3 sync ~/repo s3://MY_BUCKET_NAME/ --region us-east-1 --delete"
17 | stage "Check YUM repo"
18 | sh "yum clean all"
19 | sh "yum info demo-app-\$(git rev-parse --short HEAD)"
20 | }
21 |
--------------------------------------------------------------------------------
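
Once the pipeline has synced packages and metadata to S3, any instance with the yum-s3 (cob) plugin and the matching repo definition should be able to install the artifact; roughly:

    yum clean all
    yum info demo-app        # confirm the repo metadata is visible
    yum -y install demo-app  # or pin a specific build by its git short-hash version
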
/Chapter 4/CodeCommit/demo-app/src/index.php:
--------------------------------------------------------------------------------
1 | assertEquals($expected, $actual);
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/minion.d/masterless.conf:
--------------------------------------------------------------------------------
1 | file_client: local
2 | file_roots:
3 | base:
4 | - /srv/salt/states
5 | pillar_roots:
6 | base:
7 | - /srv/salt/pillars
8 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/pillars/nginx.sls:
--------------------------------------------------------------------------------
1 | nginx:
2 | crt: |
3 | -----BEGIN CERTIFICATE-----
4 | MIIDGjCCAgKgAwIBAgIJAOyoVgqZyUcZMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
5 | BAMTB2plbmtpbnMwHhcNMTYwNzA5MTQ1MjIwWhcNMjYwNzA3MTQ1MjIwWjASMRAw
6 | DgYDVQQDEwdqZW5raW5zMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
7 | rW0WfTqJjWuslGE5wmUfcDcEILADe/X7L4MbxpOs7EPlX4BQuU4bIJBnaP3taWgR
8 | 8X3+Mcje8emnP2iMh4NCngSqPLia77jP4zCfxDGxcd4giK1RQsufsVRdDoaQ4ibB
9 | y6gaC0VPmA8pCOZGTdh6S4WLxiMbKlaTC/LT92Z/0m4JXDtCPZMN3pPQL3aPE8yW
10 | xybo2fhnyozLsW4SgzBe92cSuROKIsYAaWv6WoWgSnN0vRASrEMPmiMzYindcMA0
11 | VIi9DfL1sQcsS90pBfPbX7AGe0I/rD0J8WEy5gXS+XTK9nPtVrt/Vu30yEDWiU7c
12 | C8vWI3qs6adb+s093EPw/QIDAQABo3MwcTAdBgNVHQ4EFgQU1ABZ6EroAlhhLeQS
13 | hFpa70vhmZQwQgYDVR0jBDswOYAU1ABZ6EroAlhhLeQShFpa70vhmZShFqQUMBIx
14 | EDAOBgNVBAMTB2plbmtpbnOCCQDsqFYKmclHGTAMBgNVHRMEBTADAQH/MA0GCSqG
15 | SIb3DQEBBQUAA4IBAQBbwLB4sCRlwEgL+ClYDdPIDp+3Uthf2tKt9VjlhF/zKxPD
16 | KcBOPVLxPNJoq4El/akTkbNpUhU2iS1jFS+tFA7Wxf8MqLr9Zc1aNmkoWzjqkHYS
17 | PLARumMruw4axiqCP2xtiqXhPMRvPVBTmhMeRsPBCA0QWCgCZmgLuo+xcg9cFBT3
18 | C2O6JY6qlb0XJv2y/7npPiFr2V18G2aweyaf/FvtFnODVmqSi+HecID8ZmH9OaN4
19 | 6HpZgJ0LOG7RUH9rVvPg9fdukQY20PhakAsXhOLdRp9ddODUA4dTG1ano3RW6f8n
20 | jJ5HhA0Cb0tRrezqHbdTeOB+91I731az3QmmgKbO
21 | -----END CERTIFICATE-----
22 | key: |
23 | -----BEGIN RSA PRIVATE KEY-----
24 | MIIEpAIBAAKCAQEArW0WfTqJjWuslGE5wmUfcDcEILADe/X7L4MbxpOs7EPlX4BQ
25 | uU4bIJBnaP3taWgR8X3+Mcje8emnP2iMh4NCngSqPLia77jP4zCfxDGxcd4giK1R
26 | QsufsVRdDoaQ4ibBy6gaC0VPmA8pCOZGTdh6S4WLxiMbKlaTC/LT92Z/0m4JXDtC
27 | PZMN3pPQL3aPE8yWxybo2fhnyozLsW4SgzBe92cSuROKIsYAaWv6WoWgSnN0vRAS
28 | rEMPmiMzYindcMA0VIi9DfL1sQcsS90pBfPbX7AGe0I/rD0J8WEy5gXS+XTK9nPt
29 | Vrt/Vu30yEDWiU7cC8vWI3qs6adb+s093EPw/QIDAQABAoIBAQCav7+UiNpavdym
30 | Hkd65d7ys7TUMhs5zpmPoM71F6ryu/b9i7L8Vuyv1wrfTc4+AyYXtdRPuizt8g9R
31 | 7kmPVhnohMMfIZ7nD6M415eIassqjwm6y+S51Javllbe8kZv9iNxRZPPwM4wIj78
32 | ePX82pDtuMGrUIIZ+lyGCe0IUob2Tc61lx4A1ai5KlU0aWfhzTOnBa3NWet5l2Gs
33 | bHryHcz/TFKKaYryG+LzKDU2yOrH07hz8IAMAlXFR9baZOwSn7T71SkNPYOvyuLp
34 | dm5vln8BcfkoEv6AU+Bi3+Pi7y1UeFXVfre2jIxWIH62bBAoBK10bgQZ3L7qE3+O
35 | XZamoLVlAoGBAOKlSZ66TkfPaHFgbg2I6L9CqdqFvlSu1XRXWEvl6hM4b22DrpC2
36 | G/mytGu0c+U93TQNY04JhgFDf9h61BIMj/awNyAODiMsRambAaqHCAi+Dgy6p5VK
37 | K1dmFLXbmu1qwabVhgZdCBl34g4ACtASAJGwTDEWpcDGcsocy72CXaKLAoGBAMPj
38 | PfOWuW9xrMEWfbvKXqruJ46bWyBnwaQBViSd/33O56rsLHUZRDg2JkDDA336U1o0
39 | eaRHQL7BLszCuvSQpzEVnamCVhXYZ9yzzVL7YdUk92TTKQqZCriB8Cx/TN6Lxci/
40 | AyDmLyHmrW2oU0YsaNWa9G73BMRA7X78rwkKwlOXAoGAKmJ+whBVU1iWT52Y9y8D
41 | V8E/wn4AehW4FWnAOXFltPJ45CIcIzPrR2cEFqBIjDZlh7Z5O77MMLBO2E0gG7/9
42 | rESICpaWTj2ZSX6TcTCPcBMazYaakHCuaknM1bWb44pzbJ/B0K7VNO4WeEfJvd+f
43 | +57coNF7bfGuxd1cvLQEjsECgYAQFKONT55Ba8+GulXwCJjk51AQAOjmLB5VXFa6
44 | As5qgYW7HlA3/K8A/lD9mAS9XsNg8FXcCo1iG3HAFWxLj2RqPyAGPlDa0j0UfR4p
45 | 5cEOQk6c1EjWOeILa39P59NjoY3HAQc8uCi+W7V4/wx3AkZI4sOWKGkXw0y70/p6
46 | f4ucWQKBgQDDrmTymslhh/9YH96bdlwFh4iOyrOy3ZZa4UNYLpSXU6owWZRH8KfT
47 | Nn9vWmUE5IB1r9or8GaY/ibMKrYwo6ulAOg3uJNmXL0mKZH2lJOsTEnKZD9d7AZH
48 | 6ESfi4Jwk4lsgNhI9hfxC9QAY8wI8yKFHu+5zK7WuI8EELAkvHj8pw==
49 | -----END RSA PRIVATE KEY-----
50 |
--------------------------------------------------------------------------------
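
The crt/key pair above is consumed by the nginx state to terminate TLS in front of Jenkins. A sketch of generating a replacement self-signed pair (CN and validity are only examples):

    openssl req -x509 -nodes -newkey rsa:2048 \
        -keyout server.key -out server.crt \
        -days 3650 -subj "/CN=jenkins"
    # paste server.crt under "crt:" and server.key under "key:", keeping the block-literal indentation
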
/Chapter 4/CodeCommit/salt/pillars/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - nginx
5 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/pillars/users.sls:
--------------------------------------------------------------------------------
1 | users:
2 | veselin:
3 | uid: 5001
4 | password: '$1$wZ0gQOOo$HEN/gDGS85dEZM7QZVlFz/'
5 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/docker/init.sls:
--------------------------------------------------------------------------------
1 | docker:
2 | pkg.installed: []
3 |
4 | service.running:
5 | - enable: True
6 | - reload: True
7 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/jenkins/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - docker
3 |
4 | jenkins_prereq:
5 | pkg.installed:
6 | - pkgs:
7 | - java-1.7.0-openjdk
8 | - gcc
9 | - make
10 | - createrepo
11 |
12 | jenkins:
13 | pkg.installed:
14 | - sources:
15 | - jenkins: http://mirrors.jenkins-ci.org/redhat-stable/jenkins-2.7.1-1.1.noarch.rpm
16 | - require:
17 | - pkg: jenkins_prereq
18 |
19 | user.present:
20 | - groups:
21 | - docker
22 | - require:
23 | - pkg: docker
24 |
25 | service.running:
26 | - enable: True
27 | - reload: True
28 | - require:
29 | - pkg: jenkins
30 | - user: jenkins
31 |
32 | file.append:
33 | - name: /etc/sysconfig/jenkins
34 | - text: |
35 | ### Salt config
36 | JENKINS_LISTEN_ADDRESS="127.0.0.1"
37 | JENKINS_AJP_PORT="-1"
38 | - require:
39 | - pkg: jenkins
40 | - require_in:
41 | - service: jenkins
42 | - watch_in:
43 | - service: jenkins
44 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/nginx/files/jenkins.conf:
--------------------------------------------------------------------------------
1 | upstream jenkins {
2 | server 127.0.0.1:8080 fail_timeout=0;
3 | }
4 |
5 | server {
6 | listen 80;
7 | server_name *.amazonaws.com;
8 | return 301 https://$host$request_uri;
9 | }
10 |
11 | server {
12 | listen 443 ssl;
13 | server_name *.amazonaws.com;
14 |
15 | ssl_certificate /etc/nginx/ssl/server.crt;
16 | ssl_certificate_key /etc/nginx/ssl/server.key;
17 |
18 | location / {
19 | proxy_set_header Host $host:$server_port;
20 | proxy_set_header X-Real-IP $remote_addr;
21 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
22 | proxy_set_header X-Forwarded-Proto $scheme;
23 | proxy_redirect http:// https://;
24 | proxy_pass http://jenkins;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/nginx/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - jenkins
3 |
4 | nginx:
5 | pkg.installed: []
6 |
7 | service.running:
8 | - enable: True
9 | - reload: True
10 | - require:
11 | - pkg: nginx
12 | - service: jenkins
13 |
14 | /etc/nginx/conf.d/jenkins.conf:
15 | file.managed:
16 | - source: salt://nginx/files/jenkins.conf
17 | - require:
18 | - pkg: nginx
19 | - require_in:
20 | - service: nginx
21 | - watch_in:
22 | - service: nginx
23 |
24 | {% for FIL in ['crt','key'] %}
25 | /etc/nginx/ssl/server.{{ FIL }}:
26 | file.managed:
27 | - makedirs: True
28 | - mode: 400
29 | - contents_pillar: nginx:{{ FIL }}
30 | - require:
31 | - pkg: nginx
32 | - require_in:
33 | - service: nginx
34 | - watch_in:
35 | - service: nginx
36 | {% endfor %}
37 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - yum-s3
5 | - jenkins
6 | - nginx
7 | - docker
8 |
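With the Salt tree cloned to /srv/salt and the masterless minion config in place, the targeting above can be sanity-checked before a full run; a quick sketch, both commands read-only/dry-run:

# Show which states the top file assigns to this host, then do a dry run
salt-call --local state.show_top
salt-call --local state.apply test=True
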
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/users/files/veselin.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EA...
2 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/users/init.sls:
--------------------------------------------------------------------------------
1 | veselin:
2 | user.present:
3 | - fullname: Veselin Kantsev
4 | - uid: {{ salt['pillar.get']('users:veselin:uid') }}
5 | - password: {{ salt['pillar.get']('users:veselin:password') }}
6 | - groups:
7 | - wheel
8 |
9 | ssh_auth.present:
10 | - user: veselin
11 | - source: salt://users/files/veselin.pub
12 | - require:
13 | - user: veselin
14 |
15 | sudoers:
16 | file.managed:
17 | - name: /etc/sudoers.d/wheel
18 | - contents: '%wheel ALL=(ALL) ALL'
19 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/yum-s3/files/cob.conf:
--------------------------------------------------------------------------------
1 | ;
2 | ; Cob: yet another yum S3 plugin - /etc/yum/pluginconf.d/cob.conf
3 | ;
4 | ; Copyright 2014-2015, Henry Huang. All Rights Reserved.
5 | ;
6 | ; Licensed under the Apache License, Version 2.0 (the "License");
7 | ; you may not use this file except in compliance with the License.
8 | ; You may obtain a copy of the License at
9 | ;
10 | ; http://www.apache.org/licenses/LICENSE-2.0
11 | ;
12 | ; Unless required by applicable law or agreed to in writing, software
13 | ; distributed under the License is distributed on an "AS IS" BASIS,
14 | ; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | ; See the License for the specific language governing permissions and
16 | ; limitations under the License.
17 | ;
18 | [main]
19 | cachedir=/var/cache/yum/$basearch/$releasever
20 | keepcache=1
21 | debuglevel=4
22 | logfile=/var/log/yum.log
23 | exactarch=1
24 | obsoletes=0
25 | gpgcheck=0
26 | plugins=1
27 | distroverpkg=centos-release
28 | enabled=1
29 |
30 | [aws]
31 | # access_key =
32 | # secret_key =
33 | timeout = 60
34 | retries = 5
35 | metadata_server = http://169.254.169.254
36 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/yum-s3/files/cob.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Cob: yet another yum S3 plugin
3 | #
4 | # Copyright 2014-2015, Henry Huang.
5 | #
6 | # Licensed under the Apache License, Version 2.0 (the "License");
7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at
9 | #
10 | # http://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 |
18 | __version__ = "0.3.1"
19 |
20 | import base64
21 | import hmac
22 | import json
23 | import re
24 | import socket
25 | import datetime
26 | import time
27 | import urllib2
28 | import urlparse
29 | from hashlib import sha256
30 | from email.message import Message
31 | from urlparse import urlsplit
32 |
33 | import yum.plugins
34 | from yum.yumRepo import YumRepository
35 |
36 | __all__ = ['requires_api_version',
37 | 'plugin_type',
38 | 'init_hook']
39 |
40 | requires_api_version = '2.5'
41 | plugin_type = yum.plugins.TYPE_CORE
42 |
43 | timeout = 60
44 | retries = 5
45 | metadata_server = "http://169.254.169.254"
46 |
47 | EMPTY_SHA256_HASH = (
48 | 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
49 |
50 |
51 | class HTTPHeaders(Message):
52 |
53 | # The __iter__ method is not available in python2.x, so we have
54 | # to port the py3 version.
55 | def __iter__(self):
56 | for field, value in self._headers:
57 | yield field
58 |
59 |
60 | class NoCredentialsError(Exception):
61 | """
62 | No credentials could be found
63 | """
64 | pass
65 |
66 |
67 | class NoRegionError(Exception):
68 | """
69 | No region could be found
70 | """
71 | pass
72 |
73 |
74 | class IncorrectCredentialsError(Exception):
75 |
76 | """
77 | Incorrect credentials were found
78 | """
79 | pass
80 |
81 |
82 | class Credentials(object):
83 | def __init__(self, access_key, secret_key, token):
84 | self.access_key = access_key
85 | self.secret_key = secret_key
86 | self.token = token
87 |
88 |
89 | class HTTPRequest(object):
90 | def __init__(self, method, url, headers=None):
91 | self.method = method
92 | self.url = url
93 |
94 | if headers is None:
95 | self.headers = {}
96 | else:
97 | self.headers = headers
98 |
99 |
100 | class BaseSigner(object):
101 | def add_auth(self, request):
102 | raise NotImplementedError("add_auth")
103 |
104 |
105 | class S3SigV4Auth(BaseSigner):
106 | """
107 | Sign a S3 request with Signature V4.
108 | """
109 | def __init__(self, credentials, service_name, region_name, logger):
110 | self.credentials = credentials
111 | # We initialize these values here so the unit tests can have
112 | # valid values, but they will get overridden in ``add_auth``
113 | # later for real requests.
114 | now = datetime.datetime.utcnow()
115 | self.timestamp = now.strftime('%Y%m%dT%H%M%SZ')
116 | self._region_name = region_name
117 | self._service_name = service_name
118 | self._logger = logger
119 |
120 | def _sign(self, key, msg, hex=False):
121 | if hex:
122 | sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
123 | else:
124 | sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
125 | return sig
126 |
127 | def headers_to_sign(self, request):
128 | """
129 | Select the headers from the request that need to be included
130 | in the StringToSign.
131 | """
132 | header_map = HTTPHeaders()
133 | split = urlsplit(request.url)
134 | for name, value in request.headers.items():
135 | lname = name.lower()
136 | header_map[lname] = value
137 | if 'host' not in header_map:
138 | header_map['host'] = split.netloc
139 | return header_map
140 |
141 | def canonical_headers(self, headers_to_sign):
142 | """
143 | Return the headers that need to be included in the StringToSign
144 | in their canonical form by converting all header keys to lower
145 | case, sorting them in alphabetical order and then joining
146 | them into a string, separated by newlines.
147 | """
148 | headers = []
149 | sorted_header_names = sorted(set(headers_to_sign))
150 | for key in sorted_header_names:
151 | value = ','.join(v.strip() for v in
152 | sorted(headers_to_sign.get_all(key)))
153 | headers.append('%s:%s' % (key, value))
154 | return '\n'.join(headers)
155 |
156 | def signed_headers(self, headers_to_sign):
157 | l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]
158 | l = sorted(l)
159 | return ';'.join(l)
160 |
161 | def canonical_request(self, request):
162 | cr = [request.method.upper()]
163 | path = self._normalize_url_path(urlsplit(request.url).path)
164 | cr.append(path)
165 | headers_to_sign = self.headers_to_sign(request)
166 | cr.append(self.canonical_headers(headers_to_sign) + '\n')
167 | cr.append(self.signed_headers(headers_to_sign))
168 | if 'X-Amz-Content-SHA256' in request.headers:
169 | body_checksum = request.headers['X-Amz-Content-SHA256']
170 | else:
171 | body_checksum = EMPTY_SHA256_HASH
172 | cr.append(body_checksum)
173 | return '\n'.join(cr)
174 |
175 | def _normalize_url_path(self, path):
176 | # For S3, we do not normalize the path.
177 | return path
178 |
179 | def scope(self, args):
180 | scope = [self.credentials.access_key]
181 | scope.append(self.timestamp[0:8])
182 | scope.append(self._region_name)
183 | scope.append(self._service_name)
184 | scope.append('aws4_request')
185 | return '/'.join(scope)
186 |
187 | def credential_scope(self, args):
188 | scope = []
189 | scope.append(self.timestamp[0:8])
190 | scope.append(self._region_name)
191 | scope.append(self._service_name)
192 | scope.append('aws4_request')
193 | return '/'.join(scope)
194 |
195 | def string_to_sign(self, request, canonical_request):
196 | """
197 | Return the canonical StringToSign as well as a dict
198 | containing the original version of all headers that
199 | were included in the StringToSign.
200 | """
201 | sts = ['AWS4-HMAC-SHA256']
202 | sts.append(self.timestamp)
203 | sts.append(self.credential_scope(request))
204 | sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
205 | return '\n'.join(sts)
206 |
207 | def signature(self, string_to_sign):
208 | key = self.credentials.secret_key
209 | k_date = self._sign(('AWS4' + key).encode('utf-8'),
210 | self.timestamp[0:8])
211 | k_region = self._sign(k_date, self._region_name)
212 | k_service = self._sign(k_region, self._service_name)
213 | k_signing = self._sign(k_service, 'aws4_request')
214 | return self._sign(k_signing, string_to_sign, hex=True)
215 |
216 | def add_auth(self, request):
217 | if self.credentials is None:
218 | raise NoCredentialsError
219 | # Create a new timestamp for each signing event
220 | now = datetime.datetime.utcnow()
221 | self.timestamp = now.strftime('%Y%m%dT%H%M%SZ')
222 | # This could be a retry. Make sure the previous
223 | # authorization header is removed first.
224 | self._modify_request_before_signing(request)
225 | canonical_request = self.canonical_request(request)
226 | self._logger.info(3, "Calculating signature using v4 auth.")
227 | self._logger.info(3, "CanonicalRequest:\n%s\n" % canonical_request)
228 | string_to_sign = self.string_to_sign(request, canonical_request)
229 | self._logger.info(3, "StringToSign:\n%s\n" % string_to_sign)
230 | signature = self.signature(string_to_sign)
231 | self._logger.info(3, "Signature: %s" % signature)
232 |
233 | self._inject_signature_to_request(request, signature)
234 |
235 | def _inject_signature_to_request(self, request, signature):
236 | l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]
237 | headers_to_sign = self.headers_to_sign(request)
238 | l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
239 | l.append('Signature=%s' % signature)
240 | request.headers['Authorization'] = ', '.join(l)
241 | return request
242 |
243 | def _modify_request_before_signing(self, request):
244 | if 'Authorization' in request.headers:
245 | del request.headers['Authorization']
246 | if 'Date' not in request.headers:
247 | request.headers['X-Amz-Date'] = self.timestamp
248 | if self.credentials.token:
249 | request.headers['X-Amz-Security-Token'] = self.credentials.token
250 | request.headers['X-Amz-Content-SHA256'] = EMPTY_SHA256_HASH
251 |
252 |
253 | def _check_s3_urls(urls):
254 | pattern = "s3.*\.amazonaws\.com"
255 | if isinstance(urls, basestring):
256 | if re.compile(pattern).findall(urls) != []:
257 | return True
258 | elif isinstance(urls, list):
259 | for url in urls:
260 | if re.compile(pattern).findall(url) != []:
261 | break
262 | else:
263 | # Only for the list with all non-S3 URLs
264 | return False
265 | return True
266 |
267 |
268 | def get_region_from_s3url(url):
269 | pattern = "s3-(.*)\.amazonaws\.com"
270 | groups = re.compile(pattern).findall(url)
271 | if groups != [] and len(groups) == 1:
272 | return groups[0]
273 | else:
274 | # No region info in S3 URL
275 | return "us-east-1"
276 |
277 |
278 | def retry_url(url, retry_on_404=False, num_retries=retries, timeout=timeout):
279 | """
280 | Retry a url. This is specifically used for accessing the metadata
281 | service on an instance. Since this address should never be proxied
282 | (for security reasons), we create a ProxyHandler with a NULL
283 | dictionary to override any proxy settings in the environment.
284 | """
285 |
286 | original = socket.getdefaulttimeout()
287 | socket.setdefaulttimeout(timeout)
288 |
289 | for i in range(0, num_retries):
290 | try:
291 | proxy_handler = urllib2.ProxyHandler({})
292 | opener = urllib2.build_opener(proxy_handler)
293 | req = urllib2.Request(url)
294 | r = opener.open(req)
295 | result = r.read()
296 | return result
297 | except urllib2.HTTPError as e:
298 | # in 2.6 you use getcode(), in 2.5 and earlier you use code
299 | if hasattr(e, 'getcode'):
300 | code = e.getcode()
301 | else:
302 | code = e.code
303 | if code == 404 and not retry_on_404:
304 | return None
305 | except Exception as e:
306 | pass
307 | print '[ERROR] Caught exception reading instance data'
308 | # If not on the last iteration of the loop then sleep.
309 | if i + 1 != num_retries:
310 | time.sleep(2 ** i)
311 | print '[ERROR] Unable to read instance data, giving up'
312 | return None
313 |
314 |
315 | def get_region(url=metadata_server, version="latest",
316 | params="meta-data/placement/availability-zone/"):
317 | """
318 | Fetch the region from AWS metadata store.
319 | """
320 | url = urlparse.urljoin(url, "/".join([version, params]))
321 | result = retry_url(url)
322 | return result[:-1].strip()
323 |
324 |
325 | def get_iam_role(url=metadata_server, version="latest",
326 | params="meta-data/iam/security-credentials/"):
327 | """
328 | Read IAM role from AWS metadata store.
329 | """
330 | url = urlparse.urljoin(url, "/".join([version, params]))
331 | result = retry_url(url)
332 | if result is None:
333 | # print "No IAM role found in the machine"
334 | return None
335 | else:
336 | return result
337 |
338 |
339 | def get_credentials_from_iam_role(url=metadata_server,
340 | version="latest",
341 | params="meta-data/iam/security-credentials/",
342 | iam_role=None):
343 | """
344 | Read IAM credentials from AWS metadata store.
345 | """
346 | url = urlparse.urljoin(url, "/".join([version, params, iam_role]))
347 | result = retry_url(url)
348 | if result is None:
349 | # print "No IAM credentials found in the machine"
350 | return None
351 | try:
352 | data = json.loads(result)
353 | except ValueError as e:
354 | # print "Corrupt data found in IAM credentials"
355 | return None
356 |
357 | access_key = data.get('AccessKeyId', None)
358 | secret_key = data.get('SecretAccessKey', None)
359 | token = data.get('Token', None)
360 |
361 | if access_key and secret_key and token:
362 | return (access_key.encode("utf-8"),
363 | secret_key.encode("utf-8"),
364 | token.encode("utf-8"))
365 | else:
366 | return None
367 |
368 |
369 | def init_hook(conduit):
370 | """
371 | Setup the S3 repositories
372 | """
373 | corrupt_repos = []
374 | s3_repos = {}
375 |
376 | repos = conduit.getRepos()
377 | for key, repo in repos.repos.iteritems():
378 | if isinstance(repo, YumRepository) and repo.enabled:
379 | if repo.baseurl and _check_s3_urls(repo.baseurl):
380 | s3_repos.update({key: repo})
381 |
382 | for key, repo in s3_repos.iteritems():
383 | try:
384 | new_repo = S3Repository(repo.id, repo, conduit)
385 | except IncorrectCredentialsError as e:
386 | # Credential Error is a general problem
387 | # will affect all S3 repos
388 | corrupt_repos = s3_repos.keys()
389 | break
390 | except Exception as e:
391 | corrupt_repos.append(key)
392 | continue
393 |
394 | # Correct yum repo on S3
395 | repos.delete(key)
396 | repos.add(new_repo)
397 |
398 | # Delete the incorrect yum repo on S3
399 | for repo in corrupt_repos:
400 | repos.delete(repo)
401 |
402 |
403 | class S3Repository(YumRepository):
404 |
405 | """
406 | Repository object for Amazon S3
407 | """
408 |
409 | def __init__(self, repoid, repo, conduit):
410 | super(S3Repository, self).__init__(repoid)
411 | self.repoid = repoid
412 | self.conduit = conduit
413 |
414 | # FIXME: dirty code here
415 | self.__dict__.update(repo.__dict__)
416 |
417 | # Inherited from YumRepository <-- Repository
418 | self.enable()
419 |
420 | # Find the AWS Credentials
421 | self.set_credentials()
422 |
423 | # Disabled region initialization
424 | # self.set_region()
425 |
426 | def _getFile(self, url=None, relative=None, local=None,
427 | start=None, end=None,
428 | copy_local=None, checkfunc=None, text=None,
429 | reget='simple', cache=True, size=None, **kwargs):
430 | """
431 | Patched _getFile func via AWS S3 REST API
432 | """
433 | mirrors = self.grab.mirrors
434 | # mirrors always exists as a list
435 | # and each element (dict) with a key named "mirror"
436 | for mirror in mirrors:
437 | baseurl = mirror["mirror"]
438 | super(S3Repository, self).grab.mirrors = [mirror]
439 | if _check_s3_urls(baseurl):
440 | region_name = get_region_from_s3url(baseurl)
441 | if region_name:
442 | self.region = region_name
443 | self.http_headers = self.fetch_headers(baseurl, relative)
444 | else:
445 | # non-S3 URL
446 | self.http_headers = tuple(
447 | self.__headersListFromDict(cache=cache))
448 | try:
449 | return super(S3Repository, self)._getFile(url, relative, local,
450 | start, end,
451 | copy_local,
452 | checkfunc, text,
453 | reget, cache,
454 | size, **kwargs)
455 | except Exception as e:
456 | self.conduit.info(3, str(e))
457 | raise
458 |
459 | __get = _getFile
460 |
461 | def set_region(self):
462 |
463 | # Fetch params from local config file
464 | global timeout, retries, metadata_server
465 | timeout = self.conduit.confInt('aws', 'timeout', default=timeout)
466 | retries = self.conduit.confInt('aws', 'retries', default=retries)
467 | metadata_server = self.conduit.confString('aws',
468 | 'metadata_server',
469 | default=metadata_server)
470 |
471 | # Fetch region from local config file
472 | self.region = self.conduit.confString('aws',
473 | 'region',
474 | default=None)
475 |
476 | if self.region:
477 | return True
478 |
479 | # Fetch region from meta data
480 | region = get_region()
481 | if region is None:
482 | self.conduit.info(3, "[ERROR] No region in the plugin conf "
483 | "for the repo '%s'" % self.repoid)
484 | raise NoRegionError
485 |
486 | self.region = region
487 | return True
488 |
489 | def set_credentials(self):
490 |
491 | # Fetch params from local config file
492 | global timeout, retries, metadata_server
493 | timeout = self.conduit.confInt('aws', 'timeout', default=timeout)
494 | retries = self.conduit.confInt('aws', 'retries', default=retries)
495 | metadata_server = self.conduit.confString('aws',
496 | 'metadata_server',
497 | default=metadata_server)
498 |
499 | # Fetch credentials from local config file
500 | self.access_key = self.conduit.confString('aws',
501 | 'access_key',
502 | default=None)
503 | self.secret_key = self.conduit.confString('aws',
504 | 'secret_key',
505 | default=None)
506 | self.token = self.conduit.confString('aws', 'token', default=None)
507 | if self.access_key and self.secret_key:
508 | return True
509 |
510 | # Fetch credentials from iam role meta data
511 | iam_role = get_iam_role()
512 | if iam_role is None:
513 | self.conduit.info(3, "[ERROR] No credentials in the plugin conf "
514 | "for the repo '%s'" % self.repoid)
515 | raise IncorrectCredentialsError
516 |
517 | credentials = get_credentials_from_iam_role(iam_role=iam_role)
518 | if credentials is None:
519 | self.conduit.info(3, "[ERROR] Fail to get IAM credentials"
520 | "for the repo '%s'" % self.repoid)
521 | raise IncorrectCredentialsError
522 |
523 | self.access_key, self.secret_key, self.token = credentials
524 | return True
525 |
526 | def fetch_headers(self, url, path):
527 | headers = {}
528 |
529 | # "\n" in the url, required by AWS S3 Auth v4
530 | url = urlparse.urljoin(url, urllib2.quote(path)) + "\n"
531 | credentials = Credentials(self.access_key, self.secret_key, self.token)
532 | request = HTTPRequest("GET", url)
533 | signer = S3SigV4Auth(credentials, "s3", self.region, self.conduit)
534 | signer.add_auth(request)
535 | return request.headers
536 |
537 |
538 | if __name__ == '__main__':
539 | pass
540 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/yum-s3/files/s3.repo:
--------------------------------------------------------------------------------
1 | [s3-repo]
2 | name=S3-repo
3 | baseurl=https://s3.amazonaws.com/MY_BUCKET_NAME
4 | enabled=0
5 | gpgcheck=0
6 |
--------------------------------------------------------------------------------
/Chapter 4/CodeCommit/salt/states/yum-s3/init.sls:
--------------------------------------------------------------------------------
1 | yum-s3_cob.py:
2 | file.managed:
3 | - name: /usr/lib/yum-plugins/cob.py
4 | - source: salt://yum-s3/files/cob.py
5 |
6 | yum-s3_cob.conf:
7 | file.managed:
8 | - name: /etc/yum/pluginconf.d/cob.conf
9 | - source: salt://yum-s3/files/cob.conf
10 |
11 | yum-s3_s3.repo:
12 | file.managed:
13 | - name: /etc/yum.repos.d/s3.repo
14 | - source: salt://yum-s3/files/s3.repo
15 |
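Once these three files are in place on an instance whose IAM role can read the bucket, the S3-backed repository can be exercised directly; a quick check (the repo id matches s3.repo above, which ships disabled):

# Refresh metadata and list the S3-backed repository via the cob plugin
yum clean expire-cache
yum --enablerepo=s3-repo repolist
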
--------------------------------------------------------------------------------
/Chapter 4/Terraform/iam_user_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "NotAction": [
7 | "codecommit:DeleteRepository"
8 | ],
9 | "Resource": "*"
10 | },
11 | {
12 | "Effect": "Allow",
13 | "NotAction": [
14 | "s3:DeleteBucket"
15 | ],
16 | "Resource": "*"
17 | },
18 | {
19 | "Sid": "Stmt1461764665000",
20 | "Effect": "Allow",
21 | "Action": [
22 | "ec2:AllocateAddress",
23 | "ec2:AssociateAddress",
24 | "ec2:AssociateRouteTable",
25 | "ec2:AttachInternetGateway",
26 | "ec2:AuthorizeSecurityGroupEgress",
27 | "ec2:AuthorizeSecurityGroupIngress",
28 | "ec2:CreateInternetGateway",
29 | "ec2:CreateRoute",
30 | "ec2:CreateRouteTable",
31 | "ec2:CreateSecurityGroup",
32 | "ec2:CreateSubnet",
33 | "ec2:CreateTags",
34 | "ec2:CreateVpc",
35 | "ec2:Describe*",
36 | "ec2:ModifySubnetAttribute",
37 | "ec2:RevokeSecurityGroupEgress",
38 | "iam:AddRoleToInstanceProfile",
39 | "iam:CreateInstanceProfile",
40 | "iam:CreateRole",
41 | "iam:Get*",
42 | "iam:PassRole",
43 | "iam:PutRolePolicy"
44 | ],
45 | "Resource": [
46 | "*"
47 | ]
48 | }
49 | ]
50 | }
51 |
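A sketch of attaching this document as an inline policy with the AWS CLI; the user name and policy name below are examples only:

aws iam put-user-policy \
    --user-name terraform-user \
    --policy-name terraform-ch4 \
    --policy-document file://iam_user_policy.json
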
--------------------------------------------------------------------------------
/Chapter 4/Terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "VPC ID" {
2 | value = "${aws_vpc.terraform-vpc.id}"
3 | }
4 |
5 | output "JENKINS EIP" {
6 | value = "${aws_eip.jenkins.public_ip}"
7 | }
8 |
--------------------------------------------------------------------------------
/Chapter 4/Terraform/resources.tf:
--------------------------------------------------------------------------------
1 | # Set a Provider
2 | provider "aws" {
3 | region = "${var.aws-region}"
4 | }
5 |
6 | ### VPC ###
7 |
8 | # Create a VPC
9 | resource "aws_vpc" "terraform-vpc" {
10 | cidr_block = "${var.vpc-cidr}"
11 |
12 | tags {
13 | Name = "${var.vpc-name}"
14 | }
15 | }
16 |
17 | # Create an Internet Gateway
18 | resource "aws_internet_gateway" "terraform-igw" {
19 | vpc_id = "${aws_vpc.terraform-vpc.id}"
20 | }
21 |
22 | # Create public route tables
23 | resource "aws_route_table" "public" {
24 | vpc_id = "${aws_vpc.terraform-vpc.id}"
25 | route {
26 | cidr_block = "0.0.0.0/0"
27 | gateway_id = "${aws_internet_gateway.terraform-igw.id}"
28 | }
29 |
30 | tags {
31 | Name = "Public"
32 | }
33 | }
34 |
35 | # Create and associate public subnets with a route table
36 | resource "aws_subnet" "public-1" {
37 | vpc_id = "${aws_vpc.terraform-vpc.id}"
38 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 1)}"
39 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
40 | map_public_ip_on_launch = true
41 |
42 | tags {
43 | Name = "Public"
44 | }
45 | }
46 |
47 | resource "aws_route_table_association" "public-1" {
48 | subnet_id = "${aws_subnet.public-1.id}"
49 | route_table_id = "${aws_route_table.public.id}"
50 | }
51 |
52 |
53 | ### EC2 ###
54 |
55 | resource "aws_security_group" "jenkins" {
56 | name = "jenkins"
57 | description = "ec2 instance security group"
58 | vpc_id = "${aws_vpc.terraform-vpc.id}"
59 |
60 | ingress {
61 | from_port = "22"
62 | to_port = "22"
63 | protocol = "tcp"
64 | cidr_blocks = ["0.0.0.0/0"]
65 | }
66 |
67 | ingress {
68 | from_port = "80"
69 | to_port = "80"
70 | protocol = "tcp"
71 | cidr_blocks = ["0.0.0.0/0"]
72 | }
73 |
74 | ingress {
75 | from_port = "443"
76 | to_port = "443"
77 | protocol = "tcp"
78 | cidr_blocks = ["0.0.0.0/0"]
79 | }
80 |
81 | egress {
82 | from_port = 0
83 | to_port = 0
84 | protocol = "-1"
85 | cidr_blocks = ["0.0.0.0/0"]
86 | }
87 |
88 | }
89 |
90 | resource "aws_iam_role" "jenkins" {
91 | name = "jenkins"
92 | path = "/"
93 | assume_role_policy = < >(logger -s -t $(basename $0)) 2>&1
155 | # Install Git and set CodeCommit connection settings
156 | # (required for access via IAM roles)
157 | yum -y install git
158 | git config --system credential.helper '!aws codecommit credential-helper $@'
159 | git config --system credential.UseHttpPath true
160 | # Clone the Salt repository
161 | git clone https://git-codecommit.us-east-1.amazonaws.com/v1/repos/salt /srv/salt; chmod 700 /srv/salt
162 | # Install SaltStack
163 | yum -y install https://repo.saltstack.com/yum/amazon/salt-amzn-repo-latest-1.ami.noarch.rpm
164 | yum clean expire-cache; yum -y install salt-minion; chkconfig salt-minion off
165 | # Put custom minion config in place (for enabling masterless mode)
166 | cp -r /srv/salt/minion.d /etc/salt/
167 | ## Trigger a full Salt run
168 | salt-call state.apply
169 | EOF
170 |
171 | lifecycle { create_before_destroy = true }
172 | }
173 |
174 | resource "aws_eip" "jenkins" {
175 | instance = "${aws_instance.jenkins.id}"
176 | vpc = true
177 | }
178 |
--------------------------------------------------------------------------------
/Chapter 4/Terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | aws-region = "us-east-1"
2 | vpc-cidr = "10.0.0.0/16"
3 | vpc-name = "Terraform"
4 | aws-availability-zones = "us-east-1b,us-east-1c"
5 | jenkins-ami-id = "ami-6869aa05"
6 | jenkins-instance-type = "t2.nano"
7 | jenkins-key-name = "terraform"
8 |
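terraform.tfvars is loaded automatically from the working directory, so a typical cycle against these values looks roughly like this (assuming Terraform is installed and credentials for the IAM user are exported):

terraform plan
terraform apply
terraform output
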
--------------------------------------------------------------------------------
/Chapter 4/Terraform/variables.tf:
--------------------------------------------------------------------------------
1 | ### VPC ###
2 | variable "aws-region" {
3 | type = "string"
4 | description = "AWS region"
5 | }
6 | variable "vpc-cidr" {
7 | type = "string"
8 | description = "VPC CIDR"
9 | }
10 | variable "vpc-name" {
11 | type = "string"
12 | description = "VPC name"
13 | }
14 | variable "aws-availability-zones" {
15 | type = "string"
16 | description = "AWS zones"
17 | }
18 |
19 | ### EC2 ###
20 | variable "jenkins-ami-id" {
21 | type="string"
22 | description = "EC2 AMI identifier"
23 | }
24 | variable "jenkins-instance-type" {
25 | type = "string"
26 | description = "EC2 instance type"
27 | }
28 | variable "jenkins-key-name" {
29 | type = "string"
30 | description = "EC2 ssh key name"
31 | }
32 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 |
3 | node {
4 |
5 | step([$class: 'WsCleanup'])
6 |
7 | stage "Checkout Git repo"
8 | checkout scm
9 |
10 | stage "Checkout additional repos"
11 | dir("salt") {
12 | git "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/salt"
13 | }
14 |
15 | stage "Run Packer"
16 | sh "/opt/packer validate -var=\"appVersion=$APP_VERSION\" -var-file=packer/demo-app_vars.json packer/demo-app.json"
17 | sh "/opt/packer build -machine-readable -var=\"appVersion=$APP_VERSION\" -var-file=packer/demo-app_vars.json packer/demo-app.json | tee packer/packer.log"
18 |
19 | stage "Deploy AMI"
20 | def amiId = sh returnStdout: true, script:"tail -n1 packer/packer.log | awk '{printf \$NF}'"
21 | def ec2Keypair = "terraform"
22 | def secGroup = "sg-81e628fb"
23 | def instanceType = "t2.nano"
24 | def subnetId = "subnet-83fdf4a9"
25 | def instanceProfile = "demo-app"
26 |
27 | echo "Launching an instance from ${amiId}"
28 | sh "aws ec2 run-instances \
29 | --region us-east-1 \
30 | --image-id ${amiId} \
31 | --key-name ${ec2Keypair} \
32 | --security-group-ids ${secGroup} \
33 | --instance-type ${instanceType} \
34 | --subnet-id ${subnetId} \
35 | --iam-instance-profile Name=${instanceProfile} \
36 | | tee .ec2_run-instances.log \
37 | "
38 |
39 | def instanceId = sh returnStdout: true, script: "printf \$(jq .Instances[0].InstanceId < .ec2_run-instances.log)"
40 |
41 | sh "aws ec2 create-tags --resources ${instanceId} \
42 | --region us-east-1 \
43 | --tags Key=Name,Value=\"Jenkins (demo-app-$APP_VERSION)\" Key=CreatedBy,Value=Jenkins \
44 | "
45 |
46 | echo "Registering with ELB"
47 | def elbId = "demo-app-elb"
48 | sh "aws elb register-instances-with-load-balancer \
49 | --region us-east-1 \
50 | --load-balancer-name ${elbId} \
51 | --instances ${instanceId} \
52 | "
53 |
54 | echo "Waiting for the instance to come into service"
55 | sh "while [ \"x\$(aws elb describe-instance-health --region us-east-1 --load-balancer-name ${elbId} --instances ${instanceId} | jq .InstanceStates[].State | tr -d '\"')\" != \"xInService\" ]; do : ; sleep 60; done"
56 |
57 |
58 | stage "Run AB test"
59 | def elbUri = "http://demo-app-elb-2073087633.us-east-1.elb.amazonaws.com/"
60 | sh "ab -c5 -n1000 -d -S ${elbUri} | tee .ab.log"
61 | def non2xx = sh returnStdout: true, script:"set -o pipefail;(grep 'Non-2xx' .ab.log | awk '{printf \$NF}') || (printf 0)"
62 | def writeErr = sh returnStdout: true, script:"grep 'Write errors' .ab.log | awk '{printf \$NF}'"
63 | def failedReqs = sh returnStdout: true, script:"grep 'Failed requests' .ab.log | awk '{printf \$NF}'"
64 | def rps = sh returnStdout: true, script:"grep 'Requests per second' .ab.log | awk '{printf \$4}' | awk -F. '{printf \$1}'"
65 | def docLen = sh returnStdout: true, script:"grep 'Document Length' .ab.log | awk '{printf \$3}'"
66 |
67 | echo "Non2xx=${non2xx}, WriteErrors=${writeErr}, FailedReqs=${failedReqs}, ReqsPerSec=${rps}, DocLength=${docLen}"
68 | sh "if [ ${non2xx} -gt 10 ] || [ ${writeErr} -gt 10 ] || [ ${failedReqs} -gt 10 ] || [ ${rps} -lt 1000 ] || [ ${docLen} -lt 10 ]; then \
69 | echo \"ERR: AB test failed\" | tee -a .error.log; \
70 | fi \
71 | "
72 |
73 | stage "Terminate test instance"
74 | sh "aws ec2 terminate-instances --region us-east-1 --instance-ids ${instanceId}"
75 |
76 | stage "Verify test results"
77 | sh "if [ -s '.error.log' ]; then \
78 | cat '.error.log'; \
79 | :> '.error.log'; \
80 | exit 100; \
81 | else \
82 | echo 'Tests OK'; \
83 | fi \
84 | "
85 | }
86 |
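A side note on the instance-id extraction above: it keeps jq's default (quoted) output and lets the shell handle the quoting; jq's raw mode would return the bare string directly, e.g.:

# -r prints the string value without surrounding JSON quotes
jq -r '.Instances[0].InstanceId' < .ec2_run-instances.log
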
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/packer/demo-app.json:
--------------------------------------------------------------------------------
1 | {
2 | "variables": {
3 | "srcAmiId": null,
4 | "amiName": null,
5 | "sshUser": null,
6 | "instanceProfile": null,
7 | "subnetId": null,
8 | "vpcId": null,
9 | "userDataFile": null,
10 | "appVersion": null
11 | },
12 | "builders": [{
13 | "type": "amazon-ebs",
14 | "region": "us-east-1",
15 | "source_ami": "{{user `srcAmiId`}}",
16 | "instance_type": "t2.nano",
17 | "ssh_username": "{{user `sshUser`}}",
18 | "ami_name": "{{user `amiName`}}-{{timestamp}}",
19 | "iam_instance_profile": "{{user `instanceProfile`}}",
20 | "subnet_id": "{{user `subnetId`}}",
21 | "vpc_id": "{{user `vpcId`}}",
22 | "user_data_file": "{{user `userDataFile`}}",
23 | "run_tags": {
24 | "Name": "Packer ({{user `amiName`}}-{{timestamp}})",
25 | "CreatedBy": "Jenkins"
26 | },
27 | "tags": {
28 | "Name": "{{user `amiName`}}-{{timestamp}}",
29 | "CreatedBy": "Jenkins"
30 | }
31 | }],
32 | "provisioners": [
33 | {
34 | "type": "shell",
35 | "inline": [
36 | "echo 'Waiting for the instance to fully boot up...'",
37 | "sleep 30" ,
38 | "echo \"Setting APP_VERSION to {{user `appVersion`}}\"",
39 | "echo \"{{user `appVersion`}}\" > /tmp/APP_VERSION"
40 | ]
41 | },
42 | {
43 | "type": "salt-masterless",
44 | "skip_bootstrap": true,
45 | "local_state_tree": "salt/states",
46 | "local_pillar_roots": "salt/pillars"
47 | },
48 | {
49 | "type": "file",
50 | "source": "serverspec",
51 | "destination": "/tmp/"
52 | },
53 | {
54 | "type": "shell",
55 | "inline": [
56 | "echo 'Installing Serverspec tests...'",
57 | "sudo gem install --no-document rake serverspec",
58 | "echo 'Running Serverspec tests...'",
59 | "cd /tmp/serverspec && sudo /usr/local/bin/rake spec"
60 | ]
61 | }
62 | ]
63 | }
64 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/packer/demo-app_userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euf -o pipefail
4 | exec 1> >(logger -s -t $(basename $0)) 2>&1
5 |
6 | # Install SaltStack
7 | yum -y install https://repo.saltstack.com/yum/amazon/salt-amzn-repo-latest-1.ami.noarch.rpm
8 | yum clean expire-cache; yum -y install salt-minion; chkconfig salt-minion off
9 |
10 | # Put custom grains in place
11 | echo -e 'grains:\n roles:\n - demo-app' > /etc/salt/minion.d/grains.conf
12 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/packer/demo-app_vars.json:
--------------------------------------------------------------------------------
1 | {
2 | "srcAmiId": "ami-6869aa05",
3 | "amiName": "demo-app",
4 | "sshUser": "ec2-user",
5 | "instanceProfile": "demo-app",
6 | "subnetId": "subnet-83fdf4a9",
7 | "vpcId": "vpc-cf3b4aa8",
8 | "userDataFile": "packer/demo-app_userdata.sh"
9 | }
10 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/serverspec/.rspec:
--------------------------------------------------------------------------------
1 | --color
2 | --format documentation
3 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/serverspec/Rakefile:
--------------------------------------------------------------------------------
1 | require 'rake'
2 | require 'rspec/core/rake_task'
3 |
4 | task :spec => 'spec:all'
5 | task :default => :spec
6 |
7 | namespace :spec do
8 | targets = []
9 | Dir.glob('./spec/*').each do |dir|
10 | next unless File.directory?(dir)
11 | target = File.basename(dir)
12 | target = "_#{target}" if target == "default"
13 | targets << target
14 | end
15 |
16 | task :all => targets
17 | task :default => :all
18 |
19 | targets.each do |target|
20 | original_target = target == "_default" ? target[1..-1] : target
21 | desc "Run serverspec tests to #{original_target}"
22 | RSpec::Core::RakeTask.new(target.to_sym) do |t|
23 | ENV['TARGET_HOST'] = original_target
24 | t.pattern = "spec/#{original_target}/*_spec.rb"
25 | end
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/serverspec/spec/localhost/demo-app_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | versionFile = open('/tmp/APP_VERSION')
4 | appVersion = versionFile.read.chomp
5 |
6 | describe package("demo-app-#{appVersion}") do
7 | it { should be_installed }
8 | end
9 |
10 | describe service('php-fpm') do
11 | it { should be_enabled }
12 | it { should be_running }
13 | end
14 |
15 | describe service('nginx') do
16 | it { should be_enabled }
17 | it { should be_running }
18 | end
19 |
20 | describe user('veselin') do
21 | it { should exist }
22 | it { should have_authorized_key 'ssh-rsa AAAAB3NzaC1yc...' }
23 | end
24 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app-cdelivery/serverspec/spec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | set :backend, :exec
4 |
5 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app/.gitignore:
--------------------------------------------------------------------------------
1 | rpm/
2 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 |
3 | node {
4 |
5 | step([$class: 'WsCleanup'])
6 |
7 | stage "Checkout Git repo"
8 | checkout scm
9 | def gitHash = sh returnStdout: true, script:"printf \$(git rev-parse --short HEAD)"
10 | echo "Proceeding with Git hash: ${gitHash}"
11 |
12 | stage "Run tests"
13 | sh "docker run -v \$(pwd):/app --rm phpunit/phpunit tests/"
14 |
15 | stage "Build RPM"
16 | sh "[ -d ./rpm ] || mkdir ./rpm"
17 | sh "docker run -v \$(pwd)/src:/data/demo-app -v \$(pwd)/rpm:/data/rpm --rm tenzer/fpm \
18 | fpm -s dir -t rpm -n demo-app -v ${gitHash} \
19 | --description \"Demo PHP app\" \
20 | --directories /var/www/demo-app \
21 | --package /data/rpm/demo-app-${gitHash}.rpm \
22 | /data/demo-app=/var/www/ \
23 | "
24 |
25 | stage "Update YUM repo"
26 | sh "[ -d ~/repo/rpm/demo-app/ ] || mkdir -p ~/repo/rpm/demo-app/"
27 | sh "mv ./rpm/*.rpm ~/repo/rpm/demo-app/"
28 | sh "createrepo --update --cachedir ~/repo.cache ~/repo/"
29 | sh "aws s3 sync ~/repo s3://YUM_REPO_NAME/ --region us-east-1 --delete"
30 |
31 | stage "Check YUM repo"
32 | sh "sudo yum clean expire-cache >/dev/null"
33 | sh "sudo yum repolist >/dev/null"
34 | sh "yum info demo-app-\$(git rev-parse --short HEAD)"
35 |
36 | stage "Trigger downstream"
37 | build job: "demo-app-cdelivery", parameters: [[$class: "StringParameterValue", name: "APP_VERSION", value: "${gitHash}-1"]], wait: false
38 |
39 | }
40 |
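Before the RPM is synced to S3 it can be inspected locally; a small sketch, with the short Git hash shown as a placeholder:

# Show package metadata and the file list of a freshly built RPM
rpm -qpi ./rpm/demo-app-1a2b3c4.rpm
rpm -qpl ./rpm/demo-app-1a2b3c4.rpm
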
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/demo-app/src/index.php:
--------------------------------------------------------------------------------
1 | assertEquals($expected, $actual);
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/minion.d/masterless.conf:
--------------------------------------------------------------------------------
1 | file_client: local
2 | file_roots:
3 | base:
4 | - /srv/salt/states
5 | pillar_roots:
6 | base:
7 | - /srv/salt/pillars
8 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/pillars/nginx.sls:
--------------------------------------------------------------------------------
1 | nginx:
2 | crt: |
3 | -----BEGIN CERTIFICATE-----
4 | MIIDGjCCAgKgAwIBAgIJAOyoVgqZyUcZMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
5 | BAMTB2plbmtpbnMwHhcNMTYwNzA5MTQ1MjIwWhcNMjYwNzA3MTQ1MjIwWjASMRAw
6 | DgYDVQQDEwdqZW5raW5zMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
7 | rW0WfTqJjWuslGE5wmUfcDcEILADe/X7L4MbxpOs7EPlX4BQuU4bIJBnaP3taWgR
8 | 8X3+Mcje8emnP2iMh4NCngSqPLia77jP4zCfxDGxcd4giK1RQsufsVRdDoaQ4ibB
9 | y6gaC0VPmA8pCOZGTdh6S4WLxiMbKlaTC/LT92Z/0m4JXDtCPZMN3pPQL3aPE8yW
10 | xybo2fhnyozLsW4SgzBe92cSuROKIsYAaWv6WoWgSnN0vRASrEMPmiMzYindcMA0
11 | VIi9DfL1sQcsS90pBfPbX7AGe0I/rD0J8WEy5gXS+XTK9nPtVrt/Vu30yEDWiU7c
12 | C8vWI3qs6adb+s093EPw/QIDAQABo3MwcTAdBgNVHQ4EFgQU1ABZ6EroAlhhLeQS
13 | hFpa70vhmZQwQgYDVR0jBDswOYAU1ABZ6EroAlhhLeQShFpa70vhmZShFqQUMBIx
14 | EDAOBgNVBAMTB2plbmtpbnOCCQDsqFYKmclHGTAMBgNVHRMEBTADAQH/MA0GCSqG
15 | SIb3DQEBBQUAA4IBAQBbwLB4sCRlwEgL+ClYDdPIDp+3Uthf2tKt9VjlhF/zKxPD
16 | KcBOPVLxPNJoq4El/akTkbNpUhU2iS1jFS+tFA7Wxf8MqLr9Zc1aNmkoWzjqkHYS
17 | PLARumMruw4axiqCP2xtiqXhPMRvPVBTmhMeRsPBCA0QWCgCZmgLuo+xcg9cFBT3
18 | C2O6JY6qlb0XJv2y/7npPiFr2V18G2aweyaf/FvtFnODVmqSi+HecID8ZmH9OaN4
19 | 6HpZgJ0LOG7RUH9rVvPg9fdukQY20PhakAsXhOLdRp9ddODUA4dTG1ano3RW6f8n
20 | jJ5HhA0Cb0tRrezqHbdTeOB+91I731az3QmmgKbO
21 | -----END CERTIFICATE-----
22 | key: |
23 | -----BEGIN RSA PRIVATE KEY-----
24 | MIIEpAIBAAKCAQEArW0WfTqJjWuslGE5wmUfcDcEILADe/X7L4MbxpOs7EPlX4BQ
25 | uU4bIJBnaP3taWgR8X3+Mcje8emnP2iMh4NCngSqPLia77jP4zCfxDGxcd4giK1R
26 | QsufsVRdDoaQ4ibBy6gaC0VPmA8pCOZGTdh6S4WLxiMbKlaTC/LT92Z/0m4JXDtC
27 | PZMN3pPQL3aPE8yWxybo2fhnyozLsW4SgzBe92cSuROKIsYAaWv6WoWgSnN0vRAS
28 | rEMPmiMzYindcMA0VIi9DfL1sQcsS90pBfPbX7AGe0I/rD0J8WEy5gXS+XTK9nPt
29 | Vrt/Vu30yEDWiU7cC8vWI3qs6adb+s093EPw/QIDAQABAoIBAQCav7+UiNpavdym
30 | Hkd65d7ys7TUMhs5zpmPoM71F6ryu/b9i7L8Vuyv1wrfTc4+AyYXtdRPuizt8g9R
31 | 7kmPVhnohMMfIZ7nD6M415eIassqjwm6y+S51Javllbe8kZv9iNxRZPPwM4wIj78
32 | ePX82pDtuMGrUIIZ+lyGCe0IUob2Tc61lx4A1ai5KlU0aWfhzTOnBa3NWet5l2Gs
33 | bHryHcz/TFKKaYryG+LzKDU2yOrH07hz8IAMAlXFR9baZOwSn7T71SkNPYOvyuLp
34 | dm5vln8BcfkoEv6AU+Bi3+Pi7y1UeFXVfre2jIxWIH62bBAoBK10bgQZ3L7qE3+O
35 | XZamoLVlAoGBAOKlSZ66TkfPaHFgbg2I6L9CqdqFvlSu1XRXWEvl6hM4b22DrpC2
36 | G/mytGu0c+U93TQNY04JhgFDf9h61BIMj/awNyAODiMsRambAaqHCAi+Dgy6p5VK
37 | K1dmFLXbmu1qwabVhgZdCBl34g4ACtASAJGwTDEWpcDGcsocy72CXaKLAoGBAMPj
38 | PfOWuW9xrMEWfbvKXqruJ46bWyBnwaQBViSd/33O56rsLHUZRDg2JkDDA336U1o0
39 | eaRHQL7BLszCuvSQpzEVnamCVhXYZ9yzzVL7YdUk92TTKQqZCriB8Cx/TN6Lxci/
40 | AyDmLyHmrW2oU0YsaNWa9G73BMRA7X78rwkKwlOXAoGAKmJ+whBVU1iWT52Y9y8D
41 | V8E/wn4AehW4FWnAOXFltPJ45CIcIzPrR2cEFqBIjDZlh7Z5O77MMLBO2E0gG7/9
42 | rESICpaWTj2ZSX6TcTCPcBMazYaakHCuaknM1bWb44pzbJ/B0K7VNO4WeEfJvd+f
43 | +57coNF7bfGuxd1cvLQEjsECgYAQFKONT55Ba8+GulXwCJjk51AQAOjmLB5VXFa6
44 | As5qgYW7HlA3/K8A/lD9mAS9XsNg8FXcCo1iG3HAFWxLj2RqPyAGPlDa0j0UfR4p
45 | 5cEOQk6c1EjWOeILa39P59NjoY3HAQc8uCi+W7V4/wx3AkZI4sOWKGkXw0y70/p6
46 | f4ucWQKBgQDDrmTymslhh/9YH96bdlwFh4iOyrOy3ZZa4UNYLpSXU6owWZRH8KfT
47 | Nn9vWmUE5IB1r9or8GaY/ibMKrYwo6ulAOg3uJNmXL0mKZH2lJOsTEnKZD9d7AZH
48 | 6ESfi4Jwk4lsgNhI9hfxC9QAY8wI8yKFHu+5zK7WuI8EELAkvHj8pw==
49 | -----END RSA PRIVATE KEY-----
50 |
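The crt/key pillar values above are a self-signed pair; one way to generate a similar throwaway pair for the Jenkins proxy, assuming OpenSSL is available (the CN and validity are arbitrary):

openssl req -x509 -nodes -newkey rsa:2048 -days 3650 \
    -subj '/CN=jenkins' -keyout server.key -out server.crt
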
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/pillars/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - nginx
5 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/pillars/users.sls:
--------------------------------------------------------------------------------
1 | users:
2 | veselin:
3 | uid: 5001
4 | password: '$1$wZ0gQOOo$HEN/gDGS85dEZM7QZVlFz/'
5 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/demo-app/init.sls:
--------------------------------------------------------------------------------
1 | {% set APP_VERSION = salt['cmd.run']('cat /tmp/APP_VERSION') %}
2 |
3 | include:
4 | - nginx
5 |
6 | demo-app:
7 | pkg.installed:
8 | - name: demo-app
9 | - version: {{ APP_VERSION }}
10 | - require_in:
11 | - service: nginx
12 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/docker/init.sls:
--------------------------------------------------------------------------------
1 | docker:
2 | pkg.installed: []
3 |
4 | service.running:
5 | - enable: True
6 | - reload: True
7 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/jenkins/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - docker
3 | - nginx
4 |
5 | jenkins_prereq:
6 | pkg.installed:
7 | - pkgs:
8 | - java-1.7.0-openjdk
9 | - gcc
10 | - make
11 | - createrepo
12 | - jq
13 | - httpd-tools
14 |
15 | jenkins:
16 | pkg.installed:
17 | - sources:
18 | - jenkins: http://mirrors.jenkins-ci.org/redhat-stable/jenkins-2.7.1-1.1.noarch.rpm
19 | - require:
20 | - pkg: jenkins_prereq
21 |
22 | user.present:
23 | - groups:
24 | - docker
25 | - require:
26 | - pkg: docker
27 |
28 | service.running:
29 | - enable: True
30 | - reload: True
31 | - require:
32 | - pkg: jenkins
33 | - user: jenkins
34 | - require_in:
35 | - service: nginx
36 |
37 | file.append:
38 | - name: /etc/sysconfig/jenkins
39 | - text: |
40 | ### Salt config
41 | JENKINS_LISTEN_ADDRESS="127.0.0.1"
42 | JENKINS_AJP_PORT="-1"
43 | - require:
44 | - pkg: jenkins
45 | - require_in:
46 | - service: jenkins
47 | - watch_in:
48 | - service: jenkins
49 |
50 | jenkins_sudoers:
51 | file.managed:
52 | - name: /etc/sudoers.d/jenkins
53 | - contents: 'jenkins ALL=(ALL) NOPASSWD:NOEXEC: /usr/bin/yum clean expire-cache, /usr/bin/yum repolist'
54 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/nginx/demo-app.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | /etc/nginx/conf.d/demo-app.conf:
5 | file.managed:
6 | - source: salt://nginx/files/demo-app.conf
7 | - require:
8 | - pkg: nginx
9 | - require_in:
10 | - service: nginx
11 | - watch_in:
12 | - service: nginx
13 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/nginx/files/demo-app.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name *.amazonaws.com;
4 |
5 | root /var/www/demo-app;
6 | index index.php;
7 |
8 | location / {
9 | try_files $uri $uri/ =404;
10 | }
11 |
12 | location ~ \.php$ {
13 | try_files $uri =404;
14 | fastcgi_split_path_info ^(.+\.php)(/.+)$;
15 | fastcgi_pass 127.0.0.1:9000;
16 | fastcgi_index index.php;
17 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
18 | include fastcgi_params;
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/nginx/files/jenkins.conf:
--------------------------------------------------------------------------------
1 | upstream jenkins {
2 | server 127.0.0.1:8080 fail_timeout=0;
3 | }
4 |
5 | server {
6 | listen 80;
7 | server_name *.amazonaws.com;
8 | return 301 https://$host$request_uri;
9 | }
10 |
11 | server {
12 | listen 443 ssl;
13 | server_name *.amazonaws.com;
14 |
15 | ssl_certificate /etc/nginx/ssl/server.crt;
16 | ssl_certificate_key /etc/nginx/ssl/server.key;
17 |
18 | location / {
19 | proxy_set_header Host $host:$server_port;
20 | proxy_set_header X-Real-IP $remote_addr;
21 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
22 | proxy_set_header X-Forwarded-Proto $scheme;
23 | proxy_redirect http:// https://;
24 | proxy_pass http://jenkins;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/nginx/init.sls:
--------------------------------------------------------------------------------
1 | nginx:
2 | pkg.installed: []
3 |
4 | service.running:
5 | - enable: True
6 | - reload: True
7 | - require:
8 | - pkg: nginx
9 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/nginx/jenkins.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | /etc/nginx/conf.d/jenkins.conf:
5 | file.managed:
6 | - source: salt://nginx/files/jenkins.conf
7 | - require:
8 | - pkg: nginx
9 | - require_in:
10 | - service: nginx
11 | - watch_in:
12 | - service: nginx
13 |
14 | {% for FIL in ['crt','key'] %}
15 | /etc/nginx/ssl/server.{{ FIL }}:
16 | file.managed:
17 | - makedirs: True
18 | - mode: 400
19 | - contents_pillar: nginx:{{ FIL }}
20 | - require:
21 | - pkg: nginx
22 | - require_in:
23 | - service: nginx
24 | - watch_in:
25 | - service: nginx
26 | {% endfor %}
27 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/packer/init.sls:
--------------------------------------------------------------------------------
1 | packer:
2 | archive.extracted:
3 | - name: /opt/
4 | - source: 'https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_linux_amd64.zip'
5 | - source_hash: md5=3a54499fdf753e7e7c682f5d704f684f
6 | - archive_format: zip
7 | - if_missing: /opt/packer
8 |
9 | cmd.wait:
10 | - name: 'chmod +x /opt/packer'
11 | - watch:
12 | - archive: packer
13 |
14 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/php-fpm/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | php-fpm:
5 | pkg.installed:
6 | - name: php-fpm
7 | - require:
8 | - pkg: nginx
9 |
10 | service.running:
11 | - name: php-fpm
12 | - enable: True
13 | - reload: True
14 | - require_in:
15 | - service: nginx
16 |
17 | php-fpm_www.conf_1:
18 | file.replace:
19 | - name: /etc/php-fpm.d/www.conf
20 | - pattern: ^user = apache$
21 | - repl: user = nginx
22 | - require:
23 | - pkg: php-fpm
24 | - require_in:
25 | - service: php-fpm
26 | - watch_in:
27 | - service: php-fpm
28 |
29 | php-fpm_www.conf_2:
30 | file.replace:
31 | - name: /etc/php-fpm.d/www.conf
32 | - pattern: ^group = apache$
33 | - repl: group = nginx
34 | - require:
35 | - pkg: php-fpm
36 | - require_in:
37 | - service: php-fpm
38 | - watch_in:
39 | - service: php-fpm
40 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - yum-s3
5 |
6 | 'roles:jenkins':
7 | - match: grain
8 | - jenkins
9 | - nginx.jenkins
10 | - docker
11 | - packer
12 |
13 | 'roles:demo-app':
14 | - match: grain
15 | - php-fpm
16 | - nginx.demo-app
17 | - demo-app
18 |
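Grain-based targeting like the above can be verified on a booted instance; a quick read-only check of the role grain and the resulting state assignment:

salt-call --local grains.get roles
salt-call --local state.show_top
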
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/users/files/veselin.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EA...
2 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/users/init.sls:
--------------------------------------------------------------------------------
1 | veselin:
2 | user.present:
3 | - fullname: Veselin Kantsev
4 | - uid: {{ salt['pillar.get']('users:veselin:uid') }}
5 | - password: {{ salt['pillar.get']('users:veselin:password') }}
6 | - groups:
7 | - wheel
8 |
9 | ssh_auth.present:
10 | - user: veselin
11 | - source: salt://users/files/veselin.pub
12 | - require:
13 | - user: veselin
14 |
15 | file.managed:
16 | - name: /etc/sudoers.d/veselin
17 | - contents: 'veselin ALL=(ALL) ALL'
18 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/yum-s3/files/cob.conf:
--------------------------------------------------------------------------------
1 | ;
2 | ; Cob: yet another yum S3 plugin - /etc/yum/pluginconf.d/cob.conf
3 | ;
4 | ; Copyright 2014-2015, Henry Huang. All Rights Reserved.
5 | ;
6 | ; Licensed under the Apache License, Version 2.0 (the "License");
7 | ; you may not use this file except in compliance with the License.
8 | ; You may obtain a copy of the License at
9 | ;
10 | ; http://www.apache.org/licenses/LICENSE-2.0
11 | ;
12 | ; Unless required by applicable law or agreed to in writing, software
13 | ; distributed under the License is distributed on an "AS IS" BASIS,
14 | ; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | ; See the License for the specific language governing permissions and
16 | ; limitations under the License.
17 | ;
18 | [main]
19 | cachedir=/var/cache/yum/$basearch/$releasever
20 | keepcache=1
21 | debuglevel=4
22 | logfile=/var/log/yum.log
23 | exactarch=1
24 | obsoletes=0
25 | gpgcheck=0
26 | plugins=1
27 | distroverpkg=centos-release
28 | enabled=1
29 |
30 | [aws]
31 | # access_key =
32 | # secret_key =
33 | timeout = 60
34 | retries = 5
35 | metadata_server = http://169.254.169.254
36 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/yum-s3/files/s3.repo:
--------------------------------------------------------------------------------
1 | [s3-repo]
2 | name=S3-repo
3 | baseurl=https://s3.amazonaws.com/...
4 | enabled=0
5 | gpgcheck=0
6 |
--------------------------------------------------------------------------------
/Chapter 5/CodeCommit/salt/states/yum-s3/init.sls:
--------------------------------------------------------------------------------
1 | yum-s3_cob.py:
2 | file.managed:
3 | - name: /usr/lib/yum-plugins/cob.py
4 | - source: salt://yum-s3/files/cob.py
5 |
6 | yum-s3_cob.conf:
7 | file.managed:
8 | - name: /etc/yum/pluginconf.d/cob.conf
9 | - source: salt://yum-s3/files/cob.conf
10 |
11 | yum-s3_s3.repo:
12 | file.managed:
13 | - name: /etc/yum.repos.d/s3.repo
14 | - source: salt://yum-s3/files/s3.repo
15 |
--------------------------------------------------------------------------------
/Chapter 5/Terraform/iam_user_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "NotAction": [
7 | "codecommit:DeleteRepository"
8 | ],
9 | "Resource": "*"
10 | },
11 | {
12 | "Effect": "Allow",
13 | "NotAction": [
14 | "s3:DeleteBucket"
15 | ],
16 | "Resource": "*"
17 | },
18 | {
19 | "Sid": "Stmt1461764665000",
20 | "Effect": "Allow",
21 | "Action": [
22 | "ec2:AllocateAddress",
23 | "ec2:AssociateAddress",
24 | "ec2:AssociateRouteTable",
25 | "ec2:AttachInternetGateway",
26 | "ec2:AuthorizeSecurityGroupEgress",
27 | "ec2:AuthorizeSecurityGroupIngress",
28 | "ec2:CreateInternetGateway",
29 | "ec2:CreateRoute",
30 | "ec2:CreateRouteTable",
31 | "ec2:CreateSecurityGroup",
32 | "ec2:CreateSubnet",
33 | "ec2:CreateTags",
34 | "ec2:CreateVpc",
35 | "ec2:Describe*",
36 | "ec2:ModifySubnetAttribute",
37 | "ec2:RevokeSecurityGroupEgress",
38 | "elasticloadbalancing:AddTags",
39 | "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
40 | "elasticloadbalancing:AttachLoadBalancerToSubnets",
41 | "elasticloadbalancing:CreateLoadBalancer",
42 | "elasticloadbalancing:CreateLoadBalancerListeners",
43 | "elasticloadbalancing:Describe*",
44 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
45 | "iam:AddRoleToInstanceProfile",
46 | "iam:CreateInstanceProfile",
47 | "iam:CreateRole",
48 | "iam:Get*",
49 | "iam:PassRole",
50 | "iam:PutRolePolicy"
51 | ],
52 | "Resource": [
53 | "*"
54 | ]
55 | }
56 | ]
57 | }
58 |
--------------------------------------------------------------------------------
/Chapter 5/Terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "VPC ID" {
2 | value = "${aws_vpc.terraform-vpc.id}"
3 | }
4 |
5 | output "JENKINS EIP" {
6 | value = "${aws_eip.jenkins.public_ip}"
7 | }
8 |
9 | output "ELB URI" {
10 | value = "${aws_elb.demo-app-elb.dns_name}"
11 | }
12 |
13 | output "Private subnet ID" {
14 | value = "${aws_subnet.private-1.id}"
15 | }
16 |
17 | output "Demo-app secgroup" {
18 | value = "${aws_security_group.demo-app.id}"
19 | }
20 |
--------------------------------------------------------------------------------
/Chapter 5/Terraform/resources.tf:
--------------------------------------------------------------------------------
1 | # Set a Provider
2 | provider "aws" {
3 | region = "${var.aws-region}"
4 | }
5 |
6 | ### VPC ###
7 |
8 | # Create a VPC
9 | resource "aws_vpc" "terraform-vpc" {
10 | cidr_block = "${var.vpc-cidr}"
11 |
12 | tags {
13 | Name = "${var.vpc-name}"
14 | }
15 | }
16 |
17 | # Create an Internet Gateway
18 | resource "aws_internet_gateway" "terraform-igw" {
19 | vpc_id = "${aws_vpc.terraform-vpc.id}"
20 | }
21 |
22 | # Create NAT
23 | resource "aws_eip" "nat-eip" {
24 | vpc = true
25 | }
26 |
27 | resource "aws_nat_gateway" "terraform-nat" {
28 | allocation_id = "${aws_eip.nat-eip.id}"
29 | subnet_id = "${aws_subnet.public-1.id}"
30 | depends_on = ["aws_internet_gateway.terraform-igw"]
31 | }
32 |
33 | # Create public and private route tables
34 | resource "aws_route_table" "public" {
35 | vpc_id = "${aws_vpc.terraform-vpc.id}"
36 | route {
37 | cidr_block = "0.0.0.0/0"
38 | gateway_id = "${aws_internet_gateway.terraform-igw.id}"
39 | }
40 |
41 | tags {
42 | Name = "Public"
43 | }
44 | }
45 |
46 | resource "aws_route_table" "private" {
47 | vpc_id = "${aws_vpc.terraform-vpc.id}"
48 | route {
49 | cidr_block = "0.0.0.0/0"
50 | nat_gateway_id = "${aws_nat_gateway.terraform-nat.id}"
51 | }
52 |
53 | tags {
54 | Name = "Private"
55 | }
56 | }
57 |
58 | # Create and associate public subnets with a route table
59 | resource "aws_subnet" "public-1" {
60 | vpc_id = "${aws_vpc.terraform-vpc.id}"
61 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 1)}"
62 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
63 | map_public_ip_on_launch = true
64 |
65 | tags {
66 | Name = "Public"
67 | }
68 | }
69 |
70 | resource "aws_route_table_association" "public-1" {
71 | subnet_id = "${aws_subnet.public-1.id}"
72 | route_table_id = "${aws_route_table.public.id}"
73 | }
74 |
75 | # Create and associate private subnets with a route table
76 | resource "aws_subnet" "private-1" {
77 | vpc_id = "${aws_vpc.terraform-vpc.id}"
78 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 2)}"
79 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
80 | map_public_ip_on_launch = false
81 |
82 | tags {
83 | Name = "Private"
84 | }
85 | }
86 |
87 | resource "aws_route_table_association" "private-1" {
88 | subnet_id = "${aws_subnet.private-1.id}"
89 | route_table_id = "${aws_route_table.private.id}"
90 | }
91 |
92 | ### IAM ###
93 |
94 | resource "aws_iam_role" "jenkins" {
95 | name = "jenkins"
96 | path = "/"
97 | assume_role_policy = <<EOF
351 | exec 1> >(logger -s -t $(basename $0)) 2>&1
352 | # Install Git and set CodeCommit connection settings
353 | # (required for access via IAM roles)
354 | yum -y install git
355 | git config --system credential.helper '!aws codecommit credential-helper $@'
356 | git config --system credential.UseHttpPath true
357 | # Clone the Salt repository
358 | git clone https://git-codecommit.us-east-1.amazonaws.com/v1/repos/salt /srv/salt; chmod 700 /srv/salt
359 | # Install SaltStack
360 | yum -y install https://repo.saltstack.com/yum/amazon/salt-amzn-repo-latest-1.ami.noarch.rpm
361 | yum clean expire-cache; yum -y install salt-minion; chkconfig salt-minion off
362 | # Put custom minion config in place (for enabling masterless mode)
363 | cp -r /srv/salt/minion.d /etc/salt/
364 | echo -e 'grains:\n roles:\n - jenkins' > /etc/salt/minion.d/grains.conf
365 | ## Trigger a full Salt run
366 | salt-call state.apply
367 | EOF
368 |
369 | lifecycle { create_before_destroy = true }
370 | }
371 |
372 | resource "aws_eip" "jenkins" {
373 | instance = "${aws_instance.jenkins.id}"
374 | vpc = true
375 | }
376 |
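
A rough sketch of the intended workflow for this template, assuming the credentials of the IAM user carrying the policy shown earlier are exported in the environment; terraform.tfvars is picked up automatically, so no extra -var flags are needed:

$ export AWS_ACCESS_KEY_ID=...       # placeholder: access key of the Terraform IAM user
$ export AWS_SECRET_ACCESS_KEY=...   # placeholder: matching secret key
$ terraform plan                     # preview the VPC, subnets, IAM role and Jenkins instance
$ terraform apply                    # create them and print the values from outputs.tf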
--------------------------------------------------------------------------------
/Chapter 5/Terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | aws-region = "us-east-1"
2 | vpc-cidr = "10.0.0.0/16"
3 | vpc-name = "Terraform"
4 | aws-availability-zones = "us-east-1b,us-east-1c"
5 | jenkins-ami-id = "ami-6869aa05"
6 | jenkins-instance-type = "t2.nano"
7 | jenkins-key-name = "terraform"
8 |
--------------------------------------------------------------------------------
/Chapter 5/Terraform/variables.tf:
--------------------------------------------------------------------------------
1 | ### VPC ###
2 | variable "aws-region" {
3 | type = "string"
4 | description = "AWS region"
5 | }
6 | variable "vpc-cidr" {
7 | type = "string"
8 | description = "VPC CIDR"
9 | }
10 | variable "vpc-name" {
11 | type = "string"
12 | description = "VPC name"
13 | }
14 | variable "aws-availability-zones" {
15 | type = "string"
16 | description = "AWS zones"
17 | }
18 |
19 | ### EC2 ###
20 | variable "jenkins-ami-id" {
21 | type = "string"
22 | description = "EC2 AMI identifier"
23 | }
24 | variable "jenkins-instance-type" {
25 | type = "string"
26 | description = "EC2 instance type"
27 | }
28 | variable "jenkins-key-name" {
29 | type = "string"
30 | description = "EC2 ssh key name"
31 | }
32 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 |
3 | node {
4 |
5 | step([$class: 'WsCleanup'])
6 |
7 | stage "Checkout Git repo"
8 | checkout scm
9 |
10 | stage "Checkout additional repos"
11 | dir("salt") {
12 | git "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/salt"
13 | }
14 |
15 | stage "Run Packer"
16 | sh "/opt/packer validate -var=\"appVersion=$APP_VERSION\" -var-file=packer/demo-app_vars.json packer/demo-app.json"
17 | sh "/opt/packer build -machine-readable -var=\"appVersion=$APP_VERSION\" -var-file=packer/demo-app_vars.json packer/demo-app.json | tee packer/packer.log"
18 |
19 | stage "Deploy AMI"
20 | def amiId = sh returnStdout: true, script:"tail -n1 packer/packer.log | awk '{printf \$NF}'"
21 | def ec2Keypair = "terraform"
22 | def secGroup = "sg-88cf43f2"
23 | def instanceType = "t2.nano"
24 | def subnetId = "subnet-b6e2049b"
25 | def instanceProfile = "demo-app"
26 |
27 | echo "Launching an instance from ${amiId}"
28 | sh "aws ec2 run-instances \
29 | --region us-east-1 \
30 | --image-id ${amiId} \
31 | --key-name ${ec2Keypair} \
32 | --security-group-ids ${secGroup} \
33 | --instance-type ${instanceType} \
34 | --subnet-id ${subnetId} \
35 | --iam-instance-profile Name=${instanceProfile} \
36 | | tee .ec2_run-instances.log \
37 | "
38 |
39 | def instanceId = sh returnStdout: true, script: "printf \$(jq .Instances[0].InstanceId < .ec2_run-instances.log)"
40 |
41 | sh "aws ec2 create-tags --resources ${instanceId} \
42 | --region us-east-1 \
43 | --tags Key=Name,Value=\"Jenkins (demo-app-$APP_VERSION)\" Key=CreatedBy,Value=Jenkins \
44 | "
45 |
46 | echo "Registering with ELB"
47 | def elbId = "demo-app-elb"
48 | sh "aws elb register-instances-with-load-balancer \
49 | --region us-east-1 \
50 | --load-balancer-name ${elbId} \
51 | --instances ${instanceId} \
52 | "
53 |
54 | echo "Waiting for the instance to come into service"
55 | sh "while [ \"x\$(aws elb describe-instance-health --region us-east-1 --load-balancer-name ${elbId} --instances ${instanceId} | jq .InstanceStates[].State | tr -d '\"')\" != \"xInService\" ]; do : ; sleep 60; done"
56 |
57 | stage "Run AB test"
58 | def elbUri = "http://demo-app-elb-1758897314.us-east-1.elb.amazonaws.com/"
59 | sh "ab -c5 -n1000 -d -S ${elbUri} | tee .ab.log"
60 | def non2xx = sh returnStdout: true, script:"set -o pipefail;(grep 'Non-2xx' .ab.log | awk '{printf \$NF}') || (printf 0)"
61 | def writeErr = sh returnStdout: true, script:"grep 'Write errors' .ab.log | awk '{printf \$NF}'"
62 | def failedReqs = sh returnStdout: true, script:"grep 'Failed requests' .ab.log | awk '{printf \$NF}'"
63 | def rps = sh returnStdout: true, script:"grep 'Requests per second' .ab.log | awk '{printf \$4}' | awk -F. '{printf \$1}'"
64 | def docLen = sh returnStdout: true, script:"grep 'Document Length' .ab.log | awk '{printf \$3}'"
65 |
66 | echo "Non2xx=${non2xx}, WriteErrors=${writeErr}, FailedReqs=${failedReqs}, ReqsPerSec=${rps}, DocLength=${docLen}"
67 | sh "if [ ${non2xx} -gt 10 ] || [ ${writeErr} -gt 10 ] || [ ${failedReqs} -gt 10 ] || [ ${rps} -lt 500 ] || [ ${docLen} -lt 10 ]; then \
68 | echo \"ERR: AB test failed\" | tee -a .error.log; \
69 | fi \
70 | "
71 |
72 | stage "Terminate test instance"
73 | sh "aws ec2 terminate-instances --region us-east-1 --instance-ids ${instanceId}"
74 |
75 | stage "Verify test results"
76 | sh "if [ -s '.error.log' ]; then \
77 | cat '.error.log'; \
78 | :> '.error.log'; \
79 | exit 100; \
80 | else \
81 | echo 'Tests OK'; \
82 | fi \
83 | "
84 |
85 | stage "Trigger downstream"
86 | build job: "demo-app-cdeployment", parameters: [[$class: "StringParameterValue", name: "AMI_ID", value: "${amiId}"]], wait: false
87 |
88 | }
89 |
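
The one-line wait loop in the "Deploy AMI" stage simply polls describe-instance-health until the freshly launched instance reports InService; unpacked, the call it repeats looks like the sketch below (the load balancer name comes from this stage, the instance ID is a placeholder for the one captured in .ec2_run-instances.log):

$ aws elb describe-instance-health --region us-east-1 \
      --load-balancer-name demo-app-elb \
      --instances i-0123456789abcdef0 \
      | jq -r '.InstanceStates[].State'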
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/packer/demo-app.json:
--------------------------------------------------------------------------------
1 | {
2 | "variables": {
3 | "srcAmiId": null,
4 | "amiName": null,
5 | "sshUser": null,
6 | "instanceProfile": null,
7 | "subnetId": null,
8 | "vpcId": null,
9 | "userDataFile": null,
10 | "appVersion": null
11 | },
12 | "builders": [{
13 | "type": "amazon-ebs",
14 | "region": "us-east-1",
15 | "source_ami": "{{user `srcAmiId`}}",
16 | "instance_type": "t2.nano",
17 | "ssh_username": "{{user `sshUser`}}",
18 | "ami_name": "{{user `amiName`}}-{{user `appVersion`}}-{{timestamp}}",
19 | "iam_instance_profile": "{{user `instanceProfile`}}",
20 | "subnet_id": "{{user `subnetId`}}",
21 | "vpc_id": "{{user `vpcId`}}",
22 | "user_data_file": "{{user `userDataFile`}}",
23 | "run_tags": {
24 | "Name": "Packer ({{user `amiName`}}-{{timestamp}})",
25 | "CreatedBy": "Jenkins"
26 | },
27 | "tags": {
28 | "Name": "{{user `amiName`}}-{{timestamp}}",
29 | "CreatedBy": "Jenkins"
30 | }
31 | }],
32 | "provisioners": [
33 | {
34 | "type": "shell",
35 | "inline": [
36 | "echo 'Waiting for the instance to fully boot up...'",
37 | "sleep 30" ,
38 | "echo \"Setting APP_VERSION to {{user `appVersion`}}\"",
39 | "echo \"{{user `appVersion`}}\" > /tmp/APP_VERSION"
40 | ]
41 | },
42 | {
43 | "type": "salt-masterless",
44 | "skip_bootstrap": true,
45 | "local_state_tree": "salt/states",
46 | "local_pillar_roots": "salt/pillars"
47 | },
48 | {
49 | "type": "file",
50 | "source": "serverspec",
51 | "destination": "/tmp/"
52 | },
53 | {
54 | "type": "shell",
55 | "inline": [
56 | "echo 'Installing Serverspec tests...'",
57 | "sudo gem install --no-document rake serverspec",
58 | "echo 'Running Serverspec tests...'",
59 | "cd /tmp/serverspec && sudo /usr/local/bin/rake spec"
60 | ]
61 | }
62 | ]
63 | }
64 |
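
Every entry under "variables" defaults to null, which Packer treats as required, so a build fails fast unless each value is supplied; most come from demo-app_vars.json and only appVersion is injected at run time by the pipeline. A by-hand equivalent of what the Jenkinsfile runs, with a placeholder version string:

$ /opt/packer validate -var="appVersion=abc1234-1" -var-file=packer/demo-app_vars.json packer/demo-app.json
$ /opt/packer build -var="appVersion=abc1234-1" -var-file=packer/demo-app_vars.json packer/demo-app.json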
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/packer/demo-app_userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euf -o pipefail
4 | exec 1> >(logger -s -t $(basename $0)) 2>&1
5 |
6 | # Install SaltStack
7 | yum -y install https://repo.saltstack.com/yum/amazon/salt-amzn-repo-latest-1.ami.noarch.rpm
8 | yum clean expire-cache; yum -y install salt-minion; chkconfig salt-minion off
9 |
10 | # Put custom grains in place
11 | echo -e 'grains:\n roles:\n - demo-app' > /etc/salt/minion.d/grains.conf
12 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/packer/demo-app_vars.json:
--------------------------------------------------------------------------------
1 | {
2 | "srcAmiId": "ami-6869aa05",
3 | "amiName": "demo-app",
4 | "sshUser": "ec2-user",
5 | "instanceProfile": "demo-app",
6 | "subnetId": "subnet-b6e2049b",
7 | "vpcId": "vpc-462d6721",
8 | "userDataFile": "packer/demo-app_userdata.sh"
9 | }
10 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/serverspec/.rspec:
--------------------------------------------------------------------------------
1 | --color
2 | --format documentation
3 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/serverspec/Rakefile:
--------------------------------------------------------------------------------
1 | require 'rake'
2 | require 'rspec/core/rake_task'
3 |
4 | task :spec => 'spec:all'
5 | task :default => :spec
6 |
7 | namespace :spec do
8 | targets = []
9 | Dir.glob('./spec/*').each do |dir|
10 | next unless File.directory?(dir)
11 | target = File.basename(dir)
12 | target = "_#{target}" if target == "default"
13 | targets << target
14 | end
15 |
16 | task :all => targets
17 | task :default => :all
18 |
19 | targets.each do |target|
20 | original_target = target == "_default" ? target[1..-1] : target
21 | desc "Run serverspec tests to #{original_target}"
22 | RSpec::Core::RakeTask.new(target.to_sym) do |t|
23 | ENV['TARGET_HOST'] = original_target
24 | t.pattern = "spec/#{original_target}/*_spec.rb"
25 | end
26 | end
27 | end
28 |
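
The Rakefile simply turns each directory under spec/ into a rake target, so the default task here resolves to the localhost specs. During the Packer run the shell provisioner executes exactly that; the same can be done by hand on a test instance, assuming rake and serverspec have been installed as the provisioner does:

$ cd /tmp/serverspec
$ sudo /usr/local/bin/rake spec    # runs spec/localhost/*_spec.rb against the local box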
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/serverspec/spec/localhost/demo-app_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | versionFile = open('/tmp/APP_VERSION')
4 | appVersion = versionFile.read.chomp
5 |
6 | describe package("demo-app-#{appVersion}") do
7 | it { should be_installed }
8 | end
9 |
10 | describe service('php-fpm') do
11 | it { should be_enabled }
12 | it { should be_running }
13 | end
14 |
15 | describe service('nginx') do
16 | it { should be_enabled }
17 | it { should be_running }
18 | end
19 |
20 | describe user('veselin') do
21 | it { should exist }
22 | it { should have_authorized_key 'ssh-rsa AAAA...' }
23 | end
24 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdelivery/serverspec/spec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'serverspec'
2 |
3 | set :backend, :exec
4 |
5 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdeployment/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 |
3 | node {
4 |
5 | step([$class: 'WsCleanup'])
6 |
7 | stage "Checkout Git repo"
8 | checkout scm
9 |
10 | stage "Deploy AMI"
11 | sh returnStdout: false, script: "bash ./cdeployment.sh ${AMI_ID}"
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app-cdeployment/cdeployment.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -ef -o pipefail
3 |
4 | blueGroup="demo-app-blue"
5 | greenGroup="demo-app-green"
6 | elbName="demo-app-elb-prod"
7 | AMI_ID=${1}
8 |
9 | function techo() {
10 | echo "[$(date +%s)] " ${1}
11 | }
12 |
13 | function Err() {
14 | techo "ERR: ${1}"
15 | exit 100
16 | }
17 |
18 | function rollback() {
19 | techo "Metrics check failed, rolling back"
20 | aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${newActiveGroup} \
21 | --min-size 0
22 | techo "Instances ${1} entering standby in group ${newActiveGroup}"
23 | aws autoscaling enter-standby --should-decrement-desired-capacity \
24 | --auto-scaling-group-name ${newActiveGroup} --instance-ids ${1}
25 | techo "Detaching ${elbName} from ${newActiveGroup}"
26 | aws autoscaling detach-load-balancers --auto-scaling-group-name ${newActiveGroup} \
27 | --load-balancer-names ${elbName}
28 | Err "Deployment rolled back. Please check instances in StandBy."
29 | }
30 |
31 | function wait_for_instances() {
32 | techo ">>> Waiting for instances to launch"
33 | asgInstances=()
34 |
35 | while [ ${#asgInstances[*]} -ne ${1} ];do
36 | sleep 10
37 | asgInstances=($(aws autoscaling describe-auto-scaling-groups \
38 | --auto-scaling-group-name ${newActiveGroup} | jq .AutoScalingGroups[0].Instances[].InstanceId | tr -d '"' ))
39 | techo "Launched ${#asgInstances[*]} out of ${1}"
40 | done
41 |
42 | techo ">>> Waiting for instances to become available"
43 | asgInstancesReady=0
44 | iterList=(${asgInstances[*]})
45 |
46 | while [ ${asgInstancesReady} -lt ${#asgInstances[*]} ];do
47 | sleep 10
48 | for i in ${iterList[*]};do
49 | asgInstanceState=$(aws autoscaling describe-auto-scaling-instances \
50 | --instance-ids ${i} | jq .AutoScalingInstances[0].LifecycleState | tr -d '"')
51 |
52 | if [[ ${asgInstanceState} == "InService" ]];then
53 | asgInstancesReady="$((asgInstancesReady+1))"
54 | iterList=(${asgInstances[*]/${i}/})
55 | fi
56 | done
57 | techo "Available ${asgInstancesReady} out of ${#asgInstances[*]}"
58 | done
59 |
60 | techo ">>> Waiting for ELB instances to become InService"
61 | elbInstancesReady=0
62 | iterList=(${asgInstances[*]})
63 |
64 | while [ ${elbInstancesReady} -lt ${#asgInstances[*]} ];do
65 | sleep 10
66 | for i in ${iterList[*]};do
67 | elbInstanceState=$(aws elb describe-instance-health \
68 | --load-balancer-name ${elbName} --instances ${i} | jq .InstanceStates[].State | tr -d '"')
69 |
70 | if [[ ${elbInstanceState} == "InService" ]];then
71 | elbInstancesReady=$((elbInstancesReady+1))
72 | iterList=(${asgInstances[*]/${i}/})
73 | fi
74 | done
75 | techo "InService ${elbInstancesReady} out of ${#asgInstances[*]}"
76 | done
77 | }
78 |
79 | # Set region for AWS CLI
80 | export AWS_DEFAULT_REGION="us-east-1"
81 |
82 | # Validate AMI ID
83 | [[ ${AMI_ID} = ami-* ]] || Err "AMI ID ${AMI_ID} is invalid"
84 |
85 | # Check ELBs attached to ASGs
86 | blueElb=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names ${blueGroup} | \
87 | jq .AutoScalingGroups[0].LoadBalancerNames[0] | tr -d '"')
88 | greenElb=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names ${greenGroup} | \
89 | jq .AutoScalingGroups[0].LoadBalancerNames[0] | tr -d '"')
90 |
91 | [[ "${blueElb}" != "${greenElb}" ]] || Err "Identical ELB value for both groups"
92 |
93 | # Mark the group with Prod ELB attachment as Active
94 | if [[ "${blueElb}" == "${elbName}" ]]; then
95 | activeGroup=${blueGroup}
96 | newActiveGroup=${greenGroup}
97 | elif [[ "${greenElb}" == "${elbName}" ]]; then
98 | activeGroup=${greenGroup}
99 | newActiveGroup=${blueGroup}
100 | fi
101 |
102 | # Validate groups
103 | [ -n "${activeGroup}" ] || Err "Missing activeGroup"
104 | [ -n "${newActiveGroup}" ] || Err "Missing newActiveGroup"
105 |
106 | techo "Active group: ${activeGroup}"
107 | techo "New active group: ${newActiveGroup}"
108 |
109 | # Ensure the NewActive group is not in use
110 | asgInstances=($(aws autoscaling describe-auto-scaling-groups \
111 | --auto-scaling-group-name ${newActiveGroup} | jq .AutoScalingGroups[0].Instances[].InstanceId | tr -d '"' ))
112 | [ ${#asgInstances[*]} -eq 0 ] || Err "Found instances attached to ${newActiveGroup}!"
113 |
114 | # Get capacity counts from the Active group
115 | activeDesired=$(aws autoscaling describe-auto-scaling-groups \
116 | --auto-scaling-group-name ${activeGroup} | jq .AutoScalingGroups[0].DesiredCapacity)
117 | activeMin=$(aws autoscaling describe-auto-scaling-groups \
118 | --auto-scaling-group-name ${activeGroup} | jq .AutoScalingGroups[0].MinSize)
119 | activeMax=$(aws autoscaling describe-auto-scaling-groups \
120 | --auto-scaling-group-name ${activeGroup} | jq .AutoScalingGroups[0].MaxSize)
121 | scaleStep=$(( (30 * ${activeDesired}) /100 ))
122 |
123 | # The Active group is expected to have instances in use
124 | [ ${activeDesired} -gt 0 ] || Err "Active group ${activeGroup} is set to 0 instances!"
125 |
126 | # Ensure a minimum scale step of 1 (the integer division above can yield 0)
127 | [ ${scaleStep} -gt 0 ] || scaleStep=1
128 |
129 | techo "### Scale UP secondary ASG"
130 | techo ">>> Creating a Launch Configuration"
131 |
132 | activeInstance=$(aws autoscaling describe-auto-scaling-groups \
133 | --auto-scaling-group-name ${activeGroup} | jq .AutoScalingGroups[0].Instances[0].InstanceId | tr -d '"')
134 |
135 | [[ ${activeInstance} = i-* ]] || Err "activeInstance ${activeInstance} is invalid"
136 |
137 | launchConf="demo-app-${AMI_ID}-$(date +%s)"
138 |
139 | aws autoscaling create-launch-configuration --launch-configuration-name ${launchConf} \
140 | --image-id ${AMI_ID} --instance-id ${activeInstance}
141 |
142 | techo ">>> Attaching ${launchConf} to ${newActiveGroup}"
143 | aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${newActiveGroup} \
144 | --launch-configuration-name ${launchConf}
145 |
146 | techo ">>> Attaching ${elbName} to ${newActiveGroup}"
147 | aws autoscaling attach-load-balancers --auto-scaling-group-name ${newActiveGroup} \
148 | --load-balancer-names ${elbName}
149 |
150 | techo ">>> Increasing ${newActiveGroup} capacity (min/max/desired) to ${scaleStep}"
151 | aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${newActiveGroup} \
152 | --min-size ${scaleStep} --max-size ${scaleStep} --desired-capacity ${scaleStep}
153 |
154 | wait_for_instances ${scaleStep}
155 |
156 | # Placeholder for metrics checks
157 | techo ">>> Checking error metrics"
158 | sleep 5
159 | doRollback=false
160 | ${doRollback} && rollback "${asgInstances[*]}"
161 |
162 | techo ">>> Matching ${newActiveGroup} capacity (min/max/desired) to that of ${activeGroup}"
163 | aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${newActiveGroup} \
164 | --min-size ${activeMin} --max-size ${activeMax} --desired-capacity ${activeDesired}
165 |
166 | wait_for_instances ${activeDesired}
167 |
168 | # Placeholder for metrics checks
169 | techo ">>> Checking error metrics"
170 | sleep 5
171 | doRollback=false
172 | ${doRollback} && rollback "${asgInstances[*]}"
173 |
174 | techo "### Scale DOWN primary ASG"
175 | techo ">>> Reducing ${activeGroup} size to 0"
176 | aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${activeGroup} \
177 | --min-size 0 --max-size 0 --desired-capacity 0
178 |
179 | techo ">>> Detaching ${elbName} from ${activeGroup}"
180 | aws autoscaling detach-load-balancers --auto-scaling-group-name ${activeGroup} \
181 | --load-balancer-names ${elbName}
182 |
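
The script is driven by a single positional argument, the AMI to roll out, plus the region it exports near the top; a minimal sketch of invoking it the way the demo-app-cdeployment Jenkins job does, with a placeholder AMI ID:

$ export AWS_DEFAULT_REGION="us-east-1"          # also set inside the script itself
$ bash ./cdeployment.sh ami-0123456789abcdef0    # placeholder; Jenkins passes the real ID as $AMI_ID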
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app/.gitignore:
--------------------------------------------------------------------------------
1 | rpm/
2 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 |
3 | node {
4 |
5 | step([$class: 'WsCleanup'])
6 |
7 | stage "Checkout Git repo"
8 | checkout scm
9 | def gitHash = sh returnStdout: true, script:"printf \$(git rev-parse --short HEAD)"
10 | echo "Proceeding with Git hash: ${gitHash}"
11 |
12 | stage "Run tests"
13 | sh "docker run -v \$(pwd):/app --rm phpunit/phpunit tests/"
14 |
15 | stage "Build RPM"
16 | sh "[ -d ./rpm ] || mkdir ./rpm"
17 | sh "docker run -v \$(pwd)/src:/data/demo-app -v \$(pwd)/rpm:/data/rpm --rm tenzer/fpm \
18 | fpm -s dir -t rpm -n demo-app -v ${gitHash} \
19 | --description \"Demo PHP app\" \
20 | --directories /var/www/demo-app \
21 | --package /data/rpm/demo-app-${gitHash}.rpm \
22 | /data/demo-app=/var/www/ \
23 | "
24 |
25 | stage "Update YUM repo"
26 | sh "[ -d ~/repo/rpm/demo-app/ ] || mkdir -p ~/repo/rpm/demo-app/"
27 | sh "mv ./rpm/*.rpm ~/repo/rpm/demo-app/"
28 | sh "createrepo --update --cachedir ~/repo.cache ~/repo/"
29 | sh "aws s3 sync ~/repo s3://S3_BUCKET_NAME/ --region us-east-1 --delete"
30 |
31 | stage "Check YUM repo"
32 | sh "sudo yum clean expire-cache >/dev/null"
33 | sh "sudo yum repolist >/dev/null"
34 | sh "yum info demo-app-${gitHash}"
35 |
36 | stage "Trigger downstream"
37 | build job: "demo-app-cdelivery", parameters: [[$class: "StringParameterValue", name: "APP_VERSION", value: "${gitHash}-1"]], wait: false
38 |
39 | }
40 |
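
fpm is told to use the short Git hash as the RPM version, and the release ends up as 1 (fpm's default), which appears to be why the downstream demo-app-cdelivery job is handed "${gitHash}-1" as APP_VERSION. A quick sanity check of the package metadata right after the "Build RPM" stage (the hash in the file name will vary):

$ rpm -qpi rpm/demo-app-*.rpm | egrep '^(Name|Version|Release)'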
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/demo-app/src/index.php:
--------------------------------------------------------------------------------
1 | assertEquals($expected, $actual);
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/minion.d/masterless.conf:
--------------------------------------------------------------------------------
1 | file_client: local
2 | file_roots:
3 | base:
4 | - /srv/salt/states
5 | pillar_roots:
6 | base:
7 | - /srv/salt/pillars
8 |
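
With this file dropped into /etc/salt/minion.d, salt-call runs entirely masterless against the locally cloned repository; a quick sketch of verifying that it picks up the local state and pillar trees, assuming the repo has been cloned to /srv/salt as in the Jenkins user-data:

$ sudo cp -r /srv/salt/minion.d /etc/salt/   # as done by the instance user-data
$ sudo salt-call --local state.show_top      # should list the states from /srv/salt/states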
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/pillars/nginx.sls:
--------------------------------------------------------------------------------
1 | nginx:
2 | crt: |
3 | -----BEGIN CERTIFICATE-----
4 | MIIDGjCCAgKgAwIBAgIJAOyoVgqZyUcZMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
5 | BAMTB2plbmtpbnMwHhcNMTYwNzA5MTQ1MjIwWhcNMjYwNzA3MTQ1MjIwWjASMRAw
6 | DgYDVQQDEwdqZW5raW5zMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
7 | rW0WfTqJjWuslGE5wmUfcDcEILADe/X7L4MbxpOs7EPlX4BQuU4bIJBnaP3taWgR
8 | 8X3+Mcje8emnP2iMh4NCngSqPLia77jP4zCfxDGxcd4giK1RQsufsVRdDoaQ4ibB
9 | y6gaC0VPmA8pCOZGTdh6S4WLxiMbKlaTC/LT92Z/0m4JXDtCPZMN3pPQL3aPE8yW
10 | xybo2fhnyozLsW4SgzBe92cSuROKIsYAaWv6WoWgSnN0vRASrEMPmiMzYindcMA0
11 | VIi9DfL1sQcsS90pBfPbX7AGe0I/rD0J8WEy5gXS+XTK9nPtVrt/Vu30yEDWiU7c
12 | C8vWI3qs6adb+s093EPw/QIDAQABo3MwcTAdBgNVHQ4EFgQU1ABZ6EroAlhhLeQS
13 | hFpa70vhmZQwQgYDVR0jBDswOYAU1ABZ6EroAlhhLeQShFpa70vhmZShFqQUMBIx
14 | EDAOBgNVBAMTB2plbmtpbnOCCQDsqFYKmclHGTAMBgNVHRMEBTADAQH/MA0GCSqG
15 | SIb3DQEBBQUAA4IBAQBbwLB4sCRlwEgL+ClYDdPIDp+3Uthf2tKt9VjlhF/zKxPD
16 | KcBOPVLxPNJoq4El/akTkbNpUhU2iS1jFS+tFA7Wxf8MqLr9Zc1aNmkoWzjqkHYS
17 | PLARumMruw4axiqCP2xtiqXhPMRvPVBTmhMeRsPBCA0QWCgCZmgLuo+xcg9cFBT3
18 | C2O6JY6qlb0XJv2y/7npPiFr2V18G2aweyaf/FvtFnODVmqSi+HecID8ZmH9OaN4
19 | 6HpZgJ0LOG7RUH9rVvPg9fdukQY20PhakAsXhOLdRp9ddODUA4dTG1ano3RW6f8n
20 | jJ5HhA0Cb0tRrezqHbdTeOB+91I731az3QmmgKbO
21 | -----END CERTIFICATE-----
22 | key: |
23 | -----BEGIN RSA PRIVATE KEY-----
24 | MIIEpAIBAAKCAQEArW0WfTqJjWuslGE5wmUfcDcEILADe/X7L4MbxpOs7EPlX4BQ
25 | uU4bIJBnaP3taWgR8X3+Mcje8emnP2iMh4NCngSqPLia77jP4zCfxDGxcd4giK1R
26 | QsufsVRdDoaQ4ibBy6gaC0VPmA8pCOZGTdh6S4WLxiMbKlaTC/LT92Z/0m4JXDtC
27 | PZMN3pPQL3aPE8yWxybo2fhnyozLsW4SgzBe92cSuROKIsYAaWv6WoWgSnN0vRAS
28 | rEMPmiMzYindcMA0VIi9DfL1sQcsS90pBfPbX7AGe0I/rD0J8WEy5gXS+XTK9nPt
29 | Vrt/Vu30yEDWiU7cC8vWI3qs6adb+s093EPw/QIDAQABAoIBAQCav7+UiNpavdym
30 | Hkd65d7ys7TUMhs5zpmPoM71F6ryu/b9i7L8Vuyv1wrfTc4+AyYXtdRPuizt8g9R
31 | 7kmPVhnohMMfIZ7nD6M415eIassqjwm6y+S51Javllbe8kZv9iNxRZPPwM4wIj78
32 | ePX82pDtuMGrUIIZ+lyGCe0IUob2Tc61lx4A1ai5KlU0aWfhzTOnBa3NWet5l2Gs
33 | bHryHcz/TFKKaYryG+LzKDU2yOrH07hz8IAMAlXFR9baZOwSn7T71SkNPYOvyuLp
34 | dm5vln8BcfkoEv6AU+Bi3+Pi7y1UeFXVfre2jIxWIH62bBAoBK10bgQZ3L7qE3+O
35 | XZamoLVlAoGBAOKlSZ66TkfPaHFgbg2I6L9CqdqFvlSu1XRXWEvl6hM4b22DrpC2
36 | G/mytGu0c+U93TQNY04JhgFDf9h61BIMj/awNyAODiMsRambAaqHCAi+Dgy6p5VK
37 | K1dmFLXbmu1qwabVhgZdCBl34g4ACtASAJGwTDEWpcDGcsocy72CXaKLAoGBAMPj
38 | PfOWuW9xrMEWfbvKXqruJ46bWyBnwaQBViSd/33O56rsLHUZRDg2JkDDA336U1o0
39 | eaRHQL7BLszCuvSQpzEVnamCVhXYZ9yzzVL7YdUk92TTKQqZCriB8Cx/TN6Lxci/
40 | AyDmLyHmrW2oU0YsaNWa9G73BMRA7X78rwkKwlOXAoGAKmJ+whBVU1iWT52Y9y8D
41 | V8E/wn4AehW4FWnAOXFltPJ45CIcIzPrR2cEFqBIjDZlh7Z5O77MMLBO2E0gG7/9
42 | rESICpaWTj2ZSX6TcTCPcBMazYaakHCuaknM1bWb44pzbJ/B0K7VNO4WeEfJvd+f
43 | +57coNF7bfGuxd1cvLQEjsECgYAQFKONT55Ba8+GulXwCJjk51AQAOjmLB5VXFa6
44 | As5qgYW7HlA3/K8A/lD9mAS9XsNg8FXcCo1iG3HAFWxLj2RqPyAGPlDa0j0UfR4p
45 | 5cEOQk6c1EjWOeILa39P59NjoY3HAQc8uCi+W7V4/wx3AkZI4sOWKGkXw0y70/p6
46 | f4ucWQKBgQDDrmTymslhh/9YH96bdlwFh4iOyrOy3ZZa4UNYLpSXU6owWZRH8KfT
47 | Nn9vWmUE5IB1r9or8GaY/ibMKrYwo6ulAOg3uJNmXL0mKZH2lJOsTEnKZD9d7AZH
48 | 6ESfi4Jwk4lsgNhI9hfxC9QAY8wI8yKFHu+5zK7WuI8EELAkvHj8pw==
49 | -----END RSA PRIVATE KEY-----
50 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/pillars/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - nginx
5 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/pillars/users.sls:
--------------------------------------------------------------------------------
1 | users:
2 | veselin:
3 | uid: 5001
4 | password: '$1$wZ0gQOOo$HEN/gDGS85dEZM7QZVlFz/'
5 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/demo-app/init.sls:
--------------------------------------------------------------------------------
1 | {% set APP_VERSION = salt['cmd.run']('cat /tmp/APP_VERSION') %}
2 |
3 | include:
4 | - nginx
5 |
6 | demo-app:
7 | pkg.installed:
8 | - name: demo-app
9 | - version: {{ APP_VERSION }}
10 | - require_in:
11 | - service: nginx
12 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/docker/init.sls:
--------------------------------------------------------------------------------
1 | docker:
2 | pkg.installed: []
3 |
4 | service.running:
5 | - enable: True
6 | - reload: True
7 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/jenkins/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - docker
3 | - nginx
4 |
5 | jenkins_prereq:
6 | pkg.installed:
7 | - pkgs:
8 | - java-1.7.0-openjdk
9 | - gcc
10 | - make
11 | - createrepo
12 | - jq
13 | - httpd-tools
14 |
15 | jenkins:
16 | pkg.installed:
17 | - sources:
18 | - jenkins: http://mirrors.jenkins-ci.org/redhat-stable/jenkins-2.7.1-1.1.noarch.rpm
19 | - require:
20 | - pkg: jenkins_prereq
21 |
22 | user.present:
23 | - groups:
24 | - docker
25 | - require:
26 | - pkg: docker
27 |
28 | service.running:
29 | - enable: True
30 | - reload: True
31 | - require:
32 | - pkg: jenkins
33 | - user: jenkins
34 | - require_in:
35 | - service: nginx
36 |
37 | file.append:
38 | - name: /etc/sysconfig/jenkins
39 | - text: |
40 | ### Salt config
41 | JENKINS_LISTEN_ADDRESS="127.0.0.1"
42 | JENKINS_AJP_PORT="-1"
43 | - require:
44 | - pkg: jenkins
45 | - require_in:
46 | - service: jenkins
47 | - watch_in:
48 | - service: jenkins
49 |
50 | jenkins_sudoers:
51 | file.managed:
52 | - name: /etc/sudoers.d/jenkins
53 | - contents: 'jenkins ALL=(ALL) NOPASSWD:NOEXEC: /usr/bin/yum clean expire-cache, /usr/bin/yum repolist'
54 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/nginx/demo-app.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | /etc/nginx/conf.d/demo-app.conf:
5 | file.managed:
6 | - source: salt://nginx/files/demo-app.conf
7 | - require:
8 | - pkg: nginx
9 | - require_in:
10 | - service: nginx
11 | - watch_in:
12 | - service: nginx
13 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/nginx/files/demo-app.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name *.amazonaws.com;
4 |
5 | root /var/www/demo-app;
6 | index index.php;
7 |
8 | location / {
9 | try_files $uri $uri/ =404;
10 | }
11 |
12 | location ~ \.php$ {
13 | try_files $uri =404;
14 | fastcgi_split_path_info ^(.+\.php)(/.+)$;
15 | fastcgi_pass 127.0.0.1:9000;
16 | fastcgi_index index.php;
17 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
18 | include fastcgi_params;
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/nginx/files/jenkins.conf:
--------------------------------------------------------------------------------
1 | upstream jenkins {
2 | server 127.0.0.1:8080 fail_timeout=0;
3 | }
4 |
5 | server {
6 | listen 80;
7 | server_name *.amazonaws.com;
8 | return 301 https://$host$request_uri;
9 | }
10 |
11 | server {
12 | listen 443 ssl;
13 | server_name *.amazonaws.com;
14 |
15 | ssl_certificate /etc/nginx/ssl/server.crt;
16 | ssl_certificate_key /etc/nginx/ssl/server.key;
17 |
18 | location / {
19 | proxy_set_header Host $host:$server_port;
20 | proxy_set_header X-Real-IP $remote_addr;
21 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
22 | proxy_set_header X-Forwarded-Proto $scheme;
23 | proxy_redirect http:// https://;
24 | proxy_pass http://jenkins;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/nginx/init.sls:
--------------------------------------------------------------------------------
1 | nginx:
2 | pkg.installed: []
3 |
4 | service.running:
5 | - enable: True
6 | - reload: True
7 | - require:
8 | - pkg: nginx
9 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/nginx/jenkins.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | /etc/nginx/conf.d/jenkins.conf:
5 | file.managed:
6 | - source: salt://nginx/files/jenkins.conf
7 | - require:
8 | - pkg: nginx
9 | - require_in:
10 | - service: nginx
11 | - watch_in:
12 | - service: nginx
13 |
14 | {% for FIL in ['crt','key'] %}
15 | /etc/nginx/ssl/server.{{ FIL }}:
16 | file.managed:
17 | - makedirs: True
18 | - mode: 400
19 | - contents_pillar: nginx:{{ FIL }}
20 | - require:
21 | - pkg: nginx
22 | - require_in:
23 | - service: nginx
24 | - watch_in:
25 | - service: nginx
26 | {% endfor %}
27 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/packer/init.sls:
--------------------------------------------------------------------------------
1 | packer:
2 | archive.extracted:
3 | - name: /opt/
4 | - source: 'https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_linux_amd64.zip'
5 | - source_hash: md5=3a54499fdf753e7e7c682f5d704f684f
6 | - archive_format: zip
7 | - if_missing: /opt/packer
8 |
9 | cmd.wait:
10 | - name: 'chmod +x /opt/packer'
11 | - watch:
12 | - archive: packer
13 |
14 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/php-fpm/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nginx
3 |
4 | php-fpm:
5 | pkg.installed:
6 | - name: php-fpm
7 | - require:
8 | - pkg: nginx
9 |
10 | service.running:
11 | - name: php-fpm
12 | - enable: True
13 | - reload: True
14 | - require_in:
15 | - service: nginx
16 |
17 | php-fpm_www.conf_1:
18 | file.replace:
19 | - name: /etc/php-fpm.d/www.conf
20 | - pattern: ^user = apache$
21 | - repl: user = nginx
22 | - require:
23 | - pkg: php-fpm
24 | - require_in:
25 | - service: php-fpm
26 | - watch_in:
27 | - service: php-fpm
28 |
29 | php-fpm_www.conf_2:
30 | file.replace:
31 | - name: /etc/php-fpm.d/www.conf
32 | - pattern: ^group = apache$
33 | - repl: group = nginx
34 | - require:
35 | - pkg: php-fpm
36 | - require_in:
37 | - service: php-fpm
38 | - watch_in:
39 | - service: php-fpm
40 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - users
4 | - yum-s3
5 |
6 | 'roles:jenkins':
7 | - match: grain
8 | - jenkins
9 | - nginx.jenkins
10 | - docker
11 | - packer
12 |
13 | 'roles:demo-app':
14 | - match: grain
15 | - php-fpm
16 | - nginx.demo-app
17 | - demo-app
18 |
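
Which of the role blocks above gets applied is decided purely by the "roles" grain that the user-data scripts write; a sketch of setting the grain and applying the highstate by hand (the jenkins role is the one used in this chapter's user-data):

$ echo -e 'grains:\n roles:\n - jenkins' | sudo tee /etc/salt/minion.d/grains.conf
$ sudo salt-call grains.get roles    # confirm the grain is visible
$ sudo salt-call state.apply         # applies users, yum-s3 and the jenkins block above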
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/users/files/veselin.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAA...
2 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/users/init.sls:
--------------------------------------------------------------------------------
1 | veselin:
2 | user.present:
3 | - fullname: Veselin Kantsev
4 | - uid: {{ salt['pillar.get']('users:veselin:uid') }}
5 | - password: {{ salt['pillar.get']('users:veselin:password') }}
6 | - groups:
7 | - wheel
8 |
9 | ssh_auth.present:
10 | - user: veselin
11 | - source: salt://users/files/veselin.pub
12 | - require:
13 | - user: veselin
14 |
15 | file.managed:
16 | - name: /etc/sudoers.d/veselin
17 | - contents: 'veselin ALL=(ALL) ALL'
18 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/yum-s3/files/cob.conf:
--------------------------------------------------------------------------------
1 | ;
2 | ; Cob: yet another yum S3 plugin - /etc/yum/pluginconf.d/cob.conf
3 | ;
4 | ; Copyright 2014-2015, Henry Huang . All Rights Reserved.
5 | ;
6 | ; Licensed under the Apache License, Version 2.0 (the "License");
7 | ; you may not use this file except in compliance with the License.
8 | ; You may obtain a copy of the License at
9 | ;
10 | ; http://www.apache.org/licenses/LICENSE-2.0
11 | ;
12 | ; Unless required by applicable law or agreed to in writing, software
13 | ; distributed under the License is distributed on an "AS IS" BASIS,
14 | ; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | ; See the License for the specific language governing permissions and
16 | ; limitations under the License.
17 | ;
18 | [main]
19 | cachedir=/var/cache/yum/$basearch/$releasever
20 | keepcache=1
21 | debuglevel=4
22 | logfile=/var/log/yum.log
23 | exactarch=1
24 | obsoletes=0
25 | gpgcheck=0
26 | plugins=1
27 | distroverpkg=centos-release
28 | enabled=1
29 |
30 | [aws]
31 | # access_key =
32 | # secret_key =
33 | timeout = 60
34 | retries = 5
35 | metadata_server = http://169.254.169.254
36 |
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/yum-s3/files/s3.repo:
--------------------------------------------------------------------------------
1 | [s3-repo]
2 | name=S3-repo
3 | baseurl=https://S3_BUCKET_URL
4 | enabled=0
5 | gpgcheck=0
6 |
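
S3_BUCKET_URL here, like S3_BUCKET_NAME in the demo-app Jenkinsfile, is a placeholder to be replaced with the bucket backing the YUM repository; one possible way to substitute it, using a purely hypothetical bucket name:

$ sed -i 's|https://S3_BUCKET_URL|https://my-yum-bucket.s3.amazonaws.com|' salt/states/yum-s3/files/s3.repo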
--------------------------------------------------------------------------------
/Chapter 6/CodeCommit/salt/states/yum-s3/init.sls:
--------------------------------------------------------------------------------
1 | yum-s3_cob.py:
2 | file.managed:
3 | - name: /usr/lib/yum-plugins/cob.py
4 | - source: salt://yum-s3/files/cob.py
5 |
6 | yum-s3_cob.conf:
7 | file.managed:
8 | - name: /etc/yum/pluginconf.d/cob.conf
9 | - source: salt://yum-s3/files/cob.conf
10 |
11 | yum-s3_s3.repo:
12 | file.managed:
13 | - name: /etc/yum.repos.d/s3.repo
14 | - source: salt://yum-s3/files/s3.repo
15 |
--------------------------------------------------------------------------------
/Chapter 6/Terraform/iam_user_policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "NotAction": [
7 | "codecommit:DeleteRepository"
8 | ],
9 | "Resource": "*"
10 | },
11 | {
12 | "Effect": "Allow",
13 | "NotAction": [
14 | "s3:DeleteBucket"
15 | ],
16 | "Resource": "*"
17 | },
18 | {
19 | "Sid": "Stmt1461764665000",
20 | "Effect": "Allow",
21 | "Action": [
22 | "ec2:AllocateAddress",
23 | "ec2:AssociateAddress",
24 | "ec2:AssociateRouteTable",
25 | "ec2:AttachInternetGateway",
26 | "ec2:AuthorizeSecurityGroupEgress",
27 | "ec2:AuthorizeSecurityGroupIngress",
28 | "ec2:CreateInternetGateway",
29 | "ec2:CreateRoute",
30 | "ec2:CreateRouteTable",
31 | "ec2:CreateSecurityGroup",
32 | "ec2:CreateSubnet",
33 | "ec2:CreateTags",
34 | "ec2:CreateVpc",
35 | "ec2:Describe*",
36 | "ec2:ModifySubnetAttribute",
37 | "ec2:RevokeSecurityGroupEgress",
38 | "elasticloadbalancing:AddTags",
39 | "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
40 | "elasticloadbalancing:AttachLoadBalancerToSubnets",
41 | "elasticloadbalancing:CreateLoadBalancer",
42 | "elasticloadbalancing:CreateLoadBalancerListeners",
43 | "elasticloadbalancing:Describe*",
44 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
45 | "iam:AddRoleToInstanceProfile",
46 | "iam:CreateInstanceProfile",
47 | "iam:CreateRole",
48 | "iam:Get*",
49 | "iam:PassRole",
50 | "iam:PutRolePolicy"
51 | ],
52 | "Resource": [
53 | "*"
54 | ]
55 | }
56 | ]
57 | }
58 |
--------------------------------------------------------------------------------
/Chapter 6/Terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "VPC ID" {
2 | value = "${aws_vpc.terraform-vpc.id}"
3 | }
4 |
5 | output "JENKINS EIP" {
6 | value = "${aws_eip.jenkins.public_ip}"
7 | }
8 |
9 | output "ELB URI" {
10 | value = "${aws_elb.demo-app-elb.dns_name}"
11 | }
12 |
13 | output "ELB URI PROD" {
14 | value = "${aws_elb.demo-app-elb-prod.dns_name}"
15 | }
16 |
17 | output "Private subnet ID" {
18 | value = "${aws_subnet.private-1.id}"
19 | }
20 |
21 | output "Demo-app secgroup" {
22 | value = "${aws_security_group.demo-app.id}"
23 | }
24 |
--------------------------------------------------------------------------------
/Chapter 6/Terraform/resources.tf:
--------------------------------------------------------------------------------
1 | # Set a Provider
2 | provider "aws" {
3 | region = "${var.aws-region}"
4 | }
5 |
6 | ### VPC ###
7 |
8 | # Create a VPC
9 | resource "aws_vpc" "terraform-vpc" {
10 | cidr_block = "${var.vpc-cidr}"
11 |
12 | tags {
13 | Name = "${var.vpc-name}"
14 | }
15 | }
16 |
17 | # Create an Internet Gateway
18 | resource "aws_internet_gateway" "terraform-igw" {
19 | vpc_id = "${aws_vpc.terraform-vpc.id}"
20 | }
21 |
22 | # Create NAT
23 | resource "aws_eip" "nat-eip" {
24 | vpc = true
25 | }
26 |
27 | resource "aws_nat_gateway" "terraform-nat" {
28 | allocation_id = "${aws_eip.nat-eip.id}"
29 | subnet_id = "${aws_subnet.public-1.id}"
30 | depends_on = ["aws_internet_gateway.terraform-igw"]
31 | }
32 |
33 | # Create public and private route tables
34 | resource "aws_route_table" "public" {
35 | vpc_id = "${aws_vpc.terraform-vpc.id}"
36 | route {
37 | cidr_block = "0.0.0.0/0"
38 | gateway_id = "${aws_internet_gateway.terraform-igw.id}"
39 | }
40 |
41 | tags {
42 | Name = "Public"
43 | }
44 | }
45 |
46 | resource "aws_route_table" "private" {
47 | vpc_id = "${aws_vpc.terraform-vpc.id}"
48 | route {
49 | cidr_block = "0.0.0.0/0"
50 | nat_gateway_id = "${aws_nat_gateway.terraform-nat.id}"
51 | }
52 |
53 | tags {
54 | Name = "Private"
55 | }
56 | }
57 |
58 | # Create and associate public subnets with a route table
59 | resource "aws_subnet" "public-1" {
60 | vpc_id = "${aws_vpc.terraform-vpc.id}"
61 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 1)}"
62 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
63 | map_public_ip_on_launch = true
64 |
65 | tags {
66 | Name = "Public"
67 | }
68 | }
69 |
70 | resource "aws_route_table_association" "public-1" {
71 | subnet_id = "${aws_subnet.public-1.id}"
72 | route_table_id = "${aws_route_table.public.id}"
73 | }
74 |
75 | resource "aws_subnet" "public-2" {
76 | vpc_id = "${aws_vpc.terraform-vpc.id}"
77 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 3)}"
78 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index + 1)}"
79 | map_public_ip_on_launch = true
80 |
81 | tags {
82 | Name = "Public"
83 | }
84 | }
85 |
86 | resource "aws_route_table_association" "public-2" {
87 | subnet_id = "${aws_subnet.public-2.id}"
88 | route_table_id = "${aws_route_table.public.id}"
89 | }
90 |
91 | # Create and associate private subnets with a route table
92 | resource "aws_subnet" "private-1" {
93 | vpc_id = "${aws_vpc.terraform-vpc.id}"
94 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 2)}"
95 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index)}"
96 | map_public_ip_on_launch = false
97 |
98 | tags {
99 | Name = "Private"
100 | }
101 | }
102 |
103 | resource "aws_route_table_association" "private-1" {
104 | subnet_id = "${aws_subnet.private-1.id}"
105 | route_table_id = "${aws_route_table.private.id}"
106 | }
107 |
108 | resource "aws_subnet" "private-2" {
109 | vpc_id = "${aws_vpc.terraform-vpc.id}"
110 | cidr_block = "${cidrsubnet(var.vpc-cidr, 8, 4)}"
111 | availability_zone = "${element(split(",",var.aws-availability-zones), count.index +1)}"
112 | map_public_ip_on_launch = false
113 |
114 | tags {
115 | Name = "Private"
116 | }
117 | }
118 |
119 | resource "aws_route_table_association" "private-2" {
120 | subnet_id = "${aws_subnet.private-2.id}"
121 | route_table_id = "${aws_route_table.private.id}"
122 | }
123 |
124 | ### IAM ###
125 |
126 | resource "aws_iam_role" "jenkins" {
127 | name = "jenkins"
128 | path = "/"
124 | assume_role_policy = <<EOF
424 | exec 1> >(logger -s -t $(basename $0)) 2>&1
425 | # Install Git and set CodeCommit connection settings
426 | # (required for access via IAM roles)
427 | yum -y install git
428 | git config --system credential.helper '!aws codecommit credential-helper $@'
429 | git config --system credential.UseHttpPath true
430 | # Clone the Salt repository
431 | git clone https://git-codecommit.us-east-1.amazonaws.com/v1/repos/salt /srv/salt; chmod 700 /srv/salt
432 | # Install SaltStack
433 | yum -y install https://repo.saltstack.com/yum/amazon/salt-amzn-repo-latest-1.ami.noarch.rpm
434 | yum clean expire-cache; yum -y install salt-minion; chkconfig salt-minion off
435 | # Put custom minion config in place (for enabling masterless mode)
436 | cp -r /srv/salt/minion.d /etc/salt/
437 | echo -e 'grains:\n roles:\n - jenkins' > /etc/salt/minion.d/grains.conf
438 | ## Trigger a full Salt run
439 | salt-call state.apply
440 | EOF
441 |
442 | lifecycle { create_before_destroy = true }
443 | }
444 |
445 | resource "aws_eip" "jenkins" {
446 | instance = "${aws_instance.jenkins.id}"
447 | vpc = true
448 | }
449 |
450 | resource "aws_launch_configuration" "demo-app-lcfg" {
451 | name = "placeholder_launch_config"
452 | image_id = "${var.jenkins-ami-id}"
453 | instance_type = "${var.jenkins-instance-type}"
454 | iam_instance_profile = "${aws_iam_instance_profile.demo-app.id}"
455 | security_groups = ["${aws_security_group.demo-app.id}"]
456 | }
457 |
458 | resource "aws_autoscaling_group" "demo-app-blue" {
459 | name = "demo-app-blue"
460 | launch_configuration = "${aws_launch_configuration.demo-app-lcfg.id}"
461 | vpc_zone_identifier = ["${aws_subnet.private-1.id}", "${aws_subnet.private-2.id}"]
462 | min_size = 0
463 | max_size = 0
464 |
465 | tag {
466 | key = "ASG"
467 | value = "demo-app-blue"
468 | propagate_at_launch = true
469 | }
470 | }
471 |
472 | resource "aws_autoscaling_group" "demo-app-green" {
473 | name = "demo-app-green"
474 | launch_configuration = "${aws_launch_configuration.demo-app-lcfg.id}"
475 | vpc_zone_identifier = ["${aws_subnet.private-1.id}", "${aws_subnet.private-2.id}"]
476 | min_size = 0
477 | max_size = 0
478 |
479 | tag {
480 | key = "ASG"
481 | value = "demo-app-green"
482 | propagate_at_launch = true
483 | }
484 | }
485 |
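
After applying this template, both Auto Scaling groups exist but are pinned at zero instances; it is the cdeployment.sh script that later shifts capacity between them. A quick way to confirm that starting state, using the region from terraform.tfvars:

$ aws autoscaling describe-auto-scaling-groups --region us-east-1 \
      --auto-scaling-group-names demo-app-blue demo-app-green \
      | jq '.AutoScalingGroups[] | {AutoScalingGroupName, MinSize, MaxSize, DesiredCapacity}'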
--------------------------------------------------------------------------------
/Chapter 6/Terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | aws-region = "us-east-1"
2 | vpc-cidr = "10.0.0.0/16"
3 | vpc-name = "Terraform"
4 | aws-availability-zones = "us-east-1b,us-east-1c"
5 | jenkins-ami-id = "ami-6869aa05"
6 | jenkins-instance-type = "t2.nano"
7 | jenkins-key-name = "terraform"
8 |
--------------------------------------------------------------------------------
/Chapter 6/Terraform/variables.tf:
--------------------------------------------------------------------------------
1 | ### VPC ###
2 | variable "aws-region" {
3 | type = "string"
4 | description = "AWS region"
5 | }
6 | variable "vpc-cidr" {
7 | type = "string"
8 | description = "VPC CIDR"
9 | }
10 | variable "vpc-name" {
11 | type = "string"
12 | description = "VPC name"
13 | }
14 | variable "aws-availability-zones" {
15 | type = "string"
16 | description = "AWS zones"
17 | }
18 |
19 | ### EC2 ###
20 | variable "jenkins-ami-id" {
21 | type = "string"
22 | description = "EC2 AMI identifier"
23 | }
24 | variable "jenkins-instance-type" {
25 | type = "string"
26 | description = "EC2 instance type"
27 | }
28 | variable "jenkins-key-name" {
29 | type = "string"
30 | description = "EC2 ssh key name"
31 | }
32 |
--------------------------------------------------------------------------------
/Chapter 7/elk/etc/elasticsearch/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: wonga-bonga
2 | index.number_of_shards: 1
3 | index.number_of_replicas: 0
4 | index :
5 | refresh_interval: 5s
6 |
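
Once Elasticsearch is running with this configuration, its state can be checked over the REST API; a minimal sketch, assuming the node listens on the default localhost:9200:

$ curl -s 'http://localhost:9200/_cluster/health?pretty'   # cluster name and status
$ curl -s 'http://localhost:9200/_cat/indices?v'           # indices created by the Beats/Logstash pipeline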
--------------------------------------------------------------------------------
/Chapter 7/elk/etc/filebeat/filebeat.yml:
--------------------------------------------------------------------------------
1 | ################### Filebeat Configuration Example #########################
2 |
3 | ############################# Filebeat ######################################
4 | filebeat:
5 | # List of prospectors to fetch data.
6 | prospectors:
7 | # Each - is a prospector. Below are the prospector specific configurations
8 | -
9 | # Paths that should be crawled and fetched. Glob based paths.
10 | # To fetch all ".log" files from a specific level of subdirectories
11 | # /var/log/*/*.log can be used.
12 | # For each file found under this path, a harvester is started.
13 | # Make sure no file is defined twice as this can lead to unexpected behaviour.
14 | paths:
15 | - /var/log/*.log
16 | - /var/log/messages
17 | - /var/log/secure
18 | #- c:\programdata\elasticsearch\logs\*
19 |
20 | # Configure the file encoding for reading files with international characters
21 | # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
22 | # Some sample encodings:
23 | # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
24 | # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
25 | #encoding: plain
26 |
27 | # Type of the files. Based on this the way the file is read is decided.
28 | # The different types cannot be mixed in one prospector
29 | #
30 | # Possible options are:
31 | # * log: Reads every line of the log file (default)
32 | # * stdin: Reads the standard in
33 | input_type: log
34 |
35 | # Exclude lines. A list of regular expressions to match. It drops the lines that are
36 | # matching any regular expression from the list. The include_lines is called before
37 | # exclude_lines. By default, no lines are dropped.
38 | # exclude_lines: ["^DBG"]
39 |
40 | # Include lines. A list of regular expressions to match. It exports the lines that are
41 | # matching any regular expression from the list. The include_lines is called before
42 | # exclude_lines. By default, all the lines are exported.
43 | # include_lines: ["^ERR", "^WARN"]
44 |
45 | # Exclude files. A list of regular expressions to match. Filebeat drops the files that
46 | # are matching any regular expression from the list. By default, no files are dropped.
47 | # exclude_files: [".gz$"]
48 |
49 | # Optional additional fields. These field can be freely picked
50 | # to add additional information to the crawled log files for filtering
51 | #fields:
52 | # level: debug
53 | # review: 1
54 |
55 | # Set to true to store the additional fields as top level fields instead
56 | # of under the "fields" sub-dictionary. In case of name conflicts with the
57 | # fields added by Filebeat itself, the custom fields overwrite the default
58 | # fields.
59 | #fields_under_root: false
60 |
61 | # Ignore files which were modified more than the defined timespan in the past.
62 | # In case all files on your system must be read you can set this value very large.
63 | # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
64 | #ignore_older: 0
65 |
66 | # Close older closes the file handler for files which were not modified
67 | # for longer than close_older
68 | # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
69 | #close_older: 1h
70 |
71 | # Type to be published in the 'type' field. For Elasticsearch output,
72 | # the type defines the document type these entries should be stored
73 | # in. Default: log
74 | #document_type: log
75 |
76 | # Scan frequency in seconds.
77 | # How often these files should be checked for changes. In case it is set
78 | # to 0s, it is done as often as possible. Default: 10s
79 | #scan_frequency: 10s
80 |
81 | # Defines the buffer size every harvester uses when fetching the file
82 | #harvester_buffer_size: 16384
83 |
84 | # Maximum number of bytes a single log event can have
85 | # All bytes after max_bytes are discarded and not sent. The default is 10MB.
86 | # This is especially useful for multiline log messages which can get large.
87 | #max_bytes: 10485760
88 |
89 | # Multiline can be used for log messages spanning multiple lines. This is common
90 | # for Java Stack Traces or C-Line Continuation
91 | #multiline:
92 |
93 | # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
94 | #pattern: ^\[
95 |
96 | # Defines if the pattern set under pattern should be negated or not. Default is false.
97 | #negate: false
98 |
99 | # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
100 | # that was (not) matched before or after or as long as a pattern is not matched based on negate.
101 | # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
102 | #match: after
103 |
104 | # The maximum number of lines that are combined to one event.
105 | # In case there are more the max_lines the additional lines are discarded.
106 | # Default is 500
107 | #max_lines: 500
108 |
109 | # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
110 | # Default is 5s.
111 | #timeout: 5s
112 |
113 | # Setting tail_files to true means filebeat starts reading new files at the end
114 | # instead of the beginning. If this is used in combination with log rotation
115 | # this can mean that the first entries of a new file are skipped.
116 | #tail_files: false
117 |
118 | # Backoff values define how aggressively filebeat crawls new files for updates
119 | # The default values can be used in most cases. Backoff defines how long it is waited
120 | # to check a file again after EOF is reached. Default is 1s which means the file
121 | # is checked every second if new lines were added. This leads to a near real time crawling.
122 | # Every time a new line appears, backoff is reset to the initial value.
123 | #backoff: 1s
124 |
125 | # Max backoff defines what the maximum backoff time is. After having backed off multiple times
126 | # from checking the files, the waiting time will never exceed max_backoff independent of the
127 | # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
128 | # file after having backed off multiple times, it takes a maximum of 10s to read the new line
129 | #max_backoff: 10s
130 |
131 | # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
132 | # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
133 | # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
134 | #backoff_factor: 2
135 |
136 | # This option closes a file, as soon as the file name changes.
137 | # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
138 | # issues when the file is removed, as the file will not be fully removed until also Filebeat closes
139 | # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the
140 | # same name can be created. Turning this feature on the other hand can lead to loss of data
141 | # on rotate files. It can happen that after file rotation the beginning of the new
142 | # file is skipped, as the reading starts at the end. We recommend to leave this option on false
143 | # but lower the ignore_older value to release files faster.
144 | #force_close_files: false
145 |
146 | # Additional prospector
147 | #-
148 | # Configuration to use stdin input
149 | #input_type: stdin
150 |
151 | # General filebeat configuration options
152 | #
153 | # Event count spool threshold - forces network flush if exceeded
154 | #spool_size: 2048
155 |
156 | # Enable async publisher pipeline in filebeat (Experimental!)
157 | #publish_async: false
158 |
159 | # Defines how often the spooler is flushed. After idle_timeout the spooler is
160 | # flushed even though spool_size is not reached.
161 | #idle_timeout: 5s
162 |
163 | # Name of the registry file. Per default it is put in the current working
164 | # directory. In case the working directory is changed when running
165 | # filebeat again, indexing starts from the beginning again.
166 | registry_file: /var/lib/filebeat/registry
167 |
168 | # Full Path to directory with additional prospector configuration files. Each file must end with .yml
169 | # These config files must have the full filebeat config part inside, but only
170 | # the prospector part is processed. All global options like spool_size are ignored.
171 | # The config_dir MUST point to a different directory than the one the main filebeat config file is in.
172 | #config_dir:
173 |
174 | ###############################################################################
175 | ############################# Libbeat Config ##################################
176 | # Base config file used by all other beats for using libbeat features
177 |
178 | ############################# Output ##########################################
179 |
180 | # Configure what outputs to use when sending the data collected by the beat.
181 | # Multiple outputs may be used.
182 | output:
183 |
184 | ### Elasticsearch as output
185 | #elasticsearch:
186 | # Array of hosts to connect to.
187 | # Scheme and port can be left out and will be set to the default (http and 9200)
188 | # In case you specify an additional path, the scheme is required: http://localhost:9200/path
189 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
190 | # hosts: ["localhost:9200"]
191 |
192 | # Optional protocol and basic auth credentials.
193 | #protocol: "https"
194 | #username: "admin"
195 | #password: "s3cr3t"
196 |
197 | # Number of workers per Elasticsearch host.
198 | #worker: 1
199 |
200 | # Optional index name. The default is "filebeat" and generates
201 | # [filebeat-]YYYY.MM.DD keys.
202 | #index: "filebeat"
203 |
204 | # A template is used to set the mapping in Elasticsearch
205 | # By default template loading is disabled and no template is loaded.
206 | # These settings can be adjusted to load your own template or overwrite existing ones
207 | #template:
208 |
209 | # Template name. By default the template name is filebeat.
210 | #name: "filebeat"
211 |
212 | # Path to template file
213 | #path: "filebeat.template.json"
214 |
215 | # Overwrite existing template
216 | #overwrite: false
217 |
218 | # Optional HTTP Path
219 | #path: "/elasticsearch"
220 |
221 | # Proxy server url
222 | #proxy_url: http://proxy:3128
223 |
224 | # The number of times a particular Elasticsearch index operation is attempted. If
225 | # the indexing operation doesn't succeed after this many retries, the events are
226 | # dropped. The default is 3.
227 | #max_retries: 3
228 |
229 | # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
230 | # The default is 50.
231 | #bulk_max_size: 50
232 |
233 | # Configure http request timeout before failing a request to Elasticsearch.
234 | #timeout: 90
235 |
236 | # The number of seconds to wait for new events between two bulk API index requests.
237 | # If `bulk_max_size` is reached before this interval expires, additional bulk index
238 | # requests are made.
239 | #flush_interval: 1
240 |
241 | # Boolean that sets if the topology is kept in Elasticsearch. The default is
242 | # false. This option makes sense only for Packetbeat.
243 | #save_topology: false
244 |
245 | # The time to live in seconds for the topology information that is stored in
246 | # Elasticsearch. The default is 15 seconds.
247 | #topology_expire: 15
248 |
249 | # tls configuration. By default it is off.
250 | #tls:
251 | # List of root certificates for HTTPS server verifications
252 | #certificate_authorities: ["/etc/pki/root/ca.pem"]
253 |
254 | # Certificate for TLS client authentication
255 | #certificate: "/etc/pki/client/cert.pem"
256 |
257 | # Client Certificate Key
258 | #certificate_key: "/etc/pki/client/cert.key"
259 |
260 | # Controls whether the client verifies server certificates and host name.
261 | # If insecure is set to true, all server host names and certificates will be
262 | # accepted. In this mode TLS based connections are susceptible to
263 | # man-in-the-middle attacks. Use only for testing.
264 | #insecure: true
265 |
266 | # Configure cipher suites to be used for TLS connections
267 | #cipher_suites: []
268 |
269 | # Configure curve types for ECDHE based cipher suites
270 | #curve_types: []
271 |
272 | # Configure minimum TLS version allowed for connection to logstash
273 | #min_version: 1.0
274 |
275 | # Configure maximum TLS version allowed for connection to logstash
276 | #max_version: 1.2
277 |
278 |
279 | ### Logstash as output
280 | logstash:
281 | # The Logstash hosts
282 | hosts: ["localhost:5044"]
283 |
284 | # Number of workers per Logstash host.
285 | #worker: 1
286 |
287 | # The maximum number of events to bulk into a single batch window. The
288 | # default is 2048.
289 | #bulk_max_size: 2048
290 |
291 | # Set gzip compression level.
292 | #compression_level: 3
293 |
294 | # Optionally load balance the events between the Logstash hosts
295 | #loadbalance: true
296 |
297 | # Optional index name. The default index name depends on each beat.
298 | # For Packetbeat, the default is set to packetbeat, for Topbeat
299 | # to topbeat and for Filebeat to filebeat.
300 | #index: filebeat
301 |
302 | # Optional TLS. By default it is off.
303 | #tls:
304 | # List of root certificates for HTTPS server verifications
305 | #certificate_authorities: ["/etc/pki/root/ca.pem"]
306 |
307 | # Certificate for TLS client authentication
308 | #certificate: "/etc/pki/client/cert.pem"
309 |
310 | # Client Certificate Key
311 | #certificate_key: "/etc/pki/client/cert.key"
312 |
313 | # Controls whether the client verifies server certificates and host name.
314 | # If insecure is set to true, all server host names and certificates will be
315 | # accepted. In this mode TLS based connections are susceptible to
316 | # man-in-the-middle attacks. Use only for testing.
317 | #insecure: true
318 |
319 | # Configure cipher suites to be used for TLS connections
320 | #cipher_suites: []
321 |
322 | # Configure curve types for ECDHE based cipher suites
323 | #curve_types: []
324 |
325 |
326 | ### File as output
327 | #file:
328 | # Path to the directory where the generated files are saved. The option is mandatory.
329 | #path: "/tmp/filebeat"
330 |
331 | # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
332 | #filename: filebeat
333 |
334 | # Maximum size in kilobytes of each file. When this size is reached, the files are
335 | # rotated. The default value is 10 MB.
336 | #rotate_every_kb: 10000
337 |
338 | # Maximum number of files under path. When this number of files is reached, the
339 | # oldest file is deleted and the rest are shifted from last to first. The default
340 | # is 7 files.
341 | #number_of_files: 7
342 |
343 |
344 | ### Console output
345 | # console:
346 | # Pretty print json event
347 | #pretty: false
348 |
349 |
350 | ############################# Shipper #########################################
351 |
352 | shipper:
353 | # The name of the shipper that publishes the network data. It can be used to group
354 | # all the transactions sent by a single shipper in the web interface.
355 | # If this option is not defined, the hostname is used.
356 | #name:
357 |
358 | # The tags of the shipper are included in their own field with each
359 | # transaction published. Tags make it easy to group servers by different
360 | # logical properties.
361 | #tags: ["service-X", "web-tier"]
362 |
363 | # Uncomment the following if you want to ignore transactions created
364 | # by the server on which the shipper is installed. This option is useful
365 | # to remove duplicates if shippers are installed on multiple servers.
366 | #ignore_outgoing: true
367 |
368 | # How often (in seconds) shippers are publishing their IPs to the topology map.
369 | # The default is 10 seconds.
370 | #refresh_topology_freq: 10
371 |
372 | # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
373 | # All the IPs will be deleted afterwards. Note that the value must be higher than
374 | # refresh_topology_freq. The default is 15 seconds.
375 | #topology_expire: 15
376 |
377 | # Internal queue size for single events in processing pipeline
378 | #queue_size: 1000
379 |
380 | # Configure local GeoIP database support.
381 | # If no paths are configured, geoip is disabled.
382 | #geoip:
383 | #paths:
384 | # - "/usr/share/GeoIP/GeoLiteCity.dat"
385 | # - "/usr/local/var/GeoIP/GeoLiteCity.dat"
386 |
387 |
388 | ############################# Logging #########################################
389 |
390 | # There are three options for the log output: syslog, file, stderr.
391 | # On Windows systems, the logs are sent to the file output by default;
392 | # on all other systems they go to syslog by default.
393 | logging:
394 |
395 | # Send all logging output to syslog. On Windows default is false, otherwise
396 | # default is true.
397 | #to_syslog: true
398 |
399 | # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
400 | # limit is reached.
401 | #to_files: false
402 |
403 | # To enable logging to files, to_files option has to be set to true
404 | files:
405 | # The directory where the log files will be written to.
406 | #path: /var/log/mybeat
407 |
408 | # The name of the files where the logs are written to.
409 | #name: mybeat
410 |
411 | # Configure the log file size limit. If the limit is reached, the log file will be
412 | # automatically rotated
413 | rotateeverybytes: 10485760 # = 10MB
414 |
415 | # Number of rotated log files to keep. Oldest files will be deleted first.
416 | #keepfiles: 7
417 |
418 | # Enable debug output for selected components. To enable all selectors use ["*"]
419 | # Other available selectors are beat, publish, service
420 | # Multiple selectors can be chained.
421 | #selectors: [ ]
422 |
423 | # Sets log level. The default log level is error.
424 | # Available log levels are: critical, error, warning, info, debug
425 | #level: error
426 |
427 |
428 |
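The settings that matter for shipping in the file above are registry_file, which pins the read offsets to /var/lib/filebeat/registry so they survive restarts, and the logstash output, which sends events to the beats listener on port 5044. A minimal smoke-test sketch, assuming Filebeat and the Logstash beats input run on the same host as configured here:

    # Confirm the Logstash beats listener is reachable from the Filebeat host
    nc -zv localhost 5044

    # Inspect the offsets Filebeat has recorded so far (the registry is plain JSON)
    cat /var/lib/filebeat/registry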
--------------------------------------------------------------------------------
/Chapter 7/elk/etc/logstash/conf.d/main.conf:
--------------------------------------------------------------------------------
1 | input {
2 | beats {
3 | port => 5044
4 | }
5 | }
6 |
7 | filter {
8 | if [type] == "nginx-access" {
9 | grok {
10 | match => { "message" => "%{NGINXACCESS}" }
11 | }
12 | }
13 | }
14 |
15 | output {
16 | elasticsearch {
17 | hosts => "localhost:9200"
18 | manage_template => false
19 | index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
20 | document_type => "%{[@metadata][type]}"
21 | }
22 | }
23 |
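The pipeline above accepts events from Filebeat on port 5044, runs the NGINXACCESS grok pattern only against events whose type is nginx-access, and writes everything to a daily Elasticsearch index named after the originating beat (for example filebeat-2017.01.12). A couple of hedged checks, assuming Elasticsearch listens on localhost:9200 as configured:

    # List the daily Filebeat indices created by the output section
    curl -s 'http://localhost:9200/_cat/indices/filebeat-*?v'

    # Spot-check that an nginx-access event was parsed into separate fields
    curl -s 'http://localhost:9200/filebeat-*/_search?q=type:nginx-access&size=1&pretty'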
--------------------------------------------------------------------------------
/Chapter 7/elk/opt/logstash/patterns/nginx:
--------------------------------------------------------------------------------
1 | NGUSERNAME [a-zA-Z\.\@\-\+_%]+
2 | NGUSER %{NGUSERNAME}
3 | NGINXACCESS %{IPORHOST:clientip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:response} (?:%{NUMBER:bytes}|-) (?:"(?:%{URI:referrer}|-)"|%{QS:referrer}) %{QS:agent}
4 |
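NGINXACCESS builds on Logstash's stock patterns (IPORHOST, HTTPDATE, QS, and so on) and extracts the client IP, verb, request, response code, bytes, referrer and user agent from the default NGINX access-log format. A sketch for testing the pattern against a sample log line before deploying, assuming Logstash is installed under /opt/logstash and this file is saved in /opt/logstash/patterns:

    echo '203.0.113.10 - - [12/Jan/2017:10:00:00 +0000] "GET /index.php HTTP/1.1" 200 512 "-" "curl/7.47.0"' | \
      /opt/logstash/bin/logstash -e '
        input { stdin {} }
        filter { grok { patterns_dir => ["/opt/logstash/patterns"]
                        match => { "message" => "%{NGINXACCESS}" } } }
        output { stdout { codec => rubydebug } }'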
--------------------------------------------------------------------------------
/Chapter 7/promjenkins/opt/prometheus/alertmanager/alertmanager.yml:
--------------------------------------------------------------------------------
1 | global:
2 | smtp_smarthost: 'localhost:25'
3 | smtp_from: 'alertmanager@example.org'
4 |
5 | route:
6 | group_by: ['alertname', 'cluster', 'service']
7 | group_wait: 30s
8 | group_interval: 5m
9 | repeat_interval: 1h
10 | receiver: team-X-mails
11 |
12 | routes:
13 | - receiver: 'jenkins-webhook'
14 | match:
15 | alertname: "High_disk_space_usage"
16 |
17 | receivers:
18 | - name: 'team-X-mails'
19 | email_configs:
20 | - to: 'team-X+alerts@example.org'
21 | require_tls: false
22 | send_resolved: true
23 |
24 | - name: 'jenkins-webhook'
25 | webhook_configs:
26 | - url: http://localhost:8888
27 |
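The routing tree above delivers alerts to team-X-mails by default; alerts labelled alertname="High_disk_space_usage" are caught by the child route instead and handed to the jenkins-webhook receiver, which POSTs the alert payload to the executor listening on port 8888. A hedged way to inject a test alert by hand, assuming Alertmanager listens on its default port 9090+3 (9093) and exposes the v1 alerts API of this release:

    curl -s -XPOST http://localhost:9093/api/v1/alerts -d '[
      {
        "labels": {
          "alertname": "High_disk_space_usage",
          "instance": "test-host",
          "path": "/"
        },
        "annotations": { "summary": "manual test alert" }
      }
    ]'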
--------------------------------------------------------------------------------
/Chapter 7/promjenkins/opt/prometheus/executor/executor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [[ "$AMX_STATUS" != "firing" ]]; then
4 | exit 0
5 | fi
6 |
7 |
8 | main() {
9 | for i in $(seq 1 "$AMX_ALERT_LEN"); do
10 | ALERT_NAME=AMX_ALERT_${i}_LABEL_alertname
11 | INSTANCE=AMX_ALERT_${i}_LABEL_instance
12 | LABELS=$(set|egrep "^AMX_ALERT_${i}_LABEL_"|tr '\n' ' '|base64 -w0)
13 | PAYLOAD="{'parameter': [{'name':'alertcount', 'value':'${i}'}, {'name':'alertname', 'value':'${!ALERT_NAME}'}, {'name':'instance', 'value':'${!INSTANCE}'}, {'name':'labels', 'value':'${LABELS}'}]}"
14 | curl -s -X POST http://localhost:8080/job/prometheus_webhook/build --user 'prometheus:password' --data-urlencode json="${PAYLOAD}"
15 | done
16 | wait
17 | }
18 |
19 | main "$@"
20 |
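For every firing alert the script triggers a parameterized build of the prometheus_webhook Jenkins job, authenticating as the prometheus user and passing four string parameters: alertcount, alertname, instance, and a base64-encoded dump of all AMX_ALERT_<n>_LABEL_* variables. Jenkins exposes those parameters to the build as environment variables; a minimal sketch of how the receiving job might unpack them, assuming the parameter names match the payload above:

    # Inside the prometheus_webhook job (sketch)
    echo "Alert ${alertname} fired on ${instance}"

    # Recover the full label set packed by executor.sh, one variable per line
    echo "${labels}" | base64 -d | tr ' ' '\n'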
--------------------------------------------------------------------------------
/Chapter 7/promjenkins/opt/prometheus/server/prometheus.yml:
--------------------------------------------------------------------------------
1 | # my global config
2 | global:
3 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
4 |   evaluation_interval: 15s # By default, evaluate rules every 15 seconds.
5 | # scrape_timeout is set to the global default (10s).
6 |
7 | # Attach these labels to any time series or alerts when communicating with
8 | # external systems (federation, remote storage, Alertmanager).
9 | external_labels:
10 | monitor: 'codelab-monitor'
11 |
12 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
13 | rule_files:
14 | - "rules/*.rules"
15 |
16 | # A scrape configuration containing exactly one endpoint to scrape:
17 | # Here it's Prometheus itself.
18 | scrape_configs:
19 | # The job name is added as a label `job=` to any timeseries scraped from this config.
20 | - job_name: 'prometheus'
21 |
22 | # Override the global default and scrape targets from this job every 5 seconds.
23 | scrape_interval: 5s
24 |
25 | # metrics_path defaults to '/metrics'
26 | # scheme defaults to 'http'.
27 |
28 | static_configs:
29 | - targets: ['localhost:9090']
30 |
31 | - job_name: 'ec2'
32 | ec2_sd_configs:
33 | - region: 'us-east-1'
34 | access_key: '{REDACTED}'
35 | secret_key: '{REDACTED}'
36 | port: 9126
37 | relabel_configs:
38 | - source_labels: [__meta_ec2_tag_Name]
39 | regex: ^webserver
40 | action: keep
41 |
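The ec2 job uses EC2 service discovery to enumerate instances in us-east-1, keeps only those whose Name tag begins with "webserver" (the keep relabel action), and scrapes the Telegraf exporter each of them runs on port 9126. The /targets page of the Prometheus web UI shows which instances survived relabelling; the same information is available from a PromQL query over the HTTP API, sketched here assuming Prometheus runs locally on its default port 9090:

    # 1 = scrape succeeded, 0 = target down (this is what keepalive.rules watches)
    curl -s 'http://localhost:9090/api/v1/query' --data-urlencode 'query=up{job="ec2"}'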
--------------------------------------------------------------------------------
/Chapter 7/promjenkins/opt/prometheus/server/rules/disk.rules:
--------------------------------------------------------------------------------
1 | ALERT High_disk_space_usage
2 | IF disk_used_percent > 20
3 | FOR 1m
4 | ANNOTATIONS {
5 | summary = "High disk space usage on {{ $labels.instance }}",
6 |     description = "{{ $labels.instance }} has a disk_used value of {{ $value }}% on {{ $labels.path }}",
7 | }
8 |
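The 20% threshold is deliberately low so the alert is easy to trigger while experimenting; disk_used_percent is one of the gauges exported by Telegraf's disk input plugin. If a webserver instance sits below the threshold, one hedged way to push it over for a test is to allocate a throw-away file on the instance (the 2G size is an assumption, adjust it to the volume):

    # Temporarily consume disk space so disk_used_percent rises above 20
    fallocate -l 2G /tmp/alert-test.bin
    # ...wait for the 1m FOR clause to elapse and the alert to fire, then clean up
    rm -f /tmp/alert-test.bin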
--------------------------------------------------------------------------------
/Chapter 7/promjenkins/opt/prometheus/server/rules/keepalive.rules:
--------------------------------------------------------------------------------
1 | ALERT Keepalive
2 | IF up == 0
3 | FOR 1m
4 | ANNOTATIONS {
5 | summary = "Instance {{$labels.instance}} down",
6 | description = "{{$labels.instance}} of job {{$labels.job}} has been down for more than 1 minute."
7 | }
8 |
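up is the synthetic metric Prometheus records for every scrape: 1 when the target responded, 0 when it did not, so this rule fires once a discovered instance has been unreachable for a minute. A simple way to exercise it on a test webserver, assuming Telegraf is the scraped exporter as in the user data script below:

    # Make the scrape fail so up drops to 0 for this instance
    service telegraf stop
    # Restore the exporter once the Keepalive alert has fired
    service telegraf start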
--------------------------------------------------------------------------------
/Chapter 7/webserver/user_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Filebeat and NGINX
4 | yum -y install https://download.elastic.co/beats/filebeat/filebeat-1.3.1-x86_64.rpm
5 | yum -y install nginx
6 |
7 | cat << EOF > /etc/filebeat/filebeat.yml
8 | filebeat:
9 | prospectors:
10 | -
11 | paths:
12 | - /var/log/*.log
13 | - /var/log/messages
14 | - /var/log/secure
15 | -
16 | paths:
17 | - /var/log/nginx/access.log
18 | document_type: nginx-access
19 | registry_file: /var/lib/filebeat/registry
20 | output:
21 | logstash:
22 | hosts: ["10.0.1.132:5044"]
23 | EOF
24 |
25 | service nginx start
26 | service filebeat start
27 |
28 | # Install Telegraf
29 | yum -y install https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
30 | cat << EOF > /etc/telegraf/telegraf.conf
31 |
32 | [global_tags]
33 | [agent]
34 | interval = "10s"
35 | round_interval = true
36 | metric_batch_size = 1000
37 | metric_buffer_limit = 10000
38 | collection_jitter = "0s"
39 | flush_interval = "10s"
40 | flush_jitter = "0s"
41 | precision = ""
42 | debug = false
43 | quiet = false
44 | hostname = ""
45 | omit_hostname = false
46 | [[outputs.prometheus_client]]
47 | listen = ":9126"
48 | [[inputs.cpu]]
49 | percpu = true
50 | totalcpu = true
51 | fielddrop = ["time_*"]
52 | [[inputs.disk]]
53 | ignore_fs = ["tmpfs", "devtmpfs"]
54 | [[inputs.diskio]]
55 | [[inputs.kernel]]
56 | [[inputs.mem]]
57 | [[inputs.processes]]
58 | [[inputs.swap]]
59 | [[inputs.system]]
60 | EOF
61 |
62 | service telegraf start
63 |
64 | # Add Jenkins's key
65 | cat << EOF >> /home/ec2-user/.ssh/authorized_keys
66 | {{JENKINS_PUB_KEY_GOES_HERE}}
67 | EOF
68 |
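The user data script wires each new webserver into both pipelines: Filebeat ships the system and NGINX logs to the Logstash host at 10.0.1.132:5044, while Telegraf exposes host metrics on port 9126 for Prometheus to discover and scrape. A couple of hedged smoke tests to run on the instance after boot:

    # Metrics endpoint Prometheus will scrape (Telegraf's prometheus_client output)
    curl -s http://localhost:9126/metrics | grep -m 3 '^disk_used_percent'

    # Produce an access-log line for Filebeat to pick up and ship
    curl -s -o /dev/null http://localhost/
    tail -n 1 /var/log/nginx/access.log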
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Packt
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | # Implementing DevOps on AWS
5 | This is the code repository for [Implementing DevOps on AWS](https://www.packtpub.com/virtualization-and-cloud/implementing-devops-aws?utm_source=github&utm_medium=repository&utm_content=9781786460141), published by Packt. It contains all the supporting project files necessary to work through the book from start to finish.
6 |
7 | ## About the Book
8 | This book will help you to drastically reduce the amount of time spent on development and increase the reliability of your software deployments on AWS using popular DevOps methods of automation.
9 |
10 | ## Instructions and Navigations
11 | All of the code is organized into folders. Each folder starts with a number followed by the application name. For example, Chapter 2.
12 |
13 | The code will look like the following:
14 |
15 |     aws-region = "us-east-1"
16 |     vpc-cidr = "10.0.0.0/16"
17 |     vpc-name = "Terraform"
18 |     aws-availability-zones = "us-east-1b,us-east-1c"
19 |
20 | There are no code files for the following chapters:
21 |
22 | - **Chapter 1** - What is DevOps and Should You Care?
23 | - **Chapter 8** - Optimize for Scale and Cost
24 | - **Chapter 9** - Secure Your AWS Environment
25 | - **Chapter 10** - AWS Tips and Tricks
26 |
27 | ### Software requirements:
28 | The practical examples found in this book involve the use of AWS resources, so an AWS
29 | account will be required.
30 |
31 | The client-side tools used in the examples, such as the AWS CLI and Terraform, are
32 | supported on most common operating systems (Linux/Windows/Mac OS).
33 |
34 | Here is the list of software used in the book:
35 |
36 | * CloudFormation - https://aws.amazon.com/cloudformation/
37 | * Terraform - https://www.terraform.io/
38 | * SaltStack - https://saltstack.com/community/
39 | * Jenkins CI - https://jenkins.io
40 | * Docker - https://www.docker.com/
41 | * FPM - https://github.com/jordansissel/fpm
42 | * Packer - https://www.packer.io/
43 | * Serverspec - http://serverspec.org/
44 | * Elasticsearch - https://www.elastic.co/
45 | * Kibana - https://www.elastic.co/
46 | * Logstash - https://www.elastic.co/
47 | * Prometheus - https://prometheus.io/
48 |
49 | ## Related Products:
50 |
51 | * [Designing AWS Environments [Video]]( https://www.packtpub.com/virtualization-and-cloud/designing-aws-solutions-video?utm_source=github&utm_medium=repository&utm_content=9781786467492 )
52 |
53 | * [AWS Administration – The Definitive Guide]( https://www.packtpub.com/virtualization-and-cloud/aws-administration-guide?utm_source=github&utm_medium=repository&utm_content=9781782173755 )
54 |
55 | * [Learning AWS Lumberyard Game Development]( https://www.packtpub.com/game-development/learning-aws-lumberyard-game-development?utm_source=github&utm_medium=repository&utm_content=9781786460868 )
56 |
57 | * [Amazon S3 Essentials]( https://www.packtpub.com/virtualization-and-cloud/amazon-s3-essentials?utm_source=github&utm_medium=repository&utm_content=9781783554898 )
58 |
59 | ### Suggestions and Feedback
60 | [Click here]( https://docs.google.com/forms/d/e/1FAIpQLSe5qwunkGf6PUvzPirPDtuy1Du5Rlzew23UBp2S-P3wB-GcwQ/viewform ) if you have any feedback or suggestions.
61 |
62 | ### Download a free PDF
63 |
64 | If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost. Simply click on the link to claim your free PDF.
65 | https://packt.link/free-ebook/9781786460141
--------------------------------------------------------------------------------