├── .ansible-lint
├── .gitignore
├── .yamllint
├── CyberArk Software EULA 2016.pdf
├── Jenkinsfile
├── LICENSE.md
├── README.md
├── ansible.cfg
├── inventories
│   ├── production
│   │   ├── group_vars
│   │   │   ├── README.md
│   │   │   └── windows.yml
│   │   ├── host_vars
│   │   │   └── README.md
│   │   └── hosts
│   └── staging
│       ├── group_vars
│       │   ├── README.md
│       │   └── windows.yml
│       ├── host_vars
│       │   └── README.md
│       └── hosts
├── logs
│   └── ansible.log
├── pas-orchestrator.yml
├── requirements.txt
├── requirements.yml
├── tasks
│   ├── cpm.yml
│   ├── main.yml
│   ├── psm.yml
│   ├── pvwa.yml
│   ├── register_cpm.yml
│   ├── register_psm.yml
│   ├── register_pvwa.yml
│   └── set_facts.yml
└── tests
    ├── playbooks
    │   ├── deploy_vault.yml
    │   ├── deploy_vaultdr.yml
    │   ├── pas-infrastructure
    │   │   ├── README.md
    │   │   ├── ansible.cfg
    │   │   ├── create_ec2_batch.yml
    │   │   ├── ec2-infrastructure.yml
    │   │   ├── files
    │   │   │   └── diskpart.txt
    │   │   ├── inventory
    │   │   │   ├── ec2.ini
    │   │   │   └── ec2.py
    │   │   └── outputs
    │   │       └── hosts.yml
    │   └── roles
    │       └── cf_deploy
    │           ├── .gitignore
    │           ├── README.md
    │           ├── defaults
    │           │   └── main.yml
    │           ├── handlers
    │           │   └── main.yml
    │           ├── meta
    │           │   ├── .galaxy_install_info
    │           │   └── main.yml
    │           ├── tasks
    │           │   └── main.yml
    │           └── vars
    │               └── main.yml
    └── requirements.txt
/.ansible-lint:
--------------------------------------------------------------------------------
1 | exclude_paths:
2 | - .*/
3 | parseable: true
4 | quiet: true
5 | use_default_rules: true
6 | verbosity: 1
7 | skip_list:
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # ignore all retry files
2 | *.retry
3 | .idea/*
4 |
5 | # ignore components symlink
6 | roles/cpm
7 | roles/psm
8 | roles/pvwa
9 | roles/psmp
10 |
11 | # ignore components log
12 | logs/cpm
13 | logs/psm
14 | logs/pvwa
15 | logs/psmp
16 |
--------------------------------------------------------------------------------
/.yamllint:
--------------------------------------------------------------------------------
1 | ---
2 | ignore: |
3 | .*/
4 | tests/playbooks
5 |
6 | rules:
7 | braces:
8 | min-spaces-inside: 0
9 | max-spaces-inside: 0
10 | min-spaces-inside-empty: -1
11 | max-spaces-inside-empty: -1
12 | brackets:
13 | min-spaces-inside: 0
14 | max-spaces-inside: 0
15 | min-spaces-inside-empty: -1
16 | max-spaces-inside-empty: -1
17 | colons:
18 | max-spaces-before: 0
19 | max-spaces-after: 1
20 | commas:
21 | max-spaces-before: 0
22 | min-spaces-after: 1
23 | max-spaces-after: 1
24 | comments:
25 | level: warning
26 | require-starting-space: true
27 | min-spaces-from-content: 2
28 | comments-indentation:
29 | level: warning
30 | document-end: disable
31 | document-start:
32 | level: warning
33 | present: true
34 | empty-lines:
35 | max: 2
36 | max-start: 0
37 | max-end: 0
38 | empty-values:
39 | forbid-in-block-mappings: false
40 | forbid-in-flow-mappings: false
41 | hyphens:
42 | max-spaces-after: 1
43 | indentation:
44 | spaces: consistent
45 | indent-sequences: true
46 | check-multi-line-strings: false
47 | key-duplicates: enable
48 | key-ordering: disable
49 | line-length:
50 | max: 120
51 | level: warning
52 | allow-non-breakable-words: true
53 | allow-non-breakable-inline-mappings: false
54 | new-line-at-end-of-file: enable
55 | new-lines:
56 | type: unix
57 | trailing-spaces: enable
58 | truthy: disable
59 |
--------------------------------------------------------------------------------
/CyberArk Software EULA 2016.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/CyberArk Software EULA 2016.pdf
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 | agent {
3 | node {
4 | label 'ansible'
5 | }
6 | }
7 | environment {
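// AWS_REGION is resolved from the EC2 instance-metadata service on the build agent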
8 | AWS_REGION = sh(script: 'curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | python3 -c "import json,sys;obj=json.load(sys.stdin);print (obj[\'region\'])"', returnStdout: true).trim()
9 | // shortCommit = sh(script: "git log -n 1 --pretty=format:'%h'", returnStdout: true).trim()
10 | CYBERARK_VERSION = "v12.2"
11 | ENV_TIMESTAMP = sh(script: "date +%s", returnStdout: true).trim()
12 | }
13 | stages {
14 | stage('Install virtual environment') {
15 | steps {
16 | sh '''
17 | python3 -m pip install --user virtualenv
18 | python3 -m virtualenv .testenv
19 | source .testenv/bin/activate
20 | pip install -r requirements.txt
21 | pip install -r tests/requirements.txt
22 | '''
23 | }
24 | }
25 | stage('yamllint validation') {
26 | steps {
27 | sh '''
28 | source .testenv/bin/activate
29 | yamllint .
30 | '''
31 | }
32 | }
33 | stage('Install ansible roles') {
34 | steps {
35 | sh '''
36 | source .testenv/bin/activate
37 | ansible-galaxy install -r requirements.yml
38 | '''
39 | }
40 | }
41 | stage('Download packages') {
42 | steps {
43 | withCredentials([
44 | string(credentialsId: 'default_packages_bucket', variable: 'default_packages_bucket')
45 | ]) {
46 | dir ('/tmp/packages') {
47 | s3Download(file:'/tmp/packages/psm.zip', bucket:"$default_packages_bucket", path:"Packages/${env.CYBERARK_VERSION}/Privileged Session Manager-Rls-${env.CYBERARK_VERSION}.zip", pathStyleAccessEnabled: true, force:true)
48 | s3Download(file:'/tmp/packages/cpm.zip', bucket:"$default_packages_bucket", path:"Packages/${env.CYBERARK_VERSION}/Central Policy Manager-Rls-${env.CYBERARK_VERSION}.zip", pathStyleAccessEnabled: true, force:true)
49 | s3Download(file:'/tmp/packages/pvwa.zip', bucket:"$default_packages_bucket", path:"Packages/${env.CYBERARK_VERSION}/Password Vault Web Access-Rls-${env.CYBERARK_VERSION}.zip", pathStyleAccessEnabled: true, force:true)
50 | }
51 | }
52 | }
53 | }
54 | stage('Deploy Environments') {
55 | parallel {
56 | stage('Deploy Environment for TC1') {
57 | stages {
58 | stage('Deploy Vault') {
59 | steps {
60 | withCredentials([
61 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
62 | string(credentialsId: 'default_keypair', variable: 'default_keypair'),
63 | string(credentialsId: 'default_s3_bucket', variable: 'default_s3_bucket')
64 | ]) {
65 | sh '''
66 | source .testenv/bin/activate
67 | ansible-playbook tests/playbooks/deploy_vault.yml -v -e "keypair=$default_keypair bucket=$default_s3_bucket ansible_user=$ansible_user ansible_password=$ansible_password tc_number=1 env_timestamp=$ENV_TIMESTAMP"
68 | '''
69 | }
70 | }
71 | }
72 | stage('Provision in-domain testing environment') {
73 | steps {
74 | withCredentials([
75 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
76 | string(credentialsId: 'default_keypair', variable: 'default_keypair')
77 | ]) {
78 | sh '''
79 | source .testenv/bin/activate
80 | ansible-playbook tests/playbooks/pas-infrastructure/ec2-infrastructure.yml -e "aws_region=$AWS_REGION keypair=$default_keypair ec2_instance_type=m4.large public_ip=no pas_count=1 indomain=yes tc_number=1 ansible_user=$ansible_user ansible_password=$ansible_password env_timestamp=$ENV_TIMESTAMP"
81 | '''
82 | }
83 | }
84 | }
85 | stage('Run pas-orchestrator in-domain #0 failure (TC5)') {
86 | steps {
87 | withCredentials([usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user')]) {
88 | sh '''
89 | source .testenv/bin/activate
90 | VAULT_IP=$(cat /tmp/vault_ip_tc_1.txt)
91 | cp -r tests/playbooks/pas-infrastructure/outputs/hosts_tc_1.yml inventories/staging/hosts_tc_1.yml
92 | '''
93 | catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
94 | sh '''
95 | source .testenv/bin/activate
96 | ansible-playbook pas-orchestrator.yml -i inventories/staging/hosts_tc_1.yml -v -e "accept_eula=yes vault_ip=$VAULT_IP vault_password='blahblah' cpm_zip_file_path=/tmp/packages/cpm.zip psm_zip_file_path=/tmp/packages/psm.zip pvwa_zip_file_path=/tmp/packages/pvwa.zip connect_with_rdp=Yes ansible_user='cyberark.com\\\\$ansible_user' ansible_password=$ansible_password cpm_username=TcOne"
97 | '''
98 | }
99 | }
100 | }
101 | }
102 | stage('Run pas-orchestrator in-domain #1') {
103 | steps {
104 | withCredentials([usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user')]) {
105 | sh '''
106 | source .testenv/bin/activate
107 | VAULT_IP=$(cat /tmp/vault_ip_tc_1.txt)
108 | cp -r tests/playbooks/pas-infrastructure/outputs/hosts_tc_1.yml inventories/staging/hosts_tc_1.yml
109 | ansible-playbook pas-orchestrator.yml -i inventories/staging/hosts_tc_1.yml -v -e "accept_eula=yes vault_ip=$VAULT_IP vault_password=$ansible_password psm_hardening=false cpm_zip_file_path=/tmp/packages/cpm.zip psm_zip_file_path=/tmp/packages/psm.zip pvwa_zip_file_path=/tmp/packages/pvwa.zip connect_with_rdp=Yes ansible_user='cyberark.com\\\\$ansible_user' ansible_password=$ansible_password cpm_username=TcOne"
110 | '''
111 | }
112 | }
113 | }
114 | stage('Run pas-orchestrator in-domain #2 (TC4)') {
115 | steps {
116 | withCredentials([usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user')]) {
117 | sh '''
118 | source .testenv/bin/activate
119 | VAULT_IP=$(cat /tmp/vault_ip_tc_1.txt)
120 | cp -r tests/playbooks/pas-infrastructure/outputs/hosts_tc_1.yml inventories/staging/hosts_tc_1.yml
121 | ansible-playbook pas-orchestrator.yml -i inventories/staging/hosts_tc_1.yml -v -e "accept_eula=yes vault_ip=$VAULT_IP vault_password=$ansible_password psm_hardening=false cpm_zip_file_path=/tmp/packages/cpm.zip psm_zip_file_path=/tmp/packages/psm.zip pvwa_zip_file_path=/tmp/packages/pvwa.zip connect_with_rdp=Yes ansible_user='cyberark.com\\\\$ansible_user' ansible_password=$ansible_password cpm_username=TcOne"
122 | '''
123 | }
124 | }
125 | }
126 | }
127 | }
128 | stage('Deploy Environment for TC2') {
129 | stages {
130 | stage('Deploy Vault') {
131 | steps {
132 | sleep 10
133 | withCredentials([
134 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
135 | string(credentialsId: 'default_keypair', variable: 'default_keypair'),
136 | string(credentialsId: 'default_s3_bucket', variable: 'default_s3_bucket')
137 | ]) {
138 | sh '''
139 | source .testenv/bin/activate
140 | ansible-playbook tests/playbooks/deploy_vault.yml -v -e "keypair=$default_keypair bucket=$default_s3_bucket ansible_user=$ansible_user ansible_password=$ansible_password tc_number=2 env_timestamp=$ENV_TIMESTAMP"
141 | '''
142 | }
143 | }
144 | }
145 | stage('Deploy Vault DR') {
146 | steps {
147 | withCredentials([
148 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
149 | string(credentialsId: 'default_keypair', variable: 'default_keypair'),
150 | string(credentialsId: 'default_s3_bucket', variable: 'default_s3_bucket')
151 | ]) {
152 | sh '''
153 | VAULT_IP=$(cat /tmp/vault_ip_tc_2.txt)
154 | source .testenv/bin/activate
155 | ansible-playbook tests/playbooks/deploy_vaultdr.yml -v -e "keypair=$default_keypair bucket=$default_s3_bucket ansible_user=$ansible_user ansible_password=$ansible_password tc_number=2 vault_ip=$VAULT_IP env_timestamp=$ENV_TIMESTAMP"
156 | '''
157 | }
158 | }
159 | }
160 | stage('Provision in-domain testing environment') {
161 | steps {
162 | withCredentials([
163 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
164 | string(credentialsId: 'default_keypair', variable: 'default_keypair')
165 | ]) {
166 | sh '''
167 | source .testenv/bin/activate
168 | ansible-playbook tests/playbooks/pas-infrastructure/ec2-infrastructure.yml -e "aws_region=$AWS_REGION keypair=$default_keypair ec2_instance_type=m4.large public_ip=no pas_count=5 indomain=yes tc_number=2 ansible_user=$ansible_user ansible_password=$ansible_password env_timestamp=$ENV_TIMESTAMP"
169 | '''
170 | }
171 | }
172 | }
173 | stage('Run pas-orchestrator in-domain #1') {
174 | steps {
175 | withCredentials([usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user')]) {
176 | sh '''
177 | source .testenv/bin/activate
178 | VAULT_IP=$(cat /tmp/vault_ip_tc_2.txt)
179 | VAULT_DR_IP=$(cat /tmp/vaultdr_ip_tc_2.txt)
180 | cp -r tests/playbooks/pas-infrastructure/outputs/hosts_tc_2.yml inventories/staging/hosts_tc_2.yml
181 | ansible-playbook pas-orchestrator.yml -i inventories/staging/hosts_tc_2.yml -v -e "accept_eula=yes vault_ip=$VAULT_IP dr_vault_ip=$VAULT_DR_IP vault_password=$ansible_password cpm_zip_file_path=/tmp/packages/cpm.zip psm_zip_file_path=/tmp/packages/psm.zip pvwa_zip_file_path=/tmp/packages/pvwa.zip pvwa_installation_drive=\'D:\' cpm_installation_drive=\'D:\' psm_installation_drive=\'D:\' connect_with_rdp=Yes ansible_user='cyberark.com\\\\$ansible_user' ansible_password=$ansible_password"
182 | '''
183 | }
184 | }
185 | }
186 | stage('Run pas-orchestrator in-domain #2 (TC4)') {
187 | steps {
188 | withCredentials([usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user')]) {
189 | sh '''
190 | source .testenv/bin/activate
191 | VAULT_IP=$(cat /tmp/vault_ip_tc_2.txt)
192 | VAULT_DR_IP=$(cat /tmp/vaultdr_ip_tc_2.txt)
193 | cp -r tests/playbooks/pas-infrastructure/outputs/hosts_tc_2.yml inventories/staging/hosts_tc_2.yml
194 | ansible-playbook pas-orchestrator.yml -i inventories/staging/hosts_tc_2.yml -v -e "accept_eula=yes vault_ip=$VAULT_IP dr_vault_ip=$VAULT_DR_IP vault_password=$ansible_password cpm_zip_file_path=/tmp/packages/cpm.zip psm_zip_file_path=/tmp/packages/psm.zip pvwa_zip_file_path=/tmp/packages/pvwa.zip pvwa_installation_drive=\'D:\' cpm_installation_drive=\'D:\' psm_installation_drive=\'D:\' connect_with_rdp=Yes ansible_user='cyberark.com\\\\$ansible_user' ansible_password=$ansible_password"
195 | '''
196 | }
197 | }
198 | }
199 | }
200 | }
201 | stage('Deploy Environment for TC3') {
202 | stages {
203 | stage('Deploy Vault') {
204 | steps {
205 | sleep 20
206 | withCredentials([
207 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
208 | string(credentialsId: 'default_keypair', variable: 'default_keypair'),
209 | string(credentialsId: 'default_s3_bucket', variable: 'default_s3_bucket')
210 | ]) {
211 | sh '''
212 | source .testenv/bin/activate
213 | ansible-playbook tests/playbooks/deploy_vault.yml -v -e "keypair=$default_keypair bucket=$default_s3_bucket ansible_user=$ansible_user ansible_password=$ansible_password tc_number=3 env_timestamp=$ENV_TIMESTAMP"
214 | '''
215 | }
216 | }
217 | }
218 | stage('Deploy Vault DR') {
219 | steps {
220 | withCredentials([
221 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
222 | string(credentialsId: 'default_keypair', variable: 'default_keypair'),
223 | string(credentialsId: 'default_s3_bucket', variable: 'default_s3_bucket')
224 | ]) {
225 | sh '''
226 | VAULT_IP=$(cat /tmp/vault_ip_tc_3.txt)
227 | source .testenv/bin/activate
228 | ansible-playbook tests/playbooks/deploy_vaultdr.yml -v -e "keypair=$default_keypair bucket=$default_s3_bucket ansible_user=$ansible_user ansible_password=$ansible_password tc_number=3 vault_ip=$VAULT_IP env_timestamp=$ENV_TIMESTAMP"
229 | '''
230 | }
231 | }
232 | }
233 | stage('Provision out-of-domain testing environment') {
234 | steps {
235 | withCredentials([
236 | usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user'),
237 | string(credentialsId: 'default_keypair', variable: 'default_keypair')
238 | ]) {
239 | sh '''
240 | source .testenv/bin/activate
241 | ansible-playbook tests/playbooks/pas-infrastructure/ec2-infrastructure.yml -e "aws_region=$AWS_REGION keypair=$default_keypair ec2_instance_type=m4.large public_ip=no pas_count=1 indomain=no tc_number=3 ansible_user=$ansible_user ansible_password=$ansible_password env_timestamp=$ENV_TIMESTAMP"
242 | '''
243 | }
244 | }
245 | }
246 | stage('Run pas-orchestrator out-of-domain') {
247 | steps {
248 | withCredentials([usernamePassword(credentialsId: 'default_vault_credentials', passwordVariable: 'ansible_password', usernameVariable: 'ansible_user')]) {
249 | sh '''
250 | source .testenv/bin/activate
251 | VAULT_IP=$(cat /tmp/vault_ip_tc_3.txt)
252 | VAULT_DR_IP=$(cat /tmp/vaultdr_ip_tc_3.txt)
253 | cp -r tests/playbooks/pas-infrastructure/outputs/hosts_tc_3.yml inventories/staging/hosts_tc_3.yml
254 | ansible-playbook pas-orchestrator.yml -i inventories/staging/hosts_tc_3.yml -v -e "accept_eula=yes vault_ip=$VAULT_IP dr_vault_ip=$VAULT_DR_IP vault_password=$ansible_password pvwa_hardening=false cpm_hardening=false psm_hardening=false psm_out_of_domain=true cpm_zip_file_path=/tmp/packages/cpm.zip psm_zip_file_path=/tmp/packages/psm.zip pvwa_zip_file_path=/tmp/packages/pvwa.zip connect_with_rdp=Yes ansible_user='$ansible_user' ansible_password=$ansible_password cpm_username=VeryLongNameRealyIAmVeryLongAskAnybody"
255 | '''
256 | }
257 | }
258 | }
259 | }
260 | }
261 | }
262 | }
263 | }
264 | post('Archiving artifacts and Cleanup') {
265 | always {
266 | archiveArtifacts artifacts: 'inventories/staging/hosts_tc_*.yml', fingerprint: true
267 | archiveArtifacts artifacts: 'logs/**/*.log', fingerprint: true
268 | archiveArtifacts artifacts: 'logs/ansible.log', fingerprint: true
269 | }
270 | cleanup {
271 | sh '''
272 | source .testenv/bin/activate
273 |
274 | # Terminate EC2 instances
275 | instance_ids=$(aws ec2 describe-instances --region $AWS_REGION --query 'Reservations[].Instances[].InstanceId' --filters "Name=tag:Timestamp,Values=$ENV_TIMESTAMP" --output text)
276 | aws ec2 terminate-instances --region $AWS_REGION --instance-ids $instance_ids
277 | aws ec2 wait instance-terminated --region $AWS_REGION --instance-ids $instance_ids
278 |
279 | instance_ids=$(aws ec2 describe-instances --region $AWS_REGION --query 'Reservations[].Instances[].InstanceId' --filters "Name=tag:aws:cloudformation:stack-name,Values=$(cat /tmp/cf_vault_tc_1.txt)" --output text)
280 | aws ec2 terminate-instances --region $AWS_REGION --instance-ids $instance_ids
281 | aws ec2 wait instance-terminated --region $AWS_REGION --instance-ids $instance_ids
282 |
283 | instance_ids=$(aws ec2 describe-instances --region $AWS_REGION --query 'Reservations[].Instances[].InstanceId' --filters "Name=tag:aws:cloudformation:stack-name,Values=$(cat /tmp/cf_vault_tc_2.txt)" --output text)
284 | aws ec2 terminate-instances --region $AWS_REGION --instance-ids $instance_ids
285 | aws ec2 wait instance-terminated --region $AWS_REGION --instance-ids $instance_ids
286 |
287 | instance_ids=$(aws ec2 describe-instances --region $AWS_REGION --query 'Reservations[].Instances[].InstanceId' --filters "Name=tag:aws:cloudformation:stack-name,Values=$(cat /tmp/cf_vaultdr_tc_2.txt)" --output text)
288 | aws ec2 terminate-instances --region $AWS_REGION --instance-ids $instance_ids
289 | aws ec2 wait instance-terminated --region $AWS_REGION --instance-ids $instance_ids
290 |
291 | instance_ids=$(aws ec2 describe-instances --region $AWS_REGION --query 'Reservations[].Instances[].InstanceId' --filters "Name=tag:aws:cloudformation:stack-name,Values=$(cat /tmp/cf_vault_tc_3.txt)" --output text)
292 | aws ec2 terminate-instances --region $AWS_REGION --instance-ids $instance_ids
293 | aws ec2 wait instance-terminated --region $AWS_REGION --instance-ids $instance_ids
294 |
295 | instance_ids=$(aws ec2 describe-instances --region $AWS_REGION --query 'Reservations[].Instances[].InstanceId' --filters "Name=tag:aws:cloudformation:stack-name,Values=$(cat /tmp/cf_vaultdr_tc_3.txt)" --output text)
296 | aws ec2 terminate-instances --region $AWS_REGION --instance-ids $instance_ids
297 | aws ec2 wait instance-terminated --region $AWS_REGION --instance-ids $instance_ids
298 |
299 | # Delete security groups
300 | sleep 60
301 | aws ec2 describe-security-groups --region $AWS_REGION --query 'SecurityGroups[*].{ID:GroupId}' --filters "Name=tag:Timestamp,Values=$ENV_TIMESTAMP" --output text | awk '{print $1}' | while read line; do aws ec2 delete-security-group --region $AWS_REGION --group-id $line; done
302 |
303 | # Delete Vault Cloudformations
304 | aws cloudformation delete-stack --region $AWS_REGION --stack-name $(cat /tmp/cf_vault_tc_1.txt)
305 | aws cloudformation delete-stack --region $AWS_REGION --stack-name $(cat /tmp/cf_vaultdr_tc_2.txt)
306 | aws cloudformation delete-stack --region $AWS_REGION --stack-name $(cat /tmp/cf_vault_tc_2.txt)
307 | aws cloudformation delete-stack --region $AWS_REGION --stack-name $(cat /tmp/cf_vaultdr_tc_3.txt)
308 | aws cloudformation delete-stack --region $AWS_REGION --stack-name $(cat /tmp/cf_vault_tc_3.txt)
309 | '''
310 | }
311 | }
312 | }
313 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright 2013-2018 CyberArk Software Ltd. http://CyberArk.com.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | https://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
15 | Apache License
16 | Version 2.0, January 2004
17 | https://www.apache.org/licenses/
18 |
19 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
20 |
21 | 1. Definitions.
22 |
23 | "License" shall mean the terms and conditions for use, reproduction,
24 | and distribution as defined by Sections 1 through 9 of this document.
25 |
26 | "Licensor" shall mean the copyright owner or entity authorized by
27 | the copyright owner that is granting the License.
28 |
29 | "Legal Entity" shall mean the union of the acting entity and all
30 | other entities that control, are controlled by, or are under common
31 | control with that entity. For the purposes of this definition,
32 | "control" means (i) the power, direct or indirect, to cause the
33 | direction or management of such entity, whether by contract or
34 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
35 | outstanding shares, or (iii) beneficial ownership of such entity.
36 |
37 | "You" (or "Your") shall mean an individual or Legal Entity
38 | exercising permissions granted by this License.
39 |
40 | "Source" form shall mean the preferred form for making modifications,
41 | including but not limited to software source code, documentation
42 | source, and configuration files.
43 |
44 | "Object" form shall mean any form resulting from mechanical
45 | transformation or translation of a Source form, including but
46 | not limited to compiled object code, generated documentation,
47 | and conversions to other media types.
48 |
49 | "Work" shall mean the work of authorship, whether in Source or
50 | Object form, made available under the License, as indicated by a
51 | copyright notice that is included in or attached to the work
52 | (an example is provided in the Appendix below).
53 |
54 | "Derivative Works" shall mean any work, whether in Source or Object
55 | form, that is based on (or derived from) the Work and for which the
56 | editorial revisions, annotations, elaborations, or other modifications
57 | represent, as a whole, an original work of authorship. For the purposes
58 | of this License, Derivative Works shall not include works that remain
59 | separable from, or merely link (or bind by name) to the interfaces of,
60 | the Work and Derivative Works thereof.
61 |
62 | "Contribution" shall mean any work of authorship, including
63 | the original version of the Work and any modifications or additions
64 | to that Work or Derivative Works thereof, that is intentionally
65 | submitted to Licensor for inclusion in the Work by the copyright owner
66 | or by an individual or Legal Entity authorized to submit on behalf of
67 | the copyright owner. For the purposes of this definition, "submitted"
68 | means any form of electronic, verbal, or written communication sent
69 | to the Licensor or its representatives, including but not limited to
70 | communication on electronic mailing lists, source code control systems,
71 | and issue tracking systems that are managed by, or on behalf of, the
72 | Licensor for the purpose of discussing and improving the Work, but
73 | excluding communication that is conspicuously marked or otherwise
74 | designated in writing by the copyright owner as "Not a Contribution."
75 |
76 | "Contributor" shall mean Licensor and any individual or Legal Entity
77 | on behalf of whom a Contribution has been received by Licensor and
78 | subsequently incorporated within the Work.
79 |
80 | 2. Grant of Copyright License. Subject to the terms and conditions of
81 | this License, each Contributor hereby grants to You a perpetual,
82 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
83 | copyright license to reproduce, prepare Derivative Works of,
84 | publicly display, publicly perform, sublicense, and distribute the
85 | Work and such Derivative Works in Source or Object form.
86 |
87 | 3. Grant of Patent License. Subject to the terms and conditions of
88 | this License, each Contributor hereby grants to You a perpetual,
89 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
90 | (except as stated in this section) patent license to make, have made,
91 | use, offer to sell, sell, import, and otherwise transfer the Work,
92 | where such license applies only to those patent claims licensable
93 | by such Contributor that are necessarily infringed by their
94 | Contribution(s) alone or by combination of their Contribution(s)
95 | with the Work to which such Contribution(s) was submitted. If You
96 | institute patent litigation against any entity (including a
97 | cross-claim or counterclaim in a lawsuit) alleging that the Work
98 | or a Contribution incorporated within the Work constitutes direct
99 | or contributory patent infringement, then any patent licenses
100 | granted to You under this License for that Work shall terminate
101 | as of the date such litigation is filed.
102 |
103 | 4. Redistribution. You may reproduce and distribute copies of the
104 | Work or Derivative Works thereof in any medium, with or without
105 | modifications, and in Source or Object form, provided that You
106 | meet the following conditions:
107 |
108 | (a) You must give any other recipients of the Work or
109 | Derivative Works a copy of this License; and
110 |
111 | (b) You must cause any modified files to carry prominent notices
112 | stating that You changed the files; and
113 |
114 | (c) You must retain, in the Source form of any Derivative Works
115 | that You distribute, all copyright, patent, trademark, and
116 | attribution notices from the Source form of the Work,
117 | excluding those notices that do not pertain to any part of
118 | the Derivative Works; and
119 |
120 | (d) If the Work includes a "NOTICE" text file as part of its
121 | distribution, then any Derivative Works that You distribute must
122 | include a readable copy of the attribution notices contained
123 | within such NOTICE file, excluding those notices that do not
124 | pertain to any part of the Derivative Works, in at least one
125 | of the following places: within a NOTICE text file distributed
126 | as part of the Derivative Works; within the Source form or
127 | documentation, if provided along with the Derivative Works; or,
128 | within a display generated by the Derivative Works, if and
129 | wherever such third-party notices normally appear. The contents
130 | of the NOTICE file are for informational purposes only and
131 | do not modify the License. You may add Your own attribution
132 | notices within Derivative Works that You distribute, alongside
133 | or as an addendum to the NOTICE text from the Work, provided
134 | that such additional attribution notices cannot be construed
135 | as modifying the License.
136 |
137 | You may add Your own copyright statement to Your modifications and
138 | may provide additional or different license terms and conditions
139 | for use, reproduction, or distribution of Your modifications, or
140 | for any such Derivative Works as a whole, provided Your use,
141 | reproduction, and distribution of the Work otherwise complies with
142 | the conditions stated in this License.
143 |
144 | 5. Submission of Contributions. Unless You explicitly state otherwise,
145 | any Contribution intentionally submitted for inclusion in the Work
146 | by You to the Licensor shall be under the terms and conditions of
147 | this License, without any additional terms or conditions.
148 | Notwithstanding the above, nothing herein shall supersede or modify
149 | the terms of any separate license agreement you may have executed
150 | with Licensor regarding such Contributions.
151 |
152 | 6. Trademarks. This License does not grant permission to use the trade
153 | names, trademarks, service marks, or product names of the Licensor,
154 | except as required for reasonable and customary use in describing the
155 | origin of the Work and reproducing the content of the NOTICE file.
156 |
157 | 7. Disclaimer of Warranty. Unless required by applicable law or
158 | agreed to in writing, Licensor provides the Work (and each
159 | Contributor provides its Contributions) on an "AS IS" BASIS,
160 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
161 | implied, including, without limitation, any warranties or conditions
162 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
163 | PARTICULAR PURPOSE. You are solely responsible for determining the
164 | appropriateness of using or redistributing the Work and assume any
165 | risks associated with Your exercise of permissions under this License.
166 |
167 | 8. Limitation of Liability. In no event and under no legal theory,
168 | whether in tort (including negligence), contract, or otherwise,
169 | unless required by applicable law (such as deliberate and grossly
170 | negligent acts) or agreed to in writing, shall any Contributor be
171 | liable to You for damages, including any direct, indirect, special,
172 | incidental, or consequential damages of any character arising as a
173 | result of this License or out of the use or inability to use the
174 | Work (including but not limited to damages for loss of goodwill,
175 | work stoppage, computer failure or malfunction, or any and all
176 | other commercial damages or losses), even if such Contributor
177 | has been advised of the possibility of such damages.
178 |
179 | 9. Accepting Warranty or Additional Liability. While redistributing
180 | the Work or Derivative Works thereof, You may choose to offer,
181 | and charge a fee for, acceptance of support, warranty, indemnity,
182 | or other liability obligations and/or rights consistent with this
183 | License. However, in accepting such obligations, You may act only
184 | on Your own behalf and on Your sole responsibility, not on behalf
185 | of any other Contributor, and only if You agree to indemnify,
186 | defend, and hold each Contributor harmless for any liability
187 | incurred by, or claims asserted against, such Contributor by reason
188 | of your accepting any such warranty or additional liability.
189 |
190 | END OF TERMS AND CONDITIONS
191 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PAS-Orchestrator
2 |
3 | In today’s modern infrastructure, organizations are moving towards hybrid environments, which consist of multiple public clouds, private clouds and on-premises platforms.
4 |
5 | CyberArk has created a tailored installation and deployment method for each platform to enable easy implementation. For example, CloudFormation templates enable easy deployment on AWS, while Azure Resource Manager (ARM) templates enable easy deployment on Azure. However, it is difficult to combine the different methods to orchestrate and automate a hybrid deployment.
6 |
7 | PAS Orchestrator is a set of Ansible roles that provides a holistic solution for deploying CyberArk Core PAS components simultaneously in multiple environments, regardless of each environment’s location.
8 |
9 | The Ansible roles are responsible for the entire deployment process, and can be integrated with the organization’s CI/CD pipeline.
10 |
11 | Each PAS component’s Ansible role is responsible for that component’s end-to-end deployment, which includes the following stages (sketched below):
12 | - Copying the installation package to the target server
13 | - Installing prerequisites
14 | - Silently installing the component
15 | - Running the post-installation procedure and hardening
16 | - Registering the component in the Vault
17 |
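A minimal sketch of what these stage toggles look like when including a component role (mirroring `tasks/cpm.yml`, shown later in this repository):

```
- include_role:
    name: cpm
  vars:
    cpm_extract: true         # copy and unzip the CD image on the target
    cpm_prerequisites: true   # install prerequisites
    cpm_install: true         # silent installation
    cpm_postinstall: true     # post-installation procedure
    cpm_hardening: true       # hardening
    cpm_registration: false   # registration runs in a separate phase
```
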
18 | Ansible Roles for PVWA, CPM and PSM can be found in the following links:
19 | - PSM: [https://github.com/cyberark/psm](https://github.com/cyberark/psm)
20 | - CPM: [https://github.com/cyberark/cpm](https://github.com/cyberark/cpm)
21 | - PVWA: [https://github.com/cyberark/pvwa](https://github.com/cyberark/pvwa)
22 |
23 | The PAS Orchestrator role is an example of how to use the component roles,
24 | demonstrating parallel installation on multiple remote servers.
25 |
26 | ## Requirements
27 |
28 | - IP addresses / hostnames to execute the playbook against, with Windows Server 2016 installed on the remote hosts
29 | - WinRM open on port 5986 (**not 5985**) on the remote hosts (see the connectivity check below)
30 | - pywinrm installed on the workstation running the playbook
31 | - The workstation running the playbook must have network connectivity to the remote hosts
32 | - The remote hosts must have network connectivity to the CyberArk Vault and the repository server:
33 |   - Port 443 outbound
34 |   - Port 443 outbound (for PVWA only)
35 |   - Port 1858 outbound
36 | - Administrator access to the remote host
37 | - CyberArk components CD image on the workstation running the playbook
38 |
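Before a full run, WinRM connectivity can be verified with an ad-hoc `win_ping` (a sketch; the inventory path and credentials are placeholders):

```
ansible windows -i ./inventories/production -m win_ping -e "ansible_user=DOMAIN\USER ansible_password=PASSWORD"
```
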
39 | ## Environment setup
40 |
41 | - Get the PAS Orchestrator Playbook
42 | ```
43 | git clone https://github.com/cyberark/pas-orchestrator.git
44 | cd pas-orchestrator
45 | ```
46 | - Install Python requirements
47 | ```
48 | pip install -r requirements.txt
49 | ```
50 | - Get the component roles
51 | ```
52 | ansible-galaxy install --roles-path ./roles --role-file requirements.yml
53 | ```
54 | - Update the inventory hosts files with the remote hosts' IPs
55 |
56 | ## Role Variables
57 |
58 | These are the variables used in this playbook
59 |
60 | **Deployment Variables**
61 |
62 | | Variable | Required | Default | Comments |
63 | |----------------------------------|--------------|--------------------------------------------------------------------------------|------------------------------------------|
64 | | vault_ip                         | yes          | None                                                                           | Vault IP for component registration       |
65 | | dr_vault_ip                      | no           | None                                                                           | DR Vault IP for component registration    |
66 | | vault_port                       | no           | 1858                                                                           | Vault port                                |
67 | | vault_username                   | no           | "administrator"                                                                | Vault username for registration           |
68 | | vault_password                   | yes          | None                                                                           | Vault password for registration           |
69 | | accept_eula                      | yes          | "No"                                                                           | EULA acceptance ("Yes" to accept)         |
70 | | cpm_zip_file_path                | yes          | None                                                                           | Path to the zipped CPM image              |
71 | | pvwa_zip_file_path               | yes          | None                                                                           | Path to the zipped PVWA image             |
72 | | psm_zip_file_path                | yes          | None                                                                           | Path to the zipped PSM image              |
73 | | cpm_username                     | no           | "PasswordManager"                                                              | The CPM component's Vault username        |
74 |
75 | Variables related to the individual components can be found in each component's README.
76 |
77 | ## Usage
78 |
79 | The role consists of two parts; each part runs independently:
80 |
81 | **Part 1 - Components Deployment**
82 |
83 | This task triggers each component's main role, and each role triggers its sub-tasks (prerequisites, installation, etc.).
84 | By default, all tasks are set to true except registration.
85 | This process executes tasks on all hosts in parallel, reducing deployment time.
86 |
87 | *IMPORTANT: Component registration should always be set to false in this phase*
88 |
89 | **Part 2 - Components Registration**
90 |
91 | This task executes the registration process of the components; all the previous tasks are set to false and only registration is enabled.
92 | The registration of each component runs serially.
93 |
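A condensed sketch of how the two parts are wired together (the full version is in `tasks/main.yml`, shown later in this repository):

```
- hosts: pvwa:cpm:psm
  strategy: free                     # Part 1: hosts run independently, so components install in parallel
  tasks:
    - include_tasks: cpm.yml         # deployment flags enabled, registration disabled
      when: "'cpm' in group_names"

- hosts: pvwa:cpm:psm
  serial: 1                          # Part 2: registration runs one host at a time
  tasks:
    - include_tasks: register_cpm.yml
      when: "'cpm' in group_names"
```
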
94 | ## Inventory
95 |
96 | Prior to running pas-orchestrator, the [hosts file](https://github.com/cyberark/pas-orchestrator/blob/master/inventories/production/hosts) should be updated with the relevant hosts data.
97 |
98 |     # file: production
99 |     # TODO: Add description how to add hosts
100 |
101 |     [pvwa]
102 |     # Add here a list of hosts or IP addresses of pvwa dedicated machines
103 |     # pvwa01.example.com
104 |     # pvwa02.example.com
105 |     10.2.0.155
106 |
107 |
108 |     [cpm]
109 |     # Add here a list of hosts or IP addresses of cpm dedicated machines
110 |     # cpm01.example.com
111 |     # cpm02.example.com
112 |     10.2.0.155
113 |     # Add a cpm with a custom component username (default is PasswordManager)
114 |     10.2.0.156 cpm_username=LinuxManager
115 |
116 |     [psm]
117 |     # Add here a list of hosts or IP addresses of psm dedicated machines
118 |     # psm01.example.com
119 |     # psm02.example.com
120 |     10.2.0.155
121 |
122 |
123 |     [psmp]
124 |     # Add here a list of hosts or IP addresses of psmp dedicated machines
125 |     # psmp01.example.com
126 |     # psmp02.example.com
127 |
128 |
129 |     # DO NOT EDIT BELOW!!!
130 |     [windows:children]
131 |     pvwa
132 |     cpm
133 |     psm
134 |
135 | ## Pulling a specific version
136 |
137 | ```
138 | git clone -b v12.0 -s https://github.com/cyberark/pas-orchestrator.git
139 | ```
140 |
141 | Note: using `-s` saves about 10 MB of disk space and avoids downloading unnecessary content.
142 |
143 | ## Running the playbook
144 |
145 | To run the playbook, use the following example command:
146 |
147 |     ansible-playbook -i ./inventories/production pas-orchestrator.yml -e "vault_ip=VAULT_IP ansible_user=DOMAIN\USER cpm_zip_file_path=/tmp/pas_packages/cpm.zip pvwa_zip_file_path=/tmp/pas_packages/pvwa.zip psm_zip_file_path=/tmp/pas_packages/psm.zip connect_with_rdp=Yes accept_eula=Yes"
148 |
149 | Example command for an out-of-domain, no-hardening deployment on drive D with a custom CPM component username:
150 |
151 |     ansible-playbook -i ./inventories/production pas-orchestrator.yml -e "vault_ip=VAULT_IP ansible_user=DOMAIN\USER cpm_zip_file_path=/tmp/pas_packages/cpm.zip pvwa_zip_file_path=/tmp/pas_packages/pvwa.zip psm_zip_file_path=/tmp/pas_packages/psm.zip {psm_out_of_domain:true} connect_with_rdp=Yes accept_eula=Yes psm_installation_drive=D: cpm_installation_drive=D: pvwa_installation_drive=D: {psm_hardening:false} {cpm_hardening:false} {pvwa_hardening:false} cpm_username=WinManager"
152 |
153 | *Vault and remote host passwords are entered via prompt.*
154 |
155 | ## Troubleshooting
156 |
157 | In case of a failure, a log folder will be created on the Ansible workstation with the relevant logs copied from the remote host machine.
158 | The logs are available under `pas-orchestrator/tasks/logs`.
159 |
160 | ## Idempotence
161 | Every stage in the roles contains validation and can be run multiple times without error after a success or any Ansible-related error. This does not apply to component installation errors: in some cases a second execution will not assist in recovery, and there might be leftover artifacts (e.g. app users in the Vault).
162 |
163 | ## Limitations
164 | - Only a single component per server is supported
165 | - There is a checksum verification of the CD image zip file; it must be the original CyberArk release
166 |
167 | ## License
168 |
169 | Apache License, Version 2.0
170 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 |
3 | # Default path to roles directory
4 | roles_path = ./roles
5 |
6 | # Adds a timestamp to each task and a recap at the end of the playbook
7 | callback_whitelist = profile_tasks
8 | log_path = ./logs/ansible.log
9 | ; host_key_checking = False
--------------------------------------------------------------------------------
/inventories/production/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/inventories/production/group_vars/README.md
--------------------------------------------------------------------------------
/inventories/production/group_vars/windows.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # file: group_vars/windows
3 | any_errors_fatal: false
4 | ansible_python_interpreter: python
5 | ansible_winrm_server_cert_validation: ignore
6 | ansible_connection: winrm
7 | ansible_winrm_transport: credssp
8 | ansible_port: 5986
9 | ansible_winrm_read_timeout_sec: 800
10 | ansible_winrm_operation_timeout_sec: 600
11 |
12 | ansible_user: "{{ hostvars['localhost']['ansible_user'] }}"
13 | ansible_password: "{{ hostvars['localhost']['ansible_password'] }}"
14 | accept_eula: "{{ hostvars['localhost']['accept_eula'] }}"
15 | vault_ip: "{{ hostvars['localhost']['vault_ip'] }}"
16 | vault_password: "{{ hostvars['localhost']['vault_password'] }}"
17 | cpm_zip_file_path: "{{ hostvars['localhost']['cpm_zip_file_path'] }}"
18 | pvwa_zip_file_path: "{{ hostvars['localhost']['pvwa_zip_file_path'] }}"
19 | psm_zip_file_path: "{{ hostvars['localhost']['psm_zip_file_path'] }}"
20 |
21 | ansible_become: yes
22 | ansible_become_method: runas
23 | ansible_become_user: "{{ ansible_user }}"
24 | ansible_become_password: "{{ ansible_password }}"
25 | ansible_become_flags: logon_type=new_credentials logon_flags=netcredentials_only
26 |
--------------------------------------------------------------------------------
/inventories/production/host_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/inventories/production/host_vars/README.md
--------------------------------------------------------------------------------
/inventories/production/hosts:
--------------------------------------------------------------------------------
1 | # file: production
2 | # TODO: Add description how to add hosts
3 |
4 | [pvwa]
5 | # Add here a list of hosts or IP addresses of pvwa dedicated machines
6 | # pvwa01.example.com
7 | # pvwa02.example.com
8 | 10.2.0.155
9 |
10 |
11 | [cpm]
12 | # Add here a list of hosts or IP addresses of cpm dedicated machines
13 | # cpm01.example.com
14 | # cpm02.example.com
15 | 10.2.0.155
16 |
17 |
18 | [psm]
19 | # Add here a list of hosts or IP addresses of psm dedicated machines
20 | # psm01.example.com
21 | # psm02.example.com
22 | 10.2.0.155
23 |
24 |
25 | [psmp]
26 | # Add here a list of hosts or IP addresses of psmp dedicated machines
27 | # psmp01.example.com
28 | # psmp02.example.com
29 |
30 |
31 | # DO NOT EDIT BELOW!!!
32 | [windows:children]
33 | pvwa
34 | cpm
35 | psm
--------------------------------------------------------------------------------
/inventories/staging/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/inventories/staging/group_vars/README.md
--------------------------------------------------------------------------------
/inventories/staging/group_vars/windows.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # file: group_vars/windows
3 | any_errors_fatal: false
4 | ansible_python_interpreter: python
5 | ansible_winrm_server_cert_validation: ignore
6 | ansible_connection: winrm
7 | ansible_winrm_transport: credssp
8 | ansible_port: 5986
9 | ansible_winrm_read_timeout_sec: 800
10 | ansible_winrm_operation_timeout_sec: 600
11 |
12 | ansible_user: "{{ hostvars['localhost']['ansible_user'] }}"
13 | ansible_password: "{{ hostvars['localhost']['ansible_password'] }}"
14 | accept_eula: "{{ hostvars['localhost']['accept_eula'] }}"
15 | vault_ip: "{{ hostvars['localhost']['vault_ip'] }}"
16 | vault_password: "{{ hostvars['localhost']['vault_password'] }}"
17 | cpm_zip_file_path: "{{ hostvars['localhost']['cpm_zip_file_path'] }}"
18 | pvwa_zip_file_path: "{{ hostvars['localhost']['pvwa_zip_file_path'] }}"
19 | psm_zip_file_path: "{{ hostvars['localhost']['psm_zip_file_path'] }}"
20 |
21 | ansible_become: yes
22 | ansible_become_method: runas
23 | ansible_become_user: "{{ ansible_user }}"
24 | ansible_become_password: "{{ ansible_password }}"
25 | ansible_become_flags: logon_type=new_credentials logon_flags=netcredentials_only
26 |
--------------------------------------------------------------------------------
/inventories/staging/host_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/inventories/staging/host_vars/README.md
--------------------------------------------------------------------------------
/inventories/staging/hosts:
--------------------------------------------------------------------------------
1 | # file: staging
2 | # TODO: Add description how to add hosts
3 |
4 | [pvwa]
5 | # Add here a list of hosts or IP addresses of pvwa dedicated machines
6 | # pvwa01.example.com
7 | # pvwa02.example.com
8 | 10.2.0.145
9 |
10 |
11 | [cpm]
12 | # Add here a list of hosts or IP addresses of cpm dedicated machines
13 | # cpm01.example.com
14 | # cpm02.example.com
15 | 10.2.0.31
16 | 10.2.0.94
17 | 10.2.0.103
18 | 10.2.0.219
19 |
20 |
21 | [psm]
22 | # Add here a list of hosts or IP addresses of psm dedicated machines
23 | # psm01.example.com
24 | # psm02.example.com
25 | 10.2.0.145
26 |
27 |
28 | [psmp]
29 | # Add here a list of hosts or IP addresses of psmp dedicated machines
30 | # psmp01.example.com
31 | # psmp02.example.com
32 |
33 |
34 | # DO NOT EDIT BELOW!!!
35 | [windows:children]
36 | pvwa
37 | cpm
38 | psm
--------------------------------------------------------------------------------
/logs/ansible.log:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/logs/ansible.log
--------------------------------------------------------------------------------
/pas-orchestrator.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - import_playbook: tasks/main.yml
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible==2.8.8
2 | boto==2.49.0
3 | boto3==1.12.8
4 | botocore==1.15.8
5 | certifi==2019.11.28
6 | cffi==1.14.0
7 | chardet==3.0.4
8 | cryptography==2.8
9 | docutils==0.15.2
10 | idna==2.9
11 | Jinja2==2.11.1
12 | jmespath==0.9.5
13 | MarkupSafe==1.1.1
14 | ntlm-auth==1.4.0
15 | pyasn1==0.4.8
16 | pycparser==2.19
17 | pyOpenSSL==19.1.0
18 | python-dateutil==2.8.1
19 | pywinrm==0.4.1
20 | PyYAML==5.3
21 | requests==2.23.0
22 | requests-credssp==1.1.1
23 | requests-ntlm==1.1.0
24 | s3transfer==0.3.3
25 | six==1.14.0
26 | urllib3==1.25.8
27 | xmltodict==0.12.0
28 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # pvwa git repo
4 | - src: https://github.com/cyberark/pvwa.git
5 | scm: git
6 | version: v12.2
7 |
8 | # cpm git repo
9 | - src: https://github.com/cyberark/cpm.git
10 | scm: git
11 | version: v12.2
12 |
13 | # psm git repo
14 | - src: https://github.com/cyberark/psm.git
15 | scm: git
16 | version: v12.2
17 |
--------------------------------------------------------------------------------
/tasks/cpm.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include_role:
4 | name: cpm
5 | vars:
6 | - cpm_extract: true
7 | - cpm_prerequisites: true
8 | - cpm_install: true
9 | - cpm_postinstall: true
10 | - cpm_hardening: true
11 | - cpm_registration: false
12 | - cpm_official: false
13 |
--------------------------------------------------------------------------------
/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # file: main.yml
3 |
4 | - hosts: localhost
5 | connection: local
6 | vars_prompt:
7 |
8 | - name: accept_eula
9 | prompt: "Please accept EULA"
10 | default: "No"
11 | private: no
12 |
13 | - name: vault_ip
14 | prompt: "Please enter the Vault ip address"
15 | private: no
16 |
17 | - name: vault_password
18 | prompt: "Please enter the Vault Administrator password"
19 | private: yes
20 |
21 | - name: ansible_user
22 | prompt: "Please enter the Hosts administrator username"
23 | default: "Administrator"
24 | private: no
25 |
26 | - name: ansible_password
27 | prompt: "Please enter the Hosts Administrator password"
28 | private: yes
29 |
30 | - name: cpm_zip_file_path
31 | prompt: "Please enter CPM installation package location"
32 | default: "/tmp/pas_packages/cpm.zip"
33 | private: no
34 |
35 | - name: pvwa_zip_file_path
36 | prompt: "Please enter PVWA installation package location"
37 | default: "/tmp/pas_packages/pvwa.zip"
38 | private: no
39 |
40 | - name: psm_zip_file_path
41 | prompt: "Please enter PSM installation package location"
42 | default: "/tmp/pas_packages/psm.zip"
43 | private: no
44 |
45 | tasks:
46 |
47 | - name: Set required parameters from prompt
48 | include_tasks: set_facts.yml
49 |
50 | - hosts: pvwa:cpm:psm
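  # 'free' lets each host run through its tasks independently, so component installs proceed in parallel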
51 | strategy: free
52 |
53 | tasks:
54 |
55 | - include_tasks: cpm.yml
56 | when: "'cpm' in group_names"
57 | - include_tasks: pvwa.yml
58 | when: "'pvwa' in group_names"
59 | - include_tasks: psm.yml
60 | when: "'psm' in group_names"
61 |
62 |
63 | # PSM Register Component Requires the First PVWA IP Address
64 |
65 | - hosts: pvwa:cpm:psm
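  # Registration runs serially, one host at a time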
66 | serial: 1
67 | tasks:
68 |
69 | - include_tasks: register_pvwa.yml
70 | when: "'pvwa' in group_names"
71 | - include_tasks: register_cpm.yml
72 | when: "'cpm' in group_names"
73 | - include_tasks: register_psm.yml
74 | when: "'psm' in group_names"
75 |
--------------------------------------------------------------------------------
/tasks/psm.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include_role:
4 | name: psm
5 | vars:
6 | - psm_extract: true
7 | - psm_prerequisites: true
8 | - psm_install: true
9 | - psm_postinstall: true
10 | - psm_hardening: true
11 | - psm_registration: false
12 | - psm_official: false
13 |
--------------------------------------------------------------------------------
/tasks/pvwa.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: PVWA Role Block
4 | block:
5 |
6 | - include_role:
7 | name: pvwa
8 | vars:
9 | - pvwa_extract: true
10 | - pvwa_prerequisites: true
11 | - pvwa_install: true
12 | - pvwa_postinstall: true
13 | - pvwa_hardening: true
14 | - pvwa_registration: false
15 | - pvwa_official: false
16 |
17 | rescue:
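    # If the first attempt fails, rerun the full PVWA role once before giving up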
18 |
19 | - include_role:
20 | name: pvwa
21 | vars:
22 | - pvwa_extract: true
23 | - pvwa_prerequisites: true
24 | - pvwa_install: true
25 | - pvwa_postinstall: true
26 | - pvwa_hardening: true
27 | - pvwa_registration: false
28 | - pvwa_official: false
29 |
--------------------------------------------------------------------------------
/tasks/register_cpm.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Register cpm
4 | include_role:
5 | name: cpm
6 | vars:
7 | - cpm_registration: true
8 |
--------------------------------------------------------------------------------
/tasks/register_psm.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Register psm
4 | include_role:
5 | name: psm
6 | vars:
7 | - psm_registration: true
8 |
--------------------------------------------------------------------------------
/tasks/register_pvwa.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Register pvwa
4 | include_role:
5 | name: pvwa
6 | vars:
7 | - pvwa_registration: true
8 |
--------------------------------------------------------------------------------
/tasks/set_facts.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Pass input parameters to relevant roles
4 | set_fact:
5 | accept_eula: "{{ accept_eula }}"
6 | vault_ip: "{{ vault_ip }}"
7 | vault_password: "{{ vault_password }}"
8 | ansible_user: "{{ ansible_user }}"
9 | ansible_password: "{{ ansible_password }}"
10 | cpm_zip_file_path: "{{ cpm_zip_file_path }}"
11 | pvwa_zip_file_path: "{{ pvwa_zip_file_path }}"
12 | psm_zip_file_path: "{{ psm_zip_file_path }}"
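  # no_log keeps the prompted credentials out of stdout and logs/ansible.log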
13 | no_log: true
14 |
--------------------------------------------------------------------------------
/tests/playbooks/deploy_vault.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | gather_facts: no
5 | tasks:
6 |
7 | - name: Get current region
8 | shell: |
9 | echo $AWS_REGION
10 | register: my_region
11 |
12 | - name: Get current vpc id
13 | shell: |
14 | INTERFACE=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/)
15 | VPC_ID=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/${INTERFACE}/vpc-id)
16 | echo $VPC_ID
17 | register: my_vpc
18 |
19 | - name: Get current subnet id
20 | shell: |
21 | INTERFACE=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/)
22 | SUBNET_ID=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/${INTERFACE}/subnet-id)
23 | echo $SUBNET_ID
24 | register: my_subnet
25 |
26 | - name: Create security group for Vault
27 | ec2_group:
28 | name: "PAS-Orchestrator-Vault-{{ env_timestamp }}"
29 | description: Security Group for PAS Orchestrator Vault
30 | vpc_id: "{{ my_vpc.stdout }}"
31 | rules:
32 | - proto: tcp
33 | ports:
34 | - 1858
35 | cidr_ip: 0.0.0.0/0
36 | rule_desc: Allow all traffic on port 1858
37 | tags:
38 | Timestamp: "{{ env_timestamp }}"
39 | register: my_sg
40 |
41 | - name: Deploy Vault
42 | include_role:
43 | name: cf_deploy
44 | vars:
45 | - deploy_bucket: cloud-initiatives-pipeline-bucket
46 | - cf_template_url: https://raw.githubusercontent.com/cyberark/pas-on-cloud/develop/aws/Vault-Single-Deployment.yaml
47 | - cf_template_parameters:
48 | EULA: Accept
49 | KeyName: "{{ keypair }}"
50 | VaultFilesBucket: "{{ bucket }}"
51 | LicenseFile: license.xml
52 | RecoveryPublicKey: recpub.key
53 | VaultAdminPassword: "{{ ansible_password }}"
54 | RetypeAdminPassword: "{{ ansible_password }}"
55 | VaultMasterPassword: "{{ ansible_password }}"
56 | RetypeMasterPassword: "{{ ansible_password }}"
57 | DRVaultPassword: "{{ ansible_password }}"
58 | RetypeDRVaultPassword: "{{ ansible_password }}"
59 | Secret: "{{ ansible_password }}"
60 | SecretRetype: "{{ ansible_password }}"
61 | VaultInstanceName: "[PAS-Orchestrator] Vault"
62 | VaultInstanceType: m5.large
63 | VaultHostName: vault
64 | VaultInstanceSecurityGroups: "{{ my_sg.group_id }}"
65 | VaultInstanceSubnetId: "{{ my_subnet.stdout }}"
66 | - aws_region: "{{ my_region.stdout }}"
67 |
68 | - name: Get Vault machine details from CloudFormation
69 | ec2_instance_facts:
70 | region: "{{ my_region.stdout }}"
71 | instance_ids:
72 | - "{{ cf_output | json_query('[?logical_resource_id == `VaultMachine` ] | [0].physical_resource_id') }}"
73 | register: vault_machine
74 |
75 | - name: Save Vault ip address in text file
76 | copy:
77 | dest: "/tmp/vault_ip_tc_{{ tc_number }}.txt"
78 | content: |
79 | {{ vault_machine.instances[0].private_ip_address }}
80 |
81 | - name: Save cloudformation id for later cleanup
82 | copy:
83 | dest: "/tmp/cf_vault_tc_{{ tc_number }}.txt"
84 | content: |
85 | {{ cloudformation_stack_id }}
86 |
--------------------------------------------------------------------------------
/tests/playbooks/deploy_vaultdr.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | gather_facts: no
5 | tasks:
6 |
7 | - name: Get current region
8 | shell: |
9 | echo $AWS_REGION
10 | register: my_region
11 |
12 | - name: Get current vpc id
13 | shell: |
14 | INTERFACE=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/)
15 | VPC_ID=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/${INTERFACE}/vpc-id)
16 | echo $VPC_ID
17 | register: my_vpc
18 |
19 | - name: Get current subnet id
20 | shell: |
21 | INTERFACE=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/)
22 | SUBNET_ID=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/${INTERFACE}/subnet-id)
23 | echo $SUBNET_ID
24 | register: my_subnet
25 |
26 | - name: Create security group for DR Vault
27 | ec2_group:
28 | name: "PAS-Orchestrator-DRVault-{{ env_timestamp }}"
29 | description: Security Group for PAS Orchestrator DRVault
30 | vpc_id: "{{ my_vpc.stdout }}"
31 | rules:
32 | - proto: tcp
33 | ports:
34 | - 1858
35 | cidr_ip: 0.0.0.0/0
36 | rule_desc: Allow TCP traffic on port 1858
37 | tags:
38 | Timestamp: "{{ env_timestamp }}"
39 | register: my_sg
40 |
41 | - name: Deploy DR Vault
42 | include_role:
43 | name: cf_deploy
44 | vars:
45 | - deploy_bucket: cloud-initiatives-pipeline-bucket
46 | - cf_template_url: https://raw.githubusercontent.com/cyberark/pas-on-cloud/develop/aws/DRVault-Single-Deployment.yaml
47 | - cf_template_parameters:
48 | EULA: Accept
49 | KeyName: "{{ keypair }}"
50 | VaultPrivateIP: "{{ vault_ip }}"
51 | VaultDRPassword: "{{ ansible_password }}"
52 | Secret: "{{ ansible_password }}"
53 | VaultInstanceName: "[PAS-Orchestrator] DRVault"
54 | VaultInstanceType: m5.large
55 | VaultHostName: vaultdr
56 | VaultInstanceSecurityGroups: "{{ my_sg.group_id }}"
57 | DRInstanceSubnetId: "{{ my_subnet.stdout }}"
58 | - aws_region: "{{ my_region.stdout }}"
59 |
60 | - name: Get Vault machine details from CloudFormation
61 | ec2_instance_facts:
62 | region: "{{ my_region.stdout }}"
63 | instance_ids:
64 | - "{{ cf_output | json_query('[?logical_resource_id == `VaultDRMachine` ] | [0].physical_resource_id') }}"
65 | register: vaultdr_machine
66 |
67 | - name: Save DR Vault IP address in text file
68 | copy:
69 | dest: "/tmp/vaultdr_ip_tc_{{ tc_number }}.txt"
70 | content: |
71 | {{ vaultdr_machine.instances[0].private_ip_address }}
72 |
73 | - name: Save cloudformation id for later cleanup
74 | copy:
75 | dest: "/tmp/cf_vaultdr_tc_{{ tc_number }}.txt"
76 | content: |
77 | {{ cloudformation_stack_id }}
78 |
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/README.md:
--------------------------------------------------------------------------------
1 | # EC2-PAS-Infrastructure
2 |
3 | This playbook provisions AWS infrastructure to prepare a Domain Controller environment and domain machines for PAS-Orchestrator.
4 |
5 | Requirements
6 | ------------
7 |
8 | - Security Group on the target VPC with all the necessary ports open for the Domain Controller and WinRM.
9 | - Pip libraries: `jq`, `yq`, `json2yaml`:
10 | `pip install jq yq json2yaml --user`
11 |
12 | ## Role Variables
13 |
14 | The variables the playbook uses are listed below; a sample extra-vars file follows the table.
15 |
16 | | Variable | Comments |
17 | |----------------------|--------------------|
18 | | aws_region | AWS Region |
19 | | keypair | KeyPair |
20 | | ec2_instance_type | Instance Type |
21 | | public_ip | Public IP Yes/No |
22 | | subnet_id | Subnet ID |
23 | | security_group | Security Group ID |
24 | | win2012_ami_id | AMI for DC |
25 | | win2016_ami_id | AMI for PAS EC2 |
26 | | pas_count | Number of Machines |
27 | | ansible_user | Ansible User |
28 | | ansible_password | Ansible Password |
29 | | indomain | Yes/No |
30 | | comp_sg | sg-xxxxxx |
31 | | dc_sg | sg-xxxxxx |
32 |
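For orientation, a minimal extra-vars file covering these variables could look like the sketch below. All values are illustrative placeholders, not defaults shipped with this repository:

```yaml
# vars.yml -- hypothetical example values
aws_region: us-east-1
keypair: My-KP
ec2_instance_type: t2.medium
public_ip: "no"
subnet_id: subnet-xxxxxx
security_group: sg-xxxxxx
win2012_ami_id: ami-xxxxxx
win2016_ami_id: ami-xxxxxx
pas_count: 10
ansible_user: Administrator
ansible_password: <password>
indomain: "yes"
comp_sg: sg-xxxxxx
dc_sg: sg-xxxxxx
```

A file like this can be passed with `-e @vars.yml` instead of the long inline `-e` string shown in the next section.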
33 | ## Running the playbook:
34 |
35 | To run the above playbook:
36 |
37 | ansible-playbook ec2-infrastructure.yml -e "aws_region=my-region keypair=My-KP ec2_instance_type=t2.size public_ip=yes/no subnet_id=subnet-xxxxxx security_group=sg-xxxxxx win2012_ami_id=ami-xxxxxx win2016_ami_id=ami-xxxxxx dc_sg=sg-xxxxxx comp_sg=sg-xxxxxx pas_count=10 ansible_user=Administrator ansible_password=nopass indomain=yes"
38 |
39 | ## Outputs:
40 |
41 | You will get a hosts file in `outputs/hosts.yml` that you can use with PAS-Orchestrator.
42 | When using PAS-Orchestrator, the relevant host groups appear as `tag_Type_pvwa`, `tag_Type_cpm`, and `tag_Type_psm`; a sample invocation follows.
43 |
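For example, assuming the repository's top-level `pas-orchestrator.yml` playbook and the variables consumed by `tasks/set_facts.yml`, a run against the generated inventory might look like the sketch below (all values are placeholders):

```
ansible-playbook -i outputs/hosts.yml pas-orchestrator.yml \
  -e "accept_eula=Yes vault_ip=<vault-ip> vault_password=<password>" \
  -e "ansible_user=Administrator ansible_password=<password>" \
  -e "cpm_zip_file_path=<path> pvwa_zip_file_path=<path> psm_zip_file_path=<path>"
```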
44 | ## License
45 |
46 | **TBD**
47 |
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | # Adds a timestamp to each task and a recap at the end of the playbook
3 | callback_whitelist = profile_tasks
4 | # Disable host key checking
5 | host_key_checking = False
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/create_ec2_batch.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Generate EC2 Batch Machines
4 | ec2:
5 | aws_access_key: "{{ aws_access_key | default(omit) }}"
6 | aws_secret_key: "{{ aws_secret_key | default(omit) }}"
7 | security_token: "{{ aws_security_token | default(omit) }}"
8 | key_name: "{{ keypair }}"
9 | instance_type: "{{ ec2_instance_type }}"
10 | image: "{{ ami_id }}"
11 | wait: yes
12 | group_id: "{{ security_group }}"
13 | count: "{{ count }}"
14 | vpc_subnet_id: "{{ subnet_id }}"
15 | assign_public_ip: "{{ public_ip }}"
16 | region: "{{ aws_region }}"
17 | volumes:
18 | - device_name: /dev/sda1
19 | volume_type: gp2
20 | volume_size: 30
21 | delete_on_termination: yes
22 | - device_name: /dev/sda2
23 | volume_type: gp2
24 | volume_size: 30
25 | delete_on_termination: yes
26 | instance_tags:
27 | Name: "{{ component_name }}-Machine-{{ lookup('pipe','date +%Y-%m-%d-%H-%M-%S') }}"
28 | Type: "{{ component_name }}"
29 | Domain: "{{ indomain }}"
30 | TC_Number: "{{ tc_number }}"
31 | Timestamp: "{{ env_timestamp }}"
32 | user_data: "{{ ansible_user_data }}"
33 | instance_profile_name: "{{ instance_profile | default('') }}"
34 | register: ec2_instances
35 |
36 | - set_fact:
37 | ec2_machines: "{{ ec2_instances }}"
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/ec2-infrastructure.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | gather_facts: no
5 | vars:
6 | ansible_user_data: |
7 | <powershell>
8 | # Change Password
9 | $admin = [adsi]("WinNT://./administrator, user")
10 | $admin.PSBase.Invoke("SetPassword", "{{ ansible_password }}")
11 |
12 | # Configure machine for ansible remoting
13 | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
14 | $url = "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"
15 | $file = "$env:temp\ConfigureRemotingForAnsible.ps1"
16 | Invoke-WebRequest -Uri $url -OutFile $file
17 | powershell.exe -ExecutionPolicy ByPass -File $file -EnableCredSSP
18 | </powershell>
19 |
20 | tasks:
21 |
22 | - name: Get current vpc id
23 | shell: |
24 | INTERFACE=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/)
25 | VPC_ID=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/${INTERFACE}/vpc-id)
26 | echo $VPC_ID
27 | register: my_vpc
28 |
29 | - name: Get current subnet id
30 | shell: |
31 | INTERFACE=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/)
32 | SUBNET_ID=$(curl --silent http://169.254.169.254/latest/meta-data/network/interfaces/macs/${INTERFACE}/subnet-id)
33 | echo $SUBNET_ID
34 | register: my_subnet
35 |
36 | - set_fact:
37 | subnet_id: "{{ my_subnet.stdout }}"
38 |
39 | - name: Create security group for DC
40 | ec2_group:
41 | name: "PAS-Orchestrator-DC-{{ env_timestamp }}"
42 | description: Security Group for PAS Orchestrator Domain Controller
43 | vpc_id: "{{ my_vpc.stdout }}"
44 | rules:
45 | - proto: all
46 | cidr_ip: 0.0.0.0/0
47 | tags:
48 | Timestamp: "{{ env_timestamp }}"
49 | register: dc_sg
50 |
51 | - name: Create security group for Components
52 | ec2_group:
53 | name: "PAS-Orchestrator-Components-{{ env_timestamp }}"
54 | description: Security Group for PAS Orchestrator Components
55 | vpc_id: "{{ my_vpc.stdout }}"
56 | rules:
57 | - proto: tcp
58 | from_port: 1
59 | to_port: 65535
60 | cidr_ip: 0.0.0.0/0
61 | tags:
62 | Timestamp: "{{ env_timestamp }}"
63 | register: comp_sg
64 |
65 | - name: Get latest windows 2012 ami
66 | ec2_ami_facts:
67 | owners: 801119661308
68 | filters:
69 | name: "Windows_Server-2012-R2_RTM-English-64Bit-Base-*"
70 | register: ami_find_2012
71 |
72 | - name: Get latest windows 2016 ami
73 | ec2_ami_facts:
74 | owners: 801119661308
75 | filters:
76 | name: "Windows_Server-2016-English-Full-Base-*"
77 | register: ami_find_2016
78 |
79 | - name: Create Domain Controller
80 | include_tasks: create_ec2_batch.yml
81 | vars:
82 | - component_name: dc
83 | - count: 1
84 | - ami_id: "{{ ami_find_2012.images[0].image_id }}"
85 | - security_group: "{{ dc_sg.group_id }}"
86 | when: indomain == "yes"
87 |
88 | - name: Add DC Machine to Host Group
89 | add_host:
90 | name: "{{ ec2_machines.instances[0].private_ip }}"
91 | group: dc
92 | when: indomain == "yes"
93 |
94 | - name: Save domain controller ip address
95 | set_fact:
96 | dc_ip: "{{ ec2_machines.instances[0].private_ip }}"
97 | when: indomain == "yes"
98 |
99 | - name: Create PVWA Machines
100 | include_tasks: create_ec2_batch.yml
101 | vars:
102 | - component_name: pvwa
103 | - count: "{{ pas_count }}"
104 | - ami_id: "{{ ami_find_2016.images[0].image_id }}"
105 | - security_group: "{{ comp_sg.group_id }}"
106 |
107 | - name: Add PVWA Machines to Host Group
108 | add_host:
109 | name: "{{ item.private_ip }}"
110 | group: pvwa
111 | with_items: "{{ ec2_machines.instances }}"
112 |
113 | - name: Create CPM Machines
114 | include_tasks: create_ec2_batch.yml
115 | vars:
116 | - component_name: cpm
117 | - count: "{{ pas_count }}"
118 | - ami_id: "{{ ami_find_2016.images[0].image_id }}"
119 | - security_group: "{{ comp_sg.group_id }}"
120 |
121 | - name: Add CPM to Host Group
122 | add_host:
123 | name: "{{ item.private_ip }}"
124 | group: cpm
125 | with_items: "{{ ec2_machines.instances }}"
126 |
127 | - name: Create PSM Machines
128 | include_tasks: create_ec2_batch.yml
129 | vars:
130 | - component_name: psm
131 | - count: "{{ pas_count }}"
132 | - ami_id: "{{ ami_find_2016.images[0].image_id }}"
133 | - security_group: "{{ comp_sg.group_id }}"
134 |
135 | - name: Add PSM to Host Group
136 | add_host:
137 | name: "{{ item.private_ip }}"
138 | group: psm
139 | with_items: "{{ ec2_machines.instances }}"
140 |
141 | - hosts: dc
142 | gather_facts: no
143 | vars:
144 | ansible_connection: winrm
145 | ansible_winrm_server_cert_validation: ignore
146 | ansible_winrm_transport: basic
147 | ansible_port: 5986
148 | domain: cyberark.com
149 |
150 | tasks:
151 |
152 | - name: Wait for DC to Respond
153 | wait_for_connection:
154 | timeout: 600
155 | when: indomain == "yes"
156 |
157 | - name: Install Active Directory Feature
158 | win_feature:
159 | name: AD-Domain-Services
160 | include_management_tools: yes
161 | include_sub_features: yes
162 | state: present
163 | when: indomain == "yes"
164 |
165 | - name: Install Domain on Active Directory
166 | win_domain:
167 | dns_domain_name: "{{ domain }}"
168 | safe_mode_password: "{{ ansible_password }}"
169 | register: ad
170 | when: indomain == "yes"
171 |
172 | - name: Reboot Domain Controller
173 | win_reboot:
174 | msg: "Installing AD. Rebooting..."
175 | pre_reboot_delay: 15
176 | reboot_timeout: 300
177 | when:
178 | - ad.changed
179 | - indomain == "yes"
180 |
181 | - hosts: pvwa:cpm:psm
182 | gather_facts: no
183 | vars:
184 | ansible_connection: winrm
185 | ansible_winrm_server_cert_validation: ignore
186 | ansible_winrm_transport: basic
187 | ansible_port: 5986
188 | domain: cyberark.com
189 |
190 | tasks:
191 |
192 | - name: Wait for PAS Machines to Respond
193 | wait_for_connection:
194 | timeout: 600
195 |
196 | - name: Set .NET Framework installer path
197 | set_fact:
198 | dotnet_installer_path: "C:\\ndp48-x86-x64-allos-enu.exe"
199 |
200 | - name: Download .NET Framework 4.8
201 | win_get_url:
202 | url: https://download.visualstudio.microsoft.com/download/pr/014120d7-d689-4305-befd-3cb711108212/0fd66638cde16859462a6243a4629a50/ndp48-x86-x64-allos-enu.exe
203 | dest: "{{ dotnet_installer_path }}"
204 |
205 | - name: Install Microsoft .NET Framework 4.8
206 | win_package:
207 | path: "{{ dotnet_installer_path }}"
208 | product_id: '{50e73eb2-10f7-4457-954a-6b06fccc7d04}'
209 | arguments: /q /norestart
210 | register: dotnet_install
211 |
212 | - name: Delete .NET Framework Installer
213 | win_file:
214 | path: "{{ dotnet_installer_path }}"
215 | state: absent
216 |
217 | - name: Copy diskpart script
218 | win_copy:
219 | src: files/diskpart.txt
220 | dest: C:\
221 |
222 | - name: Bring D drive online
223 | win_shell: |
224 | diskpart /s diskpart.txt
225 | args:
226 | chdir: C:\
227 | ignore_errors: true
228 |
229 | - name: Set DNS on IPV4 Adapter to DC
230 | win_dns_client:
231 | adapter_names: '*'
232 | ipv4_addresses:
233 | - "{{ hostvars['localhost']['dc_ip'] }}"
234 | - "8.8.8.8"
235 | log_path: C:\dns_log.txt
236 | when: indomain == "yes"
237 |
238 | - name: Flush DNS Cache
239 | win_shell: |
240 | Clear-DnsClientCache
241 | Register-DnsClient
242 | when: indomain == "yes"
243 |
244 | - name: Join PAS Machines to the Domain
245 | win_domain_membership:
246 | dns_domain_name: "{{ domain }}"
247 | domain_admin_user: "{{ domain }}\\{{ ansible_user }}"
248 | domain_admin_password: "{{ ansible_password }}"
249 | state: domain
250 | register: domain_state
251 | when: indomain == "yes"
252 |
253 | - name: Reboot PAS Machines to Apply Changes
254 | win_reboot:
255 | reboot_timeout: 300
256 |
257 | - hosts: localhost
258 | gather_facts: no
259 | connection: local
260 | tasks:
261 |
262 | - name: Copy EC2 py files to Outputs Folder
263 | copy:
264 | src: "inventory/{{ item }}"
265 | dest: "outputs/{{ item }}"
266 | remote_src: yes
267 | with_items:
268 | - ec2.py
269 | - ec2.ini
270 |
271 | - name: Make ec2.py executable
272 | file:
273 | path: outputs/ec2.py
274 | mode: "+x"
275 |
276 | - name: Change the TC number placeholder in ec2.ini
277 | shell: |
278 | sed -i -- 's/tc_number_placeholder/{{ tc_number }}/g' outputs/ec2.ini
279 |
280 | - name: Get Dynamic Inventory
281 | shell: |
282 | ansible-inventory -i outputs/ec2.py --list --export -y | yq '.all.children | with_entries( select(.key|contains("tag_Type") ) )' | jq -s '{windows:{children:.[]}}' >> outputs/hosts.json
283 | register: inventory
284 |
285 | - name: Convert Inventory JSON to YAML
286 | command: "json2yaml outputs/hosts.json outputs/hosts_tc_{{ tc_number }}.yml"
287 |
288 | - name: Remove Empty Objects
289 | command: "sed -i -- 's/ {}//g' outputs/hosts_tc_{{ tc_number }}.yml"
290 |
291 | - name: Remove tag_Type_ string
292 | command: "sed -i -- 's/tag_Type_//g' outputs/hosts_tc_{{ tc_number }}.yml"
293 |
294 | - name: Remove Leftovers from Outputs Folder
295 | file:
296 | path: "{{ item }}"
297 | state: absent
298 | with_items:
299 | - outputs/hosts.json
300 | - outputs/ec2.py
301 | - outputs/ec2.ini
302 |
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/files/diskpart.txt:
--------------------------------------------------------------------------------
1 | select disk 1
2 | attributes disk clear readonly
3 | online disk
4 | convert mbr
5 | create partition primary
6 | format quick fs=ntfs label="Alternate"
7 | assign letter="D"
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/inventory/ec2.ini:
--------------------------------------------------------------------------------
1 | # Ansible EC2 external inventory script settings
2 | #
3 |
4 | [ec2]
5 |
6 | # to talk to a private eucalyptus instance uncomment these lines
7 | # and edit eucalyptus_host to be the host name of your cloud controller
8 | #eucalyptus = True
9 | #eucalyptus_host = clc.cloud.domain.org
10 |
11 | # AWS regions to make calls to. Set this to 'all' to make requests to all regions
12 | # in AWS and merge the results together. Alternatively, set this to a comma
13 | # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
14 | # provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or
15 | # AWS_DEFAULT_REGION environment variable will be read to determine the region.
16 | regions = auto
17 | # regions_exclude = us-gov-west-1, cn-north-1
18 |
19 | # When generating inventory, Ansible needs to know how to address a server.
20 | # Each EC2 instance has a lot of variables associated with it. Here is the list:
21 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
22 | # Below are 2 variables that are used as the address of a server:
23 | # - destination_variable
24 | # - vpc_destination_variable
25 |
26 | # This is the normal destination variable to use. If you are running Ansible
27 | # from outside EC2, then 'public_dns_name' makes the most sense. If you are
28 | # running Ansible from within EC2, then perhaps you want to use the internal
29 | # address, and should set this to 'private_dns_name'. The key of an EC2 tag
30 | # may optionally be used; however the boto instance variables hold precedence
31 | # in the event of a collision.
33 | destination_variable = public_dns_name
34 |
35 | # This allows you to override the inventory_name with an ec2 variable, instead
36 | # of using the destination_variable above. Addressing (aka ansible_ssh_host)
37 | # will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
38 | #hostname_variable = tag_Name
39 |
40 | # For servers inside a VPC, using DNS names may not make sense. When an instance
41 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting
42 | # this to 'ip_address' will return the public IP address. For instances in a
43 | # private subnet, this should be set to 'private_ip_address', and Ansible must
44 | # be run from within EC2. The key of an EC2 tag may optionally be used; however
45 | # the boto instance variables hold precedence in the event of a collision.
46 | # WARNING: instances in a private VPC _without_ a public IP address
47 | # will not be listed in the inventory until you set:
48 | # vpc_destination_variable = private_ip_address
49 | vpc_destination_variable = private_ip_address
50 |
51 | # The following two settings allow flexible ansible host naming based on a
52 | # python format string and a comma-separated list of ec2 tags. Note that:
53 | #
54 | # 1) If the tags referenced are not present for some instances, empty strings
55 | # will be substituted in the format string.
56 | # 2) This overrides both destination_variable and vpc_destination_variable.
57 | #
58 | #destination_format = {0}.{1}.example.com
59 | #destination_format_tags = Name,environment
60 |
61 | # To tag instances on EC2 with the resource records that point to them from
62 | # Route53, set 'route53' to True.
63 | route53 = False
64 |
65 | # To use Route53 records as the inventory hostnames, uncomment and set
66 | # to equal the domain name you wish to use. You must also have 'route53' (above)
67 | # set to True.
68 | # route53_hostnames = .example.com
69 |
70 | # To exclude RDS instances from the inventory, uncomment and set to False.
71 | #rds = False
72 |
73 | # To exclude ElastiCache instances from the inventory, uncomment and set to False.
74 | elasticache = False
75 |
76 | # Additionally, you can specify the list of zones to exclude looking up in
77 | # 'route53_excluded_zones' as a comma-separated list.
78 | # route53_excluded_zones = samplezone1.com, samplezone2.com
79 |
80 | # By default, only EC2 instances in the 'running' state are returned. Set
81 | # 'all_instances' to True to return all instances regardless of state.
82 | all_instances = False
83 |
84 | # By default, only EC2 instances in the 'running' state are returned. Specify
85 | # EC2 instance states to return as a comma-separated list. This
86 | # option is overridden when 'all_instances' is True.
87 | # instance_states = pending, running, shutting-down, terminated, stopping, stopped
88 |
89 | # By default, only RDS instances in the 'available' state are returned. Set
90 | # 'all_rds_instances' to True to return all RDS instances regardless of state.
91 | all_rds_instances = False
92 |
93 | # Include RDS cluster information (Aurora etc.)
94 | include_rds_clusters = False
95 |
96 | # By default, only ElastiCache clusters and nodes in the 'available' state
97 | # are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
98 | # to True to return all ElastiCache clusters and nodes, regardless of state.
99 | #
100 | # Note that all_elasticache_nodes only applies to listed clusters. That means
101 | # if you set all_elasticache_clusters to False, no node will be returned from
102 | # unavailable clusters, regardless of their state and of what you set for
103 | # all_elasticache_nodes.
104 | all_elasticache_replication_groups = False
105 | all_elasticache_clusters = False
106 | all_elasticache_nodes = False
107 |
108 | # API calls to EC2 are slow. For this reason, we cache the results of an API
109 | # call. Set this to the path you want cache files to be written to. Two files
110 | # will be written to this directory:
111 | # - ansible-ec2.cache
112 | # - ansible-ec2.index
113 | cache_path = ~/.ansible/tmp
114 |
115 | # The number of seconds a cache file is considered valid. After this many
116 | # seconds, a new API call will be made, and the cache file will be updated.
117 | # To disable the cache, set this value to 0
118 | cache_max_age = 0
119 |
120 | # Organize groups into a nested/hierarchy instead of a flat namespace.
121 | nested_groups = False
122 |
123 | # Replace - tags when creating groups to avoid issues with ansible
124 | replace_dash_in_groups = True
125 |
126 | # If set to true, any tag of the form "a,b,c" is expanded into a list
127 | # and the results are used to create additional tag_* inventory groups.
128 | expand_csv_tags = False
129 |
130 | # The EC2 inventory output can become very large. To manage its size,
131 | # configure which groups should be created.
132 | group_by_instance_id = False
133 | group_by_region = False
134 | group_by_availability_zone = False
135 | group_by_aws_account = False
136 | group_by_ami_id = False
137 | group_by_instance_type = False
138 | group_by_instance_state = False
139 | group_by_platform = False
140 | group_by_key_pair = False
141 | group_by_vpc_id = False
142 | group_by_security_group = False
143 | group_by_tag_keys = True
144 | group_by_tag_none = False
145 | group_by_route53_names = False
146 | group_by_rds_engine = False
147 | group_by_rds_parameter_group = False
148 | group_by_elasticache_engine = False
149 | group_by_elasticache_cluster = False
150 | group_by_elasticache_parameter_group = False
151 | group_by_elasticache_replication_group = False
152 |
153 | # If you only want to include hosts that match a certain regular expression
154 | # pattern_include = staging-*
155 |
156 | # If you want to exclude any hosts that match a certain regular expression
157 | # pattern_exclude = staging-*
158 |
159 | # Instance filters can be used to control which instances are retrieved for
160 | # inventory. For the full list of possible filters, please read the EC2 API
161 | # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
162 | # Filters are key/value pairs separated by '='; to list multiple filters, use
163 | # a comma-separated list. To "AND" criteria together, use "&". Note that
164 | # the "AND" is not useful along with stack_filters and so such usage is not allowed.
165 | # See examples below.
166 |
167 | # If you want to apply multiple filters simultaneously, set stack_filters to
168 | # True. Default behaviour is to combine the results of all filters. Stacking
169 | # allows the use of multiple conditions to filter down, for example by
170 | # environment and type of host.
171 | stack_filters = False
172 |
173 | # Retrieve only instances with (key=value) env=staging tag
174 | # instance_filters = tag:env=staging
175 |
176 | # Retrieve only PAS component instances (Type=pvwa, cpm, or psm) for the current test case
177 | instance_filters = tag:Type=pvwa&tag:TC_Number=tc_number_placeholder,tag:Type=cpm&tag:TC_Number=tc_number_placeholder,tag:Type=psm&tag:TC_Number=tc_number_placeholder
178 |
179 | # Retrieve only t1.micro instances OR instances with tag env=staging
180 | # instance_filters = instance-type=t1.micro,tag:env=staging
181 |
182 | # You can use wildcards in filter values also. Below will list instances which
183 | # tag Name value matches webservers1*
184 | # (ex. webservers15, webservers1a, webservers123 etc)
185 | # instance_filters = tag:Name=webservers1*
186 |
187 | # Retrieve only instances of type t1.micro that also have tag env=stage
188 | # instance_filters = instance-type=t1.micro&tag:env=stage
189 |
190 | # Retrieve instances of type t1.micro AND tag env=stage, as well as any instance
191 | # that are of type m3.large, regardless of env tag
192 | # instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
193 |
194 | # An IAM role can be assumed, so all requests are run as that role.
195 | # This can be useful for connecting across different accounts, or to limit user
196 | # access
197 | # iam_role = role-arn
198 |
199 | # A boto configuration profile may be used to separate out credentials
200 | # see http://boto.readthedocs.org/en/latest/boto_config_tut.html
201 | # boto_profile = some-boto-profile-name
202 |
203 |
204 | [credentials]
205 |
206 | # The AWS credentials can optionally be specified here. Credentials specified
207 | # here are ignored if the environment variable AWS_ACCESS_KEY_ID or
208 | # AWS_PROFILE is set, or if the boto_profile property above is set.
209 | #
210 | # Supplying AWS credentials here is not recommended, as it introduces
211 | # non-trivial security concerns. When going down this route, please make sure
212 | # to set access permissions for this file correctly, e.g. handle it the same
213 | # way as you would a private SSH key.
214 | #
215 | # Unlike the boto and AWS configure files, this section does not support
216 | # profiles.
217 | #
218 | # aws_access_key_id = AXXXXXXXXXXXXXX
219 | # aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
220 | # aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
221 |
--------------------------------------------------------------------------------
/tests/playbooks/pas-infrastructure/inventory/ec2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | EC2 external inventory script
5 | =================================
6 |
7 | Generates inventory that Ansible can understand by making API request to
8 | AWS EC2 using the Boto library.
9 |
10 | NOTE: This script assumes Ansible is being executed where the environment
11 | variables needed for Boto have already been set:
12 | export AWS_ACCESS_KEY_ID='AK123'
13 | export AWS_SECRET_ACCESS_KEY='abc123'
14 |
15 | Optional region environment variable if region is 'auto'
16 |
17 | This script also assumes that there is an ec2.ini file alongside it. To specify a
18 | different path to ec2.ini, define the EC2_INI_PATH environment variable:
19 |
20 | export EC2_INI_PATH=/path/to/my_ec2.ini
21 |
22 | If you're using eucalyptus you need to set the above variables and
23 | you need to define:
24 |
25 | export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
26 |
27 | If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
28 | using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
29 | the AWS_PROFILE variable:
30 |
31 | AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
32 |
33 | For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
34 |
35 | You can filter for specific EC2 instances by creating an environment variable
36 | named EC2_INSTANCE_FILTERS, which has the same format as the instance_filters
37 | entry documented in ec2.ini. For example, to find all hosts whose name begins
38 | with 'webserver', one might use:
39 |
40 | export EC2_INSTANCE_FILTERS='tag:Name=webserver*'
41 |
42 | When run against a specific host, this script returns the following variables:
43 | - ec2_ami_launch_index
44 | - ec2_architecture
45 | - ec2_association
46 | - ec2_attachTime
47 | - ec2_attachment
48 | - ec2_attachmentId
49 | - ec2_block_devices
50 | - ec2_client_token
51 | - ec2_deleteOnTermination
52 | - ec2_description
53 | - ec2_deviceIndex
54 | - ec2_dns_name
55 | - ec2_eventsSet
56 | - ec2_group_name
57 | - ec2_hypervisor
58 | - ec2_id
59 | - ec2_image_id
60 | - ec2_instanceState
61 | - ec2_instance_type
62 | - ec2_ipOwnerId
63 | - ec2_ip_address
64 | - ec2_item
65 | - ec2_kernel
66 | - ec2_key_name
67 | - ec2_launch_time
68 | - ec2_monitored
69 | - ec2_monitoring
70 | - ec2_networkInterfaceId
71 | - ec2_ownerId
72 | - ec2_persistent
73 | - ec2_placement
74 | - ec2_platform
75 | - ec2_previous_state
76 | - ec2_private_dns_name
77 | - ec2_private_ip_address
78 | - ec2_publicIp
79 | - ec2_public_dns_name
80 | - ec2_ramdisk
81 | - ec2_reason
82 | - ec2_region
83 | - ec2_requester_id
84 | - ec2_root_device_name
85 | - ec2_root_device_type
86 | - ec2_security_group_ids
87 | - ec2_security_group_names
88 | - ec2_shutdown_state
89 | - ec2_sourceDestCheck
90 | - ec2_spot_instance_request_id
91 | - ec2_state
92 | - ec2_state_code
93 | - ec2_state_reason
94 | - ec2_status
95 | - ec2_subnet_id
96 | - ec2_tenancy
97 | - ec2_virtualization_type
98 | - ec2_vpc_id
99 |
100 | These variables are pulled out of a boto.ec2.instance object. There is a lack of
101 | consistency with variable spellings (camelCase and underscores) since this
102 | just loops through all variables the object exposes. It is preferred to use the
103 | ones with underscores when multiple exist.
104 |
105 | In addition, if an instance has AWS tags associated with it, each tag is a new
106 | variable named:
107 | - ec2_tag_[Key] = [Value]
108 |
109 | Security groups are comma-separated in 'ec2_security_group_ids' and
110 | 'ec2_security_group_names'.
111 |
112 | When destination_format and destination_format_tags are specified
113 | the destination_format can be built from the instance tags and attributes.
114 | The behavior will first check the user defined tags, then proceed to
115 | check instance attributes, and finally if neither are found 'nil' will
116 | be used instead.
117 |
118 | 'my_instance': {
119 | 'region': 'us-east-1', # attribute
120 | 'availability_zone': 'us-east-1a', # attribute
121 | 'private_dns_name': '172.31.0.1', # attribute
122 | 'ec2_tag_deployment': 'blue', # tag
123 | 'ec2_tag_clusterid': 'ansible', # tag
124 | 'ec2_tag_Name': 'webserver', # tag
125 | ...
126 | }
127 |
128 | Inside of the ec2.ini file the following settings are specified:
129 | ...
130 | destination_format: {0}-{1}-{2}-{3}
131 | destination_format_tags: Name,clusterid,deployment,private_dns_name
132 | ...
133 |
134 | These settings would produce a destination_format as the following:
135 | 'webserver-ansible-blue-172.31.0.1'
136 | '''
137 |
138 | # (c) 2012, Peter Sankauskas
139 | #
140 | # This file is part of Ansible,
141 | #
142 | # Ansible is free software: you can redistribute it and/or modify
143 | # it under the terms of the GNU General Public License as published by
144 | # the Free Software Foundation, either version 3 of the License, or
145 | # (at your option) any later version.
146 | #
147 | # Ansible is distributed in the hope that it will be useful,
148 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
149 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
150 | # GNU General Public License for more details.
151 | #
152 | # You should have received a copy of the GNU General Public License
153 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
154 |
155 | ######################################################################
156 |
157 | import sys
158 | import os
159 | import argparse
160 | import re
161 | from time import time
162 | from copy import deepcopy
163 | import boto
164 | from boto import ec2
165 | from boto import rds
166 | from boto import elasticache
167 | from boto import route53
168 | from boto import sts
169 | import six
170 |
171 | from ansible.module_utils import ec2 as ec2_utils
172 |
173 | HAS_BOTO3 = False
174 | try:
175 | import boto3 # noqa
176 | HAS_BOTO3 = True
177 | except ImportError:
178 | pass
179 |
180 | from six.moves import configparser
181 | from collections import defaultdict
182 |
183 | import json
184 |
185 | DEFAULTS = {
186 | 'all_elasticache_clusters': 'False',
187 | 'all_elasticache_nodes': 'False',
188 | 'all_elasticache_replication_groups': 'False',
189 | 'all_instances': 'False',
190 | 'all_rds_instances': 'False',
191 | 'aws_access_key_id': '',
192 | 'aws_secret_access_key': '',
193 | 'aws_security_token': '',
194 | 'boto_profile': '',
195 | 'cache_max_age': '300',
196 | 'cache_path': '~/.ansible/tmp',
197 | 'destination_variable': 'public_dns_name',
198 | 'elasticache': 'True',
199 | 'eucalyptus': 'False',
200 | 'eucalyptus_host': '',
201 | 'expand_csv_tags': 'False',
202 | 'group_by_ami_id': 'True',
203 | 'group_by_availability_zone': 'True',
204 | 'group_by_aws_account': 'False',
205 | 'group_by_elasticache_cluster': 'True',
206 | 'group_by_elasticache_engine': 'True',
207 | 'group_by_elasticache_parameter_group': 'True',
208 | 'group_by_elasticache_replication_group': 'True',
209 | 'group_by_instance_id': 'True',
210 | 'group_by_instance_state': 'False',
211 | 'group_by_instance_type': 'True',
212 | 'group_by_key_pair': 'True',
213 | 'group_by_platform': 'True',
214 | 'group_by_rds_engine': 'True',
215 | 'group_by_rds_parameter_group': 'True',
216 | 'group_by_region': 'True',
217 | 'group_by_route53_names': 'True',
218 | 'group_by_security_group': 'True',
219 | 'group_by_tag_keys': 'True',
220 | 'group_by_tag_none': 'True',
221 | 'group_by_vpc_id': 'True',
222 | 'hostname_variable': '',
223 | 'iam_role': '',
224 | 'include_rds_clusters': 'False',
225 | 'nested_groups': 'False',
226 | 'pattern_exclude': '',
227 | 'pattern_include': '',
228 | 'rds': 'False',
229 | 'regions': 'all',
230 | 'regions_exclude': 'us-gov-west-1, cn-north-1',
231 | 'replace_dash_in_groups': 'True',
232 | 'route53': 'False',
233 | 'route53_excluded_zones': '',
234 | 'route53_hostnames': '',
235 | 'stack_filters': 'False',
236 | 'vpc_destination_variable': 'ip_address'
237 | }
238 |
239 |
240 | class Ec2Inventory(object):
241 |
242 | def _empty_inventory(self):
243 | return {"_meta": {"hostvars": {}}}
244 |
245 | def __init__(self):
246 | ''' Main execution path '''
247 |
248 | # Inventory grouped by instance IDs, tags, security groups, regions,
249 | # and availability zones
250 | self.inventory = self._empty_inventory()
251 |
252 | self.aws_account_id = None
253 |
254 | # Index of hostname (address) to instance ID
255 | self.index = {}
256 |
257 | # Boto profile to use (if any)
258 | self.boto_profile = None
259 |
260 | # AWS credentials.
261 | self.credentials = {}
262 |
263 | # Read settings and parse CLI arguments
264 | self.parse_cli_args()
265 | self.read_settings()
266 |
267 | # Make sure that profile_name is not passed at all if not set
268 | # as pre 2.24 boto will fall over otherwise
269 | if self.boto_profile:
270 | if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
271 | self.fail_with_error("boto version must be >= 2.24 to use profile")
272 |
273 | # Cache
274 | if self.args.refresh_cache:
275 | self.do_api_calls_update_cache()
276 | elif not self.is_cache_valid():
277 | self.do_api_calls_update_cache()
278 |
279 | # Data to print
280 | if self.args.host:
281 | data_to_print = self.get_host_info()
282 |
283 | elif self.args.list:
284 | # Display list of instances for inventory
285 | if self.inventory == self._empty_inventory():
286 | data_to_print = self.get_inventory_from_cache()
287 | else:
288 | data_to_print = self.json_format_dict(self.inventory, True)
289 |
290 | print(data_to_print)
291 |
292 | def is_cache_valid(self):
293 | ''' Determines whether the cache file has expired or is still valid '''
294 |
295 | if os.path.isfile(self.cache_path_cache):
296 | mod_time = os.path.getmtime(self.cache_path_cache)
297 | current_time = time()
298 | if (mod_time + self.cache_max_age) > current_time:
299 | if os.path.isfile(self.cache_path_index):
300 | return True
301 |
302 | return False
303 |
304 | def read_settings(self):
305 | ''' Reads the settings from the ec2.ini file '''
306 |
307 | scriptbasename = __file__
308 | scriptbasename = os.path.basename(scriptbasename)
309 | scriptbasename = scriptbasename.replace('.py', '')
310 |
311 | defaults = {
312 | 'ec2': {
313 | 'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
314 | 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
315 | }
316 | }
317 |
318 | if six.PY3:
319 | config = configparser.ConfigParser(DEFAULTS)
320 | else:
321 | config = configparser.SafeConfigParser(DEFAULTS)
322 | ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
323 | ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
324 |
325 | if not os.path.isfile(ec2_ini_path):
326 | ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
327 |
328 | if os.path.isfile(ec2_ini_path):
329 | config.read(ec2_ini_path)
330 |
331 | # Add empty sections if they don't exist
332 | try:
333 | config.add_section('ec2')
334 | except configparser.DuplicateSectionError:
335 | pass
336 |
337 | try:
338 | config.add_section('credentials')
339 | except configparser.DuplicateSectionError:
340 | pass
341 |
342 | # is eucalyptus?
343 | self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
344 | self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
345 |
346 | # Regions
347 | self.regions = []
348 | config_regions = config.get('ec2', 'regions')
349 | if (config_regions == 'all'):
350 | if self.eucalyptus_host:
351 | # credentials belong to connect_euca, not to list.append
352 | self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
352 | else:
353 | config_regions_exclude = config.get('ec2', 'regions_exclude')
354 |
355 | for region_info in ec2.regions():
356 | if region_info.name not in config_regions_exclude:
357 | self.regions.append(region_info.name)
358 | else:
359 | self.regions = config_regions.split(",")
360 | if 'auto' in self.regions:
361 | env_region = os.environ.get('AWS_REGION')
362 | if env_region is None:
363 | env_region = os.environ.get('AWS_DEFAULT_REGION')
364 | self.regions = [env_region]
365 |
366 | # Destination addresses
367 | self.destination_variable = config.get('ec2', 'destination_variable')
368 | self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
369 | self.hostname_variable = config.get('ec2', 'hostname_variable')
370 |
371 | if config.has_option('ec2', 'destination_format') and \
372 | config.has_option('ec2', 'destination_format_tags'):
373 | self.destination_format = config.get('ec2', 'destination_format')
374 | self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
375 | else:
376 | self.destination_format = None
377 | self.destination_format_tags = None
378 |
379 | # Route53
380 | self.route53_enabled = config.getboolean('ec2', 'route53')
381 | self.route53_hostnames = config.get('ec2', 'route53_hostnames')
382 |
383 | self.route53_excluded_zones = []
384 | self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
385 |
386 | # Include RDS instances?
387 | self.rds_enabled = config.getboolean('ec2', 'rds')
388 |
389 | # Include RDS cluster instances?
390 | self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
391 |
392 | # Include ElastiCache instances?
393 | self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
394 |
395 | # Return all EC2 instances?
396 | self.all_instances = config.getboolean('ec2', 'all_instances')
397 |
398 | # Instance states to be gathered in inventory. Default is 'running'.
399 | # Setting 'all_instances' to 'yes' overrides this option.
400 | ec2_valid_instance_states = [
401 | 'pending',
402 | 'running',
403 | 'shutting-down',
404 | 'terminated',
405 | 'stopping',
406 | 'stopped'
407 | ]
408 | self.ec2_instance_states = []
409 | if self.all_instances:
410 | self.ec2_instance_states = ec2_valid_instance_states
411 | elif config.has_option('ec2', 'instance_states'):
412 | for instance_state in config.get('ec2', 'instance_states').split(','):
413 | instance_state = instance_state.strip()
414 | if instance_state not in ec2_valid_instance_states:
415 | continue
416 | self.ec2_instance_states.append(instance_state)
417 | else:
418 | self.ec2_instance_states = ['running']
419 |
420 | # Return all RDS instances? (if RDS is enabled)
421 | self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
422 |
423 | # Return all ElastiCache replication groups? (if ElastiCache is enabled)
424 | self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
425 |
426 | # Return all ElastiCache clusters? (if ElastiCache is enabled)
427 | self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
428 |
429 | # Return all ElastiCache nodes? (if ElastiCache is enabled)
430 | self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
431 |
432 | # boto configuration profile (prefer CLI argument then environment variables then config file)
433 | self.boto_profile = self.args.boto_profile or \
434 | os.environ.get('AWS_PROFILE') or \
435 | config.get('ec2', 'boto_profile')
436 |
437 | # AWS credentials (prefer environment variables)
438 | if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
439 | os.environ.get('AWS_PROFILE')):
440 |
441 | aws_access_key_id = config.get('credentials', 'aws_access_key_id')
442 | aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
443 | aws_security_token = config.get('credentials', 'aws_security_token')
444 |
445 | if aws_access_key_id:
446 | self.credentials = {
447 | 'aws_access_key_id': aws_access_key_id,
448 | 'aws_secret_access_key': aws_secret_access_key
449 | }
450 | if aws_security_token:
451 | self.credentials['security_token'] = aws_security_token
452 |
453 | # Cache related
454 | cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
455 | if self.boto_profile:
456 | cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
457 | if not os.path.exists(cache_dir):
458 | os.makedirs(cache_dir)
459 |
460 | cache_name = 'ansible-ec2'
461 | cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
462 | if cache_id:
463 | cache_name = '%s-%s' % (cache_name, cache_id)
464 | cache_name += '-' + str(abs(hash(__file__)))[1:7]
465 | self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
466 | self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
467 | self.cache_max_age = config.getint('ec2', 'cache_max_age')
468 |
469 | self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
470 |
471 | # Configure nested groups instead of flat namespace.
472 | self.nested_groups = config.getboolean('ec2', 'nested_groups')
473 |
474 | # Replace dash or not in group names
475 | self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
476 |
477 | # IAM role to assume for connection
478 | self.iam_role = config.get('ec2', 'iam_role')
479 |
480 | # Configure which groups should be created.
481 |
482 | group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
483 | for option in group_by_options:
484 | setattr(self, option, config.getboolean('ec2', option))
485 |
486 | # Do we need to just include hosts that match a pattern?
487 | self.pattern_include = config.get('ec2', 'pattern_include')
488 | if self.pattern_include:
489 | self.pattern_include = re.compile(self.pattern_include)
490 |
491 | # Do we need to exclude hosts that match a pattern?
492 | self.pattern_exclude = config.get('ec2', 'pattern_exclude')
493 | if self.pattern_exclude:
494 | self.pattern_exclude = re.compile(self.pattern_exclude)
495 |
496 | # Do we want to stack multiple filters?
497 | self.stack_filters = config.getboolean('ec2', 'stack_filters')
498 |
499 | # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
500 | self.ec2_instance_filters = []
501 |
502 | if config.has_option('ec2', 'instance_filters') or 'EC2_INSTANCE_FILTERS' in os.environ:
503 | filters = os.getenv('EC2_INSTANCE_FILTERS', config.get('ec2', 'instance_filters') if config.has_option('ec2', 'instance_filters') else '')
504 |
505 | if self.stack_filters and '&' in filters:
506 | self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n")
507 |
508 | filter_sets = [f for f in filters.split(',') if f]
509 |
510 | for filter_set in filter_sets:
511 | filters = {}
512 | filter_set = filter_set.strip()
513 | for instance_filter in filter_set.split("&"):
514 | instance_filter = instance_filter.strip()
515 | if not instance_filter or '=' not in instance_filter:
516 | continue
517 | filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
518 | if not filter_key:
519 | continue
520 | filters[filter_key] = filter_value
521 | self.ec2_instance_filters.append(filters.copy())
522 |
523 | def parse_cli_args(self):
524 | ''' Command line argument processing '''
525 |
526 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
527 | parser.add_argument('--list', action='store_true', default=True,
528 | help='List instances (default: True)')
529 | parser.add_argument('--host', action='store',
530 | help='Get all the variables about a specific instance')
531 | parser.add_argument('--refresh-cache', action='store_true', default=False,
532 | help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
533 | parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
534 | help='Use boto profile for connections to EC2')
535 | self.args = parser.parse_args()
536 |
537 | def do_api_calls_update_cache(self):
538 | ''' Do API calls to each region, and save data in cache files '''
539 |
540 | if self.route53_enabled:
541 | self.get_route53_records()
542 |
543 | for region in self.regions:
544 | self.get_instances_by_region(region)
545 | if self.rds_enabled:
546 | self.get_rds_instances_by_region(region)
547 | if self.elasticache_enabled:
548 | self.get_elasticache_clusters_by_region(region)
549 | self.get_elasticache_replication_groups_by_region(region)
550 | if self.include_rds_clusters:
551 | self.include_rds_clusters_by_region(region)
552 |
553 | self.write_to_cache(self.inventory, self.cache_path_cache)
554 | self.write_to_cache(self.index, self.cache_path_index)
555 |
556 | def connect(self, region):
557 | ''' create connection to api server'''
558 | if self.eucalyptus:
559 | conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
560 | conn.APIVersion = '2010-08-31'
561 | else:
562 | conn = self.connect_to_aws(ec2, region)
563 | return conn
564 |
565 | def boto_fix_security_token_in_profile(self, connect_args):
566 | ''' monkey patch for boto issue boto/boto#2100 '''
567 | profile = 'profile ' + self.boto_profile
568 | if boto.config.has_option(profile, 'aws_security_token'):
569 | connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
570 | return connect_args
571 |
572 | def connect_to_aws(self, module, region):
573 | connect_args = deepcopy(self.credentials)
574 |
575 | # only pass the profile name if it's set (as it is not supported by older boto versions)
576 | if self.boto_profile:
577 | connect_args['profile_name'] = self.boto_profile
578 | self.boto_fix_security_token_in_profile(connect_args)
579 |
580 | if self.iam_role:
581 | sts_conn = sts.connect_to_region(region, **connect_args)
582 | role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory')
583 | connect_args['aws_access_key_id'] = role.credentials.access_key
584 | connect_args['aws_secret_access_key'] = role.credentials.secret_key
585 | connect_args['security_token'] = role.credentials.session_token
586 |
587 | conn = module.connect_to_region(region, **connect_args)
588 | # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
589 | if conn is None:
590 | self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
591 | return conn
592 |
593 | def get_instances_by_region(self, region):
594 | ''' Makes an AWS EC2 API call to the list of instances in a particular
595 | region '''
596 |
597 | try:
598 | conn = self.connect(region)
599 | reservations = []
600 | if self.ec2_instance_filters:
601 | if self.stack_filters:
602 | filters_dict = {}
603 | for filters in self.ec2_instance_filters:
604 | filters_dict.update(filters)
605 | reservations.extend(conn.get_all_instances(filters=filters_dict))
606 | else:
607 | for filters in self.ec2_instance_filters:
608 | reservations.extend(conn.get_all_instances(filters=filters))
609 | else:
610 | reservations = conn.get_all_instances()
611 |
612 | # Pull the tags back in a second step
613 | # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
614 | # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
615 | instance_ids = []
616 | for reservation in reservations:
617 | instance_ids.extend([instance.id for instance in reservation.instances])
618 |
619 | max_filter_value = 199
620 | tags = []
621 | for i in range(0, len(instance_ids), max_filter_value):
622 | tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
623 |
624 | tags_by_instance_id = defaultdict(dict)
625 | for tag in tags:
626 | tags_by_instance_id[tag.res_id][tag.name] = tag.value
627 |
628 | if (not self.aws_account_id) and reservations:
629 | self.aws_account_id = reservations[0].owner_id
630 |
631 | for reservation in reservations:
632 | for instance in reservation.instances:
633 | instance.tags = tags_by_instance_id[instance.id]
634 | self.add_instance(instance, region)
635 |
636 | except boto.exception.BotoServerError as e:
637 | if e.error_code == 'AuthFailure':
638 | error = self.get_auth_error_message()
639 | else:
640 | backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
641 | error = "Error connecting to %s backend.\n%s" % (backend, e.message)
642 | self.fail_with_error(error, 'getting EC2 instances')
643 |
644 | def tags_match_filters(self, tags):
645 | ''' return True if given tags match configured filters; with stack_filters enabled all tag filters must match (AND), otherwise any single match suffices (OR) '''
646 | if not self.ec2_instance_filters:
647 | return True
648 |
649 | for filters in self.ec2_instance_filters:
650 | for filter_name, filter_value in filters.items():
651 | if filter_name[:4] != 'tag:':
652 | continue
653 | filter_name = filter_name[4:]
654 | if filter_name not in tags:
655 | if self.stack_filters:
656 | return False
657 | continue
658 | if isinstance(filter_value, list):
659 | if self.stack_filters and tags[filter_name] not in filter_value:
660 | return False
661 | if not self.stack_filters and tags[filter_name] in filter_value:
662 | return True
663 | if isinstance(filter_value, six.string_types):
664 | if self.stack_filters and tags[filter_name] != filter_value:
665 | return False
666 | if not self.stack_filters and tags[filter_name] == filter_value:
667 | return True
668 |
669 | return self.stack_filters
670 |
671 | def get_rds_instances_by_region(self, region):
672 | ''' Makes an AWS API call to the list of RDS instances in a particular
673 | region '''
674 |
675 | if not HAS_BOTO3:
676 | self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
677 | "getting RDS instances")
678 |
679 | client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
680 | db_instances = client.describe_db_instances()
681 |
682 | try:
683 | conn = self.connect_to_aws(rds, region)
684 | if conn:
685 | marker = None
686 | while True:
687 | instances = conn.get_all_dbinstances(marker=marker)
688 | marker = instances.marker
689 | for index, instance in enumerate(instances):
690 | # Add tags to instances.
691 | instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
692 | tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
693 | instance.tags = {}
694 | for tag in tags:
695 | instance.tags[tag['Key']] = tag['Value']
696 | if self.tags_match_filters(instance.tags):
697 | self.add_rds_instance(instance, region)
698 | if not marker:
699 | break
700 | except boto.exception.BotoServerError as e:
701 | error = e.reason
702 |
703 | if e.error_code == 'AuthFailure':
704 | error = self.get_auth_error_message()
705 | elif e.error_code == "OptInRequired":
706 | error = "RDS hasn't been enabled for this account yet. " \
707 | "You must either log in to the RDS service through the AWS console to enable it, " \
708 | "or set 'rds = False' in ec2.ini"
709 | elif not e.reason == "Forbidden":
710 | error = "Looks like AWS RDS is down:\n%s" % e.message
711 | self.fail_with_error(error, 'getting RDS instances')
712 |
713 | def include_rds_clusters_by_region(self, region):
714 | if not HAS_BOTO3:
715 | self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
716 | "getting RDS clusters")
717 |
718 | client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
719 |
720 | marker, clusters = '', []
721 | while marker is not None:
722 | resp = client.describe_db_clusters(Marker=marker)
723 | clusters.extend(resp["DBClusters"])
724 | marker = resp.get('Marker', None)
725 |
726 | account_id = boto.connect_iam().get_user().arn.split(':')[4]
727 | c_dict = {}
728 | for c in clusters:
729 | # remove these datetime objects as there is no serialisation to json
730 | # currently in place and we don't need the data yet
731 | if 'EarliestRestorableTime' in c:
732 | del c['EarliestRestorableTime']
733 | if 'LatestRestorableTime' in c:
734 | del c['LatestRestorableTime']
735 |
736 | if not self.ec2_instance_filters:
737 | matches_filter = True
738 | else:
739 | matches_filter = False
740 |
741 | try:
742 | # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
743 | tags = client.list_tags_for_resource(
744 | ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
745 | c['Tags'] = tags['TagList']
746 |
747 | if self.ec2_instance_filters:
748 | for filters in self.ec2_instance_filters:
749 | for filter_key, filter_values in filters.items():
750 | # get AWS tag key e.g. tag:env will be 'env'
751 | tag_name = filter_key.split(":", 1)[1]
752 | # Filter values is a list (if you put multiple values for the same tag name)
753 | matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
754 |
755 | if matches_filter:
756 | # it matches a filter, so stop looking for further matches
757 | break
758 |
759 | if matches_filter:
760 | break
761 |
762 | except Exception as e:
763 | if e.message.find('DBInstanceNotFound') >= 0:
764 | # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
765 | # Ignore errors when trying to find tags for these
766 | pass
767 |
768 | # ignore empty clusters caused by AWS bug
769 | if len(c['DBClusterMembers']) == 0:
770 | continue
771 | elif matches_filter:
772 | c_dict[c['DBClusterIdentifier']] = c
773 |
774 | self.inventory['db_clusters'] = c_dict
775 |
776 | def get_elasticache_clusters_by_region(self, region):
777 |         ''' Makes an AWS API call to get the list of ElastiCache clusters (with
778 | nodes' info) in a particular region.'''
779 |
780 | # ElastiCache boto module doesn't provide a get_all_instances method,
781 | # that's why we need to call describe directly (it would be called by
782 | # the shorthand method anyway...)
783 | clusters = []
784 | try:
785 | conn = self.connect_to_aws(elasticache, region)
786 | if conn:
787 | # show_cache_node_info = True
788 | # because we also want nodes' information
789 | _marker = 1
790 | while _marker:
791 | if _marker == 1:
792 | _marker = None
793 | response = conn.describe_cache_clusters(None, None, _marker, True)
794 | _marker = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['Marker']
795 | try:
796 | # Boto also doesn't provide wrapper classes to CacheClusters or
797 | # CacheNodes. Because of that we can't make use of the get_list
798 | # method in the AWSQueryConnection. Let's do the work manually
799 | clusters = clusters + response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
800 | except KeyError as e:
801 | error = "ElastiCache query to AWS failed (unexpected format)."
802 | self.fail_with_error(error, 'getting ElastiCache clusters')
803 | except boto.exception.BotoServerError as e:
804 | error = e.reason
805 |
806 | if e.error_code == 'AuthFailure':
807 | error = self.get_auth_error_message()
808 | elif e.error_code == "OptInRequired":
809 | error = "ElastiCache hasn't been enabled for this account yet. " \
810 | "You must either log in to the ElastiCache service through the AWS console to enable it, " \
811 | "or set 'elasticache = False' in ec2.ini"
812 | elif not e.reason == "Forbidden":
813 | error = "Looks like AWS ElastiCache is down:\n%s" % e.message
814 | self.fail_with_error(error, 'getting ElastiCache clusters')
815 |
816 | for cluster in clusters:
817 | self.add_elasticache_cluster(cluster, region)
818 |
819 | def get_elasticache_replication_groups_by_region(self, region):
820 |         ''' Makes an AWS API call to get the list of ElastiCache replication groups
821 | in a particular region.'''
822 |
823 | # ElastiCache boto module doesn't provide a get_all_instances method,
824 | # that's why we need to call describe directly (it would be called by
825 | # the shorthand method anyway...)
826 | try:
827 | conn = self.connect_to_aws(elasticache, region)
828 | if conn:
829 | response = conn.describe_replication_groups()
830 |
831 | except boto.exception.BotoServerError as e:
832 | error = e.reason
833 |
834 | if e.error_code == 'AuthFailure':
835 | error = self.get_auth_error_message()
836 | if not e.reason == "Forbidden":
837 | error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
838 | self.fail_with_error(error, 'getting ElastiCache clusters')
839 |
840 | try:
841 | # Boto also doesn't provide wrapper classes to ReplicationGroups
842 | # Because of that we can't make use of the get_list method in the
843 | # AWSQueryConnection. Let's do the work manually
844 | replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
845 |
846 | except KeyError as e:
847 | error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
848 | self.fail_with_error(error, 'getting ElastiCache clusters')
849 |
850 | for replication_group in replication_groups:
851 | self.add_elasticache_replication_group(replication_group, region)
852 |
853 | def get_auth_error_message(self):
854 | ''' create an informative error message if there is an issue authenticating'''
855 | errors = ["Authentication error retrieving ec2 inventory."]
856 | if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
857 | errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
858 | else:
859 | errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
860 |
861 | boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
862 | boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
863 | if len(boto_config_found) > 0:
864 | errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
865 | else:
866 | errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
867 |
868 | return '\n'.join(errors)
869 |
870 | def fail_with_error(self, err_msg, err_operation=None):
871 | '''log an error to std err for ansible-playbook to consume and exit'''
872 | if err_operation:
873 | err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
874 | err_msg=err_msg, err_operation=err_operation)
875 | sys.stderr.write(err_msg)
876 | sys.exit(1)
877 |
878 | def get_instance(self, region, instance_id):
879 | conn = self.connect(region)
880 |
881 | reservations = conn.get_all_instances([instance_id])
882 | for reservation in reservations:
883 | for instance in reservation.instances:
884 | return instance
885 |
886 | def add_instance(self, instance, region):
887 | ''' Adds an instance to the inventory and index, as long as it is
888 | addressable '''
889 |
890 | # Only return instances with desired instance states
891 | if instance.state not in self.ec2_instance_states:
892 | return
893 |
894 | # Select the best destination address
895 | # When destination_format and destination_format_tags are specified
896 | # the following code will attempt to find the instance tags first,
897 | # then the instance attributes next, and finally if neither are found
898 | # assign nil for the desired destination format attribute.
899 | if self.destination_format and self.destination_format_tags:
900 | dest_vars = []
901 | inst_tags = getattr(instance, 'tags')
902 | for tag in self.destination_format_tags:
903 | if tag in inst_tags:
904 | dest_vars.append(inst_tags[tag])
905 | elif hasattr(instance, tag):
906 | dest_vars.append(getattr(instance, tag))
907 | else:
908 | dest_vars.append('nil')
909 |
910 | dest = self.destination_format.format(*dest_vars)
911 | elif instance.subnet_id:
912 | dest = getattr(instance, self.vpc_destination_variable, None)
913 | if dest is None:
914 | dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
915 | else:
916 | dest = getattr(instance, self.destination_variable, None)
917 | if dest is None:
918 | dest = getattr(instance, 'tags').get(self.destination_variable, None)
919 |
920 | if not dest:
921 | # Skip instances we cannot address (e.g. private VPC subnet)
922 | return
923 |
924 | # Set the inventory name
925 | hostname = None
926 | if self.hostname_variable:
927 | if self.hostname_variable.startswith('tag_'):
928 | hostname = instance.tags.get(self.hostname_variable[4:], None)
929 | else:
930 | hostname = getattr(instance, self.hostname_variable)
931 |
932 | # set the hostname from route53
933 | if self.route53_enabled and self.route53_hostnames:
934 | route53_names = self.get_instance_route53_names(instance)
935 | for name in route53_names:
936 | if name.endswith(self.route53_hostnames):
937 | hostname = name
938 |
939 | # If we can't get a nice hostname, use the destination address
940 | if not hostname:
941 | hostname = dest
942 | # to_safe strips hostname characters like dots, so don't strip route53 hostnames
943 | elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames):
944 | hostname = hostname.lower()
945 | else:
946 | hostname = self.to_safe(hostname).lower()
947 |
948 | # if we only want to include hosts that match a pattern, skip those that don't
949 | if self.pattern_include and not self.pattern_include.match(hostname):
950 | return
951 |
952 | # if we need to exclude hosts that match a pattern, skip those
953 | if self.pattern_exclude and self.pattern_exclude.match(hostname):
954 | return
955 |
956 | # Add to index
957 | self.index[hostname] = [region, instance.id]
958 |
959 | # Inventory: Group by instance ID (always a group of 1)
960 | if self.group_by_instance_id:
961 | self.inventory[instance.id] = [hostname]
962 | if self.nested_groups:
963 | self.push_group(self.inventory, 'instances', instance.id)
964 |
965 | # Inventory: Group by region
966 | if self.group_by_region:
967 | self.push(self.inventory, region, hostname)
968 | if self.nested_groups:
969 | self.push_group(self.inventory, 'regions', region)
970 |
971 | # Inventory: Group by availability zone
972 | if self.group_by_availability_zone:
973 | self.push(self.inventory, instance.placement, hostname)
974 | if self.nested_groups:
975 | if self.group_by_region:
976 | self.push_group(self.inventory, region, instance.placement)
977 | self.push_group(self.inventory, 'zones', instance.placement)
978 |
979 | # Inventory: Group by Amazon Machine Image (AMI) ID
980 | if self.group_by_ami_id:
981 | ami_id = self.to_safe(instance.image_id)
982 | self.push(self.inventory, ami_id, hostname)
983 | if self.nested_groups:
984 | self.push_group(self.inventory, 'images', ami_id)
985 |
986 | # Inventory: Group by instance type
987 | if self.group_by_instance_type:
988 | type_name = self.to_safe('type_' + instance.instance_type)
989 | self.push(self.inventory, type_name, hostname)
990 | if self.nested_groups:
991 | self.push_group(self.inventory, 'types', type_name)
992 |
993 | # Inventory: Group by instance state
994 | if self.group_by_instance_state:
995 | state_name = self.to_safe('instance_state_' + instance.state)
996 | self.push(self.inventory, state_name, hostname)
997 | if self.nested_groups:
998 | self.push_group(self.inventory, 'instance_states', state_name)
999 |
1000 | # Inventory: Group by platform
1001 | if self.group_by_platform:
1002 | if instance.platform:
1003 | platform = self.to_safe('platform_' + instance.platform)
1004 | else:
1005 | platform = self.to_safe('platform_undefined')
1006 | self.push(self.inventory, platform, hostname)
1007 | if self.nested_groups:
1008 | self.push_group(self.inventory, 'platforms', platform)
1009 |
1010 | # Inventory: Group by key pair
1011 | if self.group_by_key_pair and instance.key_name:
1012 | key_name = self.to_safe('key_' + instance.key_name)
1013 | self.push(self.inventory, key_name, hostname)
1014 | if self.nested_groups:
1015 | self.push_group(self.inventory, 'keys', key_name)
1016 |
1017 | # Inventory: Group by VPC
1018 | if self.group_by_vpc_id and instance.vpc_id:
1019 | vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
1020 | self.push(self.inventory, vpc_id_name, hostname)
1021 | if self.nested_groups:
1022 | self.push_group(self.inventory, 'vpcs', vpc_id_name)
1023 |
1024 | # Inventory: Group by security group
1025 | if self.group_by_security_group:
1026 | try:
1027 | for group in instance.groups:
1028 | key = self.to_safe("security_group_" + group.name)
1029 | self.push(self.inventory, key, hostname)
1030 | if self.nested_groups:
1031 | self.push_group(self.inventory, 'security_groups', key)
1032 | except AttributeError:
1033 | self.fail_with_error('\n'.join(['Package boto seems a bit older.',
1034 | 'Please upgrade boto >= 2.3.0.']))
1035 |
1036 | # Inventory: Group by AWS account ID
1037 | if self.group_by_aws_account:
1038 | self.push(self.inventory, self.aws_account_id, hostname)
1039 | if self.nested_groups:
1040 | self.push_group(self.inventory, 'accounts', self.aws_account_id)
1041 |
1042 | # Inventory: Group by tag keys
1043 | if self.group_by_tag_keys:
1044 | for k, v in instance.tags.items():
1045 | if self.expand_csv_tags and v and ',' in v:
1046 | values = map(lambda x: x.strip(), v.split(','))
1047 | else:
1048 | values = [v]
1049 |
1050 | for v in values:
1051 | if v:
1052 | key = self.to_safe("tag_" + k + "=" + v)
1053 | else:
1054 | key = self.to_safe("tag_" + k)
1055 | self.push(self.inventory, key, hostname)
1056 | if self.nested_groups:
1057 | self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
1058 | if v:
1059 | self.push_group(self.inventory, self.to_safe("tag_" + k), key)
1060 |
1061 | # Inventory: Group by Route53 domain names if enabled
1062 | if self.route53_enabled and self.group_by_route53_names:
1063 | route53_names = self.get_instance_route53_names(instance)
1064 | for name in route53_names:
1065 | self.push(self.inventory, name, hostname)
1066 | if self.nested_groups:
1067 | self.push_group(self.inventory, 'route53', name)
1068 |
1069 | # Global Tag: instances without tags
1070 | if self.group_by_tag_none and len(instance.tags) == 0:
1071 | self.push(self.inventory, 'tag_none', hostname)
1072 | if self.nested_groups:
1073 | self.push_group(self.inventory, 'tags', 'tag_none')
1074 |
1075 | # Global Tag: tag all EC2 instances
1076 | self.push(self.inventory, 'ec2', hostname)
1077 |
1078 | self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
1079 | self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
1080 |
1081 | def add_rds_instance(self, instance, region):
1082 | ''' Adds an RDS instance to the inventory and index, as long as it is
1083 | addressable '''
1084 |
1085 | # Only want available instances unless all_rds_instances is True
1086 | if not self.all_rds_instances and instance.status != 'available':
1087 | return
1088 |
1089 | # Select the best destination address
1090 | dest = instance.endpoint[0]
1091 |
1092 | if not dest:
1093 | # Skip instances we cannot address (e.g. private VPC subnet)
1094 | return
1095 |
1096 | # Set the inventory name
1097 | hostname = None
1098 | if self.hostname_variable:
1099 | if self.hostname_variable.startswith('tag_'):
1100 | hostname = instance.tags.get(self.hostname_variable[4:], None)
1101 | else:
1102 | hostname = getattr(instance, self.hostname_variable)
1103 |
1104 | # If we can't get a nice hostname, use the destination address
1105 | if not hostname:
1106 | hostname = dest
1107 |
1108 | hostname = self.to_safe(hostname).lower()
1109 |
1110 | # Add to index
1111 | self.index[hostname] = [region, instance.id]
1112 |
1113 | # Inventory: Group by instance ID (always a group of 1)
1114 | if self.group_by_instance_id:
1115 | self.inventory[instance.id] = [hostname]
1116 | if self.nested_groups:
1117 | self.push_group(self.inventory, 'instances', instance.id)
1118 |
1119 | # Inventory: Group by region
1120 | if self.group_by_region:
1121 | self.push(self.inventory, region, hostname)
1122 | if self.nested_groups:
1123 | self.push_group(self.inventory, 'regions', region)
1124 |
1125 | # Inventory: Group by availability zone
1126 | if self.group_by_availability_zone:
1127 | self.push(self.inventory, instance.availability_zone, hostname)
1128 | if self.nested_groups:
1129 | if self.group_by_region:
1130 | self.push_group(self.inventory, region, instance.availability_zone)
1131 | self.push_group(self.inventory, 'zones', instance.availability_zone)
1132 |
1133 | # Inventory: Group by instance type
1134 | if self.group_by_instance_type:
1135 | type_name = self.to_safe('type_' + instance.instance_class)
1136 | self.push(self.inventory, type_name, hostname)
1137 | if self.nested_groups:
1138 | self.push_group(self.inventory, 'types', type_name)
1139 |
1140 | # Inventory: Group by VPC
1141 | if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
1142 | vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
1143 | self.push(self.inventory, vpc_id_name, hostname)
1144 | if self.nested_groups:
1145 | self.push_group(self.inventory, 'vpcs', vpc_id_name)
1146 |
1147 | # Inventory: Group by security group
1148 | if self.group_by_security_group:
1149 | try:
1150 | if instance.security_group:
1151 | key = self.to_safe("security_group_" + instance.security_group.name)
1152 | self.push(self.inventory, key, hostname)
1153 | if self.nested_groups:
1154 | self.push_group(self.inventory, 'security_groups', key)
1155 |
1156 | except AttributeError:
1157 | self.fail_with_error('\n'.join(['Package boto seems a bit older.',
1158 | 'Please upgrade boto >= 2.3.0.']))
1159 | # Inventory: Group by tag keys
1160 | if self.group_by_tag_keys:
1161 | for k, v in instance.tags.items():
1162 | if self.expand_csv_tags and v and ',' in v:
1163 | values = map(lambda x: x.strip(), v.split(','))
1164 | else:
1165 | values = [v]
1166 |
1167 | for v in values:
1168 | if v:
1169 | key = self.to_safe("tag_" + k + "=" + v)
1170 | else:
1171 | key = self.to_safe("tag_" + k)
1172 | self.push(self.inventory, key, hostname)
1173 | if self.nested_groups:
1174 | self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
1175 | if v:
1176 | self.push_group(self.inventory, self.to_safe("tag_" + k), key)
1177 |
1178 | # Inventory: Group by engine
1179 | if self.group_by_rds_engine:
1180 | self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
1181 | if self.nested_groups:
1182 | self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
1183 |
1184 | # Inventory: Group by parameter group
1185 | if self.group_by_rds_parameter_group:
1186 | self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
1187 | if self.nested_groups:
1188 | self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
1189 |
1190 | # Global Tag: instances without tags
1191 | if self.group_by_tag_none and len(instance.tags) == 0:
1192 | self.push(self.inventory, 'tag_none', hostname)
1193 | if self.nested_groups:
1194 | self.push_group(self.inventory, 'tags', 'tag_none')
1195 |
1196 | # Global Tag: all RDS instances
1197 | self.push(self.inventory, 'rds', hostname)
1198 |
1199 | self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
1200 | self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
1201 |
1202 | def add_elasticache_cluster(self, cluster, region):
1203 | ''' Adds an ElastiCache cluster to the inventory and index, as long as
1204 |         its nodes are addressable '''
1205 |
1206 | # Only want available clusters unless all_elasticache_clusters is True
1207 | if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
1208 | return
1209 |
1210 | # Select the best destination address
1211 | if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
1212 | # Memcached cluster
1213 | dest = cluster['ConfigurationEndpoint']['Address']
1214 | is_redis = False
1215 | else:
1216 |             # Redis single-node cluster
1217 | # Because all Redis clusters are single nodes, we'll merge the
1218 | # info from the cluster with info about the node
1219 | dest = cluster['CacheNodes'][0]['Endpoint']['Address']
1220 | is_redis = True
1221 |
1222 | if not dest:
1223 | # Skip clusters we cannot address (e.g. private VPC subnet)
1224 | return
1225 |
1226 | # Add to index
1227 | self.index[dest] = [region, cluster['CacheClusterId']]
1228 |
1229 | # Inventory: Group by instance ID (always a group of 1)
1230 | if self.group_by_instance_id:
1231 | self.inventory[cluster['CacheClusterId']] = [dest]
1232 | if self.nested_groups:
1233 | self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
1234 |
1235 | # Inventory: Group by region
1236 | if self.group_by_region and not is_redis:
1237 | self.push(self.inventory, region, dest)
1238 | if self.nested_groups:
1239 | self.push_group(self.inventory, 'regions', region)
1240 |
1241 | # Inventory: Group by availability zone
1242 | if self.group_by_availability_zone and not is_redis:
1243 | self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
1244 | if self.nested_groups:
1245 | if self.group_by_region:
1246 | self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
1247 | self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
1248 |
1249 | # Inventory: Group by node type
1250 | if self.group_by_instance_type and not is_redis:
1251 | type_name = self.to_safe('type_' + cluster['CacheNodeType'])
1252 | self.push(self.inventory, type_name, dest)
1253 | if self.nested_groups:
1254 | self.push_group(self.inventory, 'types', type_name)
1255 |
1256 | # Inventory: Group by VPC (information not available in the current
1257 | # AWS API version for ElastiCache)
1258 |
1259 | # Inventory: Group by security group
1260 | if self.group_by_security_group and not is_redis:
1261 |
1262 | # Check for the existence of the 'SecurityGroups' key and also if
1263 | # this key has some value. When the cluster is not placed in a SG
1264 | # the query can return None here and cause an error.
1265 | if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
1266 | for security_group in cluster['SecurityGroups']:
1267 | key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
1268 | self.push(self.inventory, key, dest)
1269 | if self.nested_groups:
1270 | self.push_group(self.inventory, 'security_groups', key)
1271 |
1272 | # Inventory: Group by engine
1273 | if self.group_by_elasticache_engine and not is_redis:
1274 | self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
1275 | if self.nested_groups:
1276 | self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
1277 |
1278 | # Inventory: Group by parameter group
1279 | if self.group_by_elasticache_parameter_group:
1280 | self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
1281 | if self.nested_groups:
1282 | self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
1283 |
1284 | # Inventory: Group by replication group
1285 | if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
1286 | self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
1287 | if self.nested_groups:
1288 | self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
1289 |
1290 | # Global Tag: all ElastiCache clusters
1291 | self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
1292 |
1293 | host_info = self.get_host_info_dict_from_describe_dict(cluster)
1294 |
1295 | self.inventory["_meta"]["hostvars"][dest] = host_info
1296 |
1297 | # Add the nodes
1298 | for node in cluster['CacheNodes']:
1299 | self.add_elasticache_node(node, cluster, region)
1300 |
1301 | def add_elasticache_node(self, node, cluster, region):
1302 | ''' Adds an ElastiCache node to the inventory and index, as long as
1303 | it is addressable '''
1304 |
1305 | # Only want available nodes unless all_elasticache_nodes is True
1306 | if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
1307 | return
1308 |
1309 | # Select the best destination address
1310 | dest = node['Endpoint']['Address']
1311 |
1312 | if not dest:
1313 | # Skip nodes we cannot address (e.g. private VPC subnet)
1314 | return
1315 |
1316 | node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
1317 |
1318 | # Add to index
1319 | self.index[dest] = [region, node_id]
1320 |
1321 | # Inventory: Group by node ID (always a group of 1)
1322 | if self.group_by_instance_id:
1323 | self.inventory[node_id] = [dest]
1324 | if self.nested_groups:
1325 | self.push_group(self.inventory, 'instances', node_id)
1326 |
1327 | # Inventory: Group by region
1328 | if self.group_by_region:
1329 | self.push(self.inventory, region, dest)
1330 | if self.nested_groups:
1331 | self.push_group(self.inventory, 'regions', region)
1332 |
1333 | # Inventory: Group by availability zone
1334 | if self.group_by_availability_zone:
1335 | self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
1336 | if self.nested_groups:
1337 | if self.group_by_region:
1338 | self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
1339 | self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
1340 |
1341 | # Inventory: Group by node type
1342 | if self.group_by_instance_type:
1343 | type_name = self.to_safe('type_' + cluster['CacheNodeType'])
1344 | self.push(self.inventory, type_name, dest)
1345 | if self.nested_groups:
1346 | self.push_group(self.inventory, 'types', type_name)
1347 |
1348 | # Inventory: Group by VPC (information not available in the current
1349 | # AWS API version for ElastiCache)
1350 |
1351 | # Inventory: Group by security group
1352 | if self.group_by_security_group:
1353 |
1354 | # Check for the existence of the 'SecurityGroups' key and also if
1355 | # this key has some value. When the cluster is not placed in a SG
1356 | # the query can return None here and cause an error.
1357 | if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
1358 | for security_group in cluster['SecurityGroups']:
1359 | key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
1360 | self.push(self.inventory, key, dest)
1361 | if self.nested_groups:
1362 | self.push_group(self.inventory, 'security_groups', key)
1363 |
1364 | # Inventory: Group by engine
1365 | if self.group_by_elasticache_engine:
1366 | self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
1367 | if self.nested_groups:
1368 | self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
1369 |
1370 | # Inventory: Group by parameter group (done at cluster level)
1371 |
1372 | # Inventory: Group by replication group (done at cluster level)
1373 |
1374 | # Inventory: Group by ElastiCache Cluster
1375 | if self.group_by_elasticache_cluster:
1376 | self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
1377 |
1378 | # Global Tag: all ElastiCache nodes
1379 | self.push(self.inventory, 'elasticache_nodes', dest)
1380 |
1381 | host_info = self.get_host_info_dict_from_describe_dict(node)
1382 |
1383 | if dest in self.inventory["_meta"]["hostvars"]:
1384 | self.inventory["_meta"]["hostvars"][dest].update(host_info)
1385 | else:
1386 | self.inventory["_meta"]["hostvars"][dest] = host_info
1387 |
1388 | def add_elasticache_replication_group(self, replication_group, region):
1389 | ''' Adds an ElastiCache replication group to the inventory and index '''
1390 |
1391 | # Only want available clusters unless all_elasticache_replication_groups is True
1392 | if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
1393 | return
1394 |
1395 | # Skip clusters we cannot address (e.g. private VPC subnet or clustered redis)
1396 | if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
1397 | replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
1398 | return
1399 |
1400 | # Select the best destination address (PrimaryEndpoint)
1401 | dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
1402 |
1403 | # Add to index
1404 | self.index[dest] = [region, replication_group['ReplicationGroupId']]
1405 |
1406 | # Inventory: Group by ID (always a group of 1)
1407 | if self.group_by_instance_id:
1408 | self.inventory[replication_group['ReplicationGroupId']] = [dest]
1409 | if self.nested_groups:
1410 | self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
1411 |
1412 | # Inventory: Group by region
1413 | if self.group_by_region:
1414 | self.push(self.inventory, region, dest)
1415 | if self.nested_groups:
1416 | self.push_group(self.inventory, 'regions', region)
1417 |
1418 | # Inventory: Group by availability zone (doesn't apply to replication groups)
1419 |
1420 | # Inventory: Group by node type (doesn't apply to replication groups)
1421 |
1422 | # Inventory: Group by VPC (information not available in the current
1423 |         # AWS API version for replication groups)
1424 |
1425 | # Inventory: Group by security group (doesn't apply to replication groups)
1426 | # Check this value in cluster level
1427 |
1428 | # Inventory: Group by engine (replication groups are always Redis)
1429 | if self.group_by_elasticache_engine:
1430 | self.push(self.inventory, 'elasticache_redis', dest)
1431 | if self.nested_groups:
1432 | self.push_group(self.inventory, 'elasticache_engines', 'redis')
1433 |
1434 | # Global Tag: all ElastiCache clusters
1435 | self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
1436 |
1437 | host_info = self.get_host_info_dict_from_describe_dict(replication_group)
1438 |
1439 | self.inventory["_meta"]["hostvars"][dest] = host_info
1440 |
1441 | def get_route53_records(self):
1442 | ''' Get and store the map of resource records to domain names that
1443 | point to them. '''
1444 |
1445 | if self.boto_profile:
1446 | r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
1447 | else:
1448 | r53_conn = route53.Route53Connection()
1449 | all_zones = r53_conn.get_zones()
1450 |
1451 | route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
1452 |
1453 | self.route53_records = {}
1454 |
1455 | for zone in route53_zones:
1456 | rrsets = r53_conn.get_all_rrsets(zone.id)
1457 |
1458 | for record_set in rrsets:
1459 | record_name = record_set.name
1460 |
1461 | if record_name.endswith('.'):
1462 | record_name = record_name[:-1]
1463 |
1464 | for resource in record_set.resource_records:
1465 | self.route53_records.setdefault(resource, set())
1466 | self.route53_records[resource].add(record_name)
1467 |
1468 | def get_instance_route53_names(self, instance):
1469 | ''' Check if an instance is referenced in the records we have from
1470 | Route53. If it is, return the list of domain names pointing to said
1471 | instance. If nothing points to it, return an empty list. '''
1472 |
1473 | instance_attributes = ['public_dns_name', 'private_dns_name',
1474 | 'ip_address', 'private_ip_address']
1475 |
1476 | name_list = set()
1477 |
1478 | for attrib in instance_attributes:
1479 | try:
1480 | value = getattr(instance, attrib)
1481 | except AttributeError:
1482 | continue
1483 |
1484 | if value in self.route53_records:
1485 | name_list.update(self.route53_records[value])
1486 |
1487 | return list(name_list)
1488 |
1489 | def get_host_info_dict_from_instance(self, instance):
1490 | instance_vars = {}
1491 | for key in vars(instance):
1492 | value = getattr(instance, key)
1493 | key = self.to_safe('ec2_' + key)
1494 |
1495 | # Handle complex types
1496 | # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
1497 | if key == 'ec2__state':
1498 | instance_vars['ec2_state'] = instance.state or ''
1499 | instance_vars['ec2_state_code'] = instance.state_code
1500 | elif key == 'ec2__previous_state':
1501 | instance_vars['ec2_previous_state'] = instance.previous_state or ''
1502 | instance_vars['ec2_previous_state_code'] = instance.previous_state_code
1503 | elif isinstance(value, (int, bool)):
1504 | instance_vars[key] = value
1505 | elif isinstance(value, six.string_types):
1506 | instance_vars[key] = value.strip()
1507 | elif value is None:
1508 | instance_vars[key] = ''
1509 | elif key == 'ec2_region':
1510 | instance_vars[key] = value.name
1511 | elif key == 'ec2__placement':
1512 | instance_vars['ec2_placement'] = value.zone
1513 | elif key == 'ec2_tags':
1514 | for k, v in value.items():
1515 | if self.expand_csv_tags and ',' in v:
1516 | v = list(map(lambda x: x.strip(), v.split(',')))
1517 | key = self.to_safe('ec2_tag_' + k)
1518 | instance_vars[key] = v
1519 | elif key == 'ec2_groups':
1520 | group_ids = []
1521 | group_names = []
1522 | for group in value:
1523 | group_ids.append(group.id)
1524 | group_names.append(group.name)
1525 | instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
1526 | instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
1527 | elif key == 'ec2_block_device_mapping':
1528 | instance_vars["ec2_block_devices"] = {}
1529 | for k, v in value.items():
1530 | instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
1531 | else:
1532 | pass
1533 | # TODO Product codes if someone finds them useful
1534 | # print key
1535 | # print type(value)
1536 | # print value
1537 |
1538 | instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
1539 |
1540 | return instance_vars
1541 |
1542 | def get_host_info_dict_from_describe_dict(self, describe_dict):
1543 | ''' Parses the dictionary returned by the API call into a flat list
1544 | of parameters. This method should be used only when 'describe' is
1545 | used directly because Boto doesn't provide specific classes. '''
1546 |
1547 | # I really don't agree with prefixing everything with 'ec2'
1548 | # because EC2, RDS and ElastiCache are different services.
1549 | # I'm just following the pattern used until now to not break any
1550 | # compatibility.
1551 |
1552 | host_info = {}
1553 | for key in describe_dict:
1554 | value = describe_dict[key]
1555 | key = self.to_safe('ec2_' + self.uncammelize(key))
1556 |
1557 | # Handle complex types
1558 |
1559 | # Target: Memcached Cache Clusters
1560 | if key == 'ec2_configuration_endpoint' and value:
1561 | host_info['ec2_configuration_endpoint_address'] = value['Address']
1562 | host_info['ec2_configuration_endpoint_port'] = value['Port']
1563 |
1564 | # Target: Cache Nodes and Redis Cache Clusters (single node)
1565 | if key == 'ec2_endpoint' and value:
1566 | host_info['ec2_endpoint_address'] = value['Address']
1567 | host_info['ec2_endpoint_port'] = value['Port']
1568 |
1569 | # Target: Redis Replication Groups
1570 | if key == 'ec2_node_groups' and value:
1571 | host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
1572 | host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
1573 | replica_count = 0
1574 | for node in value[0]['NodeGroupMembers']:
1575 | if node['CurrentRole'] == 'primary':
1576 | host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
1577 | host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
1578 | host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
1579 | elif node['CurrentRole'] == 'replica':
1580 | host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
1581 | host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
1582 | host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
1583 | replica_count += 1
1584 |
1585 | # Target: Redis Replication Groups
1586 | if key == 'ec2_member_clusters' and value:
1587 | host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
1588 |
1589 | # Target: All Cache Clusters
1590 | elif key == 'ec2_cache_parameter_group':
1591 | host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
1592 | host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
1593 | host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
1594 |
1595 | # Target: Almost everything
1596 | elif key == 'ec2_security_groups':
1597 |
1598 | # Skip if SecurityGroups is None
1599 | # (it is possible to have the key defined but no value in it).
1600 | if value is not None:
1601 | sg_ids = []
1602 | for sg in value:
1603 | sg_ids.append(sg['SecurityGroupId'])
1604 | host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
1605 |
1606 | # Target: Everything
1607 | # Preserve booleans and integers
1608 | elif isinstance(value, (int, bool)):
1609 | host_info[key] = value
1610 |
1611 | # Target: Everything
1612 | # Sanitize string values
1613 | elif isinstance(value, six.string_types):
1614 | host_info[key] = value.strip()
1615 |
1616 | # Target: Everything
1617 | # Replace None by an empty string
1618 | elif value is None:
1619 | host_info[key] = ''
1620 |
1621 | else:
1622 | # Remove non-processed complex types
1623 | pass
1624 |
1625 | return host_info
1626 |
1627 | def get_host_info(self):
1628 | ''' Get variables about a specific host '''
1629 |
1630 | if len(self.index) == 0:
1631 | # Need to load index from cache
1632 | self.load_index_from_cache()
1633 |
1634 | if self.args.host not in self.index:
1635 | # try updating the cache
1636 | self.do_api_calls_update_cache()
1637 | if self.args.host not in self.index:
1638 | # host might not exist anymore
1639 | return self.json_format_dict({}, True)
1640 |
1641 | (region, instance_id) = self.index[self.args.host]
1642 |
1643 | instance = self.get_instance(region, instance_id)
1644 | return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
1645 |
1646 | def push(self, my_dict, key, element):
1647 | ''' Push an element onto an array that may not have been defined in
1648 | the dict '''
1649 | group_info = my_dict.setdefault(key, [])
1650 | if isinstance(group_info, dict):
1651 | host_list = group_info.setdefault('hosts', [])
1652 | host_list.append(element)
1653 | else:
1654 | group_info.append(element)
1655 |
1656 | def push_group(self, my_dict, key, element):
1657 | ''' Push a group as a child of another group. '''
1658 | parent_group = my_dict.setdefault(key, {})
1659 | if not isinstance(parent_group, dict):
1660 | parent_group = my_dict[key] = {'hosts': parent_group}
1661 | child_groups = parent_group.setdefault('children', [])
1662 | if element not in child_groups:
1663 | child_groups.append(element)
1664 |
1665 | def get_inventory_from_cache(self):
1666 | ''' Reads the inventory from the cache file and returns it as a JSON
1667 | object '''
1668 |
1669 | with open(self.cache_path_cache, 'r') as f:
1670 | json_inventory = f.read()
1671 | return json_inventory
1672 |
1673 | def load_index_from_cache(self):
1674 |         ''' Reads the index from the cache file and sets self.index '''
1675 |
1676 | with open(self.cache_path_index, 'rb') as f:
1677 | self.index = json.load(f)
1678 |
1679 | def write_to_cache(self, data, filename):
1680 | ''' Writes data in JSON format to a file '''
1681 |
1682 | json_data = self.json_format_dict(data, True)
1683 | with open(filename, 'w') as f:
1684 | f.write(json_data)
1685 |
1686 | def uncammelize(self, key):
1687 | temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
1688 | return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
1689 |
1690 | def to_safe(self, word):
1691 | ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
1692 | regex = r"[^A-Za-z0-9\_"
1693 | if not self.replace_dash_in_groups:
1694 | regex += r"\-"
1695 | return re.sub(regex + "]", "_", word)
1696 |
1697 | def json_format_dict(self, data, pretty=False):
1698 | ''' Converts a dict to a JSON object and dumps it as a formatted
1699 | string '''
1700 |
1701 | if pretty:
1702 | return json.dumps(data, sort_keys=True, indent=2)
1703 | else:
1704 | return json.dumps(data)
1705 |
1706 |
1707 | if __name__ == '__main__':
1708 | # Run the script
1709 | Ec2Inventory()
1710 |
--------------------------------------------------------------------------------
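Note: the push() and push_group() helpers above define the JSON shape of the generated inventory — a plain group is a list of hostnames, and a group used as a parent of other groups is promoted to a dict with 'hosts' and 'children' keys, alongside the `_meta.hostvars` map. A minimal standalone sketch of that behavior (not part of ec2.py; the hostname and group names are made up):

    import json

    inventory = {'_meta': {'hostvars': {}}}

    def push(my_dict, key, element):
        # Plain groups are lists of hostnames ...
        group_info = my_dict.setdefault(key, [])
        if isinstance(group_info, dict):
            group_info.setdefault('hosts', []).append(element)
        else:
            group_info.append(element)

    def push_group(my_dict, key, element):
        # ... and a parent group becomes {'hosts': [...], 'children': [...]}
        parent_group = my_dict.setdefault(key, {})
        if not isinstance(parent_group, dict):
            parent_group = my_dict[key] = {'hosts': parent_group}
        children = parent_group.setdefault('children', [])
        if element not in children:
            children.append(element)

    push(inventory, 'us-east-1', 'host1')          # e.g. group_by_region
    push_group(inventory, 'regions', 'us-east-1')  # e.g. nested_groups
    print(json.dumps(inventory, indent=2))

Running the sketch prints `{"_meta": {"hostvars": {}}, "regions": {"children": ["us-east-1"]}, "us-east-1": ["host1"]}` — the same structure Ansible consumes from a dynamic inventory script's `--list` output.
--------------------------------------------------------------------------------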
/tests/playbooks/pas-infrastructure/outputs/hosts.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cyberark/pas-orchestrator/c1e00f645c8775653c2d574563cdd06082961a2a/tests/playbooks/pas-infrastructure/outputs/hosts.yml
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | */__pycache__
3 | *.pyc
4 |
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/README.md:
--------------------------------------------------------------------------------
1 |
2 | cf_deploy
3 | =========
4 | This role deploys a CloudFormation template to AWS
5 |
6 | Role Tasks
7 | --------------
8 | - **main** - Deploys the CloudFormation stack
9 |
10 | Role Variables
11 | --------------
12 |
13 | ### General variables
14 |
15 | - **deploy_bucket** - The S3 bucket the CloudFormation template is uploaded to before deployment
16 | - **cf_template_url** - The URL the CloudFormation template is fetched from before it is uploaded to the deployment bucket
17 | - **cf_template_parameters** - The parameters passed to the CloudFormation stack
18 | - **aws_region** - The AWS region the CloudFormation stack is deployed to
19 |
20 | Outputs
21 | ------------
22 | - **cf_output** - The JSON output with all the CloudFormation stack resources
23 |
24 | Dependencies
25 | ------------
26 |
27 |
28 | Example Playbook
29 | ----------------
30 |
31 | An example of how to use the role, with variables passed in as parameters:
32 |
33 | - hosts: localhost
34 | connection: local
35 | gather_facts: no
36 | tasks:
37 | - include_role:
38 | name: cf_deploy
39 |       vars:
40 |         deploy_bucket: mybucket
41 |         cf_template_url: https://raw.githubusercontent.com/organization/repository/cloudformation.template
42 |         cf_template_parameters:
43 |           Parameter1: Value1
44 |           Parameter2: Value2
45 |         aws_region: us-east-1
46 |
47 | Todo
48 | -------
49 |
50 |
51 | License
52 | -------
53 |
54 | BSD
55 |
56 | Author Information
57 | ------------------
58 |
59 | Avishay Bar,
60 | Cloud Initiatives team,
61 | CyberArk 2018
62 |
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for cf_deploy
3 |
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for cf_deploy
3 |
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/meta/.galaxy_install_info:
--------------------------------------------------------------------------------
1 | {install_date: 'Sun Dec 30 11:01:47 2018', version: master}
2 |
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 2.4
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # Provide a list of supported platforms, and for each platform a list of versions.
34 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
35 | # To view available platforms and versions (or releases), visit:
36 | # https://galaxy.ansible.com/api/v1/platforms/
37 | #
38 | # platforms:
39 | # - name: Fedora
40 | # versions:
41 | # - all
42 | # - 25
43 | # - name: SomePlatform
44 | # versions:
45 | # - all
46 | # - 1.0
47 | # - 7
48 | # - 99.99
49 |
50 | galaxy_tags: []
51 | # List tags for your role here, one per line. A tag is a keyword that describes
52 | # and categorizes the role. Users find roles by searching for tags. Be sure to
53 | # remove the '[]' above, if you add tags to this list.
54 | #
55 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
56 | # Maximum 20 tags per role.
57 |
58 | dependencies: []
59 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
60 | # if you add dependencies to this list.
61 |
--------------------------------------------------------------------------------
/tests/playbooks/roles/cf_deploy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for cf_deploy
3 |
4 | - name: Get Timestamp and Store it in a variable
5 | set_fact: "timestamp={{ lookup('pipe','date +%Y-%m-%d-%H-%M-%S') }}"
6 |
7 | - name: Set String for CloudFormation Stack ID
8 |   set_fact:
9 |     cloudformation_stack_id: "ACF-{{ timestamp }}"  # reuse the stored timestamp so the stack ID matches the workspace
10 |
11 | - name: Create workspace directory on temp folder
12 | file:
13 | path: "/tmp/tmp-{{ timestamp }}"
14 | state: directory
15 | register: workspace
16 |
17 | - set_fact:
18 | tmp_path: "{{ workspace.path }}"
19 |
20 | - name: Get CloudFormation Template from Git
21 | get_url:
22 | url: "{{ cf_template_url }}"
23 | dest: "{{ tmp_path }}/cf.json"
24 |
25 | - name: Manipulate CloudFormation
26 |   shell: "{{ manipulation_commands }}"  # expected to write its result to {{ tmp_path }}/cf-out.json
27 |   when: manipulation_commands != ""
28 |
29 | - name: Write the template unchanged when no manipulation commands are given
30 | shell: "cat {{ tmp_path }}/cf.json > {{ tmp_path }}/cf-out.json"
31 | when: manipulation_commands == ""
32 |
33 | - name: Upload CloudFormation Template to S3
34 | aws_s3:
35 | bucket: "{{ deploy_bucket }}"
36 | object: "{{ tmp_path }}/cf.json"
37 | src: "{{ tmp_path }}/cf-out.json"
38 | mode: put
39 |
40 | - name: Create CloudFormation Stack
41 | cloudformation:
42 | stack_name: "{{ cloudformation_stack_id }}"
43 | state: "present"
44 | region: "{{ aws_region }}"
45 | disable_rollback: true
46 | template_url: "https://s3.amazonaws.com/{{ deploy_bucket }}{{ tmp_path }}/cf.json"
47 | template_parameters: "{{ cf_template_parameters }}"
48 | tags:
49 | Stack: "ansible-cloudformation"
50 | register: cf
51 |
52 | - name: Register Output to ansible fact
53 | set_fact:
54 | cf_output: "{{ cf.stack_resources }}"
55 |
--------------------------------------------------------------------------------
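Note: a minimal sketch of invoking this tasks file through the role and consuming the `cf_output` fact it registers at the end; the bucket name and template URL below are hypothetical placeholders:

    - hosts: localhost
      connection: local
      gather_facts: no
      tasks:
        - include_role:
            name: cf_deploy
          vars:
            deploy_bucket: my-deploy-bucket                          # hypothetical bucket
            cf_template_url: https://example.com/templates/cf.json   # hypothetical URL
            cf_template_parameters:
              Parameter1: Value1
            aws_region: us-east-1

        - name: Show the resources of the deployed stack
          debug:
            var: cf_output
--------------------------------------------------------------------------------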
/tests/playbooks/roles/cf_deploy/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for cf_deploy
3 |
4 | manipulation_commands: ""
5 |
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible==2.9.6
2 | ansible-lint==4.2.0
3 | argcomplete==1.11.1
4 | awscli==1.18.32
5 | botocore==1.15.32
6 | cffi==1.14.0
7 | colorama==0.4.3
8 | cryptography==2.8
9 | docopt==0.6.2
10 | docutils==0.15.2
11 | importlib-metadata==1.6.0
12 | Jinja2==2.11.1
13 | jmespath==0.9.5
14 | jq==0.1.6
15 | json2yaml==1.1.1
16 | MarkupSafe==1.1.1
17 | pathspec==0.7.0
18 | pyaml==20.3.1
19 | pyasn1==0.4.8
20 | pycparser==2.20
21 | python-dateutil==2.8.1
22 | PyYAML==5.3.1
23 | rsa==3.4.2
24 | ruamel.yaml==0.16.10
25 | ruamel.yaml.clib==0.2.0
26 | s3transfer==0.3.3
27 | six==1.14.0
28 | urllib3==1.25.8
29 | xmltodict==0.12.0
30 | yamllint==1.21.0
31 | yq==2.10.0
32 | zipp==3.1.0
33 |
--------------------------------------------------------------------------------