├── .gitignore
├── CONTRIBUTING.md
├── Dockerfile
├── Pipfile
├── Pipfile.lock
├── README.md
├── ansible.cfg
├── base.Dockerfile
├── entrypoint.sh
├── inventory
├── inventory.aws_ec2.yml
└── inventory.yml
├── op.py
├── playbooks
├── aws
│ ├── create_users.yml
│ ├── group_vars
│ │ └── all.yml
│ ├── provision.yml
│ ├── provision_istio.yml
│ ├── start_instances.yml
│ ├── stop_instances.yml
│ └── teardown.yml
└── roles
│ ├── create_users
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── host_facts_aws
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── host_prep_bastion
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── host_prep_general
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── host_prep_istio
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── 99-elasticsearch.conf
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── host_prep_openshift
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ └── docker-storage-setup.j2
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── install_istio
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── istio-installation.yaml
│ │ └── master-config.patch
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── install_openshift
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ansible.cfg
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── certs.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── hosts.3.10.example
│ │ ├── hosts.3.11.example
│ │ ├── hosts.3.9.example
│ │ ├── hosts.multi.3.10.ini.j2
│ │ ├── hosts.multi.3.11.ini.j2
│ │ └── hosts.multi.3.9.ini.j2
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ ├── provision_aws
│ ├── README.md
│ ├── defaults
│ │ ├── main.yml
│ │ └── rhel_ami.sh
│ ├── files
│ │ └── user_data
│ │ │ ├── app.yml
│ │ │ ├── infra.yml
│ │ │ └── master.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── ec2.yml
│ │ ├── ec2_teardown.yml
│ │ ├── keypair.yml
│ │ ├── keypair_teardown.yml
│ │ ├── main.yml
│ │ ├── route53.yml
│ │ ├── route53_teardown.yml
│ │ ├── security_groups.yml
│ │ ├── security_groups_teardown.yml
│ │ ├── teardown.yml
│ │ ├── vpc.yml
│ │ └── vpc_teardown.yml
│ ├── templates
│ │ └── user_data
│ │ │ └── master.yml.j2
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
│ └── rhsm_subscribe
│ ├── README.md
│ ├── defaults
│ └── main.yml
│ ├── handlers
│ └── main.yml
│ ├── meta
│ └── main.yml
│ ├── tasks
│ ├── aws.yml
│ └── main.yml
│ ├── tests
│ ├── inventory
│ └── test.yml
│ └── vars
│ └── main.yml
└── vars
├── aws.example.env
└── aws.example.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | # Project
3 | ###############################################################################
4 |
5 | playbooks/aws/keys/*
6 | !playbooks/aws/keys/.gitkeep
7 |
8 | vars/*
9 | !vars/aws.example.yml
10 | !vars/aws.example.env
11 |
12 | .vscode/
13 | .idea/
14 |
15 | ###############################################################################
16 | # Python
17 | # https://github.com/github/gitignore/blob/master/Python.gitignore
18 | ###############################################################################
19 |
20 | # Byte-compiled / optimized / DLL files
21 | __pycache__/
22 | *.py[cod]
23 | *$py.class
24 |
25 | # C extensions
26 | *.so
27 |
28 | # Distribution / packaging
29 | .Python
30 | env/
31 | build/
32 | develop-eggs/
33 | dist/
34 | downloads/
35 | eggs/
36 | .eggs/
37 | lib/
38 | lib64/
39 | parts/
40 | sdist/
41 | var/
42 | wheels/
43 | *.egg-info/
44 | .installed.cfg
45 | *.egg
46 |
47 | # PyInstaller
48 | # Usually these files are written by a python script from a template
49 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
50 | *.manifest
51 | *.spec
52 |
53 | # Installer logs
54 | pip-log.txt
55 | pip-delete-this-directory.txt
56 |
57 | # Unit test / coverage reports
58 | htmlcov/
59 | .tox/
60 | .coverage
61 | .coverage.*
62 | .cache
63 | nosetests.xml
64 | coverage.xml
65 | *,cover
66 | .hypothesis/
67 |
68 | # Translations
69 | *.mo
70 | *.pot
71 |
72 | # Django stuff:
73 | *.log
74 | local_settings.py
75 |
76 | # Flask stuff:
77 | instance/
78 | .webassets-cache
79 |
80 | # Scrapy stuff:
81 | .scrapy
82 |
83 | # Sphinx documentation
84 | docs/_build/
85 |
86 | # PyBuilder
87 | target/
88 |
89 | # Jupyter Notebook
90 | .ipynb_checkpoints
91 |
92 | # pyenv
93 | .python-version
94 |
95 | # celery beat schedule file
96 | celerybeat-schedule
97 |
98 | # dotenv
99 | .env
100 |
101 | # virtualenv
102 | .venv/
103 | venv/
104 | ENV/
105 |
106 | # Spyder project settings
107 | .spyderproject
108 |
109 | # Rope project settings
110 | .ropeproject
111 |
112 | ###############################################################################
113 | # VirtualEnv
114 | # https://github.com/github/gitignore/blob/master/Global/VirtualEnv.gitignore
115 | ###############################################################################
116 |
117 | # Virtualenv
118 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
119 | .Python
120 | [Bb]in
121 | [Ii]nclude
122 | [Ll]ib
123 | [Ll]ib64
124 | [Ll]ocal
125 | [Ss]cripts
126 | pyvenv.cfg
127 | .venv
128 | pip-selfcheck.json
129 |
130 | ###############################################################################
131 | # JetBrains
132 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
133 | ###############################################################################
134 |
135 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
136 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
137 |
138 | # User-specific stuff:
139 | .idea/workspace.xml
140 | .idea/tasks.xml
141 |
142 | # Sensitive or high-churn files:
143 | .idea/dataSources/
144 | .idea/dataSources.ids
145 | .idea/dataSources.xml
146 | .idea/dataSources.local.xml
147 | .idea/sqlDataSources.xml
148 | .idea/dynamic.xml
149 | .idea/uiDesigner.xml
150 |
151 | # Gradle:
152 | .idea/gradle.xml
153 | .idea/libraries
154 |
155 | # Mongo Explorer plugin:
156 | .idea/mongoSettings.xml
157 |
158 | ## File-based project format:
159 | *.iws
160 |
161 | ## Plugin-specific files:
162 |
163 | # IntelliJ
164 | /out/
165 |
166 | # mpeltonen/sbt-idea plugin
167 | .idea_modules/
168 |
169 | # JIRA plugin
170 | atlassian-ide-plugin.xml
171 |
172 | # Crashlytics plugin (for Android Studio and IntelliJ)
173 | com_crashlytics_export_strings.xml
174 | crashlytics.properties
175 | crashlytics-build.properties
176 | fabric.properties
177 |
178 | ###############################################################################
179 | # Eclipse
180 | # https://github.com/github/gitignore/blob/master/Global/Eclipse.gitignore
181 | ###############################################################################
182 |
183 |
184 | .metadata
185 | bin/
186 | tmp/
187 | *.tmp
188 | *.bak
189 | *.swp
190 | *~.nib
191 | local.properties
192 | .settings/
193 | .loadpath
194 | .recommenders
195 |
196 | # Eclipse Core
197 | .project
198 |
199 | # External tool builders
200 | .externalToolBuilders/
201 |
202 | # Locally stored "Eclipse launch configurations"
203 | *.launch
204 |
205 | # PyDev specific (Python IDE for Eclipse)
206 | *.pydevproject
207 |
208 | # CDT-specific (C/C++ Development Tooling)
209 | .cproject
210 |
211 | # JDT-specific (Eclipse Java Development Tools)
212 | .classpath
213 |
214 | # Java annotation processor (APT)
215 | .factorypath
216 |
217 | # PDT-specific (PHP Development Tools)
218 | .buildpath
219 |
220 | # sbteclipse plugin
221 | .target
222 |
223 | # Tern plugin
224 | .tern-project
225 |
226 | # TeXlipse plugin
227 | .texlipse
228 |
229 | # STS (Spring Tool Suite)
230 | .springBeans
231 |
232 | # Code Recommenders
233 | .recommenders/
234 |
235 | ###############################################################################
236 | # Sublime Text
237 | # https://github.com/github/gitignore/blob/master/Global/SublimeText.gitignore
238 | ###############################################################################
239 |
240 | # cache files for sublime text
241 | *.tmlanguage.cache
242 | *.tmPreferences.cache
243 | *.stTheme.cache
244 |
245 | # workspace files are user-specific
246 | *.sublime-workspace
247 |
248 | # project files should be checked into the repository, unless a significant
249 | # proportion of contributors will probably not be using SublimeText
250 | # *.sublime-project
251 |
252 | # sftp configuration file
253 | sftp-config.json
254 |
255 | # Package control specific files
256 | Package Control.last-run
257 | Package Control.ca-list
258 | Package Control.ca-bundle
259 | Package Control.system-ca-bundle
260 | Package Control.cache/
261 | Package Control.ca-certs/
262 | Package Control.merged-ca-bundle
263 | Package Control.user-ca-bundle
264 | oscrypto-ca-bundle.crt
265 | bh_unicode_properties.cache
266 |
267 | # Sublime-github package stores a github token in this file
268 | # https://packagecontrol.io/packages/sublime-github
269 | GitHub.sublime-settings
270 |
271 | ###############################################################################
272 | # Vim
273 | # https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
274 | ###############################################################################
275 |
276 | # swap
277 | [._]*.s[a-v][a-z]
278 | [._]*.sw[a-p]
279 | [._]s[a-v][a-z]
280 | [._]sw[a-p]
281 | # session
282 | Session.vim
283 | # temporary
284 | .netrwhist
285 | *~
286 | # auto-generated tag files
287 | tags
288 |
289 | ###############################################################################
290 | # Linux
291 | # https://github.com/github/gitignore/blob/master/Global/Linux.gitignore
292 | ###############################################################################
293 |
294 | *~
295 |
296 | # temporary files which can be created if a process still has a handle open of a deleted file
297 | .fuse_hidden*
298 |
299 | # KDE directory preferences
300 | .directory
301 |
302 | # Linux trash folder which might appear on any partition or disk
303 | .Trash-*
304 |
305 | # .nfs files are created when an open file is removed but is still being accessed
306 | .nfs*
307 |
308 | ###############################################################################
309 | # macOS
310 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore
311 | ###############################################################################
312 |
313 | *.DS_Store
314 | .AppleDouble
315 | .LSOverride
316 |
317 | # Icon must end with two \r
318 | Icon
319 |
320 |
321 | # Thumbnails
322 | ._*
323 |
324 | # Files that might appear in the root of a volume
325 | .DocumentRevisions-V100
326 | .fseventsd
327 | .Spotlight-V100
328 | .TemporaryItems
329 | .Trashes
330 | .VolumeIcon.icns
331 | .com.apple.timemachine.donotpresent
332 |
333 | # Directories potentially created on remote AFP share
334 | .AppleDB
335 | .AppleDesktop
336 | Network Trash Folder
337 | Temporary Items
338 | .apdisk
339 |
340 | ###############################################################################
341 | # Windows
342 | # https://github.com/github/gitignore/blob/master/Global/Windows.gitignore
343 | ###############################################################################
344 |
345 | # Windows thumbnail cache files
346 | Thumbs.db
347 | ehthumbs.db
348 | ehthumbs_vista.db
349 |
350 | # Folder config file
351 | Desktop.ini
352 |
353 | # Recycle Bin used on file shares
354 | $RECYCLE.BIN/
355 |
356 | # Windows Installer files
357 | *.cab
358 | *.msi
359 | *.msm
360 | *.msp
361 |
362 | # Windows shortcuts
363 | *.lnk
364 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution Guidelines
2 |
3 | ## Did you find a bug?
4 |
5 | - **Ensure the bug was not already reported** by searching on GitHub
6 | under [Issues](https://github.com/jaredhocutt/openshift-provision/issues).
7 |
8 | - If you're unable to find an open issue addressing the problem,
9 | [open a new one](https://github.com/jaredhocutt/openshift-provision/issues/new).
10 |
11 |   Be sure to include a **title and clear description** and as much relevant
12 |   information as possible.
13 |
14 | ## Did you write a patch that fixes a bug?
15 |
16 | - Open a new GitHub pull request with the patch.
17 |
18 | - Ensure the PR description clearly describes the problem and solution.
19 | Include the relevant issue number if applicable.
20 |
21 | ## Do you intend to add a new feature or change an existing one?
22 |
23 | - Open a [new GitHub issue](https://github.com/jaredhocutt/openshift-provision/issues/new)
24 | describing the new feature or change to an existing one.
25 |
26 | - Add the **enhancement** label to the issue.
27 |
28 | - Write your code and test it.
29 |
30 | - Open a new GitHub pull request with the new feature.
31 |
32 | - Ensure the PR description clearly describes the changes made.
33 | Include the relevant issue number.
34 |
35 | ## Thanks!
36 |
37 | This OpenShift provision project is a volunteer effort. We welcome any
38 | help in making this project better!
39 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM quay.io/jhocutt/openshift-provision-base:latest
2 |
3 | ENV PYCURL_SSL_LIBRARY=openssl
4 |
5 | WORKDIR /app
6 |
7 | COPY Pipfile /app/Pipfile
8 | COPY Pipfile.lock /app/Pipfile.lock
9 | RUN pipenv install --system --deploy
10 |
11 | COPY . /app
12 |
13 | ENTRYPOINT ["/app/entrypoint.sh"]
14 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.python.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [dev-packages]
7 | pylint = "<2.0.0"
8 |
9 | [packages]
10 | ansible = "==2.6.6"
11 | boto = "*"
12 | "boto3" = "*"
13 | ovirt-engine-sdk-python = "*"
14 |
15 | [requires]
16 | python_version = "3.6"
17 |
--------------------------------------------------------------------------------
/Pipfile.lock:
--------------------------------------------------------------------------------
1 | {
2 | "_meta": {
3 | "hash": {
4 | "sha256": "9c342d5dd92cf5b4296826ce0b796cf845a98677f242d67d50e6a88e1b7a77cf"
5 | },
6 | "pipfile-spec": 6,
7 | "requires": {
8 | "python_version": "3.6"
9 | },
10 | "sources": [
11 | {
12 | "name": "pypi",
13 | "url": "https://pypi.python.org/simple",
14 | "verify_ssl": true
15 | }
16 | ]
17 | },
18 | "default": {
19 | "ansible": {
20 | "hashes": [
21 | "sha256:15ffb3447ed9fa072288d0fdeaa0086ed25d65d683441fa777d95cfddeacb108"
22 | ],
23 | "index": "pypi",
24 | "version": "==2.6.6"
25 | },
26 | "bcrypt": {
27 | "hashes": [
28 | "sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89",
29 | "sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42",
30 | "sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294",
31 | "sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161",
32 | "sha256:6305557019906466fc42dbc53b46da004e72fd7a551c044a827e572c82191752",
33 | "sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31",
34 | "sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5",
35 | "sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c",
36 | "sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0",
37 | "sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de",
38 | "sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e",
39 | "sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052",
40 | "sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09",
41 | "sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105",
42 | "sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133",
43 | "sha256:ce4e4f0deb51d38b1611a27f330426154f2980e66582dc5f438aad38b5f24fc1",
44 | "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7",
45 | "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc"
46 | ],
47 | "version": "==3.1.7"
48 | },
49 | "boto": {
50 | "hashes": [
51 | "sha256:147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8",
52 | "sha256:ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a"
53 | ],
54 | "index": "pypi",
55 | "version": "==2.49.0"
56 | },
57 | "boto3": {
58 | "hashes": [
59 | "sha256:122603b00f8c458236d1bd09850bdea56fc45f271e75ca38e66dbce37f72cada",
60 | "sha256:99ec19dc4f0aa8a8354db7baebe1ff57bd18aeb6a539b28693b2e8ca8dc3d85b"
61 | ],
62 | "index": "pypi",
63 | "version": "==1.9.80"
64 | },
65 | "botocore": {
66 | "hashes": [
67 | "sha256:3baf129118575602ada9926f5166d82d02273c250d0feb313fc270944b27c48b",
68 | "sha256:dc080aed4f9b220a9e916ca29ca97a9d37e8e1d296fe89cbaeef929bf0c8066b"
69 | ],
70 | "version": "==1.12.253"
71 | },
72 | "cffi": {
73 | "hashes": [
74 | "sha256:001bf3242a1bb04d985d63e138230802c6c8d4db3668fb545fb5005ddf5bb5ff",
75 | "sha256:00789914be39dffba161cfc5be31b55775de5ba2235fe49aa28c148236c4e06b",
76 | "sha256:028a579fc9aed3af38f4892bdcc7390508adabc30c6af4a6e4f611b0c680e6ac",
77 | "sha256:14491a910663bf9f13ddf2bc8f60562d6bc5315c1f09c704937ef17293fb85b0",
78 | "sha256:1cae98a7054b5c9391eb3249b86e0e99ab1e02bb0cc0575da191aedadbdf4384",
79 | "sha256:2089ed025da3919d2e75a4d963d008330c96751127dd6f73c8dc0c65041b4c26",
80 | "sha256:2d384f4a127a15ba701207f7639d94106693b6cd64173d6c8988e2c25f3ac2b6",
81 | "sha256:337d448e5a725bba2d8293c48d9353fc68d0e9e4088d62a9571def317797522b",
82 | "sha256:399aed636c7d3749bbed55bc907c3288cb43c65c4389964ad5ff849b6370603e",
83 | "sha256:3b911c2dbd4f423b4c4fcca138cadde747abdb20d196c4a48708b8a2d32b16dd",
84 | "sha256:3d311bcc4a41408cf5854f06ef2c5cab88f9fded37a3b95936c9879c1640d4c2",
85 | "sha256:62ae9af2d069ea2698bf536dcfe1e4eed9090211dbaafeeedf5cb6c41b352f66",
86 | "sha256:66e41db66b47d0d8672d8ed2708ba91b2f2524ece3dee48b5dfb36be8c2f21dc",
87 | "sha256:675686925a9fb403edba0114db74e741d8181683dcf216be697d208857e04ca8",
88 | "sha256:7e63cbcf2429a8dbfe48dcc2322d5f2220b77b2e17b7ba023d6166d84655da55",
89 | "sha256:8a6c688fefb4e1cd56feb6c511984a6c4f7ec7d2a1ff31a10254f3c817054ae4",
90 | "sha256:8c0ffc886aea5df6a1762d0019e9cb05f825d0eec1f520c51be9d198701daee5",
91 | "sha256:95cd16d3dee553f882540c1ffe331d085c9e629499ceadfbda4d4fde635f4b7d",
92 | "sha256:99f748a7e71ff382613b4e1acc0ac83bf7ad167fb3802e35e90d9763daba4d78",
93 | "sha256:b8c78301cefcf5fd914aad35d3c04c2b21ce8629b5e4f4e45ae6812e461910fa",
94 | "sha256:c420917b188a5582a56d8b93bdd8e0f6eca08c84ff623a4c16e809152cd35793",
95 | "sha256:c43866529f2f06fe0edc6246eb4faa34f03fe88b64a0a9a942561c8e22f4b71f",
96 | "sha256:cab50b8c2250b46fe738c77dbd25ce017d5e6fb35d3407606e7a4180656a5a6a",
97 | "sha256:cef128cb4d5e0b3493f058f10ce32365972c554572ff821e175dbc6f8ff6924f",
98 | "sha256:cf16e3cf6c0a5fdd9bc10c21687e19d29ad1fe863372b5543deaec1039581a30",
99 | "sha256:e56c744aa6ff427a607763346e4170629caf7e48ead6921745986db3692f987f",
100 | "sha256:e577934fc5f8779c554639376beeaa5657d54349096ef24abe8c74c5d9c117c3",
101 | "sha256:f2b0fa0c01d8a0c7483afd9f31d7ecf2d71760ca24499c8697aeb5ca37dc090c"
102 | ],
103 | "version": "==1.14.0"
104 | },
105 | "cryptography": {
106 | "hashes": [
107 | "sha256:091d31c42f444c6f519485ed528d8b451d1a0c7bf30e8ca583a0cac44b8a0df6",
108 | "sha256:18452582a3c85b96014b45686af264563e3e5d99d226589f057ace56196ec78b",
109 | "sha256:1dfa985f62b137909496e7fc182dac687206d8d089dd03eaeb28ae16eec8e7d5",
110 | "sha256:1e4014639d3d73fbc5ceff206049c5a9a849cefd106a49fa7aaaa25cc0ce35cf",
111 | "sha256:22e91636a51170df0ae4dcbd250d318fd28c9f491c4e50b625a49964b24fe46e",
112 | "sha256:3b3eba865ea2754738616f87292b7f29448aec342a7c720956f8083d252bf28b",
113 | "sha256:651448cd2e3a6bc2bb76c3663785133c40d5e1a8c1a9c5429e4354201c6024ae",
114 | "sha256:726086c17f94747cedbee6efa77e99ae170caebeb1116353c6cf0ab67ea6829b",
115 | "sha256:844a76bc04472e5135b909da6aed84360f522ff5dfa47f93e3dd2a0b84a89fa0",
116 | "sha256:88c881dd5a147e08d1bdcf2315c04972381d026cdb803325c03fe2b4a8ed858b",
117 | "sha256:96c080ae7118c10fcbe6229ab43eb8b090fccd31a09ef55f83f690d1ef619a1d",
118 | "sha256:a0c30272fb4ddda5f5ffc1089d7405b7a71b0b0f51993cb4e5dbb4590b2fc229",
119 | "sha256:bb1f0281887d89617b4c68e8db9a2c42b9efebf2702a3c5bf70599421a8623e3",
120 | "sha256:c447cf087cf2dbddc1add6987bbe2f767ed5317adb2d08af940db517dd704365",
121 | "sha256:c4fd17d92e9d55b84707f4fd09992081ba872d1a0c610c109c18e062e06a2e55",
122 | "sha256:d0d5aeaedd29be304848f1c5059074a740fa9f6f26b84c5b63e8b29e73dfc270",
123 | "sha256:daf54a4b07d67ad437ff239c8a4080cfd1cc7213df57d33c97de7b4738048d5e",
124 | "sha256:e993468c859d084d5579e2ebee101de8f5a27ce8e2159959b6673b418fd8c785",
125 | "sha256:f118a95c7480f5be0df8afeb9a11bd199aa20afab7a96bcf20409b411a3a85f0"
126 | ],
127 | "version": "==2.9.2"
128 | },
129 | "docutils": {
130 | "hashes": [
131 | "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0",
132 | "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827",
133 | "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"
134 | ],
135 | "version": "==0.15.2"
136 | },
137 | "jinja2": {
138 | "hashes": [
139 | "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0",
140 | "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"
141 | ],
142 | "version": "==2.11.2"
143 | },
144 | "jmespath": {
145 | "hashes": [
146 | "sha256:695cb76fa78a10663425d5b73ddc5714eb711157e52704d69be03b1a02ba4fec",
147 | "sha256:cca55c8d153173e21baa59983015ad0daf603f9cb799904ff057bfb8ff8dc2d9"
148 | ],
149 | "version": "==0.9.5"
150 | },
151 | "markupsafe": {
152 | "hashes": [
153 | "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
154 | "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
155 | "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
156 | "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
157 | "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42",
158 | "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
159 | "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
160 | "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
161 | "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
162 | "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
163 | "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
164 | "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b",
165 | "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
166 | "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15",
167 | "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
168 | "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
169 | "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
170 | "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
171 | "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
172 | "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
173 | "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
174 | "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
175 | "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
176 | "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
177 | "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
178 | "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
179 | "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
180 | "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
181 | "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
182 | "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
183 | "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2",
184 | "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7",
185 | "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"
186 | ],
187 | "version": "==1.1.1"
188 | },
189 | "ovirt-engine-sdk-python": {
190 | "hashes": [
191 | "sha256:65d375b1f29ffb6ddf86352f70b729aa2ecd287b6d4ca017c9bebde043a7bedd"
192 | ],
193 | "index": "pypi",
194 | "version": "==4.2.9"
195 | },
196 | "paramiko": {
197 | "hashes": [
198 | "sha256:920492895db8013f6cc0179293147f830b8c7b21fdfc839b6bad760c27459d9f",
199 | "sha256:9c980875fa4d2cb751604664e9a2d0f69096643f5be4db1b99599fe114a97b2f"
200 | ],
201 | "version": "==2.7.1"
202 | },
203 | "pycparser": {
204 | "hashes": [
205 | "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
206 | "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
207 | ],
208 | "version": "==2.20"
209 | },
210 | "pycurl": {
211 | "hashes": [
212 | "sha256:1957c867e2a341f5526c107c7bbc5014d6e75fdc2a14294fcb8a47663fbd2e15",
213 | "sha256:50aee0469511a9708a1f1a50d510b5ec2013fc6f5e720c32bbcb3b9c7b0f45b1",
214 | "sha256:667db26516e50ce4a853745906f3b149c24756d85061b9d966eb7ec43a8c48a4",
215 | "sha256:7cc13d3421cbd31921d77e22d1f57c0e1a8d0fb461938a587689a93162ccef2f",
216 | "sha256:a0c62dbc66b9b947832307d6cf7bdb5e4da906ce7b3efe6f74292e8f3dc5abe3",
217 | "sha256:a6966e8d9ccda31c6d077c4f8673aaa88141cc73d50e110e93e627b816d17fd1",
218 | "sha256:beadfa7f052626864d70eb33cec8f2aeece12dfb483c2424cc07b057f83b7d35",
219 | "sha256:c5c379c8cc777dda397f86f0d0049480250ae84a82a9d99d668f461d368fb39c",
220 | "sha256:ec7dd291545842295b7b56c12c90ffad2976cc7070c98d7b1517b7b6cd5994b3"
221 | ],
222 | "version": "==7.43.0.5"
223 | },
224 | "pynacl": {
225 | "hashes": [
226 | "sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255",
227 | "sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c",
228 | "sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e",
229 | "sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae",
230 | "sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621",
231 | "sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56",
232 | "sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39",
233 | "sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310",
234 | "sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1",
235 | "sha256:53126cd91356342dcae7e209f840212a58dcf1177ad52c1d938d428eebc9fee5",
236 | "sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a",
237 | "sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786",
238 | "sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b",
239 | "sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b",
240 | "sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f",
241 | "sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20",
242 | "sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415",
243 | "sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715",
244 | "sha256:bf459128feb543cfca16a95f8da31e2e65e4c5257d2f3dfa8c0c1031139c9c92",
245 | "sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1",
246 | "sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0"
247 | ],
248 | "version": "==1.3.0"
249 | },
250 | "python-dateutil": {
251 | "hashes": [
252 | "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c",
253 | "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"
254 | ],
255 | "markers": "python_version >= '2.7'",
256 | "version": "==2.8.1"
257 | },
258 | "pyyaml": {
259 | "hashes": [
260 | "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97",
261 | "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76",
262 | "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2",
263 | "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648",
264 | "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf",
265 | "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f",
266 | "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2",
267 | "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee",
268 | "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d",
269 | "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c",
270 | "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"
271 | ],
272 | "version": "==5.3.1"
273 | },
274 | "s3transfer": {
275 | "hashes": [
276 | "sha256:90dc18e028989c609146e241ea153250be451e05ecc0c2832565231dacdf59c1",
277 | "sha256:c7a9ec356982d5e9ab2d4b46391a7d6a950e2b04c472419f5fdec70cc0ada72f"
278 | ],
279 | "version": "==0.1.13"
280 | },
281 | "six": {
282 | "hashes": [
283 | "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a",
284 | "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"
285 | ],
286 | "version": "==1.14.0"
287 | },
288 | "urllib3": {
289 | "hashes": [
290 | "sha256:4c291ca23bbb55c76518905869ef34bdd5f0e46af7afe6861e8375643ffee1a0",
291 | "sha256:9a247273df709c4fedb38c711e44292304f73f39ab01beda9f6b9fc375669ac3"
292 | ],
293 | "index": "pypi",
294 | "version": "==1.24.2"
295 | }
296 | },
297 | "develop": {
298 | "astroid": {
299 | "hashes": [
300 | "sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756",
301 | "sha256:d25869fc7f44f1d9fb7d24fd7ea0639656f5355fc3089cd1f3d18c6ec6b124c7"
302 | ],
303 | "version": "==1.6.6"
304 | },
305 | "isort": {
306 | "hashes": [
307 | "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1",
308 | "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd"
309 | ],
310 | "version": "==4.3.21"
311 | },
312 | "lazy-object-proxy": {
313 | "hashes": [
314 | "sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d",
315 | "sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449",
316 | "sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08",
317 | "sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a",
318 | "sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50",
319 | "sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd",
320 | "sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239",
321 | "sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb",
322 | "sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea",
323 | "sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e",
324 | "sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156",
325 | "sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142",
326 | "sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442",
327 | "sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62",
328 | "sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db",
329 | "sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531",
330 | "sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383",
331 | "sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a",
332 | "sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357",
333 | "sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4",
334 | "sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0"
335 | ],
336 | "version": "==1.4.3"
337 | },
338 | "mccabe": {
339 | "hashes": [
340 | "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
341 | "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
342 | ],
343 | "version": "==0.6.1"
344 | },
345 | "pylint": {
346 | "hashes": [
347 | "sha256:02c2b6d268695a8b64ad61847f92e611e6afcff33fd26c3a2125370c4662905d",
348 | "sha256:ee1e85575587c5b58ddafa25e1c1b01691ef172e139fc25585e5d3f02451da93"
349 | ],
350 | "index": "pypi",
351 | "version": "==1.9.4"
352 | },
353 | "six": {
354 | "hashes": [
355 | "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a",
356 | "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"
357 | ],
358 | "version": "==1.14.0"
359 | },
360 | "wrapt": {
361 | "hashes": [
362 | "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"
363 | ],
364 | "version": "==1.12.1"
365 | }
366 | }
367 | }
368 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift Provisioner
2 |
3 | Provision infrastructure and install OpenShift.
4 |
5 | The goal of this project is to support installing OpenShift across multiple
6 | clouds and virtualization platforms.
7 |
8 | ## Getting Started
9 |
10 | This project is built using Ansible playbooks, including the use of modules
11 | that require additional packages to be installed in order to function.
12 |
13 | To make the use of this project easier, a container has been created that has
14 | all of the required dependencies and is configured to work out of the box.
15 |
16 | To use the container, you will need a container runtime. I recommend using
17 | [podman](https://github.com/projectatomic/libpod) or
18 | [docker](https://www.docker.com/community-edition).
19 |
20 | Start by cloning this repo:
21 |
22 | ```bash
23 | git clone https://github.com/jaredhocutt/openshift-provision.git
24 |
25 | cd openshift-provision/
26 | ```
27 |
28 | ## Known Issues
29 |
30 | ### Issue: Docker for Mac does not work
31 |
32 | Running this tool using Docker for Mac does not work. During the OpenShift
33 | installation portion, which is a long running process, the Ansible SSH
34 | connection will drop and not recover.
35 |
36 | This does not happen when running from a Linux machine. Therefore, the
37 | current workaround is to use a Linux VM (either on your Mac or running
38 | in AWS) and execute this tool from that Linux VM.
39 |
40 | This issue will exhibit itself with an error that looks similar to the following:
41 |
42 | ```bash
43 | TASK [install_openshift : Run OpenShift installer (this will take a while!)] **
44 | Friday 17 August 2018 21:26:08 +0000 (0:02:35.555) 0:26:59.634 *********
45 | fatal: [ec2-18-232-178-150.compute-1.amazonaws.com]: UNREACHABLE! => {
46 | "changed": false,
47 | "unreachable": true
48 | }
49 |
50 | MSG:
51 |
52 | Failed to connect to the host via ssh: Shared connection to 18.232.178.150 closed.
53 | ```
54 |
55 | ## Provisioners
56 |
57 | ### AWS
58 |
59 | #### Deploy
60 |
61 | ##### Step 1
62 |
63 | There are several variables that you will need to define before running the
64 | AWS provisioner.
65 |
66 | **Variables**
67 |
68 | | Variable | Required | Default | Description |
69 | | -------------------------------- | ------------------ | ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
70 | | `cluster_name` | :heavy_check_mark: | `openshift` | The name of the cluster.
This value will be in your DNS entries and should conform to valid DNS characters. |
71 | | `openshift_version` | :heavy_check_mark: | | The OpenShift version to install. This tool currently supports 3.9, 3.10, and 3.11.
**IMPORTANT:** Make sure this value is quoted, otherwise 3.10 gets read as 3.1 instead of 3.10. |
72 | | `openshift_base_domain` | :heavy_check_mark: | | The base subdomain to use for your cluster.
Example: If you set this to `example.com`, a DNS entry for `.example.com` will be created. |
73 | | `cert_email_address` | :heavy_check_mark: | | The email address to use when generating Lets Encrypt certs for the cluster. |
74 | | `aws_region` | :heavy_check_mark: | | The AWS region (i.e. `us-east-1`) |
75 | | `ec2_ami_type` | :heavy_check_mark: | `hourly` | If you have Cloud Access setup for your account, set this to `cloud_access`. Otherwise, set this to `hourly`. |
76 | | `route53_hosted_zone_id` | :heavy_check_mark: | | The ID of the Route53 hosted zone (i.e. `YP563J79RELJ4C`) |
77 | | `rhsm_username` | :heavy_check_mark: | | Your RHSM username |
78 | | `rhsm_password` | :heavy_check_mark: | | Your RHSM password |
79 | | `rhsm_pool` | :heavy_check_mark: | | The RHSM pool ID that contains OpenShift subscriptions |
80 | | `redhat_registry_username` | :heavy_check_mark: | | Your Red Hat registry username. This will default to `rhsm_username` if not specified.
To create a registry service account, go to https://access.redhat.com/terms-based-registry/. |
81 | | `redhat_registry_password` | :heavy_check_mark: | | Your Red Hat registry password/token. This will default to `rhsm_password` if not specified.
To create a registry service account, go to https://access.redhat.com/terms-based-registry/. |
82 | | `openshift_users` | | `[]` | A list of users to create in the OpenShift cluster.
Each item in the list should include `username`, `password`, and optionally `admin`. See the example vars file below for an example. |
83 | | `app_node_count` | | `2` | The number of app nodes to provision |
84 | | `ec2_vpc_cidr_block` | | `172.31.0.0/16` | The CIDR block for the VPC |
85 | | `ec2_instance_type_master` | | `m4.xlarge` | The EC2 instance type for the master node |
86 | | `ec2_instance_type_infra` | | `m4.xlarge` | The EC2 instance type for the infra node |
87 | | `ec2_instance_type_app` | | `m4.large` | The EC2 instance type for the app nodes |
88 | | `ec2_volume_size_master_root` | | `60` | The root disk size (in GB) for the master node |
89 | | `ec2_volume_size_infra_root` | | `60` | The root disk size (in GB) for the infra node |
90 | | `ec2_volume_size_app_root` | | `60` | The root disk size (in GB) for the app nodes |
91 | | `ec2_volume_size_cns` | | `150` | The disk size (in GB) for the CNS disk |
| `cns_block_host_volume_size`     |                    | `50`                                 | The volume size (in GB) of GlusterFS volumes that will be automatically created to host glusterblock volumes |
| `openshift_registry_volume_size` |                    | `20`                                 | The volume size (in GB) to provision for the integrated OpenShift registry |
94 | | `openshift_network_plugin` | | `redhat/openshift-ovs-networkpolicy` | The network plugin to configure. Available options are:
`redhat/openshift-ovs-subnet`
`redhat/openshift-ovs-multitenant`
`redhat/openshift-ovs-networkpolicy` |
| `letsencrypt_cert_generation`    |                    | `yes`                                | If you want LetsEncrypt certs generated for the cluster, leave this defaulted to `yes`. Otherwise set to `no`. This feature was originally added to work around hitting LetsEncrypt limits and being able to work around them. |
96 | | `openshift_ssh_password` | | | By default, SSH password auth for the deployed hosts is disabled. If you'd like to enable SSH password auth, set this to the password you'd like to be set for the default user. |
97 | | `openshift_version_minor` | | | The specific minor version of OpenShift you want to install, otherwise the latest minor release of the `openshift_version` specified will be installed.
Example: `3.11.43` |
98 | | `addons` | | | A list of addons to install in the cluster. See the table of available addons below. |
99 |
100 | **Addons**
101 |
102 | | Addon | Description |
103 | | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `istio`  | Provision Istio for this cluster.
Note: you still need to opt-in on each application so this won't break other apps. Also note that several sub-components will get installed so there will be an added cluster footprint of memory and compute usage [Read more about Istio here.](https://docs.openshift.com/container-platform/3.11/servicemesh-install/servicemesh-install.html#updating-master-configuration) |
105 |
106 | For your convenience, there is an example variables file at
107 | `/vars/aws.example.yml`. Go ahead and make a copy of this
file and update the variable values. The contents of that file are also shown below.
109 |
110 | This guide will assume the file is located at `/vars/aws.yml`.
111 |
112 | ```yaml
113 | # The name of the cluster.
114 | # This value will be in your DNS entries and should conform to valid DNS characters.
115 | cluster_name: openshift
116 |
117 | # The OpenShift version to install
118 | # IMPORTANT: Make sure this value is quoted, otherwise it gets read as 3.1 instead of 3.10
119 | openshift_version: "3.11"
120 | # The base subdomain to use for your cluster.
# Example: If you set this to `example.com`, a DNS entry for `.example.com` will be created.
122 | openshift_base_domain: example.com
123 |
124 | # The email address to use when generating Lets Encrypt certs for the cluster.
125 | cert_email_address: foo@example.com
126 |
127 | # The AWS region (i.e. `us-east-1`)
128 | aws_region: us-east-1
129 | # If you have Cloud Access setup for your account, set this to `cloud_access`. Otherwise, set this to `hourly`.
130 | ec2_ami_type: cloud_access
131 | # The ID of the Route53 hosted zone
132 | route53_hosted_zone_id: YP563J79RELJ4C
133 |
134 | # Your RHSM username
135 | rhsm_username: foo@example.com
136 | # Your RHSM password
137 | rhsm_password: P@55w0rD
138 | # The RHSM pool ID that contains OpenShift subscriptions
139 | rhsm_pool: ba4e7732f8abcdad545c7f62df736d1f
140 |
141 | # Your Red Hat registry username
142 | redhat_registry_username: 1234567|foo
143 | # Your Red Hat registry password/token
144 | redhat_registry_password: 0535VZW0qDK3fBjFwJE93emjk8fmzNBLJ2XHN8TNrAsxmaqDOOz2G
145 |
146 | # The users to create in OpenShift
147 | openshift_users:
148 | - username: admin
149 | password: password
150 | admin: yes
151 | - username: user1
152 | password: password
153 | - username: user2
154 | password: password
155 | - username: user3
156 | password: password
157 | ```
158 |
159 | ##### Step 2
160 |
161 | You will also need to set a few environment variables. For your convenience,
162 | there is an example environment file at `/vars/aws.example.env`.
163 | Go ahead and make a copy of this file and update the environment variable values. The
contents of that file are also shown below.
165 |
166 | This guide will assume the file is located at `/vars/aws.env`.
167 |
168 | ```
169 | AWS_ACCESS_KEY_ID=your_access_key
170 | AWS_SECRET_ACCESS_KEY=your_secret_key
171 | ```
172 |
173 | ##### Step 3
174 |
175 | Now you're ready to provision an OpenShift cluster in AWS.
176 |
177 | ```bash
178 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml provision
179 | ```
180 |
181 | If you are looking to use this project to create and setup the infrastructure
182 | for an OpenShift install, but skip the install, you can run:
183 |
184 | ```bash
185 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml provision --skip-tags openshift_deploy_cluster
186 | ```
187 |
188 | Once the provisioning has completed successfully, you will be able to access
189 | your cluster at `{{ cluster_name }}.{{ openshift_base_domain }}`.
190 |
191 | For example, if you set:
192 |
193 | ```yaml
194 | cluster_name: ocp
195 | openshift_base_domain: mydomain.com
196 | ```
197 |
198 | your OpenShift cluster would be accessible at `ocp.mydomain.com`.
199 |
200 | #### Manage
201 |
202 | There is a helper script to make it easy to run this provisioner. It is the
203 | `op.py` script.
204 |
205 | You will see how to use `op.py` in the following subsections.
206 |
207 | **Note:** For most actions of `op.py`, it will first try to pull the latest
208 | version of the bundled provisioner. If you are on a slow connection (i.e. hotel wifi)
209 | and want to skip this, pass the `--no-update` option.
210 |
211 | ##### Start / Stop
212 |
213 | After your environment is provisioned, it's likely you'll want to shut it down
214 | when you're not using it and be able to start it back up when you need it.
215 |
216 | This is recommended as the compute costs for this cluster can get pricey if
217 | left running. For example, the default cluster config would be:
218 |
219 | - 1 t2.medium at $0.0464 per hour
220 | - 2 m4.xlarge at $0.20 per hour
221 | - 2 m4.large at $0.10 per hour
222 |
223 | That comes out to a little more than $15 per day and ~$465 per month.
224 |
225 | You can start and stop your cluster by:
226 |
227 | ```bash
228 | # Start cluster
229 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml start
230 | # Stop cluster
231 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml stop
232 | ```
233 |
234 | ##### SSH
235 |
236 | If you need to SSH into the bastion/master, you can do that by:
237 |
238 | ```bash
239 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml ssh
240 | ```
241 |
242 | ##### Create / Update Users
243 |
244 | If you need to add or update users in OpenShift:
245 |
246 | ```bash
247 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml create_users
248 | ```
249 |
250 | ##### Teardown
251 |
252 | Once you no longer need your environment, you can tear it down by:
253 |
254 | ```bash
255 | sudo ./op.py --env-file vars/aws.env --vars-file vars/aws.yml teardown
256 | ```
257 |
258 | ## Modifying The Playbooks
259 |
260 | By default this tool uses the released versions of the repo playbooks. If you want to tweak anything locally and have the `op.py` script uses those changes you can. Be sure to pass `--dev` on the command line for those local changes to be used.
261 |
262 | For more info please see the [CONTRIBUTING.md](./CONTRIBUTING.md) guidelines
263 |
264 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 |
3 | inventory = inventory
4 | forks = 50
5 | callback_whitelist = timer, profile_tasks
6 |
7 | log_path = ansible.log
8 | roles_path = playbooks/roles
9 |
10 | retry_files_enabled = False
11 | host_key_checking = False
12 | stdout_callback = debug
13 |
14 | [inventory]
15 |
16 | enable_plugins = aws_ec2, yaml
17 |
--------------------------------------------------------------------------------
/base.Dockerfile:
--------------------------------------------------------------------------------
# Base image for the openshift-provision tooling: Fedora plus the build
# and runtime dependencies needed by the Ansible playbooks (pipenv,
# pycurl build deps, ssh client).
FROM docker.io/library/fedora:28

# Build pycurl against OpenSSL so it matches the system libcurl TLS backend.
ENV PYCURL_SSL_LIBRARY=openssl

RUN \
  dnf install -y \
    gcc \
    libcurl-devel \
    libxml2-devel \
    openssh-clients \
    openssl-devel \
    python3 \
    python3-pip \
    python3-devel \
    which

# Make python/pip point at python3, install pipenv, and clean the dnf
# cache to keep the image small.
RUN \
  ln -sf /usr/bin/python3 /usr/local/bin/python \
  && ln -sf /usr/bin/pip3 /usr/local/bin/pip \
  && pip install --upgrade pip \
  && pip install pipenv \
  && dnf clean all \
  && mkdir -p /app

WORKDIR /app

CMD ["echo", "This is a base image and isn't meant to be run directly."]
28 |
--------------------------------------------------------------------------------
/entrypoint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -e

# With arguments: run them as the container command.
# Without arguments: drop into an interactive bash shell, which requires
# the container to have been started with a TTY attached.
if [[ $# -gt 0 ]]; then
    exec "$@"
fi

if [[ ! -t 0 ]]; then
    echo "An interactive shell was not detected."
    echo
    echo "By default, this container starts a bash shell, be sure you are passing '-it' to your run command."

    exit 1
fi

echo "Starting shell..."
echo

exec "bash"
21 |
--------------------------------------------------------------------------------
/inventory/inventory.aws_ec2.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | plugin: aws_ec2
4 |
5 | strict_permissions: no
6 |
7 | regions:
8 | - us-east-1
9 | - us-east-2
10 | - us-west-1
11 |
12 | keyed_groups:
13 | - prefix: openshift_role
14 | key: tags.OpenShiftRole
15 | - prefix: openshift_cluster
16 | key: tags.OpenShiftClusterNameVerbose
17 |
18 | groups:
19 | openshift_nodes:
20 | tags.OpenShiftRole in ['master', 'infra', 'app']
21 | openshift_bastion:
22 | tags.OpenShiftBastion == 'true'
23 |
--------------------------------------------------------------------------------
/inventory/inventory.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | all:
4 | hosts:
5 | localhost:
6 | ansible_python_interpreter: python
7 | ansible_connection: local
8 | children:
9 | aws_ec2:
10 | vars:
11 | ansible_user: ec2-user
12 | provision_type: aws
13 |
--------------------------------------------------------------------------------
/op.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import argparse
4 | import os
5 | import subprocess
6 |
7 |
8 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
9 | SUPPORTED_CONTAINER_RUNTIMES = ['podman', 'docker']
10 |
11 |
class ContainerRuntimeMissingError(Exception):
    """Raised when no supported container runtime (see
    SUPPORTED_CONTAINER_RUNTIMES) is available on the host."""
    pass
14 |
15 |
class OpenShiftProvision(object):
    """Runs the bundled provisioner container (via podman or docker) to
    execute the Ansible playbooks that manage an OpenShift cluster."""

    def __init__(self, env_file, vars_file, no_update=False, dev=False,
                 playbook_args=None):
        """Detect the container runtime and build the shared arguments.

        :param env_file: path to a file of environment variables to pass
            to the container (e.g. AWS credentials)
        :param vars_file: path to the Ansible variables file
        :param no_update: if True, skip pulling the latest container image
        :param dev: if True, mount the local checkout into the container
            so local playbook changes are used
        :param playbook_args: extra args appended to the ansible-playbook
            (or ssh) command line; defaults to an empty list
        :raises ContainerRuntimeMissingError: if no supported runtime is found
        """
        self.env_file = env_file
        self.vars_file = vars_file
        self.no_update = no_update
        self.dev = dev
        # Normalize here instead of using a mutable default argument,
        # which would be shared across every instance of this class.
        self.playbook_args = playbook_args if playbook_args is not None else []

        self.container_runtime = self._container_runtime()
        self.container_image = 'quay.io/jhocutt/openshift-provision'
        self.keys_dir = self._keys_dir()
        self.container_command_args = self._container_command_args()

    def _container_runtime(self):
        """Return the first supported container runtime found on PATH.

        :raises ContainerRuntimeMissingError: if none respond to --version
        """
        for runtime in SUPPORTED_CONTAINER_RUNTIMES:
            try:
                subprocess.call([runtime, '--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
                return runtime
            except OSError:
                # Binary not installed; try the next runtime.
                pass

        raise ContainerRuntimeMissingError()

    def _keys_dir(self):
        """Return the absolute directory used for generated SSH keypairs,
        creating it if it does not exist yet."""
        keys_dir = os.path.join(BASE_DIR, 'playbooks', 'aws', 'keys')

        if not os.path.exists(keys_dir):
            os.mkdir(keys_dir)

        return keys_dir

    def _container_command_args(self):
        """Build the base `<runtime> run ...` argument list shared by all
        container invocations (env file plus vars/keys volume mounts)."""
        cmd_args = [
            self.container_runtime,
            'run',
            '-it',
            '--rm',
            '--env-file', self.env_file,
            '--volume', '{}:/app_vars:z'.format(os.path.dirname(os.path.abspath(self.vars_file))),
            # keys_dir is already absolute (built from BASE_DIR), so the
            # previous os.path.join(BASE_DIR, ...) around it was redundant.
            '--volume', '{}:/app_keys:z'.format(self.keys_dir),
        ]

        if self.dev:
            # Mount the local checkout over the image's baked-in playbooks.
            cmd_args = cmd_args + [
                '--volume', '{}:/app:z'.format(BASE_DIR),
            ]

        cmd_args.append(self.container_image)

        return cmd_args

    def _pull_latest_container(self):
        """Pull the latest provisioner image unless updates are disabled."""
        if self.no_update:
            print('Skipping image update.')
            return

        subprocess.call([
            self.container_runtime,
            'pull',
            self.container_image,
        ])

    def _run_playbook_command(self, playbook):
        """Run the given playbook inside the container, forwarding the
        vars file, keys directory, and any extra playbook arguments."""
        self._pull_latest_container()

        cmd_args = self.container_command_args + [
            'ansible-playbook',
            playbook,
            '-e', 'keys_dir=/app_keys',
            '-e', '@/app_vars/{}'.format(os.path.basename(self.vars_file)),
        ] + self.playbook_args

        subprocess.call(cmd_args)

    def provision(self):
        """Provision AWS infrastructure and install OpenShift."""
        self._run_playbook_command('playbooks/aws/provision.yml')

    def addon_istio(self):
        """Install the Istio addon on an existing cluster."""
        self._run_playbook_command('playbooks/aws/provision_istio.yml')

    def start_instances(self):
        """Start the cluster's EC2 instances."""
        self._run_playbook_command('playbooks/aws/start_instances.yml')

    def stop_instances(self):
        """Stop the cluster's EC2 instances."""
        self._run_playbook_command('playbooks/aws/stop_instances.yml')

    def teardown(self):
        """Tear down the provisioned OpenShift environment."""
        self._run_playbook_command('playbooks/aws/teardown.yml')

    def create_users(self):
        """Create or update users in the OpenShift cluster."""
        self._run_playbook_command('playbooks/aws/create_users.yml')

    def shell(self):
        """Open an interactive bash shell inside the container."""
        self._pull_latest_container()
        subprocess.call(self.container_command_args + ['bash',])

    def ssh(self):
        """SSH to the cluster's bastion host using the generated keypair.

        Reads cluster_name and openshift_base_domain from the vars file to
        derive the bastion hostname and keypair path inside the container.
        """
        import yaml

        with open(self.vars_file, 'r') as f:
            # safe_load avoids arbitrary object construction and, unlike
            # bare yaml.load, needs no Loader argument on PyYAML >= 5.1.
            vars_data = yaml.safe_load(f)

        bastion_hostname = 'bastion.{}.{}'.format(
            vars_data['cluster_name'],
            vars_data['openshift_base_domain']
        )
        keypair_filename = '/app_keys/{}-{}.pem'.format(
            vars_data['cluster_name'],
            vars_data['openshift_base_domain'].replace('.', '-')
        )

        self._pull_latest_container()

        cmd_args = self.container_command_args + [
            'ssh',
            '-i', keypair_filename,
            '-o', 'StrictHostKeyChecking=no',
            'ec2-user@{}'.format(bastion_hostname),
        ] + self.playbook_args

        subprocess.call(cmd_args)
139 |
140 |
def check_file_exists(value):
    """argparse type callable: accept the value only if it is an
    existing file, otherwise raise ArgumentTypeError."""
    if os.path.isfile(value):
        return value
    raise argparse.ArgumentTypeError('The path {} does not exist'.format(value))
145 |
146 |
if __name__ == '__main__':
    # CLI entry point: parse a subcommand plus shared options, then
    # dispatch to the matching OpenShiftProvision method. Unrecognized
    # extra args are forwarded to ansible-playbook/ssh inside the container.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    parser_provision = subparsers.add_parser('provision')
    parser_provision.set_defaults(action='provision')

    parser_start = subparsers.add_parser('start')
    parser_start.set_defaults(action='start')

    parser_stop = subparsers.add_parser('stop')
    parser_stop.set_defaults(action='stop')

    parser_teardown = subparsers.add_parser('teardown')
    parser_teardown.set_defaults(action='teardown')

    parser_create_users = subparsers.add_parser('create_users')
    parser_create_users.set_defaults(action='create_users')

    parser_shell = subparsers.add_parser('shell')
    parser_shell.set_defaults(action='shell')

    parser_ssh = subparsers.add_parser('ssh')
    parser_ssh.set_defaults(action='ssh')

    parser_addon = subparsers.add_parser('addon')
    parser_addon.set_defaults(action='addon')
    parser_addon.add_argument('addon',
                              choices=['istio',])

    parser.add_argument('--env-file',
                        required=True,
                        type=check_file_exists,
                        help='file of environment variables')
    parser.add_argument('--vars-file',
                        required=True,
                        type=check_file_exists,
                        help='file of ansible variables')
    parser.add_argument('--no-update',
                        action='store_true')
    parser.add_argument('--dev',
                        action='store_true')
    known_args, extra_args = parser.parse_known_args()

    # Each subcommand sets `action` via set_defaults(); if no subcommand
    # was given the attribute does not exist and the dispatch below would
    # crash with an AttributeError, so fail with usage help instead.
    if not hasattr(known_args, 'action'):
        parser.print_help()
        exit(1)

    if os.geteuid() != 0:
        print('This script requires root privileges.')
        exit(1)

    try:
        op = OpenShiftProvision(known_args.env_file,
                                known_args.vars_file,
                                known_args.no_update,
                                known_args.dev,
                                extra_args)
    except ContainerRuntimeMissingError:
        print('\n'.join([
            'You do not have a supported container runtime installed.',
            '',
            'This script supports the following container runtimes:',
            '\n'.join(' - {}'.format(i) for i in SUPPORTED_CONTAINER_RUNTIMES),
            '',
            'Please install one of those options and try again.'
        ]))
        # Previously execution fell through here with `op` unbound,
        # producing a NameError in the dispatch below; exit cleanly.
        exit(1)

    if known_args.action == 'provision':
        op.provision()
    elif known_args.action == 'start':
        op.start_instances()
    elif known_args.action == 'stop':
        op.stop_instances()
    elif known_args.action == 'teardown':
        op.teardown()
    elif known_args.action == 'create_users':
        op.create_users()
    elif known_args.action == 'addon':
        if known_args.addon == 'istio':
            op.addon_istio()
    elif known_args.action == 'shell':
        op.shell()
    elif known_args.action == 'ssh':
        op.ssh()
--------------------------------------------------------------------------------
/playbooks/aws/create_users.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | roles:
5 | - host_facts_aws
6 |
7 | - hosts: openshift_role_master:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
8 | roles:
9 | - create_users
10 |
--------------------------------------------------------------------------------
/playbooks/aws/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | openshift_public_hostname: "{{ cluster_name }}.{{ openshift_base_domain }}"
4 | cluster_name_verbose: "{{ openshift_public_hostname | replace('.', '-') }}"
5 | cluster_group_name: "{{ cluster_name_verbose | replace('-', '_') }}"
6 |
7 | keys_dir: "{{ playbook_dir }}/keys"
8 | ec2_key_name: "{{ cluster_name_verbose }}"
9 | ec2_key_file: "{{ keys_dir }}/{{ ec2_key_name }}.pem"
10 |
--------------------------------------------------------------------------------
/playbooks/aws/provision.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | roles:
5 | - provision_aws
6 | - host_facts_aws
7 |
8 | - hosts: openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
9 | any_errors_fatal: yes
10 | roles:
11 | - rhsm_subscribe
12 | - host_prep_general
13 |
14 | - hosts: openshift_nodes:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
15 | any_errors_fatal: yes
16 | roles:
17 | - host_prep_openshift
18 |
19 | - hosts: openshift_bastion:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
20 | any_errors_fatal: yes
21 | roles:
22 | - host_prep_bastion
23 | - install_openshift
24 |
25 | - hosts: openshift_role_master:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
26 | roles:
27 | - create_users
28 | tags:
29 | - openshift_deploy_cluster
30 |
31 | - hosts: openshift_nodes:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
32 | tasks:
33 | - name: Prepare all nodes for Istio
34 | include_role:
35 | name: host_prep_istio
36 | when:
37 | - addons is defined
38 | - "'istio' in addons"
39 |
40 | - hosts: openshift_role_master:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
41 | tasks:
42 | - name: Install Istio using the master node
43 | include_role:
44 | name: install_istio
45 | when:
46 | - addons is defined
47 | - "'istio' in addons"
48 |
--------------------------------------------------------------------------------
/playbooks/aws/provision_istio.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | roles:
5 | - host_facts_aws
6 |
7 | - hosts: openshift_nodes:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
8 | tasks:
9 | - name: Prepare all nodes for Istio
10 | include_role:
11 | name: host_prep_istio
12 |
13 | - hosts: openshift_role_master:&openshift_cluster_{{ hostvars.localhost.cluster_group_name }}
14 | tasks:
15 | - name: Install Istio using the master node
16 | include_role:
17 | name: install_istio
18 |
--------------------------------------------------------------------------------
/playbooks/aws/start_instances.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | tasks:
5 | - name: Start EC2 instances
6 | ec2_instance:
7 | region: "{{ aws_region }}"
8 | filters:
9 | tag:OpenShiftClusterNameVerbose: "{{ hostvars.localhost.cluster_name_verbose }}"
10 | state: started
11 |
--------------------------------------------------------------------------------
/playbooks/aws/stop_instances.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | gather_facts: no
5 | tasks:
6 | - name: Stop EC2 instances
7 | ec2_instance:
8 | region: "{{ aws_region }}"
9 | filters:
10 | tag:OpenShiftClusterNameVerbose: "{{ hostvars.localhost.cluster_name_verbose }}"
11 | state: stopped
12 |
--------------------------------------------------------------------------------
/playbooks/aws/teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: localhost
4 | gather_facts: no
5 | tasks:
6 | - name: Teardown OpenShift environment
7 | include_role:
8 | name: provision_aws
9 | tasks_from: teardown
10 |
--------------------------------------------------------------------------------
/playbooks/roles/create_users/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/create_users/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | openshift_users: []
4 |
--------------------------------------------------------------------------------
/playbooks/roles/create_users/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for create_users
--------------------------------------------------------------------------------
/playbooks/roles/create_users/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/create_users/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create users
4 | command: htpasswd -b /etc/origin/master/htpasswd '{{ item.username }}' '{{ item.password }}'
5 | with_items: "{{ openshift_users }}"
6 | become: yes
7 |
8 | - name: Create admin users
9 | command: oc adm policy add-cluster-role-to-user cluster-admin '{{ item.username }}'
10 | run_once: yes
11 | with_items: "{{ openshift_users }}"
12 | when: item.admin is defined and item.admin == True
13 |
--------------------------------------------------------------------------------
/playbooks/roles/create_users/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/create_users/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - create_users
--------------------------------------------------------------------------------
/playbooks/roles/create_users/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for create_users
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | openshift_public_hostname: "{{ cluster_name }}.{{ openshift_base_domain }}"
4 |
5 | cluster_name_verbose: "{{ openshift_public_hostname | replace('.', '-') }}"
6 | cluster_group_name: "{{ cluster_name_verbose | replace('-', '_') }}"
7 |
8 | keys_dir: "{{ playbook_dir }}/keys"
9 | ec2_key_name: "{{ cluster_name_verbose }}"
10 | ec2_key_file: "{{ keys_dir }}/{{ ec2_key_name }}.pem"
11 |
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for host_facts_aws
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Refresh inventory to update EC2 instances information
4 | meta: refresh_inventory
5 |
6 | - name: Add host facts to OpenShift nodes
7 | add_host:
8 | name: "{{ item }}"
9 | ansible_host: "{{ hostvars[item].public_ip_address }}"
10 | ansible_ssh_private_key_file: "{{ ec2_key_file }}"
11 | rhsm_consumer_name: "{{ hostvars[item].tags.Name }}"
12 | with_items: "{{ groups.openshift_nodes | intersect(groups['openshift_cluster_' + cluster_group_name]) }}"
13 | changed_when: no
14 |
15 | - name: Wait for nodes to boot
16 | wait_for:
17 | host: "{{ hostvars[item].ansible_host }}"
18 | port: 22
19 | search_regex: OpenSSH
20 | delegate_to: localhost
21 | with_items: "{{ groups.openshift_nodes | intersect(groups['openshift_cluster_' + cluster_group_name]) }}"
22 |
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - host_facts_aws
--------------------------------------------------------------------------------
/playbooks/roles/host_facts_aws/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for host_facts_aws
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | openshift_public_hostname: "{{ cluster_name }}.{{ openshift_base_domain }}"
4 |
5 | cluster_name_verbose: "{{ openshift_public_hostname | replace('.', '-') }}"
6 | cluster_group_name: "{{ cluster_name_verbose | replace('-', '_') }}"
7 |
8 | keys_dir: "{{ playbook_dir }}/keys"
9 | ec2_key_name: "{{ cluster_name_verbose }}"
10 | ec2_key_file: "{{ keys_dir }}/{{ ec2_key_name }}.pem"
11 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for host_prep_bastion
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
58 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Install docker
4 | yum:
5 | name:
6 | - docker-1.13.1
7 | state: present
8 | when: (openshift_version | string) in ["3.9", "3.10", "3.11"]
9 | become: yes
10 |
11 | - name: Install OpenShift playbooks (3.9)
12 | yum:
13 | name:
14 | - atomic-openshift-utils
15 | state: present
16 | when:
17 | - openshift_version_minor is not defined
18 | - (openshift_version | string) in ["3.9"]
19 | become: yes
20 |
21 | - name: Install OpenShift playbooks (3.9) - minor version
22 | yum:
23 | name:
24 | - atomic-openshift-utils-{{ openshift_version_minor }}
25 | state: present
26 | when:
27 | - openshift_version_minor is defined
28 | - (openshift_version | string) in ["3.9"]
29 | become: yes
30 |
31 | - name: Install OpenShift playbooks (3.10, 3.11)
32 | yum:
33 | name:
34 | - openshift-ansible
35 | state: present
36 | when:
37 | - openshift_version_minor is not defined
38 | - (openshift_version | string) in ["3.10", "3.11"]
39 | become: yes
40 |
41 | - name: Install OpenShift playbooks (3.10, 3.11) - minor version
42 | yum:
43 | name:
44 | - openshift-ansible-{{ openshift_version_minor }}
45 | state: present
46 | when:
47 | - openshift_version_minor is defined
48 | - (openshift_version | string) in ["3.10", "3.11"]
49 | become: yes
50 |
51 | - name: Start and enable docker
52 | service:
53 | name: docker
54 | enabled: yes
55 | state: started
56 | become: yes
57 |
58 | - name: Copy EC2 key
59 | copy:
60 | src: "{{ ec2_key_file }}"
61 | dest: ~/.ssh/id_rsa
62 | mode: 0600
63 | backup: yes
64 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - host_prep_bastion
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_bastion/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for host_prep_bastion
3 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for host_prep_general
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for host_prep_general
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Update all packages
4 | yum:
5 | name: "*"
6 | state: latest
7 | exclude: atomic-openshift-ansible*,openshift-ansible*
8 | retries: 3 # This seems to fail sometimes, so lets retry before failing everything
9 | until: update_all_packages.rc == 0
10 | register: update_all_packages
11 | become: yes
12 |
13 | - block:
14 | - name: Reboot host
15 | shell: sleep 30 && shutdown -r now
16 | async: 30
17 | poll: 0
18 | ignore_errors: yes
19 | become: yes
20 |
21 | - name: Wait for nodes to boot
22 | wait_for:
23 | host: "{{ ansible_host }}"
24 | port: 22
25 | delay: 30
26 | timeout: 300
27 | search_regex: OpenSSH
28 | delegate_to: localhost
29 | when: "'kernel' in update_all_packages.results"
30 |
31 | - name: Install preferred packages
32 | yum:
33 | name:
34 | - vim
35 | - screen
36 | state: present
37 | become: yes
38 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - host_prep_general
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_general/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for host_prep_general
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for host_prep_istio
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/files/99-elasticsearch.conf:
--------------------------------------------------------------------------------
1 | vm.max_map_count = 262144
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for host_prep_istio
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 2.4
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # Provide a list of supported platforms, and for each platform a list of versions.
34 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
35 | # To view available platforms and versions (or releases), visit:
36 | # https://galaxy.ansible.com/api/v1/platforms/
37 | #
38 | # platforms:
39 | # - name: Fedora
40 | # versions:
41 | # - all
42 | # - 25
43 | # - name: SomePlatform
44 | # versions:
45 | # - all
46 | # - 1.0
47 | # - 7
48 | # - 99.99
49 |
50 | galaxy_tags: []
51 | # List tags for your role here, one per line. A tag is a keyword that describes
52 | # and categorizes the role. Users find roles by searching for tags. Be sure to
53 | # remove the '[]' above, if you add tags to this list.
54 | #
55 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
56 | # Maximum 20 tags per role.
57 |
58 | dependencies: []
59 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
60 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: update nodes for Elasticsearch max_map_count
4 | copy:
5 | src: 99-elasticsearch.conf
6 | dest: /etc/sysctl.d/99-elasticsearch.conf
7 | owner: root
8 | group: root
9 | mode: 0644
10 | become: yes
11 |
12 | - name: update max_map_count
13 | command: sysctl vm.max_map_count=262144
14 | become: yes
15 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - host_prep_istio
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_istio/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for host_prep_istio
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for host_prep_openshift
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for host_prep_openshift
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
 21 | # If this is a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # There is a current bug with DNSmasq causing things to fail during an
4 | # OpenShift install. Installing/upgrading and then rebooting the nodes is
5 | # the current workaround.
6 | - name: Install/upgrade dnsmasq
7 | yum:
8 | name: dnsmasq
9 | state: latest
10 | register: install_update_dnsmasq
11 | become: yes
12 |
13 | - block:
14 | - name: Reboot host
15 | shell: sleep 30 && shutdown -r now
16 | async: 30
17 | poll: 0
18 | ignore_errors: yes
19 | become: yes
20 |
21 | - name: Wait for nodes to boot
22 | wait_for:
23 | host: "{{ ansible_host }}"
24 | port: 22
25 | delay: 30
26 | timeout: 300
27 | search_regex: OpenSSH
28 | delegate_to: localhost
29 | when: install_update_dnsmasq.changed
30 |
31 | - name: Install Docker
32 | yum:
33 | name: docker-1.13.1 # TODO: Make this a varible based on openshift_version
34 | state: present
35 | become: yes
36 |
37 | - name: Write docker-storage-setup file
38 | template:
39 | src: docker-storage-setup.j2
40 | dest: /etc/sysconfig/docker-storage-setup
41 | owner: root
42 | group: root
43 | mode: 0644
44 | register: write_docker_storage_setup_file
45 | become: yes
46 |
47 | - name: Run docker-storage-setup
48 | command: docker-storage-setup
49 | when: write_docker_storage_setup_file.changed
50 | become: yes
51 |
52 | - name: Start and enable docker
53 | service:
54 | name: docker
55 | enabled: yes
56 | state: started
57 | become: yes
58 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/templates/docker-storage-setup.j2:
--------------------------------------------------------------------------------
1 | STORAGE_DRIVER=overlay2
2 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - host_prep_openshift
--------------------------------------------------------------------------------
/playbooks/roles/host_prep_openshift/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for host_prep_openshift
3 |
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for install_istio
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/files/istio-installation.yaml:
--------------------------------------------------------------------------------
# Maistra/OpenShift Service Mesh ControlPlane definition consumed by the
# install_istio role (applied into the istio-system namespace).
apiVersion: istio.openshift.com/v1alpha3
kind: ControlPlane
metadata:
  name: basic-install
spec:
  launcher:
    enabled: false
    # specify the url to master, e.g. https://master.some.domain.com:443
    LAUNCHER_MISSIONCONTROL_OPENSHIFT_CONSOLE_URL:
    # Your GitHub username
    LAUNCHER_MISSIONCONTROL_GITHUB_USERNAME:
    # Your GitHub Mission Control access token
    LAUNCHER_MISSIONCONTROL_GITHUB_TOKEN:

  threeScale:
    enabled: false

  istio:
    global:
      proxy:
        # Per-sidecar resource requests/limits.
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 500m
            memory: 128Mi

    gateways:
      # Autoscaling disabled throughout — sized for a small demo cluster.
      istio-egressgateway:
        autoscaleEnabled: false
      istio-ingressgateway:
        autoscaleEnabled: false
        ior_enabled: false

    mixer:
      policy:
        autoscaleEnabled: false

      telemetry:
        autoscaleEnabled: false
        resources:
          requests:
            cpu: 100m
            memory: 1G
          limits:
            cpu: 500m
            memory: 4G

    pilot:
      autoscaleEnabled: false
      # Trace 100% of requests (demo setting; far too high for production).
      traceSampling: 100.0

    kiali:
      dashboard:
        # NOTE(review): plaintext default credentials — change these before
        # exposing the Kiali dashboard anywhere non-throwaway.
        user: admin
        passphrase: admin
    tracing:
      enabled: true
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/files/master-config.patch:
--------------------------------------------------------------------------------
1 | {"admissionConfig": {"pluginConfig": {"ValidatingAdmissionWebhook": {"configuration": {"kind": "WebhookAdmission", "kubeConfigFile": "/dev/null", "apiVersion": "apiserver.config.k8s.io/v1alpha1"}}, "MutatingAdmissionWebhook": {"configuration": {"kind": "WebhookAdmission", "kubeConfigFile": "/dev/null", "apiVersion": "apiserver.config.k8s.io/v1alpha1"}}}}}
2 |
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for install_istio
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 2.4
20 |
 21 | # If this is a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # Provide a list of supported platforms, and for each platform a list of versions.
34 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
35 | # To view available platforms and versions (or releases), visit:
36 | # https://galaxy.ansible.com/api/v1/platforms/
37 | #
38 | # platforms:
39 | # - name: Fedora
40 | # versions:
41 | # - all
42 | # - 25
43 | # - name: SomePlatform
44 | # versions:
45 | # - all
46 | # - 1.0
47 | # - 7
48 | # - 99.99
49 |
50 | galaxy_tags: []
51 | # List tags for your role here, one per line. A tag is a keyword that describes
52 | # and categorizes the role. Users find roles by searching for tags. Be sure to
53 | # remove the '[]' above, if you add tags to this list.
54 | #
55 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
56 | # Maximum 20 tags per role.
57 |
58 | dependencies: []
59 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
60 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Copy master config patch file
4 | copy:
5 | src: master-config.patch
6 | dest: /etc/origin/master/master-config.patch
7 | become: yes
8 |
9 | - name: Create new master config
10 | shell: oc ex config patch master-config.yaml --patch "$(cat master-config.patch)" --type merge > master-config.new.yaml
11 | args:
12 | chdir: /etc/origin/master/
13 | become: yes
14 |
15 | - name: Replace master config with with patched version
16 | copy:
17 | remote_src: yes
18 | src: /etc/origin/master/master-config.new.yaml
19 | dest: /etc/origin/master/master-config.yaml
20 | backup: yes
21 | register: replace_master_config
22 | become: yes
23 |
24 | # TODO: delete master-config.new.yaml
25 |
26 | - name: Restart master API server and controllers
27 | shell: /usr/local/bin/master-restart api && /usr/local/bin/master-restart controllers
28 | when: replace_master_config.changed
29 | become: yes
30 |
31 | - name: Wait for master API and controllers to become available
32 | command: oc version
33 | register: wait_for_master_api
34 | until: wait_for_master_api.rc == 0
35 | retries: 10
36 | delay: 5
37 |
38 | # TODO: check if Istio operator project exists
39 |
40 | - name: Create Istio operator project
41 | command: oc new-project istio-operator
42 |
43 | # TODO: check if Istio operator system exists
44 |
45 | - name: Create Istio system project
46 | command: oc new-project istio-system
47 |
48 | # TODO: check if operator exists
49 |
50 | - name: Install operator
51 | command: oc apply -n istio-operator -f https://raw.githubusercontent.com/Maistra/istio-operator/maistra-0.10/deploy/servicemesh-operator.yaml
52 |
53 | # TODO: check if istio installation exists
54 |
55 | - name: Copy control plane config
56 | copy:
57 | src: istio-installation.yaml
58 | dest: /tmp/istio-installation.yaml
59 |
60 | - name: install the control plane
61 | command: oc create -n istio-system -f /tmp/istio-installation.yaml
62 |
63 | # TODO: delete istio-installation.yaml
64 |
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - install_istio
--------------------------------------------------------------------------------
/playbooks/roles/install_istio/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for install_istio
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | openshift_public_hostname: "{{ cluster_name }}.{{ openshift_base_domain }}"
4 |
5 | cluster_name_verbose: "{{ openshift_public_hostname | replace('.', '-') }}"
6 | cluster_group_name: "{{ cluster_name_verbose | replace('-', '_') }}"
7 |
8 | keys_dir: "{{ playbook_dir }}/keys"
9 | ec2_key_name: "{{ cluster_name_verbose }}"
10 | ec2_key_file: "{{ keys_dir }}/{{ ec2_key_name }}.pem"
11 |
12 | letsencrypt_cert_generation: yes
13 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/files/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 |
3 | forks = 20
4 | callback_whitelist = timer, profile_tasks
5 |
6 | log_path = ~/ansible.log
7 |
8 | retry_files_enabled = False
9 | host_key_checking = False
10 | stdout_callback = debug
11 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for install_openshift
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
 21 | # If this is a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/tasks/certs.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Get wildcard SSL cert for OpenShift router
4 | command: >-
5 | docker run --rm --name certbot
6 | -v "/etc/letsencrypt:/etc/letsencrypt:z"
7 | -v "/var/lib/letsencrypt:/var/lib/letsencrypt:z"
8 | -e AWS_ACCESS_KEY_ID={{ lookup('env', 'AWS_ACCESS_KEY_ID') }}
9 | -e AWS_SECRET_ACCESS_KEY={{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}
10 | certbot/dns-route53 certonly
11 | --non-interactive
12 | --agree-tos
13 | --email "{{ cert_email_address }}"
14 | --dns-route53
15 | --dns-route53-propagation-seconds 45
16 | --server https://acme-v02.api.letsencrypt.org/directory
17 | --domain "{{ openshift_public_hostname }}"
18 | --domain "*.apps.{{ openshift_public_hostname }}"
19 | args:
20 | creates: /etc/letsencrypt/live/{{ openshift_public_hostname }}
21 | become: yes
22 |
23 | - name: Give the {{ ansible_user }} access to the certs
24 | acl:
25 | path: "/etc/letsencrypt"
26 | entity: "{{ ansible_user }}"
27 | etype: user
28 | permissions: rx
29 | recursive: yes
30 | follow: yes
31 | state: present
32 | become: yes
33 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - import_tasks: certs.yml
4 | when: letsencrypt_cert_generation
5 |
6 | - name: Copy Ansible config
7 | copy:
8 | src: ansible.cfg
9 | dest: /etc/ansible/ansible.cfg
10 | owner: root
11 | group: root
12 | mode: 0644
13 | backup: yes
14 | become: yes
15 |
16 | - name: Copy hosts template
17 | template:
18 | src: hosts.{{ cluster_type }}.{{ openshift_version }}.ini.j2
19 | dest: /etc/ansible/hosts
20 | owner: root
21 | group: root
22 | mode: 0644
23 | backup: yes
24 | become: yes
25 |
26 | - name: Run OpenShift prerequisites
27 | command: ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
28 | async: 900 # 15 minutes
29 | poll: 30
30 | tags: openshift_deploy_cluster
31 |
32 | - name: Run OpenShift installer (this will take a while!)
33 | command: ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml
34 | async: 5400 # 90 minutes
35 | poll: 30
36 | tags: openshift_deploy_cluster
37 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/templates/hosts.multi.3.10.ini.j2:
--------------------------------------------------------------------------------
1 | [masters]
2 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
3 |
4 | [etcd]
5 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
6 |
7 | [nodes]
8 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} openshift_node_group_name='node-config-master'
9 | {{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} openshift_node_group_name='node-config-infra'
10 | {% for app_node in (groups.openshift_role_app | intersect(groups['openshift_cluster_' + cluster_group_name])) %}
11 | {{ hostvars[app_node].private_dns_name }} openshift_node_group_name='node-config-compute'
12 | {% endfor %}
13 |
14 | [glusterfs]
15 | {{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} glusterfs_devices='[ "{{ cns_storage_device }}" ]'
16 | {% for app_node in (groups.openshift_role_app | intersect(groups['openshift_cluster_' + cluster_group_name])) %}
17 | {{ hostvars[app_node].private_dns_name }} glusterfs_devices='[ "{{ cns_storage_device }}" ]'
18 | {% endfor %}
19 |
20 | # Create an OSEv3 group that contains the masters and nodes groups
21 | [OSEv3:children]
22 | masters
23 | nodes
24 | etcd
25 | glusterfs
26 |
27 | [OSEv3:vars]
28 | # Disable checks that are meant to catch issues early. This is an automated install that
29 | # also creates the infrastructure so it is assumed that things are configured correctly.
30 | # This will speed up the execution of the installer.
31 | openshift_disable_check=memory_availability,disk_availability,docker_storage,docker_storage_driver,docker_image_availability,package_version,package_availability,package_update
32 |
33 | ###############################################################################
34 | # Common/ Required configuration variables follow #
35 | ###############################################################################
36 | # SSH user, this user should allow ssh based auth without requiring a
37 | # password. If using ssh key based auth, then the key should be managed by an
38 | # ssh agent.
39 | ansible_user=ec2-user
40 |
41 | # If ansible_user is not root, ansible_become must be set to true and the
42 | # user must be configured for passwordless sudo
43 | ansible_become=yes
44 |
45 | # Specify the deployment type. Valid values are origin and openshift-enterprise.
46 | openshift_deployment_type=openshift-enterprise
47 |
48 | # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
49 | # rely on the version running on the first master. Works best for containerized installs where we can usually
50 | # use this to lookup the latest exact version of the container images, which is the tag actually used to configure
51 | # the cluster. For RPM installations we just verify the version detected in your configured repos matches this
52 | # release.
53 | openshift_release="{{ openshift_version }}"
54 |
55 | # default subdomain to use for exposed routes, you should have wildcard dns
56 | # for *.apps.test.example.com that points at your infra nodes which will run
57 | # your router
58 | openshift_master_default_subdomain=apps.{{ openshift_public_hostname }}
59 |
60 |
61 | ###############################################################################
62 | # Additional configuration variables follow #
63 | ###############################################################################
64 |
65 | # Debug level for all OpenShift components (Defaults to 2)
66 | debug_level=2
67 |
68 | {% if openshift_version_minor is defined %}
69 | # Specify an exact container image tag to install or configure.
70 | # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
71 | # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
72 | openshift_image_tag=v{{ openshift_version_minor }}
73 |
74 | # Specify an exact rpm version to install or configure.
75 | # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
76 | # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
77 | openshift_pkg_version=-{{ openshift_version_minor }}
78 | {% endif %}
79 |
80 | # Manage openshift example imagestreams and templates during install and upgrade
81 | openshift_install_examples=true
82 |
83 | # htpasswd auth
84 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
85 | # Defining htpasswd users
86 | #openshift_master_htpasswd_users={'user1': '', 'user2': ''}
87 | # or
88 | #openshift_master_htpasswd_file=
89 |
90 | # Enable cockpit
91 | osm_use_cockpit=true
92 | #
93 | # Set cockpit plugins
94 | osm_cockpit_plugins=['cockpit-kubernetes']
95 |
96 | # If an external load balancer is used public hostname should resolve to
97 | # external load balancer address
98 | openshift_master_cluster_public_hostname={{ openshift_public_hostname }}
99 |
100 | # default project node selector
101 | osm_default_node_selector='node-role.kubernetes.io/compute=true'
102 |
103 | # OpenShift Storage Options
104 | #
105 | openshift_storage_glusterfs_storageclass=true
106 | openshift_storage_glusterfs_storageclass_default=true
107 |
108 | openshift_storage_glusterfs_block_deploy=true
109 | openshift_storage_glusterfs_block_storageclass=true
110 | openshift_storage_glusterfs_block_storageclass_default=false
111 | openshift_storage_glusterfs_block_host_vol_size=15
112 |
113 | # OpenShift Router Options
114 | #
115 | # An OpenShift router will be created during install if there are
116 | # nodes present with labels matching the default router selector,
117 | # "node-role.kubernetes.io/infra=true".
118 | #
119 | # Example:
120 | # [nodes]
121 | # node.example.com openshift_node_group_name="node-config-infra"
122 | #
123 | # Router selector (optional)
124 | # Router will only be created if nodes matching this label are present.
125 | # Default value: 'node-role.kubernetes.io/infra=true'
126 | openshift_hosted_router_selector='node-role.kubernetes.io/infra=true'
127 |
128 | {% if letsencrypt_cert_generation %}
129 | # Router certificate (optional)
130 | # Provide local certificate paths which will be configured as the
131 | # router's default certificate.
132 | openshift_hosted_router_certificate={"certfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/cert.pem", "keyfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/privkey.pem", "cafile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/chain.pem"}
133 | {% endif %}
134 |
135 | # Openshift Registry Options
136 | #
137 | # An OpenShift registry will be created during install if there are
138 | # nodes present with labels matching the default registry selector,
139 | # "node-role.kubernetes.io/infra=true".
140 | #
141 | # Example:
142 | # [nodes]
143 | # node.example.com openshift_node_group_name="node-config-infra"
144 | #
145 | # Registry selector (optional)
146 | # Registry will only be created if nodes matching this label are present.
147 | # Default value: 'node-role.kubernetes.io/infra=true'
148 | openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'
149 | #
150 | # Registry Storage Options
151 | #
152 | openshift_hosted_registry_storage_kind=glusterfs
153 | openshift_hosted_registry_storage_volume_size={{ openshift_registry_volume_size }}Gi
154 |
155 | # Metrics deployment
156 | # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
157 | #
158 | # By default metrics are not automatically deployed, set this to enable them
159 | openshift_metrics_install_metrics=true
160 | #
161 | # Storage Options
162 | # If openshift_metrics_storage_kind is unset then metrics will be stored
163 | # in an EmptyDir volume and will be deleted when the cassandra pod terminates.
164 | # Storage options A & B currently support only one cassandra pod which is
165 | # generally enough for up to 1000 pods. Additional volumes can be created
166 | # manually after the fact and metrics scaled per the docs.
167 | #
168 | openshift_metrics_cassandra_storage_type=dynamic
169 | openshift_metrics_cassandra_pvc_size=10Gi
170 | openshift_metrics_cassandra_pvc_storage_class_name=glusterfs-storage-block
171 | #
172 | # Other Metrics Options -- Common items you may wish to reconfigure, for the complete
173 | # list of options please see roles/openshift_metrics/README.md
174 | #
175 | openshift_logging_es_memory_limit=8Gi
176 | openshift_metrics_cassandra_nodeselector={'node-role.kubernetes.io/infra': 'true'}
177 | openshift_metrics_hawkular_nodeselector={'node-role.kubernetes.io/infra': 'true'}
178 | openshift_metrics_heapster_nodeselector={'node-role.kubernetes.io/infra': 'true'}
179 |
180 | # Logging deployment
181 | #
182 | # Currently logging deployment is disabled by default, enable it by setting this
183 | openshift_logging_install_logging=true
184 | #
185 | # Logging storage config
186 | #
187 | openshift_logging_es_pvc_dynamic=true
188 | openshift_logging_es_pvc_size=10Gi
189 | openshift_logging_es_pvc_storage_class_name=glusterfs-storage-block
190 | #
191 | # Other Logging Options -- Common items you may wish to reconfigure, for the complete
192 | # list of options please see roles/openshift_logging/README.md
193 | #
194 | openshift_logging_es_nodeselector={'node-role.kubernetes.io/infra': 'true'}
195 | openshift_logging_curator_nodeselector={'node-role.kubernetes.io/infra': 'true'}
196 | openshift_logging_kibana_nodeselector={'node-role.kubernetes.io/infra': 'true'}
197 |
198 | # Prometheus deployment
199 | #
200 | # Currently prometheus deployment is disabled by default, enable it by setting this
201 | openshift_hosted_prometheus_deploy=true
202 | #
203 | # Prometheus storage config
204 | # By default prometheus uses emptydir storage; if you want data to persist, you should
205 | # configure it to use pvc storage type. Each volume must be ReadWriteOnce.
206 | openshift_prometheus_storage_type=pvc
207 | openshift_prometheus_storage_class=glusterfs-storage-block
208 | openshift_prometheus_pvc_size=10Gi
209 |
210 | openshift_prometheus_alertmanager_storage_type=pvc
211 | openshift_prometheus_alertmanager_storage_class=glusterfs-storage-block
212 | openshift_prometheus_alertmanager_pvc_size=10Gi
213 |
214 | openshift_prometheus_alertbuffer_storage_type=pvc
215 | openshift_prometheus_alertbuffer_storage_class=glusterfs-storage-block
216 | openshift_prometheus_alertbuffer_pvc_size=10Gi
217 |
218 | # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
219 | os_sdn_network_plugin_name='{{ openshift_network_plugin }}'
220 |
221 | # Configure master API and console ports.
222 | openshift_master_api_port=443
223 | openshift_master_console_port=443
224 |
225 | {% if letsencrypt_cert_generation %}
226 | # Configure custom named certificates (SNI certificates)
227 | #
228 | # https://docs.openshift.org/latest/install_config/certificate_customization.html
229 | # https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
230 | #
231 | # NOTE: openshift_master_named_certificates is cached on masters and is an
232 | # additive fact, meaning that each run with a different set of certificates
233 | # will add the newly provided certificates to the cached set of certificates.
234 | #
235 | # An optional CA may be specified for each named certificate. CAs will
236 | # be added to the OpenShift CA bundle which allows for the named
237 | # certificate to be served for internal cluster communication.
238 | #
239 | # If you would like openshift_master_named_certificates to be overwritten with
240 | # the provided value, specify openshift_master_overwrite_named_certificates.
241 | openshift_master_overwrite_named_certificates=true
242 | #
243 | # Provide local certificate paths which will be deployed to masters
244 | #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
245 | #
246 | # Detected names may be overridden by specifying the "names" key
247 | openshift_master_named_certificates=[{"certfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/fullchain.pem", "keyfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/privkey.pem", "names": ["{{ openshift_public_hostname }}"]}]
248 | {% endif %}
249 |
250 | # Enable service catalog
251 | openshift_enable_service_catalog=true
252 |
253 | # Enable template service broker (requires service catalog to be enabled, above)
254 | template_service_broker_install=true
255 | template_service_broker_selector={'node-role.kubernetes.io/infra': 'true'}
256 |
257 | # Enable ansible service broker
258 | ansible_service_broker_install=true
259 | ansible_service_broker_node_selector={'node-role.kubernetes.io/infra': 'true'}
260 |
261 | # Firewall configuration
262 | #
263 | os_firewall_use_firewalld=true
264 | # You can open additional firewall ports by defining them as a list of service
265 | # names and ports/port ranges for either masters or nodes.
266 | #openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
267 | #openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
268 |
269 | # Enable unsupported configurations, things that will yield a partially
270 | # functioning cluster but would not be supported for production use
271 | #openshift_enable_unsupported_configurations=false
272 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/templates/hosts.multi.3.11.ini.j2:
--------------------------------------------------------------------------------
1 | [masters]
2 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
3 |
4 | [etcd]
5 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
6 |
7 | [nodes]
8 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} openshift_node_group_name='node-config-master'
9 | {{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} openshift_node_group_name='node-config-infra'
10 | {% for app_node in (groups.openshift_role_app | intersect(groups['openshift_cluster_' + cluster_group_name])) %}
11 | {{ hostvars[app_node].private_dns_name }} openshift_node_group_name='node-config-compute'
12 | {% endfor %}
13 |
14 | [glusterfs]
15 | {{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} glusterfs_devices='[ "{{ cns_storage_device }}" ]'
16 | {% for app_node in (groups.openshift_role_app | intersect(groups['openshift_cluster_' + cluster_group_name])) %}
17 | {{ hostvars[app_node].private_dns_name }} glusterfs_devices='[ "{{ cns_storage_device }}" ]'
18 | {% endfor %}
19 |
20 | # Create an OSEv3 group that contains the masters and nodes groups
21 | [OSEv3:children]
22 | masters
23 | nodes
24 | etcd
25 | glusterfs
26 |
27 | [OSEv3:vars]
28 | # Disable checks that are meant to catch issues early. This is an automated install that
29 | # also creates the infrastructure so it is assumed that things are configured correctly.
30 | # This will speed up the execution of the installer.
31 | openshift_disable_check=memory_availability,disk_availability,docker_storage,docker_storage_driver,docker_image_availability,package_version,package_availability,package_update
32 |
33 | ###############################################################################
34 | # Common/ Required configuration variables follow #
35 | ###############################################################################
36 | # SSH user, this user should allow ssh based auth without requiring a
37 | # password. If using ssh key based auth, then the key should be managed by an
38 | # ssh agent.
39 | ansible_user=ec2-user
40 |
41 | # If ansible_user is not root, ansible_become must be set to true and the
42 | # user must be configured for passwordless sudo
43 | ansible_become=yes
44 |
45 | # Specify the deployment type. Valid values are origin and openshift-enterprise.
46 | openshift_deployment_type=openshift-enterprise
47 |
48 | # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
49 | # rely on the version running on the first master. Works best for containerized installs where we can usually
50 | # use this to lookup the latest exact version of the container images, which is the tag actually used to configure
51 | # the cluster. For RPM installations we just verify the version detected in your configured repos matches this
52 | # release.
53 | openshift_release="{{ openshift_version }}"
54 |
55 | # default subdomain to use for exposed routes, you should have wildcard dns
56 | # for *.apps.test.example.com that points at your infra nodes which will run
57 | # your router
58 | openshift_master_default_subdomain=apps.{{ openshift_public_hostname }}
59 |
60 |
61 | ###############################################################################
62 | # Additional configuration variables follow #
63 | ###############################################################################
64 |
65 | # Debug level for all OpenShift components (Defaults to 2)
66 | debug_level=2
67 |
68 | {% if openshift_version_minor is defined %}
69 | # Specify an exact container image tag to install or configure.
70 | # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
71 | # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
72 | openshift_image_tag=v{{ openshift_version_minor }}
73 |
74 | # Specify an exact rpm version to install or configure.
75 | # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
76 | # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
77 | openshift_pkg_version=-{{ openshift_version_minor }}
78 | {% endif %}
79 |
80 | # Manage openshift example imagestreams and templates during install and upgrade
81 | openshift_install_examples=true
82 |
83 | # If oreg_url points to a registry requiring authentication, provide the following:
84 | oreg_auth_user='{{ redhat_registry_username | default(rhsm_username) }}'
85 | oreg_auth_password='{{ redhat_registry_password | default(rhsm_password) }}'
86 | # NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
87 | # oreg_auth_pass should be generated from running docker login.
88 |
89 | # htpasswd auth
90 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
91 | # Defining htpasswd users
92 | #openshift_master_htpasswd_users={'user1': '', 'user2': ''}
93 | # or
94 | #openshift_master_htpasswd_file=
95 |
96 | # Enable cockpit
97 | osm_use_cockpit=true
98 | #
99 | # Set cockpit plugins
100 | osm_cockpit_plugins=['cockpit-kubernetes']
101 |
102 | # If an external load balancer is used public hostname should resolve to
103 | # external load balancer address
104 | openshift_master_cluster_public_hostname={{ openshift_public_hostname }}
105 |
106 | # default project node selector
107 | osm_default_node_selector='node-role.kubernetes.io/compute=true'
108 |
109 | # OpenShift Storage Options
110 | #
111 | openshift_storage_glusterfs_storageclass=true
112 | openshift_storage_glusterfs_storageclass_default=true
113 |
114 | openshift_storage_glusterfs_block_deploy=true
115 | openshift_storage_glusterfs_block_storageclass=true
116 | openshift_storage_glusterfs_block_storageclass_default=false
117 | openshift_storage_glusterfs_block_host_vol_size=15
118 |
119 | # OpenShift Router Options
120 | #
121 | # An OpenShift router will be created during install if there are
122 | # nodes present with labels matching the default router selector,
123 | # "node-role.kubernetes.io/infra=true".
124 | #
125 | # Example:
126 | # [nodes]
127 | # node.example.com openshift_node_group_name="node-config-infra"
128 | #
129 | # Router selector (optional)
130 | # Router will only be created if nodes matching this label are present.
131 | # Default value: 'node-role.kubernetes.io/infra=true'
132 | openshift_hosted_router_selector='node-role.kubernetes.io/infra=true'
133 |
134 | {% if letsencrypt_cert_generation %}
135 | # Router certificate (optional)
136 | # Provide local certificate paths which will be configured as the
137 | # router's default certificate.
138 | openshift_hosted_router_certificate={"certfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/cert.pem", "keyfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/privkey.pem", "cafile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/chain.pem"}
139 | {% endif %}
140 |
141 | # Openshift Registry Options
142 | #
143 | # An OpenShift registry will be created during install if there are
144 | # nodes present with labels matching the default registry selector,
145 | # "node-role.kubernetes.io/infra=true".
146 | #
147 | # Example:
148 | # [nodes]
149 | # node.example.com openshift_node_group_name="node-config-infra"
150 | #
151 | # Registry selector (optional)
152 | # Registry will only be created if nodes matching this label are present.
153 | # Default value: 'node-role.kubernetes.io/infra=true'
154 | openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'
155 | #
156 | # Registry Storage Options
157 | #
158 | openshift_hosted_registry_storage_kind=glusterfs
159 | openshift_hosted_registry_storage_volume_size={{ openshift_registry_volume_size }}Gi
160 |
161 | # Metrics deployment
162 | # See: https://docs.openshift.com/container-platform/latest/install_config/cluster_metrics.html
163 | #
164 | # By default metrics are not automatically deployed, set this to enable them
165 | openshift_metrics_install_metrics=true
166 | #
167 | # metrics-server deployment
168 | # By default, metrics-server is not automatically deployed, unless metrics is also
169 | # deployed. Deploying metrics-server is necessary to use the HorizontalPodAutoscaler.
170 | # Set this to enable it.
171 | openshift_metrics_server_install=true
172 | #
173 | # Storage Options
174 | # If openshift_metrics_storage_kind is unset then metrics will be stored
175 | # in an EmptyDir volume and will be deleted when the cassandra pod terminates.
176 | # Storage options A & B currently support only one cassandra pod which is
177 | # generally enough for up to 1000 pods. Additional volumes can be created
178 | # manually after the fact and metrics scaled per the docs.
179 | #
180 | openshift_metrics_cassandra_storage_type=dynamic
181 | openshift_metrics_cassandra_pvc_size=10Gi
182 | openshift_metrics_cassandra_pvc_storage_class_name=glusterfs-storage-block
183 | #
184 | # Other Metrics Options -- Common items you may wish to reconfigure, for the complete
185 | # list of options please see roles/openshift_metrics/README.md
186 | #
187 | openshift_logging_es_memory_limit=8Gi
188 | openshift_metrics_cassandra_nodeselector={'node-role.kubernetes.io/infra': 'true'}
189 | openshift_metrics_hawkular_nodeselector={'node-role.kubernetes.io/infra': 'true'}
190 | openshift_metrics_heapster_nodeselector={'node-role.kubernetes.io/infra': 'true'}
191 |
192 | # Cluster monitoring
193 | #
194 | # Cluster monitoring is enabled by default, disable it by setting
195 | openshift_cluster_monitoring_operator_install=true
196 | #
197 | # Cluster monitoring configuration variables allow setting the amount of
198 | # storage requested through PersistentVolumeClaims.
199 | #
200 | openshift_cluster_monitoring_operator_prometheus_storage_enabled=true
201 | openshift_cluster_monitoring_operator_prometheus_storage_class_name=glusterfs-storage-block
202 | openshift_cluster_monitoring_operator_prometheus_storage_capacity="10Gi"
203 |
204 | openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true
205 | openshift_cluster_monitoring_operator_alertmanager_storage_class_name=glusterfs-storage-block
206 | openshift_cluster_monitoring_operator_alertmanager_storage_capacity="10Gi"
207 | #
208 | # Other cluster monitoring options
209 | #
210 | openshift_cluster_monitoring_operator_node_selector={'node-role.kubernetes.io/infra': 'true'}
211 |
212 | # Logging deployment
213 | #
214 | # Currently logging deployment is disabled by default, enable it by setting this
215 | openshift_logging_install_logging=true
216 | #
217 | # Logging storage config
218 | #
219 | openshift_logging_es_pvc_dynamic=true
220 | openshift_logging_es_pvc_size=10Gi
221 | openshift_logging_es_pvc_storage_class_name=glusterfs-storage-block
222 | #
223 | # Other Logging Options -- Common items you may wish to reconfigure, for the complete
224 | # list of options please see roles/openshift_logging/README.md
225 | #
226 | openshift_logging_es_nodeselector={'node-role.kubernetes.io/infra': 'true'}
227 | openshift_logging_curator_nodeselector={'node-role.kubernetes.io/infra': 'true'}
228 | openshift_logging_kibana_nodeselector={'node-role.kubernetes.io/infra': 'true'}
229 |
230 | # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
231 | os_sdn_network_plugin_name='{{ openshift_network_plugin }}'
232 |
233 | # Configure master API and console ports.
234 | openshift_master_api_port=443
235 | openshift_master_console_port=443
236 |
237 | {% if letsencrypt_cert_generation %}
238 | # Configure custom named certificates (SNI certificates)
239 | #
240 | # https://docs.okd.io/latest/install_config/certificate_customization.html
241 | # https://docs.openshift.com/container-platform/latest/install_config/certificate_customization.html
242 | #
243 | # NOTE: openshift_master_named_certificates is cached on masters and is an
244 | # additive fact, meaning that each run with a different set of certificates
245 | # will add the newly provided certificates to the cached set of certificates.
246 | #
247 | # An optional CA may be specified for each named certificate. CAs will
248 | # be added to the OpenShift CA bundle which allows for the named
249 | # certificate to be served for internal cluster communication.
250 | #
251 | # If you would like openshift_master_named_certificates to be overwritten with
252 | # the provided value, specify openshift_master_overwrite_named_certificates.
253 | openshift_master_overwrite_named_certificates=true
254 | #
255 | # Provide local certificate paths which will be deployed to masters
256 | #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
257 | #
258 | # Detected names may be overridden by specifying the "names" key
259 | openshift_master_named_certificates=[{"certfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/fullchain.pem", "keyfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/privkey.pem", "names": ["{{ openshift_public_hostname }}"]}]
260 | {% endif %}
261 |
262 | # Enable service catalog
263 | openshift_enable_service_catalog=true
264 |
265 | # Enable template service broker (requires service catalog to be enabled, above)
266 | template_service_broker_install=true
267 | template_service_broker_selector={'node-role.kubernetes.io/infra': 'true'}
268 |
269 | # Enable ansible service broker
270 | ansible_service_broker_install=true
271 | ansible_service_broker_node_selector={'node-role.kubernetes.io/infra': 'true'}
272 |
273 | # Firewall configuration
274 | os_firewall_use_firewalld=true
275 | # You can open additional firewall ports by defining them as a list of service
276 | # names and ports/port ranges for either masters or nodes.
277 | #openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
278 | #openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
279 |
280 | # Enable unsupported configurations, things that will yield a partially
281 | # functioning cluster but would not be supported for production use
282 | #openshift_enable_unsupported_configurations=false
283 |
284 | # FIX for gluster and 3.11.latest - enabled below; comment out to disable (requires passing --dev to op.py)
285 | # see: https://access.redhat.com/solutions/3949971
286 | openshift_storage_glusterfs_image=registry.redhat.io/rhgs3/rhgs-server-rhel7:v3.11
287 | openshift_storage_glusterfs_block_image=registry.redhat.io/rhgs3/rhgs-gluster-block-prov-rhel7:v3.11
288 | openshift_storage_glusterfs_heketi_image=registry.redhat.io/rhgs3/rhgs-volmanager-rhel7:v3.11
289 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/templates/hosts.multi.3.9.ini.j2:
--------------------------------------------------------------------------------
1 | [masters]
2 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
3 |
4 | [etcd]
5 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
6 |
7 | [nodes]
8 | {{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }}
9 | {{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} openshift_node_labels="{'role': 'infra'}"
10 | {% for app_node in (groups.openshift_role_app | intersect(groups['openshift_cluster_' + cluster_group_name])) %}
11 | {{ hostvars[app_node].private_dns_name }} openshift_node_labels="{'role': 'app'}"
12 | {% endfor %}
13 |
14 | [glusterfs]
15 | {{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name])).0].private_dns_name }} glusterfs_devices='[ "{{ cns_storage_device }}" ]'
16 | {% for app_node in (groups.openshift_role_app | intersect(groups['openshift_cluster_' + cluster_group_name])) %}
17 | {{ hostvars[app_node].private_dns_name }} glusterfs_devices='[ "{{ cns_storage_device }}" ]'
18 | {% endfor %}
19 |
20 | # Create an OSEv3 group that contains the masters and nodes groups
21 | [OSEv3:children]
22 | masters
23 | nodes
24 | etcd
25 | glusterfs
26 |
27 | [OSEv3:vars]
28 | # Disable checks that are meant to catch issues early. This is an automated install that
29 | # also creates the infrastructure so it is assumed that things are configured correctly.
30 | # This will speed up the execution of the installer.
31 | openshift_disable_check=memory_availability,disk_availability,docker_storage,docker_storage_driver,docker_image_availability,package_version,package_availability,package_update
32 |
33 | ###############################################################################
34 | # Common/ Required configuration variables follow #
35 | ###############################################################################
36 | # SSH user, this user should allow ssh based auth without requiring a
37 | # password. If using ssh key based auth, then the key should be managed by an
38 | # ssh agent.
39 | ansible_user=ec2-user
40 |
41 | # If ansible_user is not root, ansible_become must be set to true and the
42 | # user must be configured for passwordless sudo
43 | ansible_become=yes
44 |
45 | # Specify the deployment type. Valid values are origin and openshift-enterprise.
46 | #openshift_deployment_type=origin
47 | openshift_deployment_type=openshift-enterprise
48 |
49 | # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
50 | # rely on the version running on the first master. Works best for containerized installs where we can usually
51 | # use this to lookup the latest exact version of the container images, which is the tag actually used to configure
52 | # the cluster. For RPM installations we just verify the version detected in your configured repos matches this
53 | # release.
54 | openshift_release=v{{ openshift_version }}
55 |
56 | # default subdomain to use for exposed routes, you should have wildcard dns
57 | # for *.apps.test.example.com that points at your infra nodes which will run
58 | # your router
59 | openshift_master_default_subdomain=apps.{{ openshift_public_hostname }}
60 |
61 | ###############################################################################
62 | # Additional configuration variables follow #
63 | ###############################################################################
64 |
65 | # Debug level for all OpenShift components (Defaults to 2)
66 | debug_level=2
67 |
68 | {% if openshift_version_minor is defined %}
69 | # Specify an exact container image tag to install or configure.
70 | # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
71 | # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
72 | openshift_image_tag=v{{ openshift_version_minor }}
73 |
74 | # Specify an exact rpm version to install or configure.
75 | # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
76 | # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
77 | openshift_pkg_version=-{{ openshift_version_minor }}
78 | {% endif %}
79 |
80 | # Manage openshift example imagestreams and templates during install and upgrade
81 | openshift_install_examples=true
82 |
83 | # htpasswd auth
84 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
85 | # Defining htpasswd users
86 | #openshift_master_htpasswd_users={'user1': '', 'user2': ''}
87 | # or
88 | #openshift_master_htpasswd_file=
89 |
90 | # Cloud Provider Configuration
91 | #
92 | # AWS
93 | #openshift_cloudprovider_kind=aws
94 | # Note: IAM profiles may be used instead of storing API credentials on disk.
95 | #openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
96 | #openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
97 | #openshift_clusterid="{{ cluster_name_verbose }}"
98 |
99 | # Enable cockpit
100 | osm_use_cockpit=true
101 | #
102 | # Set cockpit plugins
103 | osm_cockpit_plugins=['cockpit-kubernetes']
104 |
105 | # If an external load balancer is used public hostname should resolve to
106 | # external load balancer address
107 | openshift_master_cluster_public_hostname={{ openshift_public_hostname }}
108 |
109 | # default project node selector
110 | osm_default_node_selector='role=app'
111 |
112 | # OpenShift Storage Options
113 | #
114 | openshift_storage_glusterfs_storageclass=true
115 | openshift_storage_glusterfs_storageclass_default=true
116 |
117 | # OpenShift Router Options
118 | #
119 | # An OpenShift router will be created during install if there are
120 | # nodes present with labels matching the default router selector,
121 | # "region=infra". Set openshift_node_labels per node as needed in
122 | # order to label nodes.
123 | #
124 | # Example:
125 | # [nodes]
126 | # node.example.com openshift_node_labels="{'region': 'infra'}"
127 | #
128 | # Router selector (optional)
129 | # Router will only be created if nodes matching this label are present.
130 | # Default value: 'region=infra'
131 | openshift_hosted_router_selector='role=infra'
132 |
133 | {% if letsencrypt_cert_generation %}
134 | # Router certificate (optional)
135 | # Provide local certificate paths which will be configured as the
136 | # router's default certificate.
137 | openshift_hosted_router_certificate={"certfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/cert.pem", "keyfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/privkey.pem", "cafile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/chain.pem"}
138 | {% endif %}
139 |
140 | # Openshift Registry Options
141 | #
142 | # An OpenShift registry will be created during install if there are
143 | # nodes present with labels matching the default registry selector,
144 | # "region=infra". Set openshift_node_labels per node as needed in
145 | # order to label nodes.
146 | #
147 | # Example:
148 | # [nodes]
149 | # node.example.com openshift_node_labels="{'region': 'infra'}"
150 | #
151 | # Registry selector (optional)
152 | # Registry will only be created if nodes matching this label are present.
153 | # Default value: 'region=infra'
154 | openshift_hosted_registry_selector='role=infra'
155 |
156 | # Registry Storage Options
157 | #
158 | openshift_hosted_registry_storage_kind=glusterfs
159 | openshift_hosted_registry_storage_volume_size={{ openshift_registry_volume_size }}Gi
160 |
161 | # Metrics deployment
162 | # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
163 | #
164 | # By default metrics are not automatically deployed, set this to enable them
165 | openshift_metrics_install_metrics=true
166 | #
167 | # Storage Options
168 | openshift_metrics_cassandra_storage_type=dynamic
169 | #
170 | # Other Metrics Options -- Common items you may wish to reconfigure, for the complete
171 | # list of options please see roles/openshift_metrics/README.md
172 | #
173 | openshift_metrics_cassandra_nodeselector={'role': 'infra'}
174 | openshift_metrics_hawkular_nodeselector={'role': 'infra'}
175 | openshift_metrics_heapster_nodeselector={'role': 'infra'}
176 |
177 | # Logging deployment
178 | #
179 | # Currently logging deployment is disabled by default, enable it by setting this
180 | openshift_logging_install_logging=true
181 | #
182 | # Logging storage config
183 | openshift_logging_elasticsearch_storage_type=pvc
184 | #
185 | # Other Logging Options -- Common items you may wish to reconfigure, for the complete
186 | # list of options please see roles/openshift_logging/README.md
187 | #
188 | openshift_logging_es_nodeselector={'role': 'infra'}
189 | openshift_logging_curator_nodeselector={'role': 'infra'}
190 | openshift_logging_kibana_nodeselector={'role': 'infra'}
191 |
192 | # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
193 | os_sdn_network_plugin_name='{{ openshift_network_plugin }}'
194 |
195 | # Configure master API and console ports.
196 | openshift_master_api_port=443
197 | openshift_master_console_port=443
198 |
199 | {% if letsencrypt_cert_generation %}
200 | # Configure custom named certificates (SNI certificates)
201 | #
202 | # https://docs.openshift.org/latest/install_config/certificate_customization.html
203 | # https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
204 | #
205 | # NOTE: openshift_master_named_certificates is cached on masters and is an
206 | # additive fact, meaning that each run with a different set of certificates
207 | # will add the newly provided certificates to the cached set of certificates.
208 | #
209 | # An optional CA may be specified for each named certificate. CAs will
210 | # be added to the OpenShift CA bundle which allows for the named
211 | # certificate to be served for internal cluster communication.
212 | #
213 | # If you would like openshift_master_named_certificates to be overwritten with
214 | # the provided value, specify openshift_master_overwrite_named_certificates.
215 | openshift_master_overwrite_named_certificates=true
216 | #
217 | # Provide local certificate paths which will be deployed to masters
218 | #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
219 | #
220 | # Detected names may be overridden by specifying the "names" key
221 | openshift_master_named_certificates=[{"certfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/fullchain.pem", "keyfile": "/etc/letsencrypt/live/{{ openshift_public_hostname }}/privkey.pem", "names": ["{{ openshift_public_hostname }}"]}]
222 | {% endif %}
223 |
224 | # Enable service catalog
225 | openshift_enable_service_catalog=true
226 |
227 | # Enable template service broker (requires service catalog to be enabled, above)
228 | template_service_broker_install=true
229 | template_service_broker_selector={'role': 'infra'}
230 |
231 | # Enable ansible service broker
232 | ansible_service_broker_install=true
233 | ansible_service_broker_node_selector={'role': 'infra'}
234 |
235 | ###############################################################################
236 | # Workaround for bug in 3.9.30 #
237 | # #
238 | # KB: https://access.redhat.com/solutions/3480921 #
239 | # BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1583500 #
240 | # BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1516534 #
241 | ###############################################################################
242 |
243 | oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
244 | openshift_storage_glusterfs_image=registry.access.redhat.com/rhgs3/rhgs-server-rhel7
245 | openshift_storage_glusterfs_heketi_image=registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7
246 | openshift_storage_glusterfs_block_image=registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7
247 | openshift_examples_modify_imagestreams=true
248 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - install_openshift
--------------------------------------------------------------------------------
/playbooks/roles/install_openshift/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | cluster_type: multi
4 |
5 | app_node_count: 2
6 |
7 | cns_storage_device: /dev/xvdb
8 | cns_block_host_volume_size: 50
9 |
10 | openshift_registry_volume_size: 20
11 |
12 | openshift_network_plugin: redhat/openshift-ovs-networkpolicy
13 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | openshift_public_hostname: "{{ cluster_name }}.{{ openshift_base_domain }}"
4 |
5 | cluster_name_verbose: "{{ openshift_public_hostname | replace('.', '-') }}"
6 | cluster_group_name: "{{ cluster_name_verbose | replace('-', '_') }}"
7 |
8 | keys_dir: "{{ playbook_dir }}/keys"
9 | ec2_key_name: "{{ openshift_public_hostname | replace('.', '-') }}"
10 | ec2_key_file: "{{ keys_dir }}/{{ ec2_key_name }}.pem"
11 |
12 | ec2_ami_ids:
13 | ap-south-1:
14 | cloud_access: ami-c2603bad
15 | hourly: ami-5b673c34
16 | eu-west-3:
17 | cloud_access: ami-5326902e
18 | hourly: ami-5026902d
19 | eu-west-2:
20 | cloud_access: ami-e81cfa8f
21 | hourly: ami-7c1bfd1b
22 | eu-west-1:
23 | cloud_access: ami-7776200e
24 | hourly: ami-7c491f05
25 | ap-northeast-2:
26 | cloud_access: ami-c213bfac
27 | hourly: ami-3eee4150
28 | ap-northeast-1:
29 | cloud_access: ami-2a0f5d4c
30 | hourly: ami-6b0d5f0d
31 | sa-east-1:
32 | cloud_access: ami-4cb2e620
33 | hourly: ami-b0b7e3dc
34 | ca-central-1:
35 | cloud_access: ami-09f3756d
36 | hourly: ami-49f0762d
37 | ap-southeast-1:
38 | cloud_access: ami-5a134c26
39 | hourly: ami-76144b0a
40 | ap-southeast-2:
41 | cloud_access: ami-3d5e935f
42 | hourly: ami-67589505
43 | eu-central-1:
44 | cloud_access: ami-526d3eb9
45 | hourly: ami-c86c3f23
46 | us-east-1:
47 | cloud_access: ami-0d70a070
48 | hourly: ami-6871a115
49 | us-east-2:
50 | cloud_access: ami-8f2617ea
51 | hourly: ami-03291866
52 | us-west-1:
53 | cloud_access: ami-e4716784
54 | hourly: ami-18726478
55 | us-west-2:
56 | cloud_access: ami-c6e27cbe
57 | hourly: ami-28e07e50
58 |
59 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/defaults/rhel_ami.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Regenerate the ec2_ami_ids mapping used in defaults/main.yml.
# Queries every EC2 region for the newest RHEL 7.5 GA AMIs published by
# Red Hat's AWS account and prints them as a YAML dictionary on stdout.
#
# Requires: aws CLI configured with credentials permitted to call
# ec2:DescribeRegions and ec2:DescribeImages.

set -euo pipefail

# Red Hat's official AWS account that owns the RHEL AMIs.
readonly RHEL_OWNER_ID=309956199498

# Print the newest AMI ID matching a name pattern in a region.
#   $1 = region name, $2 = image name pattern (e.g. "RHEL-7.5?*GA*Access*")
# "Newest" is approximated by reverse-sorting the IDs, matching the
# behavior of the original script.
latest_ami() {
    local region=$1 pattern=$2
    aws ec2 describe-images \
        --owners "${RHEL_OWNER_ID}" \
        --query 'Images[*].[ImageId]' \
        --filters "Name=name,Values=${pattern}" \
        --region "${region}" \
        --output text | sort -r | head -n 1
}

ec2_regions=$(aws ec2 describe-regions --query 'Regions[].{Name:RegionName}' --output text)

echo "ec2_ami_ids:"

# Word-splitting of ${ec2_regions} is intentional: the CLI returns a
# whitespace-separated list of region names.
for ec2_region in ${ec2_regions};
do
    echo "  ${ec2_region}:"
    echo "    cloud_access: $(latest_ami "${ec2_region}" 'RHEL-7.5?*GA*Access*')"
    echo "    hourly: $(latest_ami "${ec2_region}" 'RHEL-7.5?*GA*Hourly*')"
done
16 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/files/user_data/app.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | users:
3 | - default
4 |
5 | system_info:
6 | default_user:
7 | name: ec2-user
8 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/files/user_data/infra.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | users:
3 | - default
4 |
5 | system_info:
6 | default_user:
7 | name: ec2-user
8 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/files/user_data/master.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | users:
3 | - default
4 |
5 | system_info:
6 | default_user:
7 | name: ec2-user
8 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for provision_aws
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/ec2.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create master instance(s)
4 | ec2:
5 | region: "{{ aws_region }}"
6 | instance_type: "{{ ec2_instance_type_master }}"
7 | image: "{{ ec2_ami_ids[aws_region][ec2_ami_type] }}"
8 | key_name: "{{ ec2_key_name }}"
9 | user_data: "{{ lookup('template', 'user_data/master.yml.j2') }}"
10 | vpc_subnet_id: "{{ ec2_public_subnet_id }}"
11 | group_id:
12 | - "{{ ec2_create_security_group_ssh.group_id }}"
13 | - "{{ ec2_create_security_group_cluster.group_id }}"
14 | - "{{ ec2_create_security_group_master.group_id }}"
15 | volumes:
16 | - device_name: /dev/sda1
17 | volume_type: gp2
18 | volume_size: "{{ ec2_volume_size_master_root }}"
19 | delete_on_termination: yes
20 | instance_tags:
21 | OpenShiftClusterName: "{{ cluster_name }}"
22 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
23 | OpenShiftRole: master
24 | exact_count: "{{ master_node_count }}"
25 | count_tag:
26 | OpenShiftClusterName: "{{ cluster_name }}"
27 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
28 | OpenShiftRole: master
29 | wait: yes
30 | register: ec2_create_master_instances
31 |
32 | - name: Get list of master instance(s) IDs
33 | set_fact:
34 | ec2_create_master_instance_ids: "{{ ec2_create_master_instances.tagged_instances | map(attribute='id') | list | sort }}"
35 |
36 | - name: Tag master instance(s)
37 | ec2_tag:
38 | region: "{{ aws_region }}"
39 | resource: "{{ item }}"
40 | tags:
41 | Name: "{{ cluster_name_verbose }}-master{{ '%02d' | format(idx + 1) }}"
42 | state: present
43 | loop: "{{ ec2_create_master_instance_ids }}"
44 | loop_control:
45 | index_var: idx
46 |
47 | - name: Tag first master instance as the bastion
48 | ec2_tag:
49 | region: "{{ aws_region }}"
50 | resource: "{{ ec2_create_master_instance_ids[0] }}"
51 | tags:
52 | OpenShiftBastion: "true"
53 | state: present
54 |
55 | - name: Create infra instance(s)
56 | ec2:
57 | region: "{{ aws_region }}"
58 | instance_type: "{{ ec2_instance_type_infra }}"
59 | image: "{{ ec2_ami_ids[aws_region][ec2_ami_type] }}"
60 | key_name: "{{ ec2_key_name }}"
61 | user_data: "{{ lookup('file', 'user_data/infra.yml') }}"
62 | vpc_subnet_id: "{{ ec2_public_subnet_id }}"
63 | group_id:
64 | - "{{ ec2_create_security_group_ssh.group_id }}"
65 | - "{{ ec2_create_security_group_cluster.group_id }}"
66 | - "{{ ec2_create_security_group_infra.group_id }}"
67 | volumes:
68 | - device_name: /dev/sda1
69 | volume_type: gp2
70 | volume_size: "{{ ec2_volume_size_infra_root }}"
71 | delete_on_termination: yes
72 | - device_name: /dev/xvdb
73 | volume_type: gp2
74 | volume_size: "{{ ec2_volume_size_cns }}"
75 | delete_on_termination: yes
76 | instance_tags:
77 | OpenShiftClusterName: "{{ cluster_name }}"
78 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
79 | OpenShiftRole: infra
80 | exact_count: "{{ infra_node_count }}"
81 | count_tag:
82 | OpenShiftClusterName: "{{ cluster_name }}"
83 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
84 | OpenShiftRole: infra
85 | wait: yes
86 | register: ec2_create_infra_instances
87 |
88 | - name: Get list of infra instance(s) IDs
89 | set_fact:
90 | ec2_create_infra_instance_ids: "{{ ec2_create_infra_instances.tagged_instances | map(attribute='id') | list | sort }}"
91 |
92 | - name: Tag infra instance(s)
93 | ec2_tag:
94 | region: "{{ aws_region }}"
95 | resource: "{{ item }}"
96 | tags:
97 | Name: "{{ cluster_name_verbose }}-infra{{ '%02d' | format(idx + 1) }}"
98 | state: present
99 | loop: "{{ ec2_create_infra_instance_ids }}"
100 | loop_control:
101 | index_var: idx
102 |
103 | - name: Create app instance(s)
104 | ec2:
105 | region: "{{ aws_region }}"
106 | instance_type: "{{ ec2_instance_type_app }}"
107 | image: "{{ ec2_ami_ids[aws_region][ec2_ami_type] }}"
108 | key_name: "{{ ec2_key_name }}"
109 | user_data: "{{ lookup('file', 'user_data/app.yml') }}"
110 | vpc_subnet_id: "{{ ec2_public_subnet_id }}"
111 | group_id:
112 | - "{{ ec2_create_security_group_ssh.group_id }}"
113 | - "{{ ec2_create_security_group_cluster.group_id }}"
114 | volumes:
115 | - device_name: /dev/sda1
116 | volume_type: gp2
117 | volume_size: "{{ ec2_volume_size_app_root }}"
118 | delete_on_termination: yes
119 | - device_name: /dev/xvdb
120 | volume_type: gp2
121 | volume_size: "{{ ec2_volume_size_cns }}"
122 | delete_on_termination: yes
123 | instance_tags:
124 | OpenShiftClusterName: "{{ cluster_name }}"
125 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
126 | OpenShiftRole: app
127 | exact_count: "{{ app_node_count }}"
128 | count_tag:
129 | OpenShiftClusterName: "{{ cluster_name }}"
130 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
131 | OpenShiftRole: app
132 | wait: yes
133 | register: ec2_create_app_instances
134 |
135 | - name: Get list of app instance(s) IDs
136 | set_fact:
137 | ec2_create_app_instance_ids: "{{ ec2_create_app_instances.tagged_instances | map(attribute='id') | list | sort }}"
138 |
139 | - name: Tag app instance(s)
140 | ec2_tag:
141 | region: "{{ aws_region }}"
142 | resource: "{{ item }}"
143 | tags:
144 | Name: "{{ cluster_name_verbose }}-app{{ '%02d' | format(idx + 1) }}"
145 | state: present
146 | loop: "{{ ec2_create_app_instance_ids }}"
147 | loop_control:
148 | index_var: idx
149 |
150 | # NOTE: The _odd_ syntax for the tags is due to needing to have
151 | # a variable name in the tag key.
152 | - name: Add Kubernetes cluster tag to instances
153 | ec2_tag:
154 | region: "{{ aws_region }}"
155 | resource: "{{ item }}"
156 | tags: "{
157 | 'kubernetes.io/cluster/{{ cluster_name_verbose }}': '{{ cluster_name_verbose }}'
158 | }"
159 | state: present
160 | with_items:
161 | - "{{ ec2_create_master_instance_ids }}"
162 | - "{{ ec2_create_infra_instance_ids }}"
163 | - "{{ ec2_create_app_instance_ids }}"
164 |
165 | - name: Create elastic IP for master instance
166 | ec2_eip:
167 | device_id: "{{ ec2_create_master_instance_ids[0] }}"
168 | region: "{{ aws_region }}"
169 | state: present
170 | in_vpc: yes
171 | when: ec2_create_master_instance_ids | length == 1
172 |
173 | - name: Create elastic IP for infra instance
174 | ec2_eip:
175 | device_id: "{{ ec2_create_infra_instance_ids[0] }}"
176 | region: "{{ aws_region }}"
177 | state: present
178 | in_vpc: yes
179 | when: ec2_create_infra_instance_ids | length == 1
180 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/ec2_teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Find EC2 instances
4 | ec2_instance_facts:
5 | region: "{{ aws_region }}"
6 | filters:
7 | tag:OpenShiftClusterName: "{{ cluster_name }}"
8 | tag:OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
9 | register: ec2_find_instances
10 |
11 | - name: Delete elastic IPs for instances
12 | ec2_eip:
13 | device_id: "{{ item.instance_id }}"
14 | region: "{{ aws_region }}"
15 | release_on_disassociation: yes
16 | state: absent
17 | with_items: "{{ ec2_find_instances.instances }}"
18 |
19 | - name: Delete EC2 instances
20 | ec2_instance:
21 | region: "{{ aws_region }}"
22 | filters:
23 | tag:OpenShiftClusterName: "{{ cluster_name }}"
24 | tag:OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
25 | state: absent
26 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/keypair.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create EC2 key pair
4 | ec2_key:
5 | name: "{{ ec2_key_name }}"
6 | region: "{{ aws_region }}"
7 | force: no
8 | state: present
9 | register: ec2_key
10 |
11 | - name: Create keys directory
12 | file:
13 | path: "{{ keys_dir }}"
14 | state: directory
15 | owner: "{{ ansible_user_uid }}"
16 | group: "{{ ansible_user_gid }}"
17 | mode: 0700
18 | become: yes
19 |
20 | - name: Output EC2 private key
21 | copy:
22 | dest: "{{ ec2_key_file }}"
23 | content: "{{ ec2_key.key.private_key }}"
24 | owner: "{{ ansible_user_uid }}"
25 | group: "{{ ansible_user_gid }}"
26 | mode: 0600
27 | when: ec2_key.changed
28 |
29 | - name: Set permissions for EC2 private key
30 | file:
31 | path: "{{ ec2_key_file }}"
32 | state: file
33 | owner: "{{ ansible_user_uid }}"
34 | group: "{{ ansible_user_gid }}"
35 | mode: 0600
36 | become: yes
37 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/keypair_teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Delete EC2 key pair
4 | ec2_key:
5 | name: "{{ ec2_key_name }}"
6 | region: "{{ aws_region }}"
7 | state: absent
8 | register: ec2_key
9 |
10 | - name: Delete EC2 private key
11 | file:
12 | path: "{{ ec2_key_file }}"
13 | state: absent
14 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - import_tasks: keypair.yml
4 |
5 | - import_tasks: vpc.yml
6 |
7 | - import_tasks: security_groups.yml
8 |
9 | - import_tasks: ec2.yml
10 |
11 | - import_tasks: route53.yml
12 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/route53.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Refresh inventory to update EC2 instances information
4 | meta: refresh_inventory
5 |
6 | - name: Find Route53 hosted zone
7 | route53_facts:
8 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
9 | query: hosted_zone
10 | hosted_zone_method: details
11 | register: route53_find_hosted_zone
12 |
13 | - set_fact:
14 | route53_hosted_zone: "{{ route53_find_hosted_zone.HostedZone.Name }}"
15 |
16 | - name: Create DNS entry for bastion
17 | route53:
18 | record: "bastion.{{ openshift_public_hostname }}"
19 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
20 | zone: "{{ route53_hosted_zone }}"
21 | type: A
22 | ttl: 300
23 | value: "{{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name]) | first)].public_ip_address }}"
24 | state: present
25 |
26 | - name: Create DNS entry for API
27 | route53:
28 | record: "{{ openshift_public_hostname }}"
29 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
30 | zone: "{{ route53_hosted_zone }}"
31 | type: A
32 | ttl: 300
33 | value: "{{ hostvars[(groups.openshift_role_master | intersect(groups['openshift_cluster_' + cluster_group_name]) | first)].public_ip_address }}"
34 | state: present
35 |
36 | - name: Create wildcard DNS entry for apps
37 | route53:
38 | record: "*.apps.{{ openshift_public_hostname }}"
39 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
40 | zone: "{{ route53_hosted_zone }}"
41 | type: A
42 | ttl: 300
43 | value: "{{ hostvars[(groups.openshift_role_infra | intersect(groups['openshift_cluster_' + cluster_group_name]) | first)].public_ip_address }}"
44 | state: present
45 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/route53_teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Find Route53 hosted zone
4 | route53_facts:
5 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
6 | query: hosted_zone
7 | hosted_zone_method: details
8 | register: route53_find_hosted_zone
9 |
10 | - set_fact:
11 | route53_hosted_zone: "{{ route53_find_hosted_zone.HostedZone.Name }}"
12 |
13 | - name: Get DNS entry for bastion
14 | route53:
15 | record: "bastion.{{ openshift_public_hostname }}"
16 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
17 | zone: "{{ route53_hosted_zone }}"
18 | type: A
19 | state: get
20 | register: route53_get_bastion_record
21 |
22 | - name: Get DNS entry for API
23 | route53:
24 | record: "{{ openshift_public_hostname }}"
25 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
26 | zone: "{{ route53_hosted_zone }}"
27 | type: A
28 | state: get
29 | register: route53_get_api_record
30 |
31 | - name: Get wildcard DNS entry for apps
32 | route53:
33 | record: "*.apps.{{ openshift_public_hostname }}"
34 | hosted_zone_id: "{{ route53_hosted_zone_id }}"
35 | zone: "{{ route53_hosted_zone }}"
36 | type: A
37 | state: get
38 | register: route53_get_apps_wildcard_record
39 |
40 | - name: Delete DNS records
41 | route53:
42 | record: "{{ item.set.record }}"
43 | zone: "{{ item.set.zone }}"
44 | ttl: "{{ item.set.ttl }}"
45 | type: "{{ item.set.type }}"
46 | value: "{{ item.set.value }}"
47 | state: absent
48 | with_items:
49 | - "{{ route53_get_bastion_record }}"
50 | - "{{ route53_get_api_record }}"
51 | - "{{ route53_get_apps_wildcard_record }}"
52 | when: item.set
53 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/security_groups.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create SSH security group
4 | ec2_group:
5 | name: "{{ cluster_name_verbose }}-ssh"
6 | description: "{{ cluster_name_verbose }}-ssh"
7 | vpc_id: "{{ ec2_vpc_id }}"
8 | region: "{{ aws_region }}"
9 | rules:
10 | - proto: tcp
11 | cidr_ip: 0.0.0.0/0
12 | ports: 22
13 | tags:
14 | Name: "{{ cluster_name_verbose }}-ssh"
15 | OpenShiftClusterName: "{{ cluster_name }}"
16 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
17 | state: present
18 | register: ec2_create_security_group_ssh
19 |
20 | - name: Create master security group
21 | ec2_group:
22 | name: "{{ cluster_name_verbose }}-master"
23 | description: "{{ cluster_name_verbose }}-master"
24 | vpc_id: "{{ ec2_vpc_id }}"
25 | region: "{{ aws_region }}"
26 | rules:
27 | - proto: tcp
28 | cidr_ip: 0.0.0.0/0
29 | ports: 443
30 | tags:
31 | Name: "{{ cluster_name_verbose }}-master"
32 | OpenShiftClusterName: "{{ cluster_name }}"
33 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
34 | state: present
35 | register: ec2_create_security_group_master
36 |
37 | - name: Create infra security group
38 | ec2_group:
39 | name: "{{ cluster_name_verbose }}-infra"
40 | description: "{{ cluster_name_verbose }}-infra"
41 | vpc_id: "{{ ec2_vpc_id }}"
42 | region: "{{ aws_region }}"
43 | rules:
44 | - proto: tcp
45 | cidr_ip: 0.0.0.0/0
46 | ports:
47 | - 80
48 | - 443
49 | tags:
50 | Name: "{{ cluster_name_verbose }}-infra"
51 | OpenShiftClusterName: "{{ cluster_name }}"
52 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
53 | state: present
54 | register: ec2_create_security_group_infra
55 |
56 | - name: Create cluster security group
57 | ec2_group:
58 | name: "{{ cluster_name_verbose }}-cluster"
59 | description: "{{ cluster_name_verbose }}-cluster"
60 | vpc_id: "{{ ec2_vpc_id }}"
61 | region: "{{ aws_region }}"
62 | rules:
63 | - proto: all
64 | group_name: "{{ cluster_name_verbose }}-cluster"
65 | tags:
66 | Name: "{{ cluster_name_verbose }}-cluster"
67 | OpenShiftClusterName: "{{ cluster_name }}"
68 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
69 | state: present
70 | register: ec2_create_security_group_cluster
71 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/security_groups_teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Find security groups
4 | ec2_group_facts:
5 | region: "{{ aws_region }}"
6 | filters:
7 | tag:OpenShiftClusterName: "{{ cluster_name }}"
8 | tag:OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
9 | register: ec2_find_security_groups
10 |
11 | - name: Delete security groups
12 | ec2_group:
13 | group_id: "{{ item.group_id }}"
14 | region: "{{ aws_region }}"
15 | state: absent
16 | with_items: "{{ ec2_find_security_groups.security_groups }}"
17 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - import_tasks: route53_teardown.yml
4 |
5 | - import_tasks: ec2_teardown.yml
6 |
7 | - import_tasks: security_groups_teardown.yml
8 |
9 | - import_tasks: vpc_teardown.yml
10 |
11 | - import_tasks: keypair_teardown.yml
12 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/vpc.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create VPC
4 | ec2_vpc_net:
5 | name: "{{ cluster_name_verbose }}"
6 | region: "{{ aws_region }}"
7 | cidr_block: "{{ ec2_vpc_cidr_block }}"
8 | dns_hostnames: yes
9 | dns_support: yes
10 | tags:
11 | Name: "{{ cluster_name_verbose }}"
12 | OpenShiftClusterName: "{{ cluster_name }}"
13 | OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
14 | state: present
15 | register: ec2_create_vpc
16 |
17 | - set_fact:
18 | ec2_vpc_id: "{{ ec2_create_vpc.vpc.id }}"
19 |
20 | - name: Find default route table
21 | ec2_vpc_route_table_facts:
22 | region: "{{ aws_region }}"
23 | filters:
24 | vpc-id: "{{ ec2_vpc_id }}"
25 | association.main: "true"
26 | register: ec2_find_default_route_table
27 |
28 | - set_fact:
29 | ec2_default_route_table: "{{ ec2_find_default_route_table.route_tables[0] | default(omit) }}"
30 |
31 | - set_fact:
32 | ec2_default_route_table_id: "{{ ec2_default_route_table.id | default(omit) }}"
33 |
34 | - name: Create DHCP option set
35 | ec2_vpc_dhcp_option:
36 | vpc_id: "{{ ec2_vpc_id }}"
37 | region: "{{ aws_region }}"
38 | domain_name: "{{ 'ec2.internal' if aws_region == 'us-east-1' else aws_region + '.compute.internal' }}"
39 | dns_servers:
40 | - AmazonProvidedDNS
41 | state: present
42 |
43 | - name: Create Internet gateway
44 | ec2_vpc_igw:
45 | vpc_id: "{{ ec2_vpc_id }}"
46 | region: "{{ aws_region }}"
47 | tags:
48 | Name: "{{ cluster_name_verbose }}"
49 | state: present
50 | register: ec2_create_igw
51 |
52 | - set_fact:
53 | ec2_igw_id: "{{ ec2_create_igw.gateway_id }}"
54 |
55 | - name: Create public subnet
56 | ec2_vpc_subnet:
57 | vpc_id: "{{ ec2_vpc_id }}"
58 | region: "{{ aws_region }}"
59 | cidr: "{{ ec2_vpc_cidr_block }}"
60 | map_public: yes
61 | tags:
62 | Name: "{{ cluster_name_verbose }}-public"
63 | state: present
64 | register: ec2_create_public_subnet
65 |
66 | - set_fact:
67 | ec2_public_subnet_id: "{{ ec2_create_public_subnet.subnet.id }}"
68 |
69 | - name: Create public route table
70 | ec2_vpc_route_table:
71 | vpc_id: "{{ ec2_vpc_id }}"
72 | route_table_id: "{{ ec2_default_route_table_id | default(omit) }}"
73 | region: "{{ aws_region }}"
74 | lookup: id
75 | subnets:
76 | - "{{ ec2_public_subnet_id }}"
77 | routes:
78 | - dest: 0.0.0.0/0
79 | gateway_id: "{{ ec2_igw_id }}"
80 | state: present
81 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tasks/vpc_teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Find VPC
4 | ec2_vpc_net_facts:
5 | region: "{{ aws_region }}"
6 | filters:
7 | tag:OpenShiftClusterName: "{{ cluster_name }}"
8 | tag:OpenShiftClusterNameVerbose: "{{ cluster_name_verbose }}"
9 | register: ec2_find_vpc
10 |
11 | - set_fact:
12 | ec2_vpc_id: "{{ ec2_find_vpc.vpcs[0].vpc_id }}"
13 | ec2_vpc_dhcp_options_id: "{{ ec2_find_vpc.vpcs[0].dhcp_options_id }}"
14 | when: (ec2_find_vpc.vpcs | length) > 0
15 |
16 | - name: Find subnets
17 | ec2_vpc_subnet_facts:
18 | region: "{{ aws_region }}"
19 | filters:
20 | vpc-id: "{{ ec2_vpc_id }}"
21 | register: ec2_find_subnets
22 | when: ec2_vpc_id is defined
23 |
24 | - name: Delete subnets
25 | ec2_vpc_subnet:
26 | vpc_id: "{{ ec2_vpc_id }}"
27 | region: "{{ aws_region }}"
28 | cidr: "{{ item.cidr_block }}"
29 | state: absent
30 | with_items: "{{ ec2_find_subnets.subnets }}"
31 | when: ec2_find_subnets.subnets is defined
32 |
33 | - name: Delete Internet gateway
34 | ec2_vpc_igw:
35 | vpc_id: "{{ ec2_vpc_id }}"
36 | region: "{{ aws_region }}"
37 | state: absent
38 | when: ec2_vpc_id is defined
39 |
40 | - name: Delete DHCP option set
41 | ec2_vpc_dhcp_option:
42 | dhcp_options_id: "{{ ec2_vpc_dhcp_options_id }}"
43 | region: "{{ aws_region }}"
44 | state: absent
45 | when: ec2_vpc_dhcp_options_id is defined
46 |
47 | - name: Delete VPC
48 | ec2_vpc_net:
49 | name: "{{ cluster_name_verbose }}"
50 | region: "{{ aws_region }}"
51 | cidr_block: "{{ ec2_vpc_cidr_block }}"
52 | state: absent
53 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/templates/user_data/master.yml.j2:
--------------------------------------------------------------------------------
#cloud-config
# Rendered by ec2.yml via lookup('template', ...) for master instances.
users:
  - default

system_info:
  default_user:
    name: ec2-user

{% if openshift_ssh_password %}
# Optional: enable SSH password auth and set ec2-user's password.
# Only rendered when openshift_ssh_password is truthy (it defaults to
# "no" in vars/main.yml, so this block is normally omitted).
ssh_pwauth: True

chpasswd:
  list: |
    ec2-user:{{ openshift_ssh_password }}
  expire: false
{% endif %}
17 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - provision_aws
--------------------------------------------------------------------------------
/playbooks/roles/provision_aws/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | cluster_type: multi
4 |
5 | master_node_count: 1
6 | infra_node_count: 1
7 | app_node_count: 2
8 |
9 | # Use "cloud_access" if enabled for account to save on costs
10 | ec2_ami_type: hourly
11 |
12 | ec2_vpc_cidr_block: 172.31.0.0/16
13 |
14 | ec2_instance_type_master: m4.xlarge
15 | ec2_instance_type_infra: m4.xlarge
16 | ec2_instance_type_app: m4.large
17 |
18 | ec2_volume_size_master_root: 60
19 | ec2_volume_size_infra_root: 60
20 | ec2_volume_size_app_root: 60
21 | ec2_volume_size_cns: 150
22 |
23 | openshift_ssh_password: no
24 |
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/README.md:
--------------------------------------------------------------------------------
rhsm_subscribe
==============

Registers a host with Red Hat Subscription Management, attaches the configured subscription pool, and enables exactly the repositories required for the requested OpenShift version (disabling all others).
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/defaults/main.yml:
--------------------------------------------------------------------------------
---

# Provisioning target (e.g. "aws"). When set to "aws", tasks/main.yml
# includes tasks/aws.yml to disable the AMI's RHUI/Amazon yum plumbing
# before registering with RHSM.
provision_type: ""

# Version component of the rhel-7-server-ansible-<version>-rpms repo name
# (see vars/main.yml). Bumped to "2.6" by tasks/main.yml for OCP 3.11.
ansible_repo_version: "2.4"
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for rhsm_subscribe
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/meta/main.yml:
--------------------------------------------------------------------------------
# Galaxy metadata for the rhsm_subscribe role.
galaxy_info:
  author: your name
  description: your description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Some suggested licenses:
  # - BSD (default)
  # - MIT
  # - GPLv2
  # - GPLv3
  # - Apache
  # - CC-BY
  license: license (GPLv2, CC-BY, etc)

  # This role uses include_tasks (Ansible >= 2.4) and the rhsm_repository
  # module (added in Ansible 2.5), so the Galaxy boilerplate value of 1.2
  # was incorrect. Quoted so YAML keeps it as a string.
  min_ansible_version: "2.5"

  # If this a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  # Optionally specify the branch Galaxy will use when accessing the GitHub
  # repo for this role. During role install, if no tags are available,
  # Galaxy will use this branch. During import Galaxy will access files on
  # this branch. If Travis integration is configured, only notifications for this
  # branch will be accepted. Otherwise, in all cases, the repo's default branch
  # (usually master) will be used.
  #github_branch:

  #
  # platforms is a list of platforms, and each platform has a name and a list of versions.
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/tasks/aws.yml:
--------------------------------------------------------------------------------
---

# Prepare a RHEL-on-AWS host for RHSM registration: the stock AMI ships
# RHUI repositories and yum plugins that conflict with
# subscription-manager-managed repos, so disable them first.

- name: Disable amazon-id YUM plugin
  lineinfile:
    dest: /etc/yum/pluginconf.d/amazon-id.conf
    regexp: "^enabled="
    line: "enabled=0"
  register: disable_plugin_amazon_id
  become: yes

- name: Disable rhui-lb YUM plugin
  lineinfile:
    dest: /etc/yum/pluginconf.d/rhui-lb.conf
    regexp: "^enabled="
    line: "enabled=0"
  register: disable_plugin_rhui_lb
  become: yes

# One task instead of separate stop/disable tasks: the service module
# handles state and enablement together.
- name: Stop and disable choose_repo service
  service:
    name: choose_repo
    state: stopped
    enabled: no
  become: yes

# Only wipe the repo list when we actually just disabled the plugins,
# i.e. on first run. Uses the `is changed` test rather than attribute
# access on the registered result.
- name: Disable all repos
  command: "yum-config-manager --disable '*'"
  when: >
    disable_plugin_amazon_id is changed
    or disable_plugin_rhui_lb is changed
  register: disable_all_repos
  # `command` otherwise always reports "changed" even when guarded.
  changed_when: disable_all_repos.rc == 0
  become: yes
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/tasks/main.yml:
--------------------------------------------------------------------------------
---

# Register the host with RHSM and enable exactly the repositories needed
# for the requested OpenShift version, disabling everything else.

- include_tasks: aws.yml
  when: provision_type == 'aws'

# RHSM calls fail transiently now and then. NOTE: `retries` only takes
# effect when paired with an `until` condition, so each retried task
# registers its result and loops until it succeeds.
- name: Register host to RHSM and attach pool
  redhat_subscription:
    username: "{{ rhsm_username }}"
    password: "{{ rhsm_password }}"
    pool_ids: "{{ rhsm_pool }}"
    consumer_name: "{{ rhsm_consumer_name | default(omit) }}"
    state: present
  register: rhsm_register
  until: rhsm_register is succeeded
  retries: 3
  delay: 10
  become: yes

# OCP 3.11 requires the 2.6 ansible repo channel (see vars/main.yml).
- name: Set Ansible version for required repos (OCP 3.11)
  set_fact:
    ansible_repo_version: "2.6"
  when: (openshift_version | string) == "3.11"

- name: Enable required repositories
  rhsm_repository:
    name: "{{ openshift_repos }}"
    state: enabled
  register: openshift_enable_repos
  until: openshift_enable_repos is succeeded
  retries: 3
  delay: 10
  become: yes

# Disable every repo the subscription exposes that is not in our
# required list (openshift_enable_repos.repositories covers all repos
# visible to the host, not just the ones enabled above).
- name: Disable non-required repositories
  rhsm_repository:
    name: "{{
      openshift_enable_repos.repositories |
      map(attribute='id') |
      difference(openshift_repos) }}"
    state: disabled
  register: rhsm_disable_repos
  until: rhsm_disable_repos is succeeded
  retries: 3
  delay: 10
  become: yes
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/tests/test.yml:
--------------------------------------------------------------------------------
---
# Galaxy-generated smoke-test playbook for the rhsm_subscribe role;
# run against tests/inventory (localhost).
- hosts: localhost
  remote_user: root
  roles:
    - rhsm_subscribe
--------------------------------------------------------------------------------
/playbooks/roles/rhsm_subscribe/vars/main.yml:
--------------------------------------------------------------------------------
---

# Repositories required for an OpenShift Container Platform install.
# openshift_version and ansible_repo_version are interpolated at runtime;
# ansible_repo_version may be bumped by tasks/main.yml for OCP 3.11.
# Used by tasks/main.yml both to enable these and to disable all others.
openshift_repos:
- rhel-7-server-rpms
- rhel-7-server-extras-rpms
- rhel-7-server-optional-rpms
- rhel-7-server-ose-{{ openshift_version }}-rpms
- rhel-7-fast-datapath-rpms
- rhel-7-server-ansible-{{ ansible_repo_version }}-rpms
11 |
--------------------------------------------------------------------------------
/vars/aws.example.env:
--------------------------------------------------------------------------------
# Example AWS credentials file — copy it and replace the placeholder
# values with your own access key pair before use.
AWS_ACCESS_KEY_ID=your_access_key
AWS_SECRET_ACCESS_KEY=your_secret_key
--------------------------------------------------------------------------------
/vars/aws.example.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # The name of the cluster.
4 | # This value will be in your DNS entries and should conform to valid DNS characters.
5 | cluster_name: openshift
6 |
7 | # The OpenShift version to install
8 | # IMPORTANT: Make sure this value is quoted, otherwise it gets read as 3.1 instead of 3.10
9 | openshift_version: "3.11"
10 | openshift_version_minor: "3.11.43"
11 | # The base subdomain to use for your cluster.
# Example: If you set this to `example.com`, a DNS entry for `<cluster_name>.example.com` will be created.
13 | openshift_base_domain: example.com
14 |
15 | # The email address to use when generating Lets Encrypt certs for the cluster.
16 | cert_email_address: foo@example.com
17 |
18 | # The AWS region (i.e. `us-east-1`)
19 | aws_region: us-east-1
20 | # If you have Cloud Access setup for your account, set this to `cloud_access`. Otherwise, set this to `hourly`.
21 | ec2_ami_type: cloud_access
22 | # The ID of the Route53 hosted zone
23 | route53_hosted_zone_id: YP563J79RELJ4C
24 |
25 | # Your RHSM username
26 | rhsm_username: foo@example.com
27 | # Your RHSM password
28 | rhsm_password: P@55w0rD
29 | # The RHSM pool ID that contains OpenShift subscriptions
30 | rhsm_pool: ba4e7732f8abcdad545c7f62df736d1f
31 |
32 | # Your Red Hat registry username
33 | redhat_registry_username: 1234567|foo
34 | # Your Red Hat registry password/token
35 | redhat_registry_password: 0535VZW0qDK3fBjFwJE93emjk8fmzNBLJ2XHN8TNrAsxmaqDOOz2G
36 |
37 | # The users to create in OpenShift
38 | openshift_users:
39 | - username: admin
40 | password: password
41 | admin: yes
42 | - username: user1
43 | password: password
44 | - username: user2
45 | password: password
46 | - username: user3
47 | password: password
48 |
49 | # Addons
50 | # addons:
51 | # - istio
52 |
--------------------------------------------------------------------------------