├── .github └── workflows │ └── build_container.yaml ├── .gitignore ├── Containerfile ├── LICENSE ├── Pipfile ├── Pipfile.lock ├── README.md ├── docs ├── bundle_content.md ├── install_openshift.md └── unpack_content.md ├── hack ├── cloudformation.yaml └── generate_ami_mapping.sh ├── ocp4_disconnected ├── __init__.py ├── bundle.py ├── imageset-config-template.yaml ├── imageset.py └── unpack.sh └── setup.cfg /.github/workflows/build_container.yaml: -------------------------------------------------------------------------------- 1 | name: container-build-and-push 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | env: 8 | REGISTRY: ghcr.io 9 | IMAGE_NAME: ${{ github.repository }} 10 | 11 | jobs: 12 | build-and-push-image: 13 | runs-on: ubuntu-latest 14 | 15 | permissions: 16 | contents: read 17 | packages: write 18 | 19 | steps: 20 | - name: Clone the repository 21 | uses: actions/checkout@v4 22 | 23 | - name: Log in to the container registry 24 | uses: docker/login-action@v3 25 | with: 26 | registry: ${{ env.REGISTRY }} 27 | username: ${{ github.actor }} 28 | password: ${{ secrets.GITHUB_TOKEN }} 29 | 30 | - name: Extract container metadata 31 | uses: docker/metadata-action@v5 32 | id: meta 33 | with: 34 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 35 | tags: | 36 | type=raw,value=latest,enable={{ is_default_branch }} 37 | type=sha,format=long 38 | 39 | - name: Build and push container image 40 | uses: docker/build-push-action@v5 41 | id: build-and-push 42 | with: 43 | context: . 44 | file: ./Containerfile 45 | push: true 46 | tags: ${{ steps.meta.outputs.tags }} 47 | labels: ${{ steps.meta.outputs.labels }} 48 | 49 | - name: Output image metadata 50 | run: echo "${{ steps.build-and-push.outputs.metadata }}" 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Project 3 | ############################################################################### 4 | 5 | 6 | 7 | ############################################################################### 8 | # Python 9 | # https://github.com/github/gitignore/blob/main/Python.gitignore 10 | ############################################################################### 11 | 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | *$py.class 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | share/python-wheels/ 35 | *.egg-info/ 36 | .installed.cfg 37 | *.egg 38 | MANIFEST 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .nox/ 54 | .coverage 55 | .coverage.* 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | *.cover 60 | *.py,cover 61 | .hypothesis/ 62 | .pytest_cache/ 63 | cover/ 64 | 65 | # Translations 66 | *.mo 67 | *.pot 68 | 69 | # Django stuff: 70 | *.log 71 | local_settings.py 72 | db.sqlite3 73 | db.sqlite3-journal 74 | 75 | # Flask stuff: 76 | instance/ 77 | .webassets-cache 78 | 79 | # Scrapy stuff: 80 | .scrapy 81 | 82 | # Sphinx documentation 83 | docs/_build/ 84 | 85 | # PyBuilder 86 | .pybuilder/ 87 | target/ 88 | 89 | # Jupyter Notebook 90 | .ipynb_checkpoints 91 | 92 | # IPython 93 | profile_default/ 94 | ipython_config.py 95 | 96 | # pyenv 97 | # For a library or package, you might want to ignore these files since the code is 98 | # intended to run in multiple environments; otherwise, check them in: 99 | # .python-version 100 | 101 | # pipenv 102 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 103 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 104 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 105 | # install all needed dependencies. 106 | #Pipfile.lock 107 | 108 | # poetry 109 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 110 | # This is especially recommended for binary packages to ensure reproducibility, and is more 111 | # commonly ignored for libraries. 112 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 113 | #poetry.lock 114 | 115 | # pdm 116 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 117 | #pdm.lock 118 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 119 | # in version control. 120 | # https://pdm.fming.dev/#use-with-ide 121 | .pdm.toml 122 | 123 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 124 | __pypackages__/ 125 | 126 | # Celery stuff 127 | celerybeat-schedule 128 | celerybeat.pid 129 | 130 | # SageMath parsed files 131 | *.sage.py 132 | 133 | # Environments 134 | .env 135 | .venv 136 | env/ 137 | venv/ 138 | ENV/ 139 | env.bak/ 140 | venv.bak/ 141 | 142 | # Spyder project settings 143 | .spyderproject 144 | .spyproject 145 | 146 | # Rope project settings 147 | .ropeproject 148 | 149 | # mkdocs documentation 150 | /site 151 | 152 | # mypy 153 | .mypy_cache/ 154 | .dmypy.json 155 | dmypy.json 156 | 157 | # Pyre type checker 158 | .pyre/ 159 | 160 | # pytype static type analyzer 161 | .pytype/ 162 | 163 | # Cython debug symbols 164 | cython_debug/ 165 | 166 | # PyCharm 167 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 168 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 169 | # and can be added to the global gitignore or merged into this file. For a more nuclear 170 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
171 | #.idea/ 172 | 173 | 174 | ############################################################################### 175 | # VirtualEnv 176 | # https://github.com/github/gitignore/blob/main/Global/VirtualEnv.gitignore 177 | ############################################################################### 178 | 179 | # Virtualenv 180 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 181 | .Python 182 | [Bb]in 183 | [Ii]nclude 184 | [Ll]ib 185 | [Ll]ib64 186 | [Ll]ocal 187 | [Ss]cripts 188 | pyvenv.cfg 189 | .venv 190 | pip-selfcheck.json 191 | 192 | 193 | ############################################################################### 194 | # VSCode 195 | # https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 196 | ############################################################################### 197 | 198 | .vscode/* 199 | !.vscode/settings.json 200 | !.vscode/tasks.json 201 | !.vscode/launch.json 202 | !.vscode/extensions.json 203 | !.vscode/*.code-snippets 204 | 205 | # Local History for Visual Studio Code 206 | .history/ 207 | 208 | # Built Visual Studio Code Extensions 209 | *.vsix 210 | 211 | 212 | ############################################################################### 213 | # Vim 214 | # https://github.com/github/gitignore/blob/main/Global/Vim.gitignore 215 | ############################################################################### 216 | 217 | # Swap 218 | [._]*.s[a-v][a-z] 219 | !*.svg # comment out if you don't need vector files 220 | [._]*.sw[a-p] 221 | [._]s[a-rt-v][a-z] 222 | [._]ss[a-gi-z] 223 | [._]sw[a-p] 224 | 225 | # Session 226 | Session.vim 227 | Sessionx.vim 228 | 229 | # Temporary 230 | .netrwhist 231 | *~ 232 | # Auto-generated tag files 233 | tags 234 | # Persistent undo 235 | [._]*.un~ 236 | 237 | 238 | ############################################################################### 239 | # MacOS 240 | # https://github.com/github/gitignore/blob/main/Global/macOS.gitignore 241 | ############################################################################### 242 | 243 | # General 244 | .DS_Store 245 | .AppleDouble 246 | .LSOverride 247 | 248 | # Icon must end with two \r 249 | Icon 250 | 251 | 252 | # Thumbnails 253 | ._* 254 | 255 | # Files that might appear in the root of a volume 256 | .DocumentRevisions-V100 257 | .fseventsd 258 | .Spotlight-V100 259 | .TemporaryItems 260 | .Trashes 261 | .VolumeIcon.icns 262 | .com.apple.timemachine.donotpresent 263 | 264 | # Directories potentially created on remote AFP share 265 | .AppleDB 266 | .AppleDesktop 267 | Network Trash Folder 268 | Temporary Items 269 | .apdisk 270 | -------------------------------------------------------------------------------- /Containerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8/python-311:latest 2 | 3 | ENV PYTHONPATH=/opt/app-root/src:$PYTHONPATH 4 | 5 | USER root 6 | 7 | RUN yum install --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms -y \ 8 | yum-utils \ 9 | && rm -rf /var/cache/yum 10 | 11 | COPY Pipfile Pipfile.lock /opt/app-root/src 12 | RUN pip3 install --upgrade pip \ 13 | && pip3 install pipenv \ 14 | && pipenv install --system --deploy 15 | 16 | COPY . 
/opt/app-root/src 17 | 18 | VOLUME /mnt/data 19 | 20 | ENTRYPOINT ["python3", "-m", "ocp4_disconnected.bundle", "--output-dir", "/mnt/data"] 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Red Hat 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | requests = "*" 8 | click = "*" 9 | colorlog = "*" 10 | tqdm = "*" 11 | pyyaml = "*" 12 | 13 | [dev-packages] 14 | autopep8 = "*" 15 | 16 | [requires] 17 | python_version = "3.11" 18 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "42558ad78d638ef917b13751f5dcfe47ba3847d82b83d72301ed62c1a0b4c4a1" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.11" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "certifi": { 20 | "hashes": [ 21 | "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", 22 | "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" 23 | ], 24 | "markers": "python_version >= '3.6'", 25 | "version": "==2023.7.22" 26 | }, 27 | "charset-normalizer": { 28 | "hashes": [ 29 | "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96", 30 | "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c", 31 | "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710", 32 | "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706", 33 | "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020", 34 | "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252", 35 | "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad", 36 | "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329", 37 | "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a", 38 | 
"sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f", 39 | "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6", 40 | "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4", 41 | "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a", 42 | "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46", 43 | "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2", 44 | "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23", 45 | "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace", 46 | "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd", 47 | "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982", 48 | "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10", 49 | "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2", 50 | "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea", 51 | "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09", 52 | "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5", 53 | "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149", 54 | "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489", 55 | "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9", 56 | "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80", 57 | "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592", 58 | "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3", 59 | "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6", 60 | "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed", 61 | "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c", 62 | "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200", 63 | "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a", 64 | "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e", 65 | "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d", 66 | "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6", 67 | "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623", 68 | "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669", 69 | "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3", 70 | "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa", 71 | "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9", 72 | "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2", 73 | "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f", 74 | "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1", 75 | "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4", 76 | "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a", 77 | "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8", 78 | "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3", 79 | "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029", 80 | "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f", 81 | "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959", 82 | 
"sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22", 83 | "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7", 84 | "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952", 85 | "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346", 86 | "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e", 87 | "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d", 88 | "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299", 89 | "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd", 90 | "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a", 91 | "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3", 92 | "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037", 93 | "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94", 94 | "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c", 95 | "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858", 96 | "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a", 97 | "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449", 98 | "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c", 99 | "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918", 100 | "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1", 101 | "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c", 102 | "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac", 103 | "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa" 104 | ], 105 | "markers": "python_full_version >= '3.7.0'", 106 | "version": "==3.2.0" 107 | }, 108 | "click": { 109 | "hashes": [ 110 | "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", 111 | "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" 112 | ], 113 | "index": "pypi", 114 | "version": "==8.1.7" 115 | }, 116 | "colorlog": { 117 | "hashes": [ 118 | "sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662", 119 | "sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5" 120 | ], 121 | "index": "pypi", 122 | "version": "==6.7.0" 123 | }, 124 | "idna": { 125 | "hashes": [ 126 | "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4", 127 | "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2" 128 | ], 129 | "markers": "python_version >= '3.5'", 130 | "version": "==3.4" 131 | }, 132 | "pyyaml": { 133 | "hashes": [ 134 | "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", 135 | "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", 136 | "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", 137 | "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", 138 | "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", 139 | "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", 140 | "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", 141 | "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", 142 | "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", 143 | "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", 144 | 
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", 145 | "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", 146 | "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", 147 | "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", 148 | "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", 149 | "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", 150 | "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", 151 | "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", 152 | "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", 153 | "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", 154 | "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", 155 | "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", 156 | "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", 157 | "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", 158 | "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", 159 | "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", 160 | "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", 161 | "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", 162 | "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", 163 | "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", 164 | "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", 165 | "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", 166 | "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", 167 | "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", 168 | "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", 169 | "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", 170 | "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", 171 | "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", 172 | "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", 173 | "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" 174 | ], 175 | "index": "pypi", 176 | "version": "==6.0.1" 177 | }, 178 | "requests": { 179 | "hashes": [ 180 | "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", 181 | "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" 182 | ], 183 | "index": "pypi", 184 | "version": "==2.31.0" 185 | }, 186 | "tqdm": { 187 | "hashes": [ 188 | "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386", 189 | "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7" 190 | ], 191 | "index": "pypi", 192 | "version": "==4.66.1" 193 | }, 194 | "urllib3": { 195 | "hashes": [ 196 | "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11", 197 | "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4" 198 | ], 199 | "markers": "python_version >= '3.7'", 200 | "version": "==2.0.4" 201 | } 202 | }, 203 | "develop": { 204 | "autopep8": { 205 | "hashes": [ 206 | "sha256:ba4901621c7f94c6fce134437d577009886d5e3bfa46ee64e1d1d864a5b93cc2", 207 | "sha256:ca72bd1e3bf0aa40636860fa07e3f1a762a18d0943cf359b3de09221059ffbd9" 208 | ], 209 | 
"index": "pypi", 210 | "version": "==2.0.3" 211 | }, 212 | "pycodestyle": { 213 | "hashes": [ 214 | "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0", 215 | "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8" 216 | ], 217 | "markers": "python_version >= '3.8'", 218 | "version": "==2.11.0" 219 | } 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenShift 4 Disconnected Bundler 2 | 3 | A tool that makes it easier to get started when deploying OpenShift clusters in 4 | a disconnected / air-gapped networks. The tool will bundle up all of the 5 | required dependencies (e.g. container images, CLI tools, etc.) that can be 6 | transfered to the disconnected environment. The generated bundle includes a 7 | script to unpack and host the artifacts in the disconnected environment so that 8 | they can be used to install OpenShift. 9 | 10 | ## Documentation 11 | 12 | The following documentation will walk you through how to use this tool. 13 | Included in this repository is a CloudFormation script that will create a 14 | simulated disconnected environment that you can use to try it out. To assist in 15 | illustrating how to use this tool, this documentation will take advantage of an 16 | environment created using that CloudFormation script. 17 | 18 | If you are providing your own environment, you can skip ahead to the 19 | [Bundle Content](docs/bundle_content.md) section. 20 | 21 | At a high level, the process for using this tool to deploy an OpenShift cluster 22 | in a disconnected environment looks like this: 23 | 24 | - Using a host with access to the internet, run this tool to download all of 25 | the content required for an OpenShift install and bundle it 26 | - Transfer the generated content bundle to the disconnected environment (we 27 | will use S3 in our example) 28 | - Create a RHEL host in the disconnected environment and pull the bundle of 29 | content to the host 30 | - Unpack the bundle and run the provided script to stand up the required 31 | supporting infrastructure (e.g. container registry) and generate the 32 | configuration required to install in a disconnected environment 33 | - Run the OpenShift installer after adding the required configuration 34 | 35 | ## Prepare Environment 36 | 37 | As mentioned above, we are going to create a simulated disconnected environment 38 | in AWS using a CloudFormation script provided in this repository. 39 | 40 | Export the required environment variables to setup your AWS credentials and region. 41 | 42 | ```bash 43 | # If you do not have AWS credentials configured, export these instead: 44 | # export AWS_ACCESS_KEY_ID= 45 | # export AWS_SECRET_ACCESS_KEY= 46 | export AWS_PROFILE=rhdp 47 | export AWS_REGION=us-east-2 48 | ``` 49 | 50 | The only required parameter for the CloudFormation script is the name of the 51 | key pair you wish to use to be able to SSH into the EC2 instances that are 52 | created. If you do not already have a key pair, you can easily create one using 53 | your existing SSH public key. 54 | 55 | ```bash 56 | ssh-keygen -q -N '' -f ~/.ssh/ocp4-disconnected 57 | 58 | aws ec2 import-key-pair --key-name ocp4-disconnected --public-key-material fileb://~/.ssh/ocp4-disconnected.pub 59 | ``` 60 | 61 | Once your key pair is in place, create the simulated disconnected environment 62 | using the CloudFormation script. 
63 | 
64 | ```bash
65 | curl https://raw.githubusercontent.com/redhatgov/ocp4-disconnected/main/hack/cloudformation.yaml -o /tmp/ocp4-disconnected-cf.yaml
66 | 
67 | aws cloudformation create-stack \
68 | --stack-name ocp4-disconnected \
69 | --template-body file:///tmp/ocp4-disconnected-cf.yaml \
70 | --capabilities CAPABILITY_NAMED_IAM \
71 | --parameters "ParameterKey=KeyName,ParameterValue=ocp4-disconnected"
72 | ```
73 | 
74 | Wait for the CloudFormation stack to finish deploying. Once the output is
75 | `CREATE_COMPLETE`, you can use _Ctrl-C_ to exit the `watch` command. It usually
76 | takes in the ballpark of 5-6 minutes to deploy the entire stack.
77 | 
78 | ```bash
79 | watch -n 10 aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].StackStatus'
80 | ```
81 | 
82 | Now that the CloudFormation stack has finished deploying, we can capture the
83 | output to get the IP addresses we need to connect to the EC2
84 | instances that were created.
85 | 
86 | ```bash
87 | aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs'
88 | ```
89 | 
90 | However, as we go through this walkthrough, the commands to grab the values from
91 | the outputs will be provided.
92 | 
93 | [Next: Bundle Content >>](docs/bundle_content.md)
94 | 
--------------------------------------------------------------------------------
/docs/bundle_content.md:
--------------------------------------------------------------------------------
1 | # OpenShift 4 Disconnected Bundler Documentation
2 | 
3 | ## Bundle Content
4 | 
5 | For our walkthrough, we will be using the Jump host as our host to download the
6 | content bundle since it has access to the internet. This isn't required and
7 | these steps could be done from your laptop or any other internet-connected
8 | host. We are using the Jump host in this case because it has a fast
9 | connection to the internet and is available as part of our simulated
10 | disconnected environment in the next step.
11 | 
12 | Start by connecting to the Jump host via SSH. You can grab the public IP of the
13 | Jump host from the outputs captured during the environment prep stage or you
14 | can use the command below as a convenience.
15 | 
16 | ```bash
17 | export JUMP_HOST_PUBLIC_IP=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`JumpInstancePublicIp`].OutputValue' --output text)
18 | 
19 | ssh-add ~/.ssh/ocp4-disconnected
20 | ssh ec2-user@${JUMP_HOST_PUBLIC_IP}
21 | ```
22 | 
23 | ### Pull Secret
24 | 
25 | In order to pull the container images required to install OpenShift, you will
26 | need a pull secret that allows you to authenticate to the Red Hat registry.
27 | 
28 | If you do not already have a Red Hat account, register for a [FREE Red Hat
29 | Developer account](https://developers.redhat.com/register/) that will give you
30 | access to Red Hat software, including the ability to get the pull secret needed
31 | for this tool.
32 | 
33 | You can find your pull secret on the [Red Hat OpenShift
34 | Console](https://console.redhat.com/openshift/install/pull-secret).
35 | 
36 | Keep this page handy as you will be prompted for the value of your pull secret
37 | when you get ready to run the tool.
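
If you would like to sanity check the pull secret before using it, you can run it through `jq`. This is an optional, minimal sketch: it assumes you saved the pull secret to a local file (the hypothetical `pull-secret.txt` below) and that `jq` is installed; the tool itself only needs the value pasted at its prompt.

```bash
# Optional: confirm the pull secret is valid JSON and list the registries it
# authenticates to. pull-secret.txt is a hypothetical local copy of the pull
# secret downloaded from the Red Hat OpenShift Console.
jq '.auths | keys' pull-secret.txt
```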
38 | 
39 | ### Download and Package Content
40 | 
41 | We will now run this tool to download and package the content we'll need in
42 | order to perform an install of OpenShift in a disconnected environment in the
43 | next step. This tool accepts a few parameters and will prompt for any that are
44 | not passed in. For this walkthrough, we will use the tool in that way and
45 | have it prompt us for the required input.
46 | 
47 | > [!WARNING]
48 | > If you choose to specify parameters via the CLI instead of being prompted, it
49 | > is **NOT** recommended to override `--output-dir` as it is already
50 | > configured as part of the container to output to `/mnt/data`. You should map
51 | > a volume in your `podman run` (as seen below) to `/mnt/data` to ensure your
52 | > data persists.
53 | 
54 | The information that we need to give the tool is:
55 | 
56 | - The OpenShift version to download content for, and
57 | - Our pull secret from above. The pull secret only needs to be specified on the
58 | first run as it will be saved and used for future runs.
59 | 
60 | If you do not specify the OpenShift version when prompted, it will default to
61 | the latest stable version. You can also specify the version in the format of
62 | `x.y` (e.g. `4.12`) and it will find the latest stable release of that version. If you
63 | want a specific z-stream release, you can also specify the version as `x.y.z`
64 | (e.g. `4.12.30`).
65 | 
66 | ```bash
67 | podman pull ghcr.io/redhatgov/ocp4-disconnected:latest
68 | 
69 | podman run -it --rm --name ocp4-disconnected --platform linux/amd64 --volume /mnt/ocp4_data:/mnt/data:z ghcr.io/redhatgov/ocp4-disconnected:latest
70 | ```
71 | 
72 | After the tool finishes, all of the content we need is in a single tar file at
73 | `/mnt/ocp4_data/ocp4_bundle.tar`.
74 | 
75 | If you are doing this in a different environment than this simulated
76 | environment and have passed a different path as your `--volume` mapping in the
77 | command above, the `ocp4_bundle.tar` will be in whichever directory you
78 | specified.
79 | 
80 | ### Transfer Content
81 | 
82 | Now that we have our `ocp4_bundle.tar`, we are ready to transfer it to the
83 | disconnected environment. As mentioned earlier, in a real environment we would
84 | follow the approved process for transferring content to our disconnected
85 | environment, but for this walkthrough we will be using an S3 bucket. You can
86 | grab the name of the S3 bucket from the outputs captured during the environment
87 | prep stage or you can use the command below as a convenience.
88 | 
89 | ```bash
90 | export S3_TRANSFER_BUCKET=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`S3TransferBucket`].OutputValue' --output text)
91 | 
92 | aws s3 cp /mnt/ocp4_data/ocp4_bundle.tar s3://${S3_TRANSFER_BUCKET}
93 | ```
94 | 
95 | At this point, we are done with the work we need to do in the connected
96 | environment. In the next steps, we will be working in the disconnected
97 | environment.
98 | 
99 | [<< Back: Prepare Environment](../README.md) - [Next: Unpack Content >>](unpack_content.md)
100 | 
--------------------------------------------------------------------------------
/docs/install_openshift.md:
--------------------------------------------------------------------------------
1 | # OpenShift 4 Disconnected Bundler Documentation
2 | 
3 | ## Install OpenShift
4 | 
5 | Now that our disconnected environment is set up and our registry has the
6 | required content, we're ready to install OpenShift.
7 | 8 | As mentioned in the previous section, we need to create an 9 | `install-config.yaml` that will tell OpenShift how to perform the installation. 10 | As part of generating this file, we will also be sure to copy and paste the 11 | configuration output of the `unpack.sh` script to the bottom of our generated 12 | file. 13 | 14 | ### Create Install Configuration 15 | 16 | The details of what goes in the `install-config.yaml` will differ depending on 17 | the target platform where your OpenShift cluster will be running. In this 18 | walkthrough, we will be targeting the simulated disconnected environment that 19 | we created in AWS, but for details for other target platforms, you can refer to 20 | the [OpenShift documentation][ocp_platforms]. 21 | 22 | For the purposes of our walkthrough, we will use the High Side host since it is 23 | already available in the disconnected environment. It's not required to do the 24 | installation from the same host where the content is being hosted and we are 25 | only doing it for convenience in this walkthrough. 26 | 27 | If you are not already, connect to the High Side host via SSH. 28 | 29 | ```bash 30 | export JUMP_HOST_PUBLIC_IP=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`JumpInstancePublicIp`].OutputValue' --output text) 31 | export HIGHSIDE_HOST_PRIVATE_IP=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`HighSideInstancePrivateIp`].OutputValue' --output text) 32 | 33 | ssh-add ~/.ssh/ocp4-disconnected 34 | ssh -J ec2-user@${JUMP_HOST_PUBLIC_IP} ec2-user@${HIGHSIDE_HOST_PRIVATE_IP} 35 | ``` 36 | 37 | Create an SSH key to use for the OpenShift cluster. 38 | 39 | ```bash 40 | ssh-keygen -q -N '' -f ~/.ssh/ocp4-install 41 | ``` 42 | 43 | Create a directory to use as workspace for the installation. 44 | 45 | ```bash 46 | mkdir ~/ocp4-install 47 | ``` 48 | 49 | Before we run the installer to generate our `install-config.yaml`, we need to 50 | create our pull secret for the disconnected environment that contains the 51 | authentication information for our mirror registry. 52 | 53 | ```bash 54 | export REGISTRY_USERNAME=openshift 55 | export REGISTRY_PASSWORD=$(echo -n `head -n 1 /mnt/ocp4_data/registry/registry_password`) 56 | 57 | cat << EOF | jq -r tostring > ~/pull-secret.json 58 | { 59 | "auths": { 60 | "$(hostname --fqdn):8443": { 61 | "auth": "$(echo -n "openshift:${REGISTRY_PASSWORD}" | base64 -w0)" 62 | } 63 | } 64 | } 65 | EOF 66 | 67 | cat ~/pull-secret.json 68 | ``` 69 | 70 | You will want to copy the output to your clipboard. 71 | 72 | To start, we will use the `openshift-install` command to generate the initial 73 | `install-config.yaml` by answering the prompts. 74 | 75 | 76 | ```bash 77 | openshift-install create install-config --dir ~/ocp4-install 78 | ``` 79 | 80 | The table below shows the descriptions of each prompt you will see. 81 | 82 | | Prompt | Description | 83 | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 84 | | SSH Public Key | The SSH public key used to access all nodes within the cluster. | 85 | | Platform | The platform on which the cluster will run. For a full list of platforms, including those not supported by this wizard, see https://github.com/openshift/installer | 86 | | Region | The AWS region to be used for installation. 
|
87 | | Base Domain | The base domain of the cluster. All DNS records will be sub-domains of this base and will also include the cluster name. |
88 | | Cluster Name | The name of the cluster. This will be used when generating sub-domains. |
89 | | Pull Secret | The container registry pull secret for this cluster, as a single line of JSON (e.g. `{"auths": {...}}`). |
90 | 
91 | For the purposes of our walkthrough, we'll want to answer the prompts as shown
92 | below.
93 | 
94 | > [!IMPORTANT]
95 | > If you have a domain you want to publish your cluster under, you can set
96 | > **Base Domain** to that instead of `example.com`. The full domain of your
97 | > cluster will become `{{ Cluster Name }}.{{ Base Domain }}`, so be sure to set
98 | > **Base Domain** accordingly.
99 | >
100 | > However, we will be deploying this cluster isolated to the VPC only, so using
101 | > any **Base Domain** will suffice since it will resolve within the VPC via a
102 | > private Route53 zone.
103 | 
104 | ```text
105 | ? SSH Public Key /home/ec2-user/.ssh/ocp4-install.pub
106 | ? Platform aws
107 | ? Region us-east-2
108 | ? Base Domain example.com
109 | ? Cluster Name ocp4-disconnected
110 | ? Pull Secret [? for help] ********************************************************************************************************************************
111 | ```
112 | 
113 | After answering the prompts, our `install-config.yaml` is located at
114 | `~/ocp4-install/install-config.yaml`. Before we are able to start the install,
115 | we need to add some items to the `install-config.yaml` file. Specifically,
116 | since we are deploying into an existing VPC, we need to let the installer know
117 | which subnets to use and the details to pull images from our mirror registry.
118 | 
119 | To make this easier, we'll take advantage of the AWS CLI and `yq` to make these
120 | edits for us. To summarize what we're doing: we are updating
121 | `platform.aws.subnets` to be the list of disconnected subnets from our VPC, updating
122 | `networking.machineNetwork` to contain the CIDRs of those subnets, and setting `publish` to `Internal` so the cluster endpoints are only exposed inside the VPC.
123 | 
124 | ```bash
125 | export PRIVATE_SUBNETS=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`PrivateSubnets`].OutputValue' --output text | sed 's/,/\n/g')
126 | 
127 | yq -i '.platform.aws.subnets = []' ~/ocp4-install/install-config.yaml
128 | yq -i '.networking.machineNetwork = []' ~/ocp4-install/install-config.yaml
129 | yq -i '.publish = "Internal"' ~/ocp4-install/install-config.yaml
130 | 
131 | for subnet in ${PRIVATE_SUBNETS}; do
132 | yq -i '.platform.aws.subnets += "'${subnet}'"' ~/ocp4-install/install-config.yaml
133 | yq -i '.networking.machineNetwork += {"cidr": '$(aws ec2 describe-subnets --subnet-ids ${subnet} | jq '.Subnets[0].CidrBlock')'}' ~/ocp4-install/install-config.yaml
134 | done
135 | ```
136 | 
137 | After we run the commands above, in our `install-config.yaml` we should see the
138 | `platform.aws.subnets` and `networking.machineNetwork` updated to look similar
139 | to the output below.
140 | 
141 | ```yaml
142 | networking:
143 | clusterNetwork:
144 | - cidr: 10.128.0.0/14
145 | hostPrefix: 23
146 | machineNetwork:
147 | - cidr: 10.0.48.0/20
148 | - cidr: 10.0.64.0/20
149 | - cidr: 10.0.80.0/20
150 | networkType: OVNKubernetes
151 | serviceNetwork:
152 | - 172.30.0.0/16
153 | 
154 | platform:
155 | aws:
156 | region: us-east-2
157 | subnets:
158 | - subnet-067e552c71153a6a3
159 | - subnet-05a8d8fe1fd2accfe
160 | - subnet-004cc1714240d33db
161 | ```
162 | 
163 | The last modification we need to make to our `install-config.yaml` is to add
164 | the output from the `unpack.sh` script that will tell the OpenShift installer
165 | where to find the images in the mirror registry.
166 | 
167 | ```bash
168 | yq -i '. *= load("/mnt/ocp4_data/registry/install_config_registry.yaml")' ~/ocp4-install/install-config.yaml
169 | ```
170 | 
171 | We're almost ready to start the OpenShift installation. One last thing we'll do
172 | is make a backup copy of our `install-config.yaml` because the installation
173 | process will consume the one in `~/ocp4-install`.
174 | 
175 | ```bash
176 | cp ~/ocp4-install/install-config.yaml ~/ocp4-install/install-config.yaml.bak
177 | ```
178 | 
179 | During the OpenShift installation process, the installer creates all of the AWS
180 | resources required for the OpenShift cluster. We will need an IAM User with the
181 | [appropriate permissions][iam_permissions] to perform this installation, which
182 | was already created for us by the CloudFormation template for our simulated
183 | disconnected environment.
184 | 
185 | We first need to create access keys for that IAM User before we run the OpenShift installer.
186 | 
187 | ```bash
188 | export OCP_INSTALL_ACCESS_KEY=$(aws iam create-access-key --user-name $(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`InstallIamUser`].OutputValue' --output text))
189 | 
190 | aws configure set aws_access_key_id $(echo ${OCP_INSTALL_ACCESS_KEY} | jq -r '.AccessKey.AccessKeyId') --profile ocp4-install
191 | aws configure set aws_secret_access_key $(echo ${OCP_INSTALL_ACCESS_KEY} | jq -r '.AccessKey.SecretAccessKey') --profile ocp4-install
192 | aws configure set region $(curl --silent http://169.254.169.254/latest/meta-data/placement/region) --profile ocp4-install
193 | ```
194 | 
195 | Now we're ready to start the OpenShift install. Once you kick things off, it
196 | will start creating the infrastructure needed for the cluster and then install
197 | OpenShift.
198 | 
199 | ```bash
200 | export AWS_PROFILE=ocp4-install
201 | export AWS_EC2_METADATA_DISABLED=true
202 | 
203 | openshift-install create cluster --dir ~/ocp4-install
204 | ```
205 | 
206 | It will likely take 30+ minutes for the installer to provision all of the
207 | infrastructure and deploy OpenShift to that infrastructure. Once the
208 | installation is complete, we have an OpenShift cluster running in our
209 | disconnected environment!
210 | 
211 | You will also see some important information at the end of the installation
212 | that contains the URL and login credentials for your OpenShift cluster.
213 | 
214 | ```text
215 | INFO Access the OpenShift web-console here: https://console-openshift-console.apps.ocp4-disconnected.example.com
216 | INFO Login to the console with user: "kubeadmin", and password: "*****-*****-*****-*****"
217 | ```
218 | 
219 | In order for us to connect to that cluster in our simulated disconnected
220 | environment, we will need to set up a connection through our Jump host to be
221 | able to access the cluster as it is not exposed to the internet. To do this, we
222 | will use a tool called `sshuttle`. If you do not have `sshuttle` on your
223 | computer, you can install it for your platform by following the instructions in
224 | the README on the project page.
225 | 
226 | https://github.com/sshuttle/sshuttle
227 | 
228 | At this point, you can disconnect from the High Side host so that you are back
229 | at the terminal prompt of your computer.
230 | 
231 | To create our tunnel to the Jump host, we'll ensure that DNS queries and all
232 | traffic go through our Jump host by running the following from our computer.
233 | 
234 | ```bash
235 | export JUMP_HOST_PUBLIC_IP=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`JumpInstancePublicIp`].OutputValue' --output text)
236 | 
237 | ssh-add ~/.ssh/ocp4-disconnected
238 | sshuttle --python /usr/libexec/platform-python --dns --remote ec2-user@${JUMP_HOST_PUBLIC_IP} 0.0.0.0/0
239 | ```
240 | 
241 | We should now be able to open
242 | https://console-openshift-console.apps.ocp4-disconnected.example.com in our
243 | browser and log in with the credentials given at the end of the installation.
244 | 
245 | If you need to recover those credentials at any time, the username will always
246 | be `kubeadmin` and the password can be found at
247 | `~/ocp4-install/auth/kubeadmin-password` on the High Side host.
248 | 
249 | 
250 | [ocp_platforms]: https://docs.openshift.com/container-platform/latest/installing/installing-preparing.html#installing-preparing-install-manage
251 | [iam_permissions]: https://docs.openshift.com/container-platform/4.12/installing/installing_aws/installing-aws-account.html#installation-aws-permissions_installing-aws-account
252 | 
--------------------------------------------------------------------------------
/docs/unpack_content.md:
--------------------------------------------------------------------------------
1 | # OpenShift 4 Disconnected Bundler Documentation
2 | 
3 | ## Unpack Content
4 | 
5 | Now that our content has been uploaded to the S3 bucket, we are ready to pull
6 | it down to the host in the disconnected environment and unpack it.
7 | 
8 | > [!NOTE]
9 | > If you're still connected to the Jump host from the previous steps, `exit`
10 | > from the SSH connection so that you are back to your primary terminal prompt
11 | > on your machine.
12 | 
13 | Start by connecting to the High Side host via SSH. Since this host is in the
14 | disconnected environment, it is not directly accessible and we will need to
15 | connect to it through the Jump host. You will need the public IP of the Jump
16 | host and the private IP of the High Side host from the outputs captured during
17 | the environment prep stage or you can use the command below as a convenience.
18 | 
19 | ```bash
20 | export JUMP_HOST_PUBLIC_IP=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`JumpInstancePublicIp`].OutputValue' --output text)
21 | export HIGHSIDE_HOST_PRIVATE_IP=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`HighSideInstancePrivateIp`].OutputValue' --output text)
22 | 
23 | ssh-add ~/.ssh/ocp4-disconnected
24 | ssh -J ec2-user@${JUMP_HOST_PUBLIC_IP} ec2-user@${HIGHSIDE_HOST_PRIVATE_IP}
25 | ```
26 | 
27 | We are now ready to pull `ocp4_bundle.tar` from our S3 bucket to our High Side
28 | host. To reiterate, in a real environment we would follow the approved
29 | process for transferring content to our disconnected environment, but for this
30 | walkthrough we will be using an S3 bucket. You can grab the name of the S3
31 | bucket from the outputs captured during the environment prep stage or you can
32 | use the command below as a convenience.
33 | 
34 | ```bash
35 | export S3_TRANSFER_BUCKET=$(aws cloudformation describe-stacks --stack-name ocp4-disconnected --query 'Stacks[0].Outputs[?OutputKey==`S3TransferBucket`].OutputValue' --output text)
36 | 
37 | aws s3 cp s3://${S3_TRANSFER_BUCKET}/ocp4_bundle.tar /mnt/ocp4_data
38 | ```
39 | 
40 | We will unpack the tar file next so that we can get access to the content
41 | inside.
42 | 
43 | ```bash
44 | tar --extract --verbose --directory /mnt/ocp4_data --file /mnt/ocp4_data/ocp4_bundle.tar
45 | ```
46 | 
47 | After extracting the `ocp4_bundle.tar` file, we now have the content we need to
48 | install OpenShift in a disconnected environment. But before we can do that, we
49 | need to put everything in the right place. Also included in the tar file that
50 | we unpacked is a script named `unpack.sh` that automates this for us.
51 | 
52 | We'll run the script and wait for it to finish. During this process it will
53 | unpack the binaries needed and put them in the appropriate places on the
54 | filesystem (e.g. `/usr/local/bin`), it will deploy the OpenShift Mirror
55 | Registry, and then it will populate the mirror registry with the container
56 | images.
57 | 
58 | ```bash
59 | /mnt/ocp4_data/unpack.sh
60 | ```
61 | 
62 | Once the `unpack.sh` script finishes, it will output a configuration that you
63 | will need to copy and paste to the end of your `install-config.yaml` that's
64 | used to install OpenShift. We'll create the `install-config.yaml` in the next
65 | section and those instructions will show you where to put that configuration.
66 | 
67 | > [!NOTE]
68 | > If you lose the configuration output at any point, you can run `unpack.sh`
69 | > again and it will skip anything it has already done and print the configuration
70 | > again.
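
Before moving on, you can optionally confirm that the mirror registry deployed by `unpack.sh` is up and that its credentials work. This is a minimal sketch based on the registry defaults referenced later in this walkthrough (the registry listening on port 8443 of this host, an `openshift` user, and the generated password under `/mnt/ocp4_data/registry`); it also assumes `podman` is available on the High Side host.

```bash
# Optional check: log in to the mirror registry deployed by unpack.sh.
# This assumes the registry defaults used elsewhere in this walkthrough; adjust
# the host, user, or password path if your unpack.sh output differs.
REGISTRY_HOST="$(hostname --fqdn):8443"
REGISTRY_PASSWORD="$(head -n 1 /mnt/ocp4_data/registry/registry_password)"

# --tls-verify=false is used because the registry serves a self-signed
# certificate (the same one shown in the additionalTrustBundle output below).
podman login --username openshift --password "${REGISTRY_PASSWORD}" --tls-verify=false "${REGISTRY_HOST}"
```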
71 | 
72 | Example of the configuration output:
73 | 
74 | ```text
75 | additionalTrustBundle: |-
76 |   -----BEGIN CERTIFICATE-----
77 |   MIIEATCCAumgAwIBAgIUG5laoRL+8bfF2DPxJPzR6mAN2SwwDQYJKoZIhvcNAQEL
78 |   BQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTERMA8GA1UEBwwITmV3IFlv
79 |   cmsxDTALBgNVBAoMBFF1YXkxETAPBgNVBAsMCERpdmlzaW9uMTEwLwYDVQQDDChp
80 |   cC0xMC0wLTQ5LTQyLnVzLWVhc3QtMi5jb21wdXRlLmludGVybmFsMB4XDTIzMDkx
81 |   NTE4MTA1OFoXDTI2MDcwNTE4MTA1OFowgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQI
82 |   DAJWQTERMA8GA1UEBwwITmV3IFlvcmsxDTALBgNVBAoMBFF1YXkxETAPBgNVBAsM
83 |   CERpdmlzaW9uMTEwLwYDVQQDDChpcC0xMC0wLTQ5LTQyLnVzLWVhc3QtMi5jb21w
84 |   dXRlLmludGVybmFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6hqU
85 |   hh1uEH05SihdcdEB4qBo3sbpm5rt3XzfB5U4Q1zJcSNqGFxcsHy4M4tgH6WRaSco
86 |   E0VqjlnuxzmOkBAnbGnCNHHJxRmRakm3CMBmaK6zA+/k4RjhVzXnaFqlXeditSx3
87 |   d1rsd7FMdbWdNgrQaHPIuV2rtKFU9/bI0y4S+TH1GUNfakSTQzo1knbB4vC81DFZ
88 |   o8wC9M9d3T9rGIeWtNPWD3kIYLSwhw8Cdk0Dms3SMhBnhUWLQq5zJmj0gK1SELH6
89 |   2ZzNVESRpmMcDeiqEaaLUIQRDDrpmHECweNX+PQqyeopxxhLPIRB2WMJEbaeNtXI
90 |   XgVe8vD9h5VdMSOLdQIDAQABo20wazALBgNVHQ8EBAMCAuQwEwYDVR0lBAwwCgYI
91 |   KwYBBQUHAwEwMwYDVR0RBCwwKoIoaXAtMTAtMC00OS00Mi51cy1lYXN0LTIuY29t
92 |   cHV0ZS5pbnRlcm5hbDASBgNVHRMBAf8ECDAGAQH/AgEBMA0GCSqGSIb3DQEBCwUA
93 |   A4IBAQCCwlTbg7m/D3Akp5/bufQyL751x2UTxqY3dPUFQXrMh+hUaoFaOd9NZdE1
94 |   laiTMTmiXhatnpSoh3tvKpFqy41GPqEr+jRPQ/J1H8Luok5k9ud58ikn7PsbtZpW
95 |   sXxQGJb0dQouPzQNwTWXtvtFtP9ydrB9rRQGh+x7Je4+uwmz9w31e8uyEudrw0sb
96 |   iTUDpftyGYJeTBDJySEZNF7jGABEny2jPVWnG3rXtEj2Lkt4ZkwixLTHFYZtbfp+
97 |   W/vAur1bnkbtm1p21SkeI/sE8D2KXLynPkaXfYIbF4bgs0N7KCfRLQXgUbwrIdI5
98 |   GwgfEglJ+zHNyH64ixCBXEJqy4ti
99 |   -----END CERTIFICATE-----
100 | imageContentSources:
101 |   - mirrors:
102 |       - ip-10-0-49-42.us-east-2.compute.internal:8443/ubi8
103 |     source: registry.access.redhat.com/ubi8
104 |   - mirrors:
105 |       - ip-10-0-49-42.us-east-2.compute.internal:8443/openshift/release
106 |     source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
107 |   - mirrors:
108 |       - ip-10-0-49-42.us-east-2.compute.internal:8443/openshift/release-images
109 |     source: quay.io/openshift-release-dev/ocp-release
110 | ```
111 | 
112 | [<< Back: Bundle Content](bundle_content.md) - [Next: Install OpenShift >>](install_openshift.md)
113 | 
--------------------------------------------------------------------------------
/hack/cloudformation.yaml:
--------------------------------------------------------------------------------
1 | Description: >
2 | This template is used to simulate a disconnected environment. It deploys a
3 | VPC with a single public subnet and three (3) private subnets, spread across
4 | three (3) Availability Zones. It deploys an internet gateway, with a default
5 | route on the public subnet. A NAT instance running a Squid proxy is deployed
6 | in the public subnet and is configured to block all traffic except for access
7 | to the AWS APIs. The NAT instance is configured as the default route for the
8 | private subnets, which effectively makes the private subnets simulate a
9 | disconnected environment. A jump host is also deployed in the public subnet
10 | and is configured to allow access to the VPC.
11 | 12 | Parameters: 13 | VpcCidr: 14 | Description: The IP range (CIDR notation) for this VPC 15 | Type: String 16 | Default: 10.0.0.0/16 17 | 18 | KeyName: 19 | Description: The name of EC2 key pair to use for the NAT instance 20 | Type: AWS::EC2::KeyPair::KeyName 21 | 22 | S3TransferBucketName: 23 | Description: The name of the S3 bucket that will be used to transfer files 24 | Type: String 25 | Default: autogenerate 26 | 27 | NatInstanceType: 28 | Description: The instance type to use for the NAT instance 29 | Type: String 30 | Default: t3.small 31 | AllowedValues: 32 | - t3.small 33 | - t3.medium 34 | - t3.large 35 | - m4.large 36 | - m4.xlarge 37 | - m5.large 38 | - m5.xlarge 39 | - c4.large 40 | - c4.xlarge 41 | - c5.large 42 | - c5.xlarge 43 | 44 | JumpInstanceType: 45 | Description: The instance type to use for the Jump instance 46 | Type: String 47 | Default: t3.small 48 | AllowedValues: 49 | - t3.small 50 | - t3.medium 51 | - t3.large 52 | - m4.large 53 | - m4.xlarge 54 | - m5.large 55 | - m5.xlarge 56 | - c4.large 57 | - c4.xlarge 58 | - c5.large 59 | - c5.xlarge 60 | 61 | HighSideInstanceType: 62 | Description: The instance type to use for the High Side instance 63 | Type: String 64 | Default: t3.large 65 | AllowedValues: 66 | - t3.large 67 | - t3.xlarge 68 | - m4.large 69 | - m4.xlarge 70 | - m5.large 71 | - m5.xlarge 72 | - c4.large 73 | - c4.xlarge 74 | - c5.large 75 | - c5.xlarge 76 | 77 | JumpInstanceDataVolumeSize: 78 | Description: The size of the data volume to attach to the Jump instance 79 | Type: Number 80 | Default: 150 81 | 82 | HighSideInstanceDataVolumeSize: 83 | Description: The size of the data volume to attach to the High Side instance 84 | Type: Number 85 | Default: 300 86 | 87 | Mappings: 88 | RegionMap: 89 | ap-south-1: 90 | ami: ami-0c0c57e3703b50314 91 | eu-north-1: 92 | ami: ami-078433bb440ad8323 93 | eu-west-3: 94 | ami: ami-0f2a71b6170e045d8 95 | eu-west-2: 96 | ami: ami-070e87281ec7b1a4d 97 | eu-west-1: 98 | ami: ami-008609c5b9c86c915 99 | ap-northeast-3: 100 | ami: ami-00f970067534af873 101 | ap-northeast-2: 102 | ami: ami-08c3474db131766ea 103 | ap-northeast-1: 104 | ami: ami-01b760eab729a9f4c 105 | ca-central-1: 106 | ami: ami-05eeb0575804a82d2 107 | sa-east-1: 108 | ami: ami-02c7c2dafbcfda401 109 | ap-southeast-1: 110 | ami: ami-0cef209741bb92e90 111 | ap-southeast-2: 112 | ami: ami-03a01cc2ae9bd8483 113 | eu-central-1: 114 | ami: ami-029d7f7f77709899f 115 | us-east-1: 116 | ami: ami-033d3612433d4049b 117 | us-east-2: 118 | ami: ami-04617ae0baf90b791 119 | us-west-1: 120 | ami: ami-09b6a34cb89a8c924 121 | us-west-2: 122 | ami: ami-08e6ba516533a3ce8 123 | 124 | Conditions: 125 | GenerateS3BucketName: !Equals [!Ref S3TransferBucketName, autogenerate] 126 | 127 | Resources: 128 | Vpc: 129 | Type: AWS::EC2::VPC 130 | Properties: 131 | CidrBlock: !Ref VpcCidr 132 | EnableDnsSupport: true 133 | EnableDnsHostnames: true 134 | Tags: 135 | - Key: Name 136 | Value: !Sub ${AWS::StackName}-vpc 137 | 138 | InternetGateway: 139 | Type: AWS::EC2::InternetGateway 140 | Properties: 141 | Tags: 142 | - Key: Name 143 | Value: !Sub ${AWS::StackName}-igw 144 | 145 | InternetGatewayAttachment: 146 | Type: AWS::EC2::VPCGatewayAttachment 147 | Properties: 148 | InternetGatewayId: !Ref InternetGateway 149 | VpcId: !Ref Vpc 150 | 151 | ############################################################################# 152 | # Public Subnet 153 | ############################################################################# 154 | 155 | PublicSubnet: 156 | Type: AWS::EC2::Subnet 
157 | Properties: 158 | VpcId: !Ref Vpc 159 | AvailabilityZone: !Select [0, !GetAZs ''] 160 | CidrBlock: !Select [0, !Cidr [!Ref VpcCidr, 6, 12]] 161 | MapPublicIpOnLaunch: true 162 | Tags: 163 | - Key: Name 164 | Value: !Sub ${AWS::StackName}-public 165 | 166 | PublicRouteTable: 167 | Type: AWS::EC2::RouteTable 168 | Properties: 169 | VpcId: !Ref Vpc 170 | Tags: 171 | - Key: Name 172 | Value: !Sub ${AWS::StackName}-public 173 | 174 | DefaultPublicRoute: 175 | Type: AWS::EC2::Route 176 | DependsOn: InternetGatewayAttachment 177 | Properties: 178 | RouteTableId: !Ref PublicRouteTable 179 | DestinationCidrBlock: 0.0.0.0/0 180 | GatewayId: !Ref InternetGateway 181 | 182 | PublicSubnet1RouteTableAssociation: 183 | Type: AWS::EC2::SubnetRouteTableAssociation 184 | Properties: 185 | RouteTableId: !Ref PublicRouteTable 186 | SubnetId: !Ref PublicSubnet 187 | 188 | ############################################################################# 189 | # Private Subnets 190 | ############################################################################# 191 | 192 | PrivateSubnet1: 193 | Type: AWS::EC2::Subnet 194 | Properties: 195 | VpcId: !Ref Vpc 196 | AvailabilityZone: !Select [0, !GetAZs ''] 197 | CidrBlock: !Select [3, !Cidr [!Ref VpcCidr, 6, 12]] 198 | MapPublicIpOnLaunch: false 199 | Tags: 200 | - Key: Name 201 | Value: !Join ["-", [!Ref AWS::StackName, "private", !Select [0, !GetAZs '']]] 202 | 203 | PrivateRouteTable1: 204 | Type: AWS::EC2::RouteTable 205 | Properties: 206 | VpcId: !Ref Vpc 207 | Tags: 208 | - Key: Name 209 | Value: !Join ["-", [!Ref AWS::StackName, "private", !GetAtt PrivateSubnet1.AvailabilityZone]] 210 | 211 | DefaultPrivateRoute1: 212 | Type: AWS::EC2::Route 213 | DependsOn: NatInstance 214 | Properties: 215 | RouteTableId: !Ref PrivateRouteTable1 216 | DestinationCidrBlock: 0.0.0.0/0 217 | InstanceId: !Ref NatInstance 218 | 219 | PrivateSubnetRouteTableAssociation1: 220 | Type: AWS::EC2::SubnetRouteTableAssociation 221 | Properties: 222 | RouteTableId: !Ref PrivateRouteTable1 223 | SubnetId: !Ref PrivateSubnet1 224 | 225 | PrivateSubnet2: 226 | Type: AWS::EC2::Subnet 227 | Properties: 228 | VpcId: !Ref Vpc 229 | AvailabilityZone: !Select [1, !GetAZs ''] 230 | CidrBlock: !Select [4, !Cidr [!Ref VpcCidr, 6, 12]] 231 | MapPublicIpOnLaunch: false 232 | Tags: 233 | - Key: Name 234 | Value: !Join ["-", [!Ref AWS::StackName, "private", !Select [1, !GetAZs '']]] 235 | 236 | PrivateRouteTable2: 237 | Type: AWS::EC2::RouteTable 238 | Properties: 239 | VpcId: !Ref Vpc 240 | Tags: 241 | - Key: Name 242 | Value: !Join ["-", [!Ref AWS::StackName, "private", !GetAtt PrivateSubnet2.AvailabilityZone]] 243 | 244 | DefaultPrivateRoute2: 245 | Type: AWS::EC2::Route 246 | DependsOn: NatInstance 247 | Properties: 248 | RouteTableId: !Ref PrivateRouteTable2 249 | DestinationCidrBlock: 0.0.0.0/0 250 | InstanceId: !Ref NatInstance 251 | 252 | PrivateSubnetRouteTableAssociation2: 253 | Type: AWS::EC2::SubnetRouteTableAssociation 254 | Properties: 255 | RouteTableId: !Ref PrivateRouteTable2 256 | SubnetId: !Ref PrivateSubnet2 257 | 258 | PrivateSubnet3: 259 | Type: AWS::EC2::Subnet 260 | Properties: 261 | VpcId: !Ref Vpc 262 | AvailabilityZone: !Select [2, !GetAZs ''] 263 | CidrBlock: !Select [5, !Cidr [!Ref VpcCidr, 6, 12]] 264 | MapPublicIpOnLaunch: false 265 | Tags: 266 | - Key: Name 267 | Value: !Join ["-", [!Ref AWS::StackName, "private", !Select [2, !GetAZs '']]] 268 | 269 | PrivateRouteTable3: 270 | Type: AWS::EC2::RouteTable 271 | Properties: 272 | VpcId: !Ref Vpc 273 | Tags: 274 | - Key: 
Name 275 | Value: !Join ["-", [!Ref AWS::StackName, "private", !GetAtt PrivateSubnet3.AvailabilityZone]] 276 | 277 | DefaultPrivateRoute3: 278 | Type: AWS::EC2::Route 279 | DependsOn: NatInstance 280 | Properties: 281 | RouteTableId: !Ref PrivateRouteTable3 282 | DestinationCidrBlock: 0.0.0.0/0 283 | InstanceId: !Ref NatInstance 284 | 285 | PrivateSubnetRouteTableAssociation3: 286 | Type: AWS::EC2::SubnetRouteTableAssociation 287 | Properties: 288 | RouteTableId: !Ref PrivateRouteTable3 289 | SubnetId: !Ref PrivateSubnet3 290 | 291 | DefaultInstanceRole: 292 | Type: AWS::IAM::Role 293 | DependsOn: InstallIamUser 294 | Properties: 295 | RoleName: !Sub ${AWS::StackName}-default-role 296 | AssumeRolePolicyDocument: 297 | Version: "2012-10-17" 298 | Statement: 299 | - Effect: Allow 300 | Principal: 301 | Service: 302 | - ec2.amazonaws.com 303 | Action: 304 | - 'sts:AssumeRole' 305 | Path: / 306 | Policies: 307 | - PolicyName: root 308 | PolicyDocument: 309 | Version: "2012-10-17" 310 | Statement: 311 | - Effect: Allow 312 | Action: 313 | - s3:* 314 | Resource: 315 | - !Join 316 | - "" 317 | - - "arn:aws:s3:::" 318 | - !If [GenerateS3BucketName, !Sub "ocp4-disconnected-${AWS::AccountId}-${AWS::Region}", !Ref S3TransferBucketName] 319 | - Effect: Allow 320 | Action: 321 | - s3:* 322 | Resource: 323 | - !Join 324 | - "" 325 | - - "arn:aws:s3:::" 326 | - !If [GenerateS3BucketName, !Sub "ocp4-disconnected-${AWS::AccountId}-${AWS::Region}", !Ref S3TransferBucketName] 327 | - "/*" 328 | - Effect: Allow 329 | Action: 330 | - iam:CreateAccessKey 331 | Resource: 332 | - !GetAtt InstallIamUser.Arn 333 | - Effect: Allow 334 | Action: 335 | - cloudformation:DescribeStacks 336 | - ec2:DescribeSubnets 337 | Resource: "*" 338 | 339 | DefaultInstanceProfile: 340 | Type: AWS::IAM::InstanceProfile 341 | Properties: 342 | InstanceProfileName: !Sub ${AWS::StackName}-default-profile 343 | Path: / 344 | Roles: 345 | - !Ref DefaultInstanceRole 346 | 347 | InstallIamUser: 348 | Type: AWS::IAM::User 349 | Properties: 350 | UserName: !Sub ${AWS::StackName}-install-user 351 | Policies: 352 | - PolicyName: AdministratorAccess 353 | PolicyDocument: 354 | Version: "2012-10-17" 355 | Statement: 356 | - Effect: Allow 357 | Action: "*" 358 | Resource: "*" 359 | 360 | ############################################################################# 361 | # NAT Instance 362 | ############################################################################# 363 | 364 | NatInstanceRole: 365 | Type: AWS::IAM::Role 366 | Properties: 367 | RoleName: !Sub ${AWS::StackName}-nat-role 368 | AssumeRolePolicyDocument: 369 | Version: "2012-10-17" 370 | Statement: 371 | - Effect: Allow 372 | Principal: 373 | Service: 374 | - ec2.amazonaws.com 375 | Action: 376 | - 'sts:AssumeRole' 377 | Path: / 378 | ManagedPolicyArns: 379 | - arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM 380 | - arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy 381 | Policies: 382 | - PolicyName: root 383 | PolicyDocument: 384 | Version: "2012-10-17" 385 | Statement: 386 | - Effect: Allow 387 | Action: 388 | - ec2:ModifyInstanceAttribute 389 | Resource: "*" 390 | 391 | NatInstanceProfile: 392 | Type: AWS::IAM::InstanceProfile 393 | Properties: 394 | InstanceProfileName: !Sub ${AWS::StackName}-nat-profile 395 | Path: / 396 | Roles: 397 | - !Ref NatInstanceRole 398 | 399 | NatInstanceSecurityGroup: 400 | Type: AWS::EC2::SecurityGroup 401 | Properties: 402 | GroupName: !Sub ${AWS::StackName}-nat-sg 403 | GroupDescription: Allow HTTP and HTTPS from VPC to NAT instance 404 
| VpcId: !Ref Vpc 405 | SecurityGroupIngress: 406 | - IpProtocol: tcp 407 | FromPort: 80 408 | ToPort: 80 409 | CidrIp: !Ref VpcCidr 410 | Description: HTTP 411 | - IpProtocol: tcp 412 | FromPort: 443 413 | ToPort: 443 414 | CidrIp: !Ref VpcCidr 415 | Description: HTTPS 416 | - IpProtocol: tcp 417 | FromPort: 22 418 | ToPort: 22 419 | CidrIp: !Ref VpcCidr 420 | Description: SSH 421 | Tags: 422 | - Key: Name 423 | Value: !Sub ${AWS::StackName}-nat-sg 424 | 425 | NatInstanceWaitHandle: 426 | Type: AWS::CloudFormation::WaitConditionHandle 427 | Properties: {} 428 | 429 | NatInstanceWaitCondition: 430 | Type: AWS::CloudFormation::WaitCondition 431 | DependsOn: NatInstance 432 | Properties: 433 | Handle: !Ref NatInstanceWaitHandle 434 | Timeout: 900 435 | 436 | NatInstance: 437 | Type: AWS::EC2::Instance 438 | Properties: 439 | ImageId: !FindInMap ["RegionMap", !Ref "AWS::Region", "ami"] 440 | KeyName: !Ref KeyName 441 | InstanceType: !Ref NatInstanceType 442 | IamInstanceProfile: !Ref NatInstanceProfile 443 | NetworkInterfaces: 444 | - DeviceIndex: 0 445 | AssociatePublicIpAddress: true 446 | GroupSet: 447 | - !Ref NatInstanceSecurityGroup 448 | SubnetId: !Ref PublicSubnet 449 | Tags: 450 | - Key: Name 451 | Value: !Sub ${AWS::StackName}-nat 452 | UserData: 453 | Fn::Base64: !Sub | 454 | #!/bin/bash -xe 455 | # Redirect the user-data output to the console logs 456 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 457 | 458 | # Display a friendly name in bash 459 | # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html#set-hostname-shell 460 | sed -i 's/\([[:blank:]]*\[[[:blank:]]"\$PS1"[[:blank:]]*=[[:blank:]]*".*\)\(\\h\)\(.*\)/\1$NICKNAME\3/g' /etc/bashrc 461 | echo "export NICKNAME=nat" > /etc/profile.d/friendly_prompt.sh 462 | 463 | # Apply the latest security patches 464 | yum update -y --security 465 | yum install -y unzip vim firewalld 466 | 467 | # Install the AWS CLI 468 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o /tmp/awscliv2.zip 469 | unzip -q -d /tmp /tmp/awscliv2.zip && rm -f /tmp/awscliv2.zip && /tmp/aws/install && rm -rf /tmp/aws 470 | 471 | # Disable source / destination check. 
It cannot be disabled from the launch configuration 472 | region=${AWS::Region} 473 | instanceid=`curl -s http://169.254.169.254/latest/meta-data/instance-id` 474 | aws ec2 modify-instance-attribute --no-source-dest-check --instance-id $instanceid --region $region 475 | 476 | # Add RHUI cert to trust bundle 477 | trust anchor /etc/pki/rhui/cdn.redhat.com-chain.crt 478 | update-ca-trust 479 | 480 | # Install and start Squid 481 | yum install -y squid 482 | /usr/lib64/squid/security_file_certgen -c -s /var/spool/squid/ssl_db -M 4MB 483 | systemctl enable --now squid 484 | sleep 5 485 | 486 | cp -a /etc/squid /etc/squid_orig 487 | 488 | # Create a SSL certificate for the SslBump Squid module 489 | mkdir /etc/squid/ssl 490 | pushd /etc/squid/ssl 491 | openssl genrsa -out squid.key 4096 492 | openssl req -new -key squid.key -out squid.csr -subj "/C=US/ST=VA/L=squid/O=squid/CN=squid" 493 | openssl x509 -req -days 3650 -in squid.csr -signkey squid.key -out squid.crt 494 | cat squid.key squid.crt >> squid.pem 495 | 496 | echo '.amazonaws.com' > /etc/squid/whitelist.txt 497 | echo '.cloudfront.net' >> /etc/squid/whitelist.txt 498 | echo '.aws.ce.redhat.com' >> /etc/squid/whitelist.txt 499 | 500 | cat > /etc/squid/squid.conf << 'EOF' 501 | 502 | visible_hostname squid 503 | cache deny all 504 | 505 | # Log format and rotation 506 | logformat squid %ts.%03tu %6tr %>a %Ss/%03>Hs %sni %Sh/%> /etc/sysctl.d/10-custom.conf 545 | firewall-offline-cmd --add-forward-port=port=80:proto=tcp:toport=3129 546 | firewall-offline-cmd --add-forward-port=port=443:proto=tcp:toport=3130 547 | firewall-offline-cmd --add-masquerade 548 | systemctl enable --now firewalld 549 | 550 | curl -X PUT -H 'Content-Type:' --data-binary '{"Status" : "SUCCESS","Reason" : "Configuration Complete","UniqueId" : "8675309","Data" : "Application has completed configuration."}' "${NatInstanceWaitHandle}" 551 | 552 | ############################################################################# 553 | # Jump Instance 554 | ############################################################################# 555 | 556 | JumpInstanceSecurityGroup: 557 | Type: AWS::EC2::SecurityGroup 558 | Properties: 559 | GroupName: !Sub ${AWS::StackName}-jump-sg 560 | GroupDescription: Allow SSH from everywhere to Jump instance 561 | VpcId: !Ref Vpc 562 | SecurityGroupIngress: 563 | - IpProtocol: tcp 564 | FromPort: 22 565 | ToPort: 22 566 | CidrIp: 0.0.0.0/0 567 | Description: SSH 568 | Tags: 569 | - Key: Name 570 | Value: !Sub ${AWS::StackName}-jump-sg 571 | 572 | JumpInstance: 573 | Type: AWS::EC2::Instance 574 | DependsOn: InternetGatewayAttachment 575 | Properties: 576 | ImageId: !FindInMap ["RegionMap", !Ref "AWS::Region", "ami"] 577 | KeyName: !Ref KeyName 578 | InstanceType: !Ref JumpInstanceType 579 | IamInstanceProfile: !Ref DefaultInstanceProfile 580 | NetworkInterfaces: 581 | - DeviceIndex: 0 582 | AssociatePublicIpAddress: true 583 | GroupSet: 584 | - !Ref JumpInstanceSecurityGroup 585 | SubnetId: !Ref PublicSubnet 586 | BlockDeviceMappings: 587 | - DeviceName: /dev/sdh 588 | Ebs: 589 | VolumeType: gp3 590 | DeleteOnTermination: true 591 | VolumeSize: !Ref JumpInstanceDataVolumeSize 592 | Tags: 593 | - Key: Name 594 | Value: !Sub ${AWS::StackName}-jump 595 | UserData: 596 | Fn::Base64: !Sub | 597 | #!/bin/bash -xe 598 | # Redirect the user-data output to the console logs 599 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 600 | 601 | # Display a friendly name in bash 602 | # 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html#set-hostname-shell 603 | sed -i 's/\([[:blank:]]*\[[[:blank:]]"\$PS1"[[:blank:]]*=[[:blank:]]*".*\)\(\\h\)\(.*\)/\1$NICKNAME\3/g' /etc/bashrc 604 | echo "export NICKNAME=jump" > /etc/profile.d/friendly_prompt.sh 605 | 606 | # Apply the latest security patches 607 | yum update -y --security 608 | yum install -y unzip vim lvm2 git podman 609 | 610 | # Partition and mount data disk 611 | pvcreate /dev/nvme1n1 612 | vgcreate data /dev/nvme1n1 613 | lvcreate --name ocp --extents 100%VG data 614 | mkfs.xfs /dev/mapper/data-ocp 615 | mkdir /mnt/ocp4_data 616 | echo '/dev/mapper/data-ocp /mnt/ocp4_data xfs defaults 0 0' >> /etc/fstab 617 | systemctl daemon-reload 618 | mount /mnt/ocp4_data 619 | chown ec2-user:ec2-user /mnt/ocp4_data 620 | 621 | # Install the AWS CLI 622 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o /tmp/awscliv2.zip 623 | unzip -q -d /tmp /tmp/awscliv2.zip && rm -f /tmp/awscliv2.zip && /tmp/aws/install && rm -rf /tmp/aws 624 | 625 | JumpInstanceEip: 626 | Type: AWS::EC2::EIP 627 | DependsOn: JumpInstance 628 | Properties: 629 | InstanceId: !Ref JumpInstance 630 | 631 | ############################################################################# 632 | # High Side Instance 633 | ############################################################################# 634 | 635 | HighSideInstanceSecurityGroup: 636 | Type: AWS::EC2::SecurityGroup 637 | Properties: 638 | GroupName: !Sub ${AWS::StackName}-highside-sg 639 | GroupDescription: Allow SSH from VPC to High Side instance 640 | VpcId: !Ref Vpc 641 | SecurityGroupIngress: 642 | - IpProtocol: tcp 643 | FromPort: 22 644 | ToPort: 22 645 | CidrIp: !Ref VpcCidr 646 | Description: SSH 647 | - IpProtocol: tcp 648 | FromPort: 8443 649 | ToPort: 8443 650 | CidrIp: !Ref VpcCidr 651 | Description: Mirror Registry 652 | Tags: 653 | - Key: Name 654 | Value: !Sub ${AWS::StackName}-highside-sg 655 | 656 | HighSideInstance: 657 | Type: AWS::EC2::Instance 658 | DependsOn: NatInstanceWaitCondition 659 | Properties: 660 | ImageId: !FindInMap ["RegionMap", !Ref "AWS::Region", "ami"] 661 | KeyName: !Ref KeyName 662 | InstanceType: !Ref HighSideInstanceType 663 | IamInstanceProfile: !Ref DefaultInstanceProfile 664 | NetworkInterfaces: 665 | - DeviceIndex: 0 666 | AssociatePublicIpAddress: false 667 | GroupSet: 668 | - !Ref HighSideInstanceSecurityGroup 669 | SubnetId: !Ref PrivateSubnet1 670 | BlockDeviceMappings: 671 | - DeviceName: /dev/sdh 672 | Ebs: 673 | VolumeType: gp3 674 | DeleteOnTermination: true 675 | VolumeSize: !Ref HighSideInstanceDataVolumeSize 676 | Tags: 677 | - Key: Name 678 | Value: !Sub ${AWS::StackName}-highside 679 | UserData: 680 | Fn::Base64: !Sub | 681 | #!/bin/bash -xe 682 | # Redirect the user-data output to the console logs 683 | exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 684 | 685 | # Display a friendly name in bash 686 | # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-hostname.html#set-hostname-shell 687 | sed -i 's/\([[:blank:]]*\[[[:blank:]]"\$PS1"[[:blank:]]*=[[:blank:]]*".*\)\(\\h\)\(.*\)/\1$NICKNAME\3/g' /etc/bashrc 688 | echo "export NICKNAME=highside" > /etc/profile.d/friendly_prompt.sh 689 | 690 | # Apply the latest security patches 691 | yum update -y --security 692 | yum install -y unzip vim lvm2 podman 693 | 694 | # Partition and mount data disk 695 | pvcreate /dev/nvme1n1 696 | vgcreate data /dev/nvme1n1 697 | lvcreate --name ocp --extents 100%VG data 698 | mkfs.xfs 
/dev/mapper/data-ocp 699 | mkdir /mnt/ocp4_data 700 | echo '/dev/mapper/data-ocp /mnt/ocp4_data xfs defaults 0 0' >> /etc/fstab 701 | systemctl daemon-reload 702 | mount /mnt/ocp4_data 703 | chown ec2-user:ec2-user /mnt/ocp4_data 704 | 705 | # Install the AWS CLI 706 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o /tmp/awscliv2.zip 707 | unzip -q -d /tmp /tmp/awscliv2.zip && rm -f /tmp/awscliv2.zip && /tmp/aws/install && rm -rf /tmp/aws 708 | 709 | ############################################################################# 710 | # S3 Transfer Bucket 711 | ############################################################################# 712 | 713 | S3TransferBucket: 714 | Type: AWS::S3::Bucket 715 | DeletionPolicy: Delete 716 | Properties: 717 | BucketName: !If [GenerateS3BucketName, !Sub "${AWS::StackName}-${AWS::AccountId}-${AWS::Region}", !Ref S3TransferBucketName] 718 | 719 | Outputs: 720 | Vpc: 721 | Description: A reference to the created VPC 722 | Value: !Ref Vpc 723 | 724 | PrivateSubnets: 725 | Description: A list of the private subnets 726 | Value: !Join [",", [!Ref PrivateSubnet1, !Ref PrivateSubnet2, !Ref PrivateSubnet3]] 727 | 728 | PublicSubnet1: 729 | Description: A reference to the public subnet 730 | Value: !Ref PublicSubnet 731 | 732 | PrivateSubnet1: 733 | Description: A reference to the private subnet in the 1st Availability Zone 734 | Value: !Ref PrivateSubnet1 735 | 736 | PrivateSubnet2: 737 | Description: A reference to the private subnet in the 2nd Availability Zone 738 | Value: !Ref PrivateSubnet2 739 | 740 | PrivateSubnet3: 741 | Description: A reference to the private subnet in the 3rd Availability Zone 742 | Value: !Ref PrivateSubnet3 743 | 744 | S3TransferBucket: 745 | Description: A reference to the S3 transfer bucket 746 | Value: !Ref S3TransferBucket 747 | 748 | JumpInstancePublicIp: 749 | Description: A reference to the public EIP allocated to the Jump instance 750 | Value: !Ref JumpInstanceEip 751 | 752 | HighSideInstancePrivateIp: 753 | Description: A reference to the private IP to the High Side instance 754 | Value: !GetAtt HighSideInstance.PrivateIp 755 | 756 | InstallIamUser: 757 | Description: A reference to the IAM User to use for installing OpenShift 758 | Value: !Ref InstallIamUser 759 | -------------------------------------------------------------------------------- /hack/generate_ami_mapping.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "AWS_PROFILE for AWS commercial regions:" 4 | read aws_profile_commercial 5 | 6 | echo "AWS_PROFILE for AWS GovCloud regions:" 7 | read aws_profile_govcloud 8 | 9 | rhel_version="RHEL-8.8" 10 | profiles=("${aws_profile_commercial}" "${aws_profile_govcloud}") 11 | 12 | echo 13 | echo 14 | echo "Mappings:" 15 | echo " RegionMap:" 16 | 17 | for profile in ${profiles[@]}; do 18 | export AWS_PROFILE=${profile} 19 | 20 | regions=$(aws ec2 describe-regions --output text --query 'Regions[*].RegionName') 21 | 22 | for region in $regions; do 23 | ami=$(aws ec2 describe-images --region ${region} --filters "Name=name,Values='${rhel_version}*-Hourly*'" "Name=architecture,Values=x86_64" | jq -r '.Images |= sort_by(.CreationDate) | .Images | reverse | .[0].ImageId') 24 | echo " ${region}:" 25 | echo " ami: ${ami}" 26 | done 27 | done 28 | -------------------------------------------------------------------------------- /ocp4_disconnected/__init__.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python3 2 | 3 | from pathlib import Path 4 | 5 | 6 | MIRROR_URL = 'https://mirror.openshift.com/pub/openshift-v4/clients/' 7 | 8 | BASE_DIR = Path(__file__).resolve(strict=True).parent 9 | -------------------------------------------------------------------------------- /ocp4_disconnected/bundle.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import json 4 | from pathlib import Path 5 | import re 6 | import shutil 7 | import subprocess 8 | import tarfile 9 | 10 | import click 11 | import colorlog 12 | import requests 13 | from requests.compat import urljoin 14 | from tqdm import tqdm 15 | 16 | from . import BASE_DIR, MIRROR_URL 17 | from .imageset import ImagesetConfig 18 | 19 | 20 | CONTEXT_SETTINGS = dict( 21 | help_option_names=['-h', '--help'], 22 | max_content_width=100, 23 | ) 24 | 25 | colorlog.basicConfig( 26 | format='%(log_color)s%(levelname)s%(reset)s:%(asctime)s:%(name)s:%(message)s', 27 | datefmt='%Y-%m-%d %H:%M:%S', 28 | level=colorlog.WARNING, 29 | ) 30 | logger = colorlog.getLogger('bundle') 31 | logger.setLevel(colorlog.DEBUG) 32 | 33 | 34 | class Bundle(): 35 | def __init__(self, openshift_version: str, output_dir: str, pull_secret: str) -> None: 36 | self.openshift_version = openshift_version 37 | self.output_dir = Path(output_dir) 38 | self.pull_secret = pull_secret 39 | 40 | self.real_openshift_version = self._real_openshift_version() 41 | self.images_dir = self.output_dir.joinpath('images') 42 | self.metadata_dir = self.output_dir.joinpath('metadata') 43 | self.binaries_dir = self.output_dir.joinpath('bin') 44 | 45 | self.clients_dir = self.output_dir.joinpath('clients') 46 | self.clients_version_dir = self.clients_dir.joinpath(self.real_openshift_version) 47 | 48 | self.docker_config_dir = Path.home().joinpath('.docker') 49 | self.make_output_dirs() 50 | 51 | def _release_info(self) -> str: 52 | version_url = self.openshift_version 53 | 54 | match = re.fullmatch(r'(4\.\d+)', self.openshift_version) 55 | if match: 56 | version_url = f'stable-{self.openshift_version}' 57 | logger.info(f'Converted {self.openshift_version} to {version_url} for release info URL') 58 | 59 | try: 60 | r = requests.get(urljoin(MIRROR_URL, f'ocp/{version_url}/release.txt')) 61 | r.raise_for_status() 62 | except requests.HTTPError: 63 | logger.exception(f'Unable to find release info for OpenShift version {self.openshift_version}') 64 | raise 65 | 66 | return r.text 67 | 68 | def _real_openshift_version(self) -> str: 69 | release_info = self._release_info() 70 | 71 | match = re.search(r'Name:\s+(4\.\d+\.\d+)', release_info) 72 | if match: 73 | logger.info(f'Using OpenShift version {match.group(1)}') 74 | real_version = match.group(1) 75 | else: 76 | raise ValueError('Unable to find OpenShift version number in release info') 77 | 78 | if int(real_version.split('.')[1]) < 10: 79 | raise ValueError('OpenShift versions before 4.10 are not supported by this tool') 80 | 81 | return real_version 82 | 83 | def make_output_dirs(self) -> None: 84 | output_dirs = [ 85 | self.images_dir, 86 | self.metadata_dir, 87 | self.binaries_dir, 88 | self.clients_dir, 89 | self.clients_version_dir, 90 | self.docker_config_dir, 91 | ] 92 | for directory in output_dirs: 93 | directory.mkdir(parents=True, exist_ok=True) 94 | 95 | def download_with_progress_bar(self, url: str, output_path: Path) -> Path: 96 | try: 97 | r = requests.get(url, stream=True) 98 | r.raise_for_status() 99 | except requests.HTTPError: 100 | 
logger.exception(f'Unable to download {url}') 101 | 102 | progress_bar = tqdm(total=int(r.headers.get('content-length', 0)), unit='iB', unit_scale=True) 103 | with output_path.open('wb') as f: 104 | for data in r.iter_content(1024): 105 | progress_bar.update(len(data)) 106 | f.write(data) 107 | progress_bar.close() 108 | 109 | return output_path 110 | 111 | def download_cli_artifacts(self, name: str, filename: str, output_dir: Path = None, url: str = None) -> Path: 112 | if output_dir is None: 113 | output_dir = self.clients_version_dir 114 | 115 | output_path = output_dir.joinpath(filename) 116 | 117 | if not output_path.is_file(): 118 | logger.info(f'Downloading the {name}') 119 | 120 | if not url: 121 | url = urljoin(MIRROR_URL, f'ocp/{self.real_openshift_version}/{filename}') 122 | output_path = self.download_with_progress_bar(url, output_path) 123 | 124 | logger.info(f'{name} download complete') 125 | else: 126 | logger.info(f'{name} has already been downloaded, skipping') 127 | 128 | return output_path 129 | 130 | def extract_binaries(self, tarfile_path: Path, binaries: list) -> None: 131 | if binaries: 132 | logger.info(f'Extracting <{", ".join(binaries)}> from {tarfile_path}') 133 | else: 134 | logger.info(f'Extracting {tarfile_path}') 135 | 136 | def extract(tar): 137 | for name in tar.getnames(): 138 | output_path = self.binaries_dir.joinpath(name) 139 | if binaries: 140 | if name in binaries: 141 | if output_path.is_file(): 142 | logger.info(f'File already extracted {output_path}, skipping') 143 | else: 144 | tar.extract(name, path=self.binaries_dir) 145 | # Ensure the extracted binary is executable 146 | output_path.chmod(0o755) 147 | else: 148 | if output_path.is_file(): 149 | logger.info(f'File already extracted {output_path}, skipping') 150 | else: 151 | tar.extract(name, path=self.binaries_dir) 152 | 153 | with tarfile.open(str(tarfile_path)) as tar: 154 | extract(tar) 155 | logger.info(f'Extracting complete for {tarfile_path}') 156 | 157 | def download_installer(self) -> None: 158 | self.download_cli_artifacts('OpenShift installer', 'openshift-install-linux.tar.gz') 159 | 160 | def download_clients(self) -> None: 161 | output_path = self.download_cli_artifacts('OpenShift clients', 'openshift-client-linux.tar.gz') 162 | self.extract_binaries(output_path, ['oc', 'kubectl']) 163 | 164 | def download_oc_mirror(self) -> None: 165 | output_path = self.download_cli_artifacts('OpenShift mirror plugin', 'oc-mirror.tar.gz') 166 | self.extract_binaries(output_path, ['oc-mirror']) 167 | 168 | def download_mirror_registry(self) -> None: 169 | self.download_cli_artifacts('OpenShift mirror registry', 'mirror-registry.tar.gz', output_dir=self.clients_dir, 170 | url=urljoin(MIRROR_URL, 'mirror-registry/latest/mirror-registry.tar.gz')) 171 | 172 | def download_jq(self) -> None: 173 | self.download_cli_artifacts('jq', 'jq', output_dir=self.clients_dir, 174 | url='https://github.com/jqlang/jq/releases/latest/download/jq-linux-amd64') 175 | 176 | def download_yq(self) -> None: 177 | self.download_cli_artifacts('yq', 'yq', output_dir=self.clients_dir, 178 | url='https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64') 179 | 180 | def mirror_images(self, attempt_count: int = 1) -> None: 181 | # TODO: see if symlinking to the /mnt/data path works 182 | # TODO: check if file exists before writing as to not clobber an existing one 183 | docker_config = self.docker_config_dir.joinpath('config.json') 184 | logger.info(f'Writing pull secret to {docker_config}') 185 | 
docker_config.write_text(self.pull_secret) 186 | 187 | imageset_config = ImagesetConfig(self.real_openshift_version, self.output_dir, self.metadata_dir) 188 | imageset_config.create_imageset_config() 189 | 190 | cmd_env = { 191 | 'PATH': f'{self.binaries_dir}:$PATH', 192 | 'HOME': str(Path.home()), 193 | } 194 | 195 | logger.info(f'Mirroring images using config {imageset_config.config_path} (grab a coffee, this will take a while)') 196 | mirror = subprocess.run( 197 | [ 198 | 'oc', 199 | 'mirror', 200 | '--config', imageset_config.config_path.absolute(), 201 | f'file://{self.images_dir.absolute()}', 202 | ], 203 | env=cmd_env, 204 | cwd=self.metadata_dir, 205 | ) 206 | 207 | # The mirror command fails intermittently for reasons that aren't fully understood, so retry a few times before giving up. 208 | # https://github.com/openshift/oc-mirror/issues/175 209 | try: 210 | mirror.check_returncode() 211 | except subprocess.CalledProcessError: 212 | if attempt_count >= 3: 213 | raise 214 | 215 | logger.error('This failure seems to happen occasionally, retrying') 216 | self.mirror_images(attempt_count + 1) 217 | 218 | def cleanup(self) -> None: 219 | shutil.rmtree(self.images_dir.joinpath('oc-mirror-workspace')) 220 | 221 | def bundle(self) -> None: 222 | self.download_installer() 223 | self.download_clients() 224 | self.download_oc_mirror() 225 | self.download_mirror_registry() 226 | self.download_jq() 227 | self.download_yq() 228 | self.mirror_images() 229 | self.cleanup() 230 | 231 | # TODO: Bundle incremental data since last run instead of all data 232 | bundle_path = self.output_dir.joinpath('ocp4_bundle.tar') 233 | logger.info(f'Bundling all content into tar file at {bundle_path}') 234 | with tarfile.open(str(bundle_path), 'w') as tar: 235 | logger.info('Adding clients to tar file') 236 | tar.add(self.clients_version_dir, arcname=self.clients_dir.stem) 237 | for child in self.clients_dir.iterdir(): 238 | if child.is_file(): 239 | tar.add(child, arcname=f'{self.clients_dir.stem}/{child.name}') 240 | 241 | logger.info('Adding images to tar file') 242 | tar.add(self.images_dir, arcname=self.images_dir.stem) 243 | 244 | logger.info('Adding unpack script to tar file') 245 | tar.add(BASE_DIR.joinpath('unpack.sh'), arcname='unpack.sh') 246 | 247 | logger.info('Completed bundle') 248 | 249 | 250 | def get_pull_secret(ctx, param, value): 251 | output_dir = ctx.params.get('output_dir') 252 | if output_dir is None: 253 | raise click.ClickException('When specifying --pull-secret you must also provide --output-dir') 254 | 255 | output_path = Path(ctx.params['output_dir']).joinpath('pull-secret.json') 256 | 257 | if output_path.is_file() and value is None: 258 | return output_path.read_text() 259 | 260 | if value is None: 261 | pull_secret = click.prompt('Pull Secret (input hidden)', hide_input=True) 262 | return get_pull_secret(ctx, param, pull_secret) 263 | else: 264 | try: 265 | json.loads(value) 266 | except json.JSONDecodeError: 267 | raise click.BadParameter('The pull secret specified is not valid JSON') 268 | output_path.write_text(value) 269 | return value 270 | 271 | @click.command(context_settings=CONTEXT_SETTINGS) 272 | @click.option('--openshift-version', prompt='OpenShift Version', required=True, default='latest', 273 | help='The version of OpenShift (e.g. 
4.12, 4.12.23, latest) you would like to create an air-gapped package for') 274 | @click.option('--output-dir', prompt='Output Directory', required=True, 275 | help='The directory to output the content needed for an air-gapped install') 276 | @click.option('--pull-secret', required=False, callback=get_pull_secret, 277 | help='The pull secret used to pull images from Red Hat') 278 | def main(openshift_version, pull_secret, output_dir): 279 | """Bundle all of the artifacts needed for an OpenShift 4 install in a 280 | disconnected / air-gapped environment. 281 | 282 | When prompted for your Pull Secret, it can be found at: 283 | https://console.redhat.com/openshift/install/pull-secret 284 | """ 285 | b = Bundle(openshift_version, output_dir, pull_secret) 286 | b.bundle() 287 | 288 | if __name__ == '__main__': 289 | main() 290 | -------------------------------------------------------------------------------- /ocp4_disconnected/imageset-config-template.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mirror.openshift.io/v1alpha2 3 | kind: ImageSetConfiguration 4 | storageConfig: 5 | local: 6 | path: ./ 7 | mirror: 8 | platform: 9 | channels: [] 10 | graph: true 11 | operators: [] 12 | additionalImages: [] 13 | -------------------------------------------------------------------------------- /ocp4_disconnected/imageset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from pathlib import Path 4 | 5 | import colorlog 6 | import yaml 7 | 8 | from . import BASE_DIR 9 | 10 | 11 | colorlog.basicConfig( 12 | format='%(log_color)s%(levelname)s%(reset)s:%(asctime)s:%(name)s:%(message)s', 13 | datefmt='%Y-%m-%d %H:%M:%S', 14 | level=colorlog.WARNING, 15 | ) 16 | logger = colorlog.getLogger('imageset') 17 | logger.setLevel(colorlog.DEBUG) 18 | 19 | 20 | # https://github.com/openshift/oc-mirror/blob/main/docs/imageset-config-ref.yaml 21 | class ImagesetConfig(): 22 | def __init__(self, openshift_version_xyz: str, config_dir: Path, storage_dir: Path) -> None: 23 | self.openshift_version_xyz = openshift_version_xyz 24 | self.config_dir = config_dir 25 | self.storage_dir = storage_dir 26 | 27 | self.config_path = self.config_dir.joinpath(f'imageset-config.yaml') 28 | 29 | self.openshift_version_xy = '.'.join(self.openshift_version_xyz.split('.')[0:2]) 30 | self.openshift_release_channel = f'fast-{self.openshift_version_xy}' 31 | self.imageset_config = self._imageset_config() 32 | 33 | def _imageset_config(self) -> dict: 34 | if self.config_path.is_file(): 35 | with self.config_path.open('r') as f: 36 | return yaml.safe_load(f) 37 | 38 | with BASE_DIR.joinpath('imageset-config-template.yaml').open('r') as f: 39 | return yaml.safe_load(f) 40 | 41 | def set_storage_config(self) -> None: 42 | self.imageset_config['storageConfig']['local']['path'] = str(self.storage_dir.absolute()) 43 | 44 | def append_openshift_release(self) -> None: 45 | channels = self.imageset_config['mirror']['platform']['channels'] 46 | for channel in channels: 47 | if channel['name'] == self.openshift_release_channel: 48 | logger.info(f'Found release channel in imageset config, updating version range') 49 | 50 | min_version_z_stream = int(channel['minVersion'].split('.')[2]) 51 | max_version_z_stream = int(channel['maxVersion'].split('.')[2]) 52 | z_stream = int(self.openshift_version_xyz.split('.')[2]) 53 | 54 | if z_stream < min_version_z_stream: 55 | logger.info(f'OpenShift version 
{self.openshift_version_xyz} is less than current minimum version in imageset, updating') 56 | channel['minVersion'] = self.openshift_version_xyz 57 | elif z_stream > max_version_z_stream: 58 | logger.info(f'OpenShift version {self.openshift_version_xyz} is greater than current maximum version in imageset, updating') 59 | channel['maxVersion'] = self.openshift_version_xyz 60 | return 61 | 62 | self.imageset_config['mirror']['platform']['channels'].append({ 63 | 'name': self.openshift_release_channel, 64 | 'type': 'ocp', 65 | 'minVersion': self.openshift_version_xyz, 66 | 'maxVersion': self.openshift_version_xyz, 67 | 'shortestPath': True, 68 | }) 69 | 70 | def append_operator_catalog(self) -> None: 71 | pass 72 | 73 | def append_additional_images(self) -> None: 74 | pass 75 | 76 | def create_imageset_config(self) -> None: 77 | self.set_storage_config() 78 | self.append_openshift_release() 79 | self.append_operator_catalog() 80 | self.append_additional_images() 81 | 82 | logger.info(f'Writing imageset config to {self.config_path}') 83 | with self.config_path.open('w') as f: 84 | yaml.dump(self.imageset_config, f) 85 | -------------------------------------------------------------------------------- /ocp4_disconnected/unpack.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 6 | 7 | CLIENTS_DIR=${SCRIPT_DIR}/clients 8 | IMAGES_DIR=${SCRIPT_DIR}/images 9 | BIN_DIR=${SCRIPT_DIR}/bin 10 | REGISTRY_DIR=${SCRIPT_DIR}/registry 11 | METADATA_DIR=${SCRIPT_DIR}/metadata 12 | 13 | REGISTRY_PASSWORD=$(tr -dc A-Za-z0-9 ${REGISTRY_DIR}/registry_password 38 | 39 | cd ${REGISTRY_DIR} 40 | ${BIN_DIR}/mirror-registry install \ 41 | --quayHostname $(hostname --fqdn) \ 42 | --quayRoot ${REGISTRY_DIR}/quay-install \ 43 | --quayStorage ${REGISTRY_DIR}/quay-storage \ 44 | --pgStorage ${REGISTRY_DIR}/pg-data \ 45 | --initUser openshift \ 46 | --initPassword ${REGISTRY_PASSWORD} 47 | fi 48 | 49 | if [[ ! -f /etc/pki/ca-trust/source/anchors/quay_mirror_registry_ca.pem ]]; then 50 | sudo cp ${REGISTRY_DIR}/quay-install/quay-rootCA/rootCA.pem /etc/pki/ca-trust/source/anchors/quay_mirror_registry_ca.pem 51 | sudo update-ca-trust extract 52 | fi 53 | 54 | ############################################################################### 55 | # Clients 56 | ############################################################################### 57 | 58 | cd ${SCRIPT_DIR} 59 | if [[ ! -f /usr/local/bin/oc ]]; then 60 | sudo tar --extract --verbose --directory /usr/local/bin --file ${CLIENTS_DIR}/openshift-client-linux.tar.gz oc kubectl 61 | sudo chmod +x /usr/local/bin/oc /usr/local/bin/kubectl 62 | fi 63 | 64 | if [[ ! -f /usr/local/bin/oc-mirror ]]; then 65 | sudo tar --extract --verbose --directory /usr/local/bin --file ${CLIENTS_DIR}/oc-mirror.tar.gz oc-mirror 66 | sudo chmod +x /usr/local/bin/oc-mirror 67 | fi 68 | 69 | if [[ ! -f /usr/local/bin/openshift-install ]]; then 70 | sudo tar --extract --verbose --directory /usr/local/bin --file ${CLIENTS_DIR}/openshift-install-linux.tar.gz openshift-install 71 | sudo chmod +x /usr/local/bin/openshift-install 72 | fi 73 | 74 | if [[ ! -f /usr/local/bin/jq ]]; then 75 | sudo cp ${CLIENTS_DIR}/jq /usr/local/bin/jq 76 | sudo chmod +x /usr/local/bin/jq 77 | fi 78 | 79 | if [[ ! 
-f /usr/local/bin/yq ]]; then 80 | sudo cp ${CLIENTS_DIR}/yq /usr/local/bin/yq 81 | sudo chmod +x /usr/local/bin/yq 82 | fi 83 | 84 | ############################################################################### 85 | # Populate Mirror Registry 86 | ############################################################################### 87 | 88 | mkdir -p ${METADATA_DIR} 89 | podman login --username openshift --password $(cat ${REGISTRY_DIR}/registry_password) $(hostname --fqdn):8443 90 | 91 | cd ${METADATA_DIR} 92 | 93 | set +e 94 | oc mirror --from=${LATEST_IMAGES_FILE} docker://$(hostname --fqdn):8443 2>/tmp/oc-mirror-error.log 95 | 96 | if [[ $? != 0 ]]; then 97 | if ! grep --quiet 'expecting imageset with prefix mirror_seq' /tmp/oc-mirror-error.log; then 98 | rm -f /tmp/oc-mirror-error.log 99 | exit 1 100 | fi 101 | fi 102 | 103 | set -e 104 | cat /tmp/oc-mirror-error.log 105 | rm -f /tmp/oc-mirror-error.log 106 | 107 | for results_dir in $(find ${METADATA_DIR}/oc-mirror-workspace -type d -name 'results-*' | sort -Vr); do 108 | if [[ -f ${results_dir}/imageContentSourcePolicy.yaml ]]; then 109 | LATEST_ICSP_FILE=${results_dir}/imageContentSourcePolicy.yaml 110 | break 111 | fi 112 | done 113 | 114 | ############################################################################### 115 | # Install Config 116 | ############################################################################### 117 | 118 | yq eval --null-input '{"additionalTrustBundle": "'"$( ${REGISTRY_DIR}/install_config_registry.yaml 119 | cat ${LATEST_ICSP_FILE} | yq eval --no-doc '.spec.repositoryDigestMirrors' | yq eval '{"imageContentSources": . }' >> ${REGISTRY_DIR}/install_config_registry.yaml 120 | 121 | echo 122 | echo 123 | cat << EOF 124 | The binaries have been unpacked to /usr/local/bin and the container images have 125 | been uploaded to the mirror registry. You are ready to create the 126 | install-config.yaml for the target environment. 127 | 128 | The mirror registry login information is: 129 | Username: openshift 130 | Password: `cat ${REGISTRY_DIR}/registry_password` 131 | 132 | If you are following along with the walkthrough provided with this tool, check 133 | next steps for how to generate the install-config.yaml for the example environment 134 | that's provided. If you are targeting a different environment, the walkthrough 135 | should also provide some links to the OpenShift documentation for how to 136 | generate the install-config.yaml for the target environment. 137 | 138 | Once you have your install-config.yaml created for your target environment, 139 | there's one update to the install-config.yaml we need to make that will tell 140 | the OpenShift installer to use our mirrored content instead of defaulting to 141 | reaching out to the internet for content. 142 | 143 | Copy and paste the following blocks of YAML to the end of your install-config.yaml. 144 | This output has also been saved to ${REGISTRY_DIR}/install_config_registry.yaml 145 | EOF 146 | echo 147 | echo 148 | 149 | cat ${REGISTRY_DIR}/install_config_registry.yaml 150 | 151 | echo 152 | echo 153 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [pycodestyle] 2 | max_line_length = 99 3 | ignore = E501 4 | --------------------------------------------------------------------------------
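A minimal end-to-end sketch of how these pieces fit together, assuming the package and its Pipfile dependencies are installed on the internet-connected Jump instance, the data volumes are mounted at /mnt/ocp4_data as in the CloudFormation user-data, and <transfer-bucket> stands in for the S3TransferBucket name from the stack outputs (the module invocation and the S3 copy steps are illustrative assumptions, not commands taken from this repository's docs):

    # On the Jump instance (internet access via the NAT/Squid proxy):
    python3 -m ocp4_disconnected.bundle --openshift-version 4.14 --output-dir /mnt/ocp4_data
    # <transfer-bucket> below is the S3TransferBucket stack output; adjust to your environment.
    aws s3 cp /mnt/ocp4_data/ocp4_bundle.tar s3://<transfer-bucket>/

    # On the High Side instance (no direct internet access):
    aws s3 cp s3://<transfer-bucket>/ocp4_bundle.tar /mnt/ocp4_data/
    cd /mnt/ocp4_data && tar -xf ocp4_bundle.tar && bash ./unpack.sh

The bundle step prompts for the Red Hat pull secret when one is not supplied or already cached in the output directory, and unpack.sh installs the clients to /usr/local/bin, stands up the Quay mirror registry on port 8443, and pushes the mirrored release images into it.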