├── .gitignore ├── Makefile ├── README.md ├── bin ├── kubectl ├── oc └── openshift-install ├── config.env └── src ├── actions ├── cleantools.sh ├── create_cluster.sh ├── customize_cluster.sh ├── destroy_cluster.sh ├── download_tools.sh ├── list_clusters.sh ├── login_cluster.sh ├── main.sh ├── usage.sh └── use_cluster.sh ├── config ├── 00_directories.sh ├── 10_defaults.sh ├── 20_credentials.sh ├── 40_scripts.sh └── templates │ ├── .merge-templates.sh │ ├── aws │ └── default.yaml │ ├── azure │ └── default.yaml │ ├── gcp │ └── default.yaml │ ├── openstack │ └── default.yaml │ ├── ovirt │ └── default.yaml │ └── vsphere │ ├── 4.16.yaml │ └── default.yaml ├── control ├── 00_init.sh └── 99_execute.sh ├── helpers ├── 00_output.sh ├── 10_oc.sh ├── 20_parse_args.sh ├── 30_create_install_config.sh ├── 40_cloud_credentials.sh ├── 50_get_cluster_version.sh ├── 60_get_cluster_platform.sh └── 70_validate_options.sh └── scripts ├── .merge-scripts.sh ├── add-htpasswd-idp ├── configure-registry ├── delete-kubeadmin-user ├── deploy-cluster-logging ├── deploy-pipelines ├── deploy-rhsso └── deploy-service-mesh /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | openshift-install-wrapper 3 | src/scripts/functions 4 | src/scripts/names 5 | src/config/30_templates.sh -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ############### 2 | # BOILERPLATE # 3 | ############### 4 | # Import config. 5 | # You can change the default config with `make CONFIG="config_special.env" build` 6 | CONFIG ?= config.env 7 | include $(CONFIG) 8 | export $(shell sed 's/=.*//' $(CONFIG)) 9 | 10 | .PHONY: help 11 | .DEFAULT_GOAL := help 12 | 13 | ######## 14 | # HELP # 15 | ######## 16 | help: ## Shows this message. 17 | @echo -e "Makefile helper for ${NAME} ${VERSION}.\n\nCommands reference:" 18 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 19 | @echo 20 | version: ## Output the current version. 21 | @echo $(VERSION) 22 | 23 | ########### 24 | # INSTALL # 25 | ########### 26 | .ONESHELL: 27 | merge-scripts: create-main-wrapper ## Merge the scripts in the main script 28 | @echo Merging scripts into openshift-install-wrapper. 29 | cd src/scripts && ./.merge-scripts.sh 30 | 31 | merge-templates: ## Merge the install-config templates in the templates file 32 | @echo Merging install-config templates into variables file. 33 | cd src/config/templates && ./.merge-templates.sh 34 | 35 | create-main-wrapper: ## Creates the wrapper with all the src/ content 36 | @echo Combining source files into openshift-install-wrapper. 37 | @cat src/control/00_init.sh src/config/*.sh src/helpers/*.sh src/actions/*.sh src/control/99_execute.sh > openshift-install-wrapper 38 | @chmod +x openshift-install-wrapper 39 | 40 | create-binary-wrappers: ## Install helper wrappers for oc, kubectl, openshift-install 41 | @echo Preparing binary wrappers. 
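# The sed calls below bake the configured VERSION and TARGETDIR into the generated
# wrapper and into the oc/kubectl/openshift-install binary wrappers, so the installed
# copies point at $(TARGETDIR)/bin without further editing.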
42 | @sed -i 's/^VERSION=.*/VERSION=$(VERSION)/' openshift-install-wrapper
43 | @sed -i "s|^__basedir=.*|__basedir=$(TARGETDIR)|" openshift-install-wrapper
44 | @sed -i "s|^WRAPPER_BASEDIR=.*|WRAPPER_BASEDIR=$(TARGETDIR)/bin|" bin/openshift-install
45 | @sed -i "s|^WRAPPER_BASEDIR=.*|WRAPPER_BASEDIR=$(TARGETDIR)/bin|" bin/oc
46 | @sed -i "s|^WRAPPER_BASEDIR=.*|WRAPPER_BASEDIR=$(TARGETDIR)/bin|" bin/kubectl
47 | 
48 | create-dirs: ## Create directories
49 | @echo Creating target directory $(TARGETDIR).
50 | @mkdir -p $(TARGETDIR)/{bin,clusters,config}
51 | 
52 | copy-binary-wrappers: ## Copy wrappers to binaries directory
53 | @echo Copying wrappers.
54 | @mv -f openshift-install-wrapper $(TARGETDIR)/bin
55 | @cp -f bin/* $(TARGETDIR)/bin
56 | @chmod 755 $(TARGETDIR)/bin/openshift-install-wrapper
57 | @echo "Wrappers installed in $(TARGETDIR)/bin. Please remember to add this location to your PATH to use it."
58 | 
59 | install: create-dirs merge-templates create-main-wrapper create-binary-wrappers merge-scripts copy-binary-wrappers ## Installs the script
60 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift Installer Wrapper
2 | ## Index
3 | 1. [Description](#description)
4 | 2. [Preparation](#preparation)
5 | 3. [Usage](#usage)
6 | 4. [Client wrappers](#client-wrappers)
7 | 5. [Adding customizations](#adding-customization-scripts)
8 | 6. [TODO](#todo)
9 | 7. [Known Issues](#known-issues)
10 | 
11 | ## Description
12 | This is a wrapper for the official `openshift-install` binary to perform IPI (Installer-Provisioned Infrastructure) installations of OpenShift 4.
13 | 
14 | Features:
15 | - downloads the installer and client for the desired version
16 | - creates sample cloud credential files in the right location for each cloud provider
17 | - creates install-config.yaml for each cloud provider
18 | - allows customizing a previously installed cluster with pre-made scripts
19 | - includes client wrappers to select any of the downloaded tool versions based on environment variables
20 | 
21 | ## Preparation
22 | The wrapper delegates all the tasks to `openshift-install`, but it requires some prerequisites:
23 | - valid credentials for each cloud provider
24 | - an SSH RSA public key
25 | - a pull secret
26 | 
27 | ### Script installation
28 | This will copy the script to the default location `$HOME/.local/ocp4`:
29 | ```
30 | $ make install
31 | ```
32 | 
33 | For a different location, set `TARGETDIR` as needed:
34 | ```
35 | $ make TARGETDIR=/opt/ocp4 install
36 | ```
37 | 
38 | Add the directory to your `$PATH` variable so you can invoke it directly:
39 | ```
40 | $ export PATH=$PATH:$HOME/.local/ocp4/bin
41 | ```
42 | 
43 | ### Cloud credentials
44 | If you don't have such credentials yet (i.e. you don't use aws-cli at all), you can run the wrapper with `--init` for each platform to create a sample credentials file, then edit it with your own credentials:
45 | ```
46 | $ openshift-install-wrapper --init --platform aws
47 | → Validating environment...
48 | → Creating target directory...
49 | → Creating sample cloud credentials file for aws...
50 | ✔ Created sample file /home/sgarcia/.aws/credentials. Please edit it to add the proper credentials for each provider before trying to install any cluster or it will fail.
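# For reference, the sample ~/.aws/credentials created by --init only contains
# placeholder values like these; replace them with real keys before installing:
#   [default]
#   aws_access_key_id = 1234567890ABCDEFGHIJ
#   aws_secret_access_key = 1234567890ABCDEFGHIJKLMNOabcdefghijklmno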
51 | ``` 52 | 53 | ### Other files 54 | - Copy the RSA public key that will be injected in the instances to `$HOME/.config/openshift-install-wrapper/config/ssh-key.pub` 55 | - Copy the pull secret to `$HOME/.config/openshift-install-wrapper/config/pull-secret.json` 56 | 57 | ## Usage 58 | The list of features and options is increasing as changes are made. Check the `--help` parameter for the newest list. 59 | 60 | ``` 61 | $ openshift-install-wrapper --help 62 | OpenShift installation wrapper for IPI installations. Version: 1.0.0 63 | 64 | Usage: openshift-install-wrapper [--init|--install|--destroy|--customize] [options] 65 | 66 | Options: 67 | --name - name of the cluster 68 | --domain - name of the domain for the cluster 69 | --version - version to install 70 | --platform - cloud provider (only aws supported for now) 71 | --region - cloud provider region 72 | 73 | --force - force installation (cleanup files if required) 74 | --init - initialize the tool and credentials 75 | --install - install the cluster 76 | --destroy - destroy the cluster 77 | --customize - customize the cluster with some post-install actions 78 | --use - sets KUBECONFIG and/or env vars to use a given cluster 79 | --login - uses the default kubeadmin password to login in a given cluster 80 | --list - lists all existing clusters 81 | 82 | --verbose - shows more information during the execution 83 | --quiet - quiet mode (no output at all) 84 | --help|-h - shows this message 85 | ``` 86 | 87 | ### Install a cluster 88 | ``` 89 | $ openshift-install-wrapper --install \ 90 | --name sgarcia-ocp447 \ 91 | --domain aws.gmbros.net \ 92 | --version 4.4.7 \ 93 | --platform aws \ 94 | --region eu-west-1 95 | → Validating environment... 96 | → Checking if installer for 4.4.7 is already present... 97 | ✔ Installer for 4.4.7 found. Continuing. 98 | → Checking if the cluster directory already exists... 99 | → Creating install-config.yaml file... 100 | ✔ Using specific install-config for aws-4.4.7... 101 | → Running "openshift-install" to create a cluster... 102 | ✔ Cluster created! 103 | To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/sgarcia/.local/ocp4/clusters/sgarcia-ocp447-3.aws.gmbros.net/auth/kubeconfig' 104 | Access the OpenShift web-console here: https://console-openshift-console.apps.sgarcia-ocp447.aws.gmbros.net 105 | Login to the console with user: kubeadmin, password: m2MsJ-vKNcn-8zTHM-A9NVU 106 | ``` 107 | 108 | ### Destroy a cluster 109 | ``` 110 | $ openshift-install-wrapper --destroy \ 111 | --name sgarcia-ocp447 \ 112 | --domain aws.gmbros.net 113 | → Validating environment... 114 | → Finding version in cluster directory... 115 | ✔ Version detected: 4.4.7. 116 | → Finding platform in cluster directory... 117 | ✔ Platform detected: aws. 118 | → Checking if installer for 4.4.7 is already present... 119 | ✔ Installer for 4.4.7 found. Continuing. 120 | → Running "openshift-install" to destroy a cluster... 121 | ✔ Cluster destroyed! 122 | → Removing directory... 123 | ``` 124 | 125 | ### Customize a cluster 126 | ``` 127 | $ openshift-install-wrapper --customize delete-kubeadmin-user \ 128 | --name sgarcia-ocp447 \ 129 | --domain aws.gmbros.net 130 | → Validating environment... 131 | → Finding version in cluster directory... 132 | ✔ Version detected: 4.4.7. 133 | → Finding platform in cluster directory... 134 | ✔ Platform detected: aws. 135 | → Checking if client binaries for 4.4.7 are already present... 136 | ✔ Client binaries for 4.4.7 are found. Continuing. 
137 | → Running delete-kubeadmin-user...
138 | secret "kubeadmin" deleted
139 | ```
140 | 
141 | ### Use/login into a cluster with default KUBECONFIG/kubeadmin
142 | To log in, run the script as usual:
143 | ```
144 | $ openshift-install-wrapper --login \
145 |     --name sgarcia-ocp447 \
146 |     --domain aws.gmbros.net
147 | ```
148 | 
149 | For direct KUBECONFIG access in your current shell, wrap the script in the `$()` syntax:
150 | ```
151 | $ $( openshift-install-wrapper --use \
152 |     --name sgarcia-ocp447 \
153 |     --domain aws.gmbros.net )
154 | ```
155 | 
156 | ### Troubleshooting
157 | - Use `--verbose` to get extra information during the execution, including the full output of `openshift-install`
158 | - Review `$HOME/.local/ocp4/clusters/<name>.<domain>/.openshift_install_wrapper.log` for useful output
159 | 
160 | ## Client wrappers
161 | Some helper wrappers are installed in the target directory to help you use the different versions of the tools (`openshift-install`, `oc` and `kubectl`) through environment variables.
162 | 
163 | With these client wrappers you can switch from one client version to another easily by setting the `$OCP4_VERSION` variable: the right binary is used transparently without touching your `PATH` or defining aliases for your commands.
164 | 
165 | All three wrappers include some goodies:
166 | - they read the variable `$OCP4_VERSION` to select the final binary.
167 | - they read the variables `$OC_VERSION`, `$KUBECTL_VERSION`, and `$OPENSHIFT_INSTALL_VERSION` respectively to select the final version, regardless of `$OCP4_VERSION`, for individual testing.
168 | - they add a `--wrapper-info` parameter to show the available versions for each one.
169 | 
170 | Example:
171 | ```sh
172 | $ oc version
173 | Client Version: 4.6.9
174 | 
175 | $ oc --wrapper-info
176 | Wrapper info:
177 |  - WRAPPER_NAME: oc
178 |  - WRAPPER_VERSION: default
179 |  - WRAPPER_BASEDIR: /home/sgarcia/.local/ocp4/bin
180 | 
181 | Available versions (for $OC_VERSION and/or $OCP4_VERSION):
182 |  - 4.4.30
183 |  - 4.5.16
184 |  - 4.6.12
185 |  - 4.6.9
186 |  - default (4.6.9)
187 | 
188 | $ export OCP4_VERSION=4.6.12
189 | 
190 | $ oc version
191 | Client Version: 4.6.12
192 | 
193 | $ kubectl version
194 | Client Version: 4.6.12
195 | 
196 | $ openshift-install version
197 | /home/sgarcia/.local/ocp4/bin/openshift-install-4.6.12 4.6.12
198 | built from commit eded5eb5b6c77e2af2a2c537093da8bf3711f494
199 | release image quay.io/openshift-release-dev/ocp-release@sha256:5c3618ab914eb66267b7c552a9b51c3018c3a8f8acf08ce1ff7ae4bfdd3a82bd
200 | ```
201 | 
202 | ## Adding customization scripts
203 | To add new scripts, they must meet some requirements:
204 | - they must be created in the `scripts/` directory
205 | - they must have a descriptive name that does not overlap with any existing command or function in the system
206 | - optionally, they can declare a name and a description (single line)
207 | - they must contain some markers and a `main()` function. Use any existing script as a baseline or use this one:
208 | ```sh
209 | #!/bin/bash
210 | 
211 | # Optional fields
212 | # script name: delete-kubeadmin-user
213 | # script description: Removes the kubeadmin secret
214 | 
215 | # Mandatory function
216 | # start main - do not remove this line and do not change the function name
217 | main() {
218 |   oc delete secret kubeadmin -n kube-system \
219 |     && success "Secret kubeadmin successfully deleted." \
220 |     || err "Error deleting kubeadmin secret. It probably doesn't exist anymore. Skipping."
221 | }
222 | # end main - do not remove this line
223 | 
224 | # Optionally, keep this if you want to run your script manually or for testing.
225 | main $@
226 | ```
227 | 
228 | To provide flexibility, every script receives the following parameters:
229 | | Parameter | Description | Example |
230 | |:----------:|:-------------|:---------|
231 | | `$1` | name of the customization and arguments in the command line | `deploy-rhsso=namespace=rhsso:version=4.2.2` |
232 | | `$2` | full path to the cluster installation directory | `/opt/ocp4/clusters/sgarcia-ocp447/` |
233 | | `$3` | full path to the right `oc` client binary | `/opt/ocp4/bin/oc-4.4.7` |
234 | | `$4` | verbose mode flag (`0` or `1`) | `0` |
235 | | `$5` | quiet mode flag (`0` or `1`) | `0` |
236 | | `$6` | cluster version | `4.4.7` |
237 | | `$7` | cluster name | `sgarcia-ocp447` |
238 | | `$8` | cluster subdomain | `aws.gmbros.net` |
239 | | `$9` | cloud platform | `aws` |
240 | 
241 | There are also a few helper functions ready to be used from the customization scripts (the sketch at the end of this section shows some of them in context):
242 | - `oc()`, which runs a command in the cluster with `system:admin` permissions and aligns the output with the `--verbose` flag
243 | - `success()`, which prints a message with a check icon after a command succeeds
244 | - `err()`, which prints a message with a cross icon after a command fails
245 | - `die()`, which prints a message with a cross icon after a command fails and stops the execution
246 | - `parse_args_as_variables()`, which parses a string and declares variables based on its content
247 | 
248 | Optionally, your customization can receive extra arguments from the command line. This is useful if your customization is flexible
249 | in what it does or allows different actions (like installing different versions of an operator). For example:
250 | ```sh
251 | $ openshift-install-wrapper --customize add-htpasswd-idp \
252 |     --name sgarciam-ocp447 \
253 |     --domain aws.gmbros.net
254 | → Validating environment...
255 | → Finding version in cluster directory...
256 | ✔ Version detected: 4.4.7.
257 | → Finding platform in cluster directory...
258 | ✔ Platform detected: aws.
259 | → Checking if client binaries for 4.4.7 are already present...
260 | ✔ Client binaries for 4.4.7 are found. Continuing.
261 | → Running add-htpasswd-idp...
262 | ✔ Adding default users: admin, user and guest.
263 | ✔ Secret htpasswd-secret created/configured successfully.
264 | ✔ OAuth/cluster successfully configured.
265 | 
266 | $ openshift-install-wrapper --customize add-htpasswd-idp=admin=adminpwd:user1=user1pwd \
267 |     --name sgarciam-ocp447 \
268 |     --domain aws.gmbros.net
269 | → Validating environment...
270 | → Finding version in cluster directory...
271 | ✔ Version detected: 4.4.7.
272 | → Finding platform in cluster directory...
273 | ✔ Platform detected: aws.
274 | → Checking if client binaries for 4.4.7 are already present...
275 | ✔ Client binaries for 4.4.7 are found. Continuing.
276 | → Running add-htpasswd-idp...
277 | ✔ Using custom users.
278 | ✔ Adding user admin with password adminpwd.
279 | ✔ Adding user user1 with password user1pwd.
280 | ✔ Secret htpasswd-secret created/configured successfully.
281 | ✔ OAuth/cluster successfully configured.
282 | ```
283 | 
284 | Finally, after adding your new script to the directory, remember to run `make install` to install a new version of `openshift-install-wrapper` with your script embedded in it.
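As a rough sketch of how these pieces fit together, the hypothetical customization below (the script name, the `note=` argument and the ConfigMap it creates are made up for illustration) reads an optional `note=` argument from `$1` with `parse_args_as_variables()`, uses the documented positional parameters, and reports the result through the `oc()`, `success()` and `err()` helpers:
```sh
#!/bin/bash

# script name: set-cluster-info
# script description: Stores basic cluster details in a ConfigMap (illustrative example)

# start main - do not remove this line and do not change the function name
main() {
  # declare any key=value pairs passed as "set-cluster-info=key=value:..." as variables
  parse_args_as_variables "${1}"

  # positional parameters documented in the table above
  local version="${6}" name="${7}" domain="${8}" platform="${9}"

  # oc() already points at the right client binary and kubeconfig for this cluster
  oc create configmap cluster-info -n default \
      --from-literal=name="${name}.${domain}" \
      --from-literal=version="${version}" \
      --from-literal=platform="${platform}" \
      --from-literal=note="${note:-none}" \
    && success "ConfigMap cluster-info created." \
    || err "Unable to create ConfigMap cluster-info. Skipping."
}
# end main - do not remove this line

main "$@"
```
Run as `openshift-install-wrapper --customize set-cluster-info=note=demo --name <name> --domain <domain>`, the `parse_args_as_variables` call exports `note=demo`, so the script can reference `${note}` directly.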
285 | 286 | ## TODO 287 | - ~~Read cluster version and cluster platform from install directory for `--customize`, and `--destroy` operations~~ 288 | - ~~Add GCP support~~ 289 | - Improve `--list` output 290 | - Improve console output (ie. include timestamps) 291 | - Implement `--expire` parameter to delete (using cron) a cluster after a certain time 292 | 293 | ## Known issues 294 | - Error handling when the cloud credentials are invalid or the installation fails 295 | - Lines with # in customization scripts are removed during the merge. This must be fixed. 296 | 297 | ## Contact 298 | Reach me in [Twitter](http://twitter.com/soukron) or email in soukron _at_ gmbros.net 299 | 300 | ## License 301 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 302 | this file except in compliance with the License. You may obtain a copy of the 303 | License at 304 | 305 | http://www.apache.org/licenses/LICENSE-2.0 306 | 307 | Unless required by applicable law or agreed to in writing, software distributed 308 | under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 309 | CONDITIONS OF ANY KIND, either express or implied. See the License for the 310 | specific language governing permissions and limitations under the License. 311 | 312 | [here]:http://gnu.org/licenses/gpl.html 313 | -------------------------------------------------------------------------------- /bin/kubectl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WRAPPER_NAME=kubectl 4 | WRAPPER_VERSION_VAR=KUBECTL_VERSION 5 | WRAPPER_BASEDIR=~/.local/ocp4/bin 6 | 7 | # Don't need to edit anything after this line 8 | WRAPPER_VERSION=${!WRAPPER_VERSION_VAR:-${OCP4_VERSION:-"default"}} 9 | WRAPPER_CMD=`basename $0`-${WRAPPER_VERSION} 10 | 11 | # Check for --wrapper-info argument to show help 12 | if [ "$1" == "--wrapper-info" ]; then 13 | echo "Wrapper info: 14 | - WRAPPER_NAME: ${WRAPPER_NAME} 15 | - WRAPPER_VERSION: ${WRAPPER_VERSION} 16 | - WRAPPER_BASEDIR: ${WRAPPER_BASEDIR}" 17 | echo 18 | 19 | echo -e "Available versions (for \$${WRAPPER_VERSION_VAR} and/or \$OCP4_VERSION):" 20 | for version in `ls -1 ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-* | grep -v wrapper | rev | cut -d - -f 1 | rev`; do 21 | echo -n " - ${version}" 22 | if [ "${version}" == "default" ]; then 23 | echo " ($(readlink ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-* | rev | cut -d - -f 1 | rev))" 24 | fi 25 | echo 26 | done 27 | 28 | exit 29 | fi 30 | 31 | # Helper to set the default version 32 | if [ "$1" == "--wrapper-set-default" ]; then 33 | if [ ! -f ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-${2} ]; then 34 | echo "Error: unable to set as default a non-existant version" 35 | else 36 | ln -sf ${WRAPPER_NAME}-${2} ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-default 37 | fi 38 | 39 | exit 40 | fi 41 | 42 | # Check if binary exists and exit if not 43 | if [ ! -f ${WRAPPER_BASEDIR}/${WRAPPER_CMD} ]; then 44 | echo "Error: file ${WRAPPER_CMD} not found in ${WRAPPER_BASEDIR}. 
Set variable \$${WRAPPER_VERSION_VAR}, \${OCP4_VERSION} or symlink any version as default" 45 | exit 46 | fi 47 | 48 | # Launch the tool 49 | ${WRAPPER_BASEDIR}/${WRAPPER_CMD} "$@" 50 | -------------------------------------------------------------------------------- /bin/oc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WRAPPER_NAME=oc 4 | WRAPPER_VERSION_VAR=OC_VERSION 5 | WRAPPER_BASEDIR=~/.local/ocp4/bin 6 | 7 | # Don't need to edit anything after this line 8 | WRAPPER_VERSION=${!WRAPPER_VERSION_VAR:-${OCP4_VERSION:-"default"}} 9 | WRAPPER_CMD=`basename $0`-${WRAPPER_VERSION} 10 | 11 | # Check for --wrapper-info argument to show help 12 | if [ "$1" == "--wrapper-info" ]; then 13 | echo "Wrapper info: 14 | - WRAPPER_NAME: ${WRAPPER_NAME} 15 | - WRAPPER_VERSION: ${WRAPPER_VERSION} 16 | - WRAPPER_BASEDIR: ${WRAPPER_BASEDIR}" 17 | echo 18 | 19 | echo -e "Available versions (for \$${WRAPPER_VERSION_VAR} and/or \$OCP4_VERSION):" 20 | for version in `ls -1 ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-* | grep -v wrapper | rev | cut -d - -f 1 | rev`; do 21 | echo -n " - ${version}" 22 | if [ "${version}" == "default" ]; then 23 | echo " ($(readlink ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-* | rev | cut -d - -f 1 | rev))" 24 | fi 25 | echo 26 | done 27 | 28 | exit 29 | fi 30 | 31 | # Helper to set the default version 32 | if [ "$1" == "--wrapper-set-default" ]; then 33 | if [ ! -f ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-${2} ]; then 34 | echo "Error: unable to set as default a non-existant version" 35 | else 36 | ln -sf ${WRAPPER_NAME}-${2} ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-default 37 | fi 38 | 39 | exit 40 | fi 41 | 42 | # Check if binary exists and exit if not 43 | if [ ! -f ${WRAPPER_BASEDIR}/${WRAPPER_CMD} ]; then 44 | echo "Error: file ${WRAPPER_CMD} not found in ${WRAPPER_BASEDIR}. Set variable \$${WRAPPER_VERSION_VAR}, \${OCP4_VERSION} or symlink any version as default" 45 | exit 46 | fi 47 | 48 | # Launch the tool 49 | ${WRAPPER_BASEDIR}/${WRAPPER_CMD} "$@" 50 | -------------------------------------------------------------------------------- /bin/openshift-install: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WRAPPER_NAME=openshift-install 4 | WRAPPER_VERSION_VAR=OPENSHIFT_INSTALL_VERSION 5 | WRAPPER_BASEDIR=~/.local/ocp4/bin 6 | 7 | # Don't need to edit anything after this line 8 | WRAPPER_VERSION=${!WRAPPER_VERSION_VAR:-${OCP4_VERSION:-"default"}} 9 | WRAPPER_CMD=`basename $0`-${WRAPPER_VERSION} 10 | 11 | # Check for --wrapper-info argument to show help 12 | if [ "$1" == "--wrapper-info" ]; then 13 | echo "Wrapper info: 14 | - WRAPPER_NAME: ${WRAPPER_NAME} 15 | - WRAPPER_VERSION: ${WRAPPER_VERSION} 16 | - WRAPPER_BASEDIR: ${WRAPPER_BASEDIR}" 17 | echo 18 | 19 | echo -e "Available versions (for \$${WRAPPER_VERSION_VAR} and/or \$OCP4_VERSION):" 20 | for version in `ls -1 ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-* | grep -v wrapper | rev | cut -d - -f 1 | rev`; do 21 | echo -n " - ${version}" 22 | if [ "${version}" == "default" ]; then 23 | echo " ($(readlink ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-* | rev | cut -d - -f 1 | rev))" 24 | fi 25 | echo 26 | done 27 | 28 | exit 29 | fi 30 | 31 | # Helper to set the default version 32 | if [ "$1" == "--wrapper-set-default" ]; then 33 | if [ ! 
-f ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-${2} ]; then 34 | echo "Error: unable to set as default a non-existant version" 35 | else 36 | ln -sf ${WRAPPER_NAME}-${2} ${WRAPPER_BASEDIR}/${WRAPPER_NAME}-default 37 | fi 38 | 39 | exit 40 | fi 41 | 42 | # Check if binary exists and exit if not 43 | if [ ! -f ${WRAPPER_BASEDIR}/${WRAPPER_CMD} ]; then 44 | echo "Error: file ${WRAPPER_CMD} not found in ${WRAPPER_BASEDIR}. Set variable \$${WRAPPER_VERSION_VAR}, \${OCP4_VERSION} or symlink any version as default" 45 | exit 46 | fi 47 | 48 | # Launch the tool 49 | ${WRAPPER_BASEDIR}/${WRAPPER_CMD} "$@" 50 | -------------------------------------------------------------------------------- /config.env: -------------------------------------------------------------------------------- 1 | NAME=OpenShift Install Wrapper 2 | VERSION=1.5.1 3 | TARGETDIR=~/.local/ocp4 4 | -------------------------------------------------------------------------------- /src/actions/cleantools.sh: -------------------------------------------------------------------------------- 1 | # Clean tools, only enter here if action "--clean-tools" selected 2 | # Removes all previously downloaded installers and CLI clients of not installed versions 3 | cleantools() { 4 | out "→ Showing actual filesystem occupation." 5 | df -h "${__bindir}" 6 | 7 | out "→ Cleaning up all installer binaries..." 8 | rm -f ${__bindir}/openshift-install-[0-9]* 2> /dev/null && success "All installer removed!" 9 | 10 | [ $(ls ~/.cache/openshift-installer/image_cache/*.ova 2> /dev/null |wc -l) -ge 1 ] && \ 11 | out "→ Removing OVA images..." && \ 12 | rm -f ~/.cache/openshift-installer/image_cache/*.ova && success "All OVA images removed!" 13 | 14 | # Generate the list 15 | ls ${__bindir}/{oc,kubectl}-* > /tmp/cli-listxxxxxx.txt 16 | 17 | # Remove versions to maintain from the generated list 18 | for cluster in $(ls ${__clustersdir});do 19 | CLUSTER_VERSION="$(grep -Po '(?<=OpenShift Installer )[v0-9.]*' ${__clustersdir}/${cluster}/.openshift_install.log 2>/dev/null| head -n 1 | tr -d v)" 20 | grep -v ${CLUSTER_VERSION} /tmp/cli-listxxxxxx.txt > /tmp/cli-listxxxxxxAPPO.txt 21 | mv -f /tmp/cli-listxxxxxxAPPO.txt /tmp/cli-listxxxxxx.txt 22 | done 23 | # Remove all not used versions 24 | out "→ Cleaning up all unused CLI clients..." 25 | cat /tmp/cli-listxxxxxx.txt |xargs rm -f > /dev/null 2>&1 && success "CLI clients removed!" 26 | 27 | # Remove the list 28 | rm -f /tmp/cli-listxxxxxx.txt 29 | 30 | out "→ Showing actual filesystem occupation." 31 | df -h "${__bindir}" 32 | } 33 | 34 | -------------------------------------------------------------------------------- /src/actions/create_cluster.sh: -------------------------------------------------------------------------------- 1 | # create cluster 2 | create_cluster() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | local installer=${__bindir}/openshift-install-${INSTALLOPTS[version]} 5 | 6 | if [[ ${DRY_RUN} -eq 1 ]]; then 7 | out "→ Dry-run execution detected. Exiting..." 8 | exit 9 | fi 10 | 11 | verbose " Saving a copy of install-config.yaml file." 12 | cp ${clusterdir}/install-config.yaml ${clusterdir}/install-config.yaml.orig 13 | 14 | if [[ ${EDIT_INSTALL_MANIFESTS} -eq 1 ]]; then 15 | out "→ Running \"openshift-install\" to create manifests..." 16 | ${installer} create manifests --dir=${clusterdir} 17 | success "Manifests created!" 18 | out "→ Stopping openshift-install-wrapper so you can edit the manifests." 
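# Note: the subshell a few lines below sends SIGSTOP to this script, so the shell
# reports the wrapper as stopped; after editing the manifests the user resumes it
# with "fg" and the installation continues.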
19 | out " Manifests have been created at: ${clusterdir}" 20 | out " When done, resume openshift-install-wrapper with \"fg\"" 21 | (kill -STOP $$) 22 | out "→ Resuming openshift-install-wrapper" 23 | fi 24 | 25 | out "→ Running \"openshift-install\" to create a cluster..." 26 | verbose " Command: \"${installer} create cluster --dir=${clusterdir}\"" 27 | if [[ ${VERBOSE} -eq 1 ]]; then 28 | ${installer} create cluster --dir=${clusterdir} 29 | success "Cluster created!" 30 | else 31 | ${installer} create cluster --dir=${clusterdir} &> ${clusterdir}/.openshift_install_wrapper.log 32 | success "Cluster created!" 33 | tail -n 3 ${clusterdir}/.openshift_install_wrapper.log | cut -d \" -f 2- | tr -d "\"$" 34 | fi 35 | } 36 | 37 | -------------------------------------------------------------------------------- /src/actions/customize_cluster.sh: -------------------------------------------------------------------------------- 1 | # customize a cluster based on a list of scripts 2 | customize_cluster() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | local client=${__bindir}/oc-${INSTALLOPTS[version]} 5 | 6 | for script in $( echo ${SCRIPTS} | tr -s "," " " ); do 7 | cmd=${script%%=*} 8 | type ${cmd} &>/dev/null && out "→ Running ${cmd} customization..." || die "${cmd} not found." 9 | ${cmd} ${script} ${clusterdir} ${client} ${VERBOSE} ${QUIET} ${INSTALLOPTS[*]} 10 | done 11 | } 12 | 13 | -------------------------------------------------------------------------------- /src/actions/destroy_cluster.sh: -------------------------------------------------------------------------------- 1 | # destroy cluster 2 | destroy_cluster() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | local installer=${__bindir}/openshift-install-${INSTALLOPTS[version]} 5 | 6 | if [[ ${DRY_RUN} -eq 1 ]]; then 7 | out "→ Dry-run execution detected. Exiting..." 8 | exit 9 | fi 10 | 11 | out "→ Running \"openshift-install\" to destroy a cluster..." 12 | verbose " Command: \"${installer} destroy cluster --dir=${clusterdir}\"" 13 | if [[ ${VERBOSE} -eq 1 ]]; then 14 | ${installer} destroy cluster --dir=${clusterdir} 15 | else 16 | ${installer} destroy cluster --dir=${clusterdir} &> ${clusterdir}/.openshift_install_wrapper.log 17 | fi 18 | success "Cluster destroyed!" 19 | 20 | out "→ Removing directory..." 
21 | verbose " Directory: ${clusterdir}" 22 | rm -fr ${clusterdir} 23 | } 24 | 25 | -------------------------------------------------------------------------------- /src/actions/download_tools.sh: -------------------------------------------------------------------------------- 1 | # download tools (if required) 2 | # extracts a binary from an image 3 | extract_from_image() { 4 | local command="$1" 5 | local target="$2" 6 | local image_url="$3" 7 | local quiet="$4" 8 | 9 | if [[ ${quiet} -eq 1 ]]; then 10 | command oc adm -a ${__configdir}/pull-secret.json release extract --command=${command} ${image_url} --to=${target} &>/dev/null 11 | else 12 | command oc adm -a ${__configdir}/pull-secret.json release extract --command=${command} ${image_url} --to=${target} 13 | fi 14 | } 15 | 16 | # downloads the binary from a url 17 | download_from_url() { 18 | local tool="$1" 19 | local version="$2" 20 | local target="$3" 21 | local quiet="$4" 22 | 23 | local tarfile="${tool}-linux-${version}.tar.gz" 24 | 25 | if [[ ${quiet} -eq 1 ]]; then 26 | wget ${__baseurl}/${version}/${tarfile} -O ${target}/${tarfile} &>/dev/null 27 | else 28 | wget ${__baseurl}/${version}/${tarfile} -O ${target}/${tarfile} 29 | fi 30 | 31 | tar xfa ${target}/${tarfile} -C ${target} 32 | } 33 | 34 | # download tools (if required) 35 | download_tools() { 36 | local tool=${1} 37 | 38 | if [[ $(echo ${INSTALLOPTS[version]} |grep -E "latest|stable|candidate|fast") ]];then 39 | INSTALLOPTS[version]=$( curl -s ${__baseurl}/${INSTALLOPTS[version]}/release.txt | grep "Release Metadata:" -A1 | grep Version | cut -d\: -f 2 | tr -d " " ) 40 | [[ ${INSTALLOPTS[version]} == "" ]] && die "Invalid version, check the version and retry." 41 | success "Version resolved to ${INSTALLOPTS[version]}." 42 | fi 43 | local version=${INSTALLOPTS[version]} 44 | local quiet=$(( ! ${VERBOSE} )) 45 | 46 | case ${tool} in 47 | installer) 48 | out "→ Checking if installer for ${version} is already present..." 49 | verbose " File: ${__bindir}/openshift-install-${version}" 50 | if [ ! -f ${__bindir}/openshift-install-${version} ]; then 51 | err "Installer not found. Downloading it..." 52 | 53 | if [[ -n "${INSTALLOPTS[custom-release-image]}" ]]; then 54 | extract_from_image "openshift-install" "${TMPDIR}" "${INSTALLOPTS[custom-release-image]}" ${quiet} 55 | else 56 | download_from_url "openshift-install" "${version}" "${TMPDIR}" ${quiet} 57 | fi 58 | 59 | mv -f ${TMPDIR}/openshift-install "${__bindir}/openshift-install-${version}" 60 | success "Successfuly downloaded installer for ${version}." 61 | else 62 | success "Installer for ${version} found. Continuing." 63 | fi 64 | ;; 65 | 66 | client) 67 | out "→ Checking if client binaries for ${version} are already present..." 68 | verbose " File: ${__bindir}/oc-${version}" 69 | if [ ! -f ${__bindir}/oc-${version} ]; then 70 | err "Client has not found. Downloading it..." 71 | 72 | if [[ -n "${INSTALLOPTS[custom-release-image]}" ]]; then 73 | extract_from_image "oc" "${TMPDIR}" "${INSTALLOPTS[custom-release-image]}" ${quiet} 74 | else 75 | download_from_url "openshift-client" "${version}" "${TMPDIR}" ${quiet} 76 | fi 77 | 78 | mv -f ${TMPDIR}/oc "${__bindir}/oc-${version}" 79 | success "Successfuly downloaded client binaries for ${version}." 80 | else 81 | success "Client binaries for ${version} are found. Continuing." 
82 | fi 83 | ;; 84 | esac 85 | } 86 | -------------------------------------------------------------------------------- /src/actions/list_clusters.sh: -------------------------------------------------------------------------------- 1 | # list clusters 2 | list_clusters() { 3 | echo "NAME;VERSION;PLATFORM;STATUS;KUBEADMIN_PASSWORD;CONSOLE" > /tmp/list.$$ 4 | for cluster in $(ls ${__clustersdir} );do 5 | clusterdir=${__clustersdir}/${cluster} 6 | 7 | APISERVER="api.${cluster}" 8 | CONSOLE="https://console-openshift-console.apps.${cluster}" 9 | PLATFORM=$((cut -d, -f4 ${clusterdir}/metadata.json 2> /dev/null || echo "Unknown") |cut -d: -f1 |sed s/"[{,},\"]"//g) 10 | NAME=$((cut -d, -f1 ${clusterdir}/metadata.json 2> /dev/null || echo "Unknown") |cut -d: -f2 |sed s/"[{,},\"]"//g) 11 | ADMINPWD=$(cat ${clusterdir}/auth/kubeadmin-password 2> /dev/null || echo "Unknown" ) 12 | STATUS=$(curl -m 5 -k https://${APISERVER}:6443/healthz 2> /dev/null || echo "Unhealthy") 13 | CLUSTER_VERSION=$(grep -Po '(?<=OpenShift Installer )[v0-9.]*' ${clusterdir}/.openshift_install.log 2>/dev/null| head -n 1 | tr -d v) 14 | echo "${NAME};${CLUSTER_VERSION};${PLATFORM};${STATUS};${ADMINPWD};${CONSOLE}" >> /tmp/list.$$ 15 | done 16 | if [[ ${LISTFORMAT} == "csv" ]]; then 17 | cat /tmp/list.$$ 18 | else 19 | column -t -s ';' /tmp/list.$$ 20 | fi 21 | rm -fr /tmp/list.$$ 22 | } 23 | 24 | -------------------------------------------------------------------------------- /src/actions/login_cluster.sh: -------------------------------------------------------------------------------- 1 | # login into a cluster with default kubeadmin 2 | login_cluster() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | local password=$( cat ${clusterdir}/auth/kubeadmin-password ) 5 | local client=${__bindir}/oc-${INSTALLOPTS[version]} 6 | 7 | export OCP4_VERSION=${INSTALLOPTS[version]} 8 | ${client} login --insecure-skip-tls-verify=false --username kubeadmin --password ${password} --server https://api.${INSTALLOPTS[name]}.${INSTALLOPTS[domain]}:6443 9 | } 10 | 11 | -------------------------------------------------------------------------------- /src/actions/main.sh: -------------------------------------------------------------------------------- 1 | # main function 2 | main() { 3 | # parse arguments from commandline 4 | while [[ ${1} = -?* ]]; do 5 | key="${1}" 6 | case ${key} in 7 | --name) shift; INSTALLOPTS[name]="${1}";; 8 | --domain) shift; INSTALLOPTS[domain]="${1}";; 9 | --version) shift; INSTALLOPTS[version]="${1}";; 10 | --platform) shift; INSTALLOPTS[platform]="${1}";; 11 | 12 | --region) shift; INSTALLOPTS[region]="${1}";; 13 | --master-replicas) shift; INSTALLOPTS[master-replicas]="${1}";; 14 | --worker-replicas) shift; INSTALLOPTS[worker-replicas]="${1}";; 15 | --network-type) shift; INSTALLOPTS[network-type]="${1}";; 16 | --machine-network) shift; INSTALLOPTS[machine-network]="${1}";; 17 | --tags) shift; INSTALLOPTS[tags]+=",${1}";; 18 | --edit-install-config) EDIT_INSTALL_CONFIG=1;; 19 | --edit-install-manifests) EDIT_INSTALL_MANIFESTS=1;; 20 | 21 | --dev-preview) __baseurl=${__baseurl/\/ocp/\/ocp-dev-preview};; 22 | --baseurl) shift; __baseurl="${1}";; 23 | --custom-release-image) shift; INSTALLOPTS[custom-release-image]="${1}";; 24 | 25 | --azure-resource-group) shift; INSTALLOPTS[azure-resource-group]="${1}";; 26 | 27 | --ovirt-cluster) shift; INSTALLOPTS[ovirt-cluster]="${1}";; 28 | --ovirt-storagedomain) shift; INSTALLOPTS[ovirt-storagedomain]="${1}";; 29 | --ovirt-network) shift; 
INSTALLOPTS[ovirt-network]="${1}";; 30 | --ovirt-vip-api) shift; INSTALLOPTS[ovirt-vip-api]="${1}";; 31 | --ovirt-vip-ingress) shift; INSTALLOPTS[ovirt-vip-ingress]="${1}";; 32 | --ovirt-vip-dns) shift; INSTALLOPTS[ovirt-vip-dns]="${1}";; 33 | 34 | --vsphere-vcenter) shift; INSTALLOPTS[vsphere-vcenter]="${1}";; 35 | --vsphere-vcenter-port) shift; INSTALLOPTS[vsphere-vcenter-port]="${1}";; 36 | --vsphere-username) shift; INSTALLOPTS[vsphere-username]="${1}";; 37 | --vsphere-password) shift; INSTALLOPTS[vsphere-password]="${1}";; 38 | --vsphere-cluster) shift; INSTALLOPTS[vsphere-cluster]="${1}";; 39 | --vsphere-datacenter) shift; INSTALLOPTS[vsphere-datacenter]="${1}";; 40 | --vsphere-datastore) shift; INSTALLOPTS[vsphere-datastore]="${1}";; 41 | --vsphere-network) shift; INSTALLOPTS[vsphere-network]="${1}";; 42 | --vsphere-vip-api) shift; INSTALLOPTS[vsphere-vip-api]="${1}";; 43 | --vsphere-vip-ingress) shift; INSTALLOPTS[vsphere-vip-ingress]="${1}";; 44 | --vsphere-disk-size-gb) shift; INSTALLOPTS[vsphere-disk-size-gb]="${1}";; 45 | 46 | --osp-vip-api) shift; INSTALLOPTS[osp-vip-api]="${1}";; 47 | --osp-vip-ingress) shift; INSTALLOPTS[osp-vip-ingress]="${1}";; 48 | --osp-cloud) shift; INSTALLOPTS[osp-cloud]="${1}";; 49 | --osp-ext-network) shift; INSTALLOPTS[osp-ext-network]="${1}";; 50 | --osp-os-image) shift; INSTALLOPTS[osp-os-image]="${1}";; 51 | --osp-os-flavor) shift; INSTALLOPTS[osp-os-flavor]="${1}";; 52 | 53 | --gcp-project-id) shift; INSTALLOPTS[gcp-project-id]="${1}";; 54 | 55 | --init) ACTION=init;; 56 | --install) ACTION=install;; 57 | --destroy) ACTION=destroy;; 58 | --customize) shift; ACTION=customize; SCRIPTS="${1}";; 59 | --use) ACTION=use; QUIET=1;; 60 | --login) ACTION=login; QUIET=1;; 61 | --list) ACTION=list;; 62 | --list-csv) ACTION=list; LISTFORMAT="csv";; 63 | --clean-tools) ACTION=cleantools ;; 64 | 65 | --force) FORCE=1;; 66 | --dry-run) DRY_RUN=1;; 67 | --verbose) VERBOSE=1;; 68 | --quiet) QUIET=1;; 69 | --help|-h) usage >&2; safe_exit;; 70 | *) 71 | die "Error: Invalid option ${1}.\n" 72 | ;; 73 | esac 74 | shift 75 | done 76 | 77 | WRAPPER_EDITOR="${EDITOR:-vi}" 78 | verbose "Using ${WRAPPER_EDITOR} as interactive editor" 79 | 80 | # create a temporary dir to work 81 | TMPDIR=$( mktemp -d -p . ) 82 | verbose "Using ${TMPDIR} as temporary directory" 83 | 84 | # create config dir if doesn't exists 85 | if [[ ! -d ${__configdir} ]]; then 86 | mkdir -p ${__configdir} &>/dev/null 87 | verbose "Creating ${__configdir}. You will probably need to add your ssh-key.pub and pull-secret.json files on it." 
88 | fi 89 | 90 | # check if all the required parameters are provided 91 | validate_options 92 | 93 | # run the actions 94 | case ${ACTION} in 95 | install) 96 | download_tools installer 97 | download_tools client 98 | create_install_config 99 | create_cluster 100 | ;; 101 | destroy) 102 | download_tools installer 103 | destroy_cluster 104 | ;; 105 | customize) 106 | get_cluster_version 107 | download_tools client 108 | customize_cluster 109 | ;; 110 | use) 111 | get_cluster_version 112 | download_tools client 113 | use_cluster 114 | ;; 115 | login) 116 | get_cluster_version 117 | download_tools client 118 | login_cluster 119 | ;; 120 | list) 121 | list_clusters 122 | ;; 123 | init) 124 | create_cloud_credentials 125 | ;; 126 | cleantools) 127 | cleantools 128 | ;; 129 | esac 130 | } 131 | 132 | -------------------------------------------------------------------------------- /src/actions/usage.sh: -------------------------------------------------------------------------------- 1 | # usage 2 | usage() { 3 | cat < - name of the cluster 10 | --domain - name of the domain for the cluster 11 | --version - version to install 12 | --platform - cloud provider 13 | 14 | --region - sets the region in the cloud provider 15 | --master-replicas - optionally, sets the number of master nodes to deploy 16 | --worker-replicas - optionally, sets the number of worker nodes 17 | --network-type - optionally, sets network type: OpenShiftSDN or OVNKubernetes 18 | --machine-network - optionally, sets machineNetwork (default: 10.0.0.0/16) 19 | --edit-install-config - optionally, allows to edit the install-config.yaml before starting installation 20 | --edit-install-manifests - optionally, allows to edit the install manifests. After generating the manifests, 21 | the script will stop itself so the user modifies the manifests. 22 | Once done, user must continue the script with 'fg'. 23 | 24 | --dev-preview - required to install any dev-preview release 25 | --baseurl - sets the baseurl to download the binaries (overrides the use of --dev-preview) 26 | default: https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp 27 | --custom-release-image - uses a custom release image to extract the binaries from there. 28 | Requires a working oc binary in the PATH and pull secret with credentials for CI registry. 
29 | 30 | --tags =[,key=value] - optionally, sets tags on the resources it creates (only implemented for platform=aws) 31 | use multiple options for multiple tags or comma-separated key/value pairs 32 | 33 | --azure-resource-group - provide the ResourceGroup where the domain exists 34 | 35 | --ovirt-cluster - ovirt cluster UUID 36 | --ovirt-storagedomain - ovirt storage domain UUID 37 | --ovirt-network - ovirt network name 38 | --ovirt-vip-api - IP address the cluster's API 39 | --ovirt-vip-ingress - IP address for the cluster's ingress 40 | --ovirt-vip-dns - IP address for the cluster's dns 41 | 42 | --vsphere-vcenter - fqdn or IP address of vCenter 43 | --vsphere-vcenter-port - port of vCenter 44 | --vsphere-username - username to login to vCenter 45 | --vsphere-password - password to login to vCenter 46 | --vsphere-cluster - vCenter cluster name 47 | --vsphere-datacenter - vCenter datacenter name 48 | --vsphere-datastore - vCenter datastore name 49 | --vsphere-network - vCenter network name 50 | --vsphere-vip-api - IP address the cluster's API 51 | --vsphere-vip-ingress - IP address for the cluster's ingress 52 | --vsphere-disk-size-gb - optionally, sets the disk size for the instances 53 | 54 | --osp-vip-api - floating IP for the cluster's API 55 | --osp-vip-ingress - floating IP for the cluster's Ingress 56 | --osp-cloud - name of the cloud to use from clouds.yaml 57 | --osp-ext-network - name of the external network to be used 58 | --osp-os-image - name or location of the RHCOS image to use 59 | --osp-os-flavor - name of the flavor to use 60 | 61 | --gcp-project-id - name of the GCP project 62 | 63 | --init - initialize the tool and credentials 64 | --install - install the cluster 65 | --destroy - destroy the cluster 66 | --customize - customize the cluster with some post-install actions 67 | --use - sets KUBECONFIG and/or env vars to use a given cluster 68 | --login - uses the default kubeadmin password to login in a given cluster 69 | --list - lists all existing clusters 70 | --list-csv - lists all existing clusters in CSV format 71 | --clean-tools - removes unecessary CLI clients and all installers 72 | 73 | --force - force installation (cleanup files if required) 74 | --dry-run - does all the preparation steps but doesn't run the installer to create/destroy a cluster 75 | --verbose - shows more information during the execution 76 | --quiet - quiet mode (no output at all) 77 | --help|-h - shows this message 78 | 79 | Available customizations: 80 | #__CUSTOM_SCRIPTS_NAMES__ 81 | EOF 82 | exit 0 83 | } 84 | 85 | -------------------------------------------------------------------------------- /src/actions/use_cluster.sh: -------------------------------------------------------------------------------- 1 | # use a cluster 2 | use_cluster() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | 5 | echo "export KUBECONFIG=${clusterdir}/auth/kubeconfig" 6 | echo "export OCP4_VERSION=${INSTALLOPTS[version]}" 7 | } 8 | 9 | -------------------------------------------------------------------------------- /src/config/00_directories.sh: -------------------------------------------------------------------------------- 1 | # some directories 2 | __scriptdir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd ) 3 | __basedir=~/.local/ocp4 4 | __configdir=${HOME}/.config/openshift-install-wrapper/config 5 | __bindir=${__basedir}/bin 6 | __clustersdir=${__basedir}/clusters 7 | 8 | 
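# Note: "make install" rewrites the __basedir default above to the configured
# TARGETDIR (see the create-binary-wrappers target in the Makefile).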
-------------------------------------------------------------------------------- /src/config/10_defaults.sh: -------------------------------------------------------------------------------- 1 | # binaries url 2 | __baseurl=https://mirror.openshift.com/pub/openshift-v4/clients/ocp 3 | 4 | # defaults 5 | VERSION=1.7.0 6 | VERBOSE=0 7 | QUIET=0 8 | FORCE=0 9 | TMPDIR= 10 | declare -A CONFIGFILES 11 | declare -A INSTALLOPTS 12 | declare -A INSTALLTEMPLATES 13 | 14 | -------------------------------------------------------------------------------- /src/config/20_credentials.sh: -------------------------------------------------------------------------------- 1 | # sample credentials files 2 | CONFIGFILES[aws]="${HOME}/.aws/credentials;W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gMTIzNDU2Nzg5MEFCQ0RFRkdISUoKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gMTIzNDU2Nzg5MEFCQ0RFRkdISUpLTE1OT2FiY2RlZmdoaWprbG1ubwo=" 3 | CONFIGFILES[azure]="${HOME}/.azure/osServicePrincipal.json;eyJzdWJzY3JpcHRpb25JZCI6IjEyMzQ1YWJjLTEyYWItMTJhYi0xMmFiLTEyMzQ1NmFiY2RlZiIsImNsaWVudElkIjoiMTIzNGFiY2QtMTJhYi0xMmFiLTEyYWItMTIzNDU2YWJjZGVmIiwiY2xpZW50U2VjcmV0IjoiMUFfMTIzNDU2YWJjZGVmZzEyMzQ1NTZhYmMuW1x1MDAzY1x1MDAyNmRAWkoja1x1MDAzZSIsInRlbmFudElkIjoiMTIzNDVhYmMtMTJhYi0xMmFiLTEyYWItMTIzNDU2YWJjZGVmIn0K" 4 | CONFIGFILES[ovirt]="${HOME}/.ovirt/ovirt-config.yaml;b3ZpcnRfdXJsOiBodHRwczovL09WSVJUX0ZRRE4vb3ZpcnQtZW5naW5lL2FwaQpvdmlydF91c2VybmFtZTogYWRtaW5AaW50ZXJuYWwKb3ZpcnRfcGFzc3dvcmQ6IHNlY3JldFBhc3N3b3JkCm92aXJ0X2luc2VjdXJlOiB0cnVlCg==" 5 | CONFIGFILES[openstack]="${HOME}/.config/openstack/clouds.yaml;Y2xvdWRzOgogIHNoaWZ0c3RhY2s6CiAgICBhdXRoOgogICAgICBhdXRoX3VybDogaHR0cDovLzEwLjEwLjE0LjQyOjUwMDAvdjMKICAgICAgcHJvamVjdF9uYW1lOiBzaGlmdHN0YWNrCiAgICAgIHVzZXJuYW1lOiBzaGlmdHN0YWNrX3VzZXIKICAgICAgcGFzc3dvcmQ6IFhYWAogICAgICB1c2VyX2RvbWFpbl9uYW1lOiBEZWZhdWx0CiAgICAgIHByb2plY3RfZG9tYWluX25hbWU6IERlZmF1bHQK" 6 | CONFIGFILES[gcp]="${HOME}/.gcp/osServiceAccount.json;ewogICJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAicHJvamVjdC1pZCIsCiAgInByaXZhdGVfa2V5X2lkIjogImtleS1pZCIsCiAgInByaXZhdGVfa2V5IjogIi0tLS0tQkVHSU4gUFJJVkFURSBLRVktLS0tLVxucHJpdmF0ZS1rZXlcbi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbiIsCiAgImNsaWVudF9lbWFpbCI6ICJzZXJ2aWNlLWFjY291bnQtZW1haWwiLAogICJjbGllbnRfaWQiOiAiY2xpZW50LWlkIiwKICAiYXV0aF91cmkiOiAiaHR0cHM6Ly9hY2NvdW50cy5nb29nbGUuY29tL28vb2F1dGgyL2F1dGgiLAogICJ0b2tlbl91cmkiOiAiaHR0cHM6Ly9hY2NvdW50cy5nb29nbGUuY29tL28vb2F1dGgyL3Rva2VuIiwKICAiYXV0aF9wcm92aWRlcl94NTA5X2NlcnRfdXJsIjogImh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL29hdXRoMi92MS9jZXJ0cyIsCiAgImNsaWVudF94NTA5X2NlcnRfdXJsIjogImh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3JvYm90L3YxL21ldGFkYXRhL3g1MDkvc2VydmljZS1hY2NvdW50LWVtYWlsIgp9Cgo=" 7 | 8 | -------------------------------------------------------------------------------- /src/config/40_scripts.sh: -------------------------------------------------------------------------------- 1 | # custom scripts 2 | test() { for param in ${@}; do echo -e ${param}"\n-----"; done } 3 | #__CUSTOM_SCRIPTS__ 4 | 5 | -------------------------------------------------------------------------------- /src/config/templates/.merge-templates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | output_file="../30_templates.sh" 4 | echo "# install-config templates" > "$output_file" 5 | 6 | for platform_dir in $(find . -mindepth 1 -maxdepth 1 -type d); do 7 | platform=$(basename "$platform_dir") 8 | if [[ ! -f "$platform_dir/default.yaml" ]]; then 9 | echo "Error: No default.yaml template for $platform platform. 
Fix the templates and try again." 10 | exit 11 | fi 12 | done 13 | 14 | for file in $(find . -type f -name "*.yaml"); do 15 | platform=$(basename $(dirname "$file")) 16 | filename=$(basename "$file" .yaml) 17 | key="${platform}-${filename}" 18 | 19 | base64_content=$(base64 -w 0 "$file") 20 | echo "INSTALLTEMPLATES[$key]=\"$base64_content\"" >> "$output_file" 21 | done 22 | echo "" >> "$output_file" -------------------------------------------------------------------------------- /src/config/templates/aws/default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: DOMAIN 3 | compute: 4 | - hyperthreading: Enabled 5 | name: worker 6 | platform: 7 | aws: 8 | type: m5.2xlarge 9 | replicas: WORKER-REPLICAS 10 | controlPlane: 11 | hyperthreading: Enabled 12 | name: master 13 | platform: {} 14 | replicas: MASTER-REPLICAS 15 | metadata: 16 | creationTimestamp: null 17 | name: NAME 18 | networking: 19 | clusterNetwork: 20 | - cidr: 10.128.0.0/14 21 | hostPrefix: 23 22 | machineNetwork: 23 | - cidr: MACHINE-NETWORK 24 | networkType: NETWORK-TYPE 25 | serviceNetwork: 26 | - 172.30.0.0/16 27 | platform: 28 | aws: 29 | region: REGION 30 | userTags: TAGS 31 | publish: External 32 | -------------------------------------------------------------------------------- /src/config/templates/azure/default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: DOMAIN 3 | compute: 4 | - architecture: amd64 5 | hyperthreading: Enabled 6 | name: worker 7 | platform: {} 8 | replicas: WORKER-REPLICAS 9 | controlPlane: 10 | architecture: amd64 11 | hyperthreading: Enabled 12 | name: master 13 | platform: {} 14 | replicas: MASTER-REPLICAS 15 | metadata: 16 | creationTimestamp: null 17 | name: NAME 18 | networking: 19 | clusterNetwork: 20 | - cidr: 10.128.0.0/14 21 | hostPrefix: 23 22 | machineNetwork: 23 | - cidr: MACHINE-NETWORK 24 | networkType: NETWORK-TYPE 25 | serviceNetwork: 26 | - 172.30.0.0/16 27 | platform: 28 | azure: 29 | baseDomainResourceGroupName: RESOURCEGROUP 30 | region: REGION 31 | publish: External 32 | -------------------------------------------------------------------------------- /src/config/templates/gcp/default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: DOMAIN 3 | compute: 4 | - architecture: amd64 5 | hyperthreading: Enabled 6 | name: worker 7 | platform: {} 8 | replicas: WORKER-REPLICAS 9 | controlPlane: 10 | architecture: amd64 11 | hyperthreading: Enabled 12 | name: master 13 | platform: {} 14 | replicas: MASTER-REPLICAS 15 | metadata: 16 | name: NAME 17 | networking: 18 | clusterNetwork: 19 | - cidr: 10.128.0.0/14 20 | hostPrefix: 23 21 | machineNetwork: 22 | - cidr: MACHINE-NETWORK 23 | networkType: NETWORK-TYPE 24 | serviceNetwork: 25 | - 172.30.0.0/16 26 | platform: 27 | gcp: 28 | projectID: GCP-PROJECT-ID 29 | region: REGION 30 | -------------------------------------------------------------------------------- /src/config/templates/openstack/default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: DOMAIN 3 | compute: 4 | - architecture: amd64 5 | hyperthreading: Enabled 6 | name: worker 7 | platform: {} 8 | replicas: WORKER-REPLICAS 9 | controlPlane: 10 | architecture: amd64 11 | hyperthreading: Enabled 12 | name: master 13 | platform: {} 14 | replicas: MASTER-REPLICAS 15 | metadata: 16 | name: NAME 17 | 
networking: 18 | clusterNetwork: 19 | - cidr: 10.128.0.0/14 20 | hostPrefix: 23 21 | machineNetwork: 22 | - cidr: MACHINE-NETWORK 23 | networkType: NETWORK-TYPE 24 | serviceNetwork: 25 | - 172.30.0.0/16 26 | platform: 27 | openstack: 28 | cloud: OSP-CLOUD 29 | computeFlavor: OSP-OS-FLAVOR 30 | externalNetwork: OSP-EXT-NETWORK 31 | clusterOSImage: OSP-OS-IMAGE 32 | lbFloatingIP: OSP-API-FIP 33 | ingressFloatingIP: OSP-INGRESS-FIP 34 | -------------------------------------------------------------------------------- /src/config/templates/ovirt/default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: DOMAIN 3 | controlPlane: 4 | name: master 5 | platform: 6 | ovirt: 7 | cpu: 8 | cores: 4 9 | sockets: 2 10 | memoryMB: 16384 11 | osDisk: 12 | sizeGB: 50 13 | vmType: server 14 | replicas: MASTER-REPLICAS 15 | compute: 16 | - name: worker 17 | platform: 18 | ovirt: 19 | cpu: 20 | cores: 4 21 | sockets: 4 22 | memoryMB: 16384 23 | osDisk: 24 | sizeGB: 50 25 | vmType: server 26 | replicas: WORKER-REPLICAS 27 | metadata: 28 | name: NAME 29 | platform: 30 | ovirt: 31 | api_vip: OVIRT-VIP-API 32 | ingress_vip: OVIRT-VIP-INGRESS 33 | dns_vip: OVIRT-VIP-DNS 34 | ovirt_cluster_id: OVIRT-CLUSTER 35 | ovirt_storage_domain_id: OVIRT-STORAGEDOMAN 36 | ovirt_network_name: OVIRT-NETWORK 37 | 38 | -------------------------------------------------------------------------------- /src/config/templates/vsphere/4.16.yaml: -------------------------------------------------------------------------------- 1 | additionalTrustBundlePolicy: Proxyonly 2 | apiVersion: v1 3 | baseDomain: DOMAIN 4 | compute: 5 | - architecture: amd64 6 | hyperthreading: Enabled 7 | name: worker 8 | platform: 9 | vsphere: 10 | osDisk: 11 | diskSizeGB: VSPHERE-DISK-SIZE-GB 12 | replicas: WORKER-REPLICAS 13 | controlPlane: 14 | architecture: amd64 15 | hyperthreading: Enabled 16 | name: master 17 | platform: 18 | vsphere: 19 | osDisk: 20 | diskSizeGB: VSPHERE-DISK-SIZE-GB 21 | replicas: MASTER-REPLICAS 22 | metadata: 23 | creationTimestamp: null 24 | name: NAME 25 | networking: 26 | clusterNetwork: 27 | - cidr: 10.128.0.0/14 28 | hostPrefix: 23 29 | machineNetwork: 30 | - cidr: MACHINE-NETWORK 31 | networkType: OVNKubernetes 32 | serviceNetwork: 33 | - 172.30.0.0/16 34 | platform: 35 | vsphere: 36 | apiVIPs: 37 | - VSPHERE-VIP-API 38 | failureDomains: 39 | - name: generated-failure-domain 40 | region: generated-region 41 | server: VSPHERE-VCENTER 42 | topology: 43 | computeCluster: /VSPHERE-DATACENTER/host/VSPHERE-CLUSTER 44 | datacenter: VSPHERE-DATACENTER 45 | datastore: /VSPHERE-DATACENTER/datastore/VSPHERE-DATASTORE 46 | networks: 47 | - VSPHERE-NETWORK 48 | resourcePool: /VSPHERE-DATACENTER/host/VSPHERE-CLUSTER//Resources 49 | zone: generated-zone 50 | ingressVIPs: 51 | - VSPHERE-VIP-INGRESS 52 | vcenters: 53 | - datacenters: 54 | - VSPHERE-DATACENTER 55 | password: VSPHERE-PASSWORD 56 | port: VSPHERE-VCENTER-PORT 57 | server: VSPHERE-VCENTER 58 | user: VSPHERE-USER 59 | publish: External 60 | -------------------------------------------------------------------------------- /src/config/templates/vsphere/default.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: DOMAIN 3 | compute: 4 | - architecture: amd64 5 | hyperthreading: Enabled 6 | name: worker 7 | platform: 8 | vsphere: 9 | osDisk: 10 | diskSizeGB: VSPHERE-DISK-SIZE-GB 11 | replicas: WORKER-REPLICAS 12 | controlPlane: 13 | architecture: amd64 14 
| hyperthreading: Enabled 15 | name: master 16 | platform: 17 | vsphere: 18 | osDisk: 19 | diskSizeGB: VSPHERE-DISK-SIZE-GB 20 | replicas: MASTER-REPLICAS 21 | metadata: 22 | creationTimestamp: null 23 | name: NAME 24 | networking: 25 | clusterNetwork: 26 | - cidr: 10.128.0.0/14 27 | hostPrefix: 23 28 | machineNetwork: 29 | - cidr: MACHINE-NETWORK 30 | networkType: NETWORK-TYPE 31 | serviceNetwork: 32 | - 172.30.0.0/16 33 | platform: 34 | vsphere: 35 | apiVIP: VSPHERE-VIP-API 36 | cluster: VSPHERE-CLUSTER 37 | datacenter: VSPHERE-DATACENTER 38 | defaultDatastore: VSPHERE-DATASTORE 39 | ingressVIP: VSPHERE-VIP-INGRESS 40 | network: VSPHERE-NETWORK 41 | password: VSPHERE-PASSWORD 42 | username: VSPHERE-USER 43 | vCenter: VSPHERE-VCENTER 44 | publish: External 45 | -------------------------------------------------------------------------------- /src/control/00_init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Description: Script to run IPI installations from OpenShift 4 where supported 4 | # Author: Sergio Garcia (soukron@gmbros.net) 5 | # Source/License: https://github.com/soukron/openshift-install-wrapper 6 | 7 | # exit immediately on error 8 | set -e 9 | 10 | # detect whether output is piped or not. 11 | [[ -t 1 ]] && PIPED=0 || PIPED=1 12 | 13 | -------------------------------------------------------------------------------- /src/control/99_execute.sh: -------------------------------------------------------------------------------- 1 | # cleanup on exit 2 | cleanup_on_exit() { 3 | [[ ! -v KEEPTMP ]] && rm -fr ${TMPDIR} 4 | popd &>/dev/null 5 | kill 0 6 | } 7 | safe_exit() { 8 | trap - INT TERM EXIT 9 | exit 10 | } 11 | trap cleanup_on_exit INT TERM EXIT 12 | main "$@" 13 | -------------------------------------------------------------------------------- /src/helpers/00_output.sh: -------------------------------------------------------------------------------- 1 | # output helpers 2 | out() { 3 | [[ ${QUIET} -eq 1 ]] && return 4 | 5 | local message="$@" 6 | if ((PIPED)); then 7 | message=$(echo $message | sed ' 8 | s/\\[0-9]\{3\}\[[0-9]\(;[0-9]\{2\}\)\?m//g; 9 | s/✖/Error:/g; 10 | s/✔/Success:/g; 11 | ') 12 | fi 13 | printf '%b\n' "$message"; 14 | } 15 | die() { err "$@ Exiting..."; exit 1; } >&2 16 | err() { out " \033[1;31m✖\033[0m $@"; } >&2 17 | success() { out " \033[1;32m✔\033[0m $@"; } 18 | verbose() { [[ ${VERBOSE} -eq 1 ]] && out "$@" || true; } 19 | 20 | -------------------------------------------------------------------------------- /src/helpers/10_oc.sh: -------------------------------------------------------------------------------- 1 | # oc helper to run commands in a cluster 2 | oc() { 3 | if [[ ${VERBOSE} -eq 1 ]]; then 4 | ${client} --kubeconfig ${clusterdir}/auth/kubeconfig "$@" -v=6 5 | else 6 | if [[ ${QUIET} -eq 1 ]]; then 7 | ${client} --kubeconfig ${clusterdir}/auth/kubeconfig "$@" &>/dev/null 8 | else 9 | ${client} --kubeconfig ${clusterdir}/auth/kubeconfig "$@" 10 | fi 11 | fi 12 | } 13 | 14 | -------------------------------------------------------------------------------- /src/helpers/20_parse_args.sh: -------------------------------------------------------------------------------- 1 | # parse a string to export variables defined on it 2 | parse_args_as_variables() { 3 | if [[ ${1} == *"="* ]]; then 4 | IFS=':' read -r -a args <<< $( echo "${1}" | cut -d= -f2- ) 5 | for index in "${!args[@]}"; do 6 | export ${args[$index]%%=*}=${args[$index]##*=} 7 | done 8 | fi 9 | } 10 | 11 | 
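# Example (a sketch of the expected behaviour, not part of the wrapper itself):
#   parse_args_as_variables "deploy-rhsso=namespace=rhsso:version=4.2.2"
# exports namespace=rhsso and version=4.2.2, so a customization script can then
# reference ${namespace} and ${version} directly.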
-------------------------------------------------------------------------------- /src/helpers/30_create_install_config.sh: -------------------------------------------------------------------------------- 1 | # create install_config.yaml 2 | create_install_config() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | 5 | out "→ Checking if the cluster directory already exists..." 6 | verbose " Directory: ${clusterdir}" 7 | if [ -d ${clusterdir} ]; then 8 | if [[ ${FORCE} -eq 0 ]]; then 9 | die "Directory is already present. Use --force to overwrite it (use with caution) or remove it manually before trying again." 10 | else 11 | out "→ Cleaning up existing directory's content..." 12 | rm -fr ${clusterdir} 13 | fi 14 | fi 15 | 16 | mkdir -p ${clusterdir} 17 | out "→ Creating install-config.yaml file..." 18 | key=${INSTALLOPTS[platform]}-${INSTALLOPTS[version]} 19 | if [[ -n "${INSTALLTEMPLATES[$key]}" ]]; then 20 | success "Using specific install-config template for ${key}..." 21 | installtemplate="${INSTALLTEMPLATES[$key]}" 22 | else 23 | minor_version="${INSTALLOPTS[version]%.*}" 24 | key="${INSTALLOPTS[platform]}-${minor_version}" 25 | 26 | if [[ -n "${INSTALLTEMPLATES[$key]}" ]]; then 27 | success "Using specific install-config template for ${key}..." 28 | installtemplate="${INSTALLTEMPLATES[$key]}" 29 | else 30 | key="${INSTALLOPTS[platform]}-default" 31 | 32 | if [[ -n "${INSTALLTEMPLATES[$key]}" ]]; then 33 | success "Using default install-config template for ${INSTALLOPTS[platform]}..." 34 | installtemplate="${INSTALLTEMPLATES[$key]}" 35 | else 36 | die "Unable to find a valid install-config template for the given platform/version." 37 | fi 38 | fi 39 | fi 40 | 41 | echo ${installtemplate} | base64 -d > ${clusterdir}/install-config.yaml 42 | 43 | sed -i "s/NAME/${INSTALLOPTS[name]}/g;" ${clusterdir}/install-config.yaml 44 | sed -i "s/DOMAIN/${INSTALLOPTS[domain]}/g;" ${clusterdir}/install-config.yaml 45 | sed -i "s/REGION/${INSTALLOPTS[region]}/g;" ${clusterdir}/install-config.yaml 46 | sed -i "s/WORKER-REPLICAS/${INSTALLOPTS[worker-replicas]:-3}/g;" ${clusterdir}/install-config.yaml 47 | sed -i "s/MASTER-REPLICAS/${INSTALLOPTS[master-replicas]:-3}/g;" ${clusterdir}/install-config.yaml 48 | sed -i "s/NETWORK-TYPE/${INSTALLOPTS[network-type]:-OpenShiftSDN}/g;" ${clusterdir}/install-config.yaml 49 | sed -i "s#MACHINE-NETWORK#${INSTALLOPTS[machine-network]:-10.0.0.0/16}#g;" ${clusterdir}/install-config.yaml 50 | if [[ ${INSTALLOPTS[platform]} == "aws" ]]; then 51 | sed -i "/TAGS/ { 52 | s/ TAGS/${INSTALLOPTS[tags]:-" {}"}/; 53 | s/,/\n /g; 54 | s/=/: /g; 55 | }" ${clusterdir}/install-config.yaml 56 | fi 57 | if [[ ${INSTALLOPTS[platform]} == "vsphere" ]]; then 58 | sed -i "s/VSPHERE-VIP-API/${INSTALLOPTS[vsphere-vip-api]}/g;" ${clusterdir}/install-config.yaml 59 | sed -i "s/VSPHERE-CLUSTER/${INSTALLOPTS[vsphere-cluster]}/g;" ${clusterdir}/install-config.yaml 60 | sed -i "s/VSPHERE-DATACENTER/${INSTALLOPTS[vsphere-datacenter]}/g;" ${clusterdir}/install-config.yaml 61 | sed -i "s/VSPHERE-DATASTORE/${INSTALLOPTS[vsphere-datastore]}/g;" ${clusterdir}/install-config.yaml 62 | sed -i "s/VSPHERE-VIP-INGRESS/${INSTALLOPTS[vsphere-vip-ingress]}/g;" ${clusterdir}/install-config.yaml 63 | sed -i "s/VSPHERE-NETWORK/${INSTALLOPTS[vsphere-network]}/g;" ${clusterdir}/install-config.yaml 64 | sed -i "s/VSPHERE-PASSWORD/${INSTALLOPTS[vsphere-password]}/g;" ${clusterdir}/install-config.yaml 65 | sed -i "s/VSPHERE-USER/${INSTALLOPTS[vsphere-username]}/g;" 
${clusterdir}/install-config.yaml 66 | sed -i "s/VSPHERE-VCENTER-PORT/${INSTALLOPTS[vsphere-vcenter-port]}/g;" ${clusterdir}/install-config.yaml 67 | sed -i "s/VSPHERE-VCENTER/${INSTALLOPTS[vsphere-vcenter]}/g;" ${clusterdir}/install-config.yaml 68 | sed -i "s/VSPHERE-DISK-SIZE-GB/${INSTALLOPTS[vsphere-disk-size-gb]:-120}/g;" ${clusterdir}/install-config.yaml 69 | fi 70 | if [[ ${INSTALLOPTS[platform]} == "ovirt" ]]; then 71 | sed -i "s/OVIRT-VIP-API/${INSTALLOPTS[ovirt-vip-api]}/g;" ${clusterdir}/install-config.yaml 72 | sed -i "s/OVIRT-VIP-DNS/${INSTALLOPTS[ovirt-vip-dns]}/g;" ${clusterdir}/install-config.yaml 73 | sed -i "s/OVIRT-VIP-INGRESS/${INSTALLOPTS[ovirt-vip-ingress]}/g;" ${clusterdir}/install-config.yaml 74 | sed -i "s/OVIRT-CLUSTER/${INSTALLOPTS[ovirt-cluster]}/g;" ${clusterdir}/install-config.yaml 75 | sed -i "s/OVIRT-STORAGEDOMAN/${INSTALLOPTS[ovirt-storagedomain]}/g;" ${clusterdir}/install-config.yaml 76 | sed -i "s/OVIRT-NETWORK/${INSTALLOPTS[ovirt-network]}/g;" ${clusterdir}/install-config.yaml 77 | fi 78 | if [[ ${INSTALLOPTS[platform]} == "openstack" ]]; then 79 | sed -i "s/OSP-API-FIP/${INSTALLOPTS[osp-vip-api]}/g;" ${clusterdir}/install-config.yaml 80 | sed -i "s/OSP-INGRESS-FIP/${INSTALLOPTS[osp-vip-ingress]}/g;" ${clusterdir}/install-config.yaml 81 | sed -i "s/OSP-CLOUD/${INSTALLOPTS[osp-cloud]:-openstack}/g;" ${clusterdir}/install-config.yaml 82 | sed -i "s/OSP-EXT-NETWORK/${INSTALLOPTS[osp-ext-network]}/g;" ${clusterdir}/install-config.yaml 83 | sed -i "s/OSP-OS-IMAGE/${INSTALLOPTS[osp-os-image]}/g;" ${clusterdir}/install-config.yaml 84 | sed -i "s/OSP-OS-FLAVOR/${INSTALLOPTS[osp-os-flavor]:-m1.large}/g;" ${clusterdir}/install-config.yaml 85 | fi 86 | if [[ ${INSTALLOPTS[platform]} == "gcp" ]]; then 87 | sed -i "s/GCP-PROJECT-ID/${INSTALLOPTS[gcp-project-id]}/g;" ${clusterdir}/install-config.yaml 88 | sed -i "s/REGION/${INSTALLOPTS[region]}/g;" ${clusterdir}/install-config.yaml 89 | fi 90 | 91 | if [[ ! -f ${__configdir}/pull-secret.json ]]; then 92 | die "Missing pull secret in ${__configdir}/pull-secret.json. Please create the file before trying again." 93 | fi 94 | echo "pullSecret: '$(cat ${__configdir}/pull-secret.json)'" >> ${clusterdir}/install-config.yaml 95 | if [[ ! -f ${__configdir}/ssh-key.pub ]]; then 96 | die "Missing public RSA key in ${__configdir}/ssh-key.pub. Please create the file before trying again." 97 | fi 98 | echo "sshKey: $(cat ${__configdir}/ssh-key.pub)" >> ${clusterdir}/install-config.yaml 99 | 100 | if [[ ${INSTALLOPTS[platform]} == "azure" ]]; then 101 | sed -i "s/RESOURCEGROUP/${INSTALLOPTS[azure-resource-group]}/g;" ${clusterdir}/install-config.yaml 102 | fi 103 | 104 | if [[ ${EDIT_INSTALL_CONFIG} -eq 1 ]]; then 105 | verbose "Editing install-config with ${WRAPPER_EDITOR}" 106 | ${WRAPPER_EDITOR} "${clusterdir}/install-config.yaml" 107 | fi 108 | } 109 | 110 | -------------------------------------------------------------------------------- /src/helpers/40_cloud_credentials.sh: -------------------------------------------------------------------------------- 1 | # verify cloud credentials 2 | verify_cloud_credentials() { 3 | local platform=${INSTALLOPTS[platform]} 4 | local credentials=${CONFIGFILES[${platform}]%%;*} 5 | 6 | verbose " Credentials file: ${credentials}." 7 | if [[ ! -f ${credentials} ]]; then 8 | die "Error: Missing credentials file (${credentials})." 
9 | fi 10 | } 11 | 12 | # create cloud credentials 13 | create_cloud_credentials() { 14 | local platform=${INSTALLOPTS[platform]} 15 | local credentials=${CONFIGFILES[${platform}]%%;*} 16 | local content=${CONFIGFILES[${platform}]#*;} 17 | 18 | out "→ Creating target directory..." 19 | mkdir -p $( dirname ${credentials} ) 20 | verbose " Directory: $( dirname ${credentials} )." 21 | 22 | out "→ Creating sample cloud credentials file for ${platform}..." 23 | if [[ -f ${credentials} ]]; then 24 | if [[ ${FORCE} -eq 0 ]]; then 25 | die "Credentials file is already present. Use --force to overwrite it (use with caution) or remove it manually before trying again." 26 | else 27 | out "→ Cleaning up existing credentials file..." 28 | rm -fr ${credentials} 29 | fi 30 | fi 31 | 32 | echo ${content} | base64 -d > ${credentials} 33 | success "Created sample file ${credentials}. Please edit it to add the proper credentials for each provider before trying to install any cluster or it will fail." 34 | } 35 | 36 | -------------------------------------------------------------------------------- /src/helpers/50_get_cluster_version.sh: -------------------------------------------------------------------------------- 1 | # find version 2 | get_cluster_version() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | 5 | if [[ ! -z ${INSTALLOPTS[version]} ]]; then 6 | verbose "Version already defined with --version parameter to ${INSTALLOPTS[version]}. Skipping version detection..." 7 | return 8 | fi 9 | 10 | out "→ Finding version in cluster directory..." 11 | verbose " Directory: ${clusterdir}" 12 | INSTALLOPTS[version]=$(grep '="OpenShift Installer ' ${clusterdir}/.openshift_install.log 2>/dev/null |head -1 |tr -d '"'|awk '{ print $(NF) }') 13 | success "Version detected: ${INSTALLOPTS[version]}." 14 | 15 | [[ -z ${INSTALLOPTS[version]} ]] && die "Error: Can't find the installer version in the directory ${clusterdir}. Aborting." || true 16 | } 17 | 18 | -------------------------------------------------------------------------------- /src/helpers/60_get_cluster_platform.sh: -------------------------------------------------------------------------------- 1 | # find platform 2 | get_cluster_platform() { 3 | local clusterdir=${__clustersdir}/${INSTALLOPTS[name]}.${INSTALLOPTS[domain]} 4 | 5 | out "→ Finding platform in cluster directory..." 6 | verbose " Directory: ${clusterdir}" 7 | INSTALLOPTS[platform]=$(grep "^platform:" ${clusterdir}/install-config.yaml.orig -A 1 | tr -d "\n" | grep -Po '(?<=^platform: )[a-z]*') 8 | success "Platform detected: ${INSTALLOPTS[platform]}." 9 | 10 | [[ -z ${INSTALLOPTS[platform]} ]] && die "Error: Can't find the platform in the directory ${clusterdir}. Aborting." || true 11 | } 12 | 13 | -------------------------------------------------------------------------------- /src/helpers/70_validate_options.sh: -------------------------------------------------------------------------------- 1 | # check for an option 2 | require_option() { 3 | [[ ${INSTALLOPTS[${1}]} ]] || die "Error: Missing --${1} parameter, required for ${ACTION}." 4 | } 5 | 6 | # validate options depending on the chosen action 7 | validate_options() { 8 | out "→ Validating environment..."
9 | 10 | case ${ACTION} in 11 | install) 12 | require_option name 13 | require_option domain 14 | require_option version 15 | require_option platform 16 | case ${INSTALLOPTS[platform]} in 17 | aws) 18 | require_option region 19 | verify_cloud_credentials 20 | ;; 21 | azure) 22 | require_option region 23 | require_option azure-resource-group 24 | verify_cloud_credentials 25 | ;; 26 | vsphere) 27 | require_option vsphere-vcenter 28 | require_option vsphere-vcenter-port 29 | require_option vsphere-username 30 | require_option vsphere-password 31 | require_option vsphere-cluster 32 | require_option vsphere-datacenter 33 | require_option vsphere-datastore 34 | require_option vsphere-network 35 | require_option vsphere-vip-api 36 | require_option vsphere-vip-ingress 37 | ;; 38 | ovirt) 39 | require_option ovirt-cluster 40 | require_option ovirt-storagedomain 41 | require_option ovirt-network 42 | require_option ovirt-vip-api 43 | require_option ovirt-vip-ingress 44 | require_option ovirt-vip-dns 45 | ;; 46 | openstack) 47 | require_option osp-vip-api 48 | require_option osp-ext-network 49 | require_option osp-cloud 50 | ;; 51 | gcp) 52 | require_option gcp-project-id 53 | require_option region 54 | ;; 55 | *) 56 | die "Error: Platform ${INSTALLOPTS[platform]} is not yet supported by this script." 57 | ;; 58 | esac 59 | ;; 60 | destroy) 61 | require_option name 62 | require_option domain 63 | get_cluster_version 64 | get_cluster_platform 65 | if [[ ${INSTALLOPTS[platform]} == "aws" ]] || [[ ${INSTALLOPTS[platform]} == "azure" ]] ;then 66 | verify_cloud_credentials 67 | fi 68 | ;; 69 | customize|use|login) 70 | require_option name 71 | require_option domain 72 | get_cluster_version 73 | get_cluster_platform 74 | ;; 75 | list) 76 | ;; 77 | init) 78 | require_option platform 79 | ;; 80 | cleantools) 81 | ;; 82 | *) 83 | die "Error: Missing action. Please use --help, --init, --install, --customize, --destroy, --use, --login, --list or --clean-tools." 84 | ;; 85 | esac 86 | } 87 | 88 | -------------------------------------------------------------------------------- /src/scripts/.merge-scripts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # some cleanup 3 | rm -f functions names openshift-install-wrapper 4 | for _file in *; do 5 | echo - Merging ${_file} 6 | 7 | # Extract data from the file 8 | _name=$( grep -Po "^# script name: \K(.*)$" ${_file} ) 9 | _description=$( grep -Po "^# script description: \K(.*)$" ${_file} ) 10 | 11 | # Use the filename if there's no name 12 | if [ -z "${_name}" ]; then 13 | echo " Warning: Empty name.
Using filename as customization name" 14 | _name=${_file} 15 | fi 16 | 17 | # Extract the main function and rename it properly and add it to the functions file 18 | sed -n -e '/# start main/,/# end main/{ /^#.*/d;p }' ${_file} | sed "s/^main/${_name}/g" >> functions 19 | echo >> functions 20 | 21 | # Add the name and description (if any) in the names file 22 | if [ -z "${_description}" ]; then 23 | echo " Warning: Empty description" 24 | echo -e " - ${_name}" >> names 25 | else 26 | echo -e " - ${_name} - ${_description}" >> names 27 | fi 28 | done 29 | 30 | # replace the custom scripts marker with the functions file content 31 | sed -e '/\#__CUSTOM_SCRIPTS__/{r functions' -e 'd}' ../../openshift-install-wrapper > openshift-install-wrapper 32 | 33 | # replace the custom scripts names marker with the names file content 34 | sed -e '/\#__CUSTOM_SCRIPTS_NAMES__/{r names' -e 'd}' -i openshift-install-wrapper 35 | 36 | # replace original wrapper with included content 37 | mv -f openshift-install-wrapper ../../openshift-install-wrapper 38 | -------------------------------------------------------------------------------- /src/scripts/add-htpasswd-idp: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Optional fields 4 | # script name: add-htpasswd-idp 5 | # script description: Install a sample htpasswd file with admin, user and guest users 6 | 7 | # Mandatory function 8 | # start main - do not remove this line and do not change the function name 9 | main() { 10 | # check if htpasswd is available 11 | if ! command -v htpasswd &> /dev/null 12 | then 13 | err "htpasswd is required to run add-htpasswd-idp" 14 | exit 15 | fi 16 | 17 | # create cluster-admins group and bind it to cluster-admin role 18 | oc adm groups new cluster-admins \ 19 | && verbose "Group \"cluster-admins\" has been created." \ 20 | || err "Error creating \"cluster-admins\" group. Skipping." 21 | oc adm policy add-cluster-role-to-group cluster-admin cluster-admins \ 22 | && verbose "Group \"cluster-admins\" has been granted cluster-admin permissions." \ 23 | || err "Error binding \"cluster-admins\" to cluster-admin cluster role. Skipping." 24 | 25 | # parse parameters from ${1}, if any 26 | if [[ ${1} == *"="* ]]; then 27 | success "Using custom users from command line parameters." 28 | 29 | # NOTE: I tried to use bash substring extraction but failed when merging the script due to the # character 30 | IFS=':' read -r -a params <<< $( echo "${1}" | cut -d= -f2- ) 31 | 32 | for index in "${!params[@]}"; do 33 | # use -bc only for the first user 34 | [ ${index} -eq 0 ] && htpasswdargs="-bc" || htpasswdargs="-b" 35 | 36 | # add each user in the file 37 | # NOTE: I tried to use bash substring extraction but failed when merging the script 38 | user=$( echo ${params[index]} | cut -d = -f 1 ) 39 | password=$( echo ${params[index]} | cut -d = -f 2 ) 40 | htpasswd $htpasswdargs /tmp/htpasswd.tmp.$$ ${user} ${password} &>/dev/null \ 41 | && verbose "Adding user ${user} with password ${password}." \ 42 | || err "Error adding user ${user}. Skipping." 43 | 44 | # if the user is named "admin" add it to cluster-admins group 45 | if [ "${user}" == "admin" ]; then 46 | oc adm groups add-users cluster-admins admin \ 47 | && verbose "User \"admin\" has been added to cluster-admins group." \ 48 | || err "Error adding \"admin\" as a cluster-admins member. Skipping."
49 | fi 50 | done 51 | 52 | # get the base64 content and delete it 53 | HTPASSWD=$( cat /tmp/htpasswd.tmp.$$ | base64 -w 0 ) 54 | rm -fr /tmp/htpasswd.tmp.$$ 55 | else 56 | success "Adding default users: admin, user and guest." 57 | # Users: admin, user, guest 58 | # Passwords are generated based on the username 59 | # Tip: function claveOCP() { echo $1$1$1 | base64 -w8 | head -n 1; } 60 | HTPASSWD=YWRtaW46JGFwcjEkR2pDNi9TZHMkcTBWTElmaU5paE8vRXpNdmxNM2w3MAp1c2VyOiRhcHIxJDlQcGM4NnNvJE5NMEJCNFNzamNNbXgxTDlEU2FPMi8KZ3Vlc3Q6JGFwcjEkeWc1L1puTVkkZS5uSkROV0dNODlLQUVQVjVjSWY3MQo= 61 | 62 | oc adm groups add-users cluster-admins admin \ 63 | && verbose "User \"admin\" has been added to cluster-admins group." \ 64 | || err "Error adding \"admin\" as a cluster-admins member. Skipping." 65 | fi 66 | 67 | # Create htpasswd secret 68 | cat <<EOF [… secret manifest, the rest of this script, and the start of the next script are missing from this extract …] 2>&1 > /dev/null 14 | 15 | ### Check the last exit code to correctly communicate the status 16 | if [[ $? -eq 0 ]]; then 17 | success "Image registry successfully configured with emptyDir." 18 | else 19 | err "Error while configuring the image registry. For troubleshooting try to run: \"oc get configs.imageregistry.operator.openshift.io cluster -oyaml\"" 20 | fi 21 | 22 | } 23 | # end main - do not remove this line 24 | 25 | # Optionally, keep this if you want to run your script manually or for testing. 26 | main $@ 27 | -------------------------------------------------------------------------------- /src/scripts/delete-kubeadmin-user: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Optional fields 4 | # script name: delete-kubeadmin-user 5 | # script description: Removes the kubeadmin secret 6 | 7 | # Mandatory function 8 | # start main - do not remove this line and do not change the function name 9 | main() { 10 | oc delete secret kubeadmin -n kube-system \ 11 | && success "Secret kubeadmin successfully deleted." \ 12 | || err "Error deleting kubeadmin secret. It probably doesn't exist anymore. Skipping." 13 | } 14 | # end main - do not remove this line 15 | 16 | # Optionally, keep this if you want to run your script manually or for testing. 17 | main $@ 18 | -------------------------------------------------------------------------------- /src/scripts/deploy-cluster-logging: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Optional fields 4 | # script name: deploy-cluster-logging 5 | # script description: Deploy cluster logging 6 | 7 | # Mandatory function 8 | # start main - do not remove this line and do not change the function name 9 | main() { 10 | # Default values 11 | op_es_channel=$(oc -n openshift-life-cycle-operator get packagemanifest elasticsearch-operator -o jsonpath='{.status.defaultChannel}' ) 12 | op_cl_channel=$(oc -n openshift-life-cycle-operator get packagemanifest cluster-logging -o jsonpath='{.status.defaultChannel}' ) 13 | es_pods=1 14 | es_redundancy="ZeroRedundancy" 15 | es_memory="2Gi" 16 | es_cpu="200m" 17 | kibana_pods=1 18 | kibana_memory="512Mi" 19 | kibana_cpu="500m" 20 | curator_memory="200Mi" 21 | curator_cpu="200m" 22 | # parse arguments from ${1}, if any, and export them as variables 23 | parse_args_as_variables "${1}" 24 | 25 | # Create the namespace for Elasticsearch operator 26 | cat << EOF | oc apply -f - \ 27 | && success "Namespace \"openshift-operators-redhat\" created successfully." \ 28 | || die "Error creating \"openshift-operators-redhat\" namespace."
29 | apiVersion: v1 30 | kind: Namespace 31 | metadata: 32 | name: openshift-operators-redhat 33 | annotations: 34 | openshift.io/node-selector: "" 35 | labels: 36 | openshift.io/cluster-monitoring: "true" 37 | EOF 38 | 39 | # Create the namespace for cluster logging operator 40 | cat << EOF | oc apply -f - \ 41 | && success "Namespace \"openshift-logging\" created successfully." \ 42 | || die "Error creating \"openshift-logging\" namespace." 43 | apiVersion: v1 44 | kind: Namespace 45 | metadata: 46 | name: openshift-logging 47 | annotations: 48 | openshift.io/node-selector: "" 49 | labels: 50 | openshift.io/cluster-monitoring: "true" 51 | EOF 52 | 53 | # Create operator group for Elasticsearch operator 54 | cat << EOF | oc apply -f - \ 55 | && success "Operator group for Elasticsearch Operator created successfully." \ 56 | || die "Error creating the Operator group for Elasticsearch Operator." 57 | apiVersion: operators.coreos.com/v1 58 | kind: OperatorGroup 59 | metadata: 60 | name: openshift-operators-redhat 61 | namespace: openshift-operators-redhat 62 | spec: {} 63 | EOF 64 | 65 | # Create a subscription for Elasticsearch operator 66 | cat << EOF | oc apply -f - \ 67 | && success "Subscription for Elasticsearch Operator created successfully." \ 68 | || die "Error creating the subscription for Elasticsearch Operator." 69 | apiVersion: operators.coreos.com/v1alpha1 70 | kind: Subscription 71 | metadata: 72 | name: "elasticsearch-operator" 73 | namespace: "openshift-operators-redhat" 74 | spec: 75 | channel: "${op_es_channel}" 76 | installPlanApproval: "Automatic" 77 | source: "redhat-operators" 78 | sourceNamespace: "openshift-marketplace" 79 | name: "elasticsearch-operator" 80 | EOF 81 | 82 | # Create operator group for Cluster Logging operator 83 | cat << EOF | oc apply -f - \ 84 | && success "Operator group for Cluster Logging Operator created successfully." \ 85 | || die "Error creating the Operator group for Cluster Logging Operator." 86 | apiVersion: operators.coreos.com/v1 87 | kind: OperatorGroup 88 | metadata: 89 | name: cluster-logging 90 | namespace: openshift-logging 91 | spec: {} 92 | EOF 93 | 94 | # Create a subscription for Cluster Logging operator 95 | cat << EOF | oc apply -f - \ 96 | && success "Subscription for Cluster Logging Operator created successfully." \ 97 | || die "Error creating the subscription for Cluster Logging Operator." 98 | apiVersion: operators.coreos.com/v1alpha1 99 | kind: Subscription 100 | metadata: 101 | name: "cluster-logging" 102 | namespace: "openshift-logging" 103 | spec: 104 | channel: "${op_cl_channel}" 105 | source: "redhat-operators" 106 | sourceNamespace: "openshift-marketplace" 107 | name: "cluster-logging" 108 | EOF 109 | 110 | verbose "Pausing for 5 seconds..." 111 | sleep 5 112 | 113 | # Create Cluster Logging instance 114 | cat << EOF | oc apply -f - \ 115 | && success "Cluster Logging instance created successfully." \ 116 | || die "Error creating Cluster Logging instance."
117 | apiVersion: "logging.openshift.io/v1" 118 | kind: "ClusterLogging" 119 | metadata: 120 | name: "instance" 121 | namespace: "openshift-logging" 122 | spec: 123 | managementState: "Managed" 124 | logStore: 125 | type: "elasticsearch" 126 | elasticsearch: 127 | nodeCount: ${es_pods} 128 | resources: 129 | limits: 130 | memory: ${es_memory} 131 | requests: 132 | cpu: ${es_cpu} 133 | memory: ${es_memory} 134 | storage: {} 135 | redundancyPolicy: ${es_redundancy} 136 | visualization: 137 | type: "kibana" 138 | kibana: 139 | resources: 140 | limits: 141 | memory: ${kibana_memory} 142 | requests: 143 | cpu: ${kibana_cpu} 144 | memory: ${kibana_memory} 145 | replicas: ${kibana_pods} 146 | curation: 147 | type: "curator" 148 | curator: 149 | resources: 150 | limits: 151 | memory: ${curator_memory} 152 | requests: 153 | cpu: ${curator_cpu} 154 | memory: ${curator_memory} 155 | schedule: "*/5 * * * *" 156 | collection: 157 | type: "vector" 158 | EOF 159 | } 160 | # end main - do not remove this line 161 | 162 | # Optionally, keep this if you want to run your script manually or for testing. 163 | main $@ 164 | -------------------------------------------------------------------------------- /src/scripts/deploy-pipelines: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Optional fields 4 | # script name: deploy-pipelines 5 | # script description: Installs Pipelines operator 6 | 7 | # Mandatory function 8 | # start main - do not remove this line and do not change the function name 9 | main() { 10 | # default values 11 | version=1.1.2 12 | channel=preview 13 | approval=Automatic 14 | 15 | # parse arguments from ${1}, if any, and export them as variables 16 | parse_args_as_variables "${1}" 17 | 18 | # Create the subscription 19 | cat <<EOF [… subscription manifest, the rest of this script, and the opening lines of the next script are missing from this extract …] op_rh_ns=$(oc get ns openshift-operators-redhat > /dev/null 2>&1 && echo true || echo false ) 14 | op_es_channel=$(oc -n openshift-life-cycle-operator get packagemanifest elasticsearch-operator -o jsonpath='{.status.defaultChannel}' ) 15 | op_es_sub=$(oc get sub elasticsearch-operator -n openshift-operators-redhat > /dev/null 2>&1 && echo true || echo false ) 16 | 17 | out "→ Configuring openshift-operators-redhat project" 18 | if [[ "${op_rh_ns}" != "true" ]];then 19 | cat << EOF |oc apply -f - > /dev/null 2>&1 || die "Not able to create Namespace 'openshift-operators-redhat'" 20 | apiVersion: v1 21 | kind: Namespace 22 | metadata: 23 | name: openshift-operators-redhat 24 | labels: 25 | openshift.io/cluster-monitoring: "true" 26 | EOF 27 | cat << EOF |oc apply -f - > /dev/null 2>&1 || die "Not able to create OperatorGroup 'openshift-operators-redhat'" 28 | apiVersion: operators.coreos.com/v1 29 | kind: OperatorGroup 30 | metadata: 31 | name: openshift-operators-redhat 32 | namespace: openshift-operators-redhat 33 | spec: {} 34 | EOF 35 | else 36 | success "openshift-operators-redhat project already exists." 37 | fi 38 | 39 | out "→ Installing Elasticsearch operator" 40 | if [[ "${op_es_sub}" != "true" ]];then 41 | cat << EOF | oc apply -f - > /dev/null 2>&1 || die "Not able to create Subscription 'elasticsearch-operator'" 42 | apiVersion: operators.coreos.com/v1alpha1 43 | kind: Subscription 44 | metadata: 45 | name: "elasticsearch-operator" 46 | namespace: "openshift-operators-redhat" 47 | spec: 48 | channel: "${op_es_channel}" 49 | installPlanApproval: "Automatic" 50 | source: "redhat-operators" 51 | sourceNamespace: "openshift-marketplace" 52 | name: "elasticsearch-operator" 53 | EOF 54 | else 55 | success "Elasticsearch operator already installed."
56 | fi 57 | 58 | 59 | ########## Install Jaeger operator 60 | out "→ Installing Jaeger operator" 61 | op_jg_sub=$(oc get sub jaeger-product -n openshift-operators > /dev/null 2>&1 && echo true || echo false ) 62 | if [[ "${op_jg_sub}" != "true" ]];then 63 | cat << EOF | oc apply -f - > /dev/null 2>&1 || die "Not able to create Subscription 'jaeger-product'" 64 | apiVersion: operators.coreos.com/v1alpha1 65 | kind: Subscription 66 | metadata: 67 | name: "jaeger-product" 68 | namespace: "openshift-operators" 69 | spec: 70 | channel: "stable" 71 | installPlanApproval: "Automatic" 72 | source: "redhat-operators" 73 | sourceNamespace: "openshift-marketplace" 74 | name: "jaeger-product" 75 | EOF 76 | else 77 | success "Jaeger operator already installed." 78 | fi 79 | 80 | ########## Install Kiali operator 81 | out "→ Installing Kiali operator" 82 | op_kiali_sub=$(oc get sub kiali-ossm -n openshift-operators > /dev/null 2>&1 && echo true || echo false ) 83 | if [[ "${op_kiali_sub}" != "true" ]];then 84 | cat << EOF | oc apply -f - > /dev/null 2>&1 || die "Not able to create Subscription 'kiali-ossm'" 85 | apiVersion: operators.coreos.com/v1alpha1 86 | kind: Subscription 87 | metadata: 88 | name: "kiali-ossm" 89 | namespace: "openshift-operators" 90 | spec: 91 | channel: "stable" 92 | installPlanApproval: "Automatic" 93 | source: "redhat-operators" 94 | sourceNamespace: "openshift-marketplace" 95 | name: "kiali-ossm" 96 | EOF 97 | else 98 | success "Kiali operator already installed." 99 | fi 100 | 101 | 102 | ########## Install Service Mesh operator 103 | out "→ Installing Service Mesh operator" 104 | op_sm_sub=$(oc get sub servicemeshoperator -n openshift-operators > /dev/null 2>&1 && echo true || echo false ) 105 | if [[ "${op_sm_sub}" != "true" ]];then 106 | cat << EOF | oc apply -f - > /dev/null 2>&1 || die "Not able to create Subscription 'servicemeshoperator'" 107 | apiVersion: operators.coreos.com/v1alpha1 108 | kind: Subscription 109 | metadata: 110 | name: "servicemeshoperator" 111 | namespace: "openshift-operators" 112 | spec: 113 | channel: "stable" 114 | installPlanApproval: "Automatic" 115 | source: "redhat-operators" 116 | sourceNamespace: "openshift-marketplace" 117 | name: "servicemeshoperator" 118 | EOF 119 | 120 | out "→ Waiting up to 90 seconds for the operator installation." 121 | sleep 15 # give 15 seconds for the deployment to be created 122 | oc rollout status deploy/istio-operator -n openshift-operators --timeout=65s > /dev/null 2>&1 || die "Operator installation not completed." 123 | oc wait --for=condition=ContainersReady pod -n openshift-operators -l name=istio-operator --timeout=5s > /dev/null 2>&1 || die "Operator installation not completed." 124 | sleep 5 # 5 additional seconds to wait for the webhook to start 125 | else 126 | success "Service Mesh operator already installed." 127 | fi 128 | 129 | ########## Create istio-system namespace 130 | out "→ Configuring ${namespace:-istio-system} namespace" 131 | op_istio_ns=$(oc get ns ${namespace:-istio-system} > /dev/null 2>&1 && echo true || echo false ) 132 | if [[ "${op_istio_ns}" != "true" ]];then 133 | oc create ns ${namespace:-istio-system} > /dev/null 2>&1 || die "Something went wrong while creating the ${namespace:-istio-system} namespace" 134 | else 135 | success "${namespace:-istio-system} namespace already present."
136 | fi 137 | 138 | ########## Create SM control plane 139 | out "→ Configuring Service Mesh control plane" 140 | op_sm_cp=$(oc get smcp basic -n ${namespace:-istio-system} > /dev/null 2>&1 && echo true || echo false ) 141 | if [[ "${op_sm_cp}" != "true" ]];then 142 | cat << EOF | oc apply -f - > /dev/null 2>&1 || die "Not able to create Service Mesh control plane" 143 | apiVersion: maistra.io/v2 144 | kind: ServiceMeshControlPlane 145 | metadata: 146 | name: basic 147 | namespace: ${namespace:-istio-system} 148 | spec: 149 | addons: 150 | grafana: 151 | enabled: true 152 | jaeger: 153 | install: 154 | storage: 155 | type: Memory 156 | kiali: 157 | enabled: true 158 | prometheus: 159 | enabled: true 160 | policy: 161 | type: ${policy:-Istiod} 162 | telemetry: 163 | type: ${policy:-Istiod} 164 | tracing: 165 | sampling: 10000 166 | type: Jaeger 167 | version: v${version:-2.0} 168 | EOF 169 | out "→ Waiting up to 3 minutes for the Control Plane to start." 170 | oc wait smcp basic -n ${namespace:-istio-system} --for condition=Ready=True --timeout=180s > /dev/null 2>&1 || die "Service Mesh control plane NOT Ready." 171 | success "Service Mesh control plane Ready." 172 | else 173 | success "Service Mesh control plane already present." 174 | fi 175 | 176 | ########## Create SM member roll 177 | out "→ Configuring Service Mesh member roll" 178 | op_sm_mr=$(oc get smmr default -n ${namespace:-istio-system} > /dev/null 2>&1 && echo true || echo false ) 179 | if [[ "${op_sm_mr}" != "true" ]];then 180 | cat << EOF | oc apply -f - > /dev/null 2>&1 || die "Not able to create Service Mesh member roll" 181 | apiVersion: maistra.io/v1 182 | kind: ServiceMeshMemberRoll 183 | metadata: 184 | name: default 185 | namespace: ${namespace:-istio-system} 186 | spec: {} 187 | EOF 188 | else 189 | success "Service Mesh member roll already present." 190 | fi 191 | 192 | ########## Add samples - bookinfo and finish 193 | 194 | if [[ "${add_sample:-false}" == "true" ]];then 195 | out "→ Configuring samples in bookinfo namespace" 196 | oc -n ${namespace:-istio-system} patch --type='json' smmr default -p '[{"op": "add", "path": "/spec/members", "value":["'"bookinfo"'"]}]' > /dev/null 2>&1 \ 197 | || die "Something went wrong while updating the SMMR!" 198 | [[ $(oc get ns bookinfo) ]] > /dev/null 2>&1 && success "bookinfo namespace already exists" || oc create ns bookinfo > /dev/null 2>&1 199 | out "→ Applying deployments..." 200 | oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.0/samples/bookinfo/platform/kube/bookinfo.yaml > /dev/null 2>&1 201 | oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.0/samples/bookinfo/networking/bookinfo-gateway.yaml > /dev/null 2>&1 202 | oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.0/samples/bookinfo/networking/destination-rule-all-mtls.yaml > /dev/null 2>&1 203 | out "→ Waiting up to 90 seconds for the pods to start." 204 | oc rollout status deploy/productpage-v1 -n bookinfo --timeout=90s > /dev/null 2>&1 || die "Bookinfo deploy not started." 205 | export GATEWAY_URL=$(oc -n ${namespace:-istio-system} get route istio-ingressgateway -o jsonpath='{.spec.host}') 206 | APPresponse=$(curl -o /dev/null -s -w "%{http_code}\n" http://$GATEWAY_URL/productpage) 207 | [[ "${APPresponse}" == "200" ]] && success "Service Mesh operator installed with example in bookinfo namespace!" 208 | [[ "${APPresponse}" == "200" ]] || err "Service Mesh installed, but something went wrong with the sample application."
209 | else 210 | success "Service Mesh operator installed!" 211 | fi 212 | 213 | 214 | 215 | } 216 | # end main - do not remove this line 217 | 218 | # Optionally, keep this if you want to run your script manually or for testing. 219 | main "$@" 220 | --------------------------------------------------------------------------------
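Putting the pieces together, a hypothetical end-to-end session could look like the sketch below. The actions and option names (`--install`, `--customize`, `--name`, `--domain`, `--version`, `--platform`, `--region`) come from the validation code above; the `script=key=value:key=value` form for passing parameters to customization scripts is inferred from `parse_args_as_variables` rather than a documented interface, and the cluster name, domain, version and region are placeholders:

```
# Hypothetical session -- values are placeholders; the per-script parameter
# syntax is inferred from parse_args_as_variables and may differ.

# IPI install on AWS (cloud credentials, pull secret and SSH key must exist):
openshift-install-wrapper --install --name mycluster --domain example.com \
  --version 4.16.0 --platform aws --region eu-west-1

# Customize the freshly installed cluster with some of the bundled scripts:
openshift-install-wrapper --customize --name mycluster --domain example.com \
  add-htpasswd-idp=admin=secret:developer=secret \
  deploy-service-mesh=namespace=istio-system:add_sample=true
```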